diff --git "a/2985.jsonl" "b/2985.jsonl" new file mode 100644--- /dev/null +++ "b/2985.jsonl" @@ -0,0 +1,1863 @@ +{"seq_id":"72767799271","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport MITgcmutils as mit\n\nplt.ion()\n\n#dir0 = '/home/bderembl/work/MITgcm/myrun/test_kw_energetics/run/'\ndir0 = '/media/bderembl/workd/MITgcm/myrun/test_kw_energetics/run03/'\ndir1 = dir0 + 'mnc*/'\ndir2 = dir0 + 'mnc_test_0001/'\n\nfile0 = 'grid.t*'\nfile1 = 'state.*'\nfile2 = 'oceDiag.*'\n\nalphat = 2e-4\ngo = 9.81\n\n# grid\nf0 = mit.mnc_files(dir1 + file0)\n\nRC = f0.variables['RC'][:]\nDRC = f0.variables['drC'][:]\nDRF = f0.variables['drF'][:]\nRF = f0.variables['RF'][:]\n\nXC = f0.variables['XC'][:,:]\nYC = f0.variables['YC'][:,:]\n\nsi_y,si_x = XC.shape\nsi_z = RC.size\n\ndx = XC[1,1] - XC[0,0]\ndy = YC[1,1] - YC[0,0]\ndz = RC[1] - RC[0]\n\ndv = np.abs(dx*dy*dz)\n\nf2 = mit.mnc_files(dir1 + file2)\nT = f2.variables['T'][:]\nsi_t = len(T)\n\n# compute KE, PE\ncirc = np.zeros((si_t))\n\nfor nt in range (0,si_t):\n rv = f2.variables['momVort3'][nt,0,:si_y,:si_x]\n\n circ[nt] = np.sum(np.sum(rv,0),0)\n\n\ncirc = circ/(si_y*si_x)\n\nTd = T/86400\n\nnp.savetxt('mit_rv.dat',circ)\n\n\nplt.figure()\nplt.plot(T,circ[:],'k')\n\n","repo_name":"bderembl/mitgcm_configs","sub_path":"qg_kelvin/analysis/pv_budget.py","file_name":"pv_budget.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"74407160550","text":"from random import *\r\nfrom colorama import Fore, init\r\ninit()\r\n\r\nprint(Fore.GREEN)\r\nmyName = input('Привет! Как тебя зовут? ') # знакомимся с пользователем\r\nnumber = randint(1, 100)\r\nguessTaken = 0 # переменная хранит значения попыток пользователя\r\n\r\nprint(Fore.CYAN, f'{myName}, я загадал число от 1 до 100. Твоя задача угадать это число.')\r\n\r\n\r\nfor guessTaken in range(7): # считаем количество попыток\r\n print('Попробуй угадать.')\r\n guess = int(input('Вводи число: ')) # т.к. мы хотим получать только числа, сразу преобразуем тип данных\r\n\r\n if guess > number:\r\n print(guess, 'Слишком много!')\r\n print()\r\n\r\n if guess < number:\r\n print(guess, 'Слишком мало!')\r\n print()\r\n\r\n if guess == number:\r\n break\r\n\r\n\r\ndef getEnding(guessTaken):\r\n lastChars = guessTaken % 100\r\n\r\n if 2 <= lastChars <= 4:\r\n return 'ки'\r\n else:\r\n lastChars = guessTaken % 10\r\n if lastChars == 1:\r\n return 'ку'\r\n elif lastChars >= 5 <= 20:\r\n return 'ок'\r\n\r\n\r\nif guess == number:\r\n print(f'Поздравляю, {myName}, ты угадал число за {guessTaken + 1} попыт{getEnding(guessTaken)}.')\r\n\r\nif guess != number:\r\n print('К сожалению, тебе не удалось угадать. 
Я загадал число: ', number, '.')\r\n\r\n","repo_name":"GreatRaksin/GuessTheNumber","sub_path":"guessLevelProColorama.py","file_name":"guessLevelProColorama.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39024426385","text":"import requests\nimport pandas as pd\nimport numpy as np\nimport re\nfrom bs4 import BeautifulSoup\n\n\nclass Scraper:\n def __init__(self, url, reqs=None, soup=None):\n self.url = url\n self.reqs = requests.get(self.url)\n self.soup = BeautifulSoup(self.reqs.text, 'html.parser')\n\n def is_clean(self, link):\n glitch_words = [\"twitter\", \"facebook\", \"pib.gov.in\",\n \"t.co\", \"whatsapp\", \"google\", \"linkedin\"]\n for g_word in glitch_words:\n if g_word in link:\n return False\n return True\n\n def scrape_links(self):\n urls = []\n for link in self.soup.find_all('a'):\n if self.is_clean(link.get('href')):\n urls.append(link.get('href'))\n return urls\n\n\n# SCRAPING TABLES FROM WEBPAGE {SETTING THE ANCHOR AND REVERSE ITERATING}\n\n\n def scrape_tables(self):\n page = pd.read_html(self.url)\n tables = []\n for table in range(int(len(page)/2)):\n df = page[table]\n Array2d = df.to_numpy()\n Array2d = Array2d\n temp_table = Array2d.tolist()\n tables.append(temp_table)\n return tables\n\n def scrape_images(self):\n image_data = []\n images = self.soup.select('img')\n for image in images:\n src = image.get('src')\n image_data.append(src)\n final_image_data = list(set(image_data))\n return final_image_data\n\n def preprocess(self, hs):\n remove_space = re.sub(' +', ' ', hs)\n remove_n = re.sub('\\n', '', remove_space)\n remove_r = re.sub('\\r', '', remove_n)\n return remove_r\n\n def scrape_text(self):\n final_text = []\n page = pd.read_html(self.url)\n try:\n SOUP = self.soup\n for data in SOUP('tbody'):\n data.decompose()\n hs = SOUP.text\n except:\n hs = self.soup.text\n print('soup:', hs)\n final_text = self.preprocess(hs)\n val0 = re.search(\"Posted On\", final_text).span()[1]+34\n val = re.search(\"\\*\\*\", final_text).span()[0]\n return [final_text[val0:val]]\n\n def scrape_page(self):\n text = self.scrape_text()\n assets = {}\n images = self.scrape_images()\n tables = self.scrape_tables()\n imp_links = self.scrape_links()\n exported_data = {}\n assets = {\"images\": images, \"tables\": tables, \"imp_links\": imp_links}\n exported_data.update({\"text\": text[0]})\n exported_data.update({\"assets\": assets})\n exported_data.update({\"link\": self.url})\n print(exported_data)\n return exported_data\n","repo_name":"saar-official/scraper","sub_path":"scraper/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5021771911","text":"\"\"\"Class methods for report charts.\"\"\"\n\n# Standard Python Libraries\nimport os\n\n# Third-Party Libraries\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\n\nmatplotlib.use(\"Agg\")\n\n\n# Factor to convert cm to inches\nCM_CONVERSION_FACTOR = 2.54\n\n# Get base directory to save images\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\nclass Charts:\n \"\"\"Build charts.\"\"\"\n\n def __init__(self, df, width, height, name, title, x_label, y_label):\n \"\"\"Initialize chart class.\"\"\"\n self.df = df\n self.title = title\n self.x_label = x_label\n self.y_label = y_label\n self.width = width\n self.height = height\n self.name = name\n\n 
def pie(self):\n \"\"\"Build pie chart.\"\"\"\n df = self.df\n width = self.width\n height = self.height\n name = self.name\n plt.rcParams.update({\"figure.max_open_warning\": 0})\n category_name = df.columns[0]\n value_name = df.columns[1]\n df = df.sort_values(by=value_name, ascending=False)\n category_column = df[category_name]\n value_column = df[df.columns[1]]\n labels = category_column\n plt.gca().axis(\"equal\")\n\n def autopct(pct):\n \"\"\"Get percentages for the pie chart slices > 10%.\"\"\"\n return (\"%1.0f%%\" % pct) if pct > 1 else \"\"\n\n pie = plt.pie(\n value_column,\n startangle=0,\n radius=1,\n autopct=autopct,\n textprops={\"color\": \"w\", \"fontsize\": 7},\n )\n plt.legend(\n pie[0],\n labels,\n bbox_to_anchor=(1, 0.5),\n loc=\"center right\",\n fontsize=7,\n bbox_transform=plt.gcf().transFigure,\n frameon=False,\n )\n plt.subplots_adjust(left=0.2, wspace=0.2)\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n plt.savefig(\n BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500, bbox_inches=\"tight\"\n )\n plt.clf()\n\n def stacked_bar(self):\n \"\"\"Build stacked bar chart.\"\"\"\n df = self.df\n title = self.title\n x_label = self.x_label\n y_label = self.y_label\n width = self.width\n height = self.height\n name = self.name\n color = [\"#1357BE\", \"#D0342C\"]\n df.plot(kind=\"bar\", stacked=True, zorder=3, color=color)\n # Add title to chart\n plt.title(title, pad=15, fontsize=10)\n # Format chart's axis\n plt.xlabel(x_label, labelpad=10, fontdict={\"size\": 8})\n plt.ylabel(y_label, labelpad=10, fontdict={\"size\": 8})\n plt.gca().yaxis.set_major_locator(MaxNLocator(integer=True))\n plt.rc(\"axes\", axisbelow=True)\n plt.grid(axis=\"y\", zorder=0)\n plt.xticks(rotation=0)\n plt.ylim(ymin=0)\n # Set sizing for image\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n plt.tight_layout()\n # Save chart to assets directory\n plt.savefig(BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500)\n plt.clf()\n\n def h_bar(self):\n \"\"\"Build horizontal bar chart.\"\"\"\n df = self.df\n x_label = self.x_label\n y_label = self.y_label\n width = self.width\n height = self.height\n name = self.name\n plt.rcParams.update({\"figure.max_open_warning\": 0})\n category_name = df.columns[0]\n value_name = df.columns[1]\n category_column = df[category_name].str.replace(\"Vulnerable Product - \", \"\")\n value_column = df[df.columns[1]]\n bar_width = 0.6\n fig, ax = plt.subplots()\n ax.spines.right.set_visible(False)\n ax.spines.top.set_visible(False)\n # Generate horizontal bar chart\n plt.barh(df.index, value_column, bar_width, align=\"center\", color=\"#466fc6\")\n # Specify axis atributes\n plt.xticks(fontsize=7)\n plt.yticks(fontsize=7)\n plt.xlim(xmin=0)\n plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.gca().set_ylim(-1.0, len(category_column))\n plt.gca().set_yticks(df.index)\n plt.gca().set_yticklabels(category_column)\n plt.gca().set_xlabel(x_label, fontdict={\"size\": 8})\n plt.gca().set_ylabel(y_label)\n # Set sizing for image\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n plt.tight_layout()\n # Add data labels to each bar if greater than 0\n for i in range(len(df)):\n if df.loc[i, value_name] > 0:\n label = df.loc[i, value_name]\n plt.annotate(\n label, # this is the text\n (df.loc[i, value_name], i), # this is the point to label\n textcoords=\"offset points\", # how to position the text\n xytext=(7, -3), 
# distance from text to points (x,y)\n ha=\"center\", # horizontal alignment can be left, right or center\n fontsize=8,\n )\n # Save chart to assets directory\n plt.savefig(\n BASE_DIR + \"/assets/\" + name, transparent=True, dpi=500, bbox_inches=\"tight\"\n )\n plt.clf()\n\n def line_chart(self):\n \"\"\"Build line chart.\"\"\"\n df = self.df\n x_label = self.x_label\n y_label = self.y_label\n width = self.width\n height = self.height\n name = self.name\n color = [\"#7aa5c1\", \"#e08493\"]\n fig, ax = plt.subplots()\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n plt.set_loglevel(\"WARNING\")\n # Plot first line on chart\n plt.plot(\n df.index,\n df[df.columns[0]],\n color=color[0],\n label=df.columns[0],\n linewidth=3,\n marker=\".\",\n markersize=10,\n )\n # If there is another column chart the second line\n if len(df.columns) == 2:\n plt.plot(\n df.index,\n df[df.columns[1]],\n color=color[1],\n label=df.columns[1],\n linewidth=3,\n linestyle=\"dashed\",\n marker=\".\",\n markersize=10,\n )\n # Set the y-max to 110% of the max y value\n y_max = int(df[df.columns].max().max() * 1.1)\n plt.ylim(ymin=0, ymax=y_max * 1.10)\n # Place the legend in the upper right corner\n plt.legend(loc=\"upper right\")\n # Set size of the chart\n plt.gcf().set_size_inches(\n width / CM_CONVERSION_FACTOR, height / CM_CONVERSION_FACTOR\n )\n # Format tick marks and grid layout\n plt.xticks(fontsize=7)\n plt.yticks(fontsize=7)\n plt.gca().set_ylabel(y_label, labelpad=10, fontdict={\"size\": 8})\n plt.xlabel(x_label, labelpad=10, fontdict={\"size\": 8})\n plt.xticks(rotation=0)\n plt.grid(axis=\"y\")\n plt.tight_layout()\n\n # Add data labels\n # Loop through the dataframe\n for row in df.itertuples():\n # Check if there is only one row of values\n if len(row) == 2:\n plt.annotate(\n str(int(row[1])),\n xy=(row[0], row[1]),\n textcoords=\"offset points\", # Set the manner to position the text\n xytext=(\n 0,\n 8,\n ), # Distance from text to points (x,y)\n ha=\"center\", # Set horizontal alignment to center\n color=\"#003e67\",\n )\n # Check if there are two rows of data\n elif len(row) == 3:\n # Check if the two values are within 1/10th of the max y value\n value_diff = abs(row[1] - row[2])\n if value_diff < y_max / 10:\n # If the values are on the bottom quarter of the graph don't label below values\n if min(row[1], row[2]) < y_max / 4:\n y1 = y2 = max(row[1], row[2])\n if row[1] > row[2]:\n y1_offset = 18\n y2_offset = 8\n else:\n y1_offset = 8\n y2_offset = 18\n # If the values are not in the bottom quarter place the lower value below the point\n else:\n y1 = row[1]\n y2 = row[2]\n if row[1] > row[2]:\n y1_offset = 8\n y2_offset = -17\n else:\n y1_offset = -17\n y2_offset = 8\n # If values are not close to each other put the labels directly above the value\n else:\n y1 = row[1]\n y2 = row[2]\n y1_offset = 8\n y2_offset = 8\n\n # Annotate the data points\n plt.annotate(\n str(int(row[1])),\n xy=(row[0], y1),\n textcoords=\"offset points\", # Set how to position the text\n xytext=(\n 0,\n y1_offset,\n ), # Distance from text to points (x,y)\n ha=\"center\", # Horizontal alignment can be left, right or center\n color=\"#005288\",\n )\n plt.annotate(\n str(int(row[2])),\n xy=(row[0], y2),\n textcoords=\"offset points\", # Set how to position the text\n xytext=(\n 0,\n y2_offset,\n ), # Distance from text to points (x,y)\n ha=\"center\", # Set horizontal alignment to center\n # fontsize=2,\n color=\"#c41230\",\n )\n # Save chart to assets directory\n plt.savefig(\n BASE_DIR + 
\"/assets/\" + name, transparent=True, dpi=500, bbox_inches=\"tight\"\n )\n plt.clf()\n","repo_name":"cisagov/pe-reports","sub_path":"src/pe_reports/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":10390,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"71"} +{"seq_id":"34430011391","text":"import json\nimport openai\nimport argparse\nimport os\nfrom dotenv import load_dotenv\n\n# Load OpenAI API key from .env file\nload_dotenv()\nopenai.api_key = os.getenv('OPENAI_API_KEY')\n\ndef read_json_file(filename):\n \"\"\"Read a JSON file and return its contents.\"\"\"\n with open(filename, 'r') as file:\n return json.load(file)\n\ndef ask_openai(question, content):\n \"\"\"Ask a question to ChatGPT using the OpenAI API.\"\"\"\n response = openai.Completion.create(\n engine=\"text-davinci-002\",\n prompt=f\"{content}\\n\\n{question}\",\n max_tokens=150\n )\n return response.choices[0].text.strip()\n\ndef extract_details_from_json(json_content):\n \"\"\"Ask ChatGPT about details in the JSON content.\"\"\"\n # Extract ransom demand\n ransom_demand_question = \"How much was the ransom demand, answer only the figure of the amount?\"\n ransom_demand = ask_openai(ransom_demand_question, json_content)\n print(f\"Ransom Demand: {ransom_demand}\")\n\n # Extract negotiated ransom\n negotiated_ransom_question = \"How much was the negotiated ransom, answer only the figure of the amount?\"\n negotiated_ransom = ask_openai(negotiated_ransom_question, json_content)\n print(f\"Negotiated Ransom: {negotiated_ransom}\")\n\n # Check if victim paid the ransom\n paid_ransom_question = \"Did the victim pay the ransom, answer only yes or no?\"\n paid_ransom = ask_openai(paid_ransom_question, json_content)\n print(f\"Paid Ransom: {paid_ransom}\")\n\ndef main():\n parser = argparse.ArgumentParser(description='Ask questions about a provided JSON file using ChatGPT.')\n parser.add_argument('filename', help='Path to the JSON file.')\n\n args = parser.parse_args()\n json_content = read_json_file(args.filename)\n extract_details_from_json(str(json_content))\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"JMousqueton/ransomware.live","sub_path":"analyse_negotiation.py","file_name":"analyse_negotiation.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"71"} +{"seq_id":"72230741669","text":"import pandas as pd\nimport pytest\nfrom fluids.numerics import assert_close, assert_close1d, assert_close2d\n\nfrom chemicals.heat_capacity import CRC_standard_data, TRC_gas_data\nfrom chemicals.reaction import (\n Gibbs_formation,\n Hf_basis_converter,\n Hfg,\n Hfg_all_methods,\n Hfg_API_TDB_data,\n Hfg_ATcT_data,\n Hfg_methods,\n Hfg_S0g_YAWS_data,\n Hfl,\n Hfl_ATcT_data,\n Hfl_methods,\n Hfs,\n Hfs_methods,\n S0g,\n S0g_all_methods,\n S0g_methods,\n S0l,\n S0l_methods,\n S0s,\n S0s_methods,\n balance_stoichiometry,\n entropy_formation,\n standard_formation_reaction,\n stoichiometric_matrix,\n)\n\n\ndef test_API_TDB_data():\n assert Hfg_API_TDB_data['Hfg'].abs().sum() == 101711260\n assert Hfg_API_TDB_data.shape == (571, 2)\n\n\ndef test_ATcT_l():\n assert Hfl_ATcT_data.shape == (34,5)\n tots_calc = [Hfl_ATcT_data[i].abs().sum() for i in ['Hfl_0K', 'Hfl', 'uncertainty']]\n tots = [2179500.0, 6819443, 19290]\n assert_close1d(tots_calc, tots)\n\n\ndef test_Hfg_ATcT_data():\n assert Hfg_ATcT_data.shape == (595, 5)\n tots_calc = [Hfg_ATcT_data[i].abs().sum() for i in ['Hfg_0K', 
'Hfg', 'uncertainty']]\n tots = [300788330, 300592764, 829204]\n assert_close1d(tots_calc, tots)\n\ndef test_Hfg_API_TDB_data():\n assert_close(Hfg('7732-18-5', method='API_TDB_G'), -241820.0)\n\n assert Hfg_methods('7732-18-5') == ['ATCT_G', 'CRC', 'API_TDB_G', 'WEBBOOK', 'TRC', 'JANAF', 'YAWS']\n\n assert None is Hfg('98-00-1')\n\n with pytest.raises(Exception):\n Hfg('98-00-0', method='BADMETHOD')\n\n@pytest.mark.slow\ndef test_Hfg_API_TDB_data_fuzz():\n tot = sum([abs(Hfg(i, method='API_TDB_G')) for i in Hfg_API_TDB_data.index])\n assert_close(tot, 101711260.0)\n\n\ndef test_Hfl():\n Hfs = [Hfl('67-56-1'), Hfl('67-56-1', method='ATCT_L')]\n assert_close1d(Hfs, [-238400.0]*2)\n\n assert Hfl_methods('67-56-1') == ['ATCT_L', 'CRC', 'WEBBOOK']\n assert None is Hfl('98-00-1')\n\n tot = sum([abs(Hfl(i)) for i in Hfl_ATcT_data.index])\n assert_close(tot, 6819443.0)\n\n with pytest.raises(Exception):\n Hfl('98-00-0', method='BADMETHOD')\n\n\ndef test_Hfg():\n # default method ATCT_G\n assert_close(Hfg('7732-18-5'), -241822.0)\n\n Hfs = [Hfg('67-56-1', method=i) for i in Hfg_all_methods]\n assert_close1d(Hfs, [-200700.0, -190100.0, -201000.0, -205000.0, None, -200900.0, -216200.0])\n\n assert Hfg_methods('67-56-1') == ['ATCT_G', 'CRC', 'API_TDB_G', 'WEBBOOK', 'TRC', 'YAWS', 'JOBACK']\n assert_close(-211800.0, Hfg('98-00-0'))\n\n with pytest.raises(Exception):\n Hfg('98-00-0', method='BADMETHOD')\n\ndef test_Hfs():\n assert_close(Hfs('101-81-5'), 71500)\n assert_close(Hfs('101-81-5', method='CRC'), 71500)\n assert ['CRC', 'WEBBOOK'] == Hfs_methods('101-81-5')\n\n\n\n@pytest.mark.fuzz\n@pytest.mark.slow\ndef test_Hfg_all_values():\n tot1 = sum([abs(Hfg(i, method='TRC')) for i in TRC_gas_data.index[pd.notnull(TRC_gas_data['Hfg'])]])\n assert_close(tot1, 495689880.0)\n\n tot2 = sum([abs(Hfg(i, method='ATCT_G')) for i in Hfg_ATcT_data.index])\n assert_close(tot2, 300592764.0)\n\n tot3 = sum([abs(Hfg(i, method='YAWS')) for i in Hfg_S0g_YAWS_data.index[pd.notnull(Hfg_S0g_YAWS_data['Hfg'])]])\n assert_close(tot3, 1544220403.0)\n\n tot4 = sum([abs(Hfg(i, method='CRC')) for i in CRC_standard_data.index[pd.notnull(CRC_standard_data['Hfg'])]])\n assert_close(tot4, 392946600.0)\n\ndef test_S0g():\n S0s = [S0g('7732-18-5', method=i) for i in S0g_all_methods]\n assert_close1d(S0s, [188.8, 188.83842, 188.834, 188.84])\n\n assert S0g_methods('67-56-1') == ['CRC', 'YAWS']\n\n assert_close(239.9, S0g('67-56-1'))\n\n with pytest.raises(Exception):\n S0g('98-00-0', method='BADMETHOD')\n\n@pytest.mark.fuzz\n@pytest.mark.slow\ndef test_S0g_all_values():\n tot3 = sum([abs(S0g(i, method='YAWS')) for i in Hfg_S0g_YAWS_data.index[pd.notnull(Hfg_S0g_YAWS_data['S0g'])]])\n assert_close(tot3, 2690113.4130000058)\n\n tot4 = sum([abs(S0g(i, method='CRC')) for i in CRC_standard_data.index[pd.notnull(CRC_standard_data['S0g'])]])\n assert_close(tot4, 141558.30000000008)\n\n\ndef test_S0s():\n assert_close(S0s('7439-93-2'), 29.1) # Lithium\n assert_close(S0s('7439-93-2', method='CRC'), 29.1)\n\n methods = S0s_methods('7439-93-2')\n assert methods == ['CRC', 'WEBBOOK']\n\ndef test_S0l():\n assert_close(S0l('7439-97-6'), 75.9) # Lithium\n assert_close(S0l('7439-97-6', method='CRC'), 75.9)\n\n methods = S0l_methods('7439-97-6')\n assert methods == ['CRC', 'WEBBOOK']\n\ndef test_Gibbs_formation():\n Gf = Gibbs_formation(-285830.0, 69.91, [0.0, 0.0], [130.571, 205.147], [1.0, .5])\n assert_close(Gf, -237161.633825)\n\n Gf = Gibbs_formation(-241818, 188.825, [0.0, 0], [130.571, 205.147], [1.0, .5])\n assert_close(Gf, 
-228604.141075)\n\n Gf = Gibbs_formation(-648980, 297.713, [0.0, 0.0, 0.0], [5.74, 152.206, 202.789], [1, .5, 1.5])\n assert_close(Gf, -622649.329975)\n\n\ndef test_Hf_basis_converter():\n assert_close(Hf_basis_converter(44018.0, Hf_liq=-285830.0), -241812)\n\n assert_close(Hf_basis_converter(44018, Hf_gas=-241812.0), -285830)\n\n with pytest.raises(ValueError):\n Hf_basis_converter(44018, Hf_liq=None)\n with pytest.raises(ValueError):\n Hf_basis_converter(2000, Hf_gas=None, Hf_liq=None)\n with pytest.raises(ValueError):\n Hf_basis_converter(Hvapm=-1, Hf_liq=1)\n with pytest.raises(ValueError):\n Hf_basis_converter(Hvapm=None, Hf_liq=1)\n\ndef test_entropy_formation():\n Sf = entropy_formation(Hf=-74520.0, Gf=-50490.0)\n assert_close(Sf, -80.59701492537314)\n\n Sf = entropy_formation(Hf=-241818, Gf=-228572)\n assert_close(Sf, -44.427301693778304)\n\n\n\ndef test_balance_stoichiometry():\n test_cases = [\n [[{'Hg': 1, 'O': 1}, {'Hg': 1}, {'O': 2}], [True, False, False], [2.0, 2.0, 1.0]],\n [[{'Cl': 2}, {'C': 3, 'H': 6}, {'C': 3, 'Cl': 1, 'H': 5}, {'Cl': 1, 'H': 1}],\n [True, True, False, False, False],\n [1, 1, 1, 1]],\n [[{'Al': 1}, {'H': 1, 'N': 1, 'O': 3}, {'Al': 1, 'N': 3, 'O': 9}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}],\n [True, True, False, False, False],\n [1.0, 4.0, 1.0, 1.0, 2.0]],\n [[{'Fe': 1}, {'O': 2}, {'Fe':2, 'O': 3}], [True, True, False], [4.0, 3.0, 2.0]],\n [[{'N': 1, 'H': 3}, {'O': 2}, {'N': 1, 'O': 1}, {'H': 2, 'O': 1}], [True, True, False, False], [4.0, 5.0, 4.0, 6.0]],\n [[{'O': 2}, {'H': 2, 'O': 1}, {'C': 1, 'O': 2}, {'C': 6, 'H': 14}], [True, False, False, True], [19.0, 14.0, 12.0, 2.0]],\n\n ]\n\n for atomss, statuses, products in test_cases:\n assert_close1d(balance_stoichiometry(stoichiometric_matrix(atomss, statuses)), products)\n\n\ndef test_stoichiometric_matrix():\n res = stoichiometric_matrix([{'Mg': 1, 'O': 1}, {'Mg': 1}, {'O': 2}], [True, False, False])\n assert_close2d([[1, -1, 0], [1, 0, -2]], res)\n\n\ndef test_standard_formation_reaction():\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 3, 'H': 8})\n assert coeff_test == [3.0, 4.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 3, 'H': 7, 'N': 1, 'O': 2, 'S': 1})\n assert coeff_test == [6.0, 7.0, 1.0, 2.0, 2.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'N': 2}, {'O': 2}, {'S': 1}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 6, 'H': 7, 'B': 1, 'O': 2})\n assert coeff_test == [12.0, 7.0, 2.0, 2.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'B': 1}, {'O': 2}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 4, 'H': 12, 'Si': 1})\n assert coeff_test == [4.0, 6.0, 1.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'Si': 1}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 12, 'H': 10, 'Cl': 1, 'P': 1})\n assert coeff_test == [24.0, 10.0, 1.0, 2.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'Cl': 2}, {'P': 1}]\n\n reactant_coeff, coeff_test, reactant_atomss_test = standard_formation_reaction({'C': 2, 'H': 4, 'Br': 1, 'F': 1})\n assert coeff_test == [4.0, 4.0, 1.0, 1.0]\n assert reactant_atomss_test == [{'C': 1}, {'H': 2}, {'Br': 2}, {'F': 
2}]\n","repo_name":"CalebBell/chemicals","sub_path":"tests/test_reaction.py","file_name":"test_reaction.py","file_ext":"py","file_size_in_byte":8249,"program_lang":"python","lang":"en","doc_type":"code","stars":146,"dataset":"github-code","pt":"71"} +{"seq_id":"71223671911","text":"def dijkstra(start, end, graph):\n\t\"\"\"\n\n\tInput: start node, end (list of nodes) and graph\n\tGraph: 2D list with node objects\n\tNode: value, previous, dist_source, neighbors, visited\n\tReturns: Nothing, just modified previous in nodes so that shortest path can be found from end.\n\tNote: Meant for matrices\n\n\t\"\"\"\n\tunvisited = [vertex for row in graph for vertex in row]\n\tcurrent = start\n\twhile True:\n\t\tif current in end or len(unvisited) == 0:\n\t\t\tbreak\n\t\tfor vertex in current.neighbors: #I think this checks neighbors that are visited as well.\n\t\t\tif vertex.visited == True:\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdistance = vertex.value + current.value + current.dist_source\n\t\t\t\tif distance < vertex.dist_source:\n\t\t\t\t\tvertex.dist_source = distance\n\t\t\t\t\tvertex.previous = current\n\t\tunvisited.remove(current)\n\t\tcurrent.visited = True\n\t\tcurrent = min(unvisited, key = lambda x: x.dist_source)\n\n\ndef path_sum(last_point, graph):\n\t\"\"\"\n\t\n\tInput: last point (starting here), graph\n\tGraph: 2D list with node objects\n\tNode: value, previous, dist_source, neighbors, visited\n\tReturns: The sum of the values in the shortest path (or any path)\n\tNote: Meant for matrices\n\n\t\"\"\"\n\tcurrent = last_point\n\tpath_sum = 0\n\twhile True:\n\t\t#print current.value\n\t\tpath_sum += current.value\n\t\tif not current.previous:\n\t\t\tbreak\n\t\tcurrent = current.previous\n\treturn path_sum\t\n","repo_name":"fugitiveinkc/Project_Euler","sub_path":"projecteuler_algorithms.py","file_name":"projecteuler_algorithms.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"13534589042","text":"from odoo import fields, models, api, _\n\n\nclass PackageOrder(models.Model):\n _name = 'package.order'\n _description = 'Description'\n\n name = fields.Char(default=_('New'), readonly=True, required=True)\n customer_id = fields.Many2one('res.partner', domain=[('is_company', '=', False)])\n address = fields.Text('Address', store=True)\n vendor_id = fields.Many2one('res.partner', domain=[('is_company', '=', True)])\n line_ids = fields.One2many('package.order.line', 'order_id', string='Lines')\n note = fields.Html()\n total = fields.Float('Total', compute=\"_compute_total\")\n customer_commission = fields.Float(\"Customer Commission\", compute=\"_compute_customer_commission\")\n refund_amount = fields.Float('Refund Amount', compute=\"_compute_refund_amount\")\n value_due = fields.Float(\"Value Due\", compute=\"_compute_value_due\")\n\n @api.onchange(\"customer_id\")\n def _change_address(self):\n for order in self:\n if order.customer_id:\n order.address = f'{order.customer_id.street} ' if order.customer_id.street else ''\n order.address += f'or {order.customer_id.street2} ' if order.customer_id.street2 else ''\n order.address += f', {order.customer_id.city}' if order.customer_id.city else ''\n order.address += f' in {order.customer_id.state_id.name}' if order.customer_id.state_id else ''\n else:\n order.address = ''\n\n\n @api.depends('total')\n def _compute_value_due(self):\n for order in self:\n order.value_due = order.total + (order.total * (order.customer_id.commission / 100))\n\n 
@api.depends('line_ids', 'customer_id')\n def _compute_total(self):\n for order in self:\n sum_line = 0\n for line in order.line_ids:\n sum_line += line.sub_total\n order.total = sum_line\n\n @api.depends('total', 'customer_id')\n def _compute_refund_amount(self):\n for order in self:\n order.refund_amount = order.value_due - (order.total * (order.customer_id.commission / 100))\n\n @api.depends('customer_id', 'total')\n def _compute_customer_commission(self):\n for order in self:\n order.customer_commission = order.total * (order.customer_id.commission / 100)\n\n @api.model\n def create(self, vals):\n # We generate a standard reference\n vals['name'] = self.env['ir.sequence'].next_by_code('package.order') or '/'\n return super(PackageOrder, self).create(vals)\n\n\nclass PackageOrderLine(models.Model):\n _name = 'package.order.line'\n\n order_id = fields.Many2one('package.order')\n product_id = fields.Many2one('product.product')\n quantity = fields.Integer('quantity', default=1)\n price = fields.Float('Price', related='product_id.lst_price')\n sub_total = fields.Float('Sub Total', compute=\"_compute_total\")\n\n @api.depends('product_id', 'quantity')\n def _compute_total(self):\n for line in self:\n line.sub_total = line.quantity * line.price\n","repo_name":"Matrixtarget9/Test","sub_path":"refunds/models/package_order.py","file_name":"package_order.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11813349407","text":"import requests, pprint\nfrom decouple import config #파이썬에서 환경변수 관리하는 패키지\n\n#1. 토큰 및 기본 URL 설정\ntoken = config('TELEGRAM_TOKEN')\nurl = f'https://api.telegram.org/bot{token}/'\n\nresponse = requests.get(url+'GETUPDATES').json()\n\nchat_id = response.get('result')[0].get('message').get('from').get('id')\npprint.pprint(chat_id)\n\n#4. 
CHAT_ID에 메시지 보내기\n #4-1 요청 보낼 URL 만들기\ntext = \"a;lsdkfj;alksdjf\"\napi_url = f'{url}sendMessage?chat_id={chat_id}&text={text}'\nrequests.get(api_url)\n #4-2 REQUESTS 로 보내기","repo_name":"ssshhh0402/telegram","sub_path":"telegram.py","file_name":"telegram.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7834957765","text":"from math import *\n\n#Exercise 1\ndef areaCalculation():\n print('Enter length and width:')\n length, width = int(input()), int(input())\n temporary = length * width\n SQFT_PER_ACRE = temporary / 43560\n print(\"Result =\", SQFT_PER_ACRE, \"acres\")\n\n#Exercise 2\ndef freeFall(accseleration=9.8):\n distance = int(input('Enter distance: '))\n Vf = sqrt(2 * (accseleration * distance))\n print('The velocity of an object in contact with the ground: ', Vf)\n\n#Exercise 3\ndef howManyDays():\n month = input('Enter a month: ')\n if month.capitalize() == 'February':\n print('There is 28, or 29 days in', month)\n elif month == 'April' or month == 'June' or month == 'September' or month == 'November':\n print('There is 30 days in', month)\n else:\n print('There is 31 days in', month)\n\n\nareaCalculation()\nprint('---------------------')\nfreeFall()\nprint('---------------------')\nhowManyDays()","repo_name":"MilezKilo/HomeworkOnPythonTwo","sub_path":"TestTwoVar.py","file_name":"TestTwoVar.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3630463559","text":"#import smbus\nimport time\nimport math\nfrom struct import pack, unpack\n\n#bus = smbus.SMBus(1)\ntime.sleep(1)\n\ndef sendData(slaveAddress, data):\n bus.write_i2c_block_data(slaveAddress, 0xFF,data)\n\ndef readData(slaveAddress,reg):\n bytes=bus.read_i2c_block_data(slaveAddress,reg,16)\n return bytes\n\ndef comando_checksum(offset, level,send_list):\n #comando para pedir checksum\n send_list[0]=5\n #numero de offset\n send_list[1]=(offset & (0xFF<<16))>>16\n send_list[2]=(offset & (0xFF<<8))>>8\n send_list[3]=(offset & (0xFF))\n #nivel\n send_list[4]=(level & (0xFF))\n #llenar cero espacios vacios\n for i in range(5, 13):\n send_list[i]=0\n #checksum\n send_list[13]=sum(send_list[:13])&0xFF\n sendData(0x03,send_list)\n\ndef llenar_comando(cont,send_list):\n #tipo comando(4 para indicar que paquete de la imagen se pide)\n send_list[0]=4\n #llenar cero espacios vacios\n for i in range(4, 13):\n send_list[i]=0\n #numero paquete a pedir\n send_list[1]=(cont & (0xFF<<16))>>16\n send_list[2]=(cont & (0xFF<<8))>>8\n send_list[3]=(cont & (0xFF))\n send_list[13]=sum(send_list[:13])&0xFF\n #regresa el comando listo para enviar\n return send_list\n\ndef skipped_checksum(offset,level,image):\n checksum_list= [0]*15 \n c_i=15*offset #index contenido \n r_i=0 #index result\n separa=(pow(15,level)-1)*15\n size=len(image)\n while True:\n for i in range(15):\n if c_i < size:\n checksum_list[r_i]+=image[c_i]\n checksum_list[r_i]&=0xFF\n c_i+=1\n else:\n return checksum_list\n if r_i >= 14:\n r_i*=0\n else: \n r_i+=1\n c_i+=separa\n \ndef get_skipped_checksum(offset,level,send_list):\n while True:\n #leer checksum de OBC1\n bytes=readData(0x03, 0xFF)\n #calcular checksum\n check_sum=sum(bytes[:15])\n #mantenemos ultimos 8 bits\n check_sum&=0xFF\n #si es igual al inicio de la cadena\n if check_sum==bytes[0]:\n check_sum+=1\n check_sum&=0xFF\n #check sum es correcto?\n if check_sum==bytes[15]:\n return bytes[:15]\n else:\n 
comando_checksum(offset,level,send_list)\n\n \ndef pedir_foto():\n #condicion termino whiles\n valido=False\n #checksum comandos recibidos\n check_sum=0\n #contador paquetes recibidos \n total=0\n print(\"inicio\")\n #comando pedir foto a OBC2\n send_list= [3] + [1]*12 + [15] \n sendData(0x03,send_list)\n \n while valido== False:\n #leer tamano foto\n bytes=readData(0x03, 0xFF)\n #comprobar si es comando\n if bytes[0]==6:\n #si OBC 2 encontro la foto que pedi\n #1 es si, 0 es no\n if bytes[1]==1:\n #calcula checksum del comando recibido\n check_sum=sum(bytes[:15])&0xFF\n #si checksum es igual a 6\n if check_sum==6:\n check_sum=7\n #comprobar checksum\n if check_sum==bytes[15]:\n #convertir numero de paquetes\n total=math.ceil(((bytes[2]<<24)+(bytes[3]<<16)+(bytes[4]<<8)+(bytes[5]))/15)\n return total\n else:\n sendData(0x03,send_list)\n \nclass Stepper:\n def __init__(self, img_size):\n self.img_size = img_size\n self.cont = 0 #contador paquetes\n self.lectura = 0 #contador lectura \n self.mal_check = 0 #contador checksum incorrecto\n self.image = [0]*(int(img_size)*15) #lista para almacenar los paquetes\n self.send_list = [0 for i in range(14)]\n print(\"total de paquetes: \", img_size)\n\n def next(self):\n if self.cont >= self.img_size:\n print(\"Finito\")\n return False\n llenar_comando(self.cont,self.send_list)\n sendData(0x03,self.send_list)\n #print(\"Pedi: \", self.send_list, self.cont)\n return True\n\n def read(self):\n #leer info de OBC2\n bytes=readData(0x03, 0xFF)\n self.lectura+=1\n #calcular checksum\n check_sum=sum(bytes[:15])\n #aumento el numero de paquete actual\n check_sum+=self.cont\n #mantenemos ultimos 8 bits\n check_sum&=0xFF\n #si es i\n if check_sum==bytes[0]:\n check_sum+=1\n check_sum&=0xFF\n #check sum es correcto?\n if check_sum==bytes[15]:\n #guardar datos para exportar la imagen\n for i in range(15):\n self.image[(self.cont*15)+i]=bytes[i]\n #print(\"Correcto: \", bytes, self.cont)\n self.cont+=1\n else:\n #print(\"Incorrecto: \", bytes, self.cont)\n self.mal_check+=1\n \n def correct_error(self,offset,level):\n print(\"offset: \",offset,\" level: \",level)\n #pedir checksum a OBC1\n comando_checksum(offset,level,self.send_list)\n #separación entre paquetes\n separa=15**(level+1)\n #separación entre paquetes del mismo tipo\n separa2=15**(level+2)\n #calcular checksum paquetes del mismo tipo\n own=skipped_checksum(offset,level,self.image[:self.img_size])\n #leer checksum de OBC1\n other=get_skipped_checksum(offset,level,self.send_list)\n for i in range(15):\n if own[i]!=other[i]:\n if (offset*15)+(i*separa)+separa2 >= len(image):\n #pedir paquete erroneo (offset+i*15**level)\n llenar_comando(offset+i*15**level,self.send_list)\n sendData(0x03,self.send_list)\n while True:\n #leer nuevo paquete\n bytes=readData(0x03, 0xFF)\n #calcular checksum\n check_sum=sum(bytes[:15])\n #mantenemos ultimos 8 bits\n check_sum&=0xFF\n #si es igual al inicio\n if check_sum==bytes[0]:\n check_sum+=1\n check_sum&=0xFF\n #check sum es correcto?\n if check_sum==bytes[15]:\n #guardar datos para exportar la imagen\n for j in range(15):\n self.image[((offset*15)+(i*separa))+j]=bytes[j]\n return\n else: \n sendData(0x03,self.send_list) \n else:\n self.correct_error(offset+i*15**level,level+1)\n return \n\ndef main():\n inicioT=time.time()\n print(\"Recibiendo paquetes...\")\n total = pedir_foto()\n stpr = Stepper(total)\n #stpr.read()\n total_time = 0\n event_count = 0\n min_time = 30\n max_time = 0\n while stpr.next():\n time.sleep(0.001)\n start = time.time()\n stpr.read()\n end = 
time.time()\n elapsed = end-start\n total_time += elapsed\n event_count += 1\n if elapsed < min_time:\n min_time = elapsed\n if elapsed > max_time:\n max_time = elapsed\n start = time.time()\n stpr.correct_error(0,0)\n end = time.time()\n print(\"tiempo lista checksum imagen:\",end-start)\n print(\"Event stats:\")\n print(\"Event count: {}\".format(event_count))\n print(\"Min time: {} us\".format(1000000*min_time))\n print(\"Max time: {} us\".format(1000000*max_time))\n print(\"Avg. time: {} us\".format(1000000*total_time/event_count))\n\n finT=time.time()\n print(\"fin: \",finT-inicioT)\n print(\"lectura: \",stpr.lectura)\n print(\"mal checksum: \",stpr.mal_check)\n f=open(\"image10.jpg\",\"wb\")\n Aarray=bytearray(stpr.image)\n f.write(Aarray)\n f.close()\n print(\"FIN\")\n \nmain()\n#total = pedir_foto()\n#s = Stepper(total)\n#image =list(open(\"test2.jpg\",\"rb\").read())\n#l = [i&0xFF for i in range(1001)]\n#print(image[:100])\n#print(\"-----------------------\")\n#print(skipped_checksum(0,0,image))\n","repo_name":"OctavioSaul/emulador_OBC1","sub_path":"V2_masterI2C.py","file_name":"V2_masterI2C.py","file_ext":"py","file_size_in_byte":7567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"7760670822","text":"#! /Users/jasonlu/.virtualenvs/pyven3_6/bin/python\n\ndef read_salary():\n\n while True:\n str_file_name = input(\"请输入雇员列表文件名(q或Q退出):\")\n \n if str_file_name is None or str_file_name == '':\n continue \n \n if str_file_name in ['q', 'Q']:\n break \n \n list_employ = []\n try:\n with open(str_file_name, 'r', encoding='utf-8') as f:\n for one_employ in f:\n employ = one_employ.strip()\n if employ != '':\n list_employ.append(employ)\n\n except FileNotFoundError as e:\n print('没有对应的文件...请重新输入文件名')\n\n if len(list_employ) <= 0:\n print('暂时没有员工信息!')\n\n print(len(list_employ))\n print('\\n')\n print('------------------员工支付工资信息表------------------')\n for employ in list_employ:\n print(employ)\n print('-----------------------------------------------------')\n\n\nread_salary()\n","repo_name":"jinzekid/codehub","sub_path":"python/练习_数据结构/c1_9_6.py","file_name":"c1_9_6.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5600329042","text":"from flask import Flask, request, Response, render_template, redirect, flash, url_for\nimport requests\nimport itertools\nfrom flask_wtf.csrf import CSRFProtect\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nimport wtforms\nfrom wtforms.validators import Regexp\nimport re\n\nclass WordForm(FlaskForm):\n avail_letters = StringField(\"Letters\", validators= [\n Regexp(r'^$|^[a-z]+$', message=\"Must contain only lowercase letters a-z\")\n ])\n \n pattern = StringField(\"Pattern\", validators= [\n Regexp(r'^$|^[a-z|.]+$', message=\"Must contain only lowercase letters a-z or .\")\n ])\n\n length = StringField(\"Length\", validators= [\n Regexp(r'^$|^(3|4|5|6|7|8|9|10)$', message=\"Must contain only one number in range 3-10\")\n ])\n\n submit = SubmitField(\"Search\")\n \n\ncsrf = CSRFProtect()\napp = Flask(__name__)\napp.config[\"SECRET_KEY\"] = '84247a35-6917-4697-b294-d6cca6cd9052' \ncsrf.init_app(app)\n\n@app.route('/')\ndef default():\n return redirect(url_for('index'))\n\n@app.route('/index')\ndef index():\n form = WordForm()\n return render_template(\"index.html\", form=form)\n\n@app.route('/words', methods=['POST','GET'])\ndef letters_2_words():\n form = 
WordForm()\n if form.validate_on_submit():\n letters = form.avail_letters.data\n pattern = form.pattern.data\n length = form.length.data\n\n if length != \"\" and pattern != \"\" and len(pattern) != int(length):\n return render_template(\"index.html\", form=form, error=\"Pattern Length and Length must be equal.\")\n elif letters == \"\" and pattern == \"\":\n return render_template(\"index.html\", form=form, error=\"Letters or Pattern must be provided\")\n elif length != \"\" and letters != \"\" and int(length) > len(letters):\n return render_template(\"index.html\", form=form, error=\"Length cannot be greater than number of letters.\")\n else:\n return render_template(\"index.html\", form=form)\n\n good_words = set()\n f = open('sowpods.txt')\n strings = []\n\n if(pattern != \"\"):\n new_pattern = \"^\" + pattern + \"$\"\n for line in f.readlines():\n word = line[:-1]\n if re.search(new_pattern, word):\n strings.append(word)\n else:\n strings = f.readlines()\n \n if length == \"\":\n length = 0\n else:\n length = int(length)\n length += 1\n\n if letters == \"\" and pattern != \"\" and length != \"\" and length != 0:\n length -= 1\n\n if pattern != \"\":\n length = len(pattern)\n\n for x in strings:\n word_length = len(x)\n if(length == 0):\n good_words.add(x.strip().lower())\n elif(length != 0 and length == word_length):\n good_words.add(x.strip().lower())\n f.close()\n word_set = set()\n if(letters != \"\"):\n for l in range(3,len(letters)+1):\n for word in itertools.permutations(letters,l):\n w = \"\".join(word)\n if w in good_words:\n word_set.add(w)\n else:\n word_set = list(good_words)\n \n word_set = sorted(word_set, reverse=False)\n word_set = sorted(word_set, reverse=False, key=len)\n\n message = \"\"\n if len(word_set) == 0:\n message = \"No matching words found.\"\n\n return render_template('wordlist.html',\n wordlist=word_set,\n name=\"CS4131\", message = message)\n\n@app.route('/proxy/')\ndef proxy(word):\n result = requests.get(f'https://www.dictionaryapi.com/api/v3/references/collegiate/json/' + word + '?key=' + app.config[\"SECRET_KEY\"])\n resp = Response(result.text)\n resp.headers['Content-Type'] = 'application/json'\n return resp","repo_name":"pankeelshah/WordFinder","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41497025872","text":"class Node:\n def __init__(self, s=\"\", val=0, nxt=None, prev=None):\n self.str = s\n self.val = val\n self.next = nxt\n self.prev = prev\n \nclass AllOne:\n def __init__(self):\n self.head = Node(\"\",float(\"inf\"),)\n self.tail = Node(\"\",float(\"-inf\"))\n self.head.next = self.tail\n self.tail.prev = self.head\n self.dic = {}\n \n def inc(self, key: str) -> None:\n if key not in self.dic:\n newNode = Node(key, 1)\n self.createAtTail(newNode, self.tail)\n self.dic[key] = newNode\n else:\n node = self.dic[key]\n node.val +=1\n while node.val > node.prev.val:\n self.swap(node.prev, node)\n \n def dec(self, key: str) -> None:\n node = self.dic[key]\n node.val -=1\n if node.val == 0:\n self.deleteNode(node)\n del self.dic[key]\n elif node.val < node.next.val:\n self.swap(node, node.next)\n \n def getMaxKey(self) -> str:\n return self.head.next.str\n\n def getMinKey(self) -> str:\n return self.tail.prev.str\n \n def createAtTail(self,newNode, tail):\n tail.prev.next = newNode\n newNode.next = tail\n newNode.prev = tail.prev\n tail.prev = newNode\n def createAtHead(self, newNode, head):\n 
newNode.next = head.next\n newNode.next.prev = newNode\n newNode.prev = head\n head.next = newNode\n def deleteNode(self, node):\n node.prev.next = node.next\n node.next.prev = node.prev\n \n def swap(self, node1, node2):\n node1.prev.next = node2\n node2.next.prev = node1\n node2.prev = node1.prev\n node1.prev = node2\n node1.next = node2.next\n node2.next = node1\n \n \n\n# Your AllOne object will be instantiated and called as such:\n# obj = AllOne()\n# obj.inc(key)\n# obj.dec(key)\n# param_3 = obj.getMaxKey()\n# param_4 = obj.getMinKey()","repo_name":"Matiyas1994/Leetcode","sub_path":"0432-all-oone-data-structure/0432-all-oone-data-structure.py","file_name":"0432-all-oone-data-structure.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"31279776599","text":"\"\"\"\nTests for CRUD operations on views for the Topic model\nwith proper authentication and validation are written here.\n\"\"\"\nimport random\nimport typing\n\nfrom faker import Faker\n\nfrom django.shortcuts import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\nfrom rest_framework.response import Response\n\nfrom backend import utils as u\nfrom topic.models import Topic\nfrom author.models import Author\nfrom author.utils import auth_header\nfrom topic.tests.generators import create_topic\nfrom author.tests.generators import create_author\nfrom article.tests.generators import create_article\nfrom article.serializers import ArticleListSerializer\nfrom topic.serializers import (\n TopicListSerializer,\n TopicDetailSerializer\n)\n\nfake = Faker()\nBASE_URL = '/api/topics'\n\n\nclass TopicRetrieveAPIViewTest(APITestCase):\n \"\"\"\n All views dealing with data retrieval regarding the\n Topic model are tested here. This includes topic lists,\n detail view, etc. 
Test data of 25 topics is chosen.\n An Author model instance is also conducted for aid in\n creations of said 25 topics.\n \"\"\"\n\n @classmethod\n def setUpTestData(cls) -> None:\n\n # Create authors.\n cls.author: Author = create_author()\n\n # Create topics.\n cls.topics: typing.List[Topic] = list(reversed([\n create_topic(cls.author.id) for _ in range(25)\n ]))\n\n # For later testing.\n cls.topic_1: Topic = random.choice(cls.topics)\n cls.topic_2: Topic = random.choice(cls.topics)\n\n kwargs = {'draft': False, 'author_id': cls.author.id}\n\n cls.articles_for_topic_1 = [create_article(topic_id=cls.topic_1.id, **kwargs) for _ in range(5)]\n cls.articles_for_topic_2 = [create_article(topic_id=cls.topic_2.id, **kwargs) for _ in range(5)]\n\n def test_topic_list_paginated(self) -> None:\n \"\"\"\n Makes a request to /api/topics/ and checks for topics being\n properly paginated and in proper format.\n \"\"\"\n page = 1\n for topic_index in range(0, 25, 10):\n response: Response = self.client.get(f'{reverse(\"topic:list\")}?page={page}')\n data = u.get_json(response)\n results = data['results']\n\n current_page_topics = self.topics[topic_index:topic_index+10]\n serialized_current_page_topics = TopicListSerializer(current_page_topics, many=True).data\n\n self.assertEqual(results, serialized_current_page_topics)\n page += 1\n\n def test_topic_detail_view(self) -> None:\n \"\"\"\n Simply makes requests to all reverse urls for topic details\n and compares serialized data against them.\n \"\"\"\n\n for topic in self.topics:\n response: Response = self.client.get(topic.get_absolute_url())\n data = u.get_json(response)\n serialized_data = TopicDetailSerializer(topic).data\n self.assertEqual(data, serialized_data)\n\n def test_topic_sorted_articles_view(self) -> None:\n \"\"\"\n Makes a GET request to /api/topics/detail//articles/ to get\n a list of articles written under the topic queried from .\n \"\"\"\n # Create articles to populate the database.\n\n for topic_id in (1, 2):\n\n # Get data - both serialized and in ORM form.\n topic = getattr(self, f'topic_{topic_id}')\n articles = getattr(self, f'articles_for_topic_{topic_id}')\n articles_serialized_data = list(reversed(ArticleListSerializer(articles, many=True).data))\n\n # Make request.\n response = self.client.get(reverse('topic:articles', kwargs={'slug': topic.slug}))\n data = u.get_json(response)\n\n # Assert equality.\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(data.get('results'), articles_serialized_data)\n\n\nclass TopicCreationAPIViewTest(APITestCase):\n \"\"\"\n Tests the TopicCreateAPIView with various invalid inputs (and valid).\n \"\"\"\n\n @classmethod\n def setUpTestData(cls) -> None:\n \"\"\"\n Make a temporary author to check make authenticated responses with.\n \"\"\"\n cls.author = create_author()\n cls.data = {\n 'name': fake.text(45)[:-1],\n 'description': fake.text(150),\n 'thumbnail_url': 'https://picsum.photos/id/271/1900/1080',\n }\n\n def test_unauthenticated_topic_creation(self):\n \"\"\"\n Makes an unauthenticated request to /api/topics/create/ to \n (hopefully) raise Unauthorized error.\n \"\"\"\n response: Response = self.client.post(reverse('topic:create'))\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {'detail': 'Authentication credentials were not provided.'})\n\n def test_authenticated_topic_creation_with_incomplete_data(self):\n \"\"\"\n Makes a properly authenticated request to 
/api/topics/create/ but\n with invalid (read: incomplete) data that should result in error.\n \"\"\"\n\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(self.author.get_key()))\n\n for field in self.data.keys():\n\n # Make a copy of the data so that\n # the original isn't changed because\n # every iteration will remove one\n # field - going through all.\n temp_data = self.data.copy()\n del temp_data[field]\n\n response: Response = self.client.post(reverse('topic:create'), data=temp_data)\n response_data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_422_UNPROCESSABLE_ENTITY)\n\n # Special case for thumbnail_url field since the\n # view (and Topic model) either expects a FILE\n # upload or an image url. For testing purposes,\n # we only work with placeholder image urls.\n if field == 'thumbnail_url':\n self.assertEqual(response_data, {\n 'detail': 'Either provide a url for a thumbnail or an image upload.'\n })\n else:\n self.assertEqual(response_data, {\n 'detail': f\"Field '{field}' not provided.\"\n })\n\n def test_authenticated_topic_creation(self):\n \"\"\"\n Makes a valid request to /api/topics/create/ with proper auth creds\n and valid (read: complete) POST data.\n \"\"\"\n\n # Authenticate via header token\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(self.author.get_key()))\n\n response: Response = self.client.post(reverse('topic:create'), data=self.data)\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n # Assert response with serialized last entry in Topic table\n self.assertEqual(data, TopicDetailSerializer(\n Topic.objects.last()\n ).data)\n\n\nclass TopicDeletionAPIViewTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls) -> None:\n\n cls.authors: typing.List[typing.Tuple[int, Author]] = [\n (index, create_author()) for index in range(1, 3)\n ]\n\n # Create 4 topics, 2 by each author.\n cls.author_1_topics: typing.List[Topic] = [\n create_topic(cls.authors[0][1].pk) for _ in range(2)\n ]\n cls.author_2_topics: typing.List[Topic] = [\n create_topic(cls.authors[1][1].pk) for _ in range(2)\n ]\n cls.topics: typing.Set[Topic] = set(cls.author_1_topics + cls.author_2_topics)\n\n def test_unauthenticated_deletion(self):\n \"\"\"\n Make unauthenticated request to /api/topics/delete// to\n assert Unauthorized Error and apt response.\n \"\"\"\n\n response: Response = self.client.delete(reverse('topic:delete', kwargs={\n 'slug': random.choice(self.author_1_topics).slug\n }))\n data = u.get_json(response)\n\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n self.assertEqual(data, {'detail': 'Authentication credentials were not provided.'})\n\n def test_invalid_permission_topic_deletion(self):\n \"\"\"\n Make valid authorized delete requests to /api/topics/delete//\n to raise a Forbidden error with apt response.\n \"\"\"\n\n for index, author in self.authors:\n\n # Authenticate delete request with current author\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(author.get_key()))\n\n topics_not_by_author = self.topics.difference(getattr(self, f'author_{index}_topics'))\n\n for topic in topics_not_by_author:\n response: Response = self.client.delete(reverse('topic:delete', kwargs={\n 'slug': topic.slug\n }))\n data = u.get_json(response)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(data, {'detail': 'Deletion is not authorized.'})\n\n def test_valid_permission_topic_deletion(self):\n \"\"\"\n Last test to run in this 
APITestCase - makes valid delete requests\n /api/topics/delete// and compares status code and check\n for existence inside of database.\n \"\"\"\n\n for index, author in self.authors:\n\n self.client.credentials(HTTP_AUTHORIZATION=auth_header(author.get_key()))\n topics_by_author = getattr(self, f'author_{index}_topics')\n\n for topic in topics_by_author:\n\n topic_slug = topic.slug\n\n response: Response = self.client.delete(reverse('topic:delete', kwargs={\n 'slug': topic_slug\n }))\n # No need to get data since a 204 response doesn't return anything.\n\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n # Now check for data.\n with self.assertRaises(ObjectDoesNotExist):\n Topic.objects.get(slug__iexact=topic_slug)\n","repo_name":"mentix02/medialist-backend","sub_path":"topic/tests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10759512316","text":"#\n# Henry Acevedo\n#\n# Purpose: Log courses that are published and not for data visualization purposes.\n#\n\nimport csv\nfrom canvasapi import Canvas\nfrom configparser import ConfigParser\n\nconfig = ConfigParser()\nconfig.read('config.ini')\nMYURL = config.get('instance', 'test')\nMYTOKEN = config.get('auth', 'token')\n\ncanvas = Canvas(MYURL, MYTOKEN)\n\n# Specify information for term and filename\n\n# Fall 18\n# fn = 'SubaccountsPublish.csv'\n# term_id = 27\n\n# Winter 19\n# fn = '2191SubaccountsPublish.csv'\n# term_id = 28\n\n# Spring 19\nfn = '2193SubaccountsPublish.csv'\nterm_id = 29\n\n\ndef main():\n # Academic courses subaccount\n root = canvas.get_account(10)\n accounts = root.get_subaccounts()\n\n # Create a .csv with filename from above and write heare\n with open(fn, 'w') as csvFile:\n csvWriter = csv.writer(csvFile, lineterminator='\\n')\n csvWriter.writerow(['account', 'Parent', 'Course', 'Status'])\n\n # Cycle through subaccounts in this account\n for account in accounts:\n # Get courses in term with a teacher, and include number of students in course\n courses = account.get_courses(\n enrollment_type=['teacher'],\n enrollment_term_id=term_id,\n include=['total_students'])\n\n # Cycle through courses\n for course in courses:\n # If no students ignore, otherwise log in csv as published or unpublished\n if course.total_students != 0:\n if course.workflow_state == 'unpublished':\n csvWriter.writerow([\n account.name,\n course.account_id,\n course.id, 'Unpublished'])\n else:\n csvWriter.writerow([\n account.name,\n course.account_id,\n course.id, 'Published'])\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HenryAcevedo/canvas-scripts","sub_path":"scripts/get-published-courses.py","file_name":"get-published-courses.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40053200660","text":"import requests\nfrom flight_data import FlightData\nfrom decouple import config\n\nTEQUILA_KIWI_TRAVEL_API_KEY = config(\"TEQUILA_KIWI_TRAVEL_API_KEY\", default=\"\")\nTEQUILA_FLIGHT_SEARCH_ENDPOINT = config(\"TEQUILA_FLIGHT_SEARCH_ENDPOINT\", default=\"\")\nTEQUILA_LOCATION_QUERY = config(\"TEQUILA_LOCATION_QUERY\", default=\"\")\ntequila_headers = {\n \"apikey\": TEQUILA_KIWI_TRAVEL_API_KEY\n}\n\n\nclass FlightSearch:\n # This class is responsible for talking to the Flight Search API.\n def __init__(self):\n self.iata_code = None\n\n def get_iata_code(self, city: str):\n 
parameters = {\n \"term\": city,\n \"location_types\": \"city\"\n }\n get_iata_code_response = requests.get(TEQUILA_LOCATION_QUERY, headers=tequila_headers,\n params=parameters)\n get_iata_code_response.raise_for_status()\n self.iata_code = get_iata_code_response.json()[\"locations\"][0][\"code\"]\n return self.iata_code\n\n def get_destination_city_and_price(self, destination_city_code: str, source_city_code: str, from_date: str,\n to_date: str, destination_city: str):\n parameters = {\n \"fly_from\": source_city_code,\n \"fly_to\": destination_city_code,\n \"dateFrom\": from_date,\n \"dateTo\": to_date,\n \"nights_in_dst_from\": 7,\n \"nights_in_dst_to\": 28,\n \"flight_type\": \"round\",\n \"one_for_city\": 1,\n \"max_stopovers\": 0,\n \"curr\": \"GBP\"\n }\n response = requests.get(TEQUILA_FLIGHT_SEARCH_ENDPOINT, params=parameters, headers=tequila_headers)\n response.raise_for_status()\n\n try:\n data = response.json()[\"data\"][0]\n except IndexError:\n print(f\"No flights found for {destination_city_code}.\")\n parameters[\"max_stopovers\"] = 1\n response = requests.get(TEQUILA_FLIGHT_SEARCH_ENDPOINT, params=parameters, headers=tequila_headers)\n response.raise_for_status()\n try:\n data = response.json()[\"data\"][0]\n print(data)\n except IndexError:\n print(f\"No flights found for {destination_city_code} with 1 stop over.\")\n return None\n\n flight_data = FlightData(\n price=data[\"price\"],\n origin_city=data[\"route\"][0][\"cityFrom\"],\n origin_airport=data[\"route\"][0][\"flyFrom\"],\n destination_city=destination_city,\n destination_airport=data[\"route\"][0][\"flyTo\"],\n out_date=data[\"route\"][0][\"local_departure\"].split(\"T\")[0],\n return_date=data[\"route\"][1][\"local_departure\"].split(\"T\")[0]\n )\n\n if parameters[\"max_stopovers\"] >= 1:\n flight_data.via_city = data[\"route\"][0][\"cityTo\"]\n flight_data.stop_overs = parameters[\"max_stopovers\"]\n\n print(f\"{flight_data.destination_city}: £{flight_data.price}\")\n return flight_data\n","repo_name":"sign4git/nandalal","sub_path":"Python/Flight Deals/flight_search.py","file_name":"flight_search.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"41918325604","text":"import time\nfrom functools import wraps\nfrom exporter import config\n\n\ndef retry(exceptions, tries=4, delay=3, backoff=2, logger=None):\n def deco_retry(f):\n @wraps(f)\n def f_retry(*args, **kwargs):\n mtries, mdelay = tries, delay\n while mtries > 1:\n try:\n return f(*args, **kwargs)\n except exceptions as e:\n msg = \"{}, Retrying in {} seconds...\".format(e, mdelay)\n if logger:\n logger.warning(msg)\n else:\n print(msg)\n time.sleep(mdelay)\n mtries -= 1\n mdelay *= backoff\n return f(*args, **kwargs)\n\n return f_retry\n\n return deco_retry\n\n\ndef catch_task_error(name, logger):\n def catch(func):\n def wrapper(*args, **kwargs):\n logger.info(\"{} task\".format(name))\n try:\n func(*args, **kwargs)\n except Exception as e:\n if logger:\n logger.error(\"{} task failed\".format(name))\n logger.error(e)\n print(e)\n if config.DEBUG:\n raise e\n return wrapper\n return catch\n","repo_name":"freeletics/ASO-collector","sub_path":"exporter/utils/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"36642918372","text":"\"\"\"\nMisc\n\"\"\"\n\nfrom discord import Embed, User, Color\nfrom 
discord.ext.commands import Cog, command, Context\n\nfrom models.bot_model import CustomBot\n\nclass Other(Cog):\n \"\"\"\n Bot other commands cog\n \"\"\"\n\n def __init__(self, bot: CustomBot):\n self.bot = bot\n\n @command(description='Xem độ trễ của bot')\n async def ping(self, ctx: Context):\n \"\"\"\n Check bot ping\n \"\"\"\n await ctx.send(f'\\U0001f3d3 Pong! `{round(self.bot.latency * 1000)}ms`')\n\n @command(aliases=['av', 'avt'], description='Xem avatar của 1 user nào đó')\n async def avatar(self, ctx: Context, user: User = None):\n \"\"\"\n Get user avatar\n \"\"\"\n\n if user is None:\n user = ctx.author\n embed = Embed(\n title=f\"{user}'s avatar\",\n colour=Color.random()\n ).set_image(\n url=user.display_avatar.url\n )\n await ctx.send(embed=embed)\n\n # @avatar.error\n # async def avatar_e(self, ctx: Context, error):\n # if isinstance(error, UserNotFound):\n # return await ctx.reply('Không tìm thấy user này')\n # raise error\n\n\nasync def setup(bot: CustomBot):\n \"\"\"\n Run at setup\n \"\"\"\n\n await bot.add_cog(Other(bot))\n","repo_name":"tobycm/ayato","sub_path":"cogs/other.py","file_name":"other.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"75071604070","text":"\"\"\"\nThis code returns a DFA that is equivalent to the Tree constructed by compressing all the traces into one tree.\n\"\"\"\n\nimport read_traces, DFA_utils_tree_only, time, tree_utils\n\n\ndef solve_tree_only(g_pos, G, Sigma, T, timeout, info, be_quiet=False):\n\tassert g_pos in G, f\"Error, g_pos not in G\"\n\n\t# creating the auxiliary tree structure\n\ttree = tree_utils.create_tree(g_pos, G, Sigma, T, prune=False)\n\tnodes = tree_utils.get_reachable_nodes(tree)\n\n\t# creating an equivalent DFA\n\tq_0 = 0\n\tq_pos = 1\n\tq_neg = 2\n\n\t# assigning ids to each node\n\tn_current = 3\n\tfor n in nodes:\n\t\tif n.is_root():\n\t\t\tn.assign_id(q_0)\n\t\telif n.is_positive_node():\n\t\t\tn.assign_id(q_pos)\n\t\telif n.is_negative_node():\n\t\t\tn.assign_id(q_neg)\n\t\telse:\n\t\t\tn.assign_id(n_current)\n\t\t\tn_current += 1\n\n\t# creating the dfa\n\tdfa = {}\n\tfor ni in nodes:\n\t\tif ni.is_terminal():\n\t\t\tcontinue\n\t\tni_id = ni.get_id()\n\t\tfor nj in ni.get_children():\n\t\t\tnj_id = nj.get_id()\n\t\t\tni_sigma = nj.get_psigma()\n\t\t\tdfa[(ni_id,ni_sigma)] = nj_id\n\tDFA_utils_tree_only.clean_dfa(q_0, dfa, T)\n\n\t# Adding the probabilities\n\tpos_prob = DFA_utils_tree_only.add_probabilities(q_0, dfa, T, g_pos)\n\n\treturn q_0, dfa, pos_prob\n","repo_name":"andrewli77/DISC","sub_path":"tree_only.py","file_name":"tree_only.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"71"} +{"seq_id":"72221640229","text":"from __future__ import print_function\nfrom __future__ import division\n\nimport random\nimport math\nfrom collections import deque\nimport numpy as np \nimport matplotlib.pyplot as plt\n\nfrom const import COL_TO_IND\nfrom const import IND_TO_COL\n\nclass card(object): \n\t\n\tdef __init__(self, value, color, k_val, k_col): \n\t\tself.value = value \n\t\tself.color = color \n\n\t\tself.know_val = k_val\n\t\tself.know_col = k_col\n\n\t\tself.name = IND_TO_COL[self.color] + \" \" + str(self.value)\n\t\tself.know_name = self.get_name()\n\n\t# sets either the color or value boolean to true\n\tdef know(self, which): \n\t\tif which == 'value': \n\t\t\tself.know_val = True\n\t\telif which == 'color': 
\n\t\t\tself.know_col = True\n\t\tself.know_name = self.get_name()\n\n\t# returns what we know about the card based on what's been hinted\n\tdef get_name(self): \n\t\tif self.know_val == True and self.know_col == False: \n\t\t\treturn str(self.value)\n\t\tif self.know_val == True and self.know_col == True: \n\t\t\treturn IND_TO_COL[self.color] + \" \" + str(self.value)\n\t\tif self.know_val == False and self.know_col == True: \n\t\t\treturn IND_TO_COL[self.color]\n\t\telse: \n\t\t\treturn \"\"\n\n\n#===============================================================================\n# Helper Functions\n#===============================================================================\n\n\n","repo_name":"audhuang/hanabi_ai","sub_path":"hanabi/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"6132217454","text":"from fastapi import FastAPI, APIRouter\r\n\r\nfrom services import get_channels, get_packages, post_package, home\r\n\r\napp = FastAPI(\r\n title='DTH CHANNELS',\r\n description='Displays channels list and provides provision to add or modify channels packages',\r\n openapi_url=f'/openapi.json',\r\n redoc_url=f'/redoc'\r\n)\r\n\r\nrouter = APIRouter()\r\n\r\napp.include_router(home.router)\r\napp.include_router(get_channels.router)\r\napp.include_router(post_package.router)\r\napp.include_router(get_packages.router)\r\n\r\nif __name__ == \"__main__\":\r\n import uvicorn\r\n app.debug = True\r\n uvicorn.run(\r\n app=app,\r\n debug=app.debug\r\n )","repo_name":"vaidehi-nalmas/simple_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"71214412709","text":"#!/usr/bin/env python3\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:\n if not root:\n return []\n nodes_values = []\n\n def traverseHelper(tree_node):\n if not tree_node:\n return\n traverseHelper(tree_node.left)\n nodes_values.append(tree_node.val)\n traverseHelper(tree_node.right)\n traverseHelper(root)\n return nodes_values;\n","repo_name":"codeme254/2023-code_surgery","sub_path":"0x16-binary_tree_inorder_traversal/optimized_solution1.py","file_name":"optimized_solution1.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"33903005235","text":"from odoo import fields, models\nfrom odoo.addons.mail.tools.discuss import get_twilio_credentials\nimport requests\n\n\nclass MailIceServer(models.Model):\n _name = 'mail.ice.server'\n _description = 'ICE server'\n\n server_type = fields.Selection([('stun', 'stun:'), ('turn', 'turn:')], string='Type', required=True, default='stun')\n uri = fields.Char('URI', required=True)\n username = fields.Char()\n credential = fields.Char()\n\n def _get_local_ice_servers(self):\n \"\"\"\n :return: List of up to 5 dict, each of which representing a stun or turn server\n \"\"\"\n # firefox has a hard cap of 5 ice servers\n ice_servers = self.sudo().search([], limit=5)\n formatted_ice_servers = []\n for ice_server in ice_servers:\n formatted_ice_server = {\n 'urls': '%s:%s' % (ice_server.server_type, ice_server.uri),\n }\n if 
ice_server.username:\n formatted_ice_server['username'] = ice_server.username\n if ice_server.credential:\n formatted_ice_server['credential'] = ice_server.credential\n formatted_ice_servers.append(formatted_ice_server)\n return formatted_ice_servers\n\n def _get_ice_servers(self):\n \"\"\"\n :return: List of dict, each of which representing a stun or turn server,\n formatted as expected by the specifications of RTCConfiguration.iceServers\n \"\"\"\n if self.env['ir.config_parameter'].sudo().get_param('mail.use_twilio_rtc_servers'):\n (account_sid, auth_token) = get_twilio_credentials(self.env)\n if account_sid and auth_token:\n url = f'https://api.twilio.com/2010-04-01/Accounts/{account_sid}/Tokens.json'\n response = requests.post(url, auth=(account_sid, auth_token), timeout=60)\n if response.ok:\n response_content = response.json()\n if response_content:\n return response_content['ice_servers']\n return self._get_local_ice_servers()\n","repo_name":"odoo/odoo","sub_path":"addons/mail/models/mail_ice_server.py","file_name":"mail_ice_server.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","stars":31745,"dataset":"github-code","pt":"71"} +{"seq_id":"15660027794","text":"from typing import TYPE_CHECKING, Callable, Optional\nfrom unittest.mock import MagicMock\n\nimport pytest\nfrom pytest_mock import MockerFixture\nfrom pytest_qgis import QgisInterface, QgsMapCanvas\nfrom qgis.core import (\n QgsAnnotationLayer,\n QgsFeature,\n QgsGeometry,\n QgsLineString,\n QgsPointLocator,\n QgsPointXY,\n QgsProject,\n QgsVectorLayer,\n)\nfrom qgis.gui import QgsMapMouseEvent, QgsMapToolIdentify\nfrom qgis.PyQt.QtCore import QEvent, QPoint, Qt\nfrom qgis.PyQt.QtGui import QKeyEvent\nfrom segment_reshape.geometry import reshape\nfrom segment_reshape.map_tool.segment_reshape_tool import SegmentReshapeTool, ToolMode\n\nif TYPE_CHECKING:\n from typing import Protocol\n\n class MouseEventFactoryType(Protocol):\n def __call__(\n self,\n location: QgsPointXY,\n mouse_event_type: QEvent.Type,\n mouse_button: Optional[Qt.MouseButton] = Qt.NoButton,\n ) -> QgsMapMouseEvent:\n ...\n\n\nMOUSE_LOCATION = QgsPointXY(1.5, 1.5)\n\n\n@pytest.fixture()\ndef mouse_event_factory(\n qgis_canvas: QgsMapCanvas,\n) -> \"MouseEventFactoryType\":\n def mouse_event_for_location(\n location: QgsPointXY,\n mouse_event_type: QEvent.Type,\n mouse_button: Optional[Qt.MouseButton] = None,\n ) -> QgsMapMouseEvent:\n mouse_button = mouse_button or Qt.NoButton\n event = QgsMapMouseEvent(\n qgis_canvas,\n mouse_event_type,\n QPoint(0, 0),\n mouse_button,\n )\n event.mapPoint = lambda: location # type: ignore[method-assign]\n event.mapPointMatch = lambda: QgsPointLocator.Match() # type: ignore[method-assign]\n return event\n\n return mouse_event_for_location\n\n\n@pytest.fixture()\ndef _add_layer(\n qgis_canvas: QgsMapCanvas,\n) -> None:\n layer = QgsAnnotationLayer(\n \"test\",\n QgsAnnotationLayer.LayerOptions(QgsProject.instance().transformContext()),\n )\n QgsProject.instance().addMapLayers([layer])\n qgis_canvas.setLayers([layer])\n qgis_canvas.setCurrentLayer(layer)\n\n\ndef _create_identify_result(\n identified_features: list[tuple[QgsFeature, QgsVectorLayer]]\n) -> list[QgsMapToolIdentify.IdentifyResult]:\n results = []\n\n for feature, layer in identified_features:\n # using the actual QgsMapToolIdentify.IdentifyResult causes\n # fatal exceptions, mock probably is sufficient for testing\n results.append(\n MagicMock(**{\"mLayer\": layer, \"mFeature\": feature}) # noqa: PIE804\n )\n\n 
return results\n\n\n@pytest.fixture()\ndef map_tool(qgis_canvas: QgsMapCanvas, qgis_new_project: None) -> SegmentReshapeTool:\n tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(tool)\n return tool\n\n\ndef test_change_to_pick_location_mode_resets_rubberbands(map_tool: SegmentReshapeTool):\n map_tool._change_to_reshape_mode_for_geom(\n QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n )\n\n map_tool._change_to_pick_location_mode()\n\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n assert map_tool.old_segment_rubber_band.asGeometry().isEmpty()\n assert map_tool.start_point_indicator_rubber_band.asGeometry().isEmpty()\n\n\ndef test_pressing_esc_in_reshape_mode_aborts_reshape(map_tool: SegmentReshapeTool):\n map_tool._change_to_reshape_mode_for_geom(\n QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1, 2 2, 3 3, 4 4, 5 5)\"), None\n )\n assert map_tool._tool_mode == ToolMode.RESHAPE\n\n escape_press = QKeyEvent(QEvent.KeyPress, Qt.Key_Escape, Qt.NoModifier)\n map_tool.keyPressEvent(escape_press)\n\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n assert map_tool.old_segment_rubber_band.asGeometry().isEmpty()\n assert map_tool.start_point_indicator_rubber_band.asGeometry().isEmpty()\n\n\ndef test_change_to_change_to_reshape_mode_toggles_pick_mode_off(\n map_tool: SegmentReshapeTool,\n):\n map_tool._tool_mode = ToolMode.PICK_SEGMENT\n\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n assert map_tool._tool_mode == ToolMode.RESHAPE\n\n\ndef test_left_mouse_click_in_pick_mode_does_nothing_if_active_layer_or_feature_not_found(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool._change_to_pick_location_mode()\n m_find_common_segment = mocker.patch.object(\n map_tool, \"_find_common_segment\", return_value=(None, None), autospec=True\n )\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n m_find_common_segment.assert_called_once()\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n\n\ndef test_left_mouse_click_in_pick_mode_does_nothing_if_common_segment_not_found(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool._change_to_pick_location_mode()\n\n m_find_common_segment = mocker.patch.object(\n map_tool,\n \"_find_common_segment\",\n return_value=(None, QgsVectorLayer(\"test\")),\n autospec=True,\n )\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n m_find_common_segment.assert_called_once()\n\n assert map_tool._tool_mode == ToolMode.PICK_SEGMENT\n\n\ndef test_left_mouse_click_in_pick_mode_starts_reshape_mode_if_common_segment_is_found(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool._change_to_pick_location_mode()\n assert map_tool.old_segment_rubber_band.asGeometry().isEmpty()\n assert map_tool.start_point_indicator_rubber_band.asGeometry().isEmpty()\n\n m_find_common_segment = mocker.patch.object(\n map_tool,\n \"_find_common_segment\",\n return_value=(QgsLineString([(0, 0), (1, 1)]), QgsVectorLayer(\"test\")),\n autospec=True,\n )\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n 
m_find_common_segment.assert_called_once()\n\n assert map_tool._tool_mode == ToolMode.RESHAPE\n\n assert map_tool.old_segment_rubber_band.asGeometry().isGeosEqual(\n QgsGeometry.fromWkt(\"LineString (0 0, 1 1)\")\n )\n assert (\n map_tool.start_point_indicator_rubber_band.asGeometry().asWkt()\n == \"LineString (0 0, 0 0)\"\n )\n\n\n@pytest.mark.usefixtures(\"_add_layer\")\ndef test_left_mouse_click_in_reshape_mode_adds_points_to_maptool(\n mocker: MockerFixture,\n qgis_canvas: QgsMapCanvas,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n m_make_reshape_edits = mocker.patch.object(\n reshape, \"make_reshape_edits\", autospec=True\n )\n\n map_tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(map_tool)\n\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n map_release = mouse_event_factory(\n MOUSE_LOCATION,\n QEvent.MouseButtonRelease,\n Qt.LeftButton,\n )\n map_tool.canvasReleaseEvent(map_release)\n\n assert map_tool.captureCurve().curveToLine().asWkt() == \"LineString (1.5 1.5)\"\n\n m_make_reshape_edits.assert_not_called()\n\n\n@pytest.mark.usefixtures(\"_add_layer\")\n@pytest.mark.parametrize(\n (\"points_to_remove\", \"expected_new\"),\n [\n (1, \"LineString (0 0, 1 1)\"),\n (2, \"LineString (0 0)\"),\n (3, \"LineString EMPTY\"),\n (6, \"LineString EMPTY\"),\n ],\n ids=[\n \"undo-few-last\",\n \"undo-so-that-one-left\",\n \"undo-first-point_temp-should-change-to-old_geom-start\",\n \"undo-when-no-points-left_temp-should-change-to-old_geom-start\",\n ],\n)\ndef test_undo_add_vertex_should_update_new(\n # map_tool: SegmentReshapeTool,\n qgis_canvas: QgsMapCanvas,\n mouse_event_factory: \"MouseEventFactoryType\",\n points_to_remove: int,\n expected_new: str,\n):\n map_tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(map_tool)\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(1 0, 2 0, 3 0)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n # Add new line\n for new_point in [(0, 0), (1, 1), (2, 2)]:\n map_tool.addVertex(QgsPointXY(*new_point))\n\n assert map_tool.captureCurve().curveToLine().asWkt() == \"LineString (0 0, 1 1, 2 2)\"\n\n # Undo n times\n undo_key_event = QKeyEvent(QEvent.KeyPress, Qt.Key_Backspace, Qt.NoModifier)\n for _ in range(points_to_remove):\n map_tool.keyPressEvent(undo_key_event)\n\n assert map_tool._tool_mode == ToolMode.RESHAPE\n assert map_tool.captureCurve().curveToLine().asWkt() == expected_new\n assert map_tool.old_segment_rubber_band.asGeometry().asWkt() == old_geom.asWkt()\n\n\ndef test_start_point_indicator_rubberband(\n # map_tool: SegmentReshapeTool,\n qgis_canvas: QgsMapCanvas,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n map_tool = SegmentReshapeTool(qgis_canvas)\n qgis_canvas.setMapTool(map_tool)\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(1 0, 2 0, 3 0)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n # Move cursor to move temp rubberband end point\n mouse_move_event = mouse_event_factory(QgsPointXY(2, 3), QEvent.MouseMove)\n map_tool.canvasMoveEvent(mouse_move_event)\n\n assert map_tool.start_point_indicator_rubber_band.isVisible()\n assert (\n map_tool.start_point_indicator_rubber_band.asGeometry().asWkt()\n == \"LineString (1 0, 2 3)\"\n )\n\n map_tool.addVertex(QgsPointXY(1, 1))\n\n assert not map_tool.start_point_indicator_rubber_band.isVisible()\n\n undo_key_event = QKeyEvent(QEvent.KeyPress, Qt.Key_Backspace, Qt.NoModifier)\n map_tool.keyPressEvent(undo_key_event)\n\n mouse_move_event = 
mouse_event_factory(QgsPointXY(4, 3), QEvent.MouseMove)\n map_tool.canvasMoveEvent(mouse_move_event)\n assert map_tool.start_point_indicator_rubber_band.isVisible()\n assert (\n map_tool.start_point_indicator_rubber_band.asGeometry().asWkt()\n == \"LineString (1 0, 4 3)\"\n )\n\n\ndef test_right_mouse_click_in_reshape_mode_changes_only_to_pick_mode_if_edited_geometry_is_empty(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n m_change_to_pick_location_mode = mocker.patch.object(\n map_tool, \"_change_to_pick_location_mode\", autospec=True\n )\n\n m_make_reshape_edits = mocker.patch.object(\n reshape, \"make_reshape_edits\", autospec=True\n )\n\n right_click = mouse_event_factory(\n QgsPointXY(1, 1), QEvent.MouseButtonRelease, Qt.RightButton\n )\n map_tool.cadCanvasReleaseEvent(right_click)\n\n m_change_to_pick_location_mode.assert_called_once()\n m_make_reshape_edits.assert_not_called()\n\n\ndef test_right_mouse_click_in_reshape_mode_calls_reshape_if_edited_geometry_is_not_empty(\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n mouse_event_factory: \"MouseEventFactoryType\",\n):\n old_geom = QgsGeometry.fromWkt(\"LINESTRING(0 0, 1 1)\")\n map_tool._change_to_reshape_mode_for_geom(old_geom)\n\n m_change_to_pick_location_mode = mocker.patch.object(\n map_tool, \"_change_to_pick_location_mode\", autospec=True\n )\n m_make_reshape_edits = mocker.patch.object(\n reshape, \"make_reshape_edits\", autospec=True\n )\n\n left_click = mouse_event_factory(\n QgsPointXY(1, 1), QEvent.MouseButtonRelease, Qt.LeftButton\n )\n # Add point to rubberband\n map_tool.canvasReleaseEvent(left_click)\n\n # Test\n right_click = mouse_event_factory(\n QgsPointXY(1, 1), QEvent.MouseButtonRelease, Qt.RightButton\n )\n map_tool.cadCanvasReleaseEvent(right_click)\n\n m_change_to_pick_location_mode.assert_called_once()\n m_make_reshape_edits.assert_called_once()\n\n\n@pytest.mark.usefixtures(\"_use_topological_editing\")\ndef test_find_common_segment_should_return_shared_segment(\n qgis_iface: QgisInterface,\n map_tool: SegmentReshapeTool,\n mocker: MockerFixture,\n preset_features_layer_factory: Callable[\n [str, list[str]], tuple[QgsVectorLayer, list[QgsFeature]]\n ],\n):\n layer, (base_feature, *_) = preset_features_layer_factory(\n \"l1\",\n [\n \"LINESTRING(0 0, 1 1, 2 2, 3 3)\", # base\n \"LINESTRING(1 0, 1 1, 2 2, 2 0)\", # partly common\n \"LINESTRING(0 2, 2 2, 1 1, 0 1)\", # partly common reversed\n \"LINESTRING(0 0, 1 1)\", # edge start\n \"LINESTRING(2 2, 3 3)\", # edge end\n ],\n )\n\n QgsProject.instance().addMapLayer(layer)\n qgis_iface.setActiveLayer(layer)\n\n results = _create_identify_result(\n [\n (feature, layer)\n for feature in layer.getFeatures()\n if feature.id() != base_feature.id()\n ]\n )\n mocker.patch.object(QgsMapToolIdentify, \"identify\", return_value=results)\n\n segment, segment_layer = map_tool._find_common_segment(MOUSE_LOCATION)\n\n assert segment_layer == layer\n\n assert QgsGeometry(segment).isGeosEqual(QgsGeometry.fromWkt(\"LINESTRING(1 1, 2 2)\"))\n","repo_name":"nlsfi/segment-reshape-qgis-plugin","sub_path":"test/map_tool/test_segment_reshape_tool.py","file_name":"test_segment_reshape_tool.py","file_ext":"py","file_size_in_byte":13332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"10312988363","text":"import asyncio\nfrom telegram 
import Bot\nfrom config import TELEGRAM_TOKEN, CHAT_ID\n\n\ndef send_scopes(data_items):\n bot = Bot(token=TELEGRAM_TOKEN)\n # نمایش داده‌های جدید و جداول متناظر با آنها\n for table_name, data in data_items:\n if data:\n message = f\"New Data for Table '{table_name}': {data}\"\n print(message)\n\n # ارسال پیام به تلگرام\n loop = asyncio.get_event_loop()\n loop.run_until_complete(bot.send_message(chat_id=CHAT_ID, text=message))\n","repo_name":"SShiravy/Sobi_hackerOne_bot","sub_path":"telegram_bot.py","file_name":"telegram_bot.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"fa","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"40148924395","text":"# 1\n# Напишите программу которая сложит все числа в заданном списке\n# выведет результат в консоль\nnums1 = [5, 6, 92, 47, 12, -18, 33, 8];\n\n\n# 2\n# Напишите программу которая добавит в список edited_names словари\n# с двумя парами { \"name\": \"имя с большой буквы\", \"nameLength\": \"длина имени\"}\nnames = ['jack', 'sarah', 'mary', 'joey', 'chris', 'samantha'];\nedited_names = [];\n\n\n\n# 3\n# Напишите программу которая в список edited_nums добавит словари\n# с тремя парами { \"number\": \"само число\", \"square\": \"число в квадрпате\", \"cube\": \"число в кубе\"}\nnums2 = [1, 2, 3, 4, 5, 6, 7, 8, 9];\nedited_nums = [];\n\n\n# 4\n# напишите программу которая выводит в консоль сумму всех\n# четных чисел в списке\n\nnums_list = [1, 12, 34, 71, 14, 12, 33, 70, 82, 81, 9, 19, 90];\n\n\n# 5\n# напишите программу которая проанализирует данный список и выведет в консоль самую длинную строку\n\nsome_strings = ['Star', 'Planet', 'Comet', 'Interstellar', 'Space'];\n\n# 6\n# напишите программу которая возьмёт из данного списка наименования книг которые вышли в этом году\n# и добавит их в новый список\n\nbooks = [\n {\n 'author': 'Jeremy Brook',\n 'title': 'My childhood',\n 'release': 2023\n },\n {\n 'author': 'Samantha Jhones',\n 'title': 'Living with ten cats',\n 'release': 2020\n },\n {\n 'author': 'Bob Summers',\n 'title': 'Exploring far space',\n 'release': 2021\n },\n {\n 'author': 'Bill Brown',\n 'title': 'Insects in our garden',\n 'release': 2023\n },\n {\n 'author': 'Jessica Love',\n 'title': 'Programming for begginers',\n 'release': 2023\n }\n];\n\n\n# 7\n# Напишите функцию которая будет принимать два аргумента (start, end)\n# Для каждого числа в диапозоне от start до end будет выводить число\n# И Четное оно Или нечетное\n","repo_name":"GammaIntelligenceTraining/Python13","sub_path":"102_homeworks/001_list_dictionary/homework.py","file_name":"homework.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"14608630536","text":"\"\"\"\nCommand that should run:\n - When the SYNC activity starts in order to make sure the local items are present in the remote Bricklink's store before\n issuing the delta updates when the orders are received.\n - Periodically (e.g. 
every hour), to push possible price changes and, in general, ensure correct synchronization.\n\nGet the differences between the local and the remote Bricklink store and upload the differences to the Bricklink store to ensure\nit has *at least* the local inventory items.\n\"\"\"\n\n\nimport asyncio\nfrom backends.bricklink import Bricklink, parse_bricklink_item_type, to_bricklink_item_type\nfrom backends.bricklink_types import StoreInventory\nfrom db import Session\nfrom models import InventoryItem\nfrom math import ceil\nfrom typing import List, Optional\nimport time\nfrom datetime import datetime\n\n\nMAX_ELAPSED_TIME = 40 * 60 # 40 minutes\nBATCH_SIZE = 100\n\n\nbricklink = Bricklink.from_supervisor()\n\n\ndef match_remote_inventory_item(inventory_item: InventoryItem, remote_inventory_item: StoreInventory) -> bool:\n does_match = True\n does_match &= inventory_item.item_id == remote_inventory_item['item']['no']\n does_match &= inventory_item.item_type == parse_bricklink_item_type(remote_inventory_item['item']['type'])\n does_match &= inventory_item.color_id == remote_inventory_item['color_id']\n does_match &= inventory_item.condition == remote_inventory_item['new_or_used']\n does_match &= inventory_item.user_remarks == remote_inventory_item['remarks']\n return does_match\n\n\nasync def search_item_in_remote_inventory_batch(inventory_item: InventoryItem, remote_inventory_batch: List[StoreInventory]) -> Optional[StoreInventory]:\n for remote_inventory_item in remote_inventory_batch:\n if match_remote_inventory_item(inventory_item, remote_inventory_item):\n return remote_inventory_item\n return None\n\n\ndef is_different(inventory_item: InventoryItem, remote_inventory_item: StoreInventory):\n result = False\n result |= inventory_item.unit_price != remote_inventory_item['unit_price']\n result |= inventory_item.quantity != remote_inventory_item['quantity']\n result |= inventory_item.user_description != remote_inventory_item['description']\n return result\n\n\ndef create_inventory_item(inventory_item: InventoryItem):\n # https://www.bricklink.com/v3/api.page?page=create-inventory\n bricklink.create_store_inventories(store_inventory_resources=[{\n 'item': {\n 'no': inventory_item.item_id,\n 'type': to_bricklink_item_type(inventory_item.item_type),\n },\n 'color_id': inventory_item.color_id,\n 'quantity': inventory_item.quantity,\n 'unit_price': 1.0,#inventory_item.unit_price,\n 'new_or_used': inventory_item.condition,\n 'completeness': None,\n 'description': inventory_item.user_description,\n 'remarks': inventory_item.user_remarks,\n 'bulk': None,\n 'is_retain': False,\n 'is_stock_room': False,\n 'my_cost': None,\n 'sale_rate': None,\n 'tier_quantity1': None,\n 'tier_quantity2': None,\n 'tier_quantity3': None,\n 'tier_price1': None,\n 'tier_price2': None,\n 'tier_price3': None,\n }])\n\n\ndef update_inventory_item(inventory_item: InventoryItem, remote_item_id: int):\n # https://www.bricklink.com/v3/api.page?page=update-inventory\n bricklink.update_store_inventory(remote_item_id, store_inventory_resource={\n 'quantity': inventory_item.quantity,\n 'description': inventory_item.user_description,\n 'remarks': inventory_item.user_remarks,\n 'bulk': None,\n 'is_retain': False,\n 'is_stock_room': False,\n 'stock_room_id': None,\n 'my_cost': None,\n 'sale_rate': None,\n 'tier_quantity1': None,\n 'tier_quantity2': None,\n 'tier_quantity3': None,\n 'tier_price1': None,\n 'tier_price2': None,\n 'tier_price3': None,\n })\n\n\nasync def run():\n started_at = time.time()\n\n remote_inventory = 
bricklink.get_store_inventories()\n\n session = Session()\n inventory_items: List[InventoryItem] = \\\n session.query(InventoryItem) \\\n .order_by(InventoryItem.bl_synced_at.asc()) \\\n .all()\n\n for item in inventory_items:\n if (time.time() - started_at) >= MAX_ELAPSED_TIME:\n print(f\"Max elapsed time reached\")\n return\n\n print(f\"Item {item.item_id} ({item.item_type}) {item.color.name}\", end='')\n\n if not item.is_valid_for_bricklink():\n print(f\" -> NOT SYNCABLE\")\n continue\n\n matching_remote_items: List[Optional[StoreInventory]] = \\\n await asyncio.gather(*[\n search_item_in_remote_inventory_batch(item, remote_inventory[i * BATCH_SIZE:(i + 1) * BATCH_SIZE])\n for i in range(0, ceil(len(remote_inventory) / BATCH_SIZE))\n ])\n \n matching_remote_items = [ item for item in matching_remote_items if item ]\n\n if len(matching_remote_items) == 0:\n # There's no remote item that matches local inventory item, therefore we need to CREATE IT\n\n print(f\" -> CREATE\")\n\n create_inventory_item(item)\n else:\n # If more than a remote item matches (e.g. different description), keep the item whose inventory_id is less\n matching_remote_items.sort(key=lambda x: x['inventory_id'])\n matching_remote_item: StoreInventory = matching_remote_items[0]\n\n if is_different(item, matching_remote_item):\n # If the local item is considered to be different from remote, issue an UPDATE\n\n print(f\" -> UPDATE (\"\n f\"quantity: {item.quantity}/{matching_remote_item['quantity']}, \"\n f\"unit_price: {item.unit_price}/{matching_remote_item['unit_price']}, \"\n f\"user_description: \\\"{item.user_description}\\\"/\\\"{matching_remote_item['description']}\\\"\"\n \")\")\n\n update_inventory_item(item, matching_remote_item['inventory_id'])\n else:\n print('') # Nothing to do!\n \n item.bl_synced_at = datetime.now()\n","repo_name":"loryruta/brick-scraper","sub_path":"src/commands/sync_bricklink_store.py","file_name":"sync_bricklink_store.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"4586941478","text":"#205. 
Isomorphic Strings\nclass Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n s_to_t = {}\n t_to_s = {}\n\n for i in range(len(s)):\n \n if s[i] not in s_to_t:\n s_to_t[s[i]] = []\n \n if t[i] not in t_to_s:\n t_to_s[t[i]] = []\n\n if t[i] not in s_to_t[s[i]]:\n s_to_t[s[i]].append(t[i])\n\n if s[i] not in t_to_s[t[i]]:\n t_to_s[t[i]].append(s[i])\n \n if len(t_to_s[t[i]]) > 1 or len(s_to_t[s[i]]) > 1:\n return False \n \n if t_to_s[s_to_t[s[i]][0]][0] != s[i]:\n return False\n\n \n\n \n return True\n","repo_name":"SpinachXPasta/Leetcode","sub_path":"isIsomorphic.py","file_name":"isIsomorphic.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5762353213","text":"from __future__ import print_function\nimport sys\nimport argparse\nimport os.path\nfrom check_file import check_file\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', '--vcf-file', required=True, help='')\n parser.add_argument('-o', '--output-file', nargs='?', help='')\n parser.add_argument('-s', '--stdout', action='store_true', help='')\n args = parser.parse_args()\n\n check_file(args.vcf_file)\n\n if not args.stdout and not args.output_file:\n args.output_file = args.vcf_file + '.noh'\n\n return(args.vcf_file, args.output_file)\n\n\ndef remove_header(input_file, output_file=None):\n if not output_file:\n of = sys.stdout\n else:\n of = open(output_file, 'w')\n\n with open(input_file, 'r') as f:\n for line in f:\n if not line.startswith('##'):\n print(line, file=of, end='')\n\n\ndef main():\n input_file, output_file = get_args()\n remove_header(input_file, output_file)\n\nif __name__ == '__main__':\n main()\n","repo_name":"andrewquitadamo/matrix_eqtl_pipeline","sub_path":"code/remove_vcf_header.py","file_name":"remove_vcf_header.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"39775377545","text":"from flask import Flask, render_template, redirect, flash\nfrom flask.globals import request\nfrom flask_debugtoolbar import DebugToolbarExtension\n\nfrom Surveys import Question as Q, Survey as S, satisfaction_survey as ss, personality_quiz as pq, surveys\nfrom QuestionForm import QuestionForm as QF\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = \"himitsu-desu\"\n\ndebug = DebugToolbarExtension(app)\n\n\nresponses = []\n\n\n@app.route('/')\ndef start_survey():\n \"\"\"Return start survey page.\"\"\"\n \n title = ss.title\n instructions = ss.instructions\n \n return render_template(\"start.html\", instructions = instructions, title = title)\n\n\n@app.route('/question/')\ndef question(id):\n \"\"\"Return question page.\"\"\"\n form = QF()\n\n current_id = len(responses)\n \n if id == current_id:\n title = ss.title\n question = ss.questions[current_id].question\n choices = ss.questions[current_id].choices\n instructions = ss.instructions\n return render_template(\"question.html\", question = question, title = title, id = current_id, form = form, instructions = instructions, choices = choices)\n elif id > len(ss.questions) and len(responses) >= len(ss.questions):\n return redirect('/thanks')\n else: \n flash(\"Invalid question, try again!\", 'warn')\n return redirect(f'/question/{current_id}')\n\n\n@app.route('/answer', methods=['POST'])\ndef handle_answer():\n \"\"\"Handle answer from form submission and redirect.\"\"\"\n \n answer = request.form['choices']\n 
responses.append(answer)\n current_id = len(responses)\n \n if len(responses) < len(ss.questions):\n return redirect(f'/question/{current_id}')\n else:\n return redirect('/thanks')\n \n \n@app.route('/thanks')\ndef thanks():\n \"\"\"Return thank you page.\"\"\"\n \n return render_template(\"thanks.html\")","repo_name":"Katsurio/FlaskSurvey","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1327121465","text":"import re\nimport pandas as pd\nimport nltk\nnltk.download('brown')\nnltk.download('names')\nnltk.download('universal_tagset')\nnltk.download('average_perceptron_tagger')\nnltk.download('punkt')\nnltk.download('wordnet')\nfrom nltk.tokenize import word_tokenize, regexp_tokenize\nfrom nltk.stem import WordNetLemmatizer, SnowballStemmer\nfrom nltk.corpus import stopwords\nfrom textblob import TextBlob\nimport string\nfrom collections import Counter\nfrom normalise import normalise\n\nfrom sklearn.base import TransformerMixin, BaseEstimator\n\nclass TextSlack(BaseEstimator, TransformerMixin):\n def __init__(self, variety='BrE', user_abbrevs={}, lang='english'):\n try:\n self.variety = variety\n self.user_abbrevs = user_abbrevs\n self.lang = lang\n if self.lang in stopwords.fileids() and self.lang in SnowballStemmer.languages:\n self.stop_words = stopwords.words(lang)\n else:\n raise LanguageNotFoundException('{} is currently not supported by textslack.'.format(self.lang), 'Keep checking for support in the future updates.')\n self.lemmatizer = WordNetLemmatizer()\n self.stemmer = SnowballStemmer(lang, ignore_stopwords=True)\n \n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def fit(self, X, y=None):\n return self\n\n def transform(self, X, *_):\n if isinstance(X, pd.Series):\n return X.apply(self._preprocess_text)\n elif isinstance(X, list):\n return [self._preprocess_text(x) for x in X]\n else:\n return self._preprocess_text(X)\n\n def _preprocess_text(self, text):\n if self.lang == 'english':\n normalised_text = self._normalise(text)\n normalised_text = re.sub(' +', ' ', normalised_text)\n words = regexp_tokenize(normalised_text.lower(), r'[A-Za-z]+')\n removed_punct = self._remove_punct(words)\n removed_stopwords = self._remove_stopwords(removed_punct)\n return self._lemmatize(removed_stopwords)\n else:\n words = word_tokenize(text.lower())\n removed_punct = self._remove_punct(words)\n removed_stopwords = self._remove_stopwords(removed_punct)\n return ' '.join([w for w in removed_stopwords])\n\n def _normalise(self, text):\n try:\n return ' '.join(normalise(word_tokenize(text), variety=self.variety, user_abbrevs=self.user_abbrevs, verbose=False))\n except:\n return text\n\n def _remove_punct(self, words):\n return [w for w in words if w not in string.punctuation]\n\n def _remove_stopwords(self, words):\n return [w for w in words if w not in self.stop_words and len(w)>1]\n\n def _lemmatize(self, words):\n return ' '.join([self.lemmatizer.lemmatize(w, pos='v') for w in words])\n \n def _stem(self, words):\n return ' '.join([self.stemmer.stem(w) for w in words])\n \n def extract_nouns(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'NN'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning 
{}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def extract_verbs(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'VB'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def extract_adjectives(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'JJ'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def extract_adverbs(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n pos_tags, _ = self._blob_features(processed_text)\n return ' '.join([w for w, p in pos_tags if p == 'RB'])\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n \n def sentiment(self, text):\n try:\n if self.lang == 'english':\n processed_text = self._preprocess_text(text)\n _, polarity = self._blob_features(processed_text)\n return 'pos' if polarity > 0.0 else 'neg' if polarity < 0.0 else 'neu'\n else:\n raise LanguageNotFoundException('Sorry for the inconvenience, textslack is still learning {}.'.format(self.lang), 'Keep checking for support in the future updates.')\n except LanguageNotFoundException as e:\n print(str(e))\n print('Details: {}'.format(e.details))\n\n def _blob_features(self, text):\n blob = TextBlob(text)\n return blob.tags, blob.polarity\n \n def word_occurances(self, word, text):\n word_count_dic = dict(Counter([w for w in word_tokenize(text)]))\n return [c for w, c in word_count_dic.items() if w==word][0]\n \nclass LanguageNotFoundException(Exception):\n def __init__(self, message, details=None):\n self.message = message\n self.details = details\n def __str__(self):\n return str(self.message)","repo_name":"AnkitRajSri/textslack","sub_path":"textslack/textslack.py","file_name":"textslack.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"15362250181","text":"\"\"\"Add ticket_issued payment flag\n\nRevision ID: 17032733727a\nRevises: 3c6cca2d97e3\nCreate Date: 2019-11-05 16:07:14.444915\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '17032733727a'\ndown_revision = '3c6cca2d97e3'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('purchase', sa.Column('ticket_issued', sa.Boolean(), nullable=False, server_default='false'))\n op.add_column('purchase_version', sa.Column('ticket_issued', sa.Boolean(), autoincrement=False, nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('purchase_version', 'ticket_issued')\n op.drop_column('purchase', 'ticket_issued')\n # ### end Alembic commands ###\n","repo_name":"emfcamp/Website","sub_path":"migrations/versions/17032733727a_add_ticket_issued_purchase_flag.py","file_name":"17032733727a_add_ticket_issued_purchase_flag.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"71"} +{"seq_id":"12108466707","text":"\"\"\"Pig Latin, by Al Sweigart al@inventwithpython.com\nTranslates English messages into Igpay Atinlay.\nView this code at https://nostarch.com/big-book-small-python-projects\nTags: short, word\"\"\"\n\ntry:\n import pyperclip\nexcept ImportError:\n pass\n\nVOWELS = ('a', 'e', 'i', 'o', 'u', 'y')\n\ndef main():\n print('Enter your message:')\n pigLatin = englishToPigLatin(input('> '))\n\n print(pigLatin)\n\ndef englishToPigLatin(message):\n pigLatin = ''\n for word in message.split():\n prefixNonLetters = ''\n while len(word) > 0 and not word[0].isalpha():\n prefixNonLetters += word[0]\n word = word[1:]\n \n if len(word) == 0:\n pigLatin = pigLatin + prefixNonLetters + ' '\n continue\n\n suffixNonLetters = ''\n while not word[-1].isalpha():\n suffixNonLetters = word[-1] + suffixNonLetters\n word = word[:-1]\n \n wasUpper = word.isupper()\n wasTitle = word.istitle()\n\n word = word.lower()\n\n prefixConsonants = ''\n while len(word) > 0 and not word[0] in VOWELS:\n prefixConsonants += word[0]\n word = word[1:]\n \n if prefixConsonants != '':\n word += prefixConsonants + 'ay'\n else:\n word += 'yay'\n \n if wasUpper:\n word = word.upper()\n if wasTitle:\n word = word.title()\n \n pigLatin += prefixNonLetters + word + suffixNonLetters + ' '\n return pigLatin\n\nif __name__ == '__main__':\n main()","repo_name":"therealskv/python-small-projects","sub_path":"piglatin.py","file_name":"piglatin.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"26968876662","text":"import argparse\nimport functools\nimport operator\nimport pathlib\nfrom typing import Dict\n\nimport pandas as pd\nfrom bokeh.plotting import save\nfrom robotoff.taxonomy import Taxonomy\nfrom robotoff.utils import gzip_jsonl_iter\nfrom tensorflow import keras\n\nimport settings\nfrom category_classification.data_utils import generate_data_from_df\nfrom utils.error_analysis import (\n generate_analysis_model,\n get_deepest_categories,\n get_error_category,\n get_interactive_embedding_plot,\n)\nfrom utils.io import (\n load_category_vocabulary,\n load_config,\n load_ingredient_vocabulary,\n load_product_name_vocabulary,\n)\nfrom utils.metrics import evaluation_report\nfrom utils.preprocess import get_nlp\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"model_path\",\n type=pathlib.Path,\n default=pathlib.Path(__file__).parent / \"weights/0/saved_model\",\n )\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n model_dir = args.model_path.parent\n\n config = load_config(model_dir)\n\n category_to_id = load_category_vocabulary(model_dir)\n ingredient_to_id = 
load_ingredient_vocabulary(model_dir)\n category_names = [\n category\n for category, _ in sorted(category_to_id.items(), key=operator.itemgetter(1))\n ]\n\n nlp = get_nlp(config.lang)\n\n product_name_vocabulary = load_product_name_vocabulary(model_dir)\n model = keras.models.load_model(str(args.model_path))\n\n generate_data_partial = functools.partial(\n generate_data_from_df,\n ingredient_to_id=ingredient_to_id,\n category_to_id=category_to_id,\n product_name_max_length=config.model_config.product_name_max_length,\n product_name_token_to_int=product_name_vocabulary,\n nlp=nlp,\n product_name_preprocessing_config=config.product_name_preprocessing_config,\n nutriment_input=config.model_config.nutriment_input,\n )\n\n val_df = pd.DataFrame(gzip_jsonl_iter(settings.CATEGORY_XX_TEST_PATH))\n\n category_taxonomy: Taxonomy = Taxonomy.from_json(settings.CATEGORY_TAXONOMY_PATH)\n\n X_val, y_val = generate_data_partial(val_df)\n\n y_pred_val = model.predict(X_val)\n\n predicted = [\n [{category_names[i]: conf} for i, conf in sorted(enumerate(y)) if conf >= 0.5]\n for y in y_pred_val\n ]\n\n val_df[\"predicted categories\"] = [\n [p for p in preds if next(iter(p)) in categories]\n for preds, categories in zip(predicted, val_df.categories_tags)\n ]\n\n val_df[\"wrong prediction\"] = [\n [p for p in preds if next(iter(p)) not in categories]\n for preds, categories in zip(predicted, val_df.categories_tags)\n ]\n\n val_df[\"missed prediction\"] = [\n [\n category\n for category in categories\n if category not in [next(iter(d)) for d in preds]\n ]\n for preds, categories in zip(predicted, val_df.categories_tags)\n ]\n\n val_df = val_df[\n (val_df[\"wrong prediction\"].map(len) > 0)\n | (val_df[\"missed prediction\"].map(len) > 0)\n ]\n\n val_df.drop(\n [\n \"nutriments\",\n \"images\",\n \"product_name\",\n \"lang\",\n \"categories_tags\",\n \"ingredient_tags\",\n \"ingredients_text\",\n \"known_ingredient_tags\",\n ],\n axis=1,\n inplace=True,\n )\n\n val_df.rename(columns={\"code\": \"barcode\"}, inplace=True)\n\n pd.set_option(\"display.max_columns\", None)\n pd.set_option(\"display.max_rows\", None)\n pd.set_option(\"display.width\", None)\n pd.set_option(\"display.max_colwidth\", None)\n\n val_df.head(n=100).to_csv(\"misprediction_sample.csv\")\n\n #\n # report_val, clf_report_val = evaluation_report(y_val, y_pred_val,\n # taxonomy=category_taxonomy,\n # category_names=category_names)\n #\n #\n # def low_perf_categories_gen(clf_report: Dict,\n # min_support: int,\n # max_f1_score: float):\n # for category, metrics in clf_report.items():\n # f1_score = metrics['f1-score']\n # support = metrics['support']\n #\n # if support >= min_support:\n # if f1_score < max_f1_score:\n # yield category\n #\n # # train_df = pd.DataFrame(gzip_jsonl_iter(settings.CATEGORY_FR_TRAIN_PATH))\n # # train_df['deepest_categories'] = get_deepest_categories(category_taxonomy, train_df.categories_tags)\n # # X_train, y_train = generate_data_partial(train_df)\n #\n #\n # gen = low_perf_categories_gen(clf_report_val, min_support=10, max_f1_score=0.5)\n # cat = next(gen)\n # val_metrics = clf_report_val[cat]\n # cat_id = category_to_id[cat]\n # # train_samples_idx = y_train[:, cat_id].nonzero()[0]\n # val_samples_idx = y_val[:, cat_id].nonzero()[0]\n # # train_samples = train_df.iloc[train_samples_idx, :]\n # val_samples = val_df.iloc[val_samples_idx, :]\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"openfoodfacts/off-category-classification","sub_path":"attic/category_classification/sample_generator.py","file_name":"sample_generator.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"20717680","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pds\nfrom util.prefix import all\n\ndef main():\n\n df = pds.read_csv('word/properties/medicine.csv')\n\n pro = open('data/ontology/pro.ttl', 'w')\n pro.write(all())\n\n pro.write('\\n\\n\\n')\n\n for index, row in df.iterrows():\n pro.write('prom:P%d rdfs:label \"%s\"@cn .\\n' % (index, row['name']))\n\n pro.close()\n \n\nif __name__ == '__main__':\n main()\n","repo_name":"hetaov/spider","sub_path":"ontology/property.py","file_name":"property.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9242484216","text":"import tweepy, discord, time, requests \nfrom discord import Webhook, RequestsWebhookAdapter\n\n#auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth=tweepy.OAuthHandler(\"\", \"\");\n#auth.set_access_token(access_token, access_token_secret)\nauth.set_access_token(\"\", \"\");\napi = tweepy.API(auth);\n#get the discord webhook URL from creating a webhook in the prefered channel in your discord server\ndiscordWebhookURL = \"\";\n\n#Create a StreamListener.\nclass MyStreamListener(tweepy.StreamListener):\n def __init__(self, api):\n self.api = api;\n self.me = api.me();\n def process_data(self, status):\n print(status.text);\n def on_status(self, tweet):\n photos = []\n #Handle media.\n if 'media' in tweet.entities:\n for image in tweet.entities['media']:\n photos.append(image['media_url']);\n print(\"adding image to photos\");\n #Create webhook on discord server and include URL here.\n webhook = Webhook.from_url(discordWebhookURL, adapter=RequestsWebhookAdapter());\n print(len(photos))\n if len(photos) < 1:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text}\");\n if len(photos) == 1:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]}\");\n if len(photos) == 2:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]} {photos[1]}\");\n if len(photos) == 3:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]} {photos[1]} {photos[2]}\");\n if len(photos) == 4:\n webhook.send(f\"{tweet.user.name} tweeted : {tweet.text} {photos[0]} {photos[1]} {photos[2]} {photos[3]}\");\n def on_exception(self, exception):\n time.sleep(60);\n print('Took a minute break.');\n #Re-establish stream params in order to check if the stream is not running\n api = tweepy.API(auth);\n myStreamListener = MyStreamListener(api);\n stream = tweepy.Stream(api.auth, myStreamListener);\n if not stream.running:\n main();\n else:\n print('Failed to continue.');\ndef main():\n #Create a Stream\n api = tweepy.API(auth);\n myStreamListener = MyStreamListener(api);\n stream = tweepy.Stream(api.auth, myStreamListener);\n #Update this list of Twitter Ids (string) to follow. 
\n #Can find Twitter Id at https://codeofaninja.com/tools/find-twitter-id/\n stream.filter(follow=['']);\n#Continuously look out for Twitter events\nif __name__ == \"__main__\":\n while True:\n main()","repo_name":"TinyArcade/python-to-discordwebhooks","sub_path":"TweetToDiscord.py","file_name":"TweetToDiscord.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"32919062508","text":"import metview as mv\n\n# Note: at least Metview version 5.16.0 is required\n\n# getting data\nuse_mars = False\n\nfilename = \"vert_hovm_ml_tq.grib\"\nsteps = list(range(0, 132, 12))\n\n# getting forecast data from MARS\nif use_mars:\n ret_core = {\n \"date\": 20171016,\n \"time\": 0,\n \"levtype\": \"ml\",\n \"grid\": [1, 1],\n \"area\": [45, -10, 55, 5],\n }\n\n tq = mv.retrieve(\n type=\"fc\",\n param=[\"t\", \"q\"],\n step=steps,\n levelist=list(range(80, 138)),\n **ret_core\n )\n lnsp = mv.retrieve(type=\"fc\", param=\"lnsp\", step=steps, levelist=1, **ret_core)\n zs = mv.retrieve(type=\"an\", param=\"z\", levelist=1, **ret_core)\n g = mv.merge(tq, lnsp, zs)\n# read data from file\nelse:\n if mv.exist(filename):\n g = mv.read(filename)\n else:\n g = mv.gallery.load_dataset(filename)\n\n# extract surface geopotential\nzs = g.select(shortName=\"z\")\n\n# compute geopotential on model levels\nz = mv.Fieldset()\nfor step in steps:\n t = g.select(shortName=\"t\", step=step)\n q = g.select(shortName=\"q\", step=step)\n lnsp = g.select(shortName=\"lnsp\", step=step)\n z.append(mv.mvl_geopotential_on_ml(t, q, lnsp, zs))\n\n# scale geopotential to height above sea level\nz = z / 9.81\n\n# scale temperature from K to C\nt = g.select(shortName=\"t\") - 273.16\n\n# create input fieldset for vertical Hovmoeller\ng_hov = mv.merge(t, z)\n\n# define time axis\ntime_axis = mv.maxis(\n axis_type=\"date\",\n axis_tick_label_height=0.4,\n axis_date_type=\"hours\",\n axis_days_label_height=0.4,\n)\n\n# define vertical axis\nvertical_axis = mv.maxis(\n axis_type=\"position_list\",\n axis_tick_position_list=list(range(0, 4500, 500)),\n axis_tick_label_height=0.4,\n axis_title_text=\"Height ASL (m)\",\n axis_title_height=0.5,\n)\n\n# define vertical Hovmoeller with height above sea level axis for model level\n# data for a given location (at least Metview version 5.16.0 is required)\nhov = mv.mhovmoellerview(\n type=\"vertical_hovm\",\n input_mode=\"nearest_gridpoint\",\n point=[48, 2],\n vertical_level_type=\"user\",\n top_level=4000,\n bottom_level=0,\n vertical_coordinate_param=129,\n vertical_coordinate_extrapolate=\"off\",\n time_axis=time_axis,\n vertical_axis=vertical_axis,\n)\n\n# define contour shading\nt_shade = mv.mcont(\n contour_automatic_setting=\"style_name\",\n contour_style_name=\"sh_all_fM50t58i2\",\n legend=\"on\",\n)\n\n# define legend\nlegend = mv.mlegend(legend_text_font_size=0.3, legend_text_colour=\"charcoal\")\n\n# define the output plot file\nmv.setoutput(mv.pdf_output(output_name=\"vert_hovm_ml_in_height\"))\n\n# generate plot\nmv.plot(hov, g_hov, t_shade, legend)\n","repo_name":"ecmwf/metview-docs","sub_path":"docs/gallery/vert_hovm_ml_in_height.py","file_name":"vert_hovm_ml_in_height.py","file_ext":"py","file_size_in_byte":2641,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"71"} +{"seq_id":"34193952947","text":"import numpy as np\nimport scipy.io\nfrom sklearn.metrics import confusion_matrix\nimport matplotlib.pyplot as plt\nfrom scipy.io import 
loadmat\nfrom scipy import spatial\nfrom sklearn.preprocessing import normalize\nfrom sklearn.cluster import k_means\n\nfrom ex1a import count_non_zero\nfrom in_out import display_eigenvectors, save_values\n\n\nINPUT_PATH = 'data/face.mat'\nTRAINING_SPLIT_PERCENT = 0.7\nTRAINING_SPLIT = int(TRAINING_SPLIT_PERCENT*10)\nNUMBER_PEOPLE = 52\nM_PCA_reduction = 0 # Negative value\nM_LDA_reduction = 0 # Negative value\n\n# Leave those alone, access only\nM_PCA = 0\nM_LDA = 0\nSB_RANK = 0\nSW_RANK = 0\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n\n plt.yticks(tick_marks, classes)\n plt.xticks(tick_marks[0::5], classes[0::5], rotation=0)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n # plt.text(j, i, format(cm[i, j], fmt),\n # horizontalalignment=\"center\",\n # color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()\n\ndef import_processing(data, class_means=False):\n\n faces = loadmat(data)\n # faces dimension is 2576, 520 -> each image is column vector of pixels(46, 56)\n X = np.reshape(faces['X'], (46*56, 52, 10)) # separate arrays for each person\n X = split_data(X)\n means = np.mean(X[0], axis=1, keepdims=True)\n # data = [(x - means[0][..., None]) for i, x in enumerate(X)]\n return X, means\n\n\ndef split_data(x):\n random_indexes = np.arange(0, 10)\n np.random.shuffle(random_indexes)\n\n training_data = np.reshape(x[..., random_indexes[0:TRAINING_SPLIT]], (46*56, -1))\n test_data = np.reshape(x[..., random_indexes[TRAINING_SPLIT:]], (46*56, -1))\n\n data = [training_data, test_data]\n return data\n\ndef compute_S(data, low_res=False):\n\n N = data.shape[1]\n if low_res:\n data = data.transpose()\n S = np.matmul(data, data.transpose()) / N # Normalises by N\n\n return S\n\ndef find_eigenvectors(S, how_many=-1):\n\n if how_many is -1:\n how_many = S.shape[0]\n\n eigvalues, eigvectors = np.linalg.eig(S)\n indices = np.flip(np.argsort(eigvalues), axis=0) # Gives original indices after sorting\n sorted_eigvalues = eigvalues[indices]\n sorted_eigvectors = eigvectors[:, indices]\n\n return sorted_eigvalues[0:how_many], sorted_eigvectors[:, 0:how_many]\n\ndef retrieve_low_eigvecs(low_eigvecs, data): # Returns normalized eigenvectors\n\n vecs = np.matmul(data, low_eigvecs)\n vecs /= np.linalg.norm(vecs, axis=0)[None, :]\n return vecs\n\n\ndef find_projection(eigenvectors, faces): # eigenvectors and faces in vector form\n\n coeffs = np.matmul(faces.transpose(), eigenvectors).transpose()\n # number_of_eigenvectors X Faces\n return coeffs\n\n\ndef reduce_by_PCA(training_data, means):\n global M_PCA\n\n training_data_norm = training_data - means\n low_S = compute_S(training_data_norm, low_res=True)\n eig_val, eig_vec = find_eigenvectors(low_S, how_many=-1)\n eig_vec = retrieve_low_eigvecs(eig_vec, training_data_norm)\n M_PCA = training_data_norm.shape[1]-NUMBER_PEOPLE + M_PCA_reduction # hyperparameter Mpca <= 
N-c\n eig_vec_reduced = eig_vec[:, :M_PCA]\n return eig_vec_reduced\n\n\ndef compute_class_means(training_data):\n\n class_means = np.mean(training_data.reshape(-1, NUMBER_PEOPLE, TRAINING_SPLIT), axis=2) # Shape is 2576*52 -> D*c\n return class_means\n\n\ndef compute_class_scatters(training_data, class_means):\n\n class_means_expand = np.repeat(class_means, TRAINING_SPLIT, axis=1)\n class_means_expand = class_means_expand.reshape(-1, NUMBER_PEOPLE, TRAINING_SPLIT).transpose(1, 0, 2)\n training_data_resh = training_data.reshape(-1, NUMBER_PEOPLE, TRAINING_SPLIT).transpose(1, 0, 2)\n class_scatters = np.matmul(training_data_resh - class_means_expand, (training_data_resh - class_means_expand).transpose(0, 2, 1))\n # Might have to for loop but I think it works\n return class_scatters\n\ndef compute_Sb(class_means):\n\n global_mean = np.mean(class_means, axis=1, keepdims=True)\n global_mean = np.repeat(global_mean, NUMBER_PEOPLE, axis=1)\n Sb = np.matmul(class_means - global_mean, (class_means - global_mean).transpose())\n return Sb\n\ndef compute_Sw(class_scatters):\n\n Sw = np.sum(class_scatters, axis=0)\n return Sw\n\n\ndef compute_LDA_Fisherfaces(Sw, Sb, Wpca, faces):\n global M_LDA\n\n # Maybe remove mean from faces\n Sw_PCA = np.matmul(np.matmul(Wpca.transpose(), Sw), Wpca)\n Sb_PCA = np.matmul(np.matmul(Wpca.transpose(), Sb), Wpca)\n S = np.matmul(np.linalg.inv(Sw_PCA), Sb_PCA)\n eig_vals, fisherfaces = find_eigenvectors(S, how_many=-1)\n M_LDA = count_non_zero(eig_vals) + M_LDA_reduction # hyperparameter Mlda <= c-1 -> there should be 51 non_zero eiganvalues\n # print(M_LDA) # Mlda = c - 1 = 51\n fisherfaces_reduced = fisherfaces[:, :M_LDA]\n faces_PCA = find_projection(Wpca, faces)\n fisher_ref_coeffs = find_projection(fisherfaces_reduced, faces_PCA)\n return fisher_ref_coeffs, fisherfaces_reduced\n\n\ndef goto_original_domain(fisherfaces, Wpca):\n\n fisher_images = np.matmul(Wpca, fisherfaces)\n return fisher_images\n\n\ndef find_fisher_coeffs(candidate_images, Wpca, fisherfaces):\n\n PCA_images = find_projection(Wpca, candidate_images)\n LDA_coeffs = find_projection(fisherfaces, PCA_images) # 51 vector\n\n return LDA_coeffs\n\n\ndef classify(LDA_coeffs_training, LDA_coeffs_test):\n\n distances = []\n for i in range(LDA_coeffs_test.shape[1]):\n distances.append(np.linalg.norm(LDA_coeffs_training - LDA_coeffs_test[:, i][:, None], axis=0))\n\n return np.floor(np.argmin(np.array(distances), axis=1)/TRAINING_SPLIT).astype(np.uint16)\n\n\ndef create_ground_truth():\n\n true_individual_index = np.arange(0, NUMBER_PEOPLE)\n true_individual_index = np.repeat(true_individual_index[:, None], 10-TRAINING_SPLIT, axis=1).reshape(-1)\n return true_individual_index\n\n\ndef bool_and_accuracy(ground_truth, prediction):\n\n correct = ground_truth == prediction\n accuracy = (correct[correct].shape[0]) / (ground_truth.shape[0])\n return correct, accuracy\n\n\ndef identify_failure(bool_a, number=-1):\n\n indices = np.argwhere(~bool_a)[:, 0] # Gives original indices after sorting\n\n return indices[:number]\n\n\ndef identify_success(bool_a, number=-1):\n\n indices = np.argwhere(bool_a)[:, 0] # Gives original indices after sorting\n\n return indices[:number]\n\n\nif __name__ == '__main__':\n\n M_PCAs = []\n accuracies = []\n DISPLAY = True\n # while M_PCA_reduction > -312:\n\n [training_data, testing_data], means = import_processing(INPUT_PATH)\n Wpca = reduce_by_PCA(training_data, means)\n class_means = compute_class_means(training_data)\n class_scatters = compute_class_scatters(training_data, 
class_means)\n Sb = compute_Sb(class_means)\n SB_RANK = np.linalg.matrix_rank(Sb) # Rank is c - 1 -> 51\n # print(SB_RANK)\n Sw = compute_Sw(class_scatters)\n SW_RANK = np.linalg.matrix_rank(Sw) # Rank is N - c -> 312(train_imgs) - 52 = 260 (same as PCA reduction)\n # print(SW_RANK)\n reference_LDA_coeffs, fisherfaces = compute_LDA_Fisherfaces(Sw, Sb, Wpca, training_data)\n # CHECKED THIS FAR\n\n # fish_images = goto_original_domain(fisherfaces, Wpca)\n # display_eigenvectors(fish_images)\n\n # ''' Start classification procedure'''\n candidate_LDA_coeffs = find_fisher_coeffs(testing_data, Wpca, fisherfaces)\n classification = classify(reference_LDA_coeffs, candidate_LDA_coeffs)\n ground_truth = create_ground_truth()\n bool_array, accuracy = bool_and_accuracy(ground_truth, classification)\n\n failures = identify_failure(bool_array)\n success = identify_success(bool_array)\n\n conf_matrix = confusion_matrix(ground_truth, classification)\n if DISPLAY:\n plot_confusion_matrix(conf_matrix, np.arange(0, NUMBER_PEOPLE), normalize=True)\n display_eigenvectors(testing_data[:, failures] + means[0][:, None])\n display_eigenvectors(testing_data[:, success] + means[0][:, None])\n\n\n print(accuracy)\n\n accuracies.append(accuracy)\n M_PCAs.append(M_PCA)\n save_dict = {'accuracy': accuracies, 'training_split': TRAINING_SPLIT, 'M_PCA': M_PCAs, 'M_LDA': M_LDA,\n 'Sb_rank': SB_RANK, 'Sw_rank': SW_RANK}\n save_name = 'split_{}m_lda{}VARY_M_PCA'.format(TRAINING_SPLIT, M_LDA)\n save_values(save_dict, name=save_name)\n\n # M_PCA_reduction -= 15\n","repo_name":"timotheegath/PR1","sub_path":"ex2LDA.py","file_name":"ex2LDA.py","file_ext":"py","file_size_in_byte":9129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"39440343523","text":"l = [3, 3, 2, 4, 4, 5, 2, 1]\n# 0, 1, 2, 3, 4, 5, 6\n\n# insert\n# l.append(0) # if you only want to insert at the end, please use append\n# l.insert(2, 100) # insert is expensive, append is cheap\n# print(l)\n\n# delete\n# delete based on index\n# val = l.pop() # remove the last\n# val = l.pop(3)\n# print(l, val)\n# delete based on value\n# l.remove(2)\n# print(l)\n\nmax_num = max(l)\nmin_num = min(l)\nsum_num = sum(l)\nprint(max_num, min_num, sum_num)","repo_name":"ybao2000/algorithm_saturday","sub_path":"Lesson_01/list_operation_test.py","file_name":"list_operation_test.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6215424443","text":"import abc\nimport logging\n\nfrom plainbox.i18n import gettext as _\nfrom plainbox.impl.clitools import CommandBase, ToolBase\nfrom plainbox.impl.providers.special import get_stubbox_def\nfrom plainbox.impl.providers.v1 import all_providers\nfrom plainbox.impl.secure.providers.v1 import Provider1\n\n\nlogger = logging.getLogger(\"plainbox.commands\")\n\n\nclass PlainBoxToolBase(ToolBase):\n \"\"\"\n Base class for implementing commands like 'plainbox'.\n\n The tools support a variety of sub-commands, logging and debugging\n support. If argcomplete module is available and used properly in\n the shell then advanced tab-completion is also available.\n\n There are four methods to implement for a basic tool. Those are:\n\n 1. :meth:`get_exec_name()` -- to know how the command will be called\n 2. :meth:`get_exec_version()` -- to know how the version of the tool\n 3. :meth:`add_subcommands()` -- to add some actual commands to execute\n 4. 
:meth:`get_config_cls()` -- to know which config to use\n\n This class has some complex control flow to support important and\n interesting use cases. There are some concerns to people that subclass this\n in order to implement their own command line tools.\n\n The first concern is that input is parsed with two parsers, the early\n parser and the full parser. The early parser quickly checks for a fraction\n of supported arguments and uses that data to initialize environment before\n construction of a full parser is possible. The full parser sees the\n reminder of the input and does not re-parse things that where already\n handled.\n\n The second concern is that this command natively supports the concept of a\n config object and a provider object. This may not be desired by all users\n but it is the current state as of this writing. This means that by the time\n eary init is done we have a known provider and config objects that can be\n used to instantiate command objects in :meth:`add_subcommands()`. This API\n might change when full multi-provider is available but details are not\n known yet.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initialize all the variables, real stuff happens in main()\n \"\"\"\n super().__init__()\n self._config = None # set in late_init()\n self._provider_list = [] # updated in late_init()\n\n @classmethod\n @abc.abstractmethod\n def get_config_cls(cls):\n \"\"\"\n Get the Config class that is used by this implementation.\n\n This can be overridden by subclasses to use a different config class\n that is suitable for the particular application.\n \"\"\"\n\n def late_init(self, early_ns):\n \"\"\"\n Overridden version of late_init().\n\n This method loads the configuration object and the list of providers\n and stores them as instance attributes.\n \"\"\"\n super().late_init(early_ns)\n # Load plainbox configuration\n self._config = self.get_config_cls().get()\n # XXX: we cannot change _provider_list as the particular list object is\n # already passed as argument to several command classes. 
It seems safe\n # to append items to it though.\n self._provider_list.extend(self.get_provider_list(early_ns))\n\n def get_provider_list(self, ns):\n \"\"\"\n Get the list of job providers.\n\n This method looks at --providers argument to figure out which\n providers to expose to all of the commands.\n \"\"\"\n # If the default value of 'None' was set for the checkbox (provider)\n # argument then load the actual provider name from the configuration\n # object (default for that is 'auto').\n if ns.providers is None:\n ns.providers = self._config.default_provider\n assert ns.providers in ('all', 'stub')\n # Decide which providers to expose to the rest of plainbox\n if ns.providers == 'all':\n return self._load_really_all_providers()\n elif ns.providers == 'stub':\n return self._load_stub_provider_only()\n\n def _load_really_all_providers(self):\n provider_list = []\n # StubBox is always enabled\n provider_list.append(\n Provider1.from_definition(get_stubbox_def(), secure=False))\n # Load all normal providers\n all_providers.load()\n provider_list.extend(all_providers.get_all_plugin_objects())\n return provider_list\n\n def _load_stub_provider_only(self):\n return [Provider1.from_definition(get_stubbox_def(), secure=False)]\n\n def add_early_parser_arguments(self, parser):\n \"\"\"\n Overridden version of add_early_parser_arguments().\n\n This method adds the --providers argument to the set of early parser\n arguments, so that it is visible in autocomplete and help.\n \"\"\"\n group = parser.add_argument_group(\n title=_(\"provider list and development\"))\n group.add_argument(\n '--providers',\n action='store',\n choices=['all', 'stub'],\n # None is a special value that means 'use whatever is configured'\n default=None,\n help=_(\"which providers to load\"))\n super().add_early_parser_arguments(parser)\n\n\nclass PlainBoxCommand(CommandBase):\n \"\"\"\n Simple interface class for plainbox commands.\n\n Command objects like this are consumed by PlainBoxTool subclasses to\n implement hierarchical command system. The API supports arbitrary many sub\n commands in arbitrary nesting arrangement.\n \"\"\"\n\n gettext_domain = \"plainbox\"\n","repo_name":"Roadmaster/checkbox","sub_path":"plainbox/plainbox/impl/commands/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"8552415650","text":"\"\"\"\nThis file handles the details of the loss function during training.\n\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nimport onmt\nimport onmt.io\n\nTGT_VOCAB_SIZE = 606\nclass LossComputeBase(nn.Module):\n \"\"\"\n Class for managing efficient loss computation. Handles\n sharding next step predictions and accumulating mutiple\n loss computations\n\n\n Users can implement their own loss computation strategy by making\n subclass of this one. 
Users need to implement the _compute_loss()\n and make_shard_state() methods.\n\n Args:\n generator (:obj:`nn.Module`) :\n module that maps the output of the decoder to a\n distribution over the target vocabulary.\n tgt_vocab (:obj:`Vocab`) :\n torchtext vocab object representing the target output\n normalzation (str): normalize by \"sents\" or \"tokens\"\n \"\"\"\n def __init__(self, generator, tgt_vocab):\n super(LossComputeBase, self).__init__()\n self.generator = generator\n self.tgt_vocab = tgt_vocab\n self.padding_idx = tgt_vocab.stoi[onmt.io.PAD_WORD]\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n \"\"\"\n Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.\n \"\"\"\n return NotImplementedError\n\n def _compute_loss(self, batch, output, target, **kwargs):\n \"\"\"\n Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.\n \"\"\"\n return NotImplementedError\n\n def monolithic_compute_loss(self, batch, output, attns, stage1=True):\n \"\"\"\n Compute the forward loss for the batch.\n\n Args:\n batch (batch): batch of labeled examples\n output (:obj:`FloatTensor`):\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict of :obj:`FloatTensor`) :\n dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n stage1: is it stage1\n Returns:\n :obj:`onmt.Statistics`: loss statistics\n \"\"\"\n if stage1:\n range_ = (0, batch.tgt1.size(0))\n else:\n range_ = (0, batch.tgt2.size(0))\n shard_state = self._make_shard_state(batch, output, range_, attns)\n _, batch_stats = self._compute_loss(batch, **shard_state)\n\n return batch_stats\n\n def sharded_compute_loss(self, batch, output, attns,\n cur_trunc, trunc_size, shard_size,\n normalization, retain_graph=False):\n \"\"\"Compute the forward loss and backpropagate. Computation is done\n with shards and optionally truncation for memory efficiency.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(cur_trunc, cur_trunc + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. 
Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n cur_trunc (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n shard_size (int) : maximum number of examples in a shard\n normalization (int) : Loss is divided by this number\n\n Returns:\n :obj:`onmt.Statistics`: validation loss statistics\n\n \"\"\"\n batch_stats = onmt.Statistics()\n range_ = (cur_trunc, cur_trunc + trunc_size)\n shard_state = self._make_shard_state(batch, output, range_, attns)\n\n for shard in shards(shard_state, shard_size, retain_graph=retain_graph):\n loss, stats = self._compute_loss(batch, **shard)\n\n loss.div(normalization).backward()\n batch_stats.update(stats)\n\n return batch_stats\n\n def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target) \\\n .masked_select(non_padding) \\\n .sum()\n return onmt.Statistics(loss[0], non_padding.sum(), num_correct)\n\n def _bottle(self, v):\n return v.view(-1, v.size(2))\n\n def _unbottle(self, v, batch_size):\n return v.view(-1, batch_size, v.size(1))\n\n\nclass NMTLossCompute(LossComputeBase):\n \"\"\"\n Standard NMT Loss Computation.\n \"\"\"\n def __init__(self, generator, tgt_vocab, normalization=\"sents\",\n label_smoothing=0.0, decoder_type='rnn'):\n super(NMTLossCompute, self).__init__(generator, tgt_vocab)\n assert (label_smoothing >= 0.0 and label_smoothing <= 1.0)\n self.decoder_type = decoder_type\n if label_smoothing > 0:\n # When label smoothing is turned on,\n # KL-divergence between q_{smoothed ground truth prob.}(w)\n # and p_{prob. 
computed by model}(w) is minimized.\n # If label smoothing value is set to zero, the loss\n # is equivalent to NLLLoss or CrossEntropyLoss.\n # All non-true labels are uniformly set to low-confidence.\n self.criterion = nn.KLDivLoss(size_average=False)\n one_hot = torch.randn(1, len(tgt_vocab))\n one_hot.fill_(label_smoothing / (len(tgt_vocab) - 2))\n one_hot[0][self.padding_idx] = 0\n self.register_buffer('one_hot', one_hot)\n else:\n if self.decoder_type == 'pointer':\n weight = torch.ones(TGT_VOCAB_SIZE)\n else:\n weight = torch.ones(len(tgt_vocab))\n weight[self.padding_idx] = 0\n self.criterion = nn.NLLLoss(weight, size_average=False)\n self.confidence = 1.0 - label_smoothing\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n if self.decoder_type == 'pointer':\n return {\n \"output\": attns['std'],\n \"target\": batch.tgt1_planning[range_[0] + 1: range_[1]]\n }\n else:\n assert False\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1]],\n }\n\n def _compute_loss(self, batch, output, target):\n if self.decoder_type == 'pointer':\n scores = self._bottle(output)\n else:\n scores = self.generator(self._bottle(output))\n\n gtruth = target.view(-1)\n if self.confidence < 1:\n tdata = gtruth.data\n mask = torch.nonzero(tdata.eq(self.padding_idx)).squeeze()\n log_likelihood = torch.gather(scores.data, 1, tdata.unsqueeze(1))\n tmp_ = self.one_hot.repeat(gtruth.size(0), 1)\n tmp_.scatter_(1, tdata.unsqueeze(1), self.confidence)\n if mask.dim() > 0:\n log_likelihood.index_fill_(0, mask, 0)\n tmp_.index_fill_(0, mask, 0)\n gtruth = Variable(tmp_, requires_grad=False)\n\n loss = self.criterion(scores, gtruth)\n if self.confidence < 1:\n # Default: report smoothed ppl.\n # loss_data = -log_likelihood.sum(0)\n loss_data = loss.data.clone()\n else:\n loss_data = loss.data.clone()\n\n stats = self._stats(loss_data, scores.data, target.view(-1).data)\n\n return loss, stats\n\n\ndef filter_shard_state(state, requires_grad=True, volatile=False):\n for k, v in state.items():\n if v is not None:\n if isinstance(v, Variable) and v.requires_grad:\n v = Variable(v.data, requires_grad=requires_grad,\n volatile=volatile)\n yield k, v\n\n\ndef shards(state, shard_size, eval=False, retain_graph=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval:\n yield filter_shard_state(state, False, True)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, torch.split(v, shard_size))\n for k, v in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. 
We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = ((state[k], v.grad.data) for k, v in non_none.items()\n if isinstance(v, Variable) and v.grad is not None)\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads, retain_graph=retain_graph)\n","repo_name":"ratishsp/data2text-plan-py","sub_path":"onmt/Loss.py","file_name":"Loss.py","file_ext":"py","file_size_in_byte":11029,"program_lang":"python","lang":"en","doc_type":"code","stars":160,"dataset":"github-code","pt":"71"} +{"seq_id":"9105326314","text":"#!/usr/bin/env python3\n\n\"\"\"\nRetrieve annotations closest to the query position from a GTF file.\n\"\"\"\n\nimport sys\n\ndef indexGTF(file):\n index = dict()\n with open(file, \"r\") as f:\n buffer = f.readlines()\n f.close()\n\n n = 0\n for line in buffer:\n if len(line.split(\"\\t\")) <= 1:\n n += 1\n else:\n break\n\n # Remove header\n # print(f\"Removing {n} lines from GTF file\")\n for i in range(n + 1):\n buffer.pop(0)\n\n # Remove bad footers from annotation file\n if len(buffer[len(buffer) - 1].split(\"\\t\")) <= 1:\n buffer.pop(len(buffer) - 1)\n\n for line in buffer:\n x = line.split(\"\\t\")\n try:\n name = x[0]\n source = x[1]\n feature = x[2]\n start = x[3]\n stop = x[4]\n except IndexError:\n print(f\"Bad line: {line}\")\n\n index[name] = [feature, start, stop]\n\n return index\n\ndef showNearestStart(query, index):\n print(f\"> Showing nearest items for starting position query: {query}\")\n\n x = list()\n for i in index:\n pos = index[i][1]\n dist = int(query) - int(pos)\n index[i] = index[i].append(dist)\n x.append([i, dist])\n\n y = list()\n for i in x:\n y.append(abs(i[1]))\n print(min(y))\n\nquery = sys.argv[1]\nindex = indexGTF(sys.argv[2])\n\nshowNearestStart(query, index)\n\n# 1. Index GTF file (get start and end positions, with annotation name)\n# 2. Get query position, and get distance from start position\n# 3. 
Print annotations with lowest distance\n","repo_name":"ycoles448/wheat-rna-pipeline","sub_path":"bin/getGTFPositions.py","file_name":"getGTFPositions.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"27685307044","text":"import numpy as np\nimport random\nfrom states import rotate_state\nfrom mcts import transform_action_key,MCTSNode\n\nimport asyncio\nimport ipywidgets as widgets\nfrom IPython.display import display,clear_output\n\nclass Agent:\n def __init__(self):\n self.last_observation_id = 0\n\n def cache_samples(self):\n pass\n\n def observe(self,observation_id,prev_action,state,start_flag,end_flag):\n pass\n\n def act(self,state):\n action_set = state.action_set\n a_idx = np.random.choice(len(action_set))\n return action_set[a_idx]\n\nclass RandomAgent(Agent):\n def __init__(self,seed):\n self.last_observation_id = 0\n self.seed = seed\n\n def act(self,state):\n np.random.seed(self.seed)\n action_set = state.action_set\n a_idx = np.random.choice(len(action_set))\n return action_set[a_idx]\n\nclass IpyAgent(Agent):\n def act(self,state):\n ## first, print out relevant infos\n print('-' * 50)\n print('Interactive player:')\n headers,curr_plays,remain_plays = [],[],[]\n for player_idx in range(4):\n target_player = (state.curr_player - len(state.round_plays) + player_idx) % 4\n \n header = ''\n if target_player == state.house:\n header += '△ '\n if player_idx == len(state.round_plays):\n header += '--> '\n target_header = '{}Player {} '.format(header,target_player)\n \n if player_idx < len(state.round_plays):\n target_curr_play = state.ruler.get_codes_repr(state.round_plays[player_idx][1])\n else:\n target_curr_play = '-' * 3\n\n target_remain_structs = state.structs_by_player[target_player]\n target_remain_play = []\n for struct in target_remain_structs:\n struct_codes = state.ruler.get_codes_repr(sum([component[-1] for component in struct[1]],()))\n if struct_codes == '':\n struct_codes = '-' * 3\n target_remain_play.append(struct_codes)\n target_remain_play = ' / '.join(target_remain_play)\n\n headers.append(target_header)\n curr_plays.append(target_curr_play)\n remain_plays.append(target_remain_play)\n\n header_max = np.max([len(header) for header in headers])\n curr_play_max = np.max([len(curr_play) for curr_play in curr_plays])\n for header,curr_play,remain_play in zip(headers,curr_plays,remain_plays):\n print('{} | this round: {} | remain cards: {}.'.format(\n header.ljust(header_max),curr_play.ljust(curr_play_max),remain_play))\n print('-' * 25)\n\n if len(state.best_codes) > 0:\n round_best_play = state.ruler.get_codes_repr(state.best_codes)\n else:\n round_best_play = '-' * 3\n print('Game major {}. Round best play {}, round total score {}. Current game score {}. 
Stack {}.'.format(\n state.ruler.get_code_repr(51),\n round_best_play,\n sum([state.ruler.get_codes_score(codes) for _,codes in state.round_plays],0),\n state.game_score,\n state.ruler.get_codes_repr(state.stack)))\n print('-' * 25)\n\n ## now provide the options\n action_set = state.action_set\n label0,value0 = 'Please select from below:',([],None,False)\n options = [(label0,value0)] + \\\n [(state.ruler.get_codes_repr(action[0]),action) for action in action_set]\n if len(action_set[0][0]) > 2 and not state.is_first:\n options += [('other combinations',None)] ## allow for combination of single cards\n dropbox = widgets.Dropdown(options=options,index=0,value=value0,label=label0)\n display(dropbox)\n future = asyncio.Future()\n def getvalue(change):\n future.set_result(change.new)\n dropbox.close()\n dropbox.observe(getvalue, 'value')\n return future\n \n def act_single(self,state):\n struct = state.structs_by_player[state.curr_player]\n codes = []\n for suit_enc in range(4):\n for component in struct[suit_enc][1]:\n codes.extend(component[-1])\n codes = sorted(codes)\n label0,value0 = 'Please select from below:',-1\n options = [(label0,value0)] + \\\n [(state.ruler.get_code_repr(code),code) for code in codes] + \\\n [('confirmed',-2)]\n dropbox = widgets.SelectMultiple(options=options,index=[0],value=[value0],label=[label0])\n display(dropbox)\n future = asyncio.Future()\n def getvalue(change):\n if 'confirmed' in dropbox.label:\n if len(dropbox.label) != state.round_num+1:\n print('please recheck size of your play!',end='\\r')\n else:\n future.set_result(change.new)\n dropbox.close()\n else:\n print(','.join([item for item in dropbox.label if item != label0]),end='\\r')\n dropbox.observe(getvalue, 'value')\n return future\n\nclass RlAgent(Agent):\n def __init__(self,model):\n self.action_model = model\n \n self.debug_flag = False\n self.infer_flag = False\n self.sample_collect_flag = False\n self.last_observation_id = 0\n self.value_bootstrap_partial_sample = None\n self.value_bootstrap_samples_cache = []\n\n def cache_samples(self):\n for sample in self.value_bootstrap_samples_cache:\n self.action_model.add_experience(sample)\n self.value_bootstrap_samples_cache = []\n\n def observe(self,observation_id,prev_action,state,start_flag,end_flag):\n if observation_id != self.last_observation_id:\n self.last_observation_id = observation_id\n if self.sample_collect_flag:\n state_vec,actions_vec,direc = state.get_vecs()\n if start_flag:\n self.value_bootstrap_partial_sample = None\n\n if end_flag:\n ## end of game \n if self.value_bootstrap_partial_sample is not None and len(self.value_bootstrap_partial_sample) == 3:\n prev_state_vec,prev_action_vec,prev_score = self.value_bootstrap_partial_sample\n self.value_bootstrap_samples_cache.append((prev_state_vec,prev_action_vec,state.eval_score-prev_score,True,direc,state_vec,actions_vec))\n self.value_bootstrap_partial_sample = None\n\n self.cache_samples()\n \n def act(self,state):\n state_vec,actions_vec,direc = state.get_vecs()\n action_set = state.action_set\n Na = len(action_set)\n if Na == 1:\n a_idx = 0\n else:\n ## rotation\n r = np.random.choice(4)\n rotate_direc = (1 if r % 2 == 0 else -1)\n direc_tmp = direc * rotate_direc\n state_vec_tmp = rotate_state(state_vec,r)\n q_values,probs,action_samples = self.action_model.predict(state_vec_tmp,actions_vec,direc_tmp)\n q_values = q_values * rotate_direc\n \n # q_values,probs,action_samples = self.action_model.predict(state_vec,actions_vec,direc)\n if self.debug_flag:\n ruler = state.ruler\n 
sort_idxs = np.argsort(q_values)[::-1]\n debug_str = ';'.join(\n [' {},{:.2f}'.format(ruler.get_codes_repr(action_set[i][0]),q_values[i]) \n for i in sort_idxs])\n print(debug_str)\n \n # for r in range(4):\n # state_vec_tmp = rotate_state(state_vec,r)\n # direc_tmp = direc * (1 if r % 2 == 0 else -1)\n # qs_tmp,p_tmp,a_tmp = self.action_model.predict(state_vec_tmp,actions_vec,direc_tmp)\n # print(r,qs_tmp,a_tmp)\n \n if self.infer_flag or random.random() < 0.9:\n a_idx = action_samples\n else:\n a_idx = np.random.choice(Na)\n\n if self.sample_collect_flag:\n ## value bootstrap samples: Q(state,action) -> next reward - current reward + (1 - end) * V(next_state,next_action,next_direc)\n ## manage the previous partial sample (if any) and init the next partial sample\n if self.value_bootstrap_partial_sample is not None and len(self.value_bootstrap_partial_sample) == 3:\n prev_state_vec,prev_action_vec,prev_score = self.value_bootstrap_partial_sample\n self.value_bootstrap_samples_cache.append((prev_state_vec,prev_action_vec,state.eval_score-prev_score,False,direc,state_vec,actions_vec))\n self.value_bootstrap_partial_sample = (state_vec,actions_vec[:,a_idx],state.eval_score)\n\n return action_set[a_idx]\n\nclass MCTSAgent(Agent):\n def __init__(self,model,sim_env,N_search=1000,c_puct=400,temp=1,explore_alpha=0.25,batch_size=8):\n self.action_model = model\n self.sim_env = sim_env\n self.N_search = N_search\n self.c_puct = c_puct\n self.temp = temp\n self.explore_alpha = explore_alpha\n self.batch_size = batch_size\n self.root_node = None\n \n self.debug_flag = False\n self.infer_flag = False\n self.sample_collect_flag = False\n self.last_observation_id = 0\n self.samples_cache = []\n self.samples_partial_cache = []\n\n def cache_samples(self):\n for sample in self.samples_cache:\n self.action_model.add_experience(sample,exp_pool_id=0)\n self.samples_cache = []\n\n def observe(self,observation_id,prev_action,state,start_flag,end_flag):\n if observation_id != self.last_observation_id:\n self.last_observation_id = observation_id\n if start_flag:\n self.init_mcts_tree(state)\n else:\n self.update_mcts_tree(prev_action,state,end_flag)\n\n ## check if the game ends\n if self.sample_collect_flag and end_flag:\n ## value samples: Q(state,action) -> final reward - current reward\n ## update the value samples here\n for sample in self.samples_partial_cache:\n if len(sample) == 6:\n if len(sample[4]) == 1:\n self.samples_cache.append((sample[0],sample[1],sample[2],state.eval_score-sample[3],sample[4],[state.eval_score-sample[3]]))\n else:\n self.samples_cache.append((sample[0],sample[1],sample[2],state.eval_score-sample[3],sample[4],sample[5]))\n self.samples_partial_cache = []\n\n self.cache_samples()\n\n def act(self,state):\n return self.mcts_search(state)\n\n def get_predict_values(self,state):\n if (state.actions_vec.shape[1]) > 0:\n q_values,probs,_ = self.action_model.predict(*state.get_vecs())\n return q_values + state.eval_score,probs\n else:\n return None,None\n\n def init_mcts_tree(self,state):\n self.root_node = MCTSNode(level=0,father=None,father_direc=0,prev_action=None,prob_sa=1,c_puct=self.c_puct)\n self.init_root_node(self.root_node,state)\n\n def update_mcts_tree(self,prev_action,state,end_flag):\n if not end_flag:\n if self.root_node is None or prev_action is None:\n # print('restart mcts tree!')\n self.init_mcts_tree(state)\n else:\n prev_codes = tuple(sorted(prev_action[0]))\n if prev_codes in self.root_node.children:\n self.root_node = self.root_node.children[prev_codes]\n if not 
self.root_node.is_expanded:\n self.init_root_node(self.root_node,state)\n self.root_node.set_root()\n else:\n self.init_mcts_tree(state)\n\n def init_root_node(self,node,state):\n q_values,probs = self.get_predict_values(state)\n node.state = state\n node.predict_value = state.eval_score\n ## add dirichlet noise to the prior\n dirichlet_noise = np.random.dirichlet(np.full((len(probs),),0.3))\n node.child_probs = probs * (1 - self.explore_alpha) + self.explore_alpha * dirichlet_noise\n # node.child_probs = probs\n self.expand_node(node)\n node.set_root()\n\n def evaluate_node(self,node):\n _,end_flag,state = self.sim_env.step(node.father.state.copy(),node.prev_action,if_display=False)\n if end_flag:\n node.is_leaf = True\n node.predict_value = state.eval_score\n self.update_node(node)\n else:\n node.state = state\n node.is_evaluating = True\n\n def expand_node(self,node):\n next_level = node.level + 1\n # na = len(node.state.action_set)\n node.children = {}\n for prev_action,prob_sa in (zip(node.state.action_set,node.child_probs)):\n node.children[transform_action_key(prev_action)] = MCTSNode(level=next_level,father=node,father_direc=node.state.curr_direc,prev_action=prev_action,\n prob_sa=prob_sa,\n # prob_sa=1/na,\n c_puct=self.c_puct)\n\n node.is_expanded = True\n\n def update_node(self,node):\n # print(node.predict_value)\n value = node.predict_value\n node.update_value(value)\n while (node.father is not None and not node.is_root):\n node = node.father\n node.update_value(value)\n\n def predict_node_batch(self,nodes_queue):\n if len(nodes_queue) > 1:\n max_mask_len = 0\n s_batch,mask_len_batch,direc_batch = [],[],[]\n for node in nodes_queue:\n state = node.state\n state_vec,actions_vec,direc = state.get_vecs()\n mask_len = len(state.action_set)\n s_batch.append(state_vec)\n mask_len_batch.append(mask_len)\n direc_batch.append(direc)\n if mask_len > max_mask_len:\n max_mask_len = mask_len\n\n As = np.zeros((len(nodes_queue),max_mask_len,55),dtype=np.float32)\n for idx,node in enumerate(nodes_queue):\n state = node.state\n mask_len = len(state.action_set)\n As[idx:(idx+1),:mask_len] = state.actions_vec\n v_batch,probs_batch = \\\n self.action_model.predict_batch(\n np.concatenate(s_batch,axis=0),As,\n np.array(mask_len_batch,dtype=np.int64),\n np.array(direc_batch,dtype=np.float32))\n else:\n state = nodes_queue[0].state\n state_vec,actions_vec,direc = state.get_vecs()\n v_batch,probs_batch = \\\n self.action_model.predict_batch(\n state_vec,\n actions_vec,\n np.array([len(state.action_set)],dtype=np.int64),\n np.array([direc],dtype=np.float32))\n\n for node,v,probs in zip(nodes_queue,v_batch,probs_batch):\n na = len(node.state.action_set)\n node.predict_value = node.state.eval_score + v\n node.child_probs = np.maximum(probs[:na],1e-2)\n node.is_evaluating = False\n node.is_evaluated = True\n self.update_node(node)\n\n def mcts_search(self,state):\n action_set = state.action_set\n Na = len(action_set)\n if Na == 1: ## TODO: consider remove this part for more consistent training samples\n action = action_set[0]\n if self.sample_collect_flag:\n ## value samples: Q(state,action) -> final reward - current reward\n ## only init parts here; append the final reward at the end of the game\n state_vec,actions_vec,direc = state.get_vecs()\n self.samples_partial_cache.append((state_vec,actions_vec,direc,state.eval_score,[1],[state.eval_score]))\n else:\n if self.root_node is None:\n self.init_mcts_tree(state)\n\n N_total = max(self.N_search,Na)\n # dirichlet_noise = 
np.random.dirichlet(np.full((Na,),0.3),N_total+1)\n # dirichlet_noise_value = dirichlet_noise * self.explore_alpha * self.c_puct\n \n # ## print fix depth tree\n # ruler = state.ruler\n # curr_node_prints = [([],[],self.root_node)]\n # for level in range(2):\n # str_ = []\n # curr_node_prints_next = []\n # for key_father,key_curr,node in curr_node_prints:\n # str_.append('({})->({}),{:.0f},{:.0f},{:.0f}'.format(ruler.get_codes_repr(key_father),ruler.get_codes_repr(key_curr),node.predict_value,node.sim_count,node.sim_value))\n # if node.children is not None:\n # for key,value in node.children.items():\n # curr_node_prints_next.append((key_curr,key,value))\n # curr_node_prints = curr_node_prints_next\n # if level > 0:\n # print(level,'; '.join(str_))\n\n counts = 0\n nodes_queue = []\n while counts < N_total:\n if len(nodes_queue) >= self.batch_size:\n self.predict_node_batch(nodes_queue)\n nodes_queue = []\n\n # while len(nodes_queue) < self.batch_size and counts < N_total:\n counts += 1\n curr_node = self.root_node\n # print(counts,self.root_node.sim_count)\n while True:\n if curr_node.is_evaluating:\n self.predict_node_batch(nodes_queue)\n nodes_queue = []\n\n curr_node.update_count()\n\n if curr_node.is_leaf:\n self.update_node(curr_node)\n break\n\n if not curr_node.is_expanded:\n if curr_node.is_evaluated:\n self.expand_node(curr_node)\n else:\n self.evaluate_node(curr_node)\n if curr_node.is_evaluating:\n nodes_queue.append(curr_node)\n break\n\n _,curr_node = curr_node.select_action_by_value()\n # if curr_node.is_root:\n # _,curr_node = curr_node.select_action_by_value_root(dirichlet_noise_value[counts],self.explore_alpha)\n # else:\n # _,curr_node = curr_node.select_action_by_value()\n\n # try:\n # ruler = state.ruler\n # kvs = list(self.root_node.children.items())\n # values = [kv[1].sim_count for kv in kvs]\n # sort_idxs = np.argsort(values)[::-1]\n # debug_str = []\n # for idx in sort_idxs:\n # child = kvs[idx][1]\n # debug_str.append(' {},{:.0f},{:.0f},{:.0f},{:.0f}'.format(ruler.get_codes_repr(kvs[idx][0]),child.sim_count,child.sim_value,child.predict_value,child.get_select_value(np.sqrt(self.root_node.sim_count))))\n \n # debug_str = ';'.join(debug_str)\n # print(counts,debug_str)\n # except Exception:\n # pass\n \n if len(nodes_queue) > 0:\n self.predict_node_batch(nodes_queue)\n nodes_queue = []\n\n if self.infer_flag:\n temp = 0\n else:\n if state.round_ >= 10:\n temp = 0\n else:\n temp = self.temp * (1 - state.round_ / 10.0)\n\n a_idx,action,a_probs,q_values = self.root_node.select_action_by_count_aug(temp=temp)\n # _,_,a_probs,q_values = self.root_node.select_action_by_count_aug(temp=1)\n\n if self.debug_flag:\n # print(state.round_,self.root_node.child_probs)\n # print(state.round_,a_probs,temp)\n\n try:\n ruler = state.ruler\n kvs = list(self.root_node.children.items())\n values = [kv[1].sim_count for kv in kvs]\n sort_idxs = np.argsort(values)[::-1]\n debug_str = []\n for idx in sort_idxs[:5]:\n child = kvs[idx][1]\n debug_str.append(' {}-->N:{:d},V:{:.0f},{:.0f},p:{:.2f},{:.2f}'.format(ruler.get_codes_repr(kvs[idx][0]),child.sim_count,child.sim_value,child.predict_value,a_probs[idx],child.pc/self.c_puct))\n \n debug_str = ';'.join(debug_str)\n print(debug_str)\n except Exception:\n pass\n\n if self.sample_collect_flag:\n state_vec,actions_vec,direc = self.root_node.state.get_vecs()\n\n self.samples_partial_cache.append((state_vec,actions_vec,direc,state.eval_score,a_probs,q_values))\n # ## policy samples: P(state,actions,direc) -> action_probs\n # 
self.policy_samples_cache.append((state_vec,actions_vec,direc,a_probs))\n # ## value samples: Q(state,action) -> final reward - current reward\n # ## only init parts here; append the final reward at the end of the game\n # self.value_samples_partial_cache.append((state_vec,actions_vec[:,a_idx],state.eval_score))\n \n return action\n","repo_name":"hanqiu92/shengji","sub_path":"public_full/agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":21759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"24168503357","text":"class Solution:\n def solveSudoku(self, board: List[List[str]]) -> None:\n def backtrack(board):\n for i in range(len(board)): # 遍历行\n for j in range(len(board[0])): # 遍历列\n if board[i][j] != \".\":\n continue\n for k in range(1, 10): # (i, j) 这个位置放k是否合适\n if isValid(i, j, k, board):\n board[i][j] = str(k) # 放置k\n if backtrack(board): return True # 如果找到合适一组立刻返回\n board[i][j] = \".\" # 回溯,撤销k\n return False # 9个数都试完了,都不行,那么就返回false\n return True # 遍历完没有返回false,说明找到了合适棋盘位置了\n\n def isValid(row, col, val, board):\n for i in range(9): # 判断行里是否重复\n if board[row][i] == str(val):\n return False\n for j in range(9): # 判断列里是否重复\n if board[j][col] == str(val):\n return False\n startRow = (row // 3) * 3\n startcol = (col // 3) * 3\n for i in range(startRow, startRow + 3): # 判断9方格里是否重复\n for j in range(startcol, startcol + 3):\n if board[i][j] == str(val):\n return False\n return True\n\n backtrack(board)","repo_name":"myf-algorithm/Leetcode","sub_path":"Leetcode/37.解数独.py","file_name":"37.解数独.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"37437546788","text":"from django.conf.urls import url\r\nfrom .views import KalendarListView, DayNoUpdateView, EventCreateView, EventUpdateView, SchoolKalendarTemplateView\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\nurlpatterns = [\r\n\turl(r'^$', KalendarListView.as_view(), name='kalendar-list-view'),\r\n\turl(r'^school/$', SchoolKalendarTemplateView.as_view(), name='school_kalendar-template-view'),\r\n url(r'^(?P\\d{4})/(?P\\d{1,2})$', KalendarListView.as_view(), name='kalendar-view'),\t#if sent with month, day etc.\r\n url(r'^changeday/(?P\\d+)/$', DayNoUpdateView.as_view(), name='dayno-update-view'),\r\n url(r'^addevent/(?P\\d+)/$', EventCreateView.as_view(), name='event-create-view'),\r\n url(r'^changeevent/(?P\\d+)/$', EventUpdateView.as_view(), name='event-update-view'),\r\n ]\r\n","repo_name":"personnameds/classsite","sub_path":"kalendar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42795768547","text":"import pynetbox\nimport config\n\nnetbox = pynetbox.api(url=config.URL_NB, token=config.TOKEN_NB)\n\ndef check_manufacs(manufact_name):\n \"\"\"\n Từ thông tin manufacturers trả về thông tin id của manufacturers,\n nếu thông tin manufacturer trống, tạo manufacturer\n \"\"\"\n import create_manufacturers\n manufact_info = netbox.dcim.manufacturers.get(name=\"{}\" .format(manufact_name))\n if manufact_info == None:\n create_manufacturers.create_manufacs_main()\n manufact_info1 = netbox.dcim.manufacturers.get(name=\"{}\" .format(manufact_name))\n manufact_id = manufact_info1['id']\n else:\n manufact_id = manufact_info['id']\n return manufact_id\n\ndef check_tenants(tenant_name):\n \"\"\"\n Từ thông tin tenants 
trả về thông tin id của tenants,\n nếu thông tin tenants trống, tạo tenants\n \"\"\"\n from create_tenancy import create_tenants_main\n tenant_info = netbox.tenancy.tenants.get(name=\"{}\" .format(tenant_name))\n if tenant_info == None:\n create_tenants_main()\n tenant_info1 = netbox.tenancy.tenants.get(name=\"{}\" .format(tenant_name))\n tenant_id = tenant_info1['id']\n else:\n tenant_id = tenant_info['id']\n return tenant_id\n\ndef check_sites(site_name):\n \"\"\"\n Từ thông tin sites trả về thông tin id của sites,\n nếu thông tin sites trống, tạo sites\n \"\"\"\n import create_sites\n site_info = netbox.dcim.sites.get(name=\"{}\" .format(site_name))\n if site_info == None:\n create_sites.create_site_main()\n site_info1 = netbox.dcim.sites.get(name=\"{}\" .format(site_name))\n site_id = site_info1['id']\n else:\n site_id = site_info['id']\n return site_id\n\ndef check_regions(region_name):\n \"\"\"\n Từ thông tin regions trả về thông tin id của regions,\n nếu thông tin regions trống, tạo regions\n \"\"\"\n from create_regions import create_region_main\n region_info = netbox.dcim.regions.get(name=\"{}\" .format(region_name))\n if region_info == None:\n create_region_main()\n region_info1 = netbox.dcim.regions.get(name=\"{}\" .format(region_name))\n region_id = region_info1['id']\n else:\n region_id = region_info['id']\n return region_id\n\ndef check_racks(rack_name):\n \"\"\"\n Từ thông tin racks trả về thông tin id của racks,\n nếu thông tin racks trống, tạo racks\n \"\"\"\n import create_racks\n rack_info = netbox.dcim.racks.get(name=\"{}\" .format(rack_name))\n if rack_info == None:\n create_racks.create_rack_main()\n rack_info1 = netbox.dcim.racks.get(name=\"{}\" .format(rack_name))\n rack_id = rack_info1['id']\n else:\n rack_id = rack_info['id']\n return rack_id\n\ndef check_rack_roles(role_name):\n \"\"\"\n Từ thông tin Rack roles trả về thông tin id của Rack roles,\n nếu thông tin Rack roles trống, tạo Rack roles\n \"\"\"\n import create_rack_roles\n rack_role_info = netbox.dcim.rack_roles.get(name=\"{}\" .format(role_name))\n if rack_role_info == None:\n create_rack_roles.create_rack_role_main()\n rack_role_info1=netbox.dcim.rack_roles.get(name=\"{}\" .format(role_name))\n rack_role_id = rack_role_info1['id']\n else:\n rack_role_id = rack_role_info['id']\n return rack_role_id\n\ndef check_rack_group(group_name, site):\n \"\"\"\n Từ thông tin Rack group trả về thông tin id của Rack group,\n nếu thông tin Rack group trống, tạo Rack group\n \"\"\"\n from create_rack_groups import create_rack_group_main\n rack_group_info = netbox.dcim.rack_groups.get(name=\"{}\" .format(group_name), site_id= site)\n if rack_group_info == None:\n create_rack_group_main()\n rack_group_info1 = netbox.dcim.rack_groups.get(name=\"{}\" .format(group_name), site_id= site)\n rack_group_id = rack_group_info1['id']\n else:\n rack_group_id = rack_group_info['id']\n return rack_group_id\n\ndef check_device_types(manufact_id, device_model):\n \"\"\"\n Từ thông tin Device type trả về thông tin id của Device type,\n nếu thông tin Device type trống, tạo Device type\n \"\"\"\n import create_device_types\n import create_interface_tpl\n device_type_info = netbox.dcim.device_types.get(manufacturer_id='{}' .format(manufact_id), model='{}' .format(device_model))\n if device_type_info == None:\n create_device_types.create_device_type_main()\n create_interface_tpl.create_inf_template_main()\n device_type_info1 = netbox.dcim.device_types.get(manufacturer_id='{}' .format(manufact_id), model='{}' .format(device_model))\n 
device_type_id = device_type_info1['id']\n else:\n device_type_id = device_type_info['id']\n return device_type_id\n\ndef check_device_roles(role_name):\n \"\"\"\n Từ thông tin Device role trả về thông tin id của Device role,\n nếu thông tin Device role trống, tạo Device role\n \"\"\"\n import create_device_roles\n device_role_info = netbox.dcim.device_roles.get(name=\"{}\" .format(role_name))\n if device_role_info == None:\n create_device_roles.create_device_role_main()\n device_role_info1 = netbox.dcim.device_roles.get(name=\"{}\" .format(role_name))\n device_role_id = device_role_info1['id']\n else:\n device_role_id = device_role_info['id']\n return device_role_id\n\ndef check_platforms(name):\n \"\"\"\n Từ thông tin platforms trả về thông tin id của platforms,\n nếu thông tin platforms trống, tạo platforms\n \"\"\"\n import create_platforms\n platform_info = netbox.dcim.platforms.get(name=\"{}\" .format(name))\n if platform_info == None:\n create_platforms.create_platforms_main()\n platform_info1 = netbox.dcim.platforms.get(name=\"{}\" .format(name))\n platform_id = platform_info1['id']\n else:\n platform_id = platform_info['id']\n return platform_id\n\ndef check_position_racks(rack_id):\n \"\"\"\n Kiểm tra vị trí còn trống trên tủ rack\n \"\"\"\n device_used = []\n check_device_in_racks = netbox.dcim.devices.filter(rack_id='{}' .format(rack_id))\n for device in check_device_in_racks:\n # Lấy ra các device đang đặt trên rack và lưu vào device_used\n if device not in device_used:\n device_used.append(device)\n position_used = []\n for deivce_name in device_used:\n # Lấy ra các vị trí u đã sử dụng và lưu vào position_used\n device_info = netbox.dcim.devices.get(name='{}' .format(deivce_name))\n position = device_info['position']\n manufact_id = device_info['device_type']['manufacturer']['id']\n device_model = device_info['device_type']['model']\n device_type_info = netbox.dcim.device_types.get(manufacturer_id='{}' .format(manufact_id), model='{}' .format(device_model))\n u_height = device_type_info['u_height']\n if ((position not in position_used) and ((position+u_height-1) not in position_used)): \n position_used.extend(range (position, position+u_height))\n else:\n print(\"Vị Trí {} Đã Có Thiết Bị Được Đặt\" .format(position))\n return position_used\n\ndef check_vlan_group(vgroup_name):\n \"\"\"\n Từ thông tin vlan group trả về thông tin id của vlan group,\n nếu thông tin vlan group trống, tạo vlan group\n \"\"\"\n import create_vlan_groups\n vlan_group_info = netbox.ipam.vlan_groups.get(name=\"{}\" .format(vgroup_name))\n if vlan_group_info == None:\n create_vlan_groups.create_vlan_group_main()\n vlan_group_info1 = netbox.ipam.vlan_groups.get(name=\"{}\" .format(vgroup_name))\n vlan_group_id = vlan_group_info1['id']\n else:\n vlan_group_id = vlan_group_info['id']\n return vlan_group_id\n\ndef check_vlan(vlan_name, site_id):\n \"\"\"\n Từ thông tin vlan trả về thông tin id của vlan,\n nếu thông tin vlantrống, tạo vlan\n \"\"\"\n import create_vlans\n vlan_info = netbox.ipam.vlans.get(name=\"{}\" .format(vlan_name), site_id = \"{}\" .format(site_id))\n if vlan_info == None:\n create_vlans.create_vlan_main()\n vlan_info1 = netbox.ipam.vlans.get(name=\"{}\" .format(vlan_name), site_id = \"{}\" .format(site_id))\n vlan_id = vlan_info1['id']\n else:\n vlan_id = vlan_info['id']\n return vlan_id\n\ndef check_rir(rir_name):\n \"\"\"\n Từ thông tin rir trả về thông tin id của rir,\n nếu thông tin rir trống, tạo rir\n \"\"\"\n import create_rirs\n rir_info = netbox.ipam.rirs.get(name=\"{}\" 
.format(rir_name))\n if rir_info == None:\n create_rirs.create_rir_main()\n rir_info1 = netbox.ipam.rirs.get(name=\"{}\" .format(rir_name))\n rir_id = rir_info1['id']\n else:\n rir_id = rir_info['id']\n return rir_id\n\ndef check_prefix_role(role_name):\n \"\"\"\n Từ thông tin prefix role trả về thông tin id của prefix role,\n nếu thông tin prefix role trống, tạo prefix role\n \"\"\"\n import create_prefixe_roles\n prefix_role_info = netbox.ipam.roles.get(name=\"{}\" .format(role_name))\n if prefix_role_info == None:\n create_prefixe_roles.create_prefix_role_main()\n prefix_role_info1 = netbox.ipam.roles.get(name=\"{}\" .format(role_name))\n role_id = prefix_role_info1['id']\n else:\n role_id = prefix_role_info['id']\n return role_id\n\ndef check_interface(device_name, inf_name):\n \"\"\"\n Từ thông tin interface trên device trả về thông tin id của interface\n \"\"\"\n try:\n interface_info = netbox.dcim.interfaces.get(device='{}' .format(device_name), name='{}' .format(inf_name))\n interface_id = interface_info['id']\n return interface_id\n except Exception as ex:\n print(ex)","repo_name":"VNPT-SmartCloud-System/Tim-hieu-Netbox","sub_path":"netbox_create_data/core/check_data_netbox.py","file_name":"check_data_netbox.py","file_ext":"py","file_size_in_byte":9822,"program_lang":"python","lang":"vi","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"11054804202","text":"from datetime import datetime\nimport bson.json_util\nfrom bson.json_util import dumps, loads\nimport json\nfrom dbconfig import users, tasks\n\nUSER_SCHEMA = {\n \"_id\": \"60756d56c44fb6fd55337f82\",\n \"user_id\": 1,\n \"name\": \"jondoe\",\n\n}\nSTATUS = [\"Pending\", \"InProgress\", \"InReview\", \"Completed\"]\n\nTASKS_SCHEMA = {\n \"title\": \"this is a task title\",\n \"description\": \"this is a task description\",\n \"current_status\": STATUS[0],\n \"ETA\": \"send-date-time of completion\",\n \"Assignee\": \"someone\",\n \"PENDING\": \"datetime.now()\",\n \"INPROGRESS\": \"datetime.now()\",\n \"INREVIEW\": \"datetime.now()\",\n \"COMPLETED\": \"datetime.now()\",\n}\n\nUPDATE_TASK_STATUS_SCHEMA = {\n \"title\": \"this is a task title\",\n \"description\": \"this is a task description\",\n \"ETA\": \"sometime\",\n \"current_status\": STATUS[2],\n \"timestamp\": datetime.now(),\n}\nUPDATE_TASK_SCHEMA = {\n \"title\": \"this is a task title\",\n \"description\": \"this is a task description\",\n \"Assignee\":\"haha\",\n \"ETA\": datetime.now(),\n \"timestamp\": datetime.now(),\n}\n\n\ndef add_task(task):\n query = {\"title\": task['title']}\n if tasks.count_documents(query) == 1:\n return None\n x = tasks.insert_one(task)\n x = tasks.update_one(query, {\n \"$set\": {\"current_status\": task[\"current_status\"], \"PENDING\": datetime.now()}})\n return task\n\n\n# print(add_task(TASKS_SCHEMA))\n\ndef show_task():\n task = tasks.find({})\n list_task = list(task)\n json_data = dumps(list_task)\n # print(task)\n new_dict = []\n for x in tasks.find({}):\n new_dict.append(x)\n # print(x)\n\n # print(new_dict)\n new_dict = json.loads(bson.json_util.dumps(new_dict))\n return new_dict\n\n\n# print(show_task())\n\n\ndef update_task_status(task):\n query = {\"title\": task[\"title\"]}\n if tasks.count_documents(query) == 1:\n x = tasks.update_one(query, {\n \"$set\": {\"current_status\": task[\"current_status\"], task[\"current_status\"]: datetime.now()}})\n return task\n\n\ndef update_task(task):\n query = {\"title\": task[\"title\"]}\n if tasks.count_documents(query) == 1:\n x = tasks.update_one(query, {\n 
\"$set\": {\"ETA\": task[\"ETA\"], \"Assignee\": task[\"Assignee\"]}})\n return task\n\n\n# print(update_task(UPDATE_TASK_SCHEMA))\n","repo_name":"chirayupatel9/SEFS","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"73698282141","text":"\"\"\"\r\nCreated on 2023/09/11\r\n@author: huguet\r\n\"\"\"\r\nimport os\r\nos.environ[\"OMP_NUM_THREADS\"] = '4'\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\nfrom scipy.io import arff\r\nfrom sklearn import cluster\r\nfrom sklearn import metrics\r\n\r\n# Chargement des données\r\npath = './artificial/'\r\nname = \"2d-4c.arff\"\r\ndatabrut = arff.loadarff(open(path + str(name), 'r'))\r\ndatanp = np.array([[x[0], x[1]] for x in databrut[0]])\r\n\r\n# Affichage des données initiales en 2D\r\nprint(\"---------------------------------------\")\r\nprint(\"Affichage données initiales \" + str(name))\r\nf0 = datanp[:, 0]\r\nf1 = datanp[:, 1]\r\nplt.scatter(f0, f1, s=8)\r\nplt.title(\"Donnees initiales : \" + str(name))\r\nplt.show()\r\n\r\n# Application de KMeans pour une valeur de k fixée\r\nprint(\"------------------------------------------------------\")\r\nprint(\"Appel KMeans pour une valeur de k fixée\")\r\ntps1 = time.time()\r\nk = 4\r\nmodel = cluster.KMeans(n_clusters=k, init='k-means++', n_init=1)\r\nmodel.fit(datanp)\r\ntps2 = time.time()\r\nlabels = model.labels_\r\niteration = model.n_iter_\r\ninertie = model.inertia_\r\ncentroids = model.cluster_centers_\r\n\r\n# Affichage des données après clustering\r\nplt.scatter(f0, f1, c=labels, s=8)\r\nplt.scatter(centroids[:, 0], centroids[:, 1], marker=\"x\", s=50, linewidths=3, color=\"red\")\r\nplt.title(\"Données après clustering : \" + str(name) + \" - Nb clusters =\" + str(k))\r\nplt.show()\r\n\r\n# Affichage des informations sur le clustering\r\nprint(\"nb clusters =\", k, \", nb iter =\", iteration, \", inertie = \", inertie, \", runtime = \", round((tps2 - tps1) * 1000, 2), \"ms\")\r\n\r\n# Calcul des distances entre les centroids\r\ndists = metrics.pairwise.euclidean_distances(centroids)\r\nmin_distances = dists.min(axis=1)\r\nmax_distances = dists.max(axis=1)\r\nmean_distances = dists.mean(axis=1)\r\n\r\n# Calcul des distances de chaque point aux centroids\r\npoint_to_centroid_dists = metrics.pairwise.euclidean_distances(datanp, centroids)\r\n\r\n# Affichage des distances pour chaque cluster\r\nfor i in range(k):\r\n cluster_point_dists = point_to_centroid_dists[labels == i, i]\r\n print(f\"Cluster {i + 1} - Min distance: {cluster_point_dists.min():.2f}, Max distance: {cluster_point_dists.max():.2f}, Mean distance: {cluster_point_dists.mean():.2f}\")\r\n\r\n# Calcul des scores de séparation entre clusters\r\ncentroid_distances = metrics.pairwise.pairwise_distances(centroids)\r\nlower_triangle = np.tril(centroid_distances, -1)\r\nnon_zero_values = lower_triangle[lower_triangle > 0]\r\nprint(f\"Separation between clusters - Min distance: {non_zero_values.min():.2f}, Max distance: {non_zero_values.max():.2f}, Mean distance: {non_zero_values.mean():.2f}\")\r\n\r\n# Calcul et affichage de l'évolution de l'inertie en fonction du nombre de clusters\r\ninertia_values = []\r\nfor k in range(1, 11):\r\n model = cluster.KMeans(n_clusters=k, init='k-means++')\r\n model.fit(datanp)\r\n inertia_values.append(model.inertia_)\r\n\r\nplt.figure()\r\nplt.plot(range(1, 11), inertia_values, marker='o')\r\nplt.title('Evolution 
de l\\'inertie')\r\nplt.xlabel('Nombre de clusters')\r\nplt.ylabel('Inertie')\r\nplt.show()\r\n\r\n# Évaluation de la qualité du clustering pour différents nombres de clusters\r\nk_values = range(2, 11)\r\nsilhouette_scores = []\r\ndavies_bouldin_scores = []\r\ncalinski_harabasz_scores = []\r\n\r\n# Calcul des métriques pour chaque nombre de clusters\r\nfor k in k_values:\r\n model = cluster.KMeans(n_clusters=k, init='k-means++')\r\n labels = model.fit_predict(datanp)\r\n silhouette_scores.append(metrics.silhouette_score(datanp, labels))\r\n davies_bouldin_scores.append(metrics.davies_bouldin_score(datanp, labels))\r\n calinski_harabasz_scores.append(metrics.calinski_harabasz_score(datanp, labels))\r\n\r\n# Affichage des métriques\r\nplt.figure(figsize=(10, 8))\r\nplt.subplot(3, 1, 1)\r\nplt.plot(k_values, silhouette_scores, marker='o', label='Coefficient de Silhouette', color='red')\r\nplt.legend()\r\nplt.title('Évaluation de la qualité du clustering en fonction de k')\r\nplt.ylabel('Coefficient de Silhouette')\r\n\r\nplt.subplot(3, 1, 2)\r\nplt.plot(k_values, davies_bouldin_scores, marker='x', label='Indice de Davies-Bouldin', color='green')\r\nplt.legend()\r\nplt.ylabel('Indice de Davies-Bouldin')\r\n\r\nplt.subplot(3, 1, 3)\r\nplt.plot(k_values, calinski_harabasz_scores, marker='s', label='Indice de Calinski-Harabasz')\r\nplt.legend()\r\nplt.xlabel('Nombre de clusters')\r\nplt.ylabel('Indice de Calinski-Harabasz')\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n# Application de MiniBatchKMeans pour différentes configurations\r\nfrom sklearn.cluster import MiniBatchKMeans\r\n\r\nbatch_sizes = [10, 50, 100, 500]\r\nn_clusters_list = [2, 3, 4, 5]\r\nn_init = 10\r\n\r\nfor n_clusters in n_clusters_list:\r\n for batch_size in batch_sizes:\r\n model = MiniBatchKMeans(n_clusters=n_clusters, batch_size=batch_size, n_init=n_init, init='k-means++')\r\n model.fit(datanp)\r\n labels = model.labels_\r\n centroids = model.cluster_centers_\r\n silhouette = metrics.silhouette_score(datanp, labels)\r\n davies_bouldin = metrics.davies_bouldin_score(datanp, labels)\r\n calinski_harabasz = metrics.calinski_harabasz_score(datanp, labels)\r\n print(f\"Configuration: n_clusters = {n_clusters}, batch_size = {batch_size}\")\r\n print(f\"Silhouette Score: {silhouette:.3f}\")\r\n print(f\"Davies Bouldin Score: {davies_bouldin:.3f}\")\r\n print(f\"Calinski Harabasz Score: {calinski_harabasz:.3f}\")\r\n print(\"-----------------------------------\")\r\n plt.scatter(datanp[:, 0], datanp[:, 1], c=labels, s=8)\r\n plt.scatter(centroids[:, 0], centroids[:, 1], marker=\"x\", s=50, linewidths=3, color=\"red\")\r\n plt.title(f\"Données après clustering avec MiniBatchKMeans (n_clusters={n_clusters}, batch_size={batch_size})\")\r\n plt.show()\r\n","repo_name":"Sofiene29/ANS","sub_path":"2-Starting-with-k-means.py","file_name":"2-Starting-with-k-means.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7017407532","text":"import numpy as np\nimport cvxopt\nfrom tqdm import tqdm\nimport scipy\nfrom scipy.spatial.distance import pdist, squareform, cdist\nfrom scipy.special import expit\n\nclass LogisticRegression():\n\n def __init__(self):\n self.weights = None\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def predict_probas(self, X):\n z = X @ self.weights\n return self.sigmoid(z)\n\n def cross_entropy(self, X, y):\n n = X.shape[0]\n y_pred = self.predict_probas(X, self.weights)\n cost = y * np.log(y_pred) + 
(1 - y) * np.log(1 - y_pred)\n return cost.sum() / n\n\n def get_grad(self, X, y):\n n = X.shape[0]\n y_pred = self.predict_probas(X=X)\n\n return X.T @ (y_pred - y) / n\n\n def fit(self, X, y, max_iter=1000, lr=1, eps=1e-6):\n self.weights = np.zeros(X.shape[1])\n cv = False\n j = 1\n for i in range(max_iter):\n weights_prev = self.weights.copy()\n grad = self.get_grad(X, y)\n self.weights -= lr * grad\n if np.linalg.norm(self.weights - weights_prev, 2) < eps:\n cv =True\n print('Algorithm converged !')\n break\n if (i/10000 == j):\n lr /= 2\n j += 1\n if not(cv):\n print('Reached maximum iterations without convergence.')\n\n def predict(self, X):\n probas = self.predict_probas(X=X)\n return (probas>0.5).astype(int)\n\n def get_accuracy_score(self, X, y):\n pred_labels = self.predict(X=X)\n return (pred_labels==y).mean()\n\n\nimport cvxopt\n\n\nclass SVM_custom_kernel:\n\n def __init__(self, c=1, eps=1e-4):\n self.alpha_ = None\n self.c = c\n self.eps = eps\n\n def fit(self, kernel_train, labels):\n n = len(labels)\n\n # prepare matrices of dual problem for solving\n diag = np.zeros((n, n))\n np.fill_diagonal(diag, labels)\n\n P = diag @ kernel_train @ diag\n P = cvxopt.matrix(P)\n\n Q = cvxopt.matrix(np.ones(n) * -1)\n\n if self.c is None:\n G = cvxopt.matrix(np.diag(np.ones(n) * -1))\n h = cvxopt.matrix(np.zeros(n))\n else:\n G = cvxopt.matrix(np.vstack((np.diag(np.ones(n) * -1), np.identity(n))))\n h = cvxopt.matrix(np.hstack((np.zeros(n), np.ones(n) * self.c)))\n\n A = labels.T\n A = A.astype('double')\n A = cvxopt.matrix(A)\n b = cvxopt.matrix(0.0)\n\n # Solve QP problem using cvxopt\n u = cvxopt.solvers.qp(P, Q, G, h, A, b)\n\n # take Lagrange multipliers,\n alpha = np.ravel(u['x'])\n\n # Identify support vectors\n sv = alpha > self.eps\n ind = np.arange(len(alpha))[sv]\n\n self.alpha_ = alpha[sv]\n self.sv = np.argwhere(sv == True)\n self.sv_label = labels[sv]\n\n # Compute bias value\n self.b = 0.0\n for i in range(len(self.alpha_)):\n self.b += self.sv_label[i]\n self.b -= np.sum(self.alpha_ * self.sv_label[:, 0] * kernel_train[sv, ind[i]])\n self.b /= len(self.alpha_)\n\n def predict(self, kernel_test):\n\n y_predict = np.zeros(kernel_test.shape[1])\n\n for i in range(kernel_test.shape[1]):\n y_predict[i] = sum(alpha * sv_label * kernel_test[sv, i] for alpha, sv, sv_label in\n zip(self.alpha_, self.sv, self.sv_label[:, 0]))\n return y_predict + self.b\n\n prediction = np.sign(y_predict + self.b)\n\n return prediction\n\n def predict_class(self, kernel_test):\n\n prediction = np.array(self.predict(kernel_test) >= 0, dtype=int)\n prediction[prediction == 0] = -1\n return prediction\n\n\nclass SVMClassifier():\n\n def __init__(self, C=1, kernel='rbf', gamma=0.1):\n self.C = C\n self.kernel = kernel\n if self.kernel == 'rbf':\n self.f_kernel = self.GRBF_kernel\n self.gamma = gamma\n\n def GRBF_kernel(self, x1, x2, gamma):\n return np.exp(-np.linalg.norm(x1 - x2) * gamma)\n\n # the computation of Gram matrix will be much faster using this\n\n def get_kernel_gram_matrix(self, X, gamma):\n\n if self.kernel in ['gaussian', 'rbf']:\n # Faster computation of the gram matrix with gaussian kernel\n # st= time.time()\n pairwise_dists = squareform(pdist(X, 'sqeuclidean'))\n K = np.exp(-pairwise_dists * gamma)\n # print(time.time()-st)\n return K\n\n def fit(self, X, y, transform_y=True):\n y = y.copy()\n if transform_y:\n y = y * 2 - 1\n\n n, m = X.shape\n\n # the computation of Gram matrix will be much faster using this\n K = self.get_kernel_gram_matrix(X, self.gamma)\n\n '''K1 = 
np.zeros((n,n))\n for i in tqdm(range(n)):\n for j in range(n):\n K1[i, j] = self.f_kernel(X[i], X[j], gamma=self.gamma)'''\n\n\n # construct for solver\n P = cvxopt.matrix(np.outer(y, y) * K)\n q = cvxopt.matrix(np.ones(n) * -1)\n A = cvxopt.matrix(y, (1, n))\n b = cvxopt.matrix(0.0)\n if self.C is None:\n G = cvxopt.matrix(np.diag(np.ones(n) * -1))\n h = cvxopt.matrix(np.zeros(n))\n else:\n G = cvxopt.matrix(np.vstack((np.diag(np.ones(n) * -1), np.identity(n))))\n h = cvxopt.matrix(np.hstack((np.zeros(n), np.ones(n) * self.C)))\n # solve QP problem\n solution = cvxopt.solvers.qp(P, q, G, h, A, b)\n # Lagrange multipliers\n LagM = np.ravel(solution['x'])\n # Get support vectors\n self.SuppVec_indices = LagM > 1e-5\n self.supportVectors = X[self.SuppVec_indices]\n self.supportY = y[self.SuppVec_indices] * LagM[self.SuppVec_indices]\n\n def predict_probas(self, X):\n\n try:\n assert self.kernel in ['gaussian', 'rbf']\n # compute pairwise (squared euclidean) distances between new samples and support vectors\n pairwise_dists = cdist(self.supportVectors, X, 'sqeuclidean')\n # gaussian kernel evaluations\n K_pred = np.exp(-pairwise_dists * self.gamma)\n \n #fixed error: due to self.weight instead of self.supportY\n pred_probas = expit(K_pred.T @ self.supportY)\n return pred_probas\n\n except:\n print('Please make sure the used kernel is gaussian.')\n\n def predict(self, X):\n probas = self.predict_probas(X=X)\n return (probas > 0.5).astype(int)\n\n def get_accuracy_score(self, X, y):\n pred_labels = self.predict(X=X)\n return (pred_labels == y).mean()\n\n\nclass WKRR():\n \"\"\"\n Weighted Kernel Ridge Regression\n\n \"\"\"\n\n def __init__(self, kernel='gaussian'):\n self.weights = None\n self.kernel = kernel\n # kernel gram matrix over training data\n self.K_train = None\n # training samples\n self.X_train = None\n\n def get_kernel_gram_matrix(self, X, sigma):\n\n if self.kernel == 'gaussian':\n # Faster computation of the gram matrix with gaussian kernel\n # st= time.time()\n pairwise_dists = squareform(pdist(X, 'sqeuclidean'))\n K = np.exp(-pairwise_dists / (2 * np.square(sigma)))\n # print(time.time()-st)\n return K\n\n def fit(self, X, y, penalty, W=None, eps=1e-6, kernel_precomputed=False):\n \"\"\"\n Returns analytical solution of the Weighted Kernel Ridge Regression problem\n \"\"\"\n\n self.X_train = X\n if kernel_precomputed:\n K = self.K_train\n else:\n K = self.get_kernel_gram_matrix(X, self.sigma)\n self.K_train = K\n\n assert K.shape[0] == y.shape[0]\n n = K.shape[0]\n\n if W is None:\n # unweighted KLR := all weights are equal to 1 and W:=Identity\n M = K @ y + n * penalty * np.eye(n)\n M_ = scipy.linalg.inv(M)\n v = M_ @ v\n\n else:\n\n W_sqrt = np.diag(np.sqrt(np.diag(W)))\n v = W_sqrt @ y\n M = K @ W_sqrt\n M = W_sqrt @ M + n * penalty * np.eye(n)\n M_ = scipy.linalg.inv(M)\n v = M_ @ v\n v = W_sqrt @ v\n\n print('fitted train data')\n self.weights = v\n\n\nclass KernelLogisticRegression():\n \"\"\"\n Kernel Logistic regression\n \"\"\"\n\n def __init__(self, kernel='gaussian', sigma=1):\n self.weights = None\n self.kernel = 'kernel'\n self.sigma = sigma\n self.loss_thresh = 0.001\n self.X_train = None\n\n # initialize weighted kernel ridge regression for self.fit\n self.wkrr = WKRR()\n\n def get_kernel_gram_matrix(self, X, sigma):\n\n if self.kernel == 'gaussian':\n # Faster computation of the gram matrix with gaussian kernel\n # st= time.time()\n pairwise_dists = squareform(pdist(X, 'sqeuclidean'))\n K = np.exp(-pairwise_dists / (2 * np.square(sigma)))\n # 
print(time.time()-st)\n return K\n\n def fit(self, X, y, penalty, max_iter=1000, eps=1e-6):\n \"\"\"\n Iteratively solve Weighted Kernel Ridge Regression problems\n \"\"\"\n\n self.X_train = X\n K = self.get_kernel_gram_matrix(X, self.sigma)\n self.wkrr.K_train = K\n\n # For training only, transform labels in [1,-1]\n y = np.where(y == 1, 1, 0)\n\n assert K.shape[0] == y.shape[0]\n n = K.shape[0]\n ones = np.ones(y.shape)\n\n # randomly initialize the coefficients\n alpha = np.random.normal(loc=0, scale=1, size=n)\n\n # t1 = time.time()\n\n # initialize loss\n loss = 10\n\n for i in range(max_iter):\n # At each iteration solve a Weighted kernel ridge regression\n v = K @ alpha\n prev_loss = loss\n loss = -np.sum(np.log(expit(np.multiply(y, v)) + eps)) / n\n print(loss)\n if np.abs(loss - prev_loss) < self.loss_thresh:\n print('converged after {} iterations'.format(i + 1))\n break\n\n # compute parameters for WKRR\n u = np.multiply(v, y)\n sig = expit(u)\n sig_ = ones - sig # 1-sig = expit(-u)\n W = np.diag(np.multiply(sig, (sig_)))\n # print(W.shape)\n P = np.diag(-sig_)\n k = P @ y\n z = v - scipy.linalg.inv(W) @ k\n # t2 = time.time()\n # print(t2-t1)\n\n # solve a weighted Kernel Ridgre Regression with the corresponding parameters\n alpha = self.wkrr.fit(X, z, penalty, sigma, W=W, kernel_precomputed=True)\n # t3 = time.time()\n # print(t3-t2)\n\n # save fitted parameters\n self.weights = alpha\n\n def predict_probas(self, X):\n\n try:\n assert self.kernel == 'gaussian'\n # compute pairwise (squared euclidean) distances between new samples and train samples\n pairwise_dists = cdist(self.X_train, X, 'sqeuclidean')\n # gaussian kernel evaluations\n K_pred = np.exp(-pairwise_dists / (2 * np.square(self.sigma)))\n\n pred_probas = expit(K_pred.T @ self.weights)\n return pred_probas\n\n except:\n print('Please make sure the used kernel is gaussian.')\n\n def predict(self, X):\n probas = self.predict_probas(X=X)\n return (probas > 0.5).astype(int)\n\n def get_accuracy_score(self, X, y):\n pred_labels = self.predict(X=X)\n return (pred_labels == y).mean()\n\n\nclass KernelPCA():\n\n def __init__(self, n_components):\n self.number_components = n_components\n\n # @staticmethod\n def get_wanted_eigenvectors_eigenvalues(self, w, v, ):\n L = [(w[i], v[i, :]) for i in range(w.shape[0])]\n L = sorted(L, key=lambda x: x[0], reverse=True)\n return np.array([L[i][0] for i in range(self.number_components)]), \\\n np.array([L[i][1] for i in range(self.number_components)])\n\n def fit_transform(self, K, eps=1e-6):\n n = K.shape[0]\n U = (1 / n) * np.ones((n, n))\n centred_K = (np.eye(n) - U) @ K @ (np.eye(n) - U)\n\n w, v = np.linalg.eig(centred_K)\n # We had some negative/complex eigen values even if the matrix is symmetric which \n # can mainly be due to some approximations made when computing the eigenvectors \n w = np.array(list(map(lambda x: x.real if x.real > 0 else eps, w)))\n v = np.real(v)\n w, v = self.get_wanted_eigenvectors_eigenvalues(w, v)\n\n alpha = v / np.sqrt(w[:, None])\n self.alpha = alpha\n\n return K @ alpha.T\n\n def transform(self, X):\n return X @ self.alpha.T","repo_name":"MedAmineHachicha/kernel_methods_challenge","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"19906744284","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef printsomething():\n print(\"Something\")\n\n#display function\ndef runtime_display(display, u, 
x, xmin,xmax,ymin,ymax):\n plt.axis([xmin, xmax, ymin, ymax ] )\n plt.title(display)\n plt.ylabel(\"U\")\n plt.xlabel(\"x\")\n plt.plot(x,u,'bo-')\n plt.pause(0.001)\n plt.clf() #clear drawing\n return 0\n","repo_name":"izham-sugita/ENT441-CFD","sub_path":"ent441-slides/codes/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"19494238042","text":"\"\"\"\nThis is a numpy reader/ writer module.\n\"\"\"\n# import standard modules\nimport numpy\n# help(numpy)\n\n# import custom modules\nfrom serializers.serialize_template import Serializer\n\n# define private variables\n__version__ = \"1.0.0\"\n\n# define class variables\nSerializer.SERIALIZER_TYPE = \"npy\"\n\n\nclass SerializeFile(Serializer):\n def __init__(self):\n # get the input data\n Serializer.__init__(self)\n self.DATA_TYPE = \"dictionary\"\n\n def read(self, f_name=\"\"):\n \"\"\"\n read the numpy file.\n :param f_name: file input name.\n :return: True for success. False for failure.\n \"\"\"\n success = Serializer.read(self, f_name=f_name)\n\n if not success:\n raise IOError(\"[No File] :: There is no file to read from.\")\n try:\n rdata = numpy.load(self.OUTPUT_PATH, encoding='bytes', allow_pickle=True)\n self.READ_DATA = rdata.tolist()\n return True\n except ValueError:\n return False\n\n def write(self, f_output=\"\", f_data=\"\"):\n \"\"\"\n writes the numpy file.\n :param f_output: custom file output name.\n :param f_data: data to write.\n :return: True for success. False for failure.\n \"\"\"\n Serializer.write(self, f_output=f_output, f_data=f_data)\n\n try:\n numpy.save(self.OUTPUT_PATH, self.INTERPRETED_INPUT_DATA)\n self.print_file_size()\n return True\n except ValueError:\n return False\n","repo_name":"AlexGaida/data_serializer","sub_path":"python/serializers/serialize_numpy.py","file_name":"serialize_numpy.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"14045645733","text":"import json\nimport os\nimport uuid\nfrom tempfile import NamedTemporaryFile\nfrom typing import List\n\nimport pkg_resources\nimport py2neo\nfrom IPython.display import HTML, IFrame, Image, display_html\nfrom jinja2 import Environment, FileSystemLoader\n\n\ndef plot(\n graph: py2neo.Graph, query: str = \"match p=()--()--() return p limit 25\", **kwargs\n) -> IFrame:\n \"\"\"Plot a graph, using a query.\n\n Heavy lifting is done via py2neo `to_subgraph` and `neographviz.vis_network`\n\n Example:\n >>> from neographviz import plot, Graph\n >>> graph = Graph() # You need a graph at localhos, or pass the uri here.\n >>> plot(graph)\n\n Args:\n graph (py2neo.Graph): Graph object from py2neo\n query (str, optional): Any valid cypher query, must return a path p, should use a limit. Defaults to \"match p=()--()--() return p limit 25\".\n\n Returns:\n IFrame: IFrame to show in jupyter notebook or website.\n \"\"\"\n sg = graph.run(query).to_subgraph()\n return vis_network(_get_nodes(sg), _get_edges(sg), **kwargs)\n\n\ndef _get_nodes(sg: py2neo.Subgraph) -> List[dict]:\n \"\"\"Get nodes from a subgraph\n \n Get the nodes in a subgraph and add the data so that\n visjs can consume it. 
\n\n Arguments:\n sg {py2neo.Subgraph} -- \n \n Returns:\n List -- List of dictionaries with keys: id, group, label, title\n \"\"\"\n nodes = []\n if sg:\n for n in sg.nodes:\n nodes.append(\n {\n \"id\": n.identity,\n \"group\": n.labels.__str__()[1:],\n \"label\": \" \".join([f\"{v}\" for v in n.values()]),\n \"title\": \"
\".join([f\"{k}:{v}\" for k, v in n.items()]),\n }\n )\n return nodes\n\n\ndef _get_edges(sg: py2neo.Subgraph) -> List:\n edges = []\n if sg:\n for r in sg.relationships:\n d = {\n \"from\": r.start_node.identity,\n \"to\": r.end_node.identity,\n \"label\": next(iter(r.types())),\n \"arrows\": \"to\",\n }\n try:\n d[\"title\"] = \"
\".join([str(k) + \":\" + str(v) for k, v in r.items()])\n except:\n pass\n edges.append(d)\n return edges\n\n\ndef vis_network(\n nodes,\n edges,\n physics=\"\",\n height=400,\n node_size=25,\n font_size=14,\n filename=\"\",\n config={},\n template_file=\"vis.html\",\n app=False,\n):\n \"\"\"Render a network with vis.js in an IFrame for use in a jupyter notebook or website. \n\n This function will render a template whihc uses vis.js to display the graph. \n The options configured can be passed directly to the template, but as it is vis.js underneith,\n any valid options for it can be passed as js in string form to jsoptions.\n\n Args:\n nodes (List): List of nodes\n edges (List): List of edges\n physics (str, optional): Defintion of physics in vis.js. Defaults to basic barnesHut.\n height (int, optional): Height of the plot in pixels. Defaults to 400.\n node_size (int, optional): Defaults to 25.\n font_size (int, optional): [description]. Defaults to 14.\n filename (str, optional): Optional filenmae for storing the page. Defaults to a `''` and uses a uuid.\n config (dict, optional): Custom kwargs to pass to template. Defaults to `{}`.\n template_file (str, optional): Defaults to `vis.html` the provided template, provide your own.\n\n Returns:\n IFrame: Iframe to show in jupyter notebook\n \"\"\"\n template = pkg_resources.resource_filename(\"neographviz\", \"templates/\")\n env = Environment(loader=FileSystemLoader(template))\n template = env.get_template(template_file)\n if not physics:\n physics = \"\"\"{\n \"barnesHut\": {\n \"centralGravity\": 0,\n \"springLength\": 240\n }\n }\"\"\"\n\n if not app:\n html = template.render(\n nodes=nodes,\n edges=edges,\n physics=physics,\n node_size=node_size,\n font_size=font_size,\n )\n unique_id = str(uuid.uuid4())\n if not filename:\n filename = \"figure/graph-{}.html\".format(unique_id)\n try:\n with open(filename, \"w\") as file:\n file.write(html)\n except FileNotFoundError:\n os.mkdir(\"figure\")\n with open(filename, \"w\") as file:\n file.write(html)\n\n return IFrame(filename, width=\"100%\", height=str(height))\n else:\n return template.render(\n nodes=nodes,\n edges=edges,\n physics=physics,\n node_size=node_size,\n font_size=font_size,\n app=app\n )\n\n\ndef get_vis_info(node, id, options):\n node_label = list(node.labels)[0]\n title = \"\".join([f\"{k}:{v} \" for k, v in node.items()]).strip()\n if node_label in options:\n vis_label = node.get(options.get(node_label, \"\"), \"\")\n else:\n vis_label = title\n\n return {\"id\": id, \"label\": vis_label, \"group\": node_label, \"title\": title}\n","repo_name":"niiicolai/PythonOverlay","sub_path":"venv/Lib/site-packages/neographviz/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5995083741","text":"import sys\n\nn, m = map(int, sys.stdin.readline().split())\nnumlist = list(map(int, sys.stdin.readline().split()))\nresult = []\nans = 0\n\n\ndef dfs(start):\n global ans\n if len(result) == 3:\n if sum(result) <= m:\n if m - ans > m - sum(result):\n ans = sum(result)\n return\n for i in range(start, n):\n result.append(numlist[i])\n dfs(i + 1)\n result.pop()\n\n\ndfs(0)\nprint(ans)\n","repo_name":"Lee9Bin/python_algorism","sub_path":"algorism/daily/2798.py","file_name":"2798.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16380681523","text":"# 
Convolutional Neural Network for EEG classification\n\nimport os\nimport numpy as np\nimport network\n\nfrom pylab import imshow, show, cm\nfrom network import Network, shared, relu\nfrom network import ConvLayer, ConvPoolLayer, FullyConnectedLayer, SoftmaxLayer\n\n# Load EEG data\nparent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))\ndata_dir = os.path.join(parent_dir, \"data\")\n\ndata = np.load(os.path.join(data_dir, 'all_data_6_1d_full.npy'))\n\nlabels = np.load(os.path.join(data_dir, 'all_data_6_1d_full_labels.npy'))\nlabels = labels[:,1]\n\n# Create train, validation, test sets\n#rng = np.random.RandomState(225)\nindices = np.random.permutation(data.shape[0])\n\nsplit_train, split_val, split_test = .6, .2, .2\n\nsplit_train = int(round(data.shape[0]*split_train))\nsplit_val = split_train + int(round(data.shape[0]*split_val))\n\ntrain_idx = indices[:split_train]\nval_idx = indices[split_train:split_val]\ntest_idx = indices[split_val:]\n\ntr_data = data[train_idx,:]\ntr_labels = labels[train_idx]\n\nval_data = data[val_idx,:]\nval_labels = labels[val_idx]\n\nte_data = data[test_idx,:]\nte_labels = labels[test_idx]\n\ntrain_data = shared((tr_data, tr_labels))\nvalidation_data = shared((val_data, val_labels))\ntest_data = shared((te_data, te_labels))\n\n# Show a single random trial\nimage_num = np.random.randint(0, network.size(train_data))\nimage_label = str(train_data[1][image_num].eval())\nimage_array = train_data[0][image_num].eval()\nimage_2d = np.reshape(image_array, (64, 512))\n\nimshow(image_2d, cmap=cm.gray)\nshow()\nprint(\"Label: {}\".format(image_label))\n\n# Train\nmini_batch_size = 10\n\ndef basic_conv(n=3, epochs=60):\n nets = [] # list of networks (for ensemble, if desired)\n for j in range(n):\n net = Network([\n ConvLayer(image_shape=(mini_batch_size, 1, 64, 512),\n filter_shape=(20, 1, 3, 3), stride=(1, 1), activation_fn=relu),\n ConvPoolLayer(image_shape=(mini_batch_size, 20, 64, 512),\n filter_shape=(40, 20, 3, 3), stride=(1, 1),\n poolsize=(2, 2), activation_fn=relu),\n ConvPoolLayer(image_shape=(mini_batch_size, 40, 32, 256),\n filter_shape=(80, 40, 3, 3), stride=(1, 1),\n poolsize=(2, 2), activation_fn=relu),\n FullyConnectedLayer(n_in=80*16*128, n_out=100),\n SoftmaxLayer(n_in=100, n_out=2)],\n mini_batch_size, 50)\n \n net.SGD(train_data, epochs, mini_batch_size, 0.1,\n validation_data, test_data, lmbda=0.0)\n \n nets.append(net) # Add current network to list\n return nets\n\nconv_net = basic_conv(n=1, epochs=2)\n\n# Plot training curve for 1 network\nconv_net[0].plot_training_curve()\n\n# Plot validation/test accuracy curve for 1 network\nconv_net[0].plot_accuracy_curve()\n\n# Create a plot of the learned filters for first conv layer\nconv_net[0].layers[0].plot_filters(4, 5, \"Filters - Layer 1\") # 20 filters\n","repo_name":"sho-87/python-machine-learning","sub_path":"CNN/eeg.py","file_name":"eeg.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"23958096792","text":"\nimport tweepy\nimport wget\n\n\nconsumerKey = \"Your key here\"\nconsumerSecret = \"Your key here\"\naccessToken = \"Your key here\"\naccessTokenSecret = \"Your key here\"\n\nauth = tweepy.OAuthHandler(consumer_key = consumerKey, consumer_secret = consumerSecret)\nauth.set_access_token(accessToken , accessTokenSecret)\napi = tweepy.API(auth)\n\nsearchTerm = input(\"Enter the hashtag to search for \")\nn = int(input(\"Enter the number of tweets to search for 
\"))\n\ntweets = tweepy.Cursor(api.search, q = searchTerm, result_type = \"recent\").items(n)\n\nneg = 0\npos = 0\nneu = 0\npol = 0\n\nmedia_url = []\n\nfor tweet in tweets:\n\n media = tweet.entities.get('media',[])\n if(len(media)):\n media_url.append(media[0]['media_url'])\n\ni = 1\n\nfor media in media_url:\n wget.download(media, out = str(i) + \".jpg\")\n i+=1\n","repo_name":"Anmay5525/Image_extractor","sub_path":"img_extract.py","file_name":"img_extract.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"17562573375","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n list1_cur = list1\n list2_cur = list2\n answer = ListNode()\n answer_cur = answer\n \n while list1_cur and list2_cur:\n if list1_cur.val <= list2_cur.val: \n answer_cur.next = ListNode(list1_cur.val)\n list1_cur = list1_cur.next\n else: \n answer_cur.next = ListNode(list2_cur.val)\n list2_cur = list2_cur.next\n answer_cur = answer_cur.next\n \n if list1_cur: \n answer_cur.next = list1_cur\n elif list2_cur: \n answer_cur.next = list2_cur\n \n return answer.next","repo_name":"python-algorithm-study-and-learning/Sangjun","sub_path":"ch08/08-14.py","file_name":"08-14.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"23061546641","text":"import collections\nfrom typing import List\n\nfrom bst_node import BstNode\nfrom test_framework import generic_test\n\nInterval = collections.namedtuple('Interval', ('left', 'right'))\n\n\n# def range_lookup_in_bst(tree: BstNode, interval: Interval) -> List[int]:\n# def in_order(tree):\n# if not tree:\n# return\n# in_order(tree.left)\n# if interval.left <= tree.data <= interval.right:\n# result.append(tree.data)\n# in_order(tree.right)\n#\n# result = []\n# in_order(tree)\n# return result\n\ndef range_lookup_in_bst(tree: BstNode, interval: Interval) -> List[int]:\n\n def f(tree):\n if not tree:\n return\n if interval.left <= tree.data <= interval.right:\n f(tree.left)\n result.append(tree.data)\n f(tree.right)\n elif tree.data > interval.left:\n f(tree.left)\n else:\n f(tree.right)\n\n result = []\n f(tree)\n return result\n\ndef range_lookup_in_bst_wrapper(tree, i):\n return range_lookup_in_bst(tree, Interval(*i))\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('range_lookup_in_bst.py',\n 'range_lookup_in_bst.tsv',\n range_lookup_in_bst_wrapper))\n","repo_name":"prrraveen/Elements_of_programming_interview","sub_path":"epi_judge_python/range_lookup_in_bst.py","file_name":"range_lookup_in_bst.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7284263601","text":"\nimport numpy as np, cv2\ndef contain(p, shape): # 좌표(y,x)가 범위내 인지 검사\n return 0<= p[0] < shape[0] and 0<= p[1] < shape[1]\n\ndef bilinear_value(img, pt):\n x, y = np.int32(pt)\n if x >= img.shape[1]-1: x = x -1\n if y >= img.shape[0]-1: y = y - 1\n\n P1, P3, P2, P4 = np.float32(img[y:y+2,x:x+2].flatten())\n alpha, beta = pt[1] - y, pt[0] - x # 거리 비율\n M1 = P1 + alpha * (P3 - P1) # 1차 보간\n M2 = P2 + alpha * (P4 - P2)\n P = M1 + beta * (M2 - M1) # 2차 보간\n return np.clip(P, 0, 255) # 화소값 
saturation후 반환\n\n\ndef calc_length(pts):\n d1 = np.subtract(pts[0], pts[1]).astype(float) # 두 좌표간 차분 계산\n length =(d1[0]**2+d1[1]**2)**0.5\n return (length)\n\ndef calc_gragient(pts):\n d1 = np.subtract(pts[0], pts[1]).astype(float) # 두 좌표간 차분 계산\n angle = cv2.fastAtan2(d1[0], d1[1]) # 차분으로 각도 계산\n return (angle)\n\n\ndef rotate(img, degree):\n dst = np.zeros(img.shape[:2], img.dtype) # 목적 영상 생성\n radian = (degree/180) * np.pi # 회전 각도 - 라디언\n sin, cos = np.sin(radian), np.cos(radian) # 사인, 코사인 값 미리 계산\n for i in range(img.shape[0]): # 목적 영상 순회 - 역방향 사상\n for j in range(img.shape[1]):\n y = -j * sin + i * cos\n x = j * cos + i * sin # 회선 변환 수식\n if contain((y, x), img.shape): # 입력 영상의 범위 확인\n dst[i, j] = bilinear_value(img, [x, y]) # 화소값 양선형 보간\n return dst\n\n\n\ndef draw_point(x, y):\n pts.append([x,y])\n print(\"좌표:\", len(pts), [x,y])\n cv2.circle(tmp, (x, y), 2, 255, 2) # 중심 좌표 표시\n\ndef onMouse(event, x, y, flags, param):\n global tmp, pts\n if (event == cv2.EVENT_LBUTTONDOWN and len(pts) == 0): \n draw_point(x, y)\n if (event == cv2.EVENT_LBUTTONUP and len(pts) == 1): \n draw_point(x, y)\n if len(pts) == 2:\n cv2.line(image, tuple(pts[0]), tuple(pts[1]), 255)\n legth = calc_length(pts) # 회전각 계산\n print(\"length : %3.2f\" % legth)\n angle = calc_gragient(pts) # 회전각 계산\n print(\"gradient : %3.2f\" % angle)\n dst = rotate(image, angle) # 사용자 정의 함수 회전 수행\n cv2.imshow(\"image\", dst) \n tmp = np.copy(image) # 임시 행렬 초기화\n pts = []\n\nimage = cv2.imread('images/rotate.jpg', cv2.IMREAD_GRAYSCALE)\nif image is None: raise Exception(\"영상 파일을 읽기 에러\")\ntmp = np.copy(image)\npts = []\n\ncv2.imshow(\"image\", image)\ncv2.setMouseCallback(\"image\", onMouse, 0)\ncv2.waitKey(0)","repo_name":"ksyeun/2022_1_SSU_AI_Computer-Vision","sub_path":"Exercise Problem/exercise815.py","file_name":"exercise815.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31907280768","text":"from django.shortcuts import get_object_or_404, render\nfrom .models import Book\nfrom django.http import Http404\nfrom django.db.models import Avg\n# Create your views here.\n\n\ndef index(request):\n all_books = Book.objects.all().order_by(\"rating\") # you can put -\n nofbooks = all_books.count()\n avg_rating = all_books.aggregate(Avg(\"rating\")) # rating__avg, rating__min\n\n return render(request, \"book_outlet/index.html\", {\n \"books\": all_books,\n \"total_number_of_books\": nofbooks,\n \"average_rating\": avg_rating,\n })\n\n\ndef book_detail(request, slug):\n # try:\n # book = Book.objects.get(pk=id)\n # except:\n # raise Http404()\n book = get_object_or_404(Book, slug=slug)\n return render(request, \"book_outlet/book_detail.html\", {\n \"title\": book.title,\n \"author\": book.author,\n \"rating\": book.rating,\n \"is_bestselling\": book.is_bestselling\n })\n","repo_name":"tahaenesaslanturk/book_store","sub_path":"book_outlet/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12912354233","text":"import sys\nfrom functools import wraps\nfrom typing import (\n Any,\n AsyncIterable,\n AsyncGenerator,\n AsyncContextManager,\n Iterator,\n Awaitable,\n Callable,\n Generic,\n Optional,\n TypeVar,\n cast,\n overload,\n)\nfrom ._impl import await_, with_portal_run_sync\n\nT = TypeVar(\"T\")\nF = TypeVar(\"F\", bound=Callable[..., Any])\nAF = TypeVar(\"AF\", bound=Callable[..., 
Awaitable[Any]])\n\n\ndef autoawait(fn: Callable[..., Awaitable[T]]) -> Callable[..., T]:\n \"\"\"Decorator for an async function which allows (and requires) it to be called\n from synchronous contexts without ``await``.\n\n For example, this can be used for magic methods, property setters, and so on.\n \"\"\"\n\n @wraps(fn)\n def wrapper(*args: Any, **kw: Any) -> T:\n return await_(fn(*args, **kw))\n\n return wrapper\n\n\n# For signature-preserving decorators we can declare the result as\n# signature-preserving too, and catch the case where the inner function isn't async\n@overload\ndef decorate_as_sync(decorator: Callable[[F], F]) -> Callable[[AF], AF]:\n ...\n\n\n# For non-signature-preserving, all we can do is say the inner function and\n# the decorated function are both async. (This could be improved using ParamSpec\n# for decorators that are args-preserving but not return-type-preserving.)\n@overload\ndef decorate_as_sync(\n decorator: Callable[..., Any]\n) -> Callable[[Callable[..., Awaitable[Any]]], Callable[..., Awaitable[Any]]]:\n ...\n\n\ndef decorate_as_sync(decorator: Any) -> Any:\n \"\"\"Wrap the synchronous function decorator *decorator* so that it can\n be used to decorate an async function.\n\n This can be used, for example, to apply an async-naive decorator such as\n `@functools.lru_cache() ` to an async function::\n\n @greenback.decorate_as_sync(functools.lru_cache(maxsize=128))\n async def some_fn(...): ...\n\n Without the wrapping in :func:`decorate_as_sync`, the LRU cache\n would treat the inner function as a synchronous function, and\n would therefore unhelpfully cache the coroutine object that is\n returned when an async function is called without ``await``.\n\n Internally, the \"inner\" async function is wrapped in a synchronous\n function that invokes that async function using\n :func:`greenback.await_`. This synchronous function is then\n decorated with the *decorator*. 
:func:`decorate_as_sync` returns\n an \"outer\" async function which invokes the internal decorated\n synchronous function using :func:`greenback.with_portal_run_sync`.\n\n In other words, the following two calls behave identically::\n\n result = await greenback.decorate_as_sync(decorator)(async_fn)(*args, **kwds)\n result = await greenback.with_portal_run_sync(\n decorator(greenback.autoawait(async_fn)), *args, **kwds,\n )\n\n \"\"\"\n\n def decorate(async_fn: Any) -> Any:\n @decorator # type: ignore # \"Untyped decorator makes 'inner' untyped\"\n @wraps(async_fn)\n def inner(*args: Any, **kwds: Any) -> Any:\n return await_(async_fn(*args, **kwds))\n\n @wraps(inner)\n async def outer(*args: Any, **kwds: Any) -> Any:\n return await with_portal_run_sync(inner, *args, **kwds)\n\n return outer\n\n return decorate\n\n\nclass async_context(Generic[T]):\n \"\"\"Wraps an async context manager so it is usable in a synchronous\n ``with`` statement.\"\"\"\n\n __slots__ = (\"_cm\", \"_aexit\")\n\n def __init__(self, cm: AsyncContextManager[T]):\n self._cm = cm\n\n if sys.version_info >= (3, 11):\n\n def __enter__(self) -> T:\n try:\n aenter = type(self._cm).__aenter__\n except AttributeError:\n raise TypeError(\n f\"{type(self._cm).__name__!r} object does not support the \"\n \"asynchronous context manager protocol\"\n ) from None\n try:\n self._aexit = type(self._cm).__aexit__\n except AttributeError:\n raise TypeError(\n f\"{type(self._cm).__name__!r} object does not support the \"\n \"asynchronous context manager protocol (missed __aexit__ method)\"\n ) from None\n return await_(aenter(self._cm))\n\n else:\n\n def __enter__(self) -> T:\n try:\n self._aexit = type(self._cm).__aexit__\n except AttributeError:\n raise AttributeError(\n f\"type object {type(self._cm).__name__!r} has no attribute '__aexit__'\"\n ) from None\n aenter = type(self._cm).__aenter__\n return await_(aenter(self._cm)) # type: ignore\n\n def __exit__(self, *exc: Any) -> Optional[bool]:\n return await_(self._aexit(self._cm, *exc)) # type: ignore\n\n\nclass async_iter(Generic[T]):\n \"\"\"Wraps an async iterator so it is usable in a synchronous\n ``for`` loop, ``yield from`` statement, or other context that expects\n a synchronous iterator.\"\"\"\n\n __slots__ = (\"_it\",)\n\n def __init__(self, iterable: AsyncIterable[T]):\n try:\n aiter = type(iterable).__aiter__\n except AttributeError:\n raise TypeError(\n \"'async_iter' requires an object with __aiter__ method, got \"\n + type(iterable).__name__\n ) from None\n self._it = aiter(iterable) # type: ignore\n try:\n type(self._it).__anext__\n except AttributeError:\n raise TypeError(\n \"'async_iter' received an object from __aiter__ that does not \"\n \"implement __anext__: \" + type(self._it).__name__\n ) from None\n if all(hasattr(self._it, meth) for meth in (\"asend\", \"athrow\", \"aclose\")):\n self.__class__ = async_generator\n\n def __iter__(self) -> Iterator[T]:\n return self\n\n def __next__(self) -> T:\n try:\n return await_(type(self._it).__anext__(self._it)) # type: ignore\n except StopAsyncIteration as ex:\n raise StopIteration(*ex.args)\n\n\nclass async_generator(async_iter[T]):\n __slots__ = ()\n\n def send(self, val: Any) -> T:\n try:\n return await_(cast(AsyncGenerator[T, Any], self._it).asend(val))\n except StopAsyncIteration as ex:\n raise StopIteration(*ex.args)\n\n def throw(self, *exc: Any) -> T:\n try:\n return await_(cast(AsyncGenerator[T, Any], self._it).athrow(*exc))\n except StopAsyncIteration as ex:\n raise StopIteration(*ex.args)\n\n def close(self) 
-> None:\n return await_(cast(AsyncGenerator[T, Any], self._it).aclose())\n","repo_name":"oremanj/greenback","sub_path":"greenback/_util.py","file_name":"_util.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","stars":58,"dataset":"github-code","pt":"69"} +{"seq_id":"73130414620","text":"import json\nimport click\nfrom vl_bench.utils import process_path\n\n@click.command()\n@click.argument('inputs', nargs=-1)\n@click.option('--output', required=True)\ndef main(inputs, output):\n merged = {}\n for input_file in inputs:\n filepath = process_path(input_file)\n with open(filepath, 'r') as f:\n merged.update(json.load(f))\n \n with open(process_path(output), 'w') as f:\n json.dump(merged, f, indent=4)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"ilkerkesen/ViLMA","sub_path":"tasks/counting/merge_result_files.py","file_name":"merge_result_files.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"2951722648","text":"def linkedListPalindrome(head):\n\tslowNode = fastNode = head\n\twhile fastNode is not None and fastNode.next is not None:\n\t\tslowNode = slowNode.next\n\t\tfastNode = fastNode.next.next\n\t\n\treverseList = reverseLinkedList(slowNode)\n\tnode = head\n\twhile reverseList is not None:\n\t\tif node.value != reverseList.value:\n\t\t\treturn False\n\t\tnode = node.next\n\t\treverseList = reverseList.next\n\treturn True\n\t\ndef reverseLinkedList(node):\n\tprevNode = None\n\tcurNode = node\n\twhile curNode is not None:\n\t\tnextNode = curNode.next\n\t\tcurNode.next = prevNode\n\t\tprevNode = curNode\n\t\tcurNode = nextNode\n\treturn prevNode\n\nhead = None\nresults = linkedListPalindrome(head)\nprint(results)","repo_name":"shengng325/LeetCode.py","sub_path":"linkedListPalindrome.py","file_name":"linkedListPalindrome.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37441237468","text":"import socket\r\nimport time\r\n#import cv2, os\r\n\r\nconn = socket.socket() \r\nhost = '169.254.16.35'\r\nport = 8080\r\nconn.connect((host,port)) #Establishes server-client connection\r\nprint(\"Connected to\", host,\" on port\", port) \r\n\r\ndef receive_image():\r\n size = eval(conn.recv(2048).decode()) #returns image size in bytes\r\n conn.send(b'1') \r\n #print(size)\r\n \r\n pack_size = 2048 #splitting image receival pack size into smaller packs, allowing complete capture of image\r\n img_data = b''\r\n while len(img_data) None:\n super().__init__(master)\n\n self.file_menu = tk.Menu(self, tearoff=0)\n self.add_cascade(label=\"File\", menu=self.file_menu)\n self.file_menu.add_command(label=\"Restart (R)\")\n self.file_menu.add_separator()\n self.file_menu.add_command(label=\"Quit (Q)\", command=self.exit_command)\n\n self.settings_menu = tk.Menu(self, tearoff=0)\n self.add_cascade(label=\"Edit\", menu=self.settings_menu)\n self.settings_menu.add_command(label=\"Preferences (S)\", command=self.settings_command)\n\n self.help_menu = tk.Menu(self, tearoff=0)\n self.add_cascade(label=\"Help\", menu=self.help_menu)\n self.help_menu.add_command(label=\"Homepage\", command=self.help_command)\n self.help_menu.add_command(label=\"About\", command=self.about_command)\n\n master.config(menu=self)\n\n def exit_command(self) -> None:\n \"\"\"Closes the game.\"\"\"\n self.master.destroy()\n logger.debug(\"\")\n\n def settings_command(self) -> 
None:\n settings = SettingsWindow(self.master)\n settings.grab_set()\n logger.debug(\"\")\n\n def help_command(self) -> None:\n url = default_config[\"INFO\"][\"REPOSITORY\"]\n webbrowser.open(url)\n logger.debug(\"\")\n\n def about_command(self) -> None:\n settings = AboutWindow(self.master)\n settings.grab_set()\n logger.debug(\"\")\n\n\nclass SettingsWindow(tk.Toplevel):\n \"\"\"Settings window.\"\"\"\n def __init__(self, master: tk.Misc):\n super().__init__(master)\n self.title(\"Settings\")\n self.resizable(False, False)\n\n # settings\n self.settings = self._init_settings()\n\n # reset button\n self.reset_button = tk.Button(self, text=\"Reset\", padx=20, command=self.reset_command)\n self.reset_button.grid(row=2, column=0, sticky=tk.W, padx=20, pady=10)\n \n # OK button\n self.ok_button = tk.Button(self, text=\"OK\", padx=20, command=self.ok_command)\n self.ok_button.grid(row=2, column=1, sticky=tk.E, padx=10, pady=10)\n\n # Cancel button\n self.cancel_button = tk.Button(self, text=\"cancel\", padx=10, command=self.cancel_command)\n self.cancel_button.grid(row=2, column=2, sticky=tk.E, padx=10, pady=10)\n\n def _init_settings(self) -> dict:\n settings = dict()\n\n # Game settings\n game_settings = ttk.LabelFrame(self, text=\"Game\")\n game_settings.grid(column=0, row=0, padx=20, pady=5, sticky=tk.W)\n # Dimensions of the window \n settings[\"dim\"] = Option(game_settings, config_item=(\"GRID\", \"SIZE\"), label=\"Dimensions of the window\", validation_fn=int)\n settings[\"dim\"].grid(row=0, column=0, sticky=tk.EW)\n # Number of units \n settings[\"units\"] = Option(game_settings, config_item=(\"GRID\", \"UNITS\"), label=\"Number of units\", validation_fn=int)\n settings[\"units\"].grid(row=1, column=0, sticky=tk.EW)\n # FPS\n settings[\"fps\"] = Option(game_settings, config_item=(\"APP\", \"MAX_FPS\"), label=\"Maximum FPS\", validation_fn=int)\n settings[\"fps\"].grid(row=2, column=0, sticky=tk.EW)\n\n # Graphics settings\n graphics = ttk.LabelFrame(self, text=\"Graphics\")\n graphics.grid(column=0, row=1, padx=20, pady=5, sticky=tk.W)\n # Alive cell color \n settings[\"alive_cell_color\"] = Option(graphics, config_item=(\"GRID\", \"FOREGROUND\"), label=\"Alive cell color\", validation_fn=ImageColor.getrgb)\n settings[\"alive_cell_color\"].grid(row=0, column=0, sticky=tk.EW) \n # Dead cell color\n settings[\"dead_cell_color\"] = Option(graphics, config_item=(\"GRID\", \"BACKGROUND\"), label=\"Dead cell color\", validation_fn=ImageColor.getrgb)\n settings[\"dead_cell_color\"].grid(row=1, column=0, sticky=tk.EW) \n # Grid color\n settings[\"grid_color\"] = Option(graphics, config_item=(\"GRID\", \"EDGE_COLOR\"), label=\"Grid color\", validation_fn=ImageColor.getrgb)\n settings[\"grid_color\"].grid(row=2, column=0, sticky=tk.EW) \n\n return settings\n\n def ok_command(self) -> None:\n \"\"\"Saves the current configuration to config file and closed the settings window.\"\"\"\n for _, setting in self.settings.items():\n try:\n setting.save_to_config()\n except ValueError as e:\n MessageWindow(self.master, msg_type=\"Error\", msg=str(e))\n return\n\n msg = \"New settings will take effect next time you open the application.\"\n MessageWindow(self.master, msg_type=\"Info\", msg=str(msg))\n self.destroy()\n\n def cancel_command(self) -> None:\n \"\"\"Discards changes and closes the settings window.\"\"\"\n self.destroy()\n\n def reset_command(self) -> None:\n \"\"\"Resets the settings to default and closes the settings window.\"\"\"\n for _, setting in self.settings.items():\n 
setting.reset_default()\n logger.info(\"Config reset to default.\")\n\n\nclass MessageWindow(tk.Toplevel):\n \"\"\"Generic top-level window displaying a message.\"\"\"\n def __init__(self, master: tk.Misc, msg_type: str, msg: str):\n super().__init__(master)\n self.title(msg_type)\n self.resizable(False, False)\n self.grab_set()\n\n self.label = Label(self, text=msg)\n self.label.grid(row=0, column=0, padx=20, pady=10)\n\n self.ok_button = tk.Button(self, text=\"OK\", padx=20, command=self.destroy)\n self.ok_button.grid(row=1, column=0, padx=10, pady=10)\n\n\nclass AboutWindow(tk.Toplevel):\n \"\"\"Window displaying basic info about the app.\"\"\"\n def __init__(self, master: tk.Misc):\n super().__init__(master)\n self.title(\"About\")\n self.geometry(\"250x200\")\n self.resizable(False, False)\n self.grab_set()\n\n title_text = \"Game of Life\"\n title = ttk.Label(self, text=title_text, font=(\"Arial\",16))\n title.grid(row = 0, column = 0, padx=30, pady=10, sticky=tk.W)\n\n info_text = self._generate_info()\n info = ttk.Label(self, text=info_text)\n info.grid(row = 2, column = 0, sticky=tk.W)\n\n def _generate_info(self) -> str:\n \"\"\"Generates info string.\"\"\" \n # get author and email\n author = default_config[\"INFO\"][\"AUTHOR\"]\n email = default_config[\"INFO\"][\"EMAIL\"]\n version = default_config[\"INFO\"][\"VERSION\"]\n # author_info = project[\"tool.poetry\"][\"authors\"].strip('[\"\"]')\n # author_info = author_info.replace(\">\", \"\")\n # author, email = author_info.split(\" <\")\n\n # get version\n # version = project[\"tool.poetry\"][\"version\"].strip('\"')\n\n info_text = f\"\"\"\n Author: {author} \\n\n Email: {email} \\n\n Version: {version}\n \"\"\"\n\n return info_text\n\n\nclass Option(tk.Frame):\n \"\"\"Option as a pair composed of a label and an entry.\"\"\"\n def __init__(self, master: tk.Misc, config_item: tuple, label: str, validation_fn: Optional[Callable] = None) -> None:\n super().__init__(master)\n self.master = master\n # pairing with an item in the config file\n self.conf_item = config_item\n self.validation_fn = validation_fn\n # label\n self.label = ttk.Label(self, text=label)\n self.label.grid(row=0, column=0, sticky=tk.W, padx=20, pady=10)\n # entry\n self.entry = ttk.Entry(self, width=8)\n self.entry.insert('0', config.get(*config_item))\n self.entry.grid(row=0, column=1, sticky=tk.E, padx=10)\n self.columnconfigure(1, weight=1)\n\n def get_value(self) -> Any:\n return self.entry.get()\n\n def save_to_config(self) -> None:\n if self.validation_fn is not None:\n try:\n value = self.entry.get()\n self.validation_fn(value)\n except ValueError:\n error_msg = \"Value not supported.\"\n logger.error(error_msg)\n raise ValueError(error_msg)\n\n section, option = self.conf_item\n config.set(section, option, value=str(value))\n with open(config_path, 'w') as configfile:\n config.write(configfile)\n\n logger.debug(f\"Saved {value} to {self.conf_item} option.\")\n\n def reset_default(self) -> None:\n self.entry.delete(0, 'end')\n self.entry.insert('0', default_config.get(*self.conf_item))\n\n\n\nclass Grid(tk.Canvas):\n \"\"\"Canvas displaying grid and alive/dead cells.\"\"\"\n def __init__(\n self, \n master: tk.Misc, \n dim: int, \n num_units: int, \n background_color: tuple, \n foreground_color: tuple, \n edge_color: str, \n *args: Any, **kwargs: Any\n ) -> None:\n super(Grid, self).__init__(master, width=dim + 1, height=dim + 1, *args, **kwargs)\n self.edge_color = edge_color\n self.num_units = num_units\n self.unit_size = dim / num_units\n self.dim = 
dim\n self.background = np.dstack([\n np.ones((self.num_units, self.num_units)),\n np.ones((self.num_units, self.num_units)),\n np.ones((self.num_units, self.num_units))\n ]) * background_color\n self.foreground = np.dstack([\n np.ones((self.num_units, self.num_units)),\n np.ones((self.num_units, self.num_units)),\n np.ones((self.num_units, self.num_units))\n ]) * foreground_color\n self.cells = self.create_image(0, 0, anchor=tk.NW, image=None, tag=\"cells\")\n self.cell_img = None\n\n def draw_grid(self) -> None:\n self.delete('grid')\n for unit in range(self.num_units):\n pos = unit * self.unit_size\n self.create_line(0, pos, self.dim, pos, fill=self.edge_color, tag=\"grid\")\n self.create_line(pos, 0, pos, self.dim, fill=self.edge_color, tag=\"grid\")\n\n def draw_array(self, cell_array: np.ndarray) -> None:\n image = Image.fromarray(255 * (1 - cell_array.astype(np.uint8)))\n image = image.resize(size=(self.size, self.size), resample=Image.NEAREST)\n self.cell_img = ImageTk.PhotoImage(image)\n self.itemconfig(\"cells\", image=self.cell_img)\n self.tag_lower(\"cells\")\n\n def draw_img(self, cell_img: ImageTk.PhotoImage) -> None:\n self.itemconfig(\"cells\", image=cell_img)\n self.tag_lower(\"cells\")\n\n def coords_to_grid_position(self, x: int, y: int) -> Tuple[int, int]:\n i = int(y // self.unit_size)\n j = int(x // self.unit_size)\n return i, j\n\n\n\nclass GameOfLifeGUI:\n \"\"\"GUI for the Game of Life.\"\"\"\n def __init__(self, master: tk.Tk):\n self.master = master\n self.master.title(f\"Game of Life\")\n icon_path = os.path.join(package_dir, \"resources/images/icon.png\")\n self.master.iconphoto(True, tk.PhotoImage(file=icon_path))\n self.master.resizable(False, False)\n\n # initialize widgets\n self.widgets = self._init_widgets()\n\n # canvas update related params\n self.cells: tk.PhotoImage\n self.last_time: float\n self.current_time = time.perf_counter()\n\n logger.info(\"GUI initialized ...\")\n\n def _init_widgets(self) -> dict:\n widgets = dict()\n\n # menu bar\n widgets[\"menubar\"] = MenuBar(self.master)\n\n # grid canvas\n widgets[\"grid\"] = Grid(\n master=self.master,\n dim=config.getint(\"GRID\", \"SIZE\"),\n num_units=config.getint(\"GRID\", \"UNITS\"),\n background_color=ImageColor.getrgb(config[\"GRID\"][\"BACKGROUND\"]),\n foreground_color=ImageColor.getrgb(config[\"GRID\"][\"FOREGROUND\"]),\n edge_color=config.get(\"GRID\", \"EDGE_COLOR\"),\n highlightthickness=0\n )\n widgets[\"grid\"].draw_grid()\n widgets[\"grid\"].pack()\n\n return widgets\n\n def _show_fps(self) -> None:\n self.last_time = self.current_time\n self.current_time = time.perf_counter()\n elapsed = self.current_time - self.last_time\n self.master.title(f\"Game of Life ({int(1 / elapsed)} FPS)\")\n\n def show_cells(self, cells: tk.PhotoImage) -> None:\n \"\"\"Handle all cell images currently in the queue, if any.\"\"\"\n self._show_fps()\n self.cells = cells\n self.widgets[\"grid\"].draw_img(self.cells)\n","repo_name":"kuchynkm/game_of_life","sub_path":"game_of_life/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":12711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"6436063561","text":"# \"\"\"\n# 题目:\n# Implement strStr().\n#\n# Return the index of the first occurrence of needle in haystack, or -1 if needle is not part of haystack.\n#\n# Example 1:\n#\n# Input: haystack = \"hello\", needle = \"ll\"\n# Output: 2\n# Example 2:\n#\n# Input: haystack = \"aaaaa\", needle = \"bba\"\n# Output: -1\n# Clarification:\n#\n# 
What should we return when needle is an empty string? This is a great question to ask during an interview.\n#\n# For the purpose of this problem, we will return 0 when needle is an empty string.\n# This is consistent to C's strstr() and Java's indexOf().\n#\n# \"\"\"\n\n\nclass Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n \"\"\"\n 自己实现的找子串的函数, 使用字符串切片还挺快的,86%, 而且原来leetcode的运算时间是真的有波动的。\n :param haystack:\n :param needle:\n :return:\n \"\"\"\n if needle == \"\":\n return 0\n\n for i in range(len(haystack)-len(needle) + 1):\n if haystack[i:i+len(needle)] == needle:\n return i\n\n return -1\n\n\n\nif __name__ == '__main__':\n haystack = \"a\"\n needle = \"a\"\n\n solution = Solution()\n result = solution.strStr(haystack, needle)\n print(result)\n\n# \"\"\"\n# 分析:\n#\n# \"\"\"\n","repo_name":"niracler/python-exercise","sub_path":"leetcode/strings/my_strstr.py","file_name":"my_strstr.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"} +{"seq_id":"7816657517","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport networkx as nx\n\ndef metcalfe_dfs_tree(G, source, depth_limit=None):\n \"\"\"Returns a dict containing each vertex and its branch and depth.\n Parameters\n ----------\n G : NetworkX graph\n source : node, required\n Specify starting node for depth-first search (the node whose Shapley value\n is being enquired)\n depth_limit : int, optional (default=len(G))\n Specify the maximum search depth\n Returns\n -------\n D : Dictionary\n dict containing each vertex and their branch and depth\n \"\"\"\n \n visited = set()\n if depth_limit is None:\n depth_limit = len(G)\n \n visited.add(source)\n metcalfe_info = {node:(None, None) for node in G.nodes}\n metcalfe_info[source] = (0, 0)\n stack = [(source, 0, iter(G[source]))]\n branch_now = 1\n while stack:\n parent, depth_now, children = stack[-1]\n try:\n child = next(children)\n if child not in visited:\n metcalfe_info[child] = (depth_now + 1, branch_now)\n visited.add(child)\n if depth_now < depth_limit-1:\n stack.append((child, depth_now + 1, iter(G[child])))\n except StopIteration:\n if depth_now <= 1:\n branch_now = branch_now + 1\n stack.pop()\n return metcalfe_info\n\ndef value(G, S, f=None):\n \"\"\"Returns the Metcalfe value of a coalition in a graph.\n Parameters\n ----------\n G : NetworkX graph\n S : subset of nodes of G\n f : list of int containing vertex weights, optional (default=[1 for i in range(len(G.nodes))])\n\n Returns\n -------\n value : Metcalfe value of the subgraph of G induced by S\n \"\"\"\n\n if f == None:\n f = [1 for i in range(len(G.nodes))] \n G_S = nx.induced_subgraph(G, S)\n #nx.draw(G_S, with_labels=True, font_weight='bold')\n conn_comp_S = nx.connected_components(G_S)\n value = 0\n for comp in conn_comp_S:\n tmp = 0\n for node in comp:\n #print(node)\n tmp += f[node] \n value += tmp*tmp\n return value\n\ndef shapley_sub_count(d_ia, d_ib, v):\n \"\"\"Returns an intermediate sum in the Shapley value computation.\n Parameters\n ----------\n d_ia : distance between i and a\n d_ib : distance between i and b\n v : number of vertices\n\n Returns\n -------\n sum : the intermediate sum in shapley computation\n \"\"\"\n k = d_ia+d_ib\n if k == 0:\n return v\n sub_count = 0.0\n for s in range(k, v):\n prod = 1\n for p in range(1, k+1):\n prod *= (s+1-p)/(v-p)\n sub_count += prod\n return sub_count\n \ndef shapley(G, i, f=None):\n \"\"\"Returns the Shapley value of a node in a graph.\n 
Parameters\n ----------\n G : NetworkX graph\n i : node of G\n f : list of int containing vertex weights, optional (default=[1 for i in range(len(G.nodes))])\n\n Returns\n -------\n shapley : Shapley value of node i\n \"\"\"\n \n if f == None:\n f = [1 for i in range(len(G.nodes))]\n shapley = 0.0\n branch_dist = metcalfe_dfs_tree(G, i)\n for a in G.nodes:\n if branch_dist[a][1] == None:\n continue\n for b in G.nodes:\n if branch_dist[b][1] == None:\n continue\n if branch_dist[a][1] != branch_dist[b][1] or (a == b and a == i):\n shapley += f[a]*f[b]*shapley_sub_count(branch_dist[a][0], branch_dist[b][0], len(G.nodes))\n return shapley/len(G.nodes)\n","repo_name":"Mishalassif/network-shapley","sub_path":"metcalfe_shapley_tree.py","file_name":"metcalfe_shapley_tree.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"29162267579","text":"# -- coding:utf-8 --\nimport math\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom pyhdf.SD import SD, SDC\nfrom pyhdf.error import HDF4Error\nfrom sklearn.neighbors import KDTree\nimport util\n\nDEFAULT_VALUE = -999999.0\nTIME_THRESHOLD = 1800 #时间误差上限30分钟\nDIST_THRESHOLD = 10000 #距离误差上限为10km\n\nclass Modis(object):\n def __init__(self, file_path):\n self.file_path = file_path\n self.time_arr = None\n self.time2dataDF = dict()\n self.time2data_tree = dict()\n self.time2modis_type = dict()\n self.col_num = 0\n\n def load_data(self):\n if not os.path.isdir(self.file_path):\n print('this is not a directory: ' + self.file_path)\n return -1\n\n files = os.listdir(self.file_path)\n valid_files = [file for file in files if file.endswith('.hdf')]\n\n for valid_file in valid_files:\n result = dict()\n file_name = valid_file\n #debug code\n #if '2019152.0610' not in file_name:\n # continue\n #debug end\n mod_type_fields = file_name.strip().split('_')\n mod_type = mod_type_fields[0][:3]\n\n date_time_fields = file_name.strip().split('.')\n date_time_str = date_time_fields[1][1:] + '.' 
+ date_time_fields[2]\n timestamp = util.get_timestamp_modis(date_time_str)\n\n total_file_name = os.path.join(self.file_path, valid_file)\n file = None\n try:\n file = SD(total_file_name)\n except HDF4Error as e:\n print('open file error.', total_file_name)\n continue\n\n sds_obj1 = file.select('Water_Vapor_Infrared')\n pwv_ir = sds_obj1.get()\n _, n_cols = np.shape(pwv_ir)\n self.col_num = n_cols\n pwv_ir = pwv_ir.flatten()\n sds_obj2 = file.select('Longitude')\n lng = sds_obj2.get().flatten()\n sds_obj3 = file.select('Latitude')\n lat = sds_obj3.get().flatten()\n\n result['modis_pwv'] = pwv_ir\n result['lng'] = lng\n result['lat'] = lat\n\n resultDF = pd.DataFrame(result)\n self.time2dataDF[timestamp] = resultDF\n self.time2modis_type[timestamp] = mod_type\n\n self.time_arr = sorted(self.time2dataDF.keys())\n print(\"all modis data loaded.\")\n\n return 0\n\n def build_kdtree(self):\n for timestamp in self.time2dataDF:\n dataDF = self.time2dataDF[timestamp]\n lnglat_arr = np.array([point for point in zip(dataDF['lng'].values, dataDF['lat'].values)])\n lnglat_tree = KDTree(lnglat_arr)\n self.time2data_tree[timestamp] = lnglat_tree\n\n print('all kdtrees builded.')\n return 0\n\n def _buid_all(self):\n res = self.load_data()\n if res != 0:\n print(\"load modis data fail.\")\n return -1\n res = self.build_kdtree()\n if res != 0:\n print(\"build kdtree fail.\")\n return -1\n\n return 0\n\n def _get_nearest_data(self, lng, lat, timestamp):\n index = np.searchsorted(self.time_arr, timestamp)\n if index >= len(self.time_arr):\n index = len(self.time_arr) - 1\n elif index > 0 and math.fabs(self.time_arr[index-1]-timestamp) < math.fabs(self.time_arr[index]-timestamp):\n index -= 1\n nearest_timestamp = self.time_arr[index]\n\n if math.fabs(nearest_timestamp - timestamp) > TIME_THRESHOLD:\n return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n if not self.time2data_tree.has_key(nearest_timestamp):\n print('can not find any timestamp in time2data_tree', nearest_timestamp)\n return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n if not self.time2modis_type.has_key(nearest_timestamp):\n print('can not find any timestamp in time2modis_type', nearest_timestamp)\n return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n lnglat_tree = self.time2data_tree[nearest_timestamp]\n inds = lnglat_tree.query(np.array([lng, lat]).reshape(1,-1), k=1, return_distance=False)\n\n modis_type = self.time2modis_type[nearest_timestamp]\n\n if not self.time2dataDF.has_key(nearest_timestamp):\n print('can not find any timestamp in time2dataDF.', nearest_timestamp)\n return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n resultDF = self.time2dataDF[nearest_timestamp]\n resultLine = resultDF.ix[inds[0]]\n nearest_lng = resultLine['lng']\n nearest_lat = resultLine['lat']\n\n if util.calc_dist(lng, lat, nearest_lng, nearest_lat) > DIST_THRESHOLD:\n return DEFAULT_VALUE, DEFAULT_VALUE, ''\n\n return resultLine['modis_pwv'].values[0], nearest_timestamp, modis_type\n\n def _get_col_num(self):\n return self.col_num\n\nif __name__ == '__main__':\n modis = Modis(file_path='/Users/didi/Documents/hjy/MODIS-TPW/')\n res = modis._buid_all()\n if res != 0:\n print(\"build all fail.\")\n exit(-1)","repo_name":"amanilr/modis_ground","sub_path":"modis.py","file_name":"modis.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"8409286042","text":"import json\nimport math\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\n\ndef 
load_json_data(file_path):\n \"\"\"\n Loads JSON data from a file.\n\n Parameters:\n - file_path (str): The path to the JSON file.\n\n Returns:\n - dict: The loaded JSON data.\n \"\"\"\n with open(file_path, 'r') as file:\n json_data = json.load(file)\n return json_data\n\ndef save_json_data(data, file_path):\n # Save the dictionary to a file\n with open(f\"{file_path}\", \"w\") as f:\n json.dump(data, f)\n \n#returns a touple of (lat, lon)\ndef get_coords(id, nodes):\n \n #find the node with the 'own_id'\n row = next((node for node in nodes if node['id'] == id), None)\n\n #returns the lat and long\n return(row['lat'], row['lon'])\n\ndef get_line(id, nodes):\n row = next((node for node in nodes if node['id'] == id), None)\n\n return row\n\n#return 1 if true, 0 if false\ndef check_if_node_is_junction(id, nodes):\n row = next((node for node in nodes if node['id'] == id), None)\n\n if 'tags' in row:\n tags = row['tags']\n if 'highway' in tags and tags['highway'] == 'motorway_junction': \n return 1\n return 0\n\n#calculates the distance between two points and returns the distance in km\ndef get_distance(lat1, lon1, lat2, lon2):\n\n R = 6371.0\n\n # Convert latitude and longitude from degrees to radians\n lat1_rad = math.radians(lat1)\n lon1_rad = math.radians(lon1)\n lat2_rad = math.radians(lat2)\n lon2_rad = math.radians(lon2)\n\n # Haversine formula\n dlon = lon2_rad - lon1_rad\n dlat = lat2_rad - lat1_rad\n a = math.sin(dlat / 2)**2 + math.cos(lat1_rad) * math.cos(lat2_rad) * math.sin(dlon / 2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n distance = R * c\n \n #return distance in km\n return distance\n\n#splits the json_data and returns a touple (nodes, ways)\ndef split_array_service_stations(json_data):\n \n ways = []\n nodes = []\n\n for el in json_data[\"elements\"]:\n if el[\"type\"] == \"node\":\n nodes.append(el)\n\n\n elif el[\"type\"] == \"way\":\n ways.append(el)\n\n else:\n print(\"ERROR: There shouldn't be another type (exept node and way)\")\n return (nodes, ways) \n \n#splits the json_data and returns a touple (nodes, highway)\ndef split_array_highway(json_data):\n \n nodes = []\n highway = []\n\n for el in json_data[\"elements\"]:\n #is a node\n if el[\"type\"] == \"node\":\n nodes.append(el)\n\n elif el[\"type\"] == \"way\":\n highway.append(el)\n else:\n print(\"ERROR: There shouldn't be another type (exept node and way)\")\n return (nodes, highway) \n\n#returns the (lat, lon) of the centrois of every sercice station way\ndef merge_area_to_point(way_service, nodes_service): # service node\n\n #calcualte for every area the centroid\n centroids = []\n for el in way_service:\n lats = []\n lons = []\n #get the coordinates for every node and add to list\n for ids in el['nodes']:\n lat, lon = get_coords(ids, nodes_service)\n lats.append(lat)\n lons.append(lon)\n \n #calculate centroid\n centroid_lat = sum(lats) / len(lats)\n centroid_lon = sum(lons) / len(lons)\n centroids.append((centroid_lat, centroid_lon))\n\n #deleate centroids, which are nearer than 0,2m\n sorted_centroids = []\n for lat1, lon1 in centroids:\n for lat2, lon2 in centroids:\n if(get_distance(lat1, lon1, lat2, lon2) < 0,1):\n #don't inclde point\n break\n sorted_centroids.append((lat1, lon1))\n\n return sorted_centroids\n\n#returns a list of id. 
All the nodes in this list repesent a rest area\ndef add_service_to_highway(nodes_highway, service):\n marked_street_nodes = []\n\n for lat, lon in service:\n # Find nearest point in nodes\n nearest_id = \"\" \n nearest_distance = float('inf') # Initialize with positive infinity\n\n for el in nodes_highway:\n distance = get_distance(lat, lon, el['lat'], el['lon'])\n \n # Check if the ID is not in marked_street_nodes before updating\n if el['id'] not in marked_street_nodes and distance < nearest_distance:\n nearest_id = el['id']\n nearest_distance = distance\n\n # Add nearest point to the marked list\n if nearest_id not in marked_street_nodes:\n marked_street_nodes.append(nearest_id)\n\n return marked_street_nodes\n\n\n#deletes every highway node, which is not marked(which doesn't represent a rest area)\ndef delete_usless_highway_nodes(way_highway, marked_nodes):\n #print(marked_nodes)\n #for every street\n for el in way_highway:\n #chech each point, if its a marked one, if not delete\n marked_ids = []\n for node_id in el['nodes']:\n #check if nodes are marked, if yes add to list\n if node_id in marked_nodes:\n \n marked_ids.append(node_id)\n print(\"appended\")\n #reset list of nodes on the highway to only the marked ones\n el['nodes'] = marked_ids \n\n return way_highway\n \n#adds a property to each node with name 'own_id' and a index starting with 0\ndef add_own_id(nodes):\n i = 0\n for el in nodes:\n el['own_id'] = i\n i += 1\n return nodes\n \n#gets the lat and long of the node with own_id == id\ndef get_position_id(id, nodes):\n \n #find the node with the 'own_id'\n row = next((node for node in nodes if node['id'] == id), None)\n\n #returns the lat and long\n return(row['lat'], row['lon'])\n \n#gets the lat and long of the node with own_id == id\ndef get_position_own_id(id, nodes):\n \n #find the node with the 'own_id'\n row = next((node for node in nodes if node['own_id'] == id), None)\n\n #returns the lat and long\n return(row['lat'], row['lon'])\n\ndef create_edges_array(nodes, ways):\n \n\n #get all edges with own_id (not the overpass_id) \n edges = []\n for el in ways:\n way_nodes = el['nodes']\n num = len(way_nodes)\n for i in range (0, num-1):\n #get overpass id\n overpass_id_a = int(way_nodes[i])\n overpass_id_b = int(way_nodes[i+1])\n\n #get own_id\n anode = next((node for node in nodes if node['id'] == overpass_id_a), None)\n bnode = next((node for node in nodes if node['id'] == overpass_id_b), None)\n \n aid = anode['own_id']\n bid = bnode['own_id']\n\n #add edge with own_ids\n edges.append((aid, bid))\n print(edges)\n \n \n #deleate doube edges\n unique_edges = []\n for edge in edges:\n if edge not in unique_edges and (edge[1], edge[0]) not in unique_edges:\n unique_edges.append(edge)\n\n #get lenght of edges\n unique_distance_edges = []\n for a, b in unique_edges:\n lata, lona = get_position_own_id(a, nodes)\n latb, lonb = get_position_own_id(b, nodes)\n distance = get_distance(lata, lona, latb, lonb)\n unique_distance_edges.append((a, b, distance))\n \n #delete edges > 60km\n final_edges = []\n for a, b, dis in unique_distance_edges:\n if dis < 60:\n final_edges.append((a,b, dis))\n \n return final_edges\n\n#deletes all nodes which aren't in the to_keep_ids list (overpass id)\ndef delete_useless_street_nodes_of_nodes_array(nodes, to_keep_ids):\n sorted_nodes = []\n for el in nodes:\n if el['id'] in to_keep_ids:\n sorted_nodes.append(el)\n \n return sorted_nodes\n\n#creates a graph\ndef create_graph(nodes, edges):\n\n #Create a graph \n g = nx.Graph()\n\n #add_nodes\n for 
el in nodes:\n #print(f\"nodes: {el['own_id']}, pos=({el['lat']}, {el['lon']})\")\n g.add_node(el['own_id'], pos=(el['lat'], el['lon']))\n\n #add edges with distance\n for el in edges:\n #print(f\"edges: {el[0]}, {el[1]}, {el[2]}\")\n g.add_edge(el[0], el[1], weight=el[2])\n \n # Extract node positions\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color='blue', alpha=0.7)\n\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n #edge_labels = nx.get_edge_attributes(g, 'weight')\n #nx.draw_networkx_edge_labels(g, pos=node_positions, edge_labels=edge_labels)\n\n # Display the plot\n plt.title(\"Graph of Nodes in France\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\ndef create_graph2(nodes, edges, othernodes):\n\n #Create a graph \n g = nx.Graph()\n\n max_own_id = 0\n\n #add_nodes\n for el in nodes:\n #print(f\"nodes: {el['own_id']}, pos=({el['lat']}, {el['lon']})\")\n g.add_node(el['own_id'], pos=(el['lat'], el['lon']), color='red')\n if el['own_id'] > max_own_id:\n max_own_id = el['own_id']\n #add edges with distance\n for el in edges:\n #print(f\"edges: {el[0]}, {el[1]}, {el[2]}\")\n g.add_edge(el[0], el[1], weight=el[2])\n \n # Extract node positions\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n #print(f\"nodes: \\n {nodes} \\n edges: \\n {edges}, othernodes: \\n{othernodes}\")\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color='blue', alpha=0.7)\n\n '''# Draw othernodes in a different color\n othernode_positions = {i: (lon, lat) for i, (lat, lon) in enumerate(othernodes)}\n print(othernode_positions)\n nx.draw_networkx_nodes(g, pos=othernode_positions, node_size=200, node_color='red', alpha=0.7)\n '''\n '''for i ,(lat, lon) in enumerate(othernodes):\n g.add_node(el['own_id'], pos=(lat, lon), color='red')'''\n \n\n\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n #edge_labels = nx.get_edge_attributes(g, 'weight')\n #nx.draw_networkx_edge_labels(g, pos=node_positions, edge_labels=edge_labels)\n\n # Display the plot\n plt.title(\"Graph of Nodes in France\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\ndef create_graph3(nodes, edges, ids):\n \"\"\"\n Creates a graph with colored nodes based on the given IDs.\n\n Parameters:\n - nodes (list): List of dictionaries representing nodes with 'own_id', 'lat', and 'lon'.\n - edges (list): List of tuples representing edges with the format (node1, node2, weight).\n - ids (list): List of node IDs to be colored red.\n\n Returns:\n - None: The function plots the graph but doesn't return any value.\n \"\"\"\n # Create a graph\n g = nx.Graph()\n\n # Add nodes\n for el in nodes:\n if(el['id'] in ids):\n node_id = el['own_id']\n pos = (el['lat'], el['lon'])\n g.add_node(node_id, pos=pos, color='red' if node_id in ids else 'blue')\n\n # Add edges with distance\n for el in edges:\n g.add_edge(el[0], el[1], weight=el[2])\n\n # Extract node positions and colors\n node_positions = {node: (lon, lat) for node, (lat, 
lon) in nx.get_node_attributes(g, 'pos').items()}\n node_colors = [g.nodes[node]['color'] for node in g.nodes]\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=100, node_color=node_colors, alpha=0.7)\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n\n # Display the plot\n plt.title(\"Graph of Nodes\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\ndef create_graph4(nodes, edges, coords):\n # Create a graph\n g = nx.Graph()\n\n # Add nodes\n for el in nodes:\n node_id = el['own_id']\n pos = (el['lat'], el['lon'])\n g.add_node(node_id, pos=pos, color='red')\n\n for i, (lat, lon) in enumerate(coords):\n g.add_node(i+424, pos=(lat, lon), color='blue')\n\n \n\n # Add edges with distance\n for el in edges:\n g.add_edge(el[0], el[1], weight=el[2])\n\n # Extract node positions and colors\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n node_colors = [g.nodes[node]['color'] for node in g.nodes]\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color=node_colors, alpha=0.7)\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n\n # Display the plot\n plt.title(\"Graph of Nodes\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\n\n\ndef plot_points(coords):\n \"\"\"\n Plots multiple points on a graph using networkx.\n\n Parameters:\n - coords (list): A list of tuples representing the coordinates (latitude, longitude) of each point.\n\n Returns:\n - None: The function plots the points but doesn't return any value.\n \"\"\"\n # Create a graph\n G = nx.Graph()\n\n # Add nodes to the graph using coordinates as node labels\n for i, (lat, lon) in enumerate(coords):\n G.add_node(i, pos=(lon, lat)) # Use longitude as x-coordinate and latitude as y-coordinate\n\n # Extract positions of nodes for plotting\n pos = nx.get_node_attributes(G, 'pos')\n\n # Draw the graph with nodes at specified positions\n nx.draw(G, pos, with_labels=False, node_size=300, node_color='skyblue', font_size=5, font_color='black')\n\n '''# Add labels to nodes\n for i, (lat, lon) in enumerate(coords):\n plt.text(lon, lat, f'({lat}, {lon})', fontsize=8, ha='right')'''\n\n # Display the plot\n plt.title('Plotting Points on a Graph')\n plt.show()\n\n\ndef print_latlon(coords):\n #Create a graph \n g = nx.Graph()\n\n #add_nodes\n for i, (lat,lon) in enumerate(coords):\n #print(f\"nodes: {el['own_id']}, pos=({el['lat']}, {el['lon']})\")\n g.add_node(i, pos=(lat, lon))\n \n # Extract node positions\n node_positions = {node: (lon, lat) for node, (lat, lon) in nx.get_node_attributes(g, 'pos').items()}\n\n # Get the edgelist\n edgelist = list(g.edges())\n\n # Create a scatter plot of nodes\n plt.figure(figsize=(8, 6))\n nx.draw_networkx_nodes(g, pos=node_positions, node_size=200, node_color='blue', alpha=0.7)\n\n\n # Draw edges with weights as labels\n nx.draw_networkx_edges(g, pos=node_positions, edgelist=edgelist, width=2, alpha=0.5, edge_color='gray')\n #edge_labels = nx.get_edge_attributes(g, 'weight')\n #nx.draw_networkx_edge_labels(g, pos=node_positions, edge_labels=edge_labels)\n\n # Display the plot\n plt.title(\"Graph 
of Nodes in France\")\n plt.axis('off') # Turn off axis labels\n plt.show()\n\n\n'''\nso guys, update time\nI implemented all funktion up to the graph creation, but one funktion isn't working correct. It is the add_service_to_highway funktion. \nMy idea is: \n1. compute the centroid of every service station and rest area (merge_are_to_point). then we have a list of coordinates.\n2. Find for every coordinate the neares \"normal\" street node and save the id in a list. (add_service_to_highway) and the list is marked_street_nodes.\nThese node should be the nodes we are working with.\n3. modify the highway(delete_usless_highway_nodes). This funktion takes every way (street) and looks for every node in they way, if it is a marked street node or a normal one. normal ones are deleted.\n4. add_own_id for easier debugging and better readibility\n5. create_edges_arry. Now that we have all the nodes we need and every has a new id, we can create the edges. Also sorts out double edges and calculates the lenght.\n6. delete_useless_street_nodes_of_nodes_array. Just for printing the graph. We want to only display the nodes we are using.\n7. display graph\n\n\nNow the add_service_to_highway funktion doesn't work how I inteded it. I don't know how, if it is a programming mistake or a mistake im my approach.\nIs there a smarter approach or do you find the mistake?\n\nOnce we fix it, we should have the graph and can start with the next tasks.\n\n'''\n\n\n\n\nfilepath_service = \"service-stations-Aquitaine.json\"\njson_data_service = load_json_data(filepath_service)\n\nfilepath_highway = \"street-Nodes-Aquitaine.json\"\njson_data_highway = load_json_data(filepath_highway)\n\n\n\n\n#nodes_... is a list of dict containing all nodes (of that type)\n#way is a list of dict containing all ways of streets or rest areas\nnodes_service, way_service = split_array_service_stations(json_data_service)\nnodes_highway, way_highway = split_array_highway(json_data_highway)\n\n#nodes_service: nodes of the edges of service sations\n# array of: {'type': 'node', 'id': 304610017, 'lat': 44.8883184, 'lon': -0.5799906}, {'type': 'node', 'id': 304610018, 'lat': 44.888388, 'lon': -0.5796747}\n#way_service: ways of the edges of service stations, ids only contain ids of nodes_service\n# array of: {'type': 'way', 'id': 1018761865, 'nodes': [9396560469, 9396560468, 9396560467, 9635617586, 9635617587, 307456719, 9396560469], 'tags': {'highway': 'services'}}\n#nodes_highway: nodes of all possible street points\n# array of: {'type': 'node', 'id': 10981442267, 'lat': 43.7195563, 'lon': -0.269957}\n#way_highway: wasy of the streets. only ontains ids of nodes_highway\n# array of {'type': 'way', 'id': 1018760500, 'nodes': [9396528101, 9396528100, 9396528099, 9396528098, 9396528097, 9396528101], 'tags': {'highway': 'services'}}\n\n\n\n#service is a list of points (lat, lon), the centroid of every service station\nservice = merge_area_to_point(way_service, nodes_service)\n\n#adds own_id proporty, to sort easier\nnodes_highway = add_own_id(nodes_highway)\n#{'type': 'node', 'id': 10981442267, 'lat': 43.7195563, 'lon': -0.269957, 'own_id': 1}\ncreate_graph(nodes_highway, [])\n\n\n#merge to nearest street node\n#marked_street_nodes are a list of nodes_highway, which were the clostest to a rest area (only a list of ids)\nmarked_street_nodes = add_service_to_highway(nodes_highway, service)\n\ncreate_graph3(nodes_highway, [], marked_street_nodes)\n\n\n#deleate all the not marked street nodes out of way_highway\n#marked_ways is array of streets. 
every street is an array out of nodes\nmarked_ways = []\n#junction_ids is an array of id of nodes, which aren't service stations, but rather junction nodes.\njunction_ids = []\n#for every street\nfor el in way_highway:\n #chech each point, if its a marked one, if not delete\n marked_ids = []\n for node_id in el['nodes']:\n #print(get_line(node_id, nodes_highway))\n #check if nodes are marked, if yes add to list\n junc_check = check_if_node_is_junction(node_id, nodes_highway)\n if node_id in marked_street_nodes or junc_check == 1:\n if(junc_check == 1):\n junction_ids.append(node_id)\n\n marked_ids.append(node_id)\n #reset list of nodes on the highway to only the marked ones\n if len(marked_ids) > 0:\n marked_ways.append(marked_ids) \nprint(marked_ways)\n\n'''way_highway_only_marked = []\nfor el in nodes_highway:\n if el['id'] in marked_street_nodes:\n way_highway_only_marked.append(el)\n\n#way_highway_only_marked: array of dict: {'type': 'node', 'id': 638664, 'lat': 43.3410243, 'lon': -0.3775863, 'own_id': 282}\n\n#contains the highways, but only with the marked street nodes. The street nodes which are service stations\n#way_highway_only_marked2 = delete_usless_highway_nodes(way_highway, marked_street_nodes)\n\nprint(way_highway[-5])\nprint(way_highway_only_marked2[-5])'''\n\n'''\n#create the edges with the own_ids\nedges_with_own_id = create_edges_array(nodes_highway, way_highway_only_marked)\n\n#create_graph2(nodes_highway, edges_with_own_id, service)\n#print_latlon(service)\n\n\n#list of all street nodes, which represent a rest area\nnodes_service_final = delete_useless_street_nodes_of_nodes_array(nodes_highway, marked_street_nodes)\n\ncreate_graph(nodes_service_final, edges_with_own_id)\n\n'''\n\n\n\n\n\n","repo_name":"Janus124/Applied-Algorithm-Charging-Stations","sub_path":"create-graph.py","file_name":"create-graph.py","file_ext":"py","file_size_in_byte":20512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26748129771","text":"\nimport requests # for accesing web page\nfrom bs4 import BeautifulSoup # for pulling data out of html\nimport pandas as pd # for general working with data\n# from nltk import word_tokenize # text mining / analysis\nfrom collections import Counter\nfrom nltk.corpus import stopwords\nimport re #regex\n\n# A function take jobtitle and location as arguments and return correct url for web scraping purpose\ndef searchquery(jobtitle, location):\n title = jobtitle.replace(' ', '+')\n loc = location.replace(' ', '+')\n url = 'http://www.indeed.com/jobs?q=%22'+ title +'%22&radius=50&limit=50&l='+loc\n return url\n\n# A function to take job list's url as an input and return dataframe with job titles and url to job post\ndef collect_job_list(url):\n # create empty list\n jobtitle, hreflink = [], []\n \n # get contents from the web\n r = requests.get(url)\n soup = BeautifulSoup(r.content, 'html.parser')\n \n # find the page number\n x = soup.findAll('div', {'id': 'searchCount'})[0].text.replace(',', '')\n pageN = int(x[x.find('of ')+3:])\n \n # iterate over page number \n for i in range(0, pageN, 50):\n joblisturl = url + '&start=' + str(i)\n r = requests.get(joblisturl)\n soup = BeautifulSoup(r.content, 'html.parser')\n \n # iterate over each listed job post on search result to obtain job title and link\n for data in soup.findAll('a', {'data-tn-element': 'jobTitle'}):\n if 'clk?jk=' in data.get('href'):\n hreflink.append(data.get('href'))\n jobtitle.append(data.text)\n df = pd.DataFrame({'title': jobtitle, 
'link': hreflink})\n return df\n\n\n# convert the href link data in dataframe to proper url\ndef properurl(link):\n joburl = 'http://www.indeed.com/viewjob?jk=' +\\\n link[link.find('clk?jk=')+len('clk?jk='):link.find('&fccid')]\n return joburl\n \n\n# A function to take job posting's url as an input, mine text data from selected job post. \n# and return the text from the post.\ndef collect_job_data(joblink_list):\n jobdesc = []\n #iterate over href link in data frame\n for i in range(0, len(joblink_list)):\n joburl = properurl(joblink_list[i])\n \n #extracting text data from selected job posting \n r = requests.get(joburl)\n soup = BeautifulSoup(r.content, 'html.parser')\n desc = ''.join(soup.findAll('td', {'class': 'snip'})[0].text)\n desc = re.sub('[^A-Za-z0-9&]+', ' ', desc)\n jobdesc.append(desc[:desc.find('ago')].replace('\\n', ' ').lower())\n return jobdesc\n\n# A function to take str as input, split the str and count the words\ndef countword(text):\n #removing stopwords from the data\n stop = stopwords.words('english')\n \n \n nostopword = ' '.join([word for word in text.split() if word not in stop])\n #create word count list\n count = Counter(nostopword.split())\n return count\n\n# A function to take words as input and return the list of counts for the words of interest.\ndef sortlist(words, countlist):\n result= [] \n for word in words.split():\n result.append([x for x in countlist if word in x])\n return result\n\n\n\n\n\n\n ","repo_name":"ykimmate14/job_post_web_scraping","sub_path":"job_web_scraping.py","file_name":"job_web_scraping.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10066562762","text":"import os\nimport time\nimport torch\nimport torch.nn as nn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom custom_mobilenet import CustomMobileNet\n\nfrom PIL import Image\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\nclass DriveDataset(torch.utils.data.Dataset):\n def __init__(self, root_dir, transform=None):\n temp = root_dir.split('/')\n self.root_dir = '/'.join(temp[:-1])\n self.transform = transform\n\n self.data = pd.read_csv(root_dir)\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n row = self.data.iloc[idx]\n \n # Cargar imagen RGB\n folder, file = row['filenames'].split('/')\n img_rgb_path = os.path.join(\n self.root_dir, 'Images', folder, 'rgb', file)\n img_rgb = Image.open(img_rgb_path)\n\n if self.transform:\n img_rgb = self.transform(img_rgb)\n\n # leer etiquetas de la i-ésima muestra\n throttle = row['throttle']\n steering = row['steer']\n action_left = row['action_left']\n action_right = row['action_right']\n action_forward = row['action_forward']\n no_action = row['no_action']\n \n # Crear muestra\n sample = (img_rgb,\n # Tensor de parámetros de acción.\n torch.tensor([\n float(action_left), float(action_right),\n float(action_forward), float(no_action)\n ]),\n # Tensor de etiquetas de aceleración y giro.\n torch.tensor([\n float(throttle), float(steering)\n ]))\n\n return sample\n\n\nif __name__ == '__main__':\n model = CustomMobileNet(pretrained=True)\n\n model.cuda()\n\n train_dir = 'path/to/train_dataset_final.csv'\n val_dir = 'path/to/val_dataset_final.csv'\n\n train_loader = torch.utils.data.DataLoader(\n dataset=DriveDataset(train_dir, transforms.Compose([\n transforms.Resize((224, 224), 
interpolation=Image.BICUBIC),\n transforms.ToTensor(),\n lambda T: T[:3]\n ])),\n batch_size=64,\n shuffle=True,\n num_workers=12,\n pin_memory=True\n )\n\n val_loader = torch.utils.data.DataLoader(\n dataset=DriveDataset(val_dir, transforms.Compose([\n transforms.Resize((224, 224), interpolation=Image.BICUBIC),\n transforms.ToTensor()\n ])),\n batch_size=64,\n shuffle=False,\n num_workers=12,\n pin_memory=True\n )\n\n criterion = nn.MSELoss().cuda()\n optimizer = torch.optim.Adam(model.parameters())\n\n losses = []\n\n # Iterar para entrenar la red\n for epoch in range(50):\n start = time.time()\n\n model.train()\n train_loss = 0\n # Definir barra de progreso interactiva\n train_progress = tqdm(enumerate(train_loader),\n desc=\"train\",\n total=len(train_loader))\n \n # Iterar por cada minibatch de 64 muestras\n for i, (X, actions, y) in train_progress:\n # Copiar los datos a la GPU\n X = X.cuda(non_blocking=True)\n actions = actions.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n y_hat = model(X, actions)\n \n # Calcular el error cuadrático medio para la aceleración\n loss1 = criterion(y_hat[:, 0], y[:, 0])\n # Calcular el error cuadrático medio para la dirección\n loss2 = criterion(torch.tanh(y_hat[:, 1]), y[:, 1])\n # Combinar ambos errores\n loss = (loss1 + loss2)/2\n\n # Paso de optimización\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n train_loss += float(loss.detach())\n train_progress.set_postfix(loss=(train_loss/(i+1)))\n\n model.eval()\n\n val_loss = 0\n with torch.no_grad():\n model.eval()\n val_progress = tqdm(enumerate(val_loader),\n desc=\"val\",\n total=len(val_loader))\n for i, (X, actions, y) in val_progress:\n X = X.cuda(non_blocking=True)\n actions = actions.cuda(non_blocking=True)\n y = y.cuda(non_blocking=True)\n y_hat = model(X, actions)\n\n loss1 = criterion(y_hat[:, 0], y[:, 0])\n loss2 = criterion(y_hat[:, 1], y[:, 1])\n loss = (loss1 + loss2) / 2\n\n val_loss += float(loss)\n val_progress.set_postfix(loss=(val_loss/(i+1)))\n\n end = time.time()\n\n t_loss = train_loss / len(train_loader)\n v_loss = val_loss / len(val_loader)\n print('epoch:', epoch, 'L:', t_loss, v_loss, 'Time:', end-start)\n\n torch.save(\n {\n 'epoch': epoch,\n 'arch': 'mobilenet_custom',\n 'state_dict': model.state_dict()\n },\n f'weights/mob_drive_{epoch}.pth.tar')\n losses.append([epoch, t_loss, v_loss])\n np.save('hist_drive', np.array(losses))\n","repo_name":"nubol23/thesis-document","sub_path":"codigos/apendices/drive_train.py","file_name":"drive_train.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1410920170","text":"from app import db\nfrom flask import Blueprint, request, jsonify, make_response, abort\nfrom app.models.board import Board\nfrom app.models.card import Card\nfrom .route_helper import validate_model, create_card, validate_message_length, query_sort, validate_board_data\n\nbp = Blueprint('boards', __name__, url_prefix=\"/boards\")\n\n# CREATE\n# create a board endpoint, returns 201 if successful\n@bp.route(\"\", methods=[\"POST\"])\ndef create_board():\n request_body = request.get_json()\n\n validate_board_data(request_body)\n\n new_board = Board.from_dict(request_body)\n\n db.session.add(new_board)\n db.session.commit()\n\n return jsonify({\"board\": new_board.to_dict()}), 201\n\n# READ\n# Gets all Boards and returns 200\n@bp.route(\"\", methods=[\"GET\"])\ndef read_all_boards():\n boards = Board.query.all()\n\n board_response = []\n for 
board in boards: \n board_response.append(board.to_dict())\n return jsonify(board_response), 200\n\n# Gets one board by board id and returns 200 if found\n@bp.route(\"/\", methods=[\"GET\"])\ndef read_one_board(board_id):\n board = validate_model(Board, board_id)\n response_body = board.to_dict()\n return jsonify(response_body), 200\n\n# Gets cards by board_id\n@bp.route(\"//cards\", methods=[\"GET\"])\ndef retrieve_cards(board_id): \n board = validate_model(Board, board_id)\n\n card_query = query_sort(board.id)\n\n cards_response = [card.to_dict() for card in card_query]\n\n return jsonify(cards_response), 200\n\n# UPDATE\n# assign cards to a board\n@bp.route(\"//cards\", methods=[\"POST\"])\ndef add_cards_to_board(board_id):\n board = validate_model(Board, board_id)\n \n request_body = request.get_json()\n validate_message_length(request_body)\n card_id = create_card(request_body, board_id)\n \n card = validate_model(Card, card_id)\n board.cards.append(card)\n \n db.session.commit()\n\n return jsonify({\"board_id\": board.id, \"card\": card.to_dict()})\n\n\n","repo_name":"lizzach/back-end-inspiration-board","sub_path":"app/board_routes.py","file_name":"board_routes.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"14237036608","text":"from app import app\nfrom flask import render_template, jsonify, flash\nfrom utils import get_all_reminders, get_one_reminder, compress\n\n@app.route('/')\ndef index():\n page_title = \"Reminder - Home\"\n \n template = render_template('index.html', page_title=page_title)\n\n return compress(template)\n\n@app.route('/r')\ndef reminders():\n count=0\n reminders = get_all_reminders()\n\n # reminder counter\n for reminder in reminders:\n if reminder['deleted'] == 0:\n count += 1\n\n page_title = \"Reminders ({}) - All Reminders\".format(count)\n \n template = render_template('reminders.html', reminders=reminders, page_title=page_title)\n\n return compress(template)\n\n@app.route('/r/')\ndef reminder(id):\n page_title=''\n reminder = get_one_reminder(id)\n\n if reminder['deleted'] == 1:\n page_title = \"Deleted\"\n else:\n page_title = \"Reminder - {0}\".format(reminder['title'])\n\n template = render_template('reminder.html', reminder=reminder, page_title=page_title)\n\n return compress(template)\n\n@app.route('/dashboard')\ndef dashboard():\n page_title = \"Reminder - Dashboard\"\n name = \"Hassan\"\n reminders = get_all_reminders()\n\n template = render_template('dashboard.html', page_title=page_title, name=name, reminders=reminders)\n\n return compress(template)","repo_name":"Suuuuuprr/Rustin-Joger-final","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"39705052694","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# utf-8 中文编码\n\nfrom gevent import socket as _socket\nimport re\nimport gevent\nfrom httptool import HttpPool\nfrom gevent.lock import BoundedSemaphore\n\n\nclass UpstreamBase(object):\n u\"\"\" 特定线路 socket 模块\n\n 使用方式为创建本类实例,然后把实例当作 socket 模块即可。所有的操作都会经过 config 配置的线路。\"\"\"\n\n # 封装好的 socket 类\n socket = None\n\n def __init__(self, config):\n self.type = config.get('type', None)\n self.config = config\n\n import upstream as upstream_mod\n\n upconfig = config.get('upstream', None)\n\n if upconfig:\n uptype = upconfig.get(\"type\", None)\n if uptype is None:\n raise ConfigError(u'[配置错误] upstream 未配置 type 
!')\n\n Upstream = upstream_mod.get_upstream(uptype)\n if Upstream is None:\n raise ConfigError(u'[配置错误] upstream type %s 不被支持!' % uptype)\n\n self.upstream = Upstream(upconfig)\n pass\n else:\n if self.type != 'direct':\n self.upstream = upstream_mod.get_upstream('direct')({'type':'direct'})\n else:\n self.upstream = _socket\n\n self.http_pool = HttpPool(self,lock=BoundedSemaphore)\n\n def create_connection(self,address, timeout=5):\n if timeout == _socket._GLOBAL_DEFAULT_TIMEOUT:\n timeout = 10\n raise NotImplementedError()\n\n def get_display_name(self):\n return self.get_name()\n\n def get_name(self):\n return '%s-host:port' % (self.type)\n\n # http 请求处理\n # http 代理可以重写本方法\n # socks 类代理不需要处理。\n def get_http_conn(self,address):\n u\"\"\" 获得http with 连接 \"\"\"\n return self.http_pool.get_conn(address)\n\n\n\n\nclass ConfigError(ValueError):\n def __init__(self, *args, **kwargs):\n ValueError.__init__(self, *args, **kwargs)\n\nclass UpstreamError(_socket.error):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\nclass UpstreamLoginError(UpstreamError):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\nclass UpstreamProtocolError(UpstreamError):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\nclass UpstreamConnectError(UpstreamError):\n def __init__(self, *args, **kwargs):\n _socket.error.__init__(self, *args, **kwargs)\n\n\n\n","repo_name":"GameXG/TcpRoute","sub_path":"upstream/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":75,"dataset":"github-code","pt":"69"} +{"seq_id":"74785394140","text":"\"\"\"\nComment Module Serializer\n\"\"\"\nfrom rest_framework import serializers\nfrom datetime import date\n\nfrom siteinfo.models import Error\nfrom cafe.models import Bartender, Cafe, MenuItem\nfrom comment.models import Comment\n\n\nclass CreateCommentSerializer(serializers.ModelSerializer):\n \"\"\"Create Comment Serializer\"\"\"\n\n class Meta:\n model = Comment\n fields = [\"item_id\", \"text\"]\n\n def validate(self, attrs):\n item_id = attrs.get(\"item_id\")\n\n user = self.context.get(\"request\").user\n menu_item = MenuItem.objects.filter(id=item_id).first()\n\n if not menu_item:\n msg = \"این آیتم وجود ندارد\"\n raise serializers.ValidationError(msg)\n cafe = menu_item.cafe\n\n is_bartender = Bartender.objects.filter(\n cafe=cafe, user=user, is_active=True\n ).exists()\n\n if user == cafe.owner or is_bartender:\n msg = \"برای خودتون میخواین کامنت بذارید ؟\"\n raise serializers.ValidationError(msg)\n\n return attrs\n\n def create(self, validated_data):\n user = self.context.get(\"request\").user\n item_id = validated_data.get(\"item_id\")\n\n try:\n menu_item = MenuItem.objects.filter(id=item_id).first()\n cafe = menu_item.cafe\n\n now_date = date.today()\n\n comment = Comment.objects.create(\n user=user,\n cafe_id=cafe.id,\n is_cafe=False,\n date=now_date,\n **validated_data\n )\n comment.save()\n return comment\n\n except Exception as e:\n Error.objects.create(\n reference=\"Comment - serializers.py - create customer comment\",\n status=str(type(e).__name__),\n description=str(e),\n )\n msg = \"مشکلی ایجاد شده\"\n raise serializers.ValidationError(msg)\n\n\nclass ResponseCommentSerializer(serializers.ModelSerializer):\n \"\"\"Response Comment Serializer\"\"\"\n\n id = serializers.IntegerField(required=True)\n # text = serializers.CharField(max_length=500, required=True)\n\n class 
Meta:\n model = Comment\n fields = [\"id\", \"text\"]\n\n def validate(self, attrs):\n comment_id = attrs.get(\"id\")\n user = self.context.get(\"request\").user\n try:\n comment = Comment.objects.get(id=comment_id)\n cafe = Cafe.objects.get(id=comment.cafe_id)\n is_bartender = Bartender.objects.filter(\n cafe=cafe, user=user, is_active=True\n ).exists()\n\n if not (user == cafe.owner or is_bartender):\n msg = \"جواب کامنت بقیه را نمیتوانید بدهید\"\n raise serializers.ValidationError(msg)\n except:\n msg = \"همچین کامنتی وجود ندارد\"\n raise serializers.ValidationError(msg)\n return attrs\n\n def create(self, validated_data):\n user = self.context.get(\"request\").user\n comment_id = validated_data.get(\"id\")\n text = validated_data.get(\"text\")\n\n try:\n comment = Comment.objects.get(id=comment_id)\n now_date = date.today()\n\n response = Comment.objects.create(\n user=user,\n cafe_id=comment.cafe_id,\n is_cafe=True,\n date=now_date,\n text=text,\n item_id=comment.item_id,\n )\n\n response.save()\n\n comment.response = response\n comment.save()\n\n return response\n\n except Exception as e:\n Error.objects.create(\n reference=\"Comment - serializers.py - create response comment\",\n status=str(type(e).__name__),\n description=str(e),\n )\n msg = \"مشکلی ایجاد شده\"\n raise serializers.ValidationError(msg)\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n \"\"\"Comment Serializer\"\"\"\n\n class Meta:\n model = Comment\n fields = \"__all__\"\n # read_only_fields = '__all__'\n\n def to_representation(self, instance):\n response = super().to_representation(instance)\n response[\"name\"] = instance.user.fullName\n try:\n item = MenuItem.objects.filter(id=instance.item_id).first()\n response[\"item\"] = item.title\n\n if instance.response:\n response[\"response\"] = {\n \"name\": instance.response.user.fullName,\n \"date\": instance.response.date,\n \"text\": instance.response.text,\n }\n\n except:\n None\n return response\n","repo_name":"Hamid-Ba/Iran-Cafe","sub_path":"comment/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"20352205539","text":"_ = int(input())\n#inp = list(map(int,input().split()))\ntestInp = input().split(\" \")\norigInp = list(map(int,testInp))\nSorInp = list(map(int,testInp))\nSorInp.sort()\n\ngoods = 0\nrev = _-1\n\ncount = 0\nif SorInp==origInp:\n\tprint(\"yes\")\n\tprint(\"1 1\")\nelse:\n\twhile origInp[goods]==SorInp[goods]:\n\t\tgoods += 1\n\twhile origInp[rev] == SorInp[rev]:\n\t\trev -= 1\n\tif origInp[goods:rev+1] == SorInp[goods:rev+1][::-1]:\n\t\tprint(\"yes\")\n\t\tprint(1+min(goods,rev),1+max(goods,rev))\n\telse:\n\t\tprint(\"no\")","repo_name":"GarlicToothpaste/Codeforces-Solutions","sub_path":"451B/451B.py","file_name":"451B.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"70197145821","text":"# import necessary packages\nimport numpy as np\nimport cv2\n\ndef process_letter(thresh,output):\t\n\t# assign the kernel size\t\n\tkernel = np.ones((2,1), np.uint8) # vertical\n\t# use closing morph operation then erode to narrow the image\t\n\ttemp_img = cv2.morphologyEx(thresh,cv2.MORPH_CLOSE,kernel,iterations=3)\n\t# temp_img = cv2.erode(thresh,kernel,iterations=2)\t\t\n\tletter_img = cv2.erode(temp_img,kernel,iterations=1)\n\t\n\t# find contours \n\t(_,contours, _) = 
cv2.findContours(letter_img.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\t\n\t# loop in all the contour areas\n\tfor cnt in contours:\n\t\tx,y,w,h = cv2.boundingRect(cnt)\n\t\tcv2.rectangle(output,(x-1,y-5),(x+w,y+h),(0,255,0),1)\n\n\treturn output\t\n\n\nfor i in range(1,34):\n\tpath1=\"gray_num2/num\"+str(i)+\".jpg\"\n\timage1 = cv2.imread(path1)\n\toutput1_letter = cv2.imread(path1)\n\tgray1 = cv2.cvtColor(image1, cv2.COLOR_BGR2GRAY)\n\tret1,th1 = cv2.threshold(gray1,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n\n\toutput1_letter = process_letter(th1,output1_letter)\n\tpath2=\"gray_num2_output/num\"+str(i)+\".jpg\"\n\tcv2.imwrite(path2, output1_letter)\t\n","repo_name":"katomaran-videoanalytics/Testing","sub_path":"layout/text_analysis.py","file_name":"text_analysis.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16103993446","text":"class Solution(object):\n def validateStackSequences(self, pushed, popped):\n \"\"\"\n :type pushed: List[int]\n :type popped: List[int]\n :rtype: bool\n \"\"\"\n if pushed == popped: return True\n size = len(pushed)\n\n stack = []\n c = 0\n for index in range(size):\n stack.append(pushed[index])\n\n while stack:\n if stack[-1] != popped[0]:\n break\n else:\n stack.pop()\n popped.pop(0)\n\n if stack == []:\n return True\n return False\n","repo_name":"Huoyanlifusu/LeetCode","sub_path":"946栈的压入弹出.py","file_name":"946栈的压入弹出.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74281746779","text":"# -*- coding: utf-8 -*-\nimport itertools\nimport time\n\nfrom django.core.validators import MinLengthValidator\nfrom django.db import models\nfrom django.db.models.signals import post_save, pre_delete, post_delete\nfrom django.urls import reverse\nfrom mptt.fields import TreeForeignKey\nfrom mptt.managers import TreeManager\nfrom mptt.models import MPTTModel\nfrom email.utils import formatdate\nfrom main.mixins.models import SitePageModel\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Post(SitePageModel):\n LIST_VIEW_HEADING = _(u'All posts')\n comments_count = models.IntegerField(_(u'Comments count'), default=0)\n\n class Meta:\n verbose_name = _(u'Post')\n verbose_name_plural = _(u'Posts')\n ordering = ('last_modified',)\n\n def get_comments(self, root=None):\n if root:\n return root.get_children()\n else:\n return Comment.tree.filter(post=self, level__lte=2)\n\n def get_comments_count(self):\n return self.comments_count\n\n @classmethod\n def get_breadcrumbs_base(cls):\n return [\n {\n 'title': cls.LIST_VIEW_HEADING,\n 'url': reverse('post-list')\n },\n ]\n\n def get_breadcrumbs(self):\n return itertools.chain(\n self.get_breadcrumbs_base(),\n [\n {\n 'title': self.title,\n 'url': self.get_absolute_url()\n }\n ]\n )\n\n @models.permalink\n def get_absolute_url(self):\n return 'post-detail', (), {'slug': self.slug}\n\n\nclass Comment(MPTTModel):\n user = models.ForeignKey('auth.User', verbose_name=_(u'User'))\n post = models.ForeignKey('Post', verbose_name=_(u'Post'))\n\n message = models.TextField(\n verbose_name=_(u'Message'),\n max_length=1000,\n validators=[MinLengthValidator(5)]\n )\n\n last_modified = models.DateTimeField(\n auto_created=True,\n auto_now=True\n )\n\n parent_comment = TreeForeignKey(\n 'self',\n verbose_name=_(u'parent comment'),\n blank=True,\n null=True\n )\n\n tree = TreeManager()\n\n def 
__unicode__(self):\n return _(u'From %s [%s]') % (self.user, self.get_last_modified())\n\n class Meta:\n verbose_name = _(u'Comment')\n verbose_name_plural = _(u'Comments')\n\n class MPTTMeta:\n parent_attr = 'parent_comment'\n order_insertion_by = 'last_modified'\n\n def get_last_modified(self):\n return formatdate(time.mktime(self.last_modified.timetuple()), usegmt=True)\n\n\ndef update_post(sender, instance, **kwargs):\n instance.post.comments_count = instance.post.comment_set.all().count()\n instance.post.save()\n Comment.tree.rebuild()\n\npost_save.connect(update_post, Comment)\npost_delete.connect(update_post, Comment)\n","repo_name":"Enweave/ex_strategia","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"32421643742","text":"\"\"\" VAE for Text Generation\nThis is for Module 1: Candidates Generation.\nUsage: python VAE_Text_Generation.py --dataset reddit\n\"\"\"\nimport argparse\nimport math\nimport os\nimport numpy as np\nimport torch as T\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom utility.VAE_Text_Generation.dataset import get_iterators\nfrom utility.VAE_Text_Generation.helper_functions import get_cuda\nfrom utility.VAE_Text_Generation.model import VAE\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--batch_size', type=int, default=8)\nparser.add_argument('--n_vocab', type=int, default=12000)\nparser.add_argument('--epochs', type=int, default=1000)\nparser.add_argument('--n_hidden_G', type=int, default=512)\nparser.add_argument('--n_layers_G', type=int, default=2)\nparser.add_argument('--n_hidden_E', type=int, default=512)\nparser.add_argument('--n_layers_E', type=int, default=1)\nparser.add_argument('--n_z', type=int, default=100)\nparser.add_argument('--word_dropout', type=float, default=0.5)\nparser.add_argument('--rec_coef', type=float, default=7)\nparser.add_argument('--lr', type=float, default=0.00001)\nparser.add_argument('--gpu', type=int, default=0)\nparser.add_argument('--n_highway_layers', type=int, default=2)\nparser.add_argument('--n_embed', type=int, default=300)\nparser.add_argument('--out_num', type=int, default=30000)\nparser.add_argument('--unk_token', type=str, default=\"\")\nparser.add_argument('--pad_token', type=str, default=\"\")\nparser.add_argument('--start_token', type=str, default=\"\")\nparser.add_argument('--end_token', type=str, default=\"\")\nparser.add_argument('--dataset', type=str, default=\"reddit\")\nparser.add_argument('--training', action='store_true')\nparser.add_argument('--resume_training', action='store_true')\n\n\nopt = parser.parse_args()\nprint(opt)\nsave_path = \"tmp/saved_VAE_models/\" + opt.dataset + \".tar\"\nprint(save_path)\nif not os.path.exists(\"tmp/saved_VAE_models\"):\n os.makedirs(\"tmp/saved_VAE_models\")\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(opt.gpu)\n\ncandidates_path = opt.dataset + '_for_VAE.txt'\ntrain_iter, val_iter, vocab = get_iterators(opt, path='./data/', fname=candidates_path)\nopt.n_vocab = len(vocab)\nif opt.training:\n vae = VAE(opt)\n vae.embedding.weight.data.copy_(vocab.vectors) #Intialize trainable embeddings with pretrained glove vectors\n vae = get_cuda(vae)\n trainer_vae = T.optim.Adam(vae.parameters(), lr=opt.lr)\nelse:\n checkpoint = T.load(save_path)\n vae = checkpoint['vae_dict']\n trainer_vae = checkpoint['vae_trainer']\n if 'opt' in checkpoint:\n opt_old = checkpoint['opt']\n print(opt_old)\n\n\ndef 
create_generator_input(x, train):\n G_inp = x[:, 0:x.size(1)-1].clone()\t #input for generator should exclude last word of sequence\n if train == False:\n return G_inp\n r = np.random.rand(G_inp.size(0), G_inp.size(1)) #Perform word_dropout according to random values (r) generated for each word\n for i in range(len(G_inp)):\n for j in range(1, G_inp.size(1)):\n if r[i, j] < opt.word_dropout and G_inp[i, j] not in [vocab.stoi[opt.pad_token], vocab.stoi[opt.end_token]]:\n G_inp[i, j] = vocab.stoi[opt.unk_token]\n return G_inp\n\n\ndef train_batch(x, G_inp, step, train=True):\n logit, _, kld = vae(x, G_inp, None, None)\n logit = logit.view(-1, opt.n_vocab)\t #converting into shape (batch_size*(n_seq-1), n_vocab) to facilitate performing F.cross_entropy()\n x = x[:, 1:x.size(1)]\t #target for generator should exclude first word of sequence\n x = x.contiguous().view(-1)\t #converting into shape (batch_size*(n_seq-1),1) to facilitate performing F.cross_entropy()\n rec_loss = F.cross_entropy(logit, x)\n kld_coef = (math.tanh((step - 15000)/1000) + 1) / 2\n # kld_coef = min(1,step/(200000.0))\n loss = opt.rec_coef*rec_loss + kld_coef*kld\n if train==True:\t #skip below step if we are performing validation\n trainer_vae.zero_grad()\n loss.backward()\n trainer_vae.step()\n return rec_loss.item(), kld.item()\n\n\n# def load_model_from_checkpoint():\n # global vae, trainer_vae\n # checkpoint = T.load(save_path)\n # vae.load_state_dict(checkpoint['vae_dict'])\n # trainer_vae.load_state_dict(checkpoint['vae_trainer'])\n # return checkpoint['step'], checkpoint['epoch']\n\n\ndef training():\n start_epoch = step = 0\n if opt.resume_training:\n step, start_epoch = checkpoint['step'], checkpoint['epoch']\n for epoch in range(start_epoch, opt.epochs):\n vae.train()\n train_rec_loss = []\n train_kl_loss = []\n for batch in train_iter:\n x = get_cuda(batch.text) \t #Used as encoder input as well as target output for generator\n G_inp = create_generator_input(x, train=True)\n rec_loss, kl_loss = train_batch(x, G_inp, step, train=True)\n train_rec_loss.append(rec_loss)\n train_kl_loss.append(kl_loss)\n step += 1\n\n vae.eval()\n valid_rec_loss = []\n valid_kl_loss = []\n for batch in val_iter:\n x = get_cuda(batch.text)\n G_inp = create_generator_input(x, train=False)\n with T.autograd.no_grad():\n rec_loss, kl_loss = train_batch(x, G_inp, step, train=False)\n valid_rec_loss.append(rec_loss)\n valid_kl_loss.append(kl_loss)\n\n train_rec_loss = np.mean(train_rec_loss)\n train_kl_loss = np.mean(train_kl_loss)\n valid_rec_loss = np.mean(valid_rec_loss)\n valid_kl_loss = np.mean(valid_kl_loss)\n\n print(\"No.\", epoch, \"T_rec:\", '%.2f' % train_rec_loss, \"T_kld:\", '%.2f' % train_kl_loss, \"V_rec:\", '%.2f' % valid_rec_loss, \"V_kld:\", '%.2f' % valid_kl_loss)\n if epoch >= 50 and epoch % 10 == 0:\n print('save model ' + str(epoch) + '...')\n T.save({'epoch': epoch + 1, 'vae_dict': vae, 'vae_trainer': trainer_vae, 'step': step, 'opt': opt}, save_path)\n generate_sentences(5)\n\n\ndef generate_sentences(n_examples, save=0):\n vae.eval()\n out = []\n for i in tqdm(range(n_examples)):\n z = get_cuda(T.randn([1, vae.n_z]))\n h_0 = get_cuda(T.zeros(vae.generator.n_layers_G, 1, vae.generator.n_hidden_G))\n c_0 = get_cuda(T.zeros(vae.generator.n_layers_G, 1, vae.generator.n_hidden_G))\n G_hidden = (h_0, c_0)\n G_inp = T.LongTensor(1, 1).fill_(vocab.stoi[opt.start_token])\n G_inp = get_cuda(G_inp)\n out_str = \"\"\n while (G_inp[0][0].item() != vocab.stoi[opt.end_token]) and (G_inp[0][0].item() != 
vocab.stoi[opt.pad_token]):\n with T.autograd.no_grad():\n logit, G_hidden, _ = vae(None, G_inp, z, G_hidden)\n probs = F.softmax(logit[0], dim=1)\n G_inp = T.multinomial(probs, 1)\n out_str += (vocab.itos[G_inp[0][0].item()]+\" \")\n print(out_str[:-6])\n out.append(out_str[:-6])\n if save:\n original = []\n with open('./data/' + candidates_path, 'r') as fin:\n for line in fin:\n original.append(line.strip())\n fname = './data/' + opt.dataset + '_candidates.txt'\n with open(fname, 'w') as fout:\n for i in out + original:\n fout.write(i)\n fout.write('\\n')\n\n\nif __name__ == '__main__':\n if opt.training or opt.resume_training:\n training()\n generate_sentences(opt.out_num, save=1)\n else:\n generate_sentences(opt.out_num, save=1)\n","repo_name":"WanzhengZhu/GPS","sub_path":"VAE_Text_Generation.py","file_name":"VAE_Text_Generation.py","file_ext":"py","file_size_in_byte":7608,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"69"} +{"seq_id":"70538132060","text":"import logging\nfrom typing import Optional, Union, Collection\n\nfrom aiogram import Dispatcher\nfrom aiogram.dispatcher.filters import BoundFilter\nfrom aiogram.dispatcher.handler import ctx_data\nfrom aiogram.types import CallbackQuery\nfrom aiogram.types.base import TelegramObject\n\nfrom ..config import Role\n\n\nclass RoleFilter(BoundFilter):\n key = 'role'\n\n def __init__(\n self,\n role: Union[None, Role, Collection[Role]] = None,\n ):\n if role is None:\n self.roles = None\n elif isinstance(role, Role):\n self.roles = {role}\n else:\n self.roles = set(role)\n\n async def check(self, obj: TelegramObject):\n if self.roles is None:\n return True\n data = ctx_data.get()\n return data.get(\"role\") in self.roles\n\n\nclass SuperuserFilter(BoundFilter):\n key = 'is_superuser'\n\n def __init__(self, is_superuser: Optional[bool] = None):\n self.is_superuser = is_superuser\n\n async def check(self, obj: TelegramObject):\n if self.is_superuser is None:\n return True\n data = ctx_data.get()\n\n return (data.get(\"role\") is Role.SUPERUSER) == self.is_superuser\n\n\nclass FileSelectionMenuAccessFilter(BoundFilter):\n\n async def check(self, call: CallbackQuery):\n state = Dispatcher.get_current().current_state()\n state_data = await state.get_data()\n applicant_role = state_data.get('role')\n free_files_in_google_folder = state_data.get('free_files')\n if applicant_role == Role.EMPLOYEE.value and free_files_in_google_folder:\n return {'files': free_files_in_google_folder}\n","repo_name":"DerSerhii/WorkScheduleBot","sub_path":"schedulebot/filters/role.py","file_name":"role.py","file_ext":"py","file_size_in_byte":1653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"40666049942","text":"class Solution:\n def maxVowels(self, s: str, k: int) -> int:\n \n maxVowels = -1\n idx = 0\n size = len(s)\n count = 0\n vowels = [\"a\",\"e\",\"i\",\"o\",\"u\"]\n \n for i in range(0,k):\n if s[i] in vowels:\n count+=1\n \n #print(idx,count)\n maxVowels = max(count,maxVowels)\n idx = 1\n \n while(idx<=(size-k+1)):\n if idx-1 >= 0 and s[idx-1] in vowels:\n count-=1\n if (idx+k-1)/', views.StudentAPI.as_view()),\n# path('swagger/', schema_view.with_ui('swagger', cache_timeout=0),name='schema-swagger-ui'),\n# ]\n\n\n# GenericAPIView and Model Mixing\n\n# urlpatterns = [\n# path('admin/', admin.site.urls),\n# path('studentapi/', views.LCStudentList.as_view()),\n# path('studentapi//', views.RUDStudentAPI.as_view()),\n \n# ]\n \nurlpatterns = [\n 
path('admin/', admin.site.urls),\n path('studentapi/', views.StudentLC.as_view()),\n path('studentapi//', views.StudentRUD.as_view()),\n \n]\n \n\n\n","repo_name":"maheenkhalid-coder/CRUD-api","sub_path":"crud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"3501940632","text":"from turtle import width\nimport cv2\nimport imutils\n\ncap = cv2.VideoCapture(0)\n\nfilter = cv2.imread(r'input_assets\\2022logo.png',cv2.IMREAD_UNCHANGED)\n\n#instantiate classifier\nfaceDet = cv2.CascadeClassifier('input_assets\\haarcascade_frontalface_default.xml')\n\n\nvideo = []\n\n\nwhile True:\n\n ret, frame = cap.read()\n frame = cv2.flip(frame,1)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n if not ret: break\n\n\n\n faces = faceDet.detectMultiScale(gray, 1.2,6)\n\n for (x,y,w,h) in faces:\n #cv2.rectangle(frame,(x,y), (x+w, y+h),(0,255,0), 2)\n \n resizedFilter = imutils.resize(filter, width=w)\n heightFilter = resizedFilter.shape[0]\n widthFilter = w\n\n #This is to make the filter show a little bit below the upper border of the rectangle face.\n showBelow = heightFilter // 5\n\n dif = 0\n\n yFilter = y-heightFilter+showBelow\n # Adding filter to frame on top of the face detected\n if yFilter >= 0:\n filterArea = frame[yFilter:y+showBelow, x:x+w] \n else:\n dif = abs(yFilter)\n filterArea = frame[0:y+showBelow,x:x+w]\n \n\n filterMask = resizedFilter[:,:, 3]\n \n filterMaskInv = cv2.bitwise_not(filterMask)\n\n bgBlack= cv2.bitwise_and(resizedFilter,resizedFilter,mask=filterMask)\n bgBlack = bgBlack[dif:,:,:3]\n\n bgFrame = cv2.bitwise_and(filterArea,filterArea, mask=filterMaskInv[dif:,:])\n\n result = cv2.add(bgBlack,bgFrame)\n\n if yFilter >= 0:\n frame[yFilter:y+showBelow, x:x+w] = result\n else:\n frame[0:y+showBelow, x:x+w] = result\n \n video.append(frame)\n cv2.imshow('video', frame)\n \n\n k =cv2.waitKey(1)\n\n if k == ord('q'):\n break\n\n\n# FPS = cap.get(5) #Frames\n\n# Width = int(cap.get(3)) #Width\n# Height = int(cap.get(4)) #Height\n\n# fourcc = cv2.VideoWriter_fourcc(*'avc1')\n# out = cv2.VideoWriter(r\"output_assets\\face_filter.mp4\",fourcc, FPS, (Width,Height)) \n\n# for img in video:\n# out.write(img)\n# out.release()\n# cap.release()","repo_name":"Alefig12/opencv-learning","sub_path":"day11/28-faceFilters.py","file_name":"28-faceFilters.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"11339575793","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[2]:\n\n\n#pip install textblob\n\n\n# In[4]:\n\n\n#pip install emot\n\n\n# In[ ]:\n\n\n#pip install\n\n\n# In[ ]:\n\n\n#pip install\n\n\n# In[7]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport collections\nimport nltk\nimport io\nfrom textblob import Word\nimport re\nimport sys, os, csv\nimport string\nfrom sklearn import preprocessing\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom emot.emo_unicode import UNICODE_EMOJI #,EMOTICONS\nfrom nltk.corpus import 
stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.corpus import wordnet\nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import WordNetLemmatizer\nfrom collections import Counter\n\n\n# # Data Cleaning\n\n# In[8]:\n\n\ndef word_prob(word): return dictionary[word] / total\ndef words(text): return re.findall('[a-z]+', text.lower())\ndictionary = Counter(words(open('dataset/wordlists/merged.txt').read()))\nmax_word_length = max(map(len, dictionary))\ntotal = float(sum(dictionary.values()))\n\ndef viterbi_segment(text):\n probs, lasts = [1.0], [0]\n for i in range(1, len(text) + 1):\n prob_k, k = max((probs[j] * word_prob(text[j:i]), j)\n for j in range(max(0, i - max_word_length), i))\n probs.append(prob_k)\n lasts.append(k)\n words = []\n i = len(text)\n while 0 < i:\n words.append(text[lasts[i]:i])\n i = lasts[i]\n words.reverse()\n return words, probs[-1]\n\ndef fix_hashtag(text):\n text = text.group().split(\":\")[0]\n text = text[1:] # remove '#'\n try:\n test = int(text[0])\n text = text[1:]\n except:\n pass\n output = ' '.join(viterbi_segment(text)[0])\n return output\n\ndef prep(tweet):\n \"\"\"pattern = re.compile(r\"(.)\\1{2,}\")\n tweet = pattern.sub(r\"\\1\\1\", str(tweet))\n tweet = re.sub(r'http.?://[^\\s]+[\\s]?', '', str(tweet))\n punct = string.punctuation\n trantab = str.maketrans(punct, len(punct) * ' ') # Every punctuation symbol will be replaced by a space\n tweet = tweet.translate(trantab)\n tweet = tweet.lower()\n tweet = tweet.strip()\"\"\"\n \n tweet = tweet.lower()\n tweet = re.sub(\"(#[A-Za-z0-9]+)\", fix_hashtag, tweet)\n tweet = ' '.join(re.sub(\"(@[A-Za-z0-9]+)|([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n \n tweet = re.sub('\\d+', '', str(tweet))\n def get_wordnet_pos(word):\n tag = nltk.pos_tag([word])[0][1][0].upper()\n tag_dict = {\"J\": wordnet.ADJ, \"N\": wordnet.NOUN, \"V\": wordnet.VERB, \"R\": wordnet.ADV}\n return tag_dict.get(tag, wordnet.NOUN) \n \n ps = PorterStemmer()\n words = tweet.split()\n lemmatizer = WordNetLemmatizer()\n lemma_words = [lemmatizer.lemmatize(word, get_wordnet_pos(word)) for word in words]\n tweet = \" \".join(lemma_words)\n \n stopwords_list = stopwords.words('english')\n # Some words which might indicate a certain sentiment are kept via a whitelist\n whitelist = [\"n't\", \"not\", \"no\"]\n words = tweet.split()\n clean_words = [word for word in words if (word not in stopwords_list or word in whitelist) and len(word) > 1]\n tweet = \" \".join(clean_words)\n \n tweet = tweet.strip()\n return tweet\n\ndef vectorise_label(label):\n if label == \"empty\":return 0\n elif label == \"sadness\":return 2\n elif label == \"enthusiasm\":return 1\n elif label == \"neutral\":return 0\n elif label == \"worry\":return 2\n elif label == \"surprise\":return 1\n elif label == \"love\":return 3\n elif label == \"fun\":return 1\n elif label == \"hate\":return 4\n elif label == \"happiness\":return 1\n elif label == \"boredom\":return 0\n elif label == \"relief\":return 1\n elif label == \"anger\":return 4\n\n\n# In[ ]:\n\n\ndata1 = pd.read_csv(\"crawled_csv/processes/sad_processes.csv\", sep=',', encoding='utf-8')\ndataWriter = csv.writer(open('crawled_csv/prep/sad_prep.csv', 'w'), delimiter=',',lineterminator=\"\\n\")\ntotal = 2000\nfor i in range(2000):\n tweet= prep(data1.iloc[:,0][i])\n dataWriter.writerow([tweet, 2]) \nprint(\"Done!\")\n\n\n# In[225]:\n\n\ncount = 0\nwith open('crawled_csv/prep/sad_prep.csv', encoding = \"utf8\") as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in 
readCSV:\n count+=1\nprint(count)\n\n\n# In[245]:\n\n\ndata2 = pd.read_csv('dataset/data/text_emotion.csv', sep=',', encoding='utf-8')\nprint(\"Dataset shape:\",data2.shape)\nprint(data2.sentiment[0],\":\",data2.content[0])\n\n\n# In[251]:\n\n\ndataWriter = csv.writer(open('cleaned_data/data_prep.csv', 'w', encoding='utf-8'), delimiter=',',lineterminator=\"\\n\")\n\ntotal = 40000\nfor i in range(40000):\n tweet= prep(data2.content[i])\n dataWriter.writerow([tweet, str(vectorise_label(data2.sentiment[i]))])\n \nprint(\"Progress: \",100,\"\\nComplete!\")\n\n\n# In[252]:\n\n\ncount = 0\nwith open('cleaned_data/data_prep.csv') as csvfile:\n readCSV = csv.reader(csvfile, delimiter=',')\n for row in readCSV:\n count+=1\nprint(count)\n\n\n# ## Cleaned data file\n\n# In[38]:\n\n\ndata_train = pd.read_csv('cleaned_data/emotion_data_prep.csv', sep=',', encoding='utf-8')\nprint(\"Dataset shape:\",data_train.shape)\n\n\n# In[39]:\n\n\ncount = data_train.iloc[:,1].value_counts()\nplt.figure(figsize=(9,7))\nsns.barplot(count.index, count.values, alpha=0.8, palette=\"plasma\")\nplt.ylabel('Count', fontsize=12)\nplt.xlabel('Emotions', fontsize=12)\nplt.show()\n# 0 = neutral\n# 1 = happy\n# 2 = sad\n# 3 = love\n# 4 = anger\n\n\n# # Test Train Split\n\n# In[9]:\n\n\nX_train = data_train.iloc[:,0][:49611]\n#[:47583]\ny_train = data_train.iloc[:,-1][:49611]\n#[:47583]\nX_val = data_train.iloc[:,0][49612:]\n#[47584:]\ny_val = data_train.iloc[:,-1][49612:]\n#[47584:]\n\n\n# # TF-IDF Vectorizer\n\n# In[10]:\n\n\ntfidf = TfidfVectorizer(max_features=1000, analyzer='word',ngram_range=(1,3))\nX_train_tfidf = tfidf.fit_transform(X_train.astype('U'))\nX_val_tfidf = tfidf.fit_transform(X_val.astype('U'))\nprint(tfidf.vocabulary_)\n\n\n# In[11]:\n\n\nbow = tfidf.fit_transform(data_train.iloc[:,0].astype('U'))\nword_freq = dict(zip(tfidf.get_feature_names(), np.asarray(bow.sum(axis=0)).ravel()))\nword_counter = collections.Counter(word_freq)\nword_counter_df = pd.DataFrame(word_counter.most_common(30), columns = ['word', 'freq'])\nfig, ax = plt.subplots(figsize=(15, 10))\nsns.barplot(x=\"word\", y=\"freq\", data= word_counter_df, ax=ax, palette=\"plasma\")\nplt.show();\n\n\n# # Count Vectorizer\n\n# In[12]:\n\n\n# Extracting Count Vectors Parameters\ncount_vect = CountVectorizer(analyzer='word')\ncount_vect.fit(data_train.iloc[:,0].astype('U'))\nX_train_count = count_vect.transform(X_train.astype('U'))\nX_val_count = count_vect.transform(X_val.astype('U'))\nprint(count_vect.vocabulary_)\n\n\n# In[13]:\n\n\nbow = count_vect.fit_transform(data_train.iloc[:,0].astype('U'))\nprint(bow.shape)\nword_freq = dict(zip(count_vect.get_feature_names(), np.asarray(bow.sum(axis=0)).ravel()))\nword_counter = collections.Counter(word_freq)\nword_counter_df = pd.DataFrame(word_counter.most_common(30), columns = ['word', 'freq'])\nfig, ax = plt.subplots(figsize=(15, 10))\nsns.barplot(x=\"word\", y=\"freq\", data= word_counter_df, ax=ax, palette=\"plasma\")\nplt.show();\n\n\n# # Building models using different classifiers (TF-IDF vectorizer)\n\n# ### Model 1: Multinomial Naive Bayes Classifier\n\n# In[14]:\n\n\nnb = MultinomialNB()\nnb.fit(X_train_tfidf, y_train)\ny_pred = nb.predict(X_val_tfidf)\nprint('naive bayes tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# naive bayes tfidf accuracy 0.3837284308982422\n\n\n# ### Model 2: Linear SVM\n\n# In[15]:\n\n\nlsvm = SGDClassifier(alpha=0.001, random_state=5, max_iter=15, tol=None)\nlsvm.fit(X_train_tfidf, y_train)\ny_pred = lsvm.predict(X_val_tfidf)\nprint('svm using tfidf accuracy 
%s' % accuracy_score(y_pred, y_val))\n# svm tfidf accuracy 0.38493791323980003\n\n\n# ### Model 3: logistic regression\n\n# In[16]:\n\n\nlogreg = LogisticRegression(C=1, max_iter=1000)\nlogreg.fit(X_train_tfidf, y_train)\ny_pred = logreg.predict(X_val_tfidf)\nprint('log reg tfidf accuracy %s' % accuracy_score(y_pred, y_val))\n# log reg tfidf accuracy 0.4013868730849863\n\n\n# # Building models using different classifiers (Count vectorizer)\n\n# ### Model 1: Multinomial Naive Bayes Classifier\n\n# In[19]:\n\n\nnb1 = MultinomialNB()\nnb1.fit(X_train_count, y_train)\ny_pred = nb1.predict(X_val_count)\nprint('naive bayes count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# naive bayes count_vect accuracy 0.584663763909047\n\n\n# ### Model 2: Logistic Regression\n\n# In[24]:\n\n\nlogreg1 = LogisticRegression(C=1, max_iter=500)\nlogreg1.fit(X_train_count, y_train)\ny_pred = logreg1.predict(X_val_count)\nprint('log reg count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# log reg count_vect accuracy 0.6247379454926625\n\n\n# ### Model 3: Linear SVM\n\n# In[34]:\n\n\nlsvm1 = SGDClassifier(alpha=0.001, random_state=5, max_iter=2, tol=None)\nlsvm1.fit(X_train_count, y_train)\ny_pred = lsvm1.predict(X_val_count)\nprint('lsvm using count vectors accuracy %s' % accuracy_score(y_pred, y_val))\n# svm count_vect accuracy 0.620061280438639\n\n\n# # Testing\n\n# In[22]:\n\n\ntweets = pd.DataFrame([\"For instance, giving a kiss to your younger sibling daily after waking up in the morning and showing him how much you love them. For some happiness means loving life and seeing others happy. While some finds happiness in writing stories. Some conquer happiness in being simple yet the best person they can ever be. Everyone has their own unique way to feel happy by finding things that they never expected to find.\", # happy\n \"Love is the key to happiness. We all want to lead a happy life. People look around for happiness in power, fashion, wealth, drugs etc. But these things can only give temporary pleasures. The power of love can create miracles. Love can create unity among nations and its citizens. Love is the most beautiful feeling in the world. Love has given different meaning by different people depending upon how they have experienced this wonderful feeling.\", # love\n \"One day I was studying in my room when, all of a sudden, i heard hot words being exchanged between two persons in the street. I paid no attention, thinking it would be a minor quarrel but soon I heard the voices of a large number of people. I peeped from the window and saw that there was a street quarrel. I went downstairs and reached the spot in the twinkling of an eyes. I was at my wits end on seeing that both of them had come to blows. The people were standing around them and enjoying their quarrel but none tried to pacify them.\", # sad\n \"I am so angry at you!!!!!\", # anger\n \"you ve hit a new low with a danger of blm fascist slogan please stop it before too late stop\", # anger\n \"I love my doggg\", # love\n \"I think i'm gonna be sick :'‑(\", # sad\n \"I hate you so much\", # anger\n \"I'm at work\", # neutral\n \"@TheTombert i was watching Harpers Island, lol... there was no vodka involved\", # neutral\n \"sometimes i wish things could go back to the way they were the beginning of last summer\", # sad\n \"it's your 18th birthday finally!!! 
yippeeeee\", # happy\n \"still waiting in line\", # neutral\n \"aarrgghh - fu*k.....a hose has leaked water all over the new floating floor\", # anger\n \"that b*tch is so ugly\", # anger\n \"oh no he is hospitalised!!!\", # sad\n ])\n\ntweet_count = count_vect.transform(tweets[0])\n\n\n# In[43]:\n\n\n#Predicting the emotion of the tweet \ntweet_pred = logreg1.predict(tweet_count)\nprint(tweet_pred)\ntweets[0]\n# 0 = neutral\n# 1 = happy\n# 2 = sad\n# 3 = love\n# 4 = anger\n\n\n# In[44]:\n\n\nfinal_result=tweets.copy()\n\n\n# In[45]:\n\ndef output():\n final_result['result']=tweet_pred\n final_result=final_result.rename(columns={0:\"tweets\"})\n final_result=final_result.rename(columns={\"result\":\"predicted_emotion\"})\n final_result=final_result.replace({0: 'Neutral', 1: 'Happy', 2: 'Sad', 3: 'Love', 4: 'Anger'})\n final_result\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"lancelooottt/MindGraph","sub_path":"app/src/main/python/MLModels.py","file_name":"MLModels.py","file_ext":"py","file_size_in_byte":12184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31261169723","text":"f = open(\"madlibtest.txt\", \"r\")\n#empty string to store story\nstory = ''\n\nlineno = 1\nmadlib = \"\"\ntext = \"\"\nfor line in f:\n if line.startswith(\"*\"):\n #output as command, strip\n text = input('Give me a(n) ' + line[1:].strip() + ': ')\n else:\n #put lint onto madlib\n text = line\n #put text onto madlib\n madlib = madlib + \" \" + text.strip()\n#why is my madlib so awkward looking?\nprint(\"here is your madlib: \"+madlib)\nf.close()\n\nimport os\n\n#get path of this directory (because i sure don't know it)\ndir_path = os.getcwd()\n#put all filenames into dir_list\ndir_list = os.listdir(dir_path)\n\n#make list for text files\ntxt_files = []\nfor f in dir_list:\n #filter by those ending in .txt\n if f.endswith(\".txt\"):\n txt_files.append(f)\n\n#print text files\nprint(\"text files in this directory:\")\nfor t in txt_files:\n print(t)","repo_name":"rlsoderberg/rebeccaCS100","sub_path":"cs100a/module5/amadlibs.py","file_name":"amadlibs.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"33078337299","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Game',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('grid_width', models.IntegerField()),\n ('grid_height', models.IntegerField()),\n ('observer_log', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='Move',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('order', models.IntegerField(db_index=True)),\n ('game', models.ForeignKey(to='battleship_viewer.Game')),\n ],\n options={\n 'ordering': ['order'],\n },\n ),\n migrations.CreateModel(\n name='Player',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ],\n ),\n migrations.CreateModel(\n name='Ship',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('game', models.ForeignKey(to='battleship_viewer.Game')),\n ('player', 
models.ForeignKey(to='battleship_viewer.Player')),\n ],\n ),\n migrations.CreateModel(\n name='ShipLocation',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('x', models.IntegerField()),\n ('y', models.IntegerField()),\n ('ship', models.ForeignKey(to='battleship_viewer.Ship')),\n ],\n ),\n migrations.CreateModel(\n name='Shot',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('x', models.IntegerField()),\n ('y', models.IntegerField()),\n ('move', models.ForeignKey(to='battleship_viewer.Move')),\n ('player', models.ForeignKey(to='battleship_viewer.Player')),\n ],\n ),\n migrations.AddField(\n model_name='game',\n name='player1',\n field=models.ForeignKey(related_name='player1', to='battleship_viewer.Player'),\n ),\n migrations.AddField(\n model_name='game',\n name='player2',\n field=models.ForeignKey(related_name='player2', to='battleship_viewer.Player'),\n ),\n migrations.AddField(\n model_name='game',\n name='winner',\n field=models.ForeignKey(related_name='winner', to='battleship_viewer.Player', null=True),\n ),\n ]\n","repo_name":"okcpython/battleship_django","sub_path":"battleship_viewer/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"71677133661","text":"def shell_sort(arr):\n n = len(arr)\n gap = n // 2\n\n while gap > 0:\n for i in range(gap, n):\n temp = arr[i]\n k = i\n\n while k >= gap and arr[k - gap] > temp:\n arr[k] = arr[k - gap]\n k -= gap\n\n arr[k] = temp\n\n gap //= 2\n\n return arr\n\n\nprint(shell_sort([4, 6, 8, 3, 2, 1, 6]))\n","repo_name":"christianstefaniw/PythonAlgorithms","sub_path":"sorting/shell_sort.py","file_name":"shell_sort.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"21934791649","text":"from flask_restful import Resource, Api\nfrom flask_restful import fields, marshal_with\nfrom flask_restful import reqparse\nfrom flask import make_response , jsonify\nfrom application.database import db\nfrom application.models import Theatre , Show , Booking , User\nfrom flask import current_app as app\nfrom datetime import datetime\n\nfrom flask_jwt_extended import jwt_required, get_jwt_identity ######\nfrom traceback import print_exc\nfrom flask_restful import abort\nfrom application.helpers import admin_required\n\nimport application.data_access as da\nfrom time import perf_counter_ns\n\nfrom werkzeug.exceptions import HTTPException\n\nbooking_fields = {\n 'id': fields.Integer,\n 'show_id': fields.Integer,\n 'user_id': fields.Integer,\n 'booking_time': fields.String, # We'll format the datetime for output\n 'seats': fields.Integer,\n 'user_rating': fields.Integer\n}\n\n# Define the request parser for POST method\nbooking_parser = reqparse.RequestParser()\n#booking_parser.add_argument('show_id', type=int, required=True)\n#booking_parser.add_argument('user_id', type=int, required=True)\nbooking_parser.add_argument('seats', type=int, required=True)\n#booking_parser.add_argument('user_rating', type=int)\n\nupdate_booking_parser = reqparse.RequestParser()\nupdate_booking_parser.add_argument('user_rating', type=int , help='rating must be between 1 - 5')\n\n\n#get method for shows of particular user\n\nclass UserBookingAPI(Resource):\n @jwt_required() ######\n 
@marshal_with(booking_fields)\n def get(self):\n user_id = get_jwt_identity() ###### \n user = User.query.get(user_id)\n if not user:\n abort(404 , description=\"User not found\")\n else:\n start = perf_counter_ns()\n user_bookings = da.get_bookings_by_user_id(user_id)\n stop = perf_counter_ns()\n print(\"time taken :\" , stop - start)\n return user_bookings , 200\n \n \n \nclass AllBookingAPI(Resource):\n @jwt_required() ######\n @marshal_with(booking_fields)\n def get(self):\n bookings = Booking.query.all() ###### \n return bookings , 200\n \n \n# GET method for new booking\n\nclass BookingAPI(Resource):\n @jwt_required() ######\n @marshal_with(booking_fields)\n def post(self,show_id):\n #try:\n user_id = get_jwt_identity() ######\n args = booking_parser.parse_args()\n \n if not (User.query.get(user_id)):\n abort(404 , description=\"User not found\")\n \n show = Show.query.get(show_id)\n #show = da.get_show_by_show_id(show_id)\n if not show:\n abort(404, description=\"Show not found\")\n \n if show.show_capacity == 0 :\n abort(403 , description=\"Housefull!\")\n elif show.show_capacity < args['seats']:\n abort(403 , description=\"Enough seats not available!\")\n else:\n show.show_capacity = show.show_capacity - args['seats']\n \n booking = Booking(show_id=show_id,user_id=user_id, booking_time=datetime.now() , seats = args['seats'] , user_rating = 0)\n db.session.add(booking)\n db.session.commit()\n da.cache.delete_memoized(da.get_bookings_by_user_id , user_id)\n da.cache.delete_memoized(da.get_shows_by_theatreid , show.theatre_id)\n return booking\n \n \n\nclass UpdateBookingAPI(Resource):\n @marshal_with(booking_fields)\n @jwt_required()\n def put(self, id):\n user_id = get_jwt_identity()\n booking = Booking.query.get(id)\n if not booking:\n abort(404 , description=\"Booking not found\")\n\n args = update_booking_parser.parse_args()\n \n # Update user_rating if provided\n if 'user_rating' in args:\n booking.user_rating = args['user_rating']\n\n # Update the average rating for the show\n bookings = Booking.query.filter_by(show_id=booking.show_id).all()\n show = Show.query.get(booking.show_id)\n\n if not bookings:\n #show = Show.query.get(booking.show_id)\n show.rating =0.0\n else:\n total_rating = sum(booking.user_rating for booking in bookings)\n average_rating = total_rating / len(bookings)\n\n #show = Show.query.get(booking.show_id)\n show.rating = average_rating\n\n db.session.commit()\n da.cache.delete_memoized(da.get_bookings_by_user_id , user_id)\n da.cache.delete_memoized(da.get_shows_by_theatreid , show.theatre_id)\n \n\n return booking \n\n\n","repo_name":"jasleen9/XenonStack","sub_path":"ticketshow/application/controllers/api/booking.py","file_name":"booking.py","file_ext":"py","file_size_in_byte":4634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"34262633938","text":"def binarySearch(arr, val):\n\tlo = 0\n\thi = len(arr)-1\n\tctr = 0\n\tmid = int((lo + hi)/2)\n\tcurrent = arr[mid]\n\tarr.sort() #sort values into numerical order in case they aren't already\n\n\n\t\t\n\twhile val != arr[mid]:\n\t\tif val > arr[mid]:\n\t\t\tctr = ctr + 1\n\t\t\tprint (arr[mid])\n\t\t\tmid = int((mid + hi)/2)\n\t\t\tif valarr[mid-1]: #do this if value is in between to table values.\n\t\t\t\tprint (\"Value not in table\")\n\t\t\tif val>arr[-2]: #so we can test if value is last number in array\n\t\t\t\tif val == arr[-1]:\n\t\t\t\t\tmid=-1\n\t\t\t\telse:\n\t\t\t\t\tprint (\"Value not in 
table\")\n\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\t\n\t\tif val < arr[mid]:\n\t\t\tctr = ctr +1\n\t\t\tprint (arr[mid])\n\t\t\tmid = int((mid +lo)/2)\n\t\t\tif valarr[mid-1]: #do this if value is in between to table values but not in table.\n\t\t\t\tprint (\"Value not in table\")\n\t\t\tif val layer specification for the FF network in list form (eg. [5 3 3 2])\r\n\t\t:param learning_rate: learning rate of the network (C)\r\n\t\t:param momentum: momentum parameter (alpha)\r\n\t\t\"\"\"\r\n\t\tself.c = learning_rate\r\n\t\tself.alpha = momentum\r\n\t\tself.num_inputs = layers[0]\r\n\t\tself.num_outputs = layers[-1]\r\n\t\tself.num_layers = len(layers)\r\n\r\n\t\tself.weights = []\r\n\t\tself.biases = []\r\n\r\n\t\tfor i in range(1, self.num_layers):\r\n\t\t\tprev_outputs = layers[i-1]\r\n\t\t\tinit_parameter = 1.0 / prev_outputs\r\n\r\n\t\t\t# initialize n x m weight matrix using uniform initialization, each row is a node's input weights\r\n\t\t\tweight_mtx = np.random.uniform(-init_parameter, init_parameter, size=(layers[i], prev_outputs)) \r\n\r\n\t\t\t# n x 1 bias vector, each row is a node's bias\r\n\t\t\tbias_vector = np.zeros((layers[i], 1))\t\r\n\r\n\t\t\tself.weights.append(weight_mtx)\r\n\t\t\tself.biases.append(bias_vector)\r\n\r\n\r\n\tdef one_hot(self, d):\r\n\t\t\"\"\"transforms a numeric label into one-hot column vector\"\"\"\r\n\t\ty = np.zeros((self.num_outputs, 1))\r\n\t\ty[d] = 1\r\n\t\treturn y\r\n\r\n\r\n\tdef train(self, train_X, train_D, val_X=None, val_D=None, num_epochs=1):\r\n\t\t\"\"\"\r\n\t\ttrain the classifier, return list of validation accuracy per epoch\r\n\r\n\t\t:param train_X: feature matrix of training set\r\n\t\t:param trian_D: list of labels for supervised learning\r\n\t\t:param val_X: feature matrix of validation set\r\n\t\t:param val_D: label array for validation\r\n\t\t:param num_epochs: iterations over the training set\r\n\t\t\"\"\"\r\n\t\t# First convert training labels to one hot vectors\r\n\t\ttrain_Y = list(map(self.one_hot, train_D))\r\n\t\taccuracies = []\r\n\r\n\t\tfor epoch in range(num_epochs):\r\n\r\n\t\t\tfor x, y in zip(train_X, train_Y):\r\n\t\t\t\tself.update_SGD(x.reshape(1, self.num_inputs), y)\r\n\r\n\t\t\tif val_X is not None and val_D is not None:\r\n\t\t\t\tpredictions = self.test(val_X)\r\n\t\t\t\taccuracies.append(accuracy_score(val_D, predictions))\r\n\r\n\t\treturn accuracies\r\n\r\n\r\n\tdef test(self, test_X):\r\n\t\t\"\"\"return predicted labels for feature set\"\"\"\r\n\t\treturn list(map(self.predict, test_X))\r\n\r\n\r\n\tdef update_SGD(self, x, y):\r\n\t\t\"\"\"\r\n\t\tApplies a single step of stochastic gradient descent\r\n\r\n\t\t:param x: 1 x m feature vector from dataset\r\n\t\t:param y: one-hot target vector indicating desired label\r\n\t\t\"\"\"\r\n\t\tdel_w, del_b = self.backpropogate(x, y)\r\n\t\tself.weights = [w - self.c * dw + self.alpha * w for w, dw in zip(self.weights, del_w)]\r\n\t\tself.biases = [b - self.c * db + self.alpha * b for b, db in zip(self.biases, del_b)]\r\n\r\n\t\treturn None\r\n\r\n\r\n\tdef predict(self, x):\r\n\t\t\"\"\"\r\n\t\tgenerate a predicted label given feature vector x\r\n\t\t\"\"\"\r\n\t\tactivation = x.reshape(1, self.num_inputs).T\r\n\t\tfor w, b in zip(self.weights, self.biases):\r\n\t\t\tz = np.dot(w, activation) + b \r\n\t\t\tactivation = sigmoid(z)\r\n\r\n\t\treturn np.argmax(activation)\r\n\r\n\r\n\tdef backpropogate(self, x, y):\r\n\t\t\"\"\"\r\n\t\tApply the backpropogation algorithm to generate the deltas for weights\r\n\t\tand biases in each layer\r\n\r\n\t\t:param x: 1 x m 
feature vector from dataset\r\n\t\t:param y: one-hot target vector indicating desired label\r\n\t\t\"\"\"\r\n\t\t# These will hold the gradient of the cost function with respect to\r\n\t\t# weights and biases\r\n\t\tdel_w = [np.zeros(w.shape) for w in self.weights]\r\n\t\tdel_b = [np.zeros(b.shape) for b in self.biases]\r\n\r\n\t\t# Forward pass through the network\r\n\t\tactivation = x.T\r\n\t\tactivations = [activation]\r\n\t\tnet_inputs = []\r\n\r\n\t\tfor w, b in zip(self.weights, self.biases):\r\n\t\t\tz = np.dot(w, activation) + b\r\n\t\t\tnet_inputs.append(z)\r\n\t\t\tactivation = sigmoid(z)\r\n\t\t\tactivations.append(activation)\r\n\r\n\t\t# Error at output layer\r\n\t\toutput = self.one_hot(np.argmax(activations[-1]))\r\n\t\tdelta = self.cost_derivative(output, y) * sigmoid_prime(net_inputs[-1]) #dC/dz at output\r\n\t\tdel_b[-1] = delta #dC/db = dC/dz\r\n\t\tdel_w[-1] = np.dot(delta, activations[-2].T)\r\n\r\n\t\t# propogate error backwards layer by layer\r\n\t\tfor l in range(2, self.num_layers):\r\n\t\t\tz = net_inputs[-l]\r\n\t\t\tdelta = np.dot(self.weights[-l+1].T, delta) * sigmoid_prime(z)\r\n\t\t\tdel_b[-l] = delta\r\n\t\t\tdel_w[-l] = np.dot(delta, activations[-l-1].T)\r\n\r\n\t\treturn (del_w, del_b)\r\n\r\n\r\n\t@staticmethod\r\n\tdef cost_function(output_activations, y):\r\n\t\t\"\"\"use 1/2 of the square error as the cost function\"\"\"\r\n\t\treturn (1/2) * (y - output_activations) ** 2\r\n\r\n\t@staticmethod\r\n\tdef cost_derivative(output_activations, y):\r\n\t\t\"\"\"derivative of the L2 cost function with respect to network output\"\"\"\r\n\t\treturn (output_activations - y)\r\n\r\n\r\ndef sigmoid(z):\r\n\t\"\"\"\r\n\tThe sigmoid function\r\n\t\"\"\"\r\n\tz = np.clip(z, -500, 500)\r\n\treturn 1.0 / (1.0 + np.exp(-z))\r\n\r\ndef sigmoid_prime(z):\r\n\t\"\"\"\r\n\tderivative of the sigmoid function\r\n\t\"\"\"\r\n\treturn sigmoid(z) * (1 - sigmoid(z))\r\n","repo_name":"ben-the-hedgehog/backprop-network","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"70467895260","text":"# -*- coding: utf-8 -*-\n\n\ndef createArnoldTextureSettings():\n \"\"\"The patched version of the original file\"\"\"\n import pymel.core as pm\n import maya.cmds as cmds\n import pymel.versions as versions\n from mtoa.ui.globals import settings\n\n pm.setUITemplate(\"attributeEditorTemplate\", pushTemplate=True)\n pm.columnLayout(adjustableColumn=True)\n\n pm.attrControlGrp(\n \"autotx\",\n cc=settings.updateAutoTxSettings,\n label=\"Auto-convert Textures to TX (Disabled in Anima)\",\n attribute=\"defaultArnoldRenderOptions.autotx\",\n enable=False,\n )\n\n pm.attrControlGrp(\n \"use_existing_tiled_textures\",\n label=\"Use Existing TX Textures\",\n attribute=\"defaultArnoldRenderOptions.use_existing_tiled_textures\",\n )\n\n # disable autotx\n pm.setAttr(\"defaultArnoldRenderOptions.autotx\", 0)\n settings.updateAutoTxSettings()\n cmds.separator()\n\n # don't create texture_automip for 2017 as autoTx is ON by default\n maya_version = versions.shortName()\n if int(float(maya_version)) < 2017:\n pm.attrControlGrp(\n \"texture_automip\",\n label=\"Auto-mipmap\",\n attribute=\"defaultArnoldRenderOptions.textureAutomip\",\n )\n\n pm.attrControlGrp(\n \"texture_accept_unmipped\",\n label=\"Accept Unmipped\",\n attribute=\"defaultArnoldRenderOptions.textureAcceptUnmipped\",\n )\n\n cmds.separator()\n\n pm.checkBoxGrp(\n \"ts_autotile\", 
cc=settings.updateAutotileSettings, label=\"\", label1=\"Auto-tile\"\n )\n\n pm.connectControl(\"ts_autotile\", \"defaultArnoldRenderOptions.autotile\", index=2)\n\n pm.intSliderGrp(\n \"ts_texture_autotile\",\n label=\"Tile Size\",\n minValue=16,\n maxValue=64,\n fieldMinValue=16,\n fieldMaxValue=1024,\n )\n\n pm.connectControl(\n \"ts_texture_autotile\", \"defaultArnoldRenderOptions.textureAutotile\", index=1\n )\n pm.connectControl(\n \"ts_texture_autotile\", \"defaultArnoldRenderOptions.textureAutotile\", index=2\n )\n pm.connectControl(\n \"ts_texture_autotile\", \"defaultArnoldRenderOptions.textureAutotile\", index=3\n )\n\n \"\"\"pm.attrControlGrp('texture_autotile',\n label=\"Auto-tile Size\",\n attribute='defaultArnoldRenderOptions.textureAutotile')\"\"\"\n\n pm.attrControlGrp(\n \"texture_accept_untiled\",\n label=\"Accept Untiled\",\n attribute=\"defaultArnoldRenderOptions.textureAcceptUntiled\",\n )\n\n pm.attrControlGrp(\n \"texture_max_memory_MB\",\n label=\"Max Cache Size (MB)\",\n attribute=\"defaultArnoldRenderOptions.textureMaxMemoryMB\",\n )\n\n pm.attrControlGrp(\n \"texture_max_open_files\",\n label=\"Max Open Files\",\n attribute=\"defaultArnoldRenderOptions.textureMaxOpenFiles\",\n )\n\n cmds.separator()\n\n cmds.attrControlGrp(\n \"texture_diffuse_blur\",\n label=\"Diffuse Blur\",\n attribute=\"defaultArnoldRenderOptions.textureDiffuseBlur\",\n )\n\n # cmds.attrControlGrp('texture_glossy_blur',\n # label=\"Glossy Blur\",\n # attribute='defaultArnoldRenderOptions.textureGlossyBlur')\n\n pm.setParent(\"..\")\n\n pm.setUITemplate(popTemplate=True)\n","repo_name":"eoyilmaz/anima","sub_path":"anima/dcc/mayaEnv/config/arnold_patches.py","file_name":"arnold_patches.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"69"} +{"seq_id":"21412111324","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom data_module import data\nimport json\n\n\n\"\"\"\najax测试服务器\n\"\"\"\n\n\napp = Flask(__name__)\nCORS(app=app)\nport = 8001 # 配置端口\n\n\n@app.route(\"/\", methods=['post', 'get'])\ndef index():\n return \"hello world!\"\n\n\n@app.route(\"/\", methods=['post', 'get'])\ndef common_func(key):\n values = data.get(key)\n mes = {\"message\": \"success\"}\n if values is None:\n mes['message'] = \"not found!\"\n else:\n mes['data'] = values\n print(mes)\n return json.dumps(mes)\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=port, debug=True, threaded=True)\n","repo_name":"SYYDSN/py_projects","sub_path":"tools_box/ajax_server.py","file_name":"ajax_server.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"23611347604","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass Dice:\n max_die = 20\n\n help_str = '''\\\nx refers to the number of dice to roll\n\nx is an optional argument and when omitted will default to one\n\ny refers to the type of dice to roll\n\nOmitting the xdy argument defaults to rolling one d20\n\nMax number of rollable dice is {max_die}\n\nThe available types of die are d3, d4, d5, d6, d8, d10, and d20\n'''.format(max_die=max_die)\n\n available_die = [3, 4, 5, 6, 8, 10, 20, 100]\n\n def __init__(self, bot):\n self.bot = bot\n\n @commands.command(name='roll', help=help_str, brief='Roll some dice', aliases=['dice', 'r'])\n async def roll(self, xdy : str = '1d20'):\n try:\n num_dice, limit = 
xdy.split('d')\n if not num_dice:\n num_dice = 1\n else:\n num_dice = int(num_dice)\n limit = int(limit)\n except Exception as e:\n await self.bot.say('Invalid usage. Expected !roll xdy')\n else:\n if num_dice > Dice.max_die:\n await self.bot.say('The max number of die you can roll is {max_die}'.format(max_die=Dice.max_die))\n elif limit not in Dice.available_die:\n await self.bot.say('Invalid dice type')\n else:\n rolls = [random.randint(1, limit) for r in range(num_dice)]\n await self.bot.say('`[' + ']['.join(map(str, rolls)) + '] = ' + str(sum(rolls)) + '`')\n\n @roll.error\n async def roll_error(self, error, ctx):\n if isinstance(error, commands.BadArgument) or isinstance(error, commands.MissingRequiredArgument):\n await self.bot.say(error)\n\ndef setup(bot):\n bot.add_cog(Dice(bot))\n","repo_name":"Tharinis18/Dumbass-Bot","sub_path":"cogs/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"2674660769","text":"import json\nimport logging\nimport random\nimport os\n\nfrom nni.retiarii import Model, submit_models, wait_models\nfrom nni.retiarii.strategy import BaseStrategy\nfrom nni.retiarii import Sampler\n\n\n_logger = logging.getLogger(__name__)\n\nclass RandomSampler(Sampler):\n def choice(self, candidates, mutator, model, index):\n return random.choice(candidates)\n\nclass SimpleStrategy(BaseStrategy):\n def __init__(self):\n self.name = ''\n\n def run(self, base_model, applied_mutators, trainer):\n try:\n _logger.info('stargety start...')\n while True:\n model = base_model\n _logger.info('apply mutators...')\n _logger.info('mutators: {}'.format(applied_mutators))\n random_sampler = RandomSampler()\n for mutator in applied_mutators:\n _logger.info('mutate model...')\n mutator.bind_sampler(random_sampler)\n model = mutator.apply(model)\n # get and apply training approach\n _logger.info('apply training approach...')\n model.apply_trainer(trainer['modulename'], trainer['args'])\n # run models\n submit_models(model)\n wait_models(model)\n _logger.info('Strategy says:', model.metric)\n except Exception as e:\n _logger.error(logging.exception('message'))\n","repo_name":"luckygirlfyh/ConSK-GCN","sub_path":"Model/ConSK-GCN_MELD/nni-master/nni-master/test/retiarii_test/simple_strategy.py","file_name":"simple_strategy.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"} +{"seq_id":"1876997642","text":"import AuthenticationServices\nfrom PyObjCTools.TestSupport import TestCase, min_os_level\n\n\nclass TestASAuthorizationProviderExtensionAuthorizationRequest(TestCase):\n @min_os_level(\"11.0\")\n def test_constants11_0(self):\n self.assertIsInstance(\n AuthenticationServices.ASAuthorizationProviderAuthorizationOperationConfigurationRemoved,\n str,\n )\n\n @min_os_level(\"13.0\")\n def test_constants13_0(self):\n self.assertIsInstance(\n AuthenticationServices.ASAuthorizationProviderAuthorizationOperationDirectRequest,\n str,\n )\n\n @min_os_level(\"11.0\")\n def test_methods11_0(self):\n self.assertResultIsBOOL(\n AuthenticationServices.ASAuthorizationProviderExtensionAuthorizationRequest.isCallerManaged\n )\n\n @min_os_level(\"12.3\")\n def test_methods12_3(self):\n self.assertResultIsBOOL(\n AuthenticationServices.ASAuthorizationProviderExtensionAuthorizationRequest.isUserInterfaceEnabled\n 
)\n","repo_name":"ronaldoussoren/pyobjc","sub_path":"pyobjc-framework-authenticationservices/pyobjctest/test_asauthorizationproviderextensionauthorizationrequest.py","file_name":"test_asauthorizationproviderextensionauthorizationrequest.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":439,"dataset":"github-code","pt":"69"} +{"seq_id":"1423132890","text":"import matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.rcParams['font.family'] = 'montserrat'\nfig, ax = plt.subplots()\ndata = []\n#d = float(input('ведите частоту отображений маркеров: '))\n# Запись данных из файлов\nwith open('data.txt', 'r') as f:\n data = list(map(int, f.readlines()))\nwith open('settings.txt', 'r') as f:\n ch_disc = float(f.readline())\n shag_kvant = float(f.readline())\n zar_time = round(float(f.readline()), 2)\n raz_time = round(float(f.readline()), 2)\nall_time = zar_time + raz_time\n\n'''if d != 1:\n ost = int(len(data) * d)\n ybr = len(data) - ost\n sh = int(len(data) / ybr)\n print(sh)\n print(ost)\n print(ybr)\n i = sh\n while i < len(data):\n data.pop(i)\n print(data)\n i += sh\n'''\n\nif data != []:\n # Построение графика\n x = [i * all_time / len(data) for i in range(len(data))]\n y = [i / 256 * 3.3 for i in data]\n plt.plot(x, y, c='blue', label='V(t)', linewidth=2 )\n plt.scatter(x, y, s=25, c='blue', marker='o')\n plt.xlabel('Время, с', fontsize=20)\n plt.ylabel('Напряжение, В', fontsize=20)\n ax.minorticks_on()\n ax.grid(True, which='both')\n ax.grid(which='major', color='k', linewidth=1)\n ax.grid(which='minor', color='k', linestyle=':')\n plt.title('Процесс заряда и разряда конденсатора в RC-цепочке ', fontsize=33, wrap=True, pad=20)\n plt.legend(loc = 'upper right', ncol=20, prop={'size': 30})\n plt.axis([round(min(x), 1), round(max(x), 1), round(min(y), 1), round(max(y), 1)])\n ax.text(40, 2, 'Время заряда = {} с \\n\\nВремя разряда = {} с'.format(zar_time, raz_time), fontsize=20)\n plt.show()\n\n\n","repo_name":"MordvinovaA/get","sub_path":"graf-7.1.py","file_name":"graf-7.1.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7059562521","text":"import tkinter as tk\nimport cv2\nfrom PIL import Image, ImageTk\nimport numpy as np\nfrom Recognition.FaceRecognition import FaceRecognition\nfrom Model.Account import Account\nfrom View.FaceListFrame import FaceListFrame\nfrom View.MyVideoCapture import MyVideoCapture\nfrom Model.AttendanceLog import AttendanceLog\nimport config\nimport threading\n\n\nclass UI:\n def __init__(self, window, windowTitle, videoSource=0):\n self.window = window\n self.window.title(windowTitle)\n self.videoSource = videoSource\n self.data = UI.getData()\n\n self.frameToShow = None\n self.frameToPredict = None\n self.photo = None\n self.net = cv2.dnn.readNetFromCaffe(\"Recognition/face_detector/deploy.prototxt\",\n \"Recognition/face_detector/res10_300x300_ssd_iter_140000.caffemodel\")\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n self.vid = MyVideoCapture(videoSource)\n self.canvas = tk.Canvas(window, width=self.vid.width, height=self.vid.height)\n self.canvas.pack()\n self.faceFrame = FaceListFrame(self.window)\n self.canvasFaces = self.faceFrame.canvasFaces\n self.nameLabels = self.faceFrame.nameLabels\n\n self.faces = []\n self.labels = []\n self.delay = 20\n self.predictThread = threading.Thread(target=self.predict)\n # self.showThread = 
threading.Thread(target=self.updateFrame)\n self.predictThread.start()\n self.updateFrame()\n self.window.mainloop()\n\n @staticmethod\n def getData():\n Account.update(\"\")\n dicts = Account.getFaces()\n return dicts\n\n def updateFrame(self): \n if self.frameToShow is not None:\n self.photo = ImageTk.PhotoImage(image=Image.fromarray(self.frameToShow))\n self.canvas.create_image(0, 0, image=self.photo, anchor=tk.NW)\n\n for i in range(len(self.faces)):\n self.canvasFaces[i].create_image(90, 90, image=self.faces[i])\n self.nameLabels[i].config(text=str(self.labels[i]))\n\n self.window.after(self.delay, self.updateFrame)\n\n def predict(self):\n while True:\n ret, frame = self.vid.getFrame()\n self.frameToPredict = frame\n bboxes = self.detectFace(self.frameToPredict)\n print(len(bboxes))\n listLabels = FaceRecognition.predictLabels(bboxes, self.data, self.frameToPredict)\n self.frameToShow= self.frameToPredict\n for i in range(len(bboxes)):\n box = bboxes[i]\n (startX, startY, endX, endY) = box\n self.frameToShow = cv2.rectangle(self.frameToShow, (startX, startY), (endX, endY), (0, 255, 0), 2)\n face = self.frameToPredict[startY:endY, startX:endX]\n studentId = ''\n if listLabels[i] == config.STRANGER_LABEL:\n name = config.STRANGER_LABEL\n else:\n name, studentId = FaceRecognition.getIdName(listLabels[i])\n\n print(i, name)\n if face.shape[0] > 100 and name != config.STRANGER_LABEL and name not in self.labels:\n self.faces.append(ImageTk.PhotoImage(image=Image.fromarray(cv2.resize(face, (150, 200)))))\n self.labels.append(name)\n UI.saveAttendanceRecord(face, studentId, name)\n\n if len(self.faces) > config.NUM_FACES:\n self.faces.pop(0)\n self.labels.pop(0)\n \n\n @staticmethod\n def saveAttendanceRecord(face, studentId, name):\n path = FaceRecognition.saveFace(face, studentId, config.IMAGE_FOLDER)\n AttendanceLog.save(studentId, path)\n FaceRecognition.voice(name, config.SOUND_FOLDER)\n AttendanceLog.send()\n\n def detectFace(self, frame):\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0))\n # Phat hien khuon mat\n self.net.setInput(blob)\n detections = self.net.forward()\n listBbox = []\n # listFaces = []\n # Loop qua cac khuon mat\n for i in range(0, detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n\n # Neu conf lon hon threshold\n if confidence > 0.75:\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n listBbox.append(box.astype(\"int\"))\n\n return listBbox\n\n","repo_name":"fancoltran/facerecognizer","sub_path":"View/UIMultithread.py","file_name":"UIMultithread.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5518383394","text":"#!/usr/bin/env python3\n#\n# script -c \"./t48a.py\" /dev/null | ./t48b.py\n#\n\nimport time\nimport random\n\ndef output():\n for i in range(15):\n print(random.randint(1, 101))\n time.sleep(3)\n\nif __name__ == \"__main__\":\n output()\n","repo_name":"ckatsak/junkcode","sub_path":"t048/t48a.py","file_name":"t48a.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22865679671","text":"__version__ = \"1.0.3\"\n\nfrom argparse import ArgumentParser\nfrom PIL import Image\n\nCOLORS = ( (0, 0, 0),\n (0, 0, 205), (0, 0, 255),\n (205, 0, 0), (255, 0, 0),\n (205, 0, 205), (255, 0, 255),\n (0, 205, 0), (0, 255, 0),\n (0, 205, 205), (0, 255, 255),\n (205, 
205, 0), (255, 255, 0),\n (205, 205, 205), (255, 255, 255),\n )\n\nATTR_I = ( 0x00, 0x01, 0x01 | 0x40, 0x02, 0x02 | 0x40,\n 0x03, 0x03 | 0x40, 0x04, 0x04 | 0x40, 0x05, 0x05 | 0x40,\n 0x06, 0x06 | 0x40, 0x07, 0x07 | 0x40,)\n\nATTR_P = ( 0x00, 0x08, 0x08 | 0x40, 0x10, 0x10 | 0x40,\n 0x18, 0x18 | 0x40, 0x20, 0x20 | 0x40, 0x28, 0x28 | 0x40,\n 0x30, 0x30 | 0x40, 0x38, 0x38 | 0x40,)\n\nC2I = dict(zip(COLORS, ATTR_I))\nC2P = dict(zip(COLORS, ATTR_P))\n\nBASE = 128\n\ndef main():\n\n parser = ArgumentParser(description=\"PNG to Spectrum SCR converter\",\n epilog=\"Copyright (C) 2014-2016 Juan J Martinez \",\n )\n\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s \" + __version__)\n parser.add_argument(\"image\", help=\"image to convert\")\n\n args = parser.parse_args()\n\n try:\n image = Image.open(args.image)\n except IOError:\n parser.error(\"failed to open the image\")\n\n (w, h) = image.size\n\n if w != 256 or h != 192:\n parser.error(\"image size must be 256x192\")\n\n if not isinstance(image.getpixel((0, 0)), tuple):\n parse.error(\"only RGB(A) images are supported\")\n\n # so we support both RGB and RGBA images\n data = list(zip(list(image.getdata(0)), list(image.getdata(1)), list(image.getdata(2))))\n\n for c in data:\n if c not in COLORS:\n parser.error(\"invalid color %r in image\" % (c,))\n\n pixels = []\n attrib = []\n for y in range(0, h, 8):\n for x in range(0, w, 8):\n byte = []\n attr = []\n for j in range(8):\n row = 0\n for i in range(8):\n if not attr:\n attr.append(data[x + i + (j + y) * w])\n if data[x + i + (j + y) * w] != attr[0]:\n row |= 1 << (7 - i)\n if data[x + i + (j + y) * w] not in attr:\n attr.append(data[x + i + (j + y) * w])\n byte.append(row)\n\n if len(attr) > 2:\n parser.error(\"more than 2 colors in an attribute block in (%d, %d)\" % (x, y))\n elif len(attr) != 2:\n # if only one colour, try to find a match in an adjacent cell\n if attrib:\n prev_attr = attrib[-1]\n if prev_attr[0] == attr[0]:\n attr.append(prev_attr[1])\n if len(attr) != 2:\n attr.append(COLORS[0])\n\n # improve compression ratio\n if C2P[attr[0]] > C2I[attr[1]]:\n attr[0], attr[1] = attr[1], attr[0]\n byte = [~b & 0xff for b in byte]\n\n pixels.extend(byte)\n attrib.append(attr)\n\n attrib = [(C2P[attr[0]] | C2I[attr[1]]) for attr in attrib]\n\n interlaced = []\n for block in range(3):\n for col in range(8):\n for row in range(8):\n for line in range(32):\n interlaced.append(pixels[block * 8 * 8 * 32 \\\n + row * 32 * 8 \\\n + line * 8 \\\n + col])\n\n output = args.image + \".scr\"\n\n with open(output, \"wb\") as fh:\n fh.write(bytearray(interlaced))\n fh.write(bytearray(attrib))\n\n print(\"%r created\" % output)\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"daad-adventure-writer/daad","sub_path":"Deprecated/TAPMAST/png2scr.py","file_name":"png2scr.py","file_ext":"py","file_size_in_byte":3742,"program_lang":"python","lang":"en","doc_type":"code","stars":70,"dataset":"github-code","pt":"69"} +{"seq_id":"35315096509","text":"from odoo.http import request\nfrom odoo import fields, models, SUPERUSER_ID\n\n\nclass Page(models.Model):\n _inherit = \"website.page\"\n\n group_ids = fields.Many2many(\n \"res.groups\",\n string=\"Visible Groups\",\n help=(\n \"The user needs to be in at least one of these groups for the redirect to\"\n + \" have effect\"\n ),\n )\n\n def _compute_visible(self):\n super()._compute_visible()\n if self.env.user.id != SUPERUSER_ID:\n for record in self:\n if record.group_ids and record.is_visible:\n record.is_visible = any(\n gid 
in request.env.user.groups_id.ids\n for gid in record.group_ids.ids\n )\n","repo_name":"ayudoo/odoo_business_relationships","sub_path":"website_user_types/models/website_page.py","file_name":"website_page.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"69"} +{"seq_id":"7454050967","text":"# 二分查找练习,递归实现\n# 当前查找范围的首元素和尾元素下标值(left,right)\ndef binarysearch(value,key,left,right):\n # 递归的退出条件\n if left > right:\n # 查找结束,为找到\n return -1\n # 获取中间元素对应下标值\n middle = (left + right) // 2\n # 对比中间元素与查找元素\n if value[middle] == key:\n # 查找成功\n return middle\n elif value[middle] > key:\n # 若中间元素大于待查找元素值则在左侧继续查找\n # 查找范围减半:左侧下标值不变,右侧下标值变为middle-1 \n return binarysearch(value,key,left,middle-1)\n else: \n # 若中间元素小于待查找元素值,则在右侧继续查找\n # 查找范围减半:右侧下标值不变,左侧下标值变为middle+1\n return binarysearch(value,key,middle+1,right)\nif __name__ == \"__main__\":\n # 原始数据\n value = [1,2,3,4,5,6,7,8,9,10,11,12,13]\n # 待查找数据\n key = 6\n res = binarysearch(value,key,0,len(value)-1)\n if res == -1:\n print(\"查找失败\")\n print(\"查找成功,是第%d张\"%(res+1))","repo_name":"suprviserpy632157/zdy","sub_path":"ZDY/Feb_all/sort_and_calculate/poker_half_find.py","file_name":"poker_half_find.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"20839125180","text":"from os import abort\nimport flask\nfrom flask import request, jsonify\nfrom scipy.stats import norm\napp = flask.Flask(__name__)\napp.config[\"DEBUG\"] = True\n\n\n@app.route('/', methods=['GET'])\ndef home():\n # define distribution parameters\n if 'mu' in request.args:\n mu = float(request.args['mu'])\n x = int(request.args['x'])\n sigma = float(request.args['sigma'])\n # create distribution\n dist = norm(mu, sigma)\n result = dist.cdf(x)\n print((result))\n return str(round(result,2))\n else:\n return \"Nothing Found\"\n\napp.run()","repo_name":"abdulrehman25/cdf-calculator-flask-api","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7034115003","text":"from abc import ABC\nfrom tensorflow.keras import Sequential, layers, Model, losses\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nfrom icecream import ic\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom typing import Optional, Tuple, List\nfrom tensorflow.python.keras.layers import Lambda\n\n\ndef tf_dataset_itr(tf_ds: tf.data.Dataset):\n for x_batch, y_batch in tf_ds:\n for x, y in zip(x_batch, y_batch):\n yield x, y\n\n\ndef rounded_accuracy(y_true, y_pred):\n \"\"\"\n Get the offset mean absolute error of rounded true and prediction values from 100%.\n value 1.0 -> Zero mean absolute error\n :param y_true: true/Ground values\n :param y_pred: prediction values\n :return: rounded mean absolute error metric\n \"\"\"\n return 1 - tf.keras.metrics.mean_absolute_error(tf.round(y_true), tf.round(y_pred))\n\n\ndef plot_confusion_matrix(model: tf.keras.Model,\n dataset: tf.data.Dataset,\n prediction_function: callable = np.argmax):\n ds = dataset.unbatch()\n y_true = np.array([y for _, y in ds.unbatch().as_numpy_iterator()])\n y_pred = [model.predict(x) for x, _ in ds.unbatch().as_numpy_iterator()]\n y_pred = np.array(map(prediction_function, y_pred))\n cm = tf.math.confusion_matrix(y_true, y_pred)\n cm = pd.DataFrame(np.array(cm))\n sns.heatmap(cm, annot=True)\n\n\ndef 
get_x_shape(tf_ds):\n return np.squeeze(next(tf_dataset_itr(tf_ds))[0]).shape\n\n\ndef ds_x_data(tf_ds):\n return np.array([x for x, _ in tf_ds.as_numpy_iterator()])\n\n\ndef ds_y_data(tf_ds):\n return np.array([y for _, y in tf_ds.as_numpy_iterator()])\n\n\ndef get_random_sample(tf_ds: tf.data.Dataset,\n sample_size: int = 1):\n \"\"\"\n Get a random sample from tf.data.Dataset\n :param tf_ds: tf.data.Dataset object\n :param sample_size: sample size\n :return: random sample of x_data and y_data as numpy.array()\n \"\"\"\n ds = tf_ds.shuffle(1024)\n x_data = np.array([x for x, _ in ds.take(sample_size)])\n y_data = np.array([y for _, y in ds.take(sample_size)])\n return x_data, y_data\n\n\ndef train_test_dataset_spilt(tf_ds: tf.data.Dataset,\n split: float = 0.2,\n batch_size: int = 32,\n dataset_length: int = None) \\\n -> Tuple[tf.data.Dataset, tf.data.Dataset]:\n \"\"\"\n Splits an Exsiting tf.Data.Dataset into Train and Test tf.data.Datasets\n :param tf_ds: The tf.Data.Dataset object\n :param split: test data fraction (0.0 < split < 1.0)\n :param batch_size: Batch size\n :param dataset_length: Defaults to len(tf_ds), otherwise can specify here.\n :return:\n \"\"\"\n assert 0.0 < split < 1.0\n assert batch_size > 0\n ds = tf_ds.shuffle(1024).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)\n n = len(ds) if dataset_length is None else dataset_length\n test_ds = ds.take(int(split * n)).shuffle(1024, reshuffle_each_iteration=True).prefetch(\n tf.data.experimental.AUTOTUNE)\n train_ds = ds.skip(int(split * n)).take(int((1 - split) * n)).shuffle(1024, reshuffle_each_iteration=True).prefetch(\n tf.data.experimental.AUTOTUNE)\n return train_ds, test_ds\n\n\ndef relative_tensor(tensor: tf.float64, row: int):\n return tf.concat([tensor[:row], tensor[row + 1:]], axis=0)\n\n\ndef relative_variance(tensor: tf.float64, axis: int = 0):\n if axis > 2:\n raise ValueError('axis must be 0->rows or 1->columns.')\n elif axis == 1:\n t = tf.transpose(tensor)\n else:\n t = tensor\n trr_rows = []\n for row in range(tensor.shape[0]):\n trr_row = relative_tensor(t, row)\n trr_row = tf.math.reduce_variance(trr_row, axis=1)\n trr_row = tf.expand_dims(trr_row, axis=1)\n trr_rows.append(trr_row)\n trr = tf.concat(trr_rows, axis=1)\n trr = tf.squeeze(trr)\n return tf.transpose(trr) if axis == 1 else trr\n\n\ndef tensor_minmax_scaler(tensor):\n min_val = tf.math.reduce_min(tensor)\n max_val = tf.math.reduce_max(tensor)\n return (tensor - min_val) / max_val\n\n\ndef tensor_standard_scaler(tensor):\n mu = tf.math.reduce_mean(tensor)\n std = tf.math.reduce_std(tensor)\n return (tensor - mu) / std\n\n\ndef variance_outlier_extraction(tensor):\n tensor = tensor_minmax_scaler(tensor)\n t_row = relative_variance(tensor)\n t_col = relative_variance(tensor, axis=1)\n trc = tf.math.reduce_min(tf.concat([\n tf.expand_dims(t_row, axis=2),\n tf.expand_dims(t_col, axis=2),\n ],\n axis=2),\n axis=2)\n trc = tf.expand_dims(trc, axis=2)\n trc = tf.image.flip_up_down(trc)\n return tensor_minmax_scaler(tensor)\n\n\ndef variance_outlier_extraction_layer():\n return tf.keras.layers.Lambda(variance_outlier_extraction)\n\n\ndef time2vec(input_dim: int, output_dim: int, name: str = 'Time2Vec', **kwargs):\n \"\"\"\n Time2Vec Encoding outputting Vector Representation of Time\n Citation:\n URL https://arxiv.org/abs/1907.05321\n @misc{https://doi.org/10.48550/arxiv.1907.05321,\n doi = {10.48550/ARXIV.1907.05321},\n url = {https://arxiv.org/abs/1907.05321},\n author = {Kazemi, Seyed Mehran and Goel, Rishab and Eghbali, Sepehr and 
Ramanan, Janahan and Sahota,\n Jaspreet and Thakur, Sanjay and Wu, Stella and Smyth, Cathal and Poupart, Pascal and Brubaker, Marcus},\n keywords = {Machine Learning (cs.LG), FOS: Computer and information sciences, FOS: Computer and information sciences},\n title = {Time2Vec: Learning a Vector Representation of Time},\n publisher = {arXiv},\n year = {2019},\n copyright = {Creative Commons Attribution Non-Commercial Share Alike 4.0 International}\n }\n :param input_dim: Size of the input (batch, input_dim)\n :param output_dim: Size of the output (batch, output_dim)\n :param name: Keras Layer name\n :param kwargs: tf.keras.Model() keywords\n :return: (batch, output_dim) Vector Representation of Time\n \"\"\"\n # tou (batch, signal)\n # y0 = w0 . t + phi0 ; k=0\n tou = tf.keras.layers.Input((input_dim,))\n y0 = tf.keras.layers.Dense(1, activation=None)(tou)\n # y = sin( W . t + Phi ); 0 implement whatever makes sense in your environment below.'''\n # ==========================================================================\n\n if os.path.isfile(os.path.join(RESUME_PATH, CHECKPOINT_filename)):\n logger.info('time is up, back to queue')\n SCRIPT_PATH = sys.argv[0]\n # USERTODO : Implement command to launch a new job resuming this one\n # command =\n # ==========================================================================\n\n logger.info('Executing %s' % command)\n if os.system(command):\n raise RuntimeError('launch failed')\n time.sleep(3)\n logger.info('New job submitted to the queue, saving checkpoint')\n return\n\n''' Install signal handler\n'''\nsignal.signal(signal.SIGUSR1, signalHandler)\n\nfeat_ind = {\n 'fpn_res5_2_sum': 0,\n 'fpn_res4_5_sum': 1,\n 'fpn_res3_3_sum': 2,\n 'fpn_res2_2_sum': 3\n}\n\n#-------------------------------------------------------------------------------\n# Elementary functions\ndef prepareMultiscaleForForwardOnGpu(*tensors, **kwargs):\n assert 'nb_scales' in kwargs.keys()\n if 'gpu_id' not in kwargs.keys():\n kwargs['gpu_id'] = 0\n rslt = []\n def prepareTensor(tensor, gpu_id):\n return Variable(tensor.cuda(gpu_id), requires_grad = False)\n for ind, tens in enumerate(tensors):\n rslt.append({})\n assert isinstance(tens, dict), \\\n 'No other cases considered for multiscale for now.'\n\n for k, v in tens.items():\n rslt[ind][k] = []\n for sc in range(kwargs['nb_scales'][feat_ind[k]]):\n rslt[ind][k].append(prepareTensor(v[sc], gpu_id = kwargs['gpu_id']))\n return rslt\n\n\ndef resetTrainStatsSingleFrameMultiscale(opt):\n rstats = {}\n levelNames = ['fpn_res5_2_sum', 'fpn_res4_5_sum', 'fpn_res3_3_sum', 'fpn_res2_2_sum']\n for l in range(opt['FfpnLevels']):\n lev = levelNames[l]\n for loss_type in opt['loss_features']:\n for sc in range(opt['nb_scales'][l]):\n rstats['train_%s-%s-%s' % (lev, loss_type, sc)] = []\n\n return rstats\n\n\ndef resetValStatsSingleFrameMultiscale(opt):\n rstats = {}\n levelNames = ['fpn_res5_2_sum', 'fpn_res4_5_sum', 'fpn_res3_3_sum', 'fpn_res2_2_sum']\n for l in range(opt['FfpnLevels']):\n lev = levelNames[l]\n for loss_type in opt['loss_features']:\n for sc in range(opt['nb_scales'][l]):\n rstats['val_%s-%s-%s' % (lev, loss_type, sc)] = []\n\n return rstats\n\n\ndef resetTrainProgressMultiscale(opt, train_loader, stats):\n runningTrainLoss = 0.0\n train_loader.reset(reshuffle = True)\n # Stats\n for t in range(opt['n_target_frames']):\n stats['t+%d' % (t+1)] = {}\n stats['t+%d' % (t+1)].update(resetTrainStatsSingleFrameMultiscale(opt))\n stats['train_ae_loss_values'] = []\n return runningTrainLoss\n\n\ndef 
resetValProgressMultiscale(opt, val_loader, stats):\n totalValLoss = 0.0\n ctValIt = 0\n val_loader.reset()\n # Stats\n for t in range(opt['n_target_frames']):\n if not stats.has_key('t+%d' % (t+1)): stats['t+%d' % (t+1)] = {}\n stats['t+%d' % (t+1)].update(resetValStatsSingleFrameMultiscale(opt))\n stats['val_ae_loss_values'] = []\n return totalValLoss, ctValIt\n\ndef reshapeMultiscaleTargetsForCriterion(targets, nT, nb_feat, nb_scales):\n seq_targets = []\n for t in range(nT):\n rtargets = {}\n for k, v in targets.items():\n rtargets[k] = []\n for sc in range(nb_scales[feat_ind[k]]):\n assert v[sc].dim() == 4\n assert v[sc].size(1) == nT * nb_feat\n st, en = t * nb_feat, (t+1) * nb_feat\n rtargets[k].append(v[sc][:, st:en, :, :])\n seq_targets.append(rtargets)\n return seq_targets\n\ndef updateTrainProgress(opt, runningTrainLoss, lossdata, loss_terms, stats, i, rtl_period, epoch):\n stats['train_ae_loss_values'].append(lossdata)\n for kt, vt in enumerate(loss_terms):\n for ks, vs in vt.items() :\n stats['t+%d' % (kt+1)]['train_'+ks].append(vs)\n runningTrainLoss += lossdata\n if i % rtl_period == (rtl_period -1):\n avgRunningTrainLoss = runningTrainLoss / rtl_period\n logger.info('[%d, %5d] running train loss: %.3f' %\n (epoch + 1, i + 1, avgRunningTrainLoss))\n runningTrainLoss = 0.0\n\n return runningTrainLoss\n\ndef updateValProgress(totalValLoss, ctValIt, lossdata, loss_terms, stats, epoch, i, rtl_period):\n stats['val_ae_loss_values'].append(lossdata)\n for kt, vt in enumerate(loss_terms):\n for ks, vs in vt.items() :\n stats['t+%d' % (kt+1)]['val_'+ks].append(vs)\n totalValLoss += lossdata\n ctValIt += 1\n if i % rtl_period == (rtl_period -1):\n avgValLoss = totalValLoss / ctValIt\n logger.info('[%d, %5d] mean validation loss: %.3f' %\n (epoch + 1, i + 1, avgValLoss))\n return totalValLoss, ctValIt\n\n\ndef checkIsBest(totalValLoss, ctValIt, bestModelPerf=None):\n current_val = - totalValLoss/ctValIt\n sigma = 0.001\n logger.info('Current val : %.3f' % current_val)\n if bestModelPerf is None:\n bestModelPerf = current_val\n logger.info(\"Self bestModelPerf : %.3f\" % bestModelPerf)\n return False, bestModelPerf\n else:\n if current_val > bestModelPerf + sigma:\n bestModelPerf = current_val\n logger.info(\"Self bestModelPerf : %.3f\" % bestModelPerf)\n return True, bestModelPerf\n else:\n logger.info(\"Self bestModelPerf : %.3f\" % bestModelPerf)\n return False, bestModelPerf\n\n\ndef format_variable_length_multiscale_sequence(outputs, ffpn_levels, nT, nb_scales):\n \"\"\" Only implemented in case single feature training...\"\"\"\n find_feature_by_dim = {\n 32 : 'fpn_res5_2_sum', 64 : 'fpn_res4_5_sum',\n 128 : 'fpn_res3_3_sum', 256 : 'fpn_res2_2_sum'}\n seq_outputs = []\n assert len(outputs) == nT * ffpn_levels\n current_frame = 0\n feat = None\n for f, out in enumerate(outputs):\n if len(seq_outputs) == current_frame: seq_outputs.append({})\n if feat is None: feat = find_feature_by_dim[out[-1].size(2)]\n assert len(out) == nb_scales[feat_ind[feat]]\n assert find_feature_by_dim[out[-1].size(2)] == feat\n seq_outputs[current_frame][feat] = out\n current_frame +=1\n if (f+1)%nT == 0:\n current_frame = 0\n feat = None\n return seq_outputs\n\n#-------------------------------------------------------------------------------\n# Main functions\ndef train_multiscale(opt, model, train_loader, criterion, optimizer, epoch, stats, best_prec1, start_iter = 0):\n global SIGNAL_RECEIVED\n from detectron.utils.timer import Timer\n t = Timer()\n model.train()\n\n runningTrainLoss = 
resetTrainProgressMultiscale(opt, train_loader, stats)\n rtl_period = max(5, int(len(train_loader)/1))\n logger.info('-------------------------- Training epoch #%d --------------------------' % (epoch+1))\n t.tic()\n # set the variables for signal_handler\n global RESUME_PATH, NUM_GPUS\n RESUME_PATH = opt['save']\n NUM_GPUS = opt['gpu_id'] + 1 # relies assumption that the model uses the last GPU\n\n for i, data in enumerate(train_loader):\n # Skip the iterations included in the checkpoint\n if i < start_iter: continue\n\n # Get and prepare data\n inputs, targets, _ = data\n inputs, targets = prepareMultiscaleForForwardOnGpu(inputs, targets, **{'gpu_id' : opt['gpu_id'], 'nb_scales': opt['nb_scales']})\n targets = reshapeMultiscaleTargetsForCriterion(targets, opt['n_target_frames'], opt['nb_features'], opt['nb_scales'])\n # Optimization\n optimizer.zero_grad()\n ffpnlevels = 1 if opt['train_single_level'] else opt['FfpnLevels']\n outputs = format_variable_length_multiscale_sequence(model(inputs), ffpnlevels, opt['n_target_frames'], opt['nb_scales'])\n loss, loss_terms = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n # Update progress\n runningTrainLoss = updateTrainProgress(opt, runningTrainLoss, loss.item(), loss_terms, stats, i, rtl_period, epoch)\n\n if SIGNAL_RECEIVED:\n save_checkpoint({\n 'epoch': epoch,\n 'iter': i+1,\n 'opt_path': os.path.join(opt['logs'], 'params.pkl'),\n 'state_dict': model.state_dict(),\n 'best_prec1': best_prec1,\n 'optimizer' : optimizer.state_dict(),\n }, False, savedir = opt['save'])\n logger.info('Saved checkpoint before exiting peacefully for job requeuing')\n exit(0)\n del loss, inputs, outputs, targets, loss_terms\n t.toc() ; t.tic()\n if i >= (opt['it']-1) : break\n print('Training iteration average duration : %f' % t.average_time)\n\n\ndef val_multiscale(opt, model, val_loader, criterion, epoch, stats, bestModelPerf, optimizer):\n global SIGNAL_RECEIVED\n from detectron.utils.timer import Timer\n t = Timer()\n model.eval()\n totalValLoss, ctValIt = resetValProgressMultiscale(opt, val_loader, stats)\n rtl_period = max(5, int(len(val_loader)/1))\n t.tic()\n coco_cityscapes_dataset = val_loader.data_source.dataset.dataset.dataset\n json_classes = coco_cityscapes_dataset.classes\n with torch.no_grad():\n for i, data in enumerate(val_loader):\n # Get and prepare data\n inputs, targets, seqIDs = data\n inputs, targets = prepareMultiscaleForForwardOnGpu(inputs, targets, **{'gpu_id' : opt['gpu_id'], 'nb_scales': opt['nb_scales']})\n targets = reshapeMultiscaleTargetsForCriterion(targets, opt['n_target_frames'], opt['nb_features'], opt['nb_scales'])\n # Evaluation\n ffpnlevels = 1 if opt['train_single_level'] else opt['FfpnLevels']\n outputs = format_variable_length_multiscale_sequence(model(inputs), ffpnlevels, opt['n_target_frames'], opt['nb_scales'])\n loss, loss_terms = criterion(outputs, targets)\n # Update progress\n totalValLoss, ctValIt = updateValProgress(totalValLoss, ctValIt, loss.item(), loss_terms, stats, epoch, i, rtl_period)\n t.toc() ; t.tic()\n if SIGNAL_RECEIVED:\n save_checkpoint({\n 'epoch': epoch + 1,\n 'iter': 0,\n 'opt_path': os.path.join(opt['logs'], 'params.pkl'),\n 'state_dict': model.state_dict(),\n 'best_prec1': bestModelPerf,\n 'optimizer' : optimizer.state_dict(),\n }, False, savedir = opt['save'])\n logger.info('Saved checkpoint before exiting peacefully for job requeuing')\n exit(0)\n del loss, inputs, outputs, targets, loss_terms\n if i >= (opt['it']-1) : break\n logger.info('Validation iteration average 
duration : %f' % t.average_time)\n\n return checkIsBest(totalValLoss, ctValIt, bestModelPerf=bestModelPerf)\n\n\ndef save(model, optimizer, epoch, entireSetOptions, stats, isBestModel, bestModelPerf):\n nEs = entireSetOptions['nEpocheSave']\n logger.info('Saving results to %s' % entireSetOptions['save'])\n logger.info('Saving model to '+entireSetOptions['save'] + 'model_%dep.net' % (epoch+1))\n torch.save(model.state_dict(), entireSetOptions['save'] + 'model_%dep.net' % (epoch+1))\n save_checkpoint({\n 'epoch': epoch + 1,\n 'iter': 0,\n 'opt_path': os.path.join(entireSetOptions['logs'], 'params.pkl'),\n 'state_dict': model.state_dict(),\n 'best_prec1': bestModelPerf,\n 'optimizer' : optimizer.state_dict(),\n },\n isBestModel,\n savedir = entireSetOptions['save'])\n train_mean_ae_loss = np.mean(stats['train_ae_loss_values'])\n val_mean_ae_loss = np.mean(stats['val_ae_loss_values'])\n logger.info('Mean autoencoder loss throughout training epoch: %.5f' % train_mean_ae_loss)\n logger.info('Mean autoencoder loss of validation epoch: %.5f' % val_mean_ae_loss)\n\n logs = dict([('n_epoch', epoch+1)])\n for k, v in stats.items() :\n if isinstance(v, dict):\n for kv, vv in v.items():\n logs['_'.join((k, kv))] = np.mean(vv)\n else:\n logs[k] = np.mean(v)\n\n logger.info(\"__log__:%s\" % json.dumps(logs))\n","repo_name":"facebookresearch/instpred","sub_path":"autoregressive_training.py","file_name":"autoregressive_training.py","file_ext":"py","file_size_in_byte":13383,"program_lang":"python","lang":"en","doc_type":"code","stars":78,"dataset":"github-code","pt":"69"} +{"seq_id":"9483104451","text":"import flask\nimport base64\nimport json\nimport time\nimport couchdbkit\nfrom werkzeug.contrib.cache import SimpleCache as Cache\nfrom xml.sax.saxutils import escape as htmlescape\nfrom habitat import uploader\nfrom . import couch_to_xml\nfrom habitat.utils.startup import load_config, setup_logging\n\n# Monkey patch float precision\njson.encoder.FLOAT_REPR = lambda o: format(o, '.5f')\n\napp = flask.Flask(\"habitat_transition.app\")\ncache = Cache(threshold=10, default_timeout=60)\n\n# Load config here :S ?\n# N.B.: Searches working directory since it won't be specified in argv.\n# Configure uwsgi appropriately.\nconfig = load_config()\nsetup_logging(config, \"transition_app\")\ncouch_settings = {\"couch_uri\": config[\"couch_uri\"],\n \"couch_db\": config[\"couch_db\"]}\n\n@app.route(\"/\")\ndef hello():\n return \"\"\"\n \n \n\n

 payloads list (XML)\n receivers list (JSON)\n\n payload_telemetry\n Callsign:\n String:\n Metadata (json):\n Time created (int, POSIX):\n\n listener_information\n Callsign:\n Data (json):\n Time created (int, POSIX):\n\n listener_telemetry\n Callsign:\n Data (json):\n Time created (int, POSIX):
\n\n \n \n \"\"\"\n\ndef get_time_created():\n if \"time_created\" not in flask.request.form:\n return None\n\n time_created = flask.request.form[\"time_created\"]\n if not time_created:\n return None\n\n return int(time_created)\n\n@app.route(\"/payload_telemetry\", methods=[\"POST\"])\ndef payload_telemetry():\n callsign = flask.request.form[\"callsign\"]\n string = flask.request.form[\"string\"]\n string_type = flask.request.form[\"string_type\"]\n metadata = json.loads(flask.request.form[\"metadata\"])\n time_created = get_time_created()\n\n if string_type == \"base64\":\n string = base64.b64decode(string)\n elif string_type == \"ascii\" or string_type == \"ascii-stripped\":\n string = string.encode(\"utf8\")\n\n if string_type == \"ascii-stripped\":\n string += \"\\n\"\n\n assert callsign and string\n assert isinstance(metadata, dict)\n\n u = uploader.Uploader(callsign=callsign, **couch_settings)\n try:\n u.payload_telemetry(string, metadata, time_created)\n except uploader.UnmergeableError:\n app.logger.warning(\"Unmergeable: %s (%r)\", callsign, string)\n\n return \"OK\"\n\n@app.route(\"/listener_information\", methods=[\"POST\"])\ndef listener_information():\n callsign = flask.request.form[\"callsign\"]\n data = json.loads(flask.request.form[\"data\"])\n time_created = get_time_created()\n\n assert callsign and data\n assert isinstance(data, dict)\n\n u = uploader.Uploader(callsign=callsign, **couch_settings)\n u.listener_information(data, time_created)\n\n return \"OK\"\n\n@app.route(\"/listener_telemetry\", methods=[\"POST\"])\ndef listener_telemetry():\n callsign = flask.request.form[\"callsign\"]\n data = json.loads(flask.request.form[\"data\"])\n time_created = get_time_created()\n\n assert callsign and data\n assert isinstance(data, dict)\n\n u = uploader.Uploader(callsign=callsign, **couch_settings)\n u.listener_telemetry(data, time_created)\n\n return \"OK\"\n\n@app.route(\"/allpayloads\")\ndef allpayloads():\n text = cache.get('allpayloads')\n if text is None:\n text = couch_to_xml.dump_xml(**couch_settings)\n cache.set('allpayloads', text)\n response = flask.make_response(text)\n set_expires(response, 60)\n return response\n\ndef set_expires(response, diff):\n expires = time.time() + diff\n expires = time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\",\n time.gmtime(expires))\n\n response.headers[\"Expires\"] = expires\n\nHTML_DESCRIPTION = u\"\"\"\n
\nRadio: {radio_safe}\nAntenna: {antenna_safe}\nLast Contact: {tdiff_hours} hours ago\n
\n\"\"\"\n\ndef listener_map(callsign, data):\n try:\n info = data[\"information\"][\"data\"]\n telemetry = data[\"telemetry\"][\"data\"]\n\n tdiff = int(time.time()) - data[\"latest\"]\n tdiff_hours = tdiff / 3600\n\n for key in [\"radio\", \"antenna\"]:\n if key not in info:\n info[key] = \"Unknown\"\n\n if \"altitude\" not in telemetry:\n telemetry[\"altitude\"] = 0.0\n\n info[\"radio_safe\"] = htmlescape(info[\"radio\"])\n info[\"antenna_safe\"] = htmlescape(info[\"antenna\"])\n info[\"tdiff_hours\"] = tdiff_hours\n\n return {\n \"name\": callsign,\n \"lat\": telemetry[\"latitude\"],\n \"lon\": telemetry[\"longitude\"],\n \"alt\": telemetry[\"altitude\"],\n \"tdiff_hours\": tdiff_hours,\n \"description\": HTML_DESCRIPTION.format(**info)\n }\n except KeyError:\n return None\n\ndef receivers_load(couch_db):\n listeners = {}\n\n yesterday = int(time.time() - (24 * 60 * 60))\n startkey = [yesterday, None]\n o = {\"startkey\": startkey}\n\n for doc_type in [\"information\", \"telemetry\"]:\n view_name = \"listener_{0}/time_created_callsign\".format(doc_type)\n view = couch_db.view(view_name, **o)\n\n for result in view:\n (time_uploaded, callsign) = result[\"key\"]\n\n l = {doc_type: result[\"id\"], \"latest\": time_uploaded}\n\n if callsign not in listeners:\n listeners[callsign] = l\n else:\n listeners[callsign].update(l)\n\n required_ids = {}\n remove_listeners = []\n for callsign in listeners:\n l = listeners[callsign]\n\n if not callsign or \"chase\" in callsign \\\n or \"information\" not in l or \"telemetry\" not in l:\n remove_listeners.append(callsign)\n else:\n required_ids[listeners[callsign][\"information\"]] = callsign\n required_ids[listeners[callsign][\"telemetry\"]] = callsign\n\n for callsign in remove_listeners:\n del listeners[callsign]\n\n docs = couch_db.all_docs(keys=required_ids.keys(), include_docs=True)\n\n for result in docs:\n doc_id = result[\"id\"]\n doc = result[\"doc\"]\n\n callsign = required_ids[doc_id]\n if doc[\"type\"] == \"listener_information\":\n listeners[callsign][\"information\"] = doc\n elif doc[\"type\"] == \"listener_telemetry\":\n listeners[callsign][\"telemetry\"] = doc\n else:\n raise KeyError(\"type\")\n\n return listeners\n\n@app.route(\"/receivers\")\ndef receivers():\n couch_server = couchdbkit.Server(couch_settings[\"couch_uri\"])\n couch_db = couch_server[couch_settings[\"couch_db\"]]\n\n listeners = receivers_load(couch_db)\n\n response_data = []\n for callsign in listeners:\n l = listener_map(callsign, listeners[callsign])\n if l is not None:\n response_data.append(l)\n\n response = flask.make_response(json.dumps(response_data))\n set_expires(response, 10 * 60)\n response.headers[\"Content-type\"] = \"application/json\"\n return response\n","repo_name":"ukhas/habitat-transition","sub_path":"habitat_transition/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7912,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"7746300639","text":"#####################################################################################################\r\n# Milos Atz\r\n# NE155 Homework 5\r\n#####################################################################################################\r\nimport math\r\nimport numpy as np\r\nimport scipy\r\n#####################################################################################################\r\n# Problem 4\r\n# Write a program to implement the following iterative methods for a matrix with n unknowns.\r\n# (a) Jacobi 
method\r\n# (b) Gauss Seidel method\r\n# (c) SOR method\r\n#####################################################################################################\r\n# First, build the system using the method performed in Problem 1\r\ndef matrix_build(n):\r\n\ta=[-1]*int(n-1)\r\n\tb=[4]*int(n)\r\n\tc=[-1]*int(n-1)\r\n\tA=np.matrix(np.diag(a, -1) + np.diag(b, 0) + np.diag(c, 1))\r\n\treturn(A)\r\ndef b_build(n):\r\n\tb=np.zeros(n)\r\n\tfor i in range(0, n):\r\n\t\tb[i]=100\r\n\tb=np.transpose(np.matrix(b))\r\n\treturn(b)\r\n#####################################################################################################\r\n# a) Jacobi method\r\n# Strategy: use a while loop to iterate while some convergence equation between x_old and x_new is greater than the input tolerance. The initial values for x are defined as x_old. In every while loop, x_new is calculated based on x_old. The error is then calculated. If the error tolerance is met, the while loop ends; if not, x_old = x_new and the loop repeats. I should implement an iteration counter to count the number of loops.\r\ndef jacobi_solver(A, b, tol=1e-6):\r\n\tn=b.size\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tD_inv=np.linalg.inv(D)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=D_inv*(D-A)*x_old+D_inv*b\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# b) Gauss-Seidel Method\r\ndef gs_solver(A, b, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A)<0)):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).all()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tn=b.size\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tL=np.diag(np.diag(A,-1),-1)\r\n\tU=np.diag(np.diag(A,1),1)\r\n\tDL_inv=np.linalg.inv(D+L)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=DL_inv*(-U*x_old+b)\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = '+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# c) SOR Method\r\ndef sor_solver(A, b, w=1.1, tol=1e-6):\r\n\tif(min(np.linalg.eigvals(A)<0)):\r\n\t\tsys.exit('A is not positive definite')\r\n\tif((A.transpose() != A).all()):\r\n\t\tsys.exit('A is not symmetric')\r\n\tif(b.size!=A.shape[0] or b.size!=A.shape[1]):\r\n\t\tsys.exit('dimensions of A and b do not agree')\r\n\tn=b.size\r\n\tx_old=np.transpose(np.matrix(np.zeros(n)))\r\n\tD=np.diag(np.diag(A))\r\n\tL=np.diag(np.diag(A,-1),-1)\r\n\tU=np.diag(np.diag(A,1),1)\r\n\tDL_inv=np.linalg.inv(D+w*L)\r\n\tconv=1\r\n\tcounter=0\r\n\twhile(conv>tol):\r\n\t\tx_new=DL_inv*(((1-w)*D-w*U)*x_old+w*b)\r\n\t\t#print(x_new)\r\n\t\tconv=np.linalg.norm(x_new-x_old)\r\n\t\tx_old=x_new\r\n\t\tcounter=counter+1\r\n\tprint('counter= '+str(counter))\r\n\tprint('absolute error = 
'+str(conv))\r\n\treturn(x_new)\r\n#####################################################################################################\r\n# Script that executes when the program file is called from the command line.\r\nn=input(\"Enter number of equations in system: \")\r\nA=matrix_build(n)\r\nb=b_build(n)\r\nprint('JACOBI SOLVER:')\r\njacobi_ans=jacobi_solver(A,b)\r\nprint('jacobi answer:')\r\nprint(jacobi_ans)\r\nprint('\\n')\r\n\r\nprint('GAUSS-SEIDEL SOLVER:')\r\ngs_ans=gs_solver(A,b)\r\nprint('gs answer:')\r\nprint(gs_ans)\r\nprint('\\n')\r\n\r\n\r\nprint('SOR SOLVER:')\r\nsor_ans=sor_solver(A,b)\r\nprint('sor answer:')\r\nprint(sor_ans)\r\nprint('\\n')\r\n","repo_name":"MilosAtz/NE155","sub_path":"HW5/P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"72547674139","text":"class Solution:\n def splitArraySameAverage(self, A: List[int]) -> bool:\n n = len(A)\n summ = sum(A)\n if not any(i * summ % n == 0 for i in range(1, n // 2 + 1)):\n return False\n\n sums = [set() for _ in range(n // 2 + 1)]\n sums[0].add(0)\n\n for a in A:\n for i in range(n // 2, 0, -1):\n for val in sums[i - 1]:\n sums[i].add(a + val)\n\n for i in range(1, n // 2 + 1):\n if i * summ % n == 0 and i * summ // n in sums[i]:\n return True\n\n return False\n","repo_name":"Next-Gen-UI/Code-Dynamics","sub_path":"Leetcode/0805. Split Array With Same Average/0805.py","file_name":"0805.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"69"} +{"seq_id":"36007723142","text":"# encoding: utf-8\n\n\"\"\"\n@version: 1.0\n@author: dawning\n@contact: dawning7670@gmail.com\n@time: 2017/3/27 17:12\n\"\"\"\n\nimport pytest\nfrom jsonschema import SchemaError\nfrom jsonschema.validators import validator_for\n\nfrom framework.validator.json_validator import JValidator\n\nconfig_with_simple_json = {\n \"opr\": {\n \"type\": \"string\",\n \"enum\": [\"collection\", \"pay_for\"]\n }\n}\n\nschema_with_simple_json = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]}\n }\n}\n\nconfig_with_array = {\n \"opr\": {\n \"type\": \"string\",\n \"enum\": [\"collection\", \"pay_for\"]\n },\n \"item\": [{\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"maxLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"maxLength\": 4}\n }, {\n \"maxLength\": 5,\n \"minLength\": 1\n }]\n}\nschema_with_array = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"item\": {\n \"type\": \"array\",\n \"maxLength\": 5,\n \"minLength\": 1,\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"maxLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"maxLength\": 4}\n }\n }\n }\n }\n}\n\nconfig_with_not_required = {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"remark\": {\"type\": \"string\"},\n \"user_name\": {\"type\": \"integer\", \"maximum\": 99999, \"minimum\": 10000},\n \"realtime\": {\"type\": \"integer\", \"enum\": [0, 1]},\n \"agreement_id\": {\"type\": 
\"string\", \"maxLength\": 16, \"maxLength\": 16},\n \"not_required\": [\"remark\", \"realtime\"]\n}\nschema_with_not_required = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"remark\": {\"type\": \"string\"},\n \"user_name\": {\"type\": \"integer\", \"maximum\": 99999, \"minimum\": 10000},\n \"realtime\": {\"type\": \"integer\", \"enum\": [0, 1]},\n \"agreement_id\": {\"type\": \"string\", \"maxLength\": 16, \"maxLength\": 16}\n },\n \"required\": ['account', 'agreement_id', 'opr', 'user_name']\n}\n\nconfig_with_nest_json = {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"user\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000}\n }\n}\nschema_with_nest_json = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000}\n }\n }\n }\n}\n\nconfig_with_whole = {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"item\": [\n {\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"minLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 4},\n \"not_required\": [\"bank_account_name\"]\n }, {\n \"maxLength\": 5,\n \"minLength\": 1\n }],\n \"user\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000},\n \"not_required\": [\"username\"]\n },\n \"not_required\": [\"account\"]\n}\n\nschema_with_whole = {\n \"$schema\": \"http://json-schema.org/schema#\",\n \"type\": \"object\",\n \"properties\": {\n \"opr\": {\"type\": \"string\", \"enum\": [\"collection\", \"pay_for\"]},\n \"account\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 1},\n \"item\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"bank_account\": {\"type\": \"string\", \"maxLength\": 19, \"minLength\": 19},\n \"bank_account_name\": {\"type\": \"string\", \"maxLength\": 10, \"minLength\": 4}\n },\n \"required\": [\"bank_account\"]\n },\n \"maxLength\": 5,\n \"minLength\": 1\n },\n \"user\": {\n \"type\": \"object\",\n \"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"integer\", \"maximum\": 999999, \"minimum\": 100000}\n },\n \"required\": [\"password\"]\n }\n },\n \"required\": [\"item\", \"opr\", \"user\"]\n}\nschema_config = {\n \"simple_json\": config_with_simple_json,\n \"array\": config_with_array,\n \"not_required\": config_with_not_required,\n \"nest_json\": config_with_nest_json,\n \"whole\": config_with_whole\n}\nvalidator = JValidator(schema_config)\n\n\ndef check(name, correct_schema):\n schema = validator.schema[name]\n cls = validator_for(schema)\n is_valid_schema = True\n try:\n cls.check_schema(schema)\n except SchemaError:\n is_valid_schema = False\n assert 
is_valid_schema\n assert schema == correct_schema\n\n\n# test single-level json\ndef test_make_schema_with_simple_json():\n check(\"simple_json\", schema_with_simple_json)\n\n\n# test array objects\ndef test_make_schema_with_array():\n check(\"array\", schema_with_array)\n\n\n# test not_required objects\ndef test_make_schema_with_not_required():\n check(\"not_required\", schema_with_not_required)\n\n\n# test nested json\ndef test_make_schema_with_nest_json():\n check(\"nest_json\", schema_with_nest_json)\n\n\n# overall test\ndef test_make_schema_whole():\n check(\"whole\", schema_with_whole)\n\n\nif __name__ == '__main__':\n args = [\"-vv\", \"--color\", \"yes\", \"test_json_validator.py\"]\n pytest.main(args)\n","repo_name":"lpj2721/protools","sub_path":"framework/tests/test_json_validator.py","file_name":"test_json_validator.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"}
os.mkdir(des)\n des += os.sep + config.outputFile + \"-\" + config.versionCode + \"-\" + config.versionName + \"-release.apk\"\n shutil.copyfile(src, des)\n else:\n src = config.javaGenDir + os.sep + \"_asproject\" + os.sep + \"app\" + os.sep + \"build\" + os.sep + \"outputs\" + os.sep + \"apk\" + os.sep + \"debug\" + os.sep + \"app-debug.apk\"\n des = config.rootGenDir + os.sep + \"apks\"\n \n print(\"\\033[1;34;40mFrom:\\n\\033[0;37;40m\" + src)\n print(\"\\033[1;34;40mTo\\n\\033[0;37;40m\" + des)\n \n if not os.path.exists(des):\n os.mkdir(des)\n des += os.sep + config.outputFile + \"-\" + config.versionCode + \"-\" + config.versionName + \"-debug.apk\"\n shutil.copyfile(src, des)\n\n \n print(\"\\n\")\n\ndef main(argv):\n start = time.time()\n print(\"===========================================================\")\n print(\" \\033[1;32;40mBUILD APPLICATION\\033[0;37;40m\")\n print(\"===========================================================\")\n \n print(str(argv))\n config.buildProjectPath(argv[0], argv[1], argv[2], argv[3])\n \n buildWorkingDir()\n \n buildJava()\n \n buildPackage()\n\n elapsedTime = time.time() - start\n print(\"Running time: %s s\" % str(elapsedTime))\n\nif __name__ == '__main__':\n main(sys.argv[1:])","repo_name":"cs50vn/virustracker-android","sub_path":"scripts/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":4054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"24321580296","text":"import numpy as np\nimport pandas as pd\nimport sys\nimport os\nimport pickle\nimport itertools\nfrom tqdm import tqdm\nfrom scipy.spatial.distance import squareform, pdist\n\n\"\"\"\nSimple .pdb parser for collecting spatial info of atoms and such. \n\"\"\"\n\nAA_LIST = [\"ALA\", \"ARG\", \"ASN\", \"ASP\", \"CYS\", \"GLN\", \"GLU\", \"GLY\", \"HIS\",\n \"ILE\", \"LEU\", \"LYS\", \"MET\", \"PHE\", \"PRO\", \"SER\", \"THR\", \"TRP\",\n \"TYR\", \"VAL\"]\n\n\ndef parse_pdb_file(protein_name, filepath):\n print(filepath)\n lines = open(filepath, 'r').readlines()\n atom_name = []\n residue_name = []\n residue_number = []\n x_coord = []\n y_coord = []\n z_coord = []\n element = []\n for line in lines:\n if line[0:7].strip() == \"ATOM\":\n atom_name.append(line[13:17].strip())\n residue_name.append(line[17:21].strip())\n residue_number.append(int(line[23:26].strip()))\n x_coord.append(float(line[31:39].strip()))\n y_coord.append(float(line[39:47].strip()))\n z_coord.append(float(line[47:55].strip()))\n element.append(line[77:79].strip())\n protein = pd.DataFrame({\n 'atom_name': atom_name,\n 'residue_name': residue_name,\n 'residue_number': residue_number,\n 'x': x_coord,\n 'y': y_coord,\n 'z': z_coord,\n 'element': element})\n protein['aa_index'] = protein[\"residue_name\"].apply(\n lambda x: AA_LIST.index(x))\n\n with open(\"processed_pdb/\" + protein_name[:-4] + \"_dataset.pkl\", \"wb\") as f:\n pickle.dump(protein, f)\n return protein\n\n\ndef filter_dataset_CA(protein_name, dataset, save=True):\n \"\"\"\n Filter only the CA atoms from a given dataset.\n Returns the list of datasets. 
\n \"\"\"\n filtered_dataset = dataset[dataset[\"atom_name\"] == \"CA\"]\n if save:\n with open(\"processed_pdb/\" + protein_name + \"_CA_data.pkl\", \"wb\") as f:\n pickle.dump(filtered_dataset, f)\n return filtered_dataset\n\n\ndef make_coordinate_dataset_CA(protein_name, dataset):\n \"\"\"\n Returns only the coordinates of a dataset.\n \"\"\"\n filtered_dataset = filter_dataset_CA(protein_name, dataset, False)\n coordinates = filtered_dataset[[\"x\", \"y\", \"z\"]].values\n coordinates -= coordinates.mean(axis=0)\n coordinates /= np.linalg.norm(coordinates, axis=0)\n data_coords = pd.DataFrame(coordinates,\n columns=(\"x\", \"y\", \"z\"))\n with open(\"processed_pdb/\" + protein_name + \"_CA_coords.pkl\", \"wb\") as f:\n pickle.dump(data_coords, f)\n return data_coords\n\n\ndef process_distance_matrix_CA_scipy(protein_name, dataset):\n filtered_dataset = filter_dataset_CA(protein_name, dataset, False)\n dist_matrix = squareform(pdist(filtered_dataset[[\"x\", \"y\", \"z\"]], metric='euclidean'))\n with open(\"processed_pdb/\" + protein_name + \"_CA_dist.pkl\", \"wb\") as f:\n pickle.dump(dist_matrix, f)\n return dist_matrix\n\n\ndef process_distance_matrix_CA(protein_name, dataset):\n filtered_dataset = filter_dataset_CA(protein_name, dataset, False)\n N = len(filtered_dataset)\n dist_matrix = np.zeros((N, N))\n for i, j in tqdm(itertools.combinations(range(N), 2)):\n a = np.array([filtered_dataset.iloc[i][\"x\"],\n filtered_dataset.iloc[i][\"y\"],\n filtered_dataset.iloc[i][\"z\"]])\n b = np.array([filtered_dataset.iloc[j][\"x\"],\n filtered_dataset.iloc[j][\"y\"],\n filtered_dataset.iloc[j][\"z\"]])\n dist_matrix[i][j] = np.linalg.norm(a - b)\n dist_matrix[j][i] = dist_matrix[i][j]\n with open(\"processed_pdb/\" + protein_name + \"_CA_dist.pkl\", \"wb\") as f:\n pickle.dump(dist_matrix, f)\n return dist_matrix\n\n\ndef unload_all(directory=\"processed_pdb\"):\n with open(directory + \"/names.pkl\", 'rb') as f:\n names = pickle.load(f)\n datasets = []\n filtered_datasets = []\n data_coords = []\n dist_matrices = []\n for name in names:\n with open(\"processed_pdb/\" + name + \"_dataset.pkl\", \"rb\") as f:\n datasets.append(pickle.load(f))\n with open(\"processed_pdb/\" + name + \"_CA_data.pkl\", \"rb\") as f:\n filtered_datasets.append(pickle.load(f))\n with open(\"processed_pdb/\" + name + \"_CA_coords.pkl\", \"rb\") as f:\n data_coords.append(pickle.load(f))\n with open(\"processed_pdb/\" + name + \"_CA_dist.pkl\", \"rb\") as f:\n dist_matrices.append(pickle.load(f))\n return names, datasets, filtered_datasets, data_coords, dist_matrices\n\n\nif __name__ == \"__main__\":\n if not os.path.exists(\"processed_pdb\"):\n os.makedirs(\"processed_pdb\")\n if len(sys.argv) == 1:\n directory = \"pdb_files\"\n else:\n directory = sys.argv[1]\n items = os.listdir(directory)\n files = []\n names = []\n datasets = []\n for name in items:\n if name.endswith(\".pdb\"):\n files.append(name)\n names.append(name[: -4])\n\n for name in files:\n protein = parse_pdb_file(name, directory + \"/\" + name)\n datasets.append(protein)\n \n with open(\"processed_pdb\" + \"/names.pkl\", 'wb') as f:\n pickle.dump(names, f)\n \n namefile = open(\"processed_pdb/names.txt\", 'w')\n\n for i, name in enumerate(names):\n namefile.write(name + \"\\n\")\n _ = filter_dataset_CA(name, datasets[i])\n _ = make_coordinate_dataset_CA(name, datasets[i])\n _ = process_distance_matrix_CA_scipy(name, 
datasets[i])\n","repo_name":"carlidel/protein-reconstruction","sub_path":"pdb_processing.py","file_name":"pdb_processing.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"23567532880","text":"\"\"\"GestaoDePraticasDiarias_v6 URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n #========== VERB ==========#\n path('verbs/', views.ListVerb.as_view(), name='verb_list'),\n path('verb/', views.DetailVerb.as_view(), name='verb_detail'),\n path('create_verb/', views.CreateVerb.as_view(), name='verb_create'),\n path('update_verb/',\n views.UpdateVerb.as_view(),\n name='verb_update'),\n path('delete_verb/',\n views.DeleteVerb.as_view(),\n name='verb_delete'),\n #========== VERB ==========#\n \n \n \n #========== SENTENCE ==========#\n path('sentences/', views.ListSentence.as_view(), name='sentence_list'),\n path('sentence/', views.DetailSentence.as_view(),\n name='sentence_detail'),\n path('create_sentence/', views.CreateSentence.as_view(),\n name='sentence_create'),\n path('update_sentence/',\n views.UpdateSentence.as_view(),\n name='sentence_update'),\n path('delete_sentence/',\n views.DeleteSentence.as_view(),\n name='sentence_delete'),\n #========== SENTENCE ==========#\n \n \n \n #========== GROUP ==========#\n path('groups/', views.ListGroup.as_view(), name='group_list'),\n path('group/', views.DetailGroup.as_view(),\n name='group_detail'),\n path('create_group/', views.CreateGroup.as_view(),\n name='group_create'),\n path('update_group/',\n views.UpdateGroup.as_view(),\n name='group_update'),\n path('delete_group/',\n views.DeleteGroup.as_view(),\n name='group_delete'),\n #========== GROUP ==========#\n\n \n \n #========== PATTERN ==========#\n path('patterns/', views.ListPattern.as_view(), name='pattern_list'),\n path('pattern/', views.DetailPattern.as_view(),\n name='pattern_detail'),\n path('create_pattern/', views.CreatePattern.as_view(),\n name='pattern_create'),\n path('update_pattern/',\n views.UpdatePattern.as_view(),\n name='pattern_update'),\n path('delete_pattern/',\n views.DeletePattern.as_view(),\n name='pattern_delete'),\n #========== PATTERN ==========#\n \n \n \n #========== RESOURCE ==========#\n path('resources/', views.ListResource.as_view(), name='resource_list'),\n path('resource/', views.DetailResource.as_view(),\n name='resource_detail'),\n path('create_resource/', views.CreateResource.as_view(),\n name='resource_create'),\n path('update_resource/',\n views.UpdateResource.as_view(),\n name='resource_update'),\n path('delete_resource/',\n views.DeleteResource.as_view(),\n name='resource_delete'),\n #========== RESOURCE ==========#\n \n \n \n #========== ARTEFACT ==========#\n path('artefacts/', views.ListArtefact.as_view(), name='artefact_list'),\n path('artefact/', views.DetailArtefact.as_view(),\n name='artefact_detail'),\n 
path('create_artefact/', views.CreateArtefact.as_view(),\n name='artefact_create'),\n path('update_artefact/',\n views.UpdateArtefact.as_view(),\n name='artefact_update'),\n path('delete_artefact/',\n views.DeleteArtefact.as_view(),\n name='artefact_delete'),\n #========== ARTEFACT ==========#\n]\n","repo_name":"fosquito/Daily-practice-management","sub_path":"Activities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"20200250185","text":"file = open(\"12-1.in\", \"r\")\n# file = open(\"12-1.in.sample\", \"r\")\n\ndef parse_node(node_id, children):\n for child in children:\n if child not in connected:\n connected.add(child)\n parse_node(child, connections[child])\n\nconnections = {}\nfor line in file:\n node_id, con_str = line.strip().split(\" <-> \")\n connections[int(node_id)] = [int(x) for x in con_str.split(\", \")]\n\nconnected = set()\nset_count = 1\nparse_node(0, connections[0])\nfor i in range(2000):\n if i not in connected:\n set_count += 1\n parse_node(i, connections[i])\n\nprint(len(connected))\nprint(set_count)\n","repo_name":"davidkiger/aoc2017","sub_path":"12-2.py","file_name":"12-2.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"38613292114","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n # Sol 1. DFS Pre-Order Traverse\n def sumNumbers(self, root: TreeNode) -> int:\n if not root:\n return 0\n \n each_path = []\n total_path_sum = 0\n \n def pre_order_traverse(node, so_far):\n if not node.left and not node.right: # means it reaches the end of each leaf\n each_path.append(\"\".join(so_far+[str(node.val)]))\n \n if node.left: # means it goes to the left subtree\n pre_order_traverse(node.left, so_far+[str(node.val)])\n \n if node.right: # right subtree\n pre_order_traverse(node.right, so_far+[str(node.val)])\n \n pre_order_traverse(root,[])\n \n # print(each_path)\n \n for x in each_path:\n total_path_sum += int(x)\n \n return total_path_sum\n # TC: O(n), need to check all element\n # SC: O(g), where g is the number of leaf node. 
g = ceil(n/2)\n # Runtime: 28 ms, faster than 87.12%\n # Memory Usage: 14 MB, less than 38.06% ","repo_name":"ssong86/leetcode-problem-solving","sub_path":"June-LeetCoding-Challenge-2020/Week4/129-sum-root-to-leaf-numbers.py","file_name":"129-sum-root-to-leaf-numbers.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"36613126414","text":"\"\"\"create topics table\n\nRevision ID: 6f247da76f69\nRevises: \nCreate Date: 2023-07-07 17:07:31.443571\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"01_6f247da76f69\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n op.create_table(\n \"topics\",\n sa.Column(\"id\", sa.UUID(), nullable=False, primary_key=True, index=True),\n sa.Column(\n \"created_at\",\n sa.DateTime(timezone=True),\n nullable=False,\n server_default=sa.func.now(),\n ),\n sa.Column(\n \"last_modified_at\",\n sa.DateTime(timezone=True),\n nullable=True,\n onupdate=sa.func.now(),\n ),\n sa.Column(\"description\", sa.String(length=250), nullable=True),\n sa.Column(\n \"is_deleted\", sa.Boolean(), nullable=False, server_default=sa.false()\n ),\n sa.Column(\"title\", sa.String(length=128), nullable=False),\n sa.Column(\"topic_id\", sa.UUID(), nullable=True),\n sa.ForeignKeyConstraint(\n [\"topic_id\"],\n [\"topics.id\"],\n ),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n\n\ndef downgrade() -> None:\n op.drop_index(op.f(\"ix_topics_id\"), table_name=\"topics\")\n op.drop_table(\"topics\")\n","repo_name":"dannytannertantrum/quiz-app","sub_path":"backend/alembic/versions/01_6f247da76f69_create_topics_table.py","file_name":"01_6f247da76f69_create_topics_table.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"18926545945","text":"def calculate_metrics(TP, FP, TN, FN):\n recall = TP / (TP + FN)\n \n accuracy = (TP + TN) / (TP + TN + FP + FN)\n \n precision = TP / (TP + FP)\n \n f1_score = (2 * precision * recall) / (precision + recall)\n \n return recall, accuracy, precision, f1_score\n\n# TP, FP, TN, FN values\nTP= 138 \nFP= 4\nTN= 175\nFN= 58\n\nrecall, accuracy, precision, f1_score = calculate_metrics(TP, FP, TN, FN)\n\nprint(\"Recall:\", recall)\nprint(\"Precision:\", precision)\nprint(\"Accuracy:\", accuracy)\nprint(\"F1 Score:\", f1_score)\n","repo_name":"REVVVY/Breathing_Bag_Defect","sub_path":"scripts/metrics_calc.py","file_name":"metrics_calc.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4844441549","text":"import math\nimport copy\nimport time\nimport sys\nimport random\nimport copy\n#Smallest supersequence found so far\nbest_string = None\n#Length of smallest supersequence\nbest_string_length = math.inf\nstarting_time = None\nticks = 0\nticks_max = 1000\n#Adds substring to end of string\ndef add_substring(string, substring):\n if substring in string:\n return string\n length = len(substring)\n while(True):\n if string[-length:] == substring[:length]:\n string += substring[length:]\n break\n length -= 1\n return string\n\ndef permutation_string(permutation, substrings):\n string = ''\n for number in permutation:\n string = add_substring(string, substrings[number])\n return string\n\ndef swap_permutation(permutation, swap_number):\n new_permutation = 
copy.deepcopy(permutation)\n new_permutation[swap_number], new_permutation[swap_number + 1] = new_permutation[swap_number + 1], new_permutation[swap_number]\n return new_permutation\n\n#DFS search for smallest supersequence. Runs for a max of 10 seconds\ndef complete_DFS(string, substrings, search_time=10):\n global best_string\n global best_string_length\n global ticks\n ticks += 1\n if ticks == ticks_max:\n current_time = time.time()\n if current_time - starting_time > search_time:\n output_best()\n ticks = 0\n #Calculate if string generated is the smallest found\n if not substrings:\n string_length = len(string)\n if string_length < best_string_length:\n best_string = string\n best_string_length = string_length\n substring_tries = [(substring, add_substring(string, substring)) for substring in substrings]\n #Sort in order of length \n substring_tries = sorted(substring_tries, key=lambda x: len(x[1]))\n #string is too long. At sometime we will have to add this substring!\n if not substring_tries or len(substring_tries[-1][1]) >= best_string_length:\n return\n #DFS search\n while substring_tries:\n substring = substring_tries[0]\n substring_tries.remove(substring)\n substrings.remove(substring[0])\n complete_DFS(substring[1], substrings, search_time)\n substrings.append(substring[0])\ndef local_search(substrings, iterations=1000, search_time=10, print_string=True):\n global best_string\n global best_string_length\n global ticks\n num_substrings = len(substrings)\n current_permutation = range(num_substrings)\n while(True):\n current_permutation = random.sample(current_permutation, num_substrings)\n for _ in range(iterations):\n ticks += 1\n if ticks == ticks_max:\n current_time = time.time()\n if current_time - starting_time > search_time:\n if print_string == True:\n output_best()\n ticks = 0\n return\n ticks = 0\n random_choice = random.randrange(2)\n #Random move\n if random_choice == 0:\n swap_number = random.randrange(0, num_substrings - 1)\n current_permutation = swap_permutation(current_permutation, swap_number)\n else:\n permutation_swaps = [None for _ in range(num_substrings - 1)]\n new_strings = [None for _ in range(num_substrings - 1)]\n for num in range(num_substrings - 1):\n permutation_swaps[num] = swap_permutation(current_permutation, num)\n new_strings[num] = permutation_string(current_permutation, substrings)\n permutation_lengths = [len(swap) for swap in new_strings]\n min_index = permutation_lengths.index(min(permutation_lengths))\n current_permutation = swap_permutation(current_permutation, min_index)\n new_string = permutation_string(current_permutation, substrings)\n if len(new_string) < best_string_length:\n best_string = new_string\n best_string_length = len(new_string)\n\n\n\n#Outputs best string found so far and exits program\ndef output_best():\n print(\"Best sequence found: \", best_string)\n print(\"Sequence Length: \", best_string_length)\n sys.exit()\n\n#Read input and run DFS on subsequences\ndef main():\n global starting_time\n filename = sys.argv[1]\n with open(filename) as f:\n _ = int(f.readline())\n _ = int(f.readline())\n subsequences = []\n sequence = None\n while(sequence != ''):\n sequence = f.readline()\n if sequence != '':\n #Drop newline\n subsequences.append(sequence[:-1])\n\n starting_time = time.time()\n local_search(subsequences, search_time=5, print_string=False)\n starting_time = time.time()\n complete_DFS('', subsequences, search_time=5)\n output_best() \nif __name__ == '__main__':\n 
main()","repo_name":"gnbpdx/AI-ML","sub_path":"superseq.py","file_name":"superseq.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"72454057500","text":"n = int(input())\nlis = input().split()\nlis2 = input().split()\n\nx = 0\ny = 0\n\nfor i in range(n):\n a = lis[i]\n b = lis2[i]\n if a == \"rock\":\n if b == \"paper\":\n y += 1\n elif b == \"scissors\":\n x += 1\n \n elif a == \"paper\":\n if b == \"rock\":\n x += 1\n elif b == \"scissors\":\n y += 1\n \n else:\n if b == \"rock\":\n y += 1\n elif b == \"paper\":\n x += 1\nprint(x, y)","repo_name":"AlanBui1/Competitive-Programming-Solutions","sub_path":"hkccc15j1.py","file_name":"hkccc15j1.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"71450327580","text":"from django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand\n\nfrom supergood_reads.models import UserSettings\n\n\nclass Command(BaseCommand):\n help = \"Create UserSettings for existing users\"\n\n def handle(self, *args, **kwargs):\n user_model = get_user_model()\n\n user_ids_with_settings = UserSettings.objects.values_list(\"user_id\", flat=True)\n users_without_settings = user_model.objects.exclude(\n id__in=user_ids_with_settings\n )\n\n new_user_settings = []\n for user in users_without_settings:\n new_user_settings.append(UserSettings(user=user))\n UserSettings.objects.bulk_create(new_user_settings)\n\n created_count = len(new_user_settings)\n self.stdout.write(\n self.style.SUCCESS(f\"Total UserSettings created: {created_count}\")\n )\n","repo_name":"supergood-org/supergood-reads","sub_path":"supergood_reads/management/commands/supergood_reads_create_user_settings.py","file_name":"supergood_reads_create_user_settings.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"17584765820","text":"#!/usr/bin/python3\n\n# pylint: disable=missing-module-docstring\n# pylint: disable=missing-class-docstring\n# pylint: disable=missing-function-docstring\n\nimport pymedia_redis\nimport pymedia_display\n\nfrom pymedia_const import REDIS_SERVER, REDIS_PORT, REDIS_DB\n\n# ---------------------\n\nif __name__ == '__main__':\n\n _redis = pymedia_redis.RedisHelper(REDIS_SERVER, REDIS_PORT, REDIS_DB,\n 'DISPLAY')\n\n display = pymedia_display.Display(_redis, pubsubs=(\n 'PLAYER:EVENT',\n 'CDSP:EVENT',\n ))\n\n display.t_wait_events.start()\n\n try:\n display.t_wait_events.join()\n except KeyboardInterrupt:\n print(\"Received KeyboardInterrupt, shutting down...\")\n display.blank()\n","repo_name":"taradiddles/diy-dsp-preamp","sub_path":"pymedia/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"69986427099","text":"import json\r\nfrom pprint import pprint\r\nimport requests\r\n\r\nprint(\"¿Que quieres?\")\r\nprint(\"Buscar una carta\")\r\nprint(\"Buscar varias\")\r\nopcion=int(input(\"¿Que quieres?\"))\r\n\r\nif opcion==1:\r\n carta=input(\"Introduzca el nombre\")\r\n response = requests.get(\"https://omgvamp-hearthstone-v1.p.mashape.com/cards/\"+carta,\r\n headers={\r\n \"X-Mashape-Key\": \"RdEHCET0tBmshzcVxojLE997hAvNp1qWbaQjsn2UdJz0ad4JQA\",\r\n \"Accept\": \"application/json\"\r\n }\r\n)\r\n 
data=response.json()\r\n for carta in data:\r\n if carta[\"collectible\"]==True:\r\n print(\"Nombre:\",carta[\"name\"])\r\n print(\"Vida:\",carta[\"health\"])\r\n print(\"Ataque:\",carta[\"attack\"])\r\n\r\nelse:\r\n print(\"Nada\")\r\n\r\n\r\n\r\nexpansion=\"¿Que expansion deseas buscar?\"\r\n\r\nresponse = requests.get(\"https://omgvamp-hearthstone-v1.p.mashape.com/cards/sets/\"+expansion,\r\n headers={\r\n \"X-Mashape-Key\": \"RdEHCET0tBmshzcVxojLE997hAvNp1qWbaQjsn2UdJz0ad4JQA\"\r\n }\r\n)\r\ndata=response.json()\r\n\r\nfor carta in data:\r\n\tif carta[\"type\"]==\"Hero\":\r\n\t\tprint (carta[\"name\"])\r\n","repo_name":"Alexlp1092/ProyectoWeb","sub_path":"pruebashearthstone.py","file_name":"pruebashearthstone.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10966523776","text":"from .bitonic_loops import bitonic_layer_loop, bitonic_swap_loop\n\ntry:\n # try to use autoray to provide transparent JAX/autograd support\n from autoray import numpy as np\nexcept ModuleNotFoundError:\n print(\"No autoray, using numpy (note: grad won't work!)\")\n import numpy as np\n\n\n### Softmax (log-sum-exp)\ndef softmax(a, b, alpha=1, normalize=0):\n \"\"\"The softmaximum of softmax(a,b) = log(e^a + a^b).\n normalize should be zero if a or b could be negative and can be 1.0 (more accurate)\n if a and b are strictly positive.\n Also called \\alpha-quasimax: \n J. Cook. Basic properties of the soft maximum. \n Working Paper Series 70, UT MD Anderson CancerCenter Department of Biostatistics, \n 2011. http://biostats.bepress.com/mdandersonbiostat/paper7\n \"\"\"\n return np.log(np.exp(a * alpha) + np.exp(b * alpha) - normalize) / alpha\n\n\n### Smooth max\ndef smoothmax(a, b, alpha=1):\n return (a * np.exp(a * alpha) + b * np.exp(b * alpha)) / (\n np.exp(a * alpha) + np.exp(b * alpha)\n )\n\n\n### relaxed softmax\ndef softmax_smooth(a, b, smooth=0):\n \"\"\"The smoothed softmaximum of softmax(a,b) = log(e^a + a^b).\n With smooth=0.0, is softmax; with smooth=1.0, averages a and b\"\"\"\n t = smooth / 2.0\n return np.log(np.exp((1 - t) * a + b * t) + np.exp((1 - t) * b + t * a)) - np.log(\n 1 + smooth\n )\n\n\ndef bitonic_matrices(n):\n \"\"\"Compute a set of bitonic sort matrices to sort a sequence of\n length n. n *must* be a power of 2.\n \n See: https://en.wikipedia.org/wiki/Bitonic_sorter\n \n Set k=log2(n).\n There will be k \"layers\", i=1, 2, ... k\n \n Each ith layer will have i sub-steps, so there are (k*(k+1)) / 2 sorting steps total.\n \n For each step, we compute 4 matrices. l and r are binary matrices of size (k/2, k) and\n map_l and map_r are matrices of size (k, k/2).\n \n l and r \"interleave\" the inputs into two k/2 size vectors. 
map_l and map_r \"uninterleave\" these two k/2 vectors\n back into two k sized vectors that can be summed to get the correct output.\n \n The result is such that to apply any layer's sorting, we can perform:\n \n l, r, map_l, map_r = layer[j]\n a, b = l @ y, r @ y \n permuted = map_l @ np.minimum(a, b) + map_r @ np.maximum(a,b)\n \n Applying this operation for each layer in sequence sorts the input vector.\n \n \"\"\"\n # number of outer layers\n\n matrices = []\n for n, m, layer in bitonic_layer_loop(n):\n l, r = np.zeros((n // 2, n)), np.zeros((n // 2, n))\n map_l, map_r = np.zeros((n, n // 2)), np.zeros((n, n // 2))\n for a, b, out, swap in bitonic_swap_loop(n, m, layer):\n l[out, a] = 1\n r[out, b] = 1\n if swap:\n a, b = b, a\n map_l[a, out] = 1\n map_r[b, out] = 1\n matrices.append((l, r, map_l, map_r))\n return matrices\n\n\ndef bitonic_indices(n):\n \"\"\"Compute a set of bitonic sort indices to sort a sequence of\n length n. n *must* be a power of 2. As opposed to the matrix\n operations, this requires only two index vectors of length n\n for each layer of the network.\n \n \"\"\"\n # number of outer layers\n layers = int(np.log2(n))\n indices = []\n for n, m, layer in bitonic_layer_loop(n):\n weave = np.zeros(n, dtype=\"i4\")\n unweave = np.zeros(n, dtype=\"i4\")\n for a, b, out, swap in bitonic_swap_loop(n, m, layer):\n weave[out] = a\n weave[out + n // 2] = b\n if swap:\n a, b = b, a\n unweave[a] = out\n unweave[b] = out + n // 2\n indices.append((weave, unweave))\n return indices\n\n\ndef bitonic_woven_matrices(n):\n \"\"\"\n Combine the l,r and l_inv, r_inv matrices into single n x n multiplies, for\n use with bisort_weave/diff_bisort_weave, fusing together consecutive stages.\n This reduces the number of multiplies to (k)(k+1) + 1 multiplies, where k=np.log2(n) \n \"\"\"\n layers = int(np.log2(n))\n matrices = []\n last_unweave = np.eye(n)\n for n, m, layer in bitonic_layer_loop(n):\n weave, unweave = np.zeros((n, n)), np.zeros((n, n))\n for a, b, out, swap in bitonic_swap_loop(n, m, layer):\n weave[out, a] = 1\n weave[out + n // 2, b] = 1\n # flip comparison order as needed\n if swap:\n a, b = b, a\n unweave[a, out] = 1\n unweave[b, out + n // 2] = 1\n # fuse the unweave and weave steps\n matrices.append(weave @ last_unweave)\n last_unweave = unweave\n # make sure the last unweave is preserved\n matrices.append(last_unweave)\n return matrices\n\n\ndef diff_sort(matrices, x, softmax=softmax):\n \"\"\"\n Approximate differentiable sort. Takes a set of bitonic sort matrices generated by bitonic_matrices(n), sort \n a sequence x of length n. Values may be distorted slightly but will be ordered.\n \"\"\"\n for l, r, map_l, map_r in matrices:\n a, b = l @ x, r @ x\n mx = softmax(a, b)\n mn = a + b - mx\n x = map_l @ mn + map_r @ mx\n\n return x\n\n\ndef diff_sort_indexed(indices, x, softmax=softmax):\n \"\"\"\n Given a set of bitonic sort indices generated by bitonic_indices(n), sort \n a sequence x of length n.\n \"\"\"\n split = len(x) // 2\n for weave, unweave in indices:\n woven = x[weave]\n a, b = woven[:split], woven[split:]\n mx = softmax(a, b)\n mn = a + b - mx\n x = np.concatenate([mn, mx])[unweave]\n return x\n\n\ndef comparison_sort(matrices, x, compare_fn, alpha=1, scale=250):\n \"\"\"\n Sort a tensor X, applying a differentiable comparison function \"compare_fn\" \n while sorting. Uses softmax to weight components of the matrix.\n \n Parameters:\n ------------\n matrices: the nxn bitonic sort matrices created by bitonic_matrices\n X: an [n,...] 
tensor of elements\n compare_fn: a differentiable comparison function compare_fn(a,b)\n taking a pair of [n//2,...] tensors and returning a signed [n//2] vector.\n alpha=1.0: smoothing to apply; smaller alpha=smoother, less accurate sorting,\n larger=harder max, increased numerical instability\n scale=250: scaling applied to output of compare_fn. Default is useful for \n comparison functions returning values in the range ~[-1, 1]\n \n Returns:\n ----------\n X_sorted: [n,...] tensor (approximately) sorted accoring to compare_fn\n \n \"\"\" \n for l, r, map_l, map_r in matrices: \n score = compare_fn((x.T @ l.T).T, (x.T @ r.T).T) \n a, b = score*scale, score*-scale\n a_weight = np.exp(a * alpha) / (np.exp(a * alpha) + np.exp(b * alpha))\n b_weight = 1 - a_weight \n # apply weighting to the full vectors\n aX = x.T @ l.T\n bX = x.T @ r.T \n w_max = (a_weight * aX + b_weight * bX)\n w_min = (b_weight * aX + a_weight * bX) \n # recombine into the full vector\n x = ( w_max @ map_l.T) + (w_min @ map_r.T) \n x = x.T\n \n return x\n\n\ndef vector_sort(matrices, X, key, alpha=1):\n \"\"\"\n Sort a matrix X, applying a differentiable function \"key\" to each vector\n while sorting. Uses softmax to weight components of the matrix.\n \n For example, selecting the nth element of each vector by \n multiplying with a one-hot vector.\n \n Parameters:\n ------------\n matrices: the nxn bitonic sort matrices created by bitonic_matrices\n X: an [n,d] matrix of elements\n key: a function taking a d-element vector and returning a scalar\n alpha=1.0: smoothing to apply; smaller alpha=smoother, less accurate sorting,\n larger=harder max, increased numerical instability\n \n Returns:\n ----------\n X_sorted: [n,d] matrix (approximately) sorted accoring to \n \n \"\"\"\n for l, r, map_l, map_r in matrices:\n\n x = key(X)\n # compute weighting on the scalar function\n a, b = l @ x, r @ x\n a_weight = np.exp(a * alpha) / (np.exp(a * alpha) + np.exp(b * alpha))\n b_weight = 1 - a_weight\n # apply weighting to the full vectors\n aX = l @ X\n bX = r @ X\n w_max = (a_weight * aX.T + b_weight * bX.T).T\n w_min = (b_weight * aX.T + a_weight * bX.T).T\n # recombine into the full vector\n X = (map_l @ w_max) + (map_r @ w_min)\n return X\n\n\ndef diff_sort_weave(fused, x, softmax=softmax, beta=0.0):\n \"\"\"\n Given a set of bitonic sort matrices generated by bitonic_woven_matrices(n), sort \n a sequence x of length n.\n beta specifies interpolation between true permutations (beta=0.0) and\n leaving the values unchanged (beta=1.0)\n \"\"\"\n i = np.eye(len(x))\n split = len(x) // 2\n x = ((beta * i) + (1 - beta) * fused[0]) @ x\n for mat in fused[1:]:\n a, b = x[:split], x[split:]\n mx = softmax(a, b)\n mn = a + b - mx\n x = (beta * i + (1 - beta) * mat) @ np.concatenate([mn, mx])\n return x\n\n\n### differentiable ranking\ndef order_matrix(original, sortd, sigma=0.1):\n \"\"\"Apply a simple RBF kernel to the difference between original and sortd,\n with the kernel width set by sigma. 
Normalise each row to sum to 1.0.\"\"\"\n diff = ((original).reshape(-1, 1) - sortd.reshape(1, -1)) ** 2\n rbf = np.exp(-(diff) / (2 * sigma ** 2))\n return (rbf.T / np.sum(rbf, axis=1)).T\n\n\ndef dargsort(original, sortd, sigma, transpose=False):\n \"\"\"Take an input vector `original` and a sorted vector `sortd`\n along with an RBF kernel width `sigma`, return an approximate ranking.\n If transpose is True, returns approximate argsort (but note that ties have identical values)\n If transpose is False (default), returns ranking\"\"\"\n order = order_matrix(original, sortd, sigma=sigma)\n if transpose:\n order = order.T\n return order @ np.arange(len(original))\n\n\ndef diff_argsort(matrices, x, sigma=0.1, softmax=softmax, transpose=False):\n\n \"\"\"Return the smoothed, differentiable ranking of each element of x. Sigma\n specifies the smoothing of the ranking. Note that this function is deceptively named,\n and in the default setting returns the *ranking*, not the argsort.\n \n If transpose is True, returns argsort (but note that ties are not broken in differentiable\n argsort);\n If False, returns ranking (likewise, ties are not broken).\n \"\"\"\n sortd = diff_sort(matrices, x, softmax)\n return dargsort(x, sortd, sigma, transpose)\n\n\ndef diff_argsort_indexed(indices, x, sigma=0.1, softmax=softmax, transpose=False):\n \"\"\"Return the smoothed, differentiable ranking of each element of x. Sigma\n specifies the smoothing of the ranking. Uses the indexed form\n to avoid multiplies.\n \n If transpose is True, returns argsort (but note that ties are not broken in differentiable\n argsort);\n If False, returns ranking (likewise, ties are not broken).\n \"\"\"\n sortd = diff_sort_indexed(indices, x, softmax)\n return dargsort(x, sortd, sigma, transpose)\n","repo_name":"johnhw/differentiable_sorting","sub_path":"differentiable_sorting/differentiable_sorting.py","file_name":"differentiable_sorting.py","file_ext":"py","file_size_in_byte":11209,"program_lang":"python","lang":"en","doc_type":"code","stars":137,"dataset":"github-code","pt":"69"} +{"seq_id":"36927937047","text":"import os\nimport random\n\nimport cherrypy\n\n\"\"\"\nThis is a simple Battlesnake server written in Python.\nFor instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md\n\"\"\"\n\n\nclass Battlesnake(object):\n @cherrypy.expose\n @cherrypy.tools.json_out()\n def index(self):\n # This function is called when you register your Battlesnake on play.battlesnake.com\n # It controls your Battlesnake appearance and author permissions.\n # TIP: If you open your Battlesnake URL in browser you should see this data\n return {\n \"apiversion\": \"1\",\n \"author\": \"Mandeep Dalavi\", # TODO: Your Battlesnake Username\n \"color\": \"#CF9FFF\", # TODO: Personalize\n \"head\": \"beluga\", # TODO: Personalize\n \"tail\": \"curled\", # TODO: Personalize\n }\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n def start(self):\n # This function is called everytime your snake is entered into a game.\n # cherrypy.request.json contains information about the game that's about to be played.\n data = cherrypy.request.json\n\n print(\"START\")\n return \"ok\"\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n @cherrypy.tools.json_out()\n def move(self):\n # This function is called on every turn of a game. 
It's how your snake decides where to move.\n # Valid moves are \"up\", \"down\", \"left\", or \"right\".\n # TODO: Use the information in cherrypy.request.json to decide your next move.\n data = cherrypy.request.json\n body = data[\"you\"][\"body\"]\n\n # Choose a random direction to move in\n possible_moves = [\"up\", \"down\", \"left\", \"right\"]\n safe_moves = self.getSafeMoves(possible_moves, body, data[\"board\"])\n\n if safe_moves:\n move = random.choice(safe_moves)\n return {\"move\" : move}\n\n return {\"move\" : 'up'}\n\n def getNext(self, currentHead, nextMove):\n futureHead = currentHead.copy()\n if nextMove == 'left':\n futureHead['x'] = currentHead['x'] - 1\n if nextMove == 'right':\n futureHead['x'] = currentHead['x'] + 1\n if nextMove == 'up':\n futureHead['y'] = currentHead['y'] + 1\n if nextMove == 'down':\n futureHead['y'] = currentHead['y'] - 1\n return futureHead\n \n def getSafeMoves(self, possible_moves, body, board):\n safe_moves = []\n\n for guess in possible_moves:\n # check if we make this move, will the decisions\n guessCoord = self.getNext(body[0], guess)\n if self.avoidWalls(guessCoord, board[\"width\"], board[\"height\"]) and self.avoidSnakes(guessCoord, board[\"snakes\"]):\n safe_moves.append(guess)\n elif len(body)>1 and guessCoord == body[-1] and guess not in body[:-1]:\n safe_moves.append(guess)\n return safe_moves\n\n def avoidWalls(self, futureHead, width, height):\n result = True\n x = int(futureHead['x'])\n y = int(futureHead['y'])\n\n if x < 0 or y < 0 or x >= width or y >= height:\n result = False\n\n return result\n\n def avoidSnakes(self, futureHead, snakeBodies):\n for snake in snakeBodies:\n if futureHead in snake[\"body\"][:-1]:\n return False\n return True\n\n @cherrypy.expose\n @cherrypy.tools.json_in()\n def end(self):\n # This function is called when a game your snake was in ends.\n # It's purely for informational purposes, you don't have to make any decisions here.\n data = cherrypy.request.json\n\n print(\"END\")\n return \"ok\"\n\n\nif __name__ == \"__main__\":\n server = Battlesnake()\n cherrypy.config.update({\"server.socket_host\": \"0.0.0.0\"})\n cherrypy.config.update(\n {\"server.socket_port\": int(os.environ.get(\"PORT\", \"8080\")),}\n )\n print(\"Starting Battlesnake Server...\")\n cherrypy.quickstart(server)\n","repo_name":"MandeepDalavi/Battlesnake","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"42053043533","text":"X,Y=map(int,input().split())\r\nN=int(input())\r\nL=[]\r\nfor i in range(N):\r\n L.append([int(x) for x in input().split()])\r\nMin=2147483647\r\nA,B=0,0\r\nfor i in L:\r\n if Min>abs(X-i[0])**2+abs(Y-i[1])**2:\r\n Min=abs(X-i[0])**2+abs(Y-i[1])**2\r\n A,B=i[0],i[1]\r\nprint(A,B)\r\n","repo_name":"Benson0418/python_t2","sub_path":"h658.py","file_name":"h658.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21572513331","text":"import pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom sklearn import linear_model\r\n\r\ndf = pd.read_csv('honeyproduction.csv')\r\n\r\nprod_per_year = df.groupby('year').totalprod.mean().reset_index()\r\nX = prod_per_year['year']\r\nX = X.values.reshape(-1, 1)\r\ny = prod_per_year['totalprod']\r\n\r\nregr = linear_model.LinearRegression()\r\nregr.fit(X, y)\r\ny_predict = regr.predict(X)\r\n\r\nX_future = 
np.array(range(2013, 2050))\r\nX_future = X_future.reshape(-1, 1)\r\nfuture_predict = regr.predict(X_future)\r\n\r\nplt.scatter(X, y)\r\nplt.plot(X, y_predict)\r\nplt.plot(X_future, future_predict)\r\nplt.savefig('honey_future.png')\r\nplt.show()\r\n\r\nprint(df.head())\r\nprint(regr.coef_)\r\nprint(regr.intercept_)\r\n","repo_name":"johnchae/honey-linear-regression","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"8050803192","text":"\ndef merge(arr, left, mid, right): \n\tn1 = mid - left + 1\n\tn2 = right- mid \n\n\tLeftArray = [0] * (n1) \n\tRightArray = [0] * (n2) \n\n\tfor i in range(0 , n1): \n\t\tLeftArray[i] = arr[left + i] \n\n\tfor j in range(0 , n2): \n\t\tRightArray[j] = arr[mid + 1 + j]\n \n\ti = 0\n\tj = 0\n\tk = left\t \n\n\twhile i < n1 and j < n2 : \n\t\tif LeftArray[i] <= RightArray[j]: \n\t\t\tarr[k] = LeftArray[i] \n\t\t\ti += 1\n\t\telse: \n\t\t\tarr[k] = RightArray[j] \n\t\t\tj += 1\n\t\tk += 1\n\twhile i < n1: \n\t\tarr[k] = LeftArray[i] \n\t\ti += 1\n\t\tk += 1\n\n\twhile j < n2: \n\t\tarr[k] = RightArray[j] \n\t\tj += 1\n\t\tk += 1\n \ndef mergeSort(arr,left,right): \n\tif left < right: \n\t\tmid = (left+right)//2\n\n\t\tmergeSort(arr, left, mid) \n\t\tmergeSort(arr, mid+1, right) \n\t\tmerge(arr, left, mid, right) \n\narr=[3,1,235,5,56,32,33,21,1]\nmergeSort(arr,0,len(arr)-1)\nprint(arr)\n\n\n\n","repo_name":"samirpatil2000/Data-Struture-Algo","sub_path":"C_&C++/c/array/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"38202101352","text":"import six\n\nfrom packetary.objects.index import Index\n\nfrom packetary import objects\nfrom packetary.tests import base\nfrom packetary.tests.stubs.generator import gen_package\nfrom packetary.tests.stubs.generator import gen_relation\n\n\nclass TestIndex(base.TestCase):\n def test_add(self):\n index = Index()\n index.add(gen_package(version=1))\n self.assertIn(\"package1\", index.packages)\n self.assertIn(1, index.packages[\"package1\"])\n self.assertIn(\"obsoletes1\", index.obsoletes)\n self.assertIn(\"provides1\", index.provides)\n\n index.add(gen_package(version=2))\n self.assertEqual(1, len(index.packages))\n self.assertIn(1, index.packages[\"package1\"])\n self.assertIn(2, index.packages[\"package1\"])\n self.assertEqual(1, len(index.obsoletes))\n self.assertEqual(1, len(index.provides))\n\n def test_find(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n\n self.assertIs(\n p1,\n index.find(\"package1\", objects.VersionRange(\"eq\", 1))\n )\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange())\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_find_all(self):\n index = Index()\n p11 = gen_package(idx=1, version=1)\n p12 = gen_package(idx=1, version=2)\n p21 = gen_package(idx=2, version=1)\n p22 = gen_package(idx=2, version=2)\n index.add(p11)\n index.add(p12)\n index.add(p21)\n index.add(p22)\n\n self.assertItemsEqual(\n [p11, p12],\n index.find_all(\"package1\", objects.VersionRange())\n )\n self.assertItemsEqual(\n [p21, p22],\n index.find_all(\"package2\", objects.VersionRange(\"le\", 2))\n )\n\n def test_find_newest_package(self):\n index = Index()\n p1 = gen_package(idx=1, version=2)\n p2 = 
gen_package(idx=2, version=2)\n p2.obsoletes.append(\n gen_relation(p1.name, [\"lt\", p1.version])\n )\n index.add(p1)\n index.add(p2)\n\n self.assertIs(\n p1, index.find(p1.name, objects.VersionRange(\"eq\", p1.version))\n )\n self.assertIs(\n p2, index.find(p1.name, objects.VersionRange(\"eq\", 1))\n )\n\n def test_find_top_down(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange(\"le\", 2))\n )\n self.assertIs(\n p1,\n index.find(\"package1\", objects.VersionRange(\"lt\", 2))\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"lt\", 1))\n )\n\n def test_find_down_up(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange(\"ge\", 2))\n )\n self.assertIs(\n p2,\n index.find(\"package1\", objects.VersionRange(\"gt\", 1))\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_find_accurate(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n self.assertIs(\n p1,\n index.find(\"package1\", objects.VersionRange(\"eq\", 1))\n )\n self.assertIsNone(\n index.find(\"package1\", objects.VersionRange(\"eq\", 3))\n )\n\n def test_find_obsolete(self):\n index = Index()\n p1 = gen_package(version=1)\n index.add(p1)\n\n self.assertIs(\n p1, index.find(\"obsoletes1\", objects.VersionRange(\"le\", 2))\n )\n self.assertIsNone(\n index.find(\"obsoletes1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_find_provides(self):\n index = Index()\n p1 = gen_package(version=1)\n p2 = gen_package(version=2)\n index.add(p1)\n index.add(p2)\n\n self.assertIs(\n p2, index.find(\"provides1\", objects.VersionRange(\"ge\", 2))\n )\n self.assertIsNone(\n index.find(\"provides1\", objects.VersionRange(\"gt\", 2))\n )\n\n def test_len(self):\n index = Index()\n for i in six.moves.range(3):\n index.add(gen_package(idx=i + 1))\n self.assertEqual(3, len(index))\n\n for i in six.moves.range(3):\n index.add(gen_package(idx=i + 1, version=2))\n self.assertEqual(6, len(index))\n self.assertEqual(3, len(index.packages))\n\n for i in six.moves.range(3):\n index.add(gen_package(idx=i + 1, version=2))\n self.assertEqual(6, len(index))\n self.assertEqual(3, len(index.packages))\n","repo_name":"HuongNT-CloudNFV/fuel-mirror","sub_path":"packetary/tests/test_index.py","file_name":"test_index.py","file_ext":"py","file_size_in_byte":5096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"40798892900","text":"if __name__ == '__main__':\n import pandas as pd\n import geopandas as gpd\n import numpy as np\n from utils.BBI_utils import mergeData, computeBBI, simpleLowpassFilter, getSuperelevation, getAdvisorySpeed, \\\n alignData\n from utils.LRS_utils import NCATgetReferencePoints, getDistToMid, getReferenceCurve, NCATgetRadius\n from utils.NCAT_processing import NCAT_processing\n from utils.SR_processing import SR_processing\n\n loc = pd.read_csv(r'2021_03_11_07_36_21_506_loc.csv')\n acc = pd.read_csv(r'2021_03_11_07_36_21_506_acc.csv')\n print(acc)\n print(acc.shape)\n crash = pd.read_csv(r'Crashdata.csv')\n # Chooses certain columns\n crash = crash.filter(items=['Road_Name', 'KABCO_Seve', 'Manner_of_', 'Location_a', 'Latitude', 'Longitude'])\n # Filters out all collision based 
crashes\n crash = crash[crash['Manner_of_'].eq('Not a Collision with Motor Vehicle')]\n # Filters out intersection crashes\n crash = crash[crash['Location_a'].str.contains('Non-Intersection') | crash['Location_a'].eq('Off Roadway')]\n print(crash)\n print(crash.shape)\n SR_obj = SR_processing(inFiles=[r'2021_03_11_07_36_21_506_loc.csv',r'2021_03_11_07_36_21_506_acc.csv'])\n SR_obj.gdf.to_file(\"smartphone.shp\")\n print(acc.columns)\n print(loc.columns)\n print(loc)\n # Not sure why this merge doesn't work...\n # all_csv_data = pd.merge(left=loc, right=acc, how='left', left_on='timestamp_utc_local', right_on='timestamp_nanosecond')\n all_csv_data = pd.concat([loc, acc], axis=1)\n print(all_csv_data)\n road_17 = gpd.read_file('0017_D1_2/0017_D1_2.shp')\n # print(SR_obj.gdf)","repo_name":"steveand117/tsai-ML-curve-safety","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"38391934704","text":"from torch.utils.data.dataset import Dataset\nimport pandas as pd\nimport torch\nfrom sklearn.preprocessing import MinMaxScaler\nimport pickle\nfrom typing import List\n\n# Keys of the different types of variables.\ncat_vars = [\n \"Store\",\n \"DayOfWeek\",\n \"StateHoliday\",\n \"CompetitionMonthsOpen\",\n \"Promo2Weeks\",\n \"StoreType\",\n \"Assortment\",\n \"State\",\n \"Week\",\n \"Events\",\n \"Is_quarter_end_DE\",\n \"Is_quarter_start\",\n \"WindDirDegrees\",\n \"Is_quarter_start_DE\",\n \"Is_month_end\",\n \"Open\",\n \"Is_year_end\",\n \"Is_year_start_DE\",\n \"Is_month_start_DE\",\n \"Promo2\",\n \"Is_year_end_DE\",\n \"Dayofweek\",\n \"Is_month_start\",\n]\n\ncont_vars = [\n \"Sales\",\n \"Promo2SinceWeek\",\n \"Max_TemperatureC\",\n \"Mean_TemperatureC\",\n \"Min_TemperatureC\",\n \"Max_Humidity\",\n \"Mean_Humidity\",\n \"Min_Humidity\",\n \"Max_Wind_SpeedKm_h\",\n \"Mean_Wind_SpeedKm_h\",\n \"CloudCover\",\n \"trend\",\n \"trend_DE\",\n \"Promo\",\n \"SchoolHoliday\",\n \"Min_VisibilitykM\",\n \"Min_DewpointC\",\n \"Mean_VisibilityKm\",\n \"Precipitationmm\",\n \"MeanDew_PointC\",\n \"Mean_Sea_Level_PressurehPa\",\n \"Max_Sea_Level_PressurehPa\",\n \"Promo2Days\",\n \"Customers\",\n \"CompetitionDaysOpen\",\n \"Dew_PointC\",\n \"Dayofyear\",\n \"Min_Sea_Level_PressurehPa\",\n \"Max_Gust_SpeedKm_h\",\n \"Elapsed\",\n \"Max_VisibilityKm\",\n \"CompetitionOpenSinceMonth\",\n \"CompetitionOpenSinceYear\",\n \"Promo2SinceYear\",\n]\n\nweather_vars = [\n \"Max_TemperatureC\",\n \"Mean_TemperatureC\",\n \"Min_TemperatureC\",\n \"Dew_PointC\",\n \"MeanDew_PointC\",\n \"Min_DewpointC\",\n \"Max_Humidity\",\n \"Mean_Humidity\",\n \"Min_Humidity\",\n \"Max_Sea_Level_PressurehPa\",\n \"Mean_Sea_Level_PressurehPa\",\n \"Min_Sea_Level_PressurehPa\",\n \"Max_VisibilityKm\",\n \"Mean_VisibilityKm\",\n \"Min_VisibilitykM\",\n \"Max_Wind_SpeedKm_h\",\n \"Mean_Wind_SpeedKm_h\",\n \"Max_Gust_SpeedKm_h\",\n \"Precipitationmm\",\n \"CloudCover\",\n \"WindDirDegrees\",\n]\n\noutput_file_name = \"./data/joined_cleaned.pkl\"\n\n\ndef data_clean(joined: pd.DataFrame) -> pd.DataFrame:\n \"\"\"[function currently does basic na forward\n filling and conversion of variables to useful types.\n I also drop a bunch of columns that either are entirely null or\n duplciate columns, the data source seems to be a weirdly processed]\n\n Arguments:\n joined {df} -- [original df from kaggle download\n https://www.kaggle.com/init27/fastai-v3-rossman-data-clean]\n\n Returns:\n [df] -- 
[cleaned df]\n \"\"\"\n joined.loc[:, weather_vars] = joined.loc[:, weather_vars].fillna(\n method=\"ffill\"\n )\n\n weather_vars.append(\"Events\")\n\n # some of the initial Max_Gust_Speed Data was missing\n # so I filled with the Max_wind Speed.\n joined.loc[\n joined[\"Max_Gust_SpeedKm_h\"].isna(), \"Max_Gust_SpeedKm_h\"\n ] = joined.loc[joined[\"Max_Gust_SpeedKm_h\"].isna(), \"Max_Wind_SpeedKm_h\"]\n\n # change text data into categories, as codes.\n joined[\"Events\"] = joined[\"Events\"].astype(\"category\").cat.codes + 1\n joined[\"Store\"] = joined[\"Store\"] - 1\n joined[\"DayOfWeek\"] = joined[\"DayOfWeek\"] - 1\n joined[\"Week\"] = joined[\"Week\"] - 1\n joined[\"Assortment\"] = joined[\"Assortment\"].astype(\"category\").cat.codes\n joined[\"State\"] = joined[\"State\"].astype(\"category\").cat.codes\n joined[\"WindDirDegrees\"] = (\n joined[\"WindDirDegrees\"].astype(\"category\").cat.codes\n )\n joined[\"StoreType\"] = joined[\"StoreType\"].astype(\"category\").cat.codes\n\n # Drop variables that didn't look useful.\n joined.drop(\n [\n \"Promo2Since\",\n \"Year\",\n \"Month\",\n \"Day\",\n \"PromoInterval\",\n \"StateName\",\n \"file_DE\",\n \"State_DE\",\n \"Dayofweek_DE\",\n \"Day_DE\",\n \"Date\",\n \"Is_quarter_end\",\n \"Is_month_end_DE\",\n \"Is_year_start\",\n \"week\",\n \"file\",\n \"Month_DE\",\n \"week_DE\",\n \"Dayofyear_DE\",\n \"CompetitionOpenSince\",\n \"Date_DE\",\n \"Elapsed_DE\",\n \"CompetitionDistance\",\n ],\n axis=1,\n inplace=True,\n )\n if \"Id\" in joined.keys():\n joined.drop(\"Id\", axis=1, inplace=True)\n\n # check the keys. Make sure that we don't have a miss match\n # between keys in list and dataframe.\n a = set(joined.keys())\n total_keys = cat_vars.copy()\n total_keys.extend(cont_vars)\n b = set(total_keys)\n c = a.difference(b)\n assert not c\n\n # convert booleans to ints.\n joined[joined.select_dtypes(include=\"bool\").keys()] = joined.select_dtypes(\n include=\"bool\"\n ).astype(\"int\")\n\n # change to floats.\n joined[cont_vars] = joined[cont_vars].astype(\"float\")\n joined.dropna(0, inplace=True)\n return joined\n\n\nclass RossmanDataset(Dataset):\n \"\"\"[puts data into a useful format to be used by the dataloader]\n \"\"\"\n\n @classmethod\n def from_pickle(cls, pickle_file: str):\n \"\"\"[creates the object from pickled dict, use to load pre-processed data]\n Arguments:\n pickle_file {[str]} -- [file name of pickled Rossmann Dataset.]\n \"\"\"\n with open(pickle_file, \"rb\") as input:\n file = pickle.load(input)\n return file\n\n def to_pickle(self, output_file: str):\n \"\"\"[puts the object into a pickle file for later recovery]\n\n Arguments:\n output_file {[str]} -- [output filename]\n \"\"\"\n with open(output_file, \"wb\") as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)\n\n def __init__(\n self,\n df: pd.DataFrame,\n cont_vars: List[str],\n cat_vars: List[str],\n indices: List[int],\n scaler=MinMaxScaler(),\n ):\n\n # reading data, transforms etc..\n # column lists\n self.x_cols = df.columns.difference([\"Sales\", \"Customers\"])\n self.Y_cols = [\"Sales\", \"Customers\"]\n\n # scaler = MinMaxScaler()\n self.scaler = scaler\n\n # if statement on whether scaler has been set or not.\n if self.scaler == self.__init__.__defaults__[0]:\n\n # training case\n self.data = df.loc[indices, :].copy()\n\n # fit!!! 
and transform the continuous variables.\n self.data.loc[\n :, cont_vars + self.Y_cols\n ] = self.scaler.fit_transform(\n self.data.loc[:, cont_vars + self.Y_cols]\n )\n\n else:\n\n # validation case\n self.data = df.loc[indices, :].copy()\n\n # transform the continuous variables.\n self.data.loc[:, cont_vars + self.Y_cols] = self.scaler.transform(\n self.data.loc[:, cont_vars]\n )\n\n self.data.reset_index(inplace=True)\n self.data.drop([\"index\"], inplace=True, axis=1)\n\n # Make sure that the columsn have correct types\n self.x_data_cat = torch.tensor(\n self.data[cat_vars].values, dtype=torch.int\n )\n self.x_data_cont = torch.tensor(\n self.data[cont_vars].values, dtype=torch.float32\n )\n self.Y_data = torch.tensor(\n self.data[self.Y_cols].values, dtype=torch.float32\n )\n self.length = self.data.shape[0]\n\n def __getitem__(self, index):\n # returns the input and output\n return (\n self.x_data_cat[index],\n self.x_data_cont[index],\n self.Y_data[index],\n )\n\n def __len__(self):\n return self.length # of how many examples(images?) you have\n\n\nif __name__ == \"__main__\":\n\n # Example usage\n # just used the joined dataframes\n joined = pd.read_pickle(\"./data/joined\")\n\n # joined_test doesn't contain customers or sales.\n # they are the predicted variables.\n joined_test = pd.read_pickle(\"./data/joined_test\")\n\n # push through data clean function\n # i.e. drop nonesense columns and fill nans\n joined = data_clean(joined)\n\n # train valid splitting\n split_train = int(joined.shape[0] * 0.8)\n split_valid = joined.shape[0] - split_train\n train, valid = torch.utils.data.random_split(\n joined, [split_train, split_valid]\n )\n\n # create and save the training set\n train_data = RossmanDataset(joined, cont_vars, cat_vars, train.indices)\n train_data.to_pickle(\"./data/train_data.pkl\")\n\n # create and save the validation set using the scaler\n # set in the training set.\n valid_data = RossmanDataset(\n joined, cont_vars, cat_vars, valid.indices, scaler=train_data.scaler\n )\n valid_data.to_pickle(\"./data/valid_data.pkl\")\n","repo_name":"MatthewLennie/Rossmann","sub_path":"import_rossman_data.py","file_name":"import_rossman_data.py","file_ext":"py","file_size_in_byte":8646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"70192735580","text":"import cv2\nimport numpy as np\n# # 读取图像\n# def get_edge_contour(img):\n# threshold1 = 100\n# threshold2 = 200\n# len_threshold = 2\n# edges = cv2.Canny(img, threshold1, threshold2)\n\n# cv2.imwrite('edges.png', edges)\n\n# # 查找轮廓\n# contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n \n# # 显示所有轮廓\n# mask = np.zeros(img.shape)\n# for c in contours:\n# # 过滤小面积\n# if (cv2.contourArea(c) < len_threshold ** 2):\n# continue\n \n# cv2.drawContours(img, [c], 0, (0, 255, 0), 1)\n \n# cv2.imwrite('contours.png', img)\n\n\ndef color_edge(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n height, width, _ = img.shape\n scale = (height + width)/2\n \n # cv2.imshow(\"hsv\", hsv)\n minBlue = np.array([100, 15, 46])\n maxBlue = np.array([124, 255, 255])\n \n # 确定蓝色区域\n mask = cv2.inRange(hsv, minBlue, maxBlue)\n # cv2.imwrite(\"mask.png\", mask)\n \n # 通过按位与获取蓝色区域\n blue_img = cv2.bitwise_and(img, img, mask=mask)\n cv2.imwrite(\"blue.png\", blue_img)\n\n # 将mask进行形态学处理消除内部外部的噪点\n kernel_size1 = int(scale / 60)\n kernel_size2 = int(scale / 16)\n if kernel_size1 == 0 or kernel_size2 == 0:\n return None\n kernel = 
cv2.getStructuringElement(cv2.MORPH_RECT,(kernel_size1,kernel_size1))\n kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT,(kernel_size2,kernel_size2))\n #定义矩形结构元素\n # erode1 = cv2.erode(mask,kernel,iterations=1)\n # cv2.imwrite(\"erode1.png\", erode1)\n open0 = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel,iterations=2)\n cv2.imwrite(\"open0.png\", open0)\n \n closed1 = cv2.morphologyEx(open0, cv2.MORPH_CLOSE, kernel2,iterations=2)\n cv2.imwrite(\"closed1.png\", closed1)\n\n open1 = cv2.morphologyEx(closed1, cv2.MORPH_OPEN, kernel2,iterations=2)\n cv2.imwrite(\"open1.png\", open1)\n\n # 提取边界\n ret, binary = cv2.threshold(open1,127,255,cv2.THRESH_BINARY)\n\n binary = np.float32(binary)\n dst = cv2.cornerHarris(binary,4,5,0.04)\n dst = cv2.dilate(dst,None)\n # dst: height * width\n \n candidate_pos = np.array(np.where(dst > 0.2 * dst.max())).transpose()\n if candidate_pos.shape[0] == 0:\n return None\n # pos[0]->height pos[1]->width\n left_top = candidate_pos[0]\n right_top = candidate_pos[0]\n left_bottom = candidate_pos[0]\n right_bottom = candidate_pos[0]\n # 遍历每个点,找到距离四角最近的点\n # 计算四个角点的坐标\n pos_plus = candidate_pos[:, 0] + candidate_pos[:, 1]\n pos_minus = candidate_pos[:, 0] - candidate_pos[:, 1]\n left_top_arg = np.argmin(pos_plus)\n right_bottom_arg = np.argmax(pos_plus)\n left_bottom_arg = np.argmax(pos_minus)\n right_top_arg = np.argmin(pos_minus)\n\n left_top = candidate_pos[left_top_arg]\n right_bottom = candidate_pos[right_bottom_arg]\n left_bottom = candidate_pos[left_bottom_arg]\n right_top = candidate_pos[right_top_arg]\n \n return [left_top, right_top, left_bottom, right_bottom] \n\n\n\n\nif __name__ == \"__main__\":\n # img = cv2.imread('0.png', cv2.THRESH_BINARY)\n # cv2.imwrite('1.png', img)\n # get_edge_contour(img)\n img = cv2.imread('0.png')\n color_edge(img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n\n","repo_name":"yangjh155/Unet","sub_path":"edge_contour.py","file_name":"edge_contour.py","file_ext":"py","file_size_in_byte":3366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12221790830","text":"import pandas as pd\nfrom bokeh.plotting import figure, output_file, show\nfrom bokeh.layouts import column\n\noutput_file('index.html')\np = []\nindex = 0\ngranularities = ['day', 'month', 'year']\nTOOLS = 'pan,box_zoom,wheel_zoom,box_select,hover,resize,reset,save'\n\nfor item in granularities:\n data = pd.read_csv('../data/' + item + '.csv', parse_dates=['Date'])\n\n p.append(figure(title='This chart is generated using Bokeh library',\n width=900, height=500, x_axis_type=\"datetime\", tools=TOOLS))\n\n p[index].line(data['Date'], data['Price'], line_width=2)\n\n p[index].circle(data['Date'], data['Price'], fill_color=\"white\", size=6)\n\n p[index].xaxis[0].axis_label = 'Date'\n p[index].yaxis[0].axis_label = 'Price in USD'\n index += 1\n\nshow(column(p[0], p[1], p[2]))\n","repo_name":"datopian/line-charts","sub_path":"bokeh/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"11630598699","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'core'\n\nurlpatterns = [\n \n path('', views.index_views, name='home'),\n \n path('events/information', views.event_views, name='info'),\n \n path('booking/bus/search/', views.add_to_bookings, name='addhire'),\n \n path('bus/register', views.bus_hire_view, name='bookbus'),\n \n path('aboutus', views.aboutUs_page, name='about'),\n \n path('faQ', views.faQ_page, name='faQ'),\n \n path('booking/bus/search', views.SearchView.as_view(), name='searchpath'),\n \n path('booking/', views.BookingBusDetail.as_view(), name='bookingdetail'),\n \n path(\"booking/bus/direction//\", views.DirectionDetail.as_view(), name=\"direction_detail\"),\n\n # path('hiring', views.hirebus, name='hiringbus'),\n\n # path('booking', booking_views, name='book'),\n \n # path(\n \n # \"informaa1276765433niaganalanajanaanaianau34aranaeanawana324apanaoana456543akanqanyana56banatanhanaban45456774\",\n\n # name='booked'\n \n # )\n \n]\n","repo_name":"metalcode03/BusBuggy","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"29574803218","text":"from wiki import Wiki\nclass Commander:\n hello = [\"hello\",\"ghbdtn\",\"привет\",\"хай\"]\n whatsup = [\"че как\",\"че как?\", \"как дела\",\"как дела?\"]\n wiki = [\"wiki\", \"википедия\",\"вики\"]\n last_msg = \"\"\n def ans(self,text:str):\n text = text.lower()\n if self.last_msg == \"вики\":\n w = Wiki()\n self.last_msg = \"\"\n return w.get_wiki(text)\n\n for i in self.hello:\n if text == i:\n self.last_msg = \"\"\n return \"Привет, я БОТ_ИМЯ!\\nЯ умею присылать статью из Википедии(напиши: вики или wiki)\"\n\n for i in self.whatsup:\n if text == i:\n self.last_msg = \"\"\n return \"У меня всгда все круто, я же рообот\\nА у тебя как дела?\"\n\n for i in self.wiki:\n if text == i:\n self.last_msg = \"вики\"\n return \"Что ты хочешь узнать?\"\n\n return \"Я тебя не понял\"\n","repo_name":"temsiPatrin/vk_bot_super","sub_path":"commander.py","file_name":"commander.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"41674512056","text":"from django.forms.forms import Form\nfrom django.urls.base import reverse_lazy\nfrom django.views.generic import ListView, DetailView, UpdateView, FormView, View\nfrom django.shortcuts import get_object_or_404, render, redirect\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.http import Http404\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.views.generic.edit import CreateView\nfrom django_countries import countries\nfrom rooms import forms, models as room_model\nfrom reservations import models as res_model\nfrom rooms import forms as room_form\nfrom users import mixins\nfrom users.mixins import LoggedInOnlyView\n\n\n# from django.http import HttpResponse # django translate the request\n# Create your views here.\n\n\nclass HomeView(ListView):\n\n \"\"\"Home View Definition\"\"\"\n\n model = room_model.Room\n paginate_by = 12\n context_object_name = \"page\"\n ordering = \"created\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n now = timezone.now() # get_context_data search in doc\n context[\"now\"] = now\n return context\n\n\ndef room_detail(request, pk):\n try:\n room = 
room_model.Room.objects.get(pk=pk)\n reservations = res_model.Reservation.objects.filter(room=room)\n for reservation in reservations:\n if reservation.is_finished():\n reservation.delete()\n return render(request, \"rooms/detail.html\", context={\"room\": room})\n except room_model.Room.DoesNotExist:\n # return redirect(reverse(\"core:home\")) # reverse returns url\n raise Http404()\n\n\n# class ModelNameDetail(DetailView):\n# model = room_model.Room\n# pk_url_kwarg=\"pk\"\n# you don't need to raise the http 404~\n# to more customize this, look it up on the CCBV\n\n\nclass EditRoomView(LoggedInOnlyView, UpdateView):\n model = room_model.Room\n template_name = \"rooms/room_edit.html\"\n fields = (\n \"name\",\n \"description\",\n \"country\",\n \"city\",\n \"price\",\n \"address\",\n \"guests\",\n \"beds\",\n \"bedrooms\",\n \"bath\",\n \"check_in\",\n \"check_out\",\n \"instant_book\",\n \"room_type\",\n \"amenity\",\n \"facility\",\n \"house_rule\",\n )\n\n def get_object(self, queryset=None):\n room = super().get_object(queryset=queryset)\n if room.host.pk != self.request.user.pk:\n raise Http404()\n return room\n\n\nclass UploadRoomView(FormView, mixins.LoggedInOnlyView):\n form_class = room_form.CreateRoomForm\n template_name = \"rooms/room_create.html\"\n\n def form_valid(self, form):\n room = form.save()\n room.host = self.request.user\n room.save()\n form.save_m2m()\n return redirect(reverse(\"rooms:detail\", kwargs={\"pk\": room.pk}))\n\n\nclass RoomPhotosView(DetailView):\n model = room_model.Room\n template_name = \"rooms/room_photos.html\"\n\n def get_object(self, queryset=None): # override\n room = super().get_object(queryset=queryset)\n if room.host.pk != self.request.user.pk:\n raise Http404()\n return room\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n rooms = room_model.Room.objects.all()\n context[\"rooms\"] = rooms\n return context\n\n\n# class SearchView(View):\n# def get(self, request):\n\n# country = request.GET.get(\"country\")\n\n# if country:\n\n# form = forms.SearchForm(request.GET)\n\n# if form.is_valid():\n\n# city = form.cleaned_data.get(\"city\")\n# country = form.cleaned_data.get(\"country\")\n# room_type = form.cleaned_data.get(\"room_type\")\n# price = form.cleaned_data.get(\"price\")\n# guests = form.cleaned_data.get(\"guests\")\n# bedrooms = form.cleaned_data.get(\"bedrooms\")\n# beds = form.cleaned_data.get(\"beds\")\n# baths = form.cleaned_data.get(\"baths\")\n# instant_book = form.cleaned_data.get(\"instant_book\")\n# superhost = form.cleaned_data.get(\"superhost\")\n# amenities = form.cleaned_data.get(\"amenities\")\n# facilities = form.cleaned_data.get(\"facilities\")\n\n# filter_args = {}\n\n# if city != \"Anywhere\":\n# filter_args[\"city__startswith\"] = city\n\n# filter_args[\"country\"] = country\n\n# if room_type is not None:\n# filter_args[\"room_type\"] = room_type\n\n# if price is not None:\n# filter_args[\"price__lte\"] = price\n\n# if guests is not None:\n# filter_args[\"guests__gte\"] = guests\n\n# if bedrooms is not None:\n# filter_args[\"bedrooms__gte\"] = bedrooms\n\n# if beds is not None:\n# filter_args[\"beds__gte\"] = beds\n\n# if baths is not None:\n# filter_args[\"baths__gte\"] = baths\n\n# if instant_book is True:\n# filter_args[\"instant_book\"] = True\n\n# if superhost is True:\n# filter_args[\"host__superhost\"] = True\n\n# for amenity in amenities:\n# filter_args[\"amenities\"] = amenity\n\n# for facility in facilities:\n# filter_args[\"facilities\"] = facility\n\n# rooms = 
room_model.Room.objects.filter(**filter_args)\n\n# else:\n\n# form = forms.SearchForm()\n\n# return render(request, \"rooms/search.html\", {\"form\": form, \"rooms\": rooms})\n\n\ndef Search(request):\n city = request.GET.get(\"city\", \"anywhere\")\n room_type = int(request.GET.get(\"room_type\", 0))\n room_types = room_model.RoomType.objects.all()\n country = request.GET.get(\"country\", \"KR\")\n price = int(request.GET.get(\"price\", 0))\n guests = int(request.GET.get(\"guests\", 0))\n beds = int(request.GET.get(\"beds\", 0))\n bedrooms = int(request.GET.get(\"bedrooms\", 0))\n bath = int(request.GET.get(\"bath\", 0))\n\n amenities = room_model.Amenity.objects.all()\n facilities = room_model.Facility.objects.all()\n form = {\n \"city\": city,\n \"s_room_type\": room_type, # from database\n \"s_country\": country,\n \"price\": price,\n \"guests\": guests,\n \"beds\": beds,\n \"bedrooms\": bedrooms,\n \"bath\": bath,\n }\n choices = {\n \"countries\": countries,\n \"room_types\": room_types,\n \"amenities\": amenities,\n \"facilities\": facilities,\n }\n filter_args = {}\n if city != \"anywhere\":\n filter_args[\"city__startswith\"] = city\n filter_args[\"country\"] = country\n if room_type != 0:\n filter_args[\"room_type__pk\"] = room_type\n if price != 0:\n filter_args[\"price__lte\"] = price # refer to lookup session on documentation\n if guests != 0:\n filter_args[\"guests__gte\"] = guests\n if beds != 0:\n filter_args[\"beds__gte\"] = beds\n if bedrooms != 0:\n filter_args[\"bedrooms__gte\"] = bedrooms\n if bath != 0:\n filter_args[\"bath__gte\"] = bath\n rooms = room_model.Room.objects.filter(**filter_args)\n return render(\n request,\n template_name=\"rooms/search.html\",\n context={**form, **choices, \"rooms\": rooms},\n )\n\n\n@login_required\ndef delete_photo(request, room_pk, photo_pk):\n user = request.user\n try:\n room = room_model.Room.objects.get(pk=room_pk)\n if user.pk != room.host.pk:\n messages.error(request, \"can't delete that photo\")\n else:\n photo = room_model.Photo.objects.get(pk=photo_pk)\n photo.delete()\n messages.success(request, \"Photo is successfully deleted\")\n return redirect(reverse(\"rooms:photos\", kwargs={\"pk\": room_pk}))\n\n except room_model.Room.DoesNotExist:\n return redirect(reverse(\"core:home\"))\n\n\nclass EditPhotoView(mixins.LoggedInOnlyView, UpdateView):\n model = room_model.Photo\n template_name = \"rooms/photo_edit.html\"\n fields = (\"caption\",)\n pk_url_kwarg = \"photo_pk\"\n success_message = \"Photo_updated\"\n\n def get_success_url(self):\n room_pk = self.kwargs.get(\"room_pk\")\n return reverse(\"rooms:photos\", kwargs={\"pk\": room_pk})\n\n\nclass AddPhotoView(mixins.LoggedInOnlyView, FormView, SuccessMessageMixin):\n model = room_model.Photo\n template_name = \"rooms/photo_create.html\"\n form_class = forms.CreatePhotoForm\n\n def form_valid(self, form):\n pk = self.kwargs.get(\"pk\")\n form.save(pk)\n messages.success(self.request, \"Photo Uploaded\")\n return redirect(reverse(\"rooms:photos\", kwargs={\"pk\": pk}))\n","repo_name":"glauke1996/airbnb-clone","sub_path":"airbnb_project/rooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74401199270","text":"import os\nimport subprocess\nimport sys\nfrom .base_handler import BaseHandler\nimport logging\n\n\nlogging.basicConfig(level=logging.ERROR)\nPATH = \"python/lib/python3.8/site-packages\"\n\n\nclass PipHandler(BaseHandler):\n def 
__init__(self, cli) -> None:\n super().__init__(cli=cli)\n self._build_dirs()\n\n def _build_dirs(self) -> None:\n os.makedirs(PATH, exist_ok=True)\n\n def install_dependencies(self) -> None:\n for dependencie in self.configs[\"libraries\"]:\n try:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", dependencie, \"-t\", PATH]\n )\n except subprocess.CalledProcessError:\n continue\n","repo_name":"brianamaral/aws_layer_publisher","sub_path":"layer_builder/pip_handler.py","file_name":"pip_handler.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"42787209056","text":"#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Author: Matthew Dixon, Diego Klabjan, Jin Hoon Bang\n# Description: Given multiple time series data in (M x 2) CSV format, this script\n# generates label (-1, 0, 1) and features (lagging price, moving averages, correlation)\n# In the input time series data, the first column is time stamp and the second oolumn is price.\n# In the current path, there are 43 symbols (43 different time series data).\n# Two files per symbol are generated: *_large.bin and *_small.bin. 
The two files differ\n# in number of datapoints that they contain.\n# For lagging and moving averages, normalized price values are used.\n# For calculating correlation between each symbol, return price value is used.\n\n\nimport pandas as pd\nimport glob\nimport numpy as np\nimport os\nimport sys\nimport math\nimport random\n\npd.set_option('precision', 15)\n\nparams = dict(\n path = os.path.join(os.path.expanduser('~'), 'data', 'csv', '*'),\n min_lagging = 1,\n max_lagging = 100,\n #interval_lagging = 1, #not implemented\n min_moving_average = 2,\n max_moving_average = 100,\n #interval_moving_average = 1, #not implemented\n list_epsilon = [0.01, 0.001, 0.0001, 0.00001, 0.000001, 0.0000001,0.00000001],\n theta = 0.001,\n max_correlation_window = 100,\n stock_count = 43,\n small_output_size = 50000,\n)\n\n#get paths to all files in 'file_path'\ninput_files = []\nfor file in glob.glob(params['path']):\n input_files.append(file)\ninput_files.sort()\n\n#find the symbol with the lowest number of datapoints\n#number of datapoints in the output is limited by the symbol with the lowest number of datapoints.\nlist_n = []\nfor file in input_files:\n df = pd.read_csv(file, header=None, dtype='float64')\n list_n.append(len(df))\nmin_n = min(list_n)\nprint(\"min_n:\", min_n)\n\n#dataframes for accumulating normalized price and return price across all symbols\ndf_normalized = pd.DataFrame(dtype='float64')\ndf_return = pd.DataFrame(dtype='float64')\n\nfor file in input_files:\n df = pd.read_csv(file, names=['Timestamp', 'Price'], header=None, dtype='float64')\n df = df.ix[:min_n]\n series_price = df.Price\n series_return = pd.Series(index = df.index, name=\"Return\"+file, dtype='float64')\n\n #generate return price\n for i in range(0, min_n - 1):\n series_return[i] = (series_price[i+1]-series_price[i])/series_price[i]\n series_return = series_return.dropna()\n df_return = pd.concat([df_return, series_return], axis=1)\n\n #generate normalized price\n meanPrice = np.mean(series_price)\n stdPrice = np.std(series_price)\n\n series_normalized = pd.Series(index=series_price.index, name=\"PriceNormalized\"+file, dtype='float64')\n\n for i in range(0, min_n):\n series_normalized[i] = (series_price[i]-meanPrice)/stdPrice\n df_normalized = pd.concat([df_normalized, series_normalized], axis=1)\n\n print(\"len(series_normalized)\",len(series_normalized))\n print(\"len(series_return)\", len(series_return))\n\nfor j in range(0, params['stock_count']):\n outputDataFrame = pd.DataFrame(dtype='float64')\n\n currNormalized = df_normalized.ix[:,j]\n currReturn = df_return.ix[:,j]\n currentFile = input_files[j]\n\n diffSquared = []\n #label = 1 and -1 represent increase/decrease in price. 
If the difference is\n #lower than epsilon, then label =0\n #In order to balance the labels as much as possible, different values of\n #epsilon are experimented and the one that balances the three classes as equally\n #as possible is chosen\n\n for eps in params['list_epsilon']:\n positive = 0\n neutral = 0\n negative = 0\n for i in range (0, min_n-1):\n difference = currNormalized[i+1]-currNormalized[i]\n if (difference>eps):\n positive = positive + 1\n elif (difference < (-1)*eps):\n negative = negative + 1\n else:\n neutral = neutral + 1\n total = positive + negative + neutral\n target = total / 3\n diffSquared.append((positive-target)**2+(negative-target)**2+(neutral-target)**2)\n print(\"epsilon:\", eps)\n print(\"positive:\", positive, positive/total)\n print(\"negative\", negative, negative/total)\n print(\"neutral\", neutral, neutral/total)\n print(\"\")\n\n balEpsilon = params['list_epsilon'][np.argmin(diffSquared)]\n print(\"Selected epsilon\", balEpsilon)\n print(\"\")\n\n seriesLabel = pd.Series(index=currNormalized.index, name=\"Label\"+str(balEpsilon)+currentFile, dtype='float64')\n for i in range (0, min_n-1):\n difference = currNormalized[i+1]-currNormalized[i]\n if (difference>balEpsilon):\n seriesLabel[i]=1\n elif (difference<(-1)*balEpsilon):\n seriesLabel[i]=-1\n else:\n seriesLabel[i]=0\n\n outputDataFrame=pd.concat([outputDataFrame, seriesLabel],axis=1)\n\n #generates lagging columns using normalized price,\n for i in range(1,params['max_lagging']+1):\n seriesLagged = pd.Series(currNormalized.shift(i), index=currNormalized.index, name=\"Lagging \"+str(i)+currentFile, dtype='float64')\n outputDataFrame=pd.concat([outputDataFrame,seriesLagged],axis=1)\n\n #generates moving averages normalized price\n for i in range (params['min_moving_average'], params['max_moving_average']+1):\n seriesMovingAverage = currNormalized\n seriesMovingAverage = pd.rolling_mean(seriesMovingAverage, i)\n seriesMovingAverage = pd.Series(seriesMovingAverage, index=seriesMovingAverage.index, name=\"Moving Average\"+str(i)+currentFile, dtype='float64')\n outputDataFrame = pd.concat([outputDataFrame, seriesMovingAverage], axis=1)\n\n #calculates correlation with different symbols using moving windows.\n #adds very small values of perturbation to avoid division by zero while\n #calculating correlation.\n\n for k in range (j+1, params['stock_count']):\n u = (params['theta'] * balEpsilon)/math.sqrt(params['max_correlation_window'])\n compareFile = input_files[k]\n\n xPrice = currReturn\n yPrice = df_return.ix[:,k]\n xTemp = pd.Series(dtype='float64')\n yTemp = pd.Series(dtype='float64')\n xTemp = xPrice.apply(lambda x: u*(random.uniform(-1,1)))\n yTemp = yPrice.apply(lambda x: u*(random.uniform(-1,1)))\n xPrice = xPrice.add(xTemp)\n yPrice = yPrice.add(yTemp)\n\n seriesCorrelation = pd.Series(index=outputDataFrame.index, name=\"Correlation\"+currentFile+\" VS \"+compareFile, dtype='float64')\n\n for i in range(params['max_correlation_window'], min_n):\n correlation = np.corrcoef(xPrice[i-(params['max_correlation_window'] - 1) : i], yPrice[i-(params['max_correlation_window'] - 1) : i], bias = 1)[0][1]\n seriesCorrelation[i] = correlation\n\n outputDataFrame = pd.concat([outputDataFrame, seriesCorrelation], axis=1)\n\n #two output files are prepared\n #size of the ouput is n_min calculated initially\n #size of small output set is defined in params\n\n outputDataFrame = outputDataFrame.dropna()\n smallDataFrame = outputDataFrame.tail(params['small_output_size'])\n\n file = 
os.path.splitext(currentFile)[0]\n\n dimension = np.array([len(outputDataFrame), len(outputDataFrame.columns)])\n smallDimension = np.array([params['small_output_size'], len(outputDataFrame.columns)])\n\n print(\"dimensions for: \", currentFile)\n print(\"number of rows:\", len(outputDataFrame))\n print(\"number of columns: \", len(outputDataFrame.columns))\n print(\"\")\n\n #append dimension (n_row, n_column) to beginning of file and export to binary\n outputArray = outputDataFrame.as_matrix()\n outputArray=np.append(dimension,outputArray)\n outputArray.astype('float64')\n outputArray.tofile(file+'_large.bin')\n smallOutputArray = smallDataFrame.as_matrix()\n smallOutputArray=np.append(smallDimension,smallOutputArray)\n smallOutputArray.astype('float64')\n smallOutputArray.tofile(file+'_small.bin')\n\n #for outputting to csv format\n # outputDataFrame.to_csv(file+'_largeHybrid.csv',index=False)\n # smallDataFrame.to_csv(file+'_smallHybrid.csv',index=False)\n\n\n\n\n\n","repo_name":"jinhoonbang/python_machine_learning","sub_path":"src/feature_engineering/generate_features.py","file_name":"generate_features.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"2705195016","text":"\r\nimport requests\r\nimport json\r\n\r\n\r\nAPI_KEY = \"4b9b2e12-caac-4075-8c5b-8633b0163a1f\"\r\n\r\n\r\ndef get_prices():\r\n url = \"https://pro-api.coinmarketcap.com/v1/exchange/quotes/latest\"\r\n headers = {\r\n \"X-CMC_PRO_API_KEY\": API_KEY,\r\n }\r\n params = {\r\n \"id\": \"1,2,3,4,5,6,7,8,9,10\",\r\n \"symbol\": \"ETH\",\r\n \"convert\": \"USD\",\r\n }\r\n response = requests.get(url, headers=headers, params=params)\r\n response_json = response.json()\r\n\r\n\r\n prices = {}\r\n for exchange in response_json[\"data\"]:\r\n name = exchange[\"name\"]\r\n price = exchange[\"quote\"][\"USD\"][\"price\"]\r\n prices[name] = price\r\n\r\n return prices\r\n\r\n\r\ndef find_arbitrage(prices):\r\n\r\n table = PrettyTable()\r\n table.field_names = [\"Exchange\", \"Price\"]\r\n\r\n\r\n for exchange in prices:\r\n price = prices[exchange]\r\n table.add_row([exchange, price])\r\n\r\n\r\n print(table)\r\n\r\n\r\ndef main():\r\n\r\n prices = get_prices()\r\n\r\n\r\n find_arbitrage(prices)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"rexcelArb/researches","sub_path":"Simple ETH Arb.py","file_name":"Simple ETH Arb.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"73102141990","text":"import jieba\n\n\nclass SensitiveConf(object):\n\n def __init__(self):\n self.mysql = None\n self.debug = None\n\n\nclass SensitiveWordDistinguish(object):\n\n def __init__(self, mysql, debug=False):\n self.mysql = mysql\n\n words_list = list()\n for data in mysql.query_sensitive_word():\n jieba.add_word(data['word'])\n words_list.append(data['word'])\n\n self.jieba = jieba\n self.word_list = words_list\n self.debug = debug\n\n def distinguish(self, ask):\n words = self.jieba.lcut(ask)\n\n sensitive_word = [word for word in words if word in self.word_list]\n include_sensitive = False if len(sensitive_word) == 0 else True\n\n return include_sensitive, sensitive_word\n\n\ndef test(test_sentence):\n from module.core.mysql_exec import Mysql\n from module.core.utterance import Utterance\n\n utterance = Utterance(ask=test_sentence)\n\n mysql = Mysql(host='192.168.10.10', user='chatbot', password='chatbot', 
db='chatbot')\n sensitive_word_distinguish = SensitiveWordDistinguish(mysql)\n result = sensitive_word_distinguish.distinguish(utterance)\n\n print(\"result: \", result.sensitive_word)\n print(\"sentence: \", test_sentence)\n\n\nif __name__ == '__main__':\n test('气枪非常好')\n print()\n test('你好')\n","repo_name":"boyshen/NLP_project","sub_path":"chinese_chatbot/module/sensitive_word/sensitive_word.py","file_name":"sensitive_word.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"17212165962","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport shutil\nimport copy\nimport json\nsys.path.append(f'{os.environ[\"HOME\"]}/workspace/octotiger-scripts/include')\nfrom script_common import *\n\nbaseline = {\n \"name\": \"lci\",\n \"nnodes_list\": [8],\n \"max_level\": 5,\n \"griddim\": 2,\n \"stop_step\": 30,\n \"zc_threshold\": 8192,\n \"task\": \"rs\",\n \"parcelport\": \"lci\",\n \"protocol\": \"putsendrecv\",\n \"comp_type\": \"queue\",\n \"progress_type\": \"worker\",\n \"prg_thread_num\": \"auto\",\n \"sendimm\": 1,\n \"backlog_queue\": 0,\n \"prepost_recv_num\": 1,\n \"zero_copy_recv\": 1,\n \"match_table_type\": \"hashqueue\",\n \"cq_type\": \"array_atomic_faa\",\n \"reg_mem\": 0,\n \"ndevices\": 4,\n \"ncomps\": 4\n}\n\nconfigs = [\n # baseline,\n {**baseline, \"name\": \"lci_c1\", \"ncomps\": 1},\n {**baseline, \"name\": \"lci_c2\", \"ncomps\": 2},\n {**baseline, \"name\": \"lci_c4\", \"ncomps\": 4},\n # {**baseline, \"name\": \"lci_l5_worker_d1\", \"ndevices\": 1, \"progress_type\": \"worker\"},\n # {**baseline, \"name\": \"lci_l5_worker_d2\", \"ndevices\": 2, \"progress_type\": \"worker\"},\n # {**baseline, \"name\": \"lci_l5_worker_d4\", \"ndevices\": 4, \"progress_type\": \"worker\"},\n # {**baseline, \"name\": \"lci_l5_rp_d1\", \"ndevices\": 1, \"progress_type\": \"rp\"},\n # {**baseline, \"name\": \"lci_l5_rp_d2\", \"ndevices\": 2, \"progress_type\": \"rp\"},\n # {**baseline, \"name\": \"lci_l5_rp_d4\", \"ndevices\": 4, \"progress_type\": \"rp\"},\n # {**baseline, \"name\": \"lci_l5_rp1_d1\", \"ndevices\": 1, \"progress_type\": \"rp\", \"prg_thread_num\": \"1\"},\n # {**baseline, \"name\": \"lci_l5_rp1_d2\", \"ndevices\": 2, \"progress_type\": \"rp\", \"prg_thread_num\": \"1\"},\n # {**baseline, \"name\": \"lci_l5_rp1_d4\", \"ndevices\": 4, \"progress_type\": \"rp\", \"prg_thread_num\": \"1\"},\n]\n\nif __name__ == \"__main__\":\n mkdir_s(\"./run\")\n\n tag = getenv_or(\"RUN_TAG\", \"default\")\n os.environ[\"CURRENT_SCRIPT_PATH\"] = os.path.dirname(os.path.realpath(__file__))\n for config in configs:\n # print(config)\n for nnodes in config[\"nnodes_list\"]:\n run_slurm(tag, nnodes, config)","repo_name":"JiakunYan/octotiger-scripts","sub_path":"rostam/profile/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"12365654741","text":"import pprint\npp = pprint.PrettyPrinter(indent=4)\n\ndef buildPlayerDecks(input):\n player1 = input[1:input.index('')]\n player2 = input[input.index('')+2:]\n\n\n player1 = list(map(int, player1))\n player2 = list(map(int, player2))\n\n return player1, player2\n\ndef takeOneStep(player1, player2):\n player1_card = player1.pop(0)\n player2_card = player2.pop(0)\n\n if player1_card > player2_card:\n player1.extend([player1_card, player2_card])\n if player2_card > player1_card:\n player2.extend([player2_card, 
player1_card])\n\ndef playFullGame(player1, player2):\n while player1 and player2:\n takeOneStep(player1, player2)\n\ndef calculatePlayerScore(player):\n total = 0\n for i in range(len(player), 0, -1):\n total += i * player[len(player) - i]\n return total\n\ndef a(input):\n # pp.pprint(input)\n player1, player2 = buildPlayerDecks(input)\n # pp.pprint(player1)\n # pp.pprint(player2)\n playFullGame(player1, player2)\n # pp.pprint(player1)\n # pp.pprint(player2)\n if player1:\n return calculatePlayerScore(player1)\n return calculatePlayerScore(player2)\n","repo_name":"EggheadJohnson/AdventOfCode2020","sub_path":"22/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"11079372236","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n File Name: singleNumber\n Author : jing\n Date: 2020/3/19\n\n https://leetcode-cn.com/explore/interview/card/tencent/223/math-and-numbers/940/\n\n 找出只出现一次的数\n\"\"\"\n\n\nclass Solution:\n def singleNumber(self, nums):\n result = 0\n for num in nums:\n result = result ^ num\n return result\n","repo_name":"summer-vacation/AlgoExec","sub_path":"tencent/math_and_digit/singleNumber.py","file_name":"singleNumber.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"17445547758","text":"from pyglet import gl\n\nfrom .constants import SEA_LEVEL, SCREEN_SIZE\nfrom .primitives import Label, Rectangle\nfrom .camera import Rect\nfrom .vector import v\n\n\nclass GameHud(object):\n def __init__(self, world):\n self.world = world\n\n w, h = SCREEN_SIZE\n r = Rect(v(0, 0), v(250, 113))\n r = r.translate(v(8, h - r.height - 8))\n self.infobox = Rectangle(r, [(0, 0, 0, 0.33)])\n\n self.altlabel = Label(\n text='Altitude:',\n x=20,\n y=h - 35\n )\n self.distlabel = Label(\n text='Distance:',\n x=20,\n y=h - 70\n )\n self.fuellabel = Label(\n text='',\n x=20,\n y=h - 105\n )\n\n self.controllers = []\n\n def set_controllers(self, controllers):\n self.controllers = controllers\n\n def draw(self):\n alt = (self.world.squid.position.y - SEA_LEVEL - 15) * 0.1\n if self.world.goal:\n dist = abs(self.world.goal.left - self.world.squid.position.x) * 0.1\n self.distlabel.document.text = 'Target: %dm' % dist\n else:\n dist = (self.world.squid.position.x - 150) * 0.1\n self.distlabel.document.text = 'Distance: %dm' % dist\n\n if alt < 0:\n self.altlabel.document.text = 'Depth: %dm' % (-alt)\n else:\n self.altlabel.document.text = 'Altitude: %dm' % alt\n\n self.fuellabel.document.text = 'Fuel: %0.1fkg' % self.world.squid.fuel\n if self.world.squid.fuel:\n self.fuellabel.color = (255, 255, 255, 255)\n else:\n if self.world.squid.need_fuel():\n self.fuellabel.color = (255, 0, 0, 255)\n else:\n self.fuellabel.document.text = 'Fuel: N/A'\n self.infobox.draw()\n self.altlabel.draw()\n self.distlabel.draw()\n self.fuellabel.draw()\n\n if self.controllers:\n gl.glPushMatrix()\n gl.glTranslatef(SCREEN_SIZE[0] - 74, 10, 0)\n for controller in reversed(self.controllers):\n controller.draw()\n gl.glTranslatef(-74, 0, 0)\n gl.glPopMatrix()\n","repo_name":"lordmauve/korovic","sub_path":"korovic/hud.py","file_name":"hud.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"73821035108","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\n\nimport numpy as 
np\nfrom gspn_lib import gspn_tools\nimport sys\n\nclass MultiGSPNenv_v1(gym.Env):\n metadata = {'render.modes': ['human']}\n\n def __init__(self, gspn_model=None, gspn_path=None, n_locations=None, n_robots=None, actions_maps=None,\n reward_function=None, use_expected_time=False, verbose=False, idd=None):\n print('Multi GSPN Gym Env V1')\n self.id = idd\n self.verbose = verbose\n self.n_robots = n_robots\n self.n_locations = n_locations\n self.use_expected_time = use_expected_time\n self.actions_id_to_name = actions_maps[0]\n self.actions_name_to_id = actions_maps[1]\n\n if not reward_function:\n raise Exception('Please select one reward function: either 1 or 2')\n self.reward_function_type = reward_function\n\n if gspn_path != None:\n pn_tool = gspn_tools.GSPNtools()\n self.mr_gspn = pn_tool.import_greatspn(gspn_path)[0]\n # pn_tool.draw_gspn(mr_gspn)\n elif gspn_model != None:\n self.mr_gspn = gspn_model\n else:\n raise Exception('Please provide a GSPN object or a GSPN path of the environment model.')\n\n # Init timestamp\n self.timestamp = 0\n\n # [max_n_tokens_in_place0, max_n_tokens_in_place1, ... max_n_tokens_in_placen]\n # we approximate this to: [n_robots, n_robots, ... nrobots]\n self.observation_space = spaces.MultiDiscrete(nvec=[n_robots]*len(self.mr_gspn.get_current_marking()))\n n_actions = len(self.actions_id_to_name.keys())\n # {0,1,...,n_actions}\n self.action_space = spaces.Discrete(n_actions)\n\n self.enabled_parallel_transitions = {}\n\n def step(self, action):\n # get disabled actions in current state\n disabled_actions_names, disabled_actions_indexes = self.get_disabled_actions()\n\n # get current state\n current_state = self.get_current_state()\n if self.verbose:\n print('S: ', current_state)\n # print('Enabled Timed transitions : ', self.enabled_parallel_transitions)\n\n # map input action to associated transition\n if action in disabled_actions_indexes:\n transition = None\n else:\n transition = self.action_to_transition(action)\n if self.verbose:\n print('Action: ', action, transition)\n\n if transition != None:\n # apply action\n self.mr_gspn.fire_transition(transition)\n\n # get execution time (until the next decision state)\n # get also the sequence of the fired transitions ['t1', 't2', ...]\n elapsed_time, fired_transitions = self.execute_actions(use_expected_time=self.use_expected_time)\n\n reward = self.reward_function(current_state, transition, elapsed_time)\n\n # in a MRS the fired timed transition may not correspond to the selected action\n # this is the expected time that corresponds to the selected action\n action_expected_time = self.get_action_time(transition)\n\n self.timestamp += elapsed_time\n else:\n raise Exception('Disabled transition selected! 
This is not possible.')\n\n if self.verbose:\n print('Reward: ', reward)\n print('Timestamp: ', self.timestamp)\n print('Action expected time: ', action_expected_time)\n # print(\"S actions disabled: \", disabled_actions_names)\n\n # get enabled actions in the next state\n next_state_enabled_actions_names, next_state_enabled_actions_indexes = self.get_enabled_actions()\n\n # get next state\n next_state = self.marking_to_state()\n # next_state_string = self.get_current_state()\n if self.verbose:\n print(\"S': \", self.get_current_state())\n print(\"Available actions in s': \", next_state_enabled_actions_names)\n print()\n\n episode_done = False\n\n return next_state, reward, episode_done, \\\n {'timestamp': self.timestamp,\n 'disabled_actions': (disabled_actions_names, disabled_actions_indexes),\n 'next_state_enabled_actions': (next_state_enabled_actions_names, next_state_enabled_actions_indexes),\n 'action_time': action_expected_time,\n 'fired_transitions': fired_transitions,\n 'action_to_transition': transition}\n\n def reset(self):\n self.timestamp = 0.0\n self.mr_gspn.reset_simulation()\n next_state = self.marking_to_state()\n self.enabled_parallel_transitions = {}\n\n # get enabled actions in the next state\n next_state_enabled_actions_names, next_state_enabled_actions_indexes = self.get_enabled_actions()\n\n return next_state, {'timestamp': self.timestamp, 'actions_info': [],\n 'disabled_actions': (None, None),\n 'next_state_enabled_actions': (\n next_state_enabled_actions_names, next_state_enabled_actions_indexes),\n 'action_time': None}\n\n def render(self, mode='human'):\n raise Exception('Rendering not implemented')\n\n def close(self):\n self.reset()\n # print('Au Revoir Shoshanna!')\n\n def get_current_state(self):\n return self.mr_gspn.get_current_marking(sparse_marking=True)\n\n def action_to_transition(self, action):\n return self.actions_id_to_name[int(action)]\n\n def marking_to_state(self):\n # map dict marking to list marking\n marking_dict = self.mr_gspn.get_current_marking(sparse_marking=True)\n state = [0]*len(self.mr_gspn.get_current_marking().keys())\n for place_name, number_robots in marking_dict.items():\n token_index = self.mr_gspn.places_to_index[place_name]\n state[token_index] = number_robots\n\n return state\n\n def reward_function(self, sparse_state=None, transition=None, elapsed_time=0.0):\n if self.reward_function_type == 1:\n reward = 0.0\n\n if 'Insp' in transition:\n reward += 10.0\n\n elif self.reward_function_type == 2:\n reward = 0.0\n\n if 'Insp' in transition:\n reward += 500.0\n elif ('Charge' in transition) and (not ('Mobile' in transition)):\n reward += 100.0\n elif (not ('Bat' in transition)) and (not ('Mobile' in transition)):\n reward -= 10.0\n else:\n reward = 0.0\n\n if 'Insp' in transition:\n reward += 10.0\n\n robots_discharged = 0\n for local_state, robots in sparse_state.items():\n if 'Low' in local_state:\n robots_discharged += robots\n\n reward -= robots_discharged*elapsed_time\n\n return reward\n\n def fire_timed_transitions(self, enabled_timed_transitions, use_expected_time):\n if use_expected_time:\n # convert the rate into expected time and store that transition if it was not already stored\n for tr_name, tr_rate in enabled_timed_transitions.copy().items():\n if tr_name not in self.enabled_parallel_transitions:\n self.enabled_parallel_transitions[tr_name] = [1.0 / tr_rate]\n\n n_sampled_times = len(self.enabled_parallel_transitions[tr_name])\n tr_index = self.mr_gspn.transitions_to_index[tr_name]\n arcs_in = 
self.mr_gspn.get_arc_in_m()\n places_dict = self.mr_gspn.get_places()\n input_place_ratios = []\n sample_new_time = True\n for i, tr_coord in enumerate(arcs_in.coords[1]):\n if tr_coord == tr_index:\n place_index = arcs_in.coords[0][i]\n place_name = self.mr_gspn.index_to_places[place_index]\n n_tokens = places_dict[place_name]\n arc_weight = arcs_in.data[i]\n ratio = int(n_tokens/arc_weight)\n # the ratio gives us the number of sampled times that must exist in the\n # parallel dict, for this specific transition\n input_place_ratios.append(ratio)\n if ratio <= n_sampled_times:\n sample_new_time = False\n break\n # sample the amount necessary such that the number of\n # sampled times equals the smallest the place ratio\n if sample_new_time and len(input_place_ratios) > 0:\n while len(self.enabled_parallel_transitions[tr_name]) < min(input_place_ratios):\n self.enabled_parallel_transitions[tr_name].append(1.0 / tr_rate)\n\n else:\n # convert the rate into sampled elapsed time\n # sample from each exponential distribution prob_dist(x) = lambda * exp(-lambda * x)\n # in this case the beta rate parameter is used instead, where beta = 1/lambda\n # store enabled transition if it was not already stored\n for tr_name, tr_rate in enabled_timed_transitions.copy().items():\n if tr_name not in self.enabled_parallel_transitions:\n self.enabled_parallel_transitions[tr_name] = [np.random.exponential(scale=(1.0 / tr_rate),\n size=None)]\n\n n_sampled_times = len(self.enabled_parallel_transitions[tr_name])\n tr_index = self.mr_gspn.transitions_to_index[tr_name]\n arcs_in = self.mr_gspn.get_arc_in_m()\n places_dict = self.mr_gspn.get_places()\n input_place_ratios = []\n sample_new_time = True\n for i, tr_coord in enumerate(arcs_in.coords[1]):\n if tr_coord == tr_index:\n place_index = arcs_in.coords[0][i]\n place_name = self.mr_gspn.index_to_places[place_index]\n n_tokens = places_dict[place_name]\n arc_weight = arcs_in.data[i]\n ratio = int(n_tokens / arc_weight)\n # the ratio gives us the number of sampled times that must exist in the\n # parallel dict, for this specific transition\n input_place_ratios.append(ratio)\n if ratio <= n_sampled_times:\n sample_new_time = False\n break\n # sample the amount necessary such that the number of\n # sampled times equals the smallest place ratio\n if sample_new_time and len(input_place_ratios) > 0:\n while len(self.enabled_parallel_transitions[tr_name]) < min(input_place_ratios):\n self.enabled_parallel_transitions[tr_name].append(np.random.exponential(scale=(1.0 / tr_rate),\n size=None))\n # delete the transitions that were enabled, didn't fire and are not longer enabled\n disabled_transitions = set(self.enabled_parallel_transitions.keys())-set(enabled_timed_transitions.keys())\n for tr_name in disabled_transitions:\n del self.enabled_parallel_transitions[tr_name]\n\n # select the transition with the lowest execution time\n execution_time = np.inf\n for tr_name, tr_time in self.enabled_parallel_transitions.items():\n new_min_time = min(tr_time)\n if new_min_time < execution_time:\n timed_transition = tr_name\n execution_time = new_min_time\n\n transitions_to_fire = []\n transitions_to_fire.append(timed_transition)\n\n # delete transition to be fired\n if len(self.enabled_parallel_transitions[timed_transition]) > 1:\n self.enabled_parallel_transitions[timed_transition].remove(execution_time)\n else:\n del self.enabled_parallel_transitions[timed_transition]\n\n # decreased elapsed time for the remaining enabled transitions\n for tr_name, tr_exp_time in 
self.enabled_parallel_transitions.copy().items():\n new_tr_time = list(np.array(tr_exp_time) - execution_time)\n if any(i <= 0 for i in new_tr_time):\n # according to PN formalism there are never two timed transitions\n # with the same elapsed time\n # instead we should sum a very small time (e.g. 1e-6)\n # to ensure that only 1 transition fires at each time\n # when using expected time this arises more often\n pruned_new_tr_time = []\n for remaining_time in new_tr_time:\n if remaining_time <= 0:\n # transitions_to_fire.append(tr_name)\n pruned_new_tr_time.append(1e-6)\n else:\n pruned_new_tr_time.append(remaining_time)\n\n if len(pruned_new_tr_time) > 0:\n self.enabled_parallel_transitions[tr_name] = pruned_new_tr_time\n else:\n del self.enabled_parallel_transitions[tr_name]\n else:\n self.enabled_parallel_transitions[tr_name] = new_tr_time\n\n for transition_name in transitions_to_fire:\n self.mr_gspn.fire_transition(transition_name)\n return execution_time, transitions_to_fire\n\n def fire_random_switch(self, random_switch):\n if len(random_switch) > 1:\n s = sum(random_switch.values())\n random_switch_id = list(random_switch.keys())\n random_switch_prob = np.zeros(len(random_switch))\n # normalize the associated probabilities\n for idx, tr_info in enumerate(random_switch.items()):\n tr_name = tr_info[0]\n tr_weight = tr_info[1]\n random_switch_id[idx] = tr_name\n random_switch_prob[idx] = tr_weight / s\n\n # Draw from all enabled immediate transitions\n firing_transition = np.random.choice(a=random_switch_id, size=None, p=random_switch_prob)\n else:\n # Fire the only available immediate transition\n firing_transition = list(random_switch.keys())[0]\n\n self.mr_gspn.fire_transition(firing_transition)\n\n def check_actions_state(self, enabled_imm_transitions):\n action_enabled = False\n random_switch_available = False\n for tr_name, tr_rate in enabled_imm_transitions.items():\n if tr_rate == 0:\n action_enabled = True\n elif tr_rate != 0:\n random_switch_available = True\n return action_enabled, random_switch_available\n\n def execute_actions(self, use_expected_time=False):\n enabled_timed_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n\n # check if there is at least one imm transition with weight != 0 and check if there is one with weight == 0\n enabled_actions, random_switch = self.check_actions_state(enabled_imm_transitions)\n\n elapsed_time = 0\n fired_transitions = []\n while random_switch or (not enabled_actions):\n while random_switch:\n self.fire_random_switch(enabled_imm_transitions)\n enabled_timed_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n enabled_actions, random_switch = self.check_actions_state(enabled_imm_transitions)\n\n while (enabled_timed_transitions and not enabled_actions and not random_switch):\n action_elapsed_time, tr_fired = self.fire_timed_transitions(enabled_timed_transitions,\n use_expected_time)\n elapsed_time += action_elapsed_time\n fired_transitions += tr_fired\n enabled_timed_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n enabled_actions, random_switch = self.check_actions_state(enabled_imm_transitions)\n\n return elapsed_time, fired_transitions\n\n def get_disabled_actions(self):\n enabled_actions_names, enabled_actions_indexes = self.get_enabled_actions()\n\n disabled_actions_indexes = list(set(self.actions_id_to_name.keys()) - set(enabled_actions_indexes))\n disabled_actions_names = list(set(self.actions_name_to_id.keys()) - 
set(enabled_actions_names))\n\n return disabled_actions_names, disabled_actions_indexes\n\n def get_enabled_actions(self):\n enabled_exp_transitions, enabled_imm_transitions = self.mr_gspn.get_enabled_transitions()\n\n enabled_actions_indexes = []\n enabled_actions_names = []\n for tr_name, tr_rate in enabled_imm_transitions.items():\n if tr_rate == 0:\n enabled_actions_names.append(tr_name)\n enabled_actions_indexes.append(self.actions_name_to_id[tr_name])\n\n return enabled_actions_names, enabled_actions_indexes\n\n def get_action_time(self, fired_transition):\n associated_timed_tr = fired_transition + '_Finished'\n transition_rate = self.mr_gspn.get_transition_rate(associated_timed_tr)\n action_expected_time = 1.0/transition_rate\n return action_expected_time\n\n # def seed(self, seed=None):\n # self.np_random, seed = seeding.np_random(seed)\n # return [seed]","repo_name":"cazevedo/gspn-gym-env","sub_path":"gspn_gym_env/envs/MultiGSPNenv_v1.py","file_name":"MultiGSPNenv_v1.py","file_ext":"py","file_size_in_byte":17697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"23887462738","text":"import cPickle\nimport string\nfrom nltk.stem.porter import PorterStemmer as ps\nfrom nltk.corpus import stopwords\n\ndef tok_tweet(tweet):\n stemmer=ps()\n tweet = tweet.strip()\n words = tweet.split()\n tokenlist = []\n exclude = set(string.punctuation)\n punc = string.punctuation\n punc = punc.replace('#','')\n exclude_punc = set(punc)\n\n for word in words:\n word = word.strip()\n word = word.lower()\n\n if word in stopwords.words('english'):\n continue\n\n #Replace URLs with @http and then with blank\n if word.startswith('www') or word.startswith('http') or word.startswith(\"@\") or word.isdigit() or word == 'rt':\n continue #ignore if word is a url, @mention or contains only numbers or is a stopword\n nword = ''.join(ch for ch in word if ch not in exclude_punc)\n tokenlist.append(stemmer.stem(nword))\n tokens= tokenlist\n return ' '.join(tokens)\n\ndef processStatuses(statusFile,textFile):\n corpus = ''\n statuses = cPickle.load(open('data/' + statusFile))\n for status in statuses:\n if status.lang == 'en':\n tweet = tok_tweet(status.text)\n corpus += tweet + ' '\n\n with open('data/'+textFile,'a') as outFile:\n outFile.write(corpus.encode('utf-8'))\n\n return corpus\n\n\n#processStatuses('DendiBoss.p','DendiBoss.out')","repo_name":"viveknabhi/Game-AI-Project","sub_path":"processTweets.py","file_name":"processTweets.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"9736443542","text":"def get_pic():\n return makePicture(pickAFile())\n \ndef betterBnW():\n pic = get_pic()\n pixels = getPixels(pic)\n for p in pixels:\n avg_color = avg_color = (getRed(p)*.299 + getGreen(p)*.587 + getBlue(p)*.114)\n newColor = makeColor(avg_color,avg_color,avg_color)\n setColor(p, newColor)\n return pic\n\n \ndef line_drawing(tolerance):\n drawing = betterBnW()\n width = getWidth(drawing)\n height = getHeight(drawing)\n for x in range(0, width-1):\n for y in range(0, height-1):\n px = getPixel(drawing, x, y)\n main_pixel = getColor(px)\n \n px_right = getPixel(drawing, x+1, y)\n right_pixel = getColor(px_right)\n \n px_bottom = getPixel(drawing, x, y+1)\n bottom_pixel = getColor(px_bottom)\n \n right_distance = distance(main_pixel, right_pixel)\n bottom_distance = distance(main_pixel, bottom_pixel)\n \n if right_distance < tolerance and bottom_distance < 
tolerance:\n setColor(px, white)\n else:\n setColor(px, black)\n show(drawing)\n \ndef line_drawing2(tolerance):\n drawing = betterBnW()\n width = getWidth(drawing)\n height = getHeight(drawing)\n for x in range(0, width-1):\n for y in range(0, height-1):\n px = getPixel(drawing, x, y)\n main_pixel = getRed(px)\n \n px_right = getPixel(drawing, x+1, y)\n right_pixel = getRed(px_right)\n \n px_bottom = getPixel(drawing, x, y+1)\n bottom_pixel = getRed(px_bottom)\n \n right_distance = abs(main_pixel - right_pixel)\n bottom_distance = abs(main_pixel - bottom_pixel)\n \n if right_distance > tolerance and bottom_distance > tolerance:\n setColor(px, black)\n else:\n setColor(px, white)\n show(drawing)","repo_name":"rogerterrill-csumb/CST205","sub_path":"CST205/linedrawing.py","file_name":"linedrawing.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"74410101668","text":"import webbrowser\n\nclass Game():\n \"\"\"This class provides a way to store game related information\"\"\"\n VALID_RATINGS = [\"G\", \"PG\", \"PG-13\", \"R\"]\n\n def __init__(self, game_title, poster_image,\n trailer_youtube):\n \"\"\"Initiates a movie Object with the specified arguments\"\"\"\n self.title = game_title\n self.poster_image_url = poster_image\n self.trailer_youtube_url = trailer_youtube\n\n def show_trailer(self):\n \"\"\"Opens game trailer in the webbrowser\"\"\"\n webbrowser.open(self.trailer_youtube_url)\n","repo_name":"AngelTX/FavoriteGamesWebsite","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"73242882470","text":"def merge(s1, s2, s):\n \"\"\"Merge two sorted lists s1, s2 into properly sized list z\"\"\"\n i = j = 0\n while i + j < len(s):\n if j == len(s2) or (i < len(s1) and s1[i] < s2[j]):\n s[i+j] = s1[i] # copy ith element of s1 as next item of s\n i += 1\n else:\n s[i+j] = s2[j] # copy jth element of s2 as next item of s\n j += 1\n\n","repo_name":"asadrazaa1/Python-Data-Structures-and-Algorithms","sub_path":"Sortings/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"69880374949","text":"from .rule_mining.frequent_metapath_mining import FrequentMetapathSetMiner\nfrom .rule_mining.unification_mining import MetapathPatternUnificationMiner\nfrom .rule_mining.rule_pruning import RulePruner\nfrom .rule_mining.path_thresholds_optimisation import PathThresholdOptimiser\nfrom .rule_mining.rule_querying import MetapathRuleMatcher\nfrom .rule_mining.rule import Rule\nfrom .model.decision_set_classifier import DecisionSetClassifier\nimport numpy as np\nimport time\n\n__author__ = \"Alexandre Renaux\"\n__copyright__ = \"Copyright (c) 2023 Alexandre Renaux - Universite Libre de Bruxelles - Vrije Universiteit Brussel\"\n__license__ = \"MIT\"\n__version__ = \"1.0.1\"\n\n\ndef train_decision_set_model(relevant_rules, training_positives, training_negatives, sample_to_weight, algo_params, cpu_cores=0):\n \"\"\"\n Utils methods to train a decision set classifier with the given relevant rules and training data.\n\n Parameters:\n - relevant_rules: List of relevant rules for training\n - training_positives: List of positive training instances (gene pairs)\n - training_negatives: List of negative training instances (gene pairs)\n - 
sample_to_weight: Dictionary mapping instances to their weights (optional)\n - alpha: Trade-off parameter for combining true positive rate and false positive rate\n\n Returns:\n - rule_set_classifier: Trained rule set classifier\n \"\"\"\n alpha = algo_params[\"alpha\"]\n rule_matcher = MetapathRuleMatcher(algo_params)\n\n sample_list = training_positives + training_negatives\n X_train_list = []\n y_train_list = []\n sample_weight_list = []\n for gene_pair in sample_list:\n X_train_list.append([gene_pair, None])\n y_train_list.append(1 if gene_pair in training_positives else 0)\n if sample_to_weight:\n sample_weight_list.append(sample_to_weight[gene_pair])\n X_train = np.array(X_train_list, dtype=object)\n y_train = np.array(y_train_list)\n sample_weight = np.array(sample_weight_list) if sample_to_weight else None\n\n # Model training\n rule_set_classifier = DecisionSetClassifier(relevant_rules, rule_matcher, alpha, cpu_cores=cpu_cores)\n rule_set_classifier.fit(X_train, y_train, sample_weight=sample_weight)\n\n return rule_set_classifier\n\n\ndef mine_relevant_rules(training_positives, training_negatives, metapath_dict, sample_to_weight, algo_params, sample_name, update_cache=False, pproc=None):\n '''\n Utils method to mine relevant metapath-based rules for the given training data and apply pruning methods.\n\n Parameters:\n - training_positives: List of positive training instances (entity pairs)\n - training_negatives: List of negative training instances (entity pairs)\n - metapath_dict: Dictionary mapping entity pairs to their metapaths\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - algo_params: Dictionary of all framework parameters\n - sample_name: Name of the analysis sample (for caching)\n - update_cache: Boolean flag to update the cache\n - pproc: Parallel processing context\n\n Returns:\n - relevant_rules: List of relevant rules\n '''\n rule_list, positive_matches_to_rule_ids, t1 = mine_candidate_rules(training_positives, metapath_dict, sample_to_weight, algo_params, sample_name, pproc=pproc, update_cache=update_cache)\n relevant_rules, t2 = apply_and_prune_rules(rule_list, positive_matches_to_rule_ids, training_negatives, metapath_dict, sample_to_weight, algo_params, sample_name, pproc=pproc, update_cache=update_cache)\n elapsed_time = t1 + t2\n return relevant_rules, elapsed_time\n\n\ndef mine_candidate_rules(training_positives, metapath_dict, sample_to_weight, algo_params, sample_name, update_cache=False, pproc=None):\n '''\n Utils method to mine candidate metapath-based rules for the given training data.\n Parameters:\n - training_positives: List of positive training instances (entity pairs)\n - metapath_dict: Dictionary mapping entity pairs to their metapaths\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - algo_params: Dictionary of all framework parameters\n - sample_name: Name of the analysis sample (for caching)\n - update_cache: Boolean flag to update the cache\n - pproc: Parallel processing context\n '''\n\n metapath_dict_positive = {key: metapath_dict[key] for key in training_positives}\n\n # Pattern mining from positive instances\n pattern_to_pos_matches, t1 = FrequentMetapathSetMiner(algo_params).run(metapath_dict_positive, sample_to_weight, sample_name, pproc=pproc, update_cache=update_cache)\n pattern_to_pos_matches, t2 = MetapathPatternUnificationMiner(algo_params).run(pattern_to_pos_matches, metapath_dict_positive, sample_to_weight, sample_name, pproc=pproc, update_cache=update_cache)\n 
pattern_to_pos_matches, t3 = RulePruner(algo_params).prune_non_closed_itemsets(pattern_to_pos_matches)\n pattern_to_pos_matches, t4 = PathThresholdOptimiser(algo_params).run(pattern_to_pos_matches, metapath_dict_positive, sample_to_weight, sample_name, pproc=pproc, update_cache=update_cache)\n\n # Generating the set of candidate rules\n start = time.process_time()\n positive_matches_to_rule_ids = {}\n for positive_match in training_positives:\n positive_matches_to_rule_ids[positive_match] = set()\n rule_list = []\n rule_id = 1\n for pattern, pos_matches in sorted(pattern_to_pos_matches.items(), key=lambda x: x[0]):\n rule = Rule(rule_id, pattern, 1, pos_matches)\n rule_list.append(rule)\n for pos_match in pos_matches:\n positive_matches_to_rule_ids[pos_match].add(rule_id)\n rule_id += 1\n\n elapsed_time = t1+t2+t3+t4 + (time.process_time() - start)\n\n return rule_list, positive_matches_to_rule_ids, elapsed_time\n\n\ndef apply_and_prune_rules(rule_list, positive_matches_to_rule_ids, training_negatives, metapath_dict, sample_to_weight, algo_params, sample_name, update_cache=False, pproc=None):\n '''\n Utils method to apply and prune candidate metapath-based rules for the given training data.\n Parameters:\n - rule_list: List of candidate rules\n - positive_matches_to_rule_ids: Dictionary mapping positive instances to their rules\n - training_negatives: List of negative training instances (entity pairs)\n - metapath_dict: Dictionary mapping entity pairs to their metapaths\n - sample_to_weight: Dictionary mapping instances to their weights (optional)\n - algo_params: Dictionary of all framework parameters\n - sample_name: Name of the analysis sample (for caching)\n - update_cache: Boolean flag to update the cache\n - pproc: Parallel processing context\n Returns:\n - valid_rules: List of valid rules\n '''\n metapath_dict_negative = {key: metapath_dict[key] for key in training_negatives}\n negative_matches_to_rule_ids, t1 = MetapathRuleMatcher(algo_params).run(rule_list, metapath_dict_negative, sample_name, pproc=pproc, update_cache=update_cache)\n valid_rules, t2 = RulePruner(algo_params).prune_and_get_rules(rule_list, positive_matches_to_rule_ids, negative_matches_to_rule_ids, sample_to_weight)\n elapsed_time = t1+t2\n return valid_rules, elapsed_time\n\n\n\n","repo_name":"oligogenic/ARBOCK","sub_path":"arbock/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":7191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"29503505039","text":"# This script prepares the data to be used in plots showing vaccination coverage over time\n# Data is given for different dose levels, and different age groups\n# RECOVAC provides data\n# Data given for 3 age ranges - 18+, 18-59, and 60+\n# Data given for first 4 doses\n# Graph will be 'area under the curve'\nimport pandas as pd\nfrom datetime import datetime as dt\n\n# first load data.\n# 18+ age\nRECO_18plus = pd.read_excel(\n \"data/vacc_pop_18plus.xlsx\",\n sheet_name=\"Sheet1\",\n header=0,\n engine=\"openpyxl\",\n keep_default_na=False,\n)\n\n# 18-59 age\nRECO_18to59 = pd.read_excel(\n \"data/vacc_pop_18-59.xlsx\",\n sheet_name=\"Sheet1\",\n header=0,\n engine=\"openpyxl\",\n keep_default_na=False,\n)\n\n# 60+ age\nRECO_60plus = pd.read_excel(\n \"data/vacc_pop_60plus.xlsx\",\n sheet_name=\"Sheet1\",\n header=0,\n engine=\"openpyxl\",\n keep_default_na=False,\n)\n\n# function to change the date:\ndef date_func(dataset):\n dataset[[\"Year\", \"Week\"]] = (\n 
dataset[\"wk\"].str.split(\"w\", expand=True).astype(int)\n ) # break apart week and year\n dataset[\"day\"] = 1 # set day as Monday\n dataset.drop(dataset[(dataset[\"Year\"] == 2019)].index, inplace=True)\n dataset[\"date\"] = dataset.apply(\n lambda row: dt.fromisocalendar(row[\"Year\"], row[\"Week\"], row[\"day\"]), axis=1\n )\n pd.to_datetime(dataset[\"date\"])\n dataset.drop(columns=[\"Week\", \"Year\", \"day\", \"wk\"], axis=1, inplace=True)\n dataset[\"date\"] = dataset[\"date\"].astype(str)\n # print(dataset.head())\n\n\n# Need to do some calculations to get JUST those with 1 dose, with 2 doses.. and calc. 0 doses\n# In the original dataset e.g. one dose included anyone that had had a dose (so includes 2 dose, 3 dose..)\ndef calc_func(dataset):\n # need to work out proportions UNVACCINATED - sum rest and minus from 1\n dataset.replace(r\"^\\s*$\", 0.0, regex=True, inplace=True)\n dataset[\"no_dose\"] = (1 - dataset[\"vacc1\"]) * 100\n dataset[\"one_dose\"] = (dataset[\"vacc1\"] - dataset[\"vacc2\"]) * 100\n dataset[\"two_dose\"] = (dataset[\"vacc2\"] - dataset[\"vacc3\"]) * 100\n dataset[\"three_dose\"] = (dataset[\"vacc3\"] - dataset[\"vacc4\"]) * 100\n dataset[\"four_dose\"] = dataset[\"vacc4\"] * 100\n dataset.drop(columns=[\"vacc1\", \"vacc2\", \"vacc3\", \"vacc4\"], axis=1, inplace=True)\n # print(dataset.head())\n\n\n# make a list of datasets on which to perform the function\n\ndatasets = [RECO_18plus, RECO_18to59, RECO_60plus]\n\n# run the functions to recalculate the proportions and format the date\nfor x in datasets:\n date_func(x)\n\nfor y in datasets:\n calc_func(y)\n","repo_name":"ScilifelabDataCentre/covid-portal-visualisations","sub_path":"RECOVAC/Swedishpop_vaccinecov_dataprep.py","file_name":"Swedishpop_vaccinecov_dataprep.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"71"} +{"seq_id":"40734973267","text":"\"\"\"Bugal Utils Serializers\"\"\"\n\n# Django REST Frameworks\nfrom rest_framework import serializers\n\n# Model\nfrom bugal.base.models import (\n Country, State, Gender\n)\n\n\nclass CountryModelSerializer(serializers.ModelSerializer):\n \"\"\"Country model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = Country\n fields = (\n 'id',\n 'name',\n 'short_name'\n )\n\n\nclass StateModelSerializer(serializers.ModelSerializer):\n \"\"\"Country model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = State\n fields = (\n 'id',\n 'name',\n 'short_name'\n )\n\n\nclass GenderModelSerializer(serializers.ModelSerializer):\n \"\"\"Gender model serializer\"\"\"\n\n class Meta:\n \"\"\"Meta class.\"\"\"\n\n model = Gender\n fields = (\n 'id',\n 'identity'\n )\n","repo_name":"aquitania99/bugal-app","sub_path":"bugal/base/utils/serializers/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"72169075751","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer, TfidfVectorizer, HashingVectorizer\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nimport numpy as np\nimport pandas as pd\nimport re\nfrom nltk.stem.porter import PorterStemmer\nimport nltk\nfrom nltk.corpus import stopwords\nimport warnings\nimport 
pyprind\n\nwarnings.filterwarnings(\"ignore\") # 忽略warning\n\n\n# nltk.download(\"stopwords\")\n\n\ndef preprocessor(text):\n text = re.sub(\"<[^>]*>\", \"\", text)\n emoticons = re.findall(\"(?::|;|=)(?:-)?(?:\\)|\\(|D|P)\", text)\n text = re.sub(\"[\\W]+\", \" \", text.lower()) + \"\".join(emoticons).replace(\"-\", \"\")\n return text\n\n\ndef tokenizer(text):\n stop = stopwords.words(\"english\")\n text = re.sub(\"<[^>]*>\", \"\", text)\n emoticons = re.findall(\"(?::|;|=)(?:-)?(?:\\)|\\(|D|P)\", text)\n text = re.sub(\"[\\W]+\", \" \", text.lower()) + \"\".join(emoticons).replace(\"-\", \"\")\n tokenized = [w for w in text.split() if w not in stop]\n return tokenized\n\n\ndef stream_docs(path):\n with open(path, \"r\") as csv:\n next(csv)\n for line in csv:\n text, label = line[:-3], int(line[-2])\n yield text, label\n\n\ndef get_minibatch(doc_stream, size):\n docs, y = [], []\n try:\n for _ in range(size):\n text, label = next(doc_stream)\n docs.append(text)\n y.append(label)\n except StopIteration:\n return None, None\n return docs, y\n\n\ndef tokenizer_porter(text):\n porter = PorterStemmer()\n return [porter.stem(word) for word in text.split()]\n\n\n# pbar = pyprind.ProgBar(50000)\n# print(pbar)\n# labels = {\"pos\": 1, \"neg\": 0}\n#\n# df = pd.DataFrame()\n# for s in (\"test\", \"train\"):\n# for l in (\"pos\", \"neg\"):\n# path = \"./aclImdb/%s/%s\" % (s, l)\n# for file in os.listdir(path):\n# with open(os.path.join(path, file), \"r\") as infile:\n# txt = infile.read()\n# df = df.append([[txt, labels[l]]], ignore_index=True)\n# pbar.update()\n#\n# print(df)\n# preprocessor\n# np.random.seed(0)\n# df = df.reindex(np.random.permutation(df.index))x_train\n# df.to_csv(\"./movie_data.csv\", index=False)\n# count = CountVectorizer()\n# docs = np.array([\"The sun is shining\", \"The weather is sweet\", \"The sum is shining and the weather is sweet\"])\n# bag = count.fit_transform(docs)\n#\n# print(count.vocabulary_)\n#\n# print(bag.toarray()) # 值是出现了几次,索引是 count.vocabulary_ 的值\n#\n# tfidf = TfidfTransformer()\n# np.set_printoptions(precision=2)\n# print(tfidf.fit_transform(bag.toarray()).toarray())\n\n\ndf = pd.read_csv(\"./movie_data.csv\")\n\n# stop = stopwords.words(\"english\")\n# print([w for w in tokenizer_porter(\"a runner likes and runs a lot\")[-10:] if w not in stop])\n#\n# x_train = df.loc[:100, \"review\"].values\n# y_train = df.loc[:100, \"sentiment\"].values\n# x_test = df.loc[100:, \"review\"].values\n# y_test = df.loc[100:, \"sentiment\"].values\n# # print(x_train, y_train, x_test, y_test)\n#\n# tfidf = TfidfVectorizer(strip_accents=None, lowercase=False, preprocessor=None)\n# param_grid = [\n# {\"vect__ngram_range\": [(1, 1)], \"vect__stop_words\": [stop, None],\n# \"vect__tokenizer\": [tokenizer, tokenizer_porter], \"clf__penalty\": [\"l1\", \"l2\"],\n# \"clf__C\": [1.0, 10.0, 100.0]},\n# {\"vect__ngram_range\": [(1, 1)], \"vect__stop_words\": [stop, None],\n# \"vect__tokenizer\": [tokenizer, tokenizer_porter], \"vect__use_idf\": [False], \"vect__norm\": [None],\n# \"clf__penalty\": [\"l1\", \"l2\"], \"clf__C\": [1.0, 10.0, 100.0]}\n# ]\n# lr_tfidf = Pipeline([(\"vect\", tfidf), (\"clf\", LogisticRegression(random_state=0))])\n# gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid, scoring=\"accuracy\", cv=5, verbose=1, n_jobs=-1)\n# gs_lr_tfidf.fit(x_train, y_train)\n# print(\"Best parameter set: %s\" % gs_lr_tfidf.best_params_)\n# print(\"CV Accuracy: %.3f\" % gs_lr_tfidf.best_score_)\n# clf = gs_lr_tfidf.best_estimator_\n# print(\"Test Accuracy: %.3f\" % 
clf.score(x_test, y_test))\n\n\n# s = stream_docs(\"./movie_data.csv\")\n# k = 0\n# for i, j in s:\n# print(k, i, j)\n# k += 1\nvect = HashingVectorizer(decode_error=\"ignore\", n_features=2**21, preprocessor=None, tokenizer=tokenizer)\nclf = SGDClassifier(loss=\"log\", random_state=1, n_iter_no_change=1)\ndoc_stream = stream_docs(\"./movie_data.csv\")\n\npbar = pyprind.ProgBar(5)\nclasses = np.array([0, 1])\nfor _ in range(5):\n x_train, y_train = get_minibatch(doc_stream, size=1000)\n if not x_train:\n break\n x_train = vect.transform(x_train)\n clf.partial_fit(x_train, y_train, classes=classes)\n pbar.update()\n\nx_test, y_test = get_minibatch(doc_stream, size=1000)\nx_test = vect.transform(x_test)\nprint(\"Test Accuracy: %.3f\" % clf.score(x_test, y_test))\n","repo_name":"yaowenfeng1994/machine_learning_learn","sub_path":"sentiment_analysis.py","file_name":"sentiment_analysis.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"31824326654","text":"# input: ['wish','you','a' ,'very' ,'happy', 'new','year']\n# output:['hsiw','a','yppah','raey']\n\nsen=input(\"enter a sentence \")\nl=sen.split()\nprint(l)\ni=0\nl2=[]\nwhile i list:\n result = {}\n for client, client_data in clients.items():\n for i in range(len(client_data['packages'])):\n result[client_data['packages'][i]]=client\n return result\n\n @staticmethod\n def get_manhattan_distance(source, destination):\n return abs(source[0] - destination[0]) + abs(source[1] - destination[1])\n\n def __init__(self, initial):\n \"\"\"Don't forget to implement the goal test\n You should change the initial to your own representation.\n search.Problem.__init__(self, initial) creates the root node\"\"\"\n self.map = initial[\"map\"]\n self.packages_locations = initial[\"packages\"]\n # ignore packages that are not required by any client, because we don't need to deliver\n self.needed_packages = self.get_needed_packages(initial[\"clients\"])\n self.clients = initial[\"clients\"]\n for drone, pos in initial[\"drones\"].items():\n initial[\"drones\"][drone] = [pos, []]\n #remaining_packages = list(filter(lambda package: package in self.packages_locations.keys(), self.needed_packages))\n remaining_packages=list(self.needed_packages.keys())\n client_index_in_path_dict = dict([(client_name, 0) for client_name in self.clients])\n initial = [initial[\"drones\"], remaining_packages, [], client_index_in_path_dict]\n search.Problem.__init__(self, pickle.dumps(initial))\n\n def actions(self, state):\n \"\"\"Returns all the actions that can be executed in the given\n state. 
The result should be a tuple (or other iterable) of actions\n as defined in the problem description file\"\"\"\n\n state = pickle.loads(state)\n clients_index_in_path_dict = state[3]\n drones_locations_and_current_packages_list_dict = state[0]\n remaining_packages = state[1]\n all_drone_actions = []\n clients_index_in_path_dict=state[3]\n for drone_name, location_and_current_packages in drones_locations_and_current_packages_list_dict.items():\n drone_location = location_and_current_packages[0]\n this_drone_actions = []\n\n # check where the drone can move\n # right\n if drone_location[1] + 1 < len(self.map[0]) and self.map[drone_location[0]][drone_location[1] + 1] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0], drone_location[1] + 1)))\n # left\n if drone_location[1] > 0 and self.map[drone_location[0]][drone_location[1] - 1] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0], drone_location[1] - 1)))\n # down\n if drone_location[0] + 1 < len(self.map) and self.map[drone_location[0] + 1][drone_location[1]] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0] + 1, drone_location[1])))\n # up\n if drone_location[0] > 0 and self.map[drone_location[0] - 1][drone_location[1]] == 'P':\n this_drone_actions.append((\"move\", drone_name, (drone_location[0] - 1, drone_location[1])))\n\n drone_packages = location_and_current_packages[1]\n # check if the drone can pick up a package\n\n if len(drone_packages) < 2:\n for package in remaining_packages:\n if self.packages_locations[package][0] == drone_location[0] \\\n and self.packages_locations[package][1] == drone_location[1]:\n this_drone_actions.append((\"pick up\", drone_name, package))\n\n # check if the drone can drop package\n if len(drone_packages)!=0:\n for i in range (len(drone_packages)):\n the_package=drone_packages[i]\n client_that_want_our_package=self.needed_packages[the_package]\n client_path = self.clients[client_that_want_our_package][\"path\"]\n client_index_in_path = clients_index_in_path_dict[client_that_want_our_package]\n client_location = client_path[client_index_in_path]\n if client_location[0] == drone_location[0] and client_location[1] == drone_location[1]:\n this_drone_actions.append((\"deliver\", drone_name, client_that_want_our_package,the_package))\n\n this_drone_actions.append((\"wait\", drone_name))\n\n all_drone_actions.append(this_drone_actions)\n\n # merge the drones actions\n # need to remove actions where two different drones pick same package\n merged_actions = list(itertools.product(*all_drone_actions))\n merged_actions = list(filter(self.is_legal_action, merged_actions))\n #merged_actions = list(tuple(action) for action in merged_actions)\n return merged_actions\n\n \"\"\"\n verifies that the action is legal\n this means that 2 drones can't pick up the same package\n \"\"\"\n @staticmethod\n def is_legal_action(action):\n for i in range(len(action)):\n if action[i][0] == \"pick up\":\n for j in range(i + 1, len(action)):\n if action[j][0] == \"pick up\" and action[i][2] == action[j][2]:\n return False\n return True\n\n def result(self, state, action):\n \"\"\"Return the state that results from executing the given\n action in the given state. 
The action must be one of\n self.actions(state).\"\"\"\n state = pickle.loads(state)\n clients_index_in_path_dict = state[3]\n drones_locations_and_current_list_dict = state[0]\n remaining_packages = state[1]\n dropped_packages = state[2]\n\n # apply action for all drones\n for drone_action in action:\n if drone_action[0] == \"move\":\n drone_to_move = drone_action[1]\n drone_new_location = drone_action[2]\n drones_locations_and_current_list_dict[drone_to_move][0] = drone_new_location\n if drone_action[0] == \"pick up\":\n drone_that_picks = drone_action[1]\n package_to_pick = drone_action[2]\n remaining_packages.remove(package_to_pick)\n drones_locations_and_current_list_dict[drone_that_picks][1].append(package_to_pick)\n if drone_action[0] == \"deliver\":\n drone_that_delivers = drone_action[1]\n package_to_deliver = drone_action[3]\n drones_locations_and_current_list_dict[drone_that_delivers][1].remove(package_to_deliver)\n dropped_packages.append(package_to_deliver)\n # add the package to the delivered packages after it is added\n if drone_action[0] == \"wait\":\n pass\n\n # move the clients\n for client_name in self.clients:\n client_path_len = len(self.clients[client_name][\"path\"])\n clients_index_in_path_dict[client_name] += 1\n clients_index_in_path_dict[client_name] = clients_index_in_path_dict[client_name] % client_path_len\n state = [drones_locations_and_current_list_dict, remaining_packages, dropped_packages, clients_index_in_path_dict]\n return pickle.dumps(state)\n\n def goal_test(self, state):\n \"\"\" Given a state, checks if this is the goal state.\n Returns True if it is, False otherwise.\"\"\"\n state = pickle.loads(state)\n dropped_packages = state[2]\n if len(self.needed_packages) == len(dropped_packages):\n return True\n return False\n\n def h(self, node):\n \"\"\" This is the heuristic. 
It gets a node (not a state,\n state can be accessed via node.state)\n and returns a goal distance estimate\"\"\"\n\n if self.goal_test(node.state):\n return 0\n state = pickle.loads(node.state)\n remaining_package = state[1]\n remaining_packages=copy.copy(remaining_package)\n drone_locations_and_current_packages_dict = state[0]\n all_h = []\n score=0\n for drone_name, location_and_current_packages in drone_locations_and_current_packages_dict.items():\n num_of_packages = len(location_and_current_packages[1])\n if num_of_packages == 2:\n dis,pak=self.get_distance_from_closest_client(location_and_current_packages[0], location_and_current_packages[1], state)\n all_h.append(dis)\n score+=1\n\n\n # can't drop packages, try to get closer to the closest package\n if num_of_packages == 0:\n if len(remaining_packages)>0:\n dis,pak=self.get_distance_from_closest_package_for_2(location_and_current_packages[0], remaining_packages, state)\n all_h.append(dis)\n remaining_packages.remove(pak)\n else:\n all_h.append(0)\n if num_of_packages == 1:\n if len(remaining_packages)>0:\n dis,pak= self.get_distance_from_closest_package_for_2(location_and_current_packages[0], remaining_packages,state)\n dis2,pak2= self.get_distance_from_closest_client(location_and_current_packages[0], location_and_current_packages[1], state)\n all_h.append(min(dis,dis2))\n if dis < dis2:\n remaining_packages.remove(pak)\n else:\n dis2, pak2 = self.get_distance_from_closest_client(location_and_current_packages[0],\n location_and_current_packages[1], state)\n all_h.append(dis2)\n\n\n\n\n dropped_packages = state[2]\n all_h = list(filter(lambda x: x is not None, all_h))\n if not all_h:\n # check for cases that all_h didn't have values that are not None, can happen in unsolvable problems\n all_h.append(1)\n max_dist = max(all_h)\n avg_dist = sum(all_h) / len(all_h)+ 1\n res = node.depth + len(set(self.needed_packages) - set(dropped_packages)) * avg_dist + len(\n remaining_packages) * avg_dist + max_dist\n\n return res\n\n\n def get_distance_from_closest_client(self, location, current_packages,state):\n clients_index_in_path_dict = state[3]\n distances_from_clients = []\n for i in range(len(current_packages)):\n clients_that_want_one_of_our_packages = self.needed_packages[current_packages[i]]\n client_path = self.clients[clients_that_want_one_of_our_packages][\"path\"]\n client_path_len=len(client_path)\n client_index_in_path = clients_index_in_path_dict[clients_that_want_one_of_our_packages]+1\n client_location = client_path[(client_index_in_path) % client_path_len]\n distances_from_client = self.get_manhattan_distance(location,client_location)\n distances_from_clients.append(distances_from_client)\n return min(distances_from_clients),0\n\n def get_distance_from_closest_package_for_2(self, location, packages,state):\n if not packages:\n return 0,0\n\n\n min=1000000000\n for package in packages:\n distances_from_packages=self.get_manhattan_distance(location, self.packages_locations[package])\n client_that_want_one_of_our_packages = self.needed_packages[package]\n client_path = self.clients[client_that_want_one_of_our_packages][\"path\"]\n client_path_len = len(client_path)\n client_index_in_path = state[3][client_that_want_one_of_our_packages]\n client_location = client_path[(client_index_in_path) % client_path_len]\n dist_from_client=self.get_manhattan_distance(self.packages_locations[package], client_location)\n dist = distances_from_packages+dist_from_client\n if dist < min:\n min=dist\n package_deliverd=package\n\n\n\n return 
min,package_deliverd\n \"\"\"Feel free to add your own functions\n (-2, -2, None) means there was a timeout\"\"\"\n\n\ndef create_drone_problem(game):\n return DroneProblem(game)\n","repo_name":"ShenhavOfir/DroneProblem","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":12479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"35713154750","text":"#! /usr/bin/python3\nimport os\nimport datetime\nimport re\nimport iota.harness.api as api\n\n__upg_log_path = \"/obfl/upgrade.log\"\n__upg_log_fname = \"upgrade.log\"\n__ERROR = \"E\"\n\ndef __find_err_in_upg_log(node, records):\n found_error = False\n\n for r in records:\n if r['lvl'] == __ERROR:\n api.Logger.error(f\"Found error message in upg log on {node}: {r['raw']}\")\n found_error = True\n return api.types.status.FAILURE if found_error else api.types.status.SUCCESS\n\ndef __is_record_type_state(record):\n return True if record and \"MoveStateMachine\" in record['fname'] else False\n\ndef __get_upg_log_fname_from_node(node, log_dir):\n return f\"{log_dir}/upgrade_{node}.log\"\n\ndef __get_datetime_from_record(record):\n if record:\n return datetime.datetime.strptime(record['ts'],\"%Y-%m-%d %H:%M:%S.%f+00:00\")\n return ts\n\ndef __disset_upg_log(node, logs):\n records = []\n\n for log in logs:\n r_exp = r\"(?P[I,D,E]) \\[(?P.*)\\] \\((?P.*)\\) \\[(?P.*)\\] (?P.*)\"\n m = re.search(r_exp, log)\n if m:\n records.append({e: m.group(e) for e in [\"lvl\", \"ts\", \"tid\", \"fname\", \"msg\"]})\n records[-1][\"raw\"] = log\n else:\n api.Logger.error(f\"Failed to dissect log on {node} : {log}\")\n return records\n\ndef __calculate_upg_state_duration(node, records):\n last_ts = None\n\n for r in reversed(records):\n if not __is_record_type_state(r):\n continue\n if last_ts == None:\n last_ts = __get_datetime_from_record(r)\n r['duration'] = 0\n else:\n r['duration'] = last_ts - __get_datetime_from_record(r)\n last_ts = __get_datetime_from_record(r)\n\ndef __dump_upg_log(node, logs):\n api.Logger.SetNode(node)\n indent = \"-\" * 25\n api.Logger.info(f\"{indent} U P G R A D E L O G S {indent}\")\n for log in logs:\n api.Logger.info(log)\n api.Logger.SetNode(None)\n\ndef __display_upg_state_transition(node, records):\n __calculate_upg_state_duration(node, records)\n api.Logger.SetNode(node)\n indent = \"-\" * 25\n api.Logger.info(\"\\n\")\n api.Logger.info(f\"{indent} U P G R A D E S T A T E T R A N S I T I O N {indent}\")\n for r in records:\n if __is_record_type_state(r):\n api.Logger.info(\"- {} {:<45} {}\".format(r['ts'], r['msg'], r['duration']))\n api.Logger.info(\"Total Time : %s\\n\\n\"%(__get_datetime_from_record(records[-1]) - \\\n __get_datetime_from_record(records[1])))\n api.Logger.SetNode(None)\n\ndef ResetUpgLog(nodes):\n nodes = nodes if nodes else api.GetNaplesWorkloads()\n req = api.Trigger_CreateExecuteCommandsRequest(serial=False)\n\n for node in nodes:\n cmd = f\":>{__upg_log_path}\"\n api.Trigger_AddNaplesCommand(req, node, cmd)\n\n resp = api.Trigger(req)\n for cmd in resp.commands:\n api.PrintCommandResults(cmd)\n if cmd.exit_code != 0:\n api.Logger.error(f\"Failed to reset upgrade log on {cmd.node_name}\")\n return api.types.status.FAILURE\n return api.types.status.SUCCESS\n\ndef __dhcp_oob_mnic0(nodes):\n nodes = nodes if nodes else api.GetNaplesWorkloads()\n dhclient_cmd = \"dhclient oob_mnic0\"\n req = api.Trigger_CreateExecuteCommandsRequest(serial=False)\n\n for node in nodes:\n api.Trigger_AddNaplesCommand(req, node, dhclient_cmd)\n\n 
resp = api.Trigger(req)\n for cmd in resp.commands:\n api.PrintCommandResults(cmd)\n if cmd.exit_code != 0:\n api.Logger.error(f\"Failed to run dhclient on {cmd.node_name}\")\n return api.types.status.FAILURE\n return api.types.status.SUCCESS\n\ndef GetUpgLog(nodes, log_dir):\n nodes = nodes if nodes else api.GetNaplesWorkloads()\n file_name = f\"{log_dir}/{__upg_log_fname}\"\n for node in nodes:\n if __dhcp_oob_mnic0([node]) != api.types.status.SUCCESS:\n return api.types.status.FAILURE\n api.CopyFromNaples(node, [__upg_log_path], log_dir, via_oob=True)\n if os.path.exists(file_name):\n os.rename(file_name, __get_upg_log_fname_from_node(node, log_dir))\n else:\n api.Logger.error(f\"Upgrade logs for {node} not found @ {file_name}\")\n return api.types.status.FAILURE\n return api.types.status.SUCCESS\n\ndef VerifyUpgLog(nodes, log_dir):\n for node in nodes:\n if GetUpgLog([node], log_dir) != api.types.status.SUCCESS:\n api.Logger.error(f\"Failed to get the upgrade log for {node}\")\n return api.types.status.FAILURE\n\n with open(__get_upg_log_fname_from_node(node, log_dir)) as f:\n logs = f.readlines()\n if not logs:\n api.Logger.error(f\"Failed to read logs from {node}\")\n return api.types.status.FAILURE\n __dump_upg_log(node, logs)\n\n records = __disset_upg_log(node, logs)\n if not records:\n api.Logger.error(f\"Failed to dissect the upgrade logs from {node}\")\n return api.types.status.FAILURE\n\n if __find_err_in_upg_log(node, records) != api.types.status.SUCCESS:\n return api.types.status.FAILURE\n\n __display_upg_state_transition(node, records)\n\n return api.types.status.SUCCESS\n","repo_name":"ccdxc/sw","sub_path":"iota/test/iris/testcases/naples_upgrade/upgrade_utils.py","file_name":"upgrade_utils.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"70089195751","text":"import torch\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nimport torch.utils.data as Data\n\nimport matplotlib.pyplot as plt\n#matplotlib inline\n\nimport numpy as np\nimport imageio\nfrom numpy import genfromtxt\n\n\n# settings\nTRAIN = True\n\n# read the training data\nrepo = 'ingripper calibration/fast - 5 cycles - pi_3/'\n#repo = 'ingripper calibration/fast - 5 cycles - pi_3/'\nati = np.genfromtxt(repo + 'atiData.txt', delimiter=' ',dtype=float)\ndiffsig = np.genfromtxt(repo + 'diffData.txt', delimiter=' ',dtype=float)\nsumsig = np.genfromtxt(repo + 'sumData.txt', delimiter=' ',dtype=float)\npos = np.genfromtxt(repo + 'posData.txt', delimiter=' ',dtype=float)\nvel = np.genfromtxt(repo + 'velData.txt', delimiter=' ',dtype=float)\neff = np.genfromtxt(repo + 'effData.txt', delimiter=' ',dtype=float)\n#temp = np.genfromtxt(repo + 'tempData.txt', delimiter=' ',dtype=float)\n#temp = temp[:,np.newaxis]\n\n# build the feature vector\ndiffsig = diffsig-np.mean(diffsig[0:500,:],axis = 0)\nati = ati-np.mean(ati[0:500,:],axis = 0)\nNsig = diffsig/sumsig\nNsig = np.hstack((Nsig,Nsig**2))\n\nx = np.hstack((Nsig,pos,vel,eff));\nmean_x = np.mean(x,axis=0)\nstddev_x = np.std(x,axis=0)\nx = (x-mean_x)/stddev_x\nmax_ati = np.max(np.abs(ati),axis=0);\ny = ati/max_ati\n\nxss = x[0:x.shape[0]:50,:]\nyss = y[0:y.shape[0]:50,:]\n\n# split data to training and test sets\ndataSize = xss.shape[0]\ntrainSize = int(np.floor(0.7*dataSize))\ntestSize = dataSize-trainSize\ntrainIndex,testIndex = torch.utils.data.random_split(range(dataSize), [trainSize, testSize], 
generator=torch.Generator().manual_seed(2020))\n\nxTrain = xss[trainIndex,:]\nyTrain = yss[trainIndex,:]\nxTest = xss[testIndex,:]\nyTest = yss[testIndex,:]\n\n# this is one way to define a network\nclass Net(torch.nn.Module):\n def __init__(self, n_feature, n_hidden, n_output):\n super(Net, self).__init__()\n self.hidden1 = torch.nn.Linear(n_feature, n_hidden[0]) # hidden layer\n self.hidden2 = torch.nn.Linear(n_hidden[0], n_hidden[1]) # hidden layer\n #self.hidden3 = torch.nn.Linear(n_hidden[1], n_hidden[2]) # hidden layer\n self.predict = torch.nn.Linear(n_hidden[1], n_output) # output layer\n # Define proportion or neurons to dropout\n self.dropout = torch.nn.Dropout(0.8)\n def forward(self, x):\n x = torch.sigmoid(self.hidden1(x)) # activation function for hidden layer\n x = torch.sigmoid(self.hidden2(x)) # activation function for hidden layer\n #x = self.dropout(x)\n #x = self.dropout(x)\n #x = torch.sigmoid(self.hidden3(x)) # activation function for hidden layer\n #x = self.dropout(x)\n out = self.predict(x) # linear output\n return out\n\n# model architecture\nhlayers = [30,10]\nnfeatures = np.shape(xTrain)[1]\nnoutput = np.shape(yTrain)[1]\nArch = '_30_10'\n\nif TRAIN:\n\t# torch can only train on Variable, so convert them to Variable\n xTrain = Variable(torch.from_numpy(xTrain))\n yTrain = Variable(torch.from_numpy(yTrain))\n \n # Check cuda availability\n cuda = torch.cuda.is_available()\n \n # Create neural network model\n if cuda:\n torch.cuda.manual_seed(2020)\n model = Net(n_feature = nfeatures, n_hidden = hlayers, n_output = noutput).cuda()\n device = 'cuda'\n else:\n torch.manual_seed(2020)\n model = Net(n_feature = nfeatures, n_hidden = hlayers, n_output = noutput)\n device = 'cpu'\n\n optimizer = torch.optim.LBFGS(model.parameters())\n loss_func = torch.nn.MSELoss() # this is for regression mean squared loss\n \n model = model.double()\n xTrain = xTrain.to(device) \n yTrain = yTrain.to(device)\n \n def closure():\n optimizer.zero_grad()\n output = model(xTrain)\n loss = loss_func(output, yTrain)\n loss.backward()\n return loss\n \n for epoch in range(200): # loop over the dataset multiple times\n prediction = model(xTrain) # input x and predict based on x\n\n #loss = loss_func(prediction, yTrain) # must be (1. nn output, 2. 
target)\n\n #optimizer.zero_grad() # clear gradients for next train\n #loss.backward() # backpropagation, compute gradients\n loss = optimizer.step(closure) # apply gradients\n\n # print statistics\n running_loss = loss.item()\n print('[%d] loss: %.7f' %(epoch + 1,running_loss))\n\n print('Finished Training')\n\t\n torch.save(model.state_dict(),'model%s.pth' %(Arch))\n\nnet = Net(n_feature=np.shape(x)[1], n_hidden=hlayers, n_output=np.shape(ati)[1])\nnet = net.double()\nnet.load_state_dict(torch.load('model%s.pth' %(Arch)))\npred = net(Variable(torch.from_numpy(x)))\npred = pred.detach().numpy()\npred = pred*max_ati\n\nfig, axs = plt.subplots(3, 2)\naxs[0, 0].plot(ati[:,0])\naxs[0, 0].plot(pred[:,0])\n\naxs[1, 0].plot(ati[:,1])\naxs[1, 0].plot(pred[:,1])\n\naxs[2, 0].plot(ati[:,2])\naxs[2, 0].plot(pred[:,2])\n\naxs[0, 1].plot(ati[:,3])\naxs[0, 1].plot(pred[:,3])\n\naxs[1, 1].plot(ati[:,4])\naxs[1, 1].plot(pred[:,4])\n\naxs[2, 1].plot(ati[:,5])\naxs[2, 1].plot(pred[:,5])\nplt.show()\n","repo_name":"amirhadi3/dVRK-compliantTrocar","sub_path":"NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"21178697391","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\nb = 1.95\nr = 1\nmass = 1.9\ng = 4\nSF = 1.5\n\nA = [[(b**3)/12,b],[(b**2)/4,(1-r)]]\nB = [mass*g*SF,0]\na,c = np.linalg.solve(A,B)\n\n\nxs = np.linspace(-b/2,b/2,1000)\n\nplt.figure()\nplt.grid()\nplt.ylim([0,c*1.2])\nplt.plot(xs, a*xs**2 + c)\nplt.show()","repo_name":"Lanzebe/P02-TuksProjects","sub_path":"Y4-MLV420/SparDesign/TestWingDist.py","file_name":"TestWingDist.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"1528801527","text":"import os\nimport csv\nimport constants\nfrom getsaveddata import get_last_log_date\nfrom menufunctions import quit_function, clear_screen\nfrom currentstreaks import CurrentStreaksAlert\n\n\nclass Workout:\n def __init__(self, log, date, workout_dict, confirmation):\n self.log = log\n self.workout_dict = workout_dict\n self.date = date\n self.confirmation = confirmation\n\n def check_for_todays_workout(self):\n # Open log file and check if there is an existing workout for today\n last_log = get_last_log_date()\n if self.date == last_log:\n return True\n else:\n return False\n\n def get_workout_from_user(self):\n if not self.check_for_todays_workout():\n while True:\n user_input = input(\"Now editing today's workout.\\nEnter an exercise followed by a number \"\n \"(reps or duration) e.g. 'push-up 20'. 
Type 'f' when finished: \")\n if user_input.lower() == \"q\":\n quit_function() \n elif user_input.lower() == \"f\":\n break\n else:\n # Separate number from exercise name\n number = user_input.split()[-1]\n # Remove number and extra space from end of exercise name\n exercise_name = user_input[:-(len(number) + 1)]\n if exercise_name not in constants.EXERCISE_LIST:\n print(\"Error: Enter a valid exercise type and a number.\")\n elif not number.isnumeric():\n print(\"Error: Exercise reps or duration must be numeric.\")\n else:\n self.workout_dict[exercise_name] = number\n else:\n print(\"Error: You have already logged a workout for today.\")\n return\n\n def show_workout(self):\n clear_screen()\n for key, value in self.workout_dict.items():\n print(key, value)\n\n def edit_workout(self):\n self.show_workout()\n self.get_workout_from_user()\n\n def confirm_workout(self):\n if self.workout_dict == {}:\n return\n else:\n while True:\n self.show_workout()\n user_input = input(\"Log this workout? 'Y' for 'yes', 'e' for 'edit' or 'c' for cancel. \")\n if user_input.lower() == \"q\":\n quit_function()\n elif user_input.lower() == \"c\":\n return\n elif user_input.lower() == \"y\":\n self.confirmation = True\n return\n elif user_input.lower() == \"e\":\n self.edit_workout()\n else:\n continue\n\n def write_workout_to_csv(self):\n if self.confirmation == True:\n write_to_csv_dict = {\"date\": self.date}\n # Create comprehensive dictionary with every exercise in exercise list\n for exercise in constants.EXERCISE_LIST:\n write_to_csv_dict[exercise] = 0\n # Copy today's workout into comprehensive dictionary\n for key in self.workout_dict:\n write_to_csv_dict[key] = self.workout_dict[key]\n with open(self.log, \"a\") as log:\n writer = csv.writer(log)\n writer.writerow(write_to_csv_dict.values())\n\n def streak_alert(self):\n if self.confirmation == True:\n streak_alert = CurrentStreaksAlert(list(self.workout_dict.keys()))\n streak_alert.current_streaks_alert()\n\n def get(self):\n self.get_workout_from_user()\n self.confirm_workout()\n self.write_workout_to_csv()\n self.streak_alert()","repo_name":"plutoniumcat/DailyExercise","sub_path":"workout.py","file_name":"workout.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"18527549063","text":"'''\n092 - Crie um programa que leia nome, ano de nascimento e carteira de trabalho e\ncadastre-os (com idade) em um dicionario, se por acaso a CTPS for diferente de ZERO,\no dicionario recebera tambem o ano de contratação e o salario. 
Calcule e acrescente,\nalem da idade, com quantos anos a pessoa vai se aposentar.\n'''\nfrom datetime import datetime\ndados = dict()\ndados['Nome'] = str(input('Nome: ')).strip().title()\nnasc = int(input('Ano de Nascimento: '))\ndados['Idade'] = datetime.now().year - nasc\ndados['CTPS'] = int(input('Carteira de Trabalho (0 não tem): '))\nif dados['CTPS'] != 0:\n dados['Contratação'] = int(input('Ano de Contratação: '))\n dados['Salario'] = float(input('Salario: '))\n #anos_trabalhados = 35 - (datetime.now().year - dados['Contratação'])\n dados['Aposentadoria'] = dados['Idade'] + (35 - (datetime.now().year - dados['Contratação']))\nprint('-=' * 15)\nfor k, c in dados.items():\n print(f'{k} tem o valor {c}')\n\n\n\n\n\n","repo_name":"mariocarvalho-2205/python","sub_path":"092 nome ano nasc carteira de trabalho.py","file_name":"092 nome ano nasc carteira de trabalho.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"19468277029","text":"# def type_int(int_number):\n# return int_number * 2\n#\n#\n# def type_float(float_number):\n# result = float_number * 1.5\n# return f\"{result:.2f}\"\n#\n#\n# def type_string(string):\n# return \"$\" + string + \"$\"\n#\n#\n# data_type = input()\n# value = input()\n#\n# if data_type == \"int\":\n# value = int(value)\n# print(type_int(value))\n# elif data_type == \"real\":\n# value = float(value)\n# print(type_float(value))\n# elif data_type == \"string\":\n# print(type_string(value))\n\n\ndef calculations(data_type, value):\n if data_type == \"int\":\n value = int(value)\n return value * 2\n\n if data_type == \"real\":\n value = float(value)\n return f\"{value * 1.5:.2f}\"\n\n if data_type == \"string\":\n return \"$\" + value + \"$\"\n\n\ninput_data_type = input()\ninput_value = input()\n\nprint(calculations(input_data_type, input_value))\n","repo_name":"azashev/Programming-Fundamentals-with-Python-Softuni","sub_path":"Functions/data_types.py","file_name":"data_types.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"74901355749","text":"from gpiozero import LED\n\n\"\"\"\n =0=\n| |\n1 2\n| |\n =3=\n| |\n4 5\n| |\n =6=\n\"\"\"\n\nMAP_VAL = {\n 0: [0, 1, 2, 4, 5, 6],\n 1: [2, 5],\n 2: [0, 2, 3, 4, 6],\n 3: [0, 2, 3, 5, 6],\n 4: [1, 2, 3, 5],\n 5: [0, 1, 3, 5, 6],\n 6: [0, 1, 3, 4, 5, 6],\n 7: [0, 2, 5],\n 8: [0, 1, 2, 3, 4, 5, 6],\n 9: [0, 1, 2, 3, 5, 6]\n}\nMAP_ADDR = {\n 0: 16,\n 1: 20,\n 2: 21,\n 3: 6,\n 4: 13,\n 5: 19,\n 6: 26,\n}\n\n\nclass DigitOutputController:\n debug = False\n\n def __init__(self, masks=MAP_VAL, addresses=MAP_ADDR, debug=False):\n if debug:\n self.debug = True\n return\n\n self._leds = [LED(i) for i in addresses.values()]\n self._masks = masks\n\n def show(self, digit=0):\n if self.debug:\n print(digit)\n return\n\n vals = self._masks.get(digit, [])\n\n for i, led in enumerate(self._leds):\n led.value = i in vals\n","repo_name":"BANOnotIT/voice-ziferblat","sub_path":"core/digit_controller.py","file_name":"digit_controller.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33109716351","text":"import math\r\nimport pygame as pg\r\n\r\nWHITE = (255, 255, 255)\r\nBACKGND = (0, 0, 0)\r\nLAWNGREEN = (0, 223, 0)\r\nSARGA = (230, 230, 40)\r\nBARNA = (200, 135, 80)\r\nBORDO = (100, 0, 0)\r\n\r\nclass HajoObject:\r\n #hajo meretek, 
[m]\r\n length = 13.64\r\n width = 4.1\r\n #hajo pozicioja, [m, rad]\r\n position = pg.math.Vector2(0.0,0.0)\r\n rotation = 0.0\r\n #hajo sebessege [m/s, rad/s]\r\n speed = pg.math.Vector2(0,0)\r\n speed2 = pg.math.Vector2(0,0)\r\n speed3 = pg.math.Vector2(0,0)\r\n szogseb = 0.0\r\n #relativ pozicio a megjeleniteshez\r\n midscreen = pg.math.Vector2(0,0)\r\n #hajo pontjai, [m]. Ezt atalakitjuk vektorokka a konstrutorban, azt lehet hasznalni\r\n# hajoPoly = [(0, 6.82), (-1.35, 5.42), (-2.05, 0), (-2.05, -6.82), (2.05, -6.82), (2.05, 0), (1.35, 5.42)]\r\n hajoPoly = [(0.5, 0), (0.4, -0.1), (0, -0.15), (-0.5, -0.15), (-0.5, 0.15), (0, 0.15), (0.4, 0.1)]\r\n hajoOffset = 0.0 #ennyivel elorebb tolja a hajo korvonalat a centerhez kepest, csak a megjeleniteshez, hogy szep legyen a fordulas\r\n #elozo kirajzolt polygon a torleshez\r\n lastPoly = [(0,0), (0,0), (0,0)]\r\n speedVect = [pg.math.Vector2(0,0), pg.math.Vector2(0,0)] #ez a zold sebesseg\r\n speedVect2 = [pg.math.Vector2(0,0), pg.math.Vector2(0,0)] #ez meg a sarga / INS\r\n speedVect3 = [pg.math.Vector2(0,0), pg.math.Vector2(0,0)] #ez meg a barna / GPS\r\n thrustVects = [[]] * 5 # a hajo koordinatarendszereben az aktuatorok vektorai\r\n #lastThVec = [ [(0,0), (0,0)] ] * 5\r\n Thrust = [0] * 4\r\n thrustVects = [[]]*5 # a hajo koordinatarendszereben az aktuatorok vektorai\r\n lastThVec = [ \r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)],\r\n [(0,0), (0,0)]\r\n ]\r\n U12V = 0.0\r\n Uact = [0.0]*4\r\n Iact = [0.0]*4\r\n count = 0 #ez csak egy szam, a modellnel a packet counter erteke, hogy lassuk a mukodest\r\n\r\n def __init__(self, scr, dict) -> None:\r\n self.screen = scr\r\n self.ship_scale = dict[\"length\"]\r\n self.scr_scale = dict[\"zoom\"]\r\n self.hajoOffset = dict['offset']\r\n self.midscreen = pg.math.Vector2(scr.get_width() / 2, scr.get_height() / 2)\r\n # a hajo formaja, felskalazva, offsetelve, de zoomolas elott\r\n self.hajoVect = list(map(lambda x: pg.math.Vector2(x)*self.ship_scale+(self.hajoOffset, 0), self.hajoPoly))\r\n self.font = pg.font.Font(None, 24)\r\n #meghajtas kijelzeshez vektorok\r\n self.thrustVects[0] = [(dict['orrL'], 0), (0, -1)] #orrsugar helye, iranya\r\n self.thrustVects[1] = [(-dict['farL'], 0), (0, -1)] #farsugar helye, iranya\r\n self.thrustVects[2] = [(self.hajoVect[3][0], dict['motL']/-2), (-1, 0)] #jobb motor \r\n self.thrustVects[3] = [(self.hajoVect[3][0], dict['motL']/2), (-1, 0)] #bal motor helye\r\n self.thrustVects[4] = [(self.hajoVect[0][0], 0), (0, 1)] #forgas vektor helye, iranya\r\n\r\n def doSpeedVec(self, V):\r\n result = [0,0]\r\n speedVect = V.rotate_rad(self.rotation)\r\n result[0] = self.position*self.scr_scale\r\n result[1] = (self.position+speedVect)*self.scr_scale\r\n # flippelni kell lefele, es betenni a kepernyo kozepere\r\n result = list(map(lambda x: (x.x + self.midscreen.x, self.midscreen.y - x.y), result))\r\n return result\r\n\r\n def draw(self, color=WHITE):\r\n #elozo torlese\r\n pg.draw.polygon(self.screen, BACKGND, self.lastPoly)\r\n pg.draw.line(self.screen, BACKGND, self.speedVect[0], self.speedVect[1], width = 3)\r\n pg.draw.line(self.screen, BACKGND, self.speedVect2[0], self.speedVect2[1], width = 3)\r\n pg.draw.line(self.screen, BACKGND, self.speedVect3[0], self.speedVect3[1], width = 3)\r\n for x in range(5):\r\n pg.draw.line(self.screen, BACKGND, self.lastThVec[x][0], self.lastThVec[x][1], width = 3)\r\n # uj vektorok szamitasa\r\n #vektor talppontja\r\n tol = (pg.math.Vector2(self.thrustVects[x][0]).rotate_rad(self.rotation) + 
self.position)*self.scr_scale\r\n if(x < 4):\r\n temp = self.Thrust[x]\r\n else:\r\n temp = self.szogseb * 5\r\n ig = tol + (pg.math.Vector2(self.thrustVects[x][1]).rotate_rad(self.rotation)) * self.scr_scale*temp*self.ship_scale/5\r\n #flippelni, kozepre tenni\r\n self.lastThVec[x][0] = (tol.x + self.midscreen.x, self.midscreen.y - tol.y)\r\n self.lastThVec[x][1] = (ig.x + self.midscreen.x, self.midscreen.y - ig.y)\r\n\r\n #uj pozicio szamitasa\r\n #forgatas, poziciora mozgatas, aztan skalazas(zoom):\r\n shipmap = map(lambda x: (x.rotate_rad(self.rotation) + self.position) * self.scr_scale, self.hajoVect)\r\n\r\n # flippelni kell lefele, es betenni a kepernyo kozepere\r\n self.lastPoly = list(map(lambda x: (x.x + self.midscreen.x, self.midscreen.y - x.y), shipmap))\r\n # uj sebesseg vektorok eloallitasa\r\n self.speedVect = self.doSpeedVec(self.speed)\r\n self.speedVect2 = self.doSpeedVec(self.speed2)\r\n self.speedVect3 = self.doSpeedVec(self.speed3)\r\n # uj rajzolas\r\n pg.draw.polygon(self.screen, color, self.lastPoly)\r\n pg.draw.line(self.screen, LAWNGREEN, self.speedVect[0], self.speedVect[1], width = 3)\r\n pg.draw.line(self.screen, SARGA, self.speedVect2[0], self.speedVect2[1], width = 3)\r\n pg.draw.line(self.screen, BARNA, self.speedVect3[0], self.speedVect3[1], width = 3)\r\n for x in self.lastThVec:\r\n pg.draw.line(self.screen, BORDO, x[0], x[1], width = 3)\r\n pg.draw.line(self.screen, LAWNGREEN, self.lastThVec[4][0], self.lastThVec[4][1], width = 3)\r\n # texts\r\n # speed, ez a valos sebesseg vagy az INS sebesseg\r\n text = self.font.render(\"Speed / GPS\", True, LAWNGREEN, BACKGND)\r\n yy = 10\r\n self.screen.blit(text, (10,yy))\r\n for i in range(2):\r\n text = self.font.render(f\"V{i}: {self.speed[i]:3.2f} [m/s], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n # speed2, ez a modell sebesseg\r\n text = self.font.render(\"INS sebesseg\", True, SARGA, BACKGND)\r\n yy = 35\r\n self.screen.blit(text, (10,yy))\r\n for i in range(2):\r\n text = self.font.render(f\"V{i}: {self.speed2[i]:3.2f} [m/s], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n # speed3, ez a GPS sebesseg\r\n text = self.font.render(\"Modell sebesseg\", True, BARNA, BACKGND)\r\n yy = 60\r\n self.screen.blit(text, (10,yy))\r\n for i in range(2):\r\n text = self.font.render(f\"V{i}: {self.speed3[i]:3.2f} [m/s], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n # Uact, aktuator feszultsegek\r\n text = self.font.render(f\"U12: {self.U12V:2.2f} [V]\", True, WHITE, BACKGND)\r\n self.screen.blit(text, (500,10))\r\n text = self.font.render(f\"Packet cnt: {self.count}\", True, WHITE, BACKGND)\r\n self.screen.blit(text, (480,35))\r\n yy = 85\r\n text = self.font.render(\"Orrsugar ---------- Farsugar ---------- Jobb mot. --------- Bal mot.\", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150,yy))\r\n text = self.font.render(\"Feszultsegek\", True, WHITE, BACKGND)\r\n yy = 110\r\n self.screen.blit(text, (10,yy))\r\n for i in range(4):\r\n text = self.font.render(f\"{self.Uact[i]:3.2f} [V], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n\r\n text = self.font.render(\"Áramok\", True, WHITE, BACKGND)\r\n yy = 135\r\n self.screen.blit(text, (10,yy))\r\n for i in range(4):\r\n text = self.font.render(f\"{self.Iact[i]:3.2f} [A], \", True, WHITE, BACKGND)\r\n self.screen.blit(text, (150+i*130,yy))\r\n\r\n # sebesseg megadasa, sajat koordinatarendszerbe. 
Ez csak a kiirashoz kell\r\n def setspeed(self, V):\r\n self.speed = pg.math.Vector2(V[0], V[1])\r\n self.szogseb = V[2]\r\n\r\n # a masik ket sebesseg vektor megadasa\r\n def setSpeeds(self, V2, V3):\r\n self.speed2 = pg.math.Vector2(V2[0], V2[1])\r\n self.speed3 = pg.math.Vector2(V3[0], V3[1])\r\n\r\n # pozicio, fix kordinatarenszerben\r\n def setPosition(self, X):\r\n self.position.x = X[0]\r\n self.position.y = X[1]\r\n self.rotation = X[2]\r\n\r\n # aktuator erok, sajat kordinatarenszerben\r\n def setThrust(self, T):\r\n self.Thrust = T\r\n\r\n # ez kozeppre teszi a hajot\r\n def resetPos(self):\r\n X = self.position*self.scr_scale\r\n self.midscreen = (self.screen.get_width() / 2, self.screen.get_height() / 2) - pg.math.Vector2(X.x, -X.y)\r\n\r\n","repo_name":"eendsze/ssiresdsk","sub_path":"hajomegjelenito.py","file_name":"hajomegjelenito.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"6912349734","text":"class cond_tree_node:\n data = None\n node_type = None # ATTRIBUTE, INTEGER, REL_OP, LOGIC_OP\n left = None\n right = None\n\n def __init__(self, data, node_type, left=None, right=None):\n self.data = data\n self.node_type = node_type\n self.left = left\n self.right = right\n\n \n def __str__(self):\n result = \"\"\n if self.left:\n result += \"(\" + self.left.__str__()\n if self.data:\n result += \"\" + self.data + \"\"\n if self.right:\n result += self.right.__str__() + \")\"\n\n return result\n\n \n def __add__(self, other):\n return str(self) + other\n\n \n def __radd__(self, other):\n return other + str(self)\n\n \n def get_all_atts_in_cond(self):\n if (self.left is None and self.right is None): # is leaf\n if (self.node_type == \"ATTRIBUTE\"):\n return [self.data]\n else:\n return []\n\n return self.left.get_all_atts_in_cond() + self.right.get_all_atts_in_cond()\n\n\n def get_attribute_table(self):\n \"\"\"receives an attribute-node and returns R or S\"\"\" \n result = None \n if(self.node_type == \"ATTRIBUTE\"):\n result = (self.data[0]) \n \n return result\n\n \n def get_attribute_alone(self):\n \"\"\"receives an attribute-node and returns atribute alone\"\"\" \n result = None \n if(self.node_type == \"ATTRIBUTE\"):\n result = (self.data[2]) \n \n return result\n\n\n def are_different_tables(self):\n table1 = self.left.get_attribute_table()\n table2 = self.right.get_attribute_table()\n return (table1 != table2)\n\n \n def are_same_attributes(self):\n att1 = self.left.get_attribute_alone()\n att2 = self.right.get_attribute_alone()\n return (att1==att2) ","repo_name":"OMRYZUTA/databases_2","sub_path":"conditionTree.py","file_name":"conditionTree.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"71"} +{"seq_id":"16350709509","text":"\"\"\"\n#내가 짜본 코드\n\nfrom random import randint\nimport time\nfrom datetime import datetime\n\n\n\ndef unique(x): #중복입력방지를 위한 함수\n for i in x:\n if(x.count(i)>=2):\n return False\n return True\n\nstart_time = time.time()\nbaseball = True\n\n\n\nwhile baseball:\n count = 0\n n = 0\n \n try: n=int(input(\"몇자리수인지 입력하시오.(1~10)\"))\n \n except ValueError:\n print(\"숫자를 입력하시오.\")\n break\n \n \n if n > 10 :\n print(\"1~10까지 정수만\")\n \n elif n <=10 :\n num=['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n guess = []\n for i in range(n):\n temp = randint(0, len(num)-1)\n guess.append(num[temp])\n print(guess)\n print(\"띄어쓰기 하지말고 0~9사이의 숫자들을 적으시오.\")\n \n 
baseball2 = True\n while baseball2:\n count+=1\n usr_guess = str(input(\"숫자를 맞혀보세요.\"))\n try: temp=int(usr_guess)\n \n except ValueError:\n if usr_guess == 'exit':\n baseball2 = False\n baseball = False\n \n if len(usr_guess) != n:\n print(n, \"자리 숫자입니다.\")\n \n elif not unique(usr_guess):\n print(\"\\n 중복은 안됩니다.\")\n \n elif True :\n strike = 0\n ball = 0\n out = 0\n for i in range(n):\n if usr_guess[i] == guess[i]:\n strike += 1\n elif usr_guess[i] in guess:\n ball += 1\n elif not usr_guess[i] in guess:\n out += 1\n if strike == n:\n end_time = time.time()\n print(f\"축하합니다. {count} 번 만에 정답입니다! 소요시간:{end_time-start_time:.2f}초 {datetime.now()}\")\n baseball2 = False\n baseball = False\n else:\n print(f\"\\n {strike} 스트라이크, {ball} 볼, {out} 아웃\")\n\"\"\"\n\n\n\n\"\"\"\n#다른 분이 짠 코드\n\nimport random\nimport time\nfrom datetime import datetime, timedelta\nprint(\"원하시는 자리 수를 입력하세요\")\nn = int(input())\nstart_time = time.time()\ndef baseball_number_generator(n):\n result = set()\n if n < 1:\n print(\"1이상의 값을 입력해주세요\")\n return False\n while len(result)计算梯度->反向传播\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n '''训练Critic'''\n # 利用r与s_计算出Q'(s_,a_),进而计算出Q(s,a)的target值\n a_target = self.actor_target(batch_s_)\n q_tmp = self.critic_target(batch_s_, a_target)\n q_target = batch_r + GAMMA * q_tmp\n # 计算Q(s,a)和loss\n q_eval = self.critic(batch_s, batch_a)\n td_error = self.loss_func(q_target, q_eval)\n self.critic_loss_list.append(td_error)\n # 更新critic的参数:梯度归零->计算梯度->反向传播\n self.critic_optimizer.zero_grad()\n td_error.backward()\n self.critic_optimizer.step()\n\n def save_model(self):\n torch.save(self.actor.state_dict(), 'actor_weights.pth')\n torch.save(self.critic.state_dict(), 'critic_weights.pth')\n\n\n# 配置gym\nenv = gym.make(ENV_NAME)\nenv = env.unwrapped\nenv.reset(seed=1)\ns_dim = env.observation_space.shape[0]\na_dim = env.action_space.shape[0] # 动作值为[a1,a2],a1控制油门,a2控制左右点火引擎,取值范围都为[-1,1]的实数\na_bound = env.action_space.high\na_low_bound = env.action_space.low\n\nddpg = DDPG(a_dim, s_dim, a_bound)\nvar = 3 # 加入噪声用到的正态分布中的标准差\nt1 = time.time()\nreward_list = []\nfor i in range(EPISODES):\n s = env.reset()\n ep_r = 0 # 每一个episode的累积奖励值\n for j in range(EP_STEPS):\n if RENDER: env.render()\n # 加入噪声\n a = ddpg.choose_action(s)\n a = np.clip(np.random.normal(a, var), a_low_bound, a_bound)\n s_, r, done, _, info = env.step(a)\n ddpg.store_experiences(s, a, r , s_) # 存储与环境互动经验\n if ddpg.pointer > MEMORY_CAPACITY:\n var *= 0.9999 # decay the exploration controller factor\n ddpg.learn()\n\n s = s_\n ep_r += r\n if j == EP_STEPS - 1:\n reward_list.append(ep_r)\n print('Episode: ', i, ' Reward: %i' % ep_r, 'Explore: %.2f' % var)\n\n if i > 0 and i % 50 == 0:\n ddpg.save_model()\n x = range(0, i + 1)\n plt.plot(x, reward_list, '.-')\n plt.xlabel(\"episode\")\n plt.ylabel(\"reward\")\n plt.show()\nprint('Running time: ', time.time() - t1)\n","repo_name":"Lwon2001/ReinforcementLearning","sub_path":"DDPG/ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"10037338870","text":"import random\nimport pygame\nfrom pygame.locals import *\nimport sys\nfrom main_menu import Main_menu\nimport time\nfrom update_user import update_user\n\npygame.init()\n\nFramePerSec = pygame.time.Clock()\nclock = pygame.time.Clock()\n\nWIDTH = 599\nHEIGHT = 599\nRHEIGHT = 629\nBLOCK = int(30) # the size of every square\nLEVEL = 0\nSPEED = 10\nSCREEN = 
pygame.display.set_mode((WIDTH, RHEIGHT))\n\nsnake_color = (32, 32, 32)\nbackground_color = (33, 181, 250)\nf1_color = (82, 237, 65)\nf2_color = (82, 237, 65)\nf3_color = (82, 237, 65)\ntext_color = (255, 234, 0)\nhead_color = (255, 0, 0)\nborder_color = (128, 128, 128)\n\nscore_font = pygame.font.SysFont(\"Arial\", 15)\n\n\nclass Point:\n\tdef __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y\n\n\nclass Snake:\n\tdef __init__(self, snake_pos):\n\t\tself.body = snake_pos\n\n\tdef draw(self):\n\t\t# draw body part\n\t\tfor body in self.body[1:]:\n\t\t\tpygame.draw.rect(\n\t\t\t\tSCREEN,\n\t\t\t\tsnake_color,\n\t\t\t\tpygame.Rect(\n\t\t\t\t\tbody.x * BLOCK,\n\t\t\t\t\tbody.y * BLOCK,\n\t\t\t\t\tBLOCK,\n\t\t\t\t\tBLOCK,\n\t\t\t\t)\n\t\t\t)\n\t\thead = self.body[0] # draw head\n\t\tpygame.draw.rect(\n\t\t\tSCREEN,\n\t\t\thead_color,\n\t\t\tpygame.Rect(\n\t\t\t\thead.x * BLOCK,\n\t\t\t\thead.y * BLOCK,\n\t\t\t\tBLOCK,\n\t\t\t\tBLOCK,\n\t\t\t)\n\t\t)\n\n\tdef move(self, dx, dy):\n\t\t# Move body\n\t\tfor idx in range(len(self.body) - 1, 0, -1):\n\t\t\tself.body[idx].x = self.body[idx - 1].x # position body[i] = body[i-1]\n\t\t\tself.body[idx].y = self.body[idx - 1].y\n\t\t# Move head\n\t\tself.body[0].x += dx\n\t\tself.body[0].y += dy\n\n\t\t# Check whether snake leaves the playing area\n\t\tif self.body[0].x > WIDTH // BLOCK:\n\t\t\tself.body[0].x = 0\n\t\telif self.body[0].x < 0:\n\t\t\tself.body[0].x = WIDTH // BLOCK\n\t\telif self.body[0].y < 0:\n\t\t\tself.body[0].y = WIDTH // BLOCK\n\t\telif self.body[0].y > HEIGHT // BLOCK:\n\t\t\tself.body[0].y = 0\n\n\tdef check_collision_food(self, food):\n\t\tif food.pos.x == self.body[0].x and food.pos.y == self.body[0].y:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef check_collision_border(self, borders):\n\t\tfor pos in borders:\n\t\t\tif pos.x == self.body[0].x and pos.y == self.body[0].y:\n\t\t\t\treturn True\n\n\t\treturn False\n\n\tdef check_collision_snake(self):\n\t\tfor pos in self.body[1:]:\n\t\t\tif self.body[0].x == pos.x and self.body[0].y == pos.y:\n\t\t\t\treturn True\n\t\treturn False\n\n\n\nclass Food:\n\tdef __init__(self):\n\t\tself.pos = None\n\t\tself.weight = None\n\t\tself.spawn_time = None\n\t\tself.color = None\n\n\tdef draw(self):\n\t\tif self.weight == 1:\n\t\t\tself.color = f1_color\n\t\telif self.weight == 2:\n\t\t\tself.color = f2_color\n\t\telif self.weight == 3:\n\t\t\tself.color = f3_color\n\n\t\tpygame.draw.rect(\n\t\t\tSCREEN,\n\t\t\tself.color,\n\t\t\tpygame.Rect(\n\t\t\t\tself.pos.x * BLOCK,\n\t\t\t\tself.pos.y * BLOCK,\n\t\t\t\tBLOCK,\n\t\t\t\tBLOCK,\n\t\t\t)\n\t\t)\n\n\tdef new_pos(self, snake_list, border_list):\n\t\twhile True:\n\t\t\tx, y = random.randint(0, WIDTH // BLOCK), random.randint(0, HEIGHT // BLOCK)\n\t\t\tpos = Point(x, y)\n\t\t\tif (pos not in border_list) and (pos not in snake_list):\n\t\t\t\treturn pos\n\n\tdef create_new(self, snake_list, border_list, cur_time):\n\t\tself.weight = random.randint(1, 3)\n\t\tself.pos = self.new_pos(snake_list, border_list)\n\t\tself.spawn_time = cur_time\n\nclass Border:\n\tdef __init__(self, lvl):\n\t\tself.lvl = lvl\n\t\tself.border_list = []\n\n\tdef load_border(self):\n\t\tpath = \"/Users/nurstanduisengaliyev/Documents/Python/pp2-22B031491/tsis10/SnakeGame/levels/\" + str(self.lvl) + \".txt\"\n\t\twith open(path, 'r') as f:\n\t\t\tborder_rows = f.readlines()\n\n\t\tfor i, line in enumerate(border_rows):\n\t\t\tfor j, value in enumerate(line):\n\t\t\t\tif value == '#':\n\t\t\t\t\tself.border_list.append(Point(j, i))\n\n\tdef 
draw(self):\n\t\tfor i in self.border_list:\n\t\t\tpygame.draw.rect(SCREEN, border_color, (i.x * BLOCK, i.y * BLOCK, BLOCK, BLOCK))\n\ndef get_string_body(body):\n\tstr1 = str(body[0].x) + \", \" + str(body[1].y)\n\tfor pos in body[1:]:\n\t\t# \"x, y; x2, y2; x3, y3\n\t\tstr1 += \"; \" + str(pos.x) + ', ' + str(pos.y)\n\treturn str1\n\ndef draw_text(score, lvl):\n\ttxt_sur1 = score_font.render(f\"Score = {score}\", True, (0, 0, 0))\n\ttxt_sur2 = score_font.render(f\"Level = {lvl}\", True, (0, 0, 0))\n\tSCREEN.blit(txt_sur1, (15, (HEIGHT // BLOCK + 1) * BLOCK + 5))\n\tSCREEN.blit(txt_sur2, (WIDTH - 100, (HEIGHT // BLOCK + 1) * BLOCK + 5))\n\ndef runGame(username, lvl, score, snake_pos, direction):\n\tpygame.display.set_caption(\"SnakeGame\")\n\t# now, with currect state, we will state our game\n\t# 0 - left, 1 - right, 2 - up, 3 - down\n\t# created borders\n\tdx, dy = 0, 0\n\tif direction == 0:\n\t\tdx, dy = -1, 0\n\tif direction == 1:\n\t\tdx, dy = 1, 0\n\tif direction == 2:\n\t\tdx, dy = 0, -1\n\tif direction == 3:\n\t\tdx, dy = 0, 1\n\t# 0 - left, 1 - right, 2 - up, 3 - down\n\t# created dx, dy\n\tsnake = Snake(snake_pos)\n\tborder = Border(lvl)\n\tborder.load_border()\n\tfood = Food()\n\tfood.create_new(snake.body, border.border_list, time.time())\n\tis_started = False\n\tpause = False\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\t# closing the window, so we should insert into table current state\n\t\t\t\tcur_dir = None\n\t\t\t\tif dx == -1:\n\t\t\t\t\tcur_dir = 0\n\t\t\t\tif dx == 1:\n\t\t\t\t\tcur_dir = 1\n\t\t\t\tif dy == -1:\n\t\t\t\t\tcur_dir = 2\n\t\t\t\tif dy == 1:\n\t\t\t\t\tcur_dir = 3\n\t\t\t\tupdate_user(username, lvl, score, get_string_body(snake.body), cur_dir)\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\t\t\tif event.type == KEYDOWN:\n\t\t\t\tif event.key == K_SPACE:\n\t\t\t\t\tpause = not pause\n\t\t\t\tif event.key == K_UP and dy != 1:\n\t\t\t\t\tdx, dy = 0, -1\n\t\t\t\t\tis_started = True\n\t\t\t\telif event.key == K_DOWN and dy != -1:\n\t\t\t\t\tdx, dy = 0, 1\n\t\t\t\t\tis_started = True\n\t\t\t\telif event.key == K_LEFT and dx != 1:\n\t\t\t\t\tdx, dy = -1, 0\n\t\t\t\t\tis_started = True\n\t\t\t\telif event.key == K_RIGHT and dx != -1:\n\t\t\t\t\tdx, dy = 1, 0\n\t\t\t\t\tis_started = True\n\n\t\tif pause == True:\n\t\t\tcontinue\n\t\tSPEED = min(9 * pow(1.30, score/4), 25)\n\t\ttail_pos = None\n\t\tif is_started:\n\t\t\ttail_pos = Point(snake.body[-1].x, snake.body[-1].y)\n\t\t\tsnake.move(dx, dy)\n\n\t\tif is_started and (snake.check_collision_snake() or snake.check_collision_border(border.border_list)):\n\t\t\t# Lost the game should insert into database (0, 0, )\n\t\t\tcur_dir = None\n\t\t\tif dx == -1:\n\t\t\t\tcur_dir = 0\n\t\t\tif dx == 1:\n\t\t\t\tcur_dir = 1\n\t\t\tif dy == -1:\n\t\t\t\tcur_dir = 2\n\t\t\tif dy == 1:\n\t\t\t\tcur_dir = 3\n\t\t\tbody_pos = f\"{WIDTH // 30 // 2}, {HEIGHT // 30 // 2}; {WIDTH // 30 // 2 + 1}, {HEIGHT // 30 // 2}\"\n\t\t\tupdate_user(username, 0, 0, body_pos, 0)\n\t\t\tpygame.quit()\n\t\t\tsys.exit()\n\n\n\t\tif is_started and snake.check_collision_food(food):\n\t\t\tscore += food.weight\n\t\t\tsnake.body.append(tail_pos)\n\t\t\tfood.create_new(snake.body, border.border_list, time.time())\n\n\t\tif time.time() - food.spawn_time >= 4:\n\t\t\tfood.create_new(snake.body, border.border_list, time.time())\n\n\n\t\tSCREEN.fill(background_color)\n\t\tpygame.draw.rect(SCREEN, (250, 250, 250), pygame.Rect(0, (HEIGHT // BLOCK + 1) * BLOCK, WIDTH, BLOCK))\n\t\tdraw_text(score, 
lvl)\n\t\tfood.draw()\n\t\tborder.draw()\n\t\tsnake.draw()\n\t\tpygame.display.update()\n\n\t\tif score >= 9 and lvl < 2: # next level\n\t\t\tlvl += 1\n\t\t\tscore = 0\n\t\t\tborder = Border(lvl)\n\t\t\tborder.load_border()\n\t\t\tpause = False\n\t\t\tis_started = False\n\t\t\tdx, dy = -1, 0\n\t\t\t# username, lvl, score, snake_pos, direction\n\t\t\tbody_pos = get_snake_pos(f\"{WIDTH // 30 // 2}, {HEIGHT // 30 // 2}; {WIDTH // 30 // 2 + 1}, {HEIGHT // 30 // 2}\")\n\t\t\tsnake = Snake(body_pos)\n\t\t\tfood.create_new(snake.body, border.border_list, time.time())\n\t\t\tcur_dir = None\n\t\t\tif dx == -1:\n\t\t\t\tcur_dir = 0\n\t\t\tif dx == 1:\n\t\t\t\tcur_dir = 1\n\t\t\tif dy == -1:\n\t\t\t\tcur_dir = 2\n\t\t\tif dy == 1:\n\t\t\t\tcur_dir = 3\n\t\t\tupdate_user(username, lvl, score, get_string_body(snake.body), cur_dir)\n\n\n\t\tclock.tick(SPEED)\n\n\n# lvl, score, body_pos, direction\n\ndef get_snake_pos(a):\n\tl1 = a.split(';')\n\tpositions = []\n\tfor pos in l1:\n\t\tx, y = pos.split(',')\n\t\tpositions.append(Point(int(x), int(y)))\n\n\treturn positions\n\ndef main():\n\tmainMenu = Main_menu(WIDTH, RHEIGHT)\n\tcur_state = mainMenu.runGame(SCREEN)\n\tsnake_pos = get_snake_pos(cur_state[3])\n\tusername = cur_state[0]\n\tlvl = cur_state[1]\n\tscore = cur_state[2]\n\tdirection = cur_state[4]\n\trunGame(username, lvl, score, snake_pos, direction)\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"NurstanDuisengaliyev/pp2-22B031491","sub_path":"tsis10/SnakeGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"3859008647","text":"T = int(input())\n\nfor _ in range(T):\n N = bin(int(input()))\n N = N[2:]\n\n answer = []\n\n for idx in range(len(N)):\n letter = int(N[len(N)-1-idx])\n\n if letter != 0:\n answer.append(str(idx))\n\n print(\" \".join(answer))\n","repo_name":"yeong-hwan/algorithm-study","sub_path":"yeonghwan/algorithm-boj/sep/3460.py","file_name":"3460.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"71"} +{"seq_id":"30703677946","text":"import torch\nimport numpy as np\n\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport matplotlib.pyplot as plt\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n#%matplotlib inline\n\nimport cv2\nfrom timeit import default_timer as timer\n\n\n\n# Residual block in bottleneck style\n# input: input depth\n# output with same shape as input\nclass ResidualBottleNeckBlock(nn.Module):\n def __init__(self, input_channels, bottleneck_ratio):\n super(ResidualBottleNeckBlock, self).__init__()\n self.reduction = nn.Conv2d(input_channels,input_channels//bottleneck_ratio,1)\n self.batch_red = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.conv = nn.Conv2d(input_channels//bottleneck_ratio,input_channels//bottleneck_ratio,3,padding=1)\n self.batch_conv = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.expansion = nn.Conv2d(input_channels//bottleneck_ratio,input_channels,1)\n self.batch_exp = nn.BatchNorm2d(input_channels)\n\n\n def forward(self, x):\n\n out = F.relu(self.batch_red(self.reduction(x)))\n out = F.relu(self.batch_conv(self.conv(out)))\n out = self.batch_exp(self.expansion(out))\n\n return F.relu(out+x)\n\n\n# Residual block in bottleneck style\n# input: input depth\n# output with 
same shape as input\nclass PreActivationResidualBlock(nn.Module):\n def __init__(self, input_channels):\n super(PreActivationResidualBlock, self).__init__()\n self.bn_1 = nn.BatchNorm2d(input_channels)\n self.conv_1 = nn.Conv2d(input_channels,input_channels,3,padding=1)\n self.bn_2 = nn.BatchNorm2d(input_channels)\n self.conv_2 = nn.Conv2d(input_channels,input_channels,3,padding=1)\n\n\n def forward(self, x):\n\n out = self.conv_1(F.relu(self.bn_1(x)))\n out = self.conv_2(F.relu(self.bn_2(out)))\n\n return out+x\n\n# Residual block in bottleneck style\n# input: input depth\n# output with double depth and half scale\nclass PreActivationReductionBlock(nn.Module):\n def __init__(self, input_channels):\n super(PreActivationReductionBlock, self).__init__()\n self.bn_1 = nn.BatchNorm2d(input_channels)\n self.conv_1 = nn.Conv2d(input_channels,2*input_channels,3,padding=1,stride=2)\n self.bn_2 = nn.BatchNorm2d(input_channels*2)\n self.conv_2 = nn.Conv2d(input_channels*2,input_channels*2,3,padding=1)\n\n\n def forward(self, x):\n\n out = self.conv_1(F.relu(self.bn_1(x)))\n out = self.conv_2(F.relu(self.bn_2(out)))\n\n return out\n\n\n# Squeeze and Excitation block\nclass SEblock(nn.Module):\n def __init__(self, input_channels, height, width, ratio):\n super(SEblock, self).__init__()\n self.input_channels = input_channels\n\n self.squeeze = nn.AvgPool2d((height,width),(height,width))\n self.fc = nn.Linear(input_channels,input_channels//ratio)\n self.excite = nn.Linear(input_channels//ratio,input_channels)\n\n def forward(self, x):\n\n se = self.squeeze(x)\n se = se.view(-1,self.input_channels)\n se = F.relu(self.fc(se))\n se = torch.sigmoid(self.excite(se))\n se = se.view(-1,self.input_channels,1,1)\n\n x = x * se.expand_as(x)\n\n return x\n\n\n#Squeee and Excitation Residual block with bottleneck style\nclass SE_ResidualBottleNeckBlock(nn.Module):\n def __init__(self, input_channels, height, width, bottleneck_ratio, se_ratio):\n super(SE_ResidualBottleNeckBlock, self).__init__()\n self.reduction = nn.Conv2d(input_channels,input_channels//bottleneck_ratio,1)\n self.batch_red = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.conv = nn.Conv2d(input_channels//bottleneck_ratio,input_channels//bottleneck_ratio,3,padding=1)\n self.batch_conv = nn.BatchNorm2d(input_channels//bottleneck_ratio)\n self.expansion = nn.Conv2d(input_channels//bottleneck_ratio,input_channels,1)\n self.batch_exp = nn.BatchNorm2d(input_channels)\n\n self.se = SEblock(input_channels, height, width, se_ratio)\n\n\n def forward(self, x):\n\n out = F.relu(self.batch_red(self.reduction(x)))\n out = F.relu(self.batch_conv(self.conv(out)))\n out = self.batch_exp(self.expansion(out))\n\n # SE must be applied prior to the identity addition\n out = self.se(out)\n\n return F.relu(out+x)\n\nclass DenseBlock(nn.Module):\n def __init__(self, input_channels, inner_output, layers):\n super(DenseBlock, self).__init__()\n\n self.layers = layers\n self.reduction = nn.ModuleList([nn.Conv2d(input_channels+i*inner_output, inner_output,1) for i in range(layers)])\n self.conv = nn.ModuleList([nn.Conv2d(inner_output, inner_output,3,padding=1) for i in range(layers)])\n self.norm1 = nn.ModuleList([nn.BatchNorm2d(input_channels+i*inner_output) for i in range(layers)])\n self.norm2 = nn.ModuleList([nn.BatchNorm2d(inner_output) for i in range(layers)])\n\n\n def forward(self, x):\n\n for i in range(self.layers):\n out = self.norm1[i](x)\n out = F.relu(out)\n out = self.reduction[i](out)\n out = self.norm2[i](out)\n out = F.relu(out)\n out = 
self.conv[i](out)\n x = torch.cat((x,out),1)\n\n return x","repo_name":"RicardoBauchspiess/ComputerVision","sub_path":"Classification/CustomArchitecture/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":5336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"34730847450","text":"from telebot import types\nfrom bot.bot_connection import bot\nfrom database.database_commands import search\n\n\ndef query_text(inline_query):\n try:\n result = list(\n map(lambda recipe: types.InlineQueryResultArticle(id=recipe[0],\n title=recipe[2], input_message_content=types.InputTextMessageContent(recipe[2])\n ),\n search('recipe', inline_query.query)\n ))\n\n bot.answer_inline_query(inline_query.id, result)\n except Exception as e:\n print(e)\n","repo_name":"leevayy/edabudet","sub_path":"inline.py","file_name":"inline.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"5369497356","text":"from .serializers import UserProfileSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.parsers import MultiPartParser,FormParser\nfrom django.contrib.auth import get_user_model\nfrom .models import UserProfile\nUser = get_user_model()\n\nclass UserProfileView(APIView):\n\n # parser_classes = MultiPartParser, FormParser\n\n def get(self,request,*args,**kwargs):\n try:\n userprofile = UserProfile.objects.get(user=request.user)\n serializer = UserProfileSerializer(instance=userprofile)\n return Response(serializer.data,status=status.HTTP_200_OK)\n except UserProfile.DoesNotExist:\n return Response({'msg':'ok'},status=status.HTTP_304_NOT_MODIFIED)\n\n def post(self,request,format=None):\n data = request.data\n data['user'] = request.user\n try:\n UserProfile.objects.get(user=request.user)\n except Exception:\n UserProfile.objects.create(**data)\n return Response({'msg':'ok'})\n\n def put(self,request):\n user_profile = request.user.userprofile\n user_profile.country = request.data['country']\n user_profile.address = request.data['address']\n user_profile.save()\n return Response({'msg':\"updated\"})\n\n\n\n\n \n\n\n \n\n \n\n\n","repo_name":"rojit1/sg_app","sub_path":"userprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"33388618151","text":"from flask import request\nfrom flask_restful import Resource\nfrom ..schema import *\nfrom ..business import MagnitudeBusiness\nfrom .query_params_helper import QueryParamsHelper\nimport json\n\nmagnitude_business = MagnitudeBusiness()\n\nclass MagnitudeListEndpoint(Resource):\n @staticmethod\n def get():\n #Get params from url\n page, per_page, order_by, order_by_descending = QueryParamsHelper.get_paged_params(request)\n #Get stations from business\n magnitudes = magnitude_business.get_magnitudes(page, per_page, order_by, order_by_descending)\n #Instance schema\n pfocollection_schema = get_pfo(MagnitudeDtoSchema)\n #Return json data\n return pfocollection_schema.dump(magnitudes, many=False)\n\nclass MagnitudeCreationEndpoint(Resource):\n @staticmethod\n def post():\n #Instance schema\n magnitude_creation_dto_schema = MagnitudeCreationDtoSchema()\n #Parse json to dto\n magnitude_creation_dto = magnitude_creation_dto_schema.loads(request.data)\n #Create magnitude\n 
return magnitude_business.create_magnitude(magnitude_creation_dto)\n\nclass MagnitudeUpdateEndpoint(Resource):\n @staticmethod\n def put():\n #Get params from url\n magnitude_id = request.args.get('id')\n #Instance schema\n magnitude_update_dto_schema = MagnitudeUpdateDtoSchema()\n #Parse json to dto\n magnitude_update_dto = magnitude_update_dto_schema.loads(request.data)\n #Create station\n return magnitude_business.update_magnitude(magnitude_id, magnitude_update_dto)\n\nclass MagnitudeBatchCreationEndpoint(Resource):\n @staticmethod\n def post():\n #Instance schema\n magnitude_creation_dto_schema = MagnitudeCreationDtoSchema()\n #Parse json to dto\n magnitude_creation_dto_list = magnitude_creation_dto_schema.loads(request.data, many=True)\n #Create measurement\n items_not_created = magnitude_business.create_magnitudes_in_batch(magnitude_creation_dto_list)\n #Instance result schema\n magnitude_batch_creation_result_dto_schema = BatchCreationResultDtoSchema()\n #Return json data\n return magnitude_batch_creation_result_dto_schema.dump(items_not_created, many=False)\n\nclass MagnitudeExistenceEndpoint(Resource):\n @staticmethod\n def get():\n #Get params from url\n magnitude_ids = request.args.getlist('ids')\n #Get not found magnitude ids\n magnitude_existence_dto = magnitude_business.magnitude_existence(magnitude_ids)\n #Instance schema\n magnitude_existence_dto_schema = ExistenceDtoSchema()\n #Return json data\n return magnitude_existence_dto_schema.dump(magnitude_existence_dto, many=False)\n\n","repo_name":"daviferna/borealis","sub_path":"Borealis.Api/app/api/magnitude_endpoint.py","file_name":"magnitude_endpoint.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"71"} +{"seq_id":"9342546652","text":"# 721. Accounts Merge\n# DescriptionHintsSubmissionsDiscussSolution\n# Given a list accounts, each element accounts[i] is a list of strings, where the first element accounts[i][0] is a name, and the rest of the elements are emails representing emails of the account.\n\n# Now, we would like to merge these accounts. Two accounts definitely belong to the same person if there is some email that is common to both accounts. Note that even if two accounts have the same name, they may belong to different people as people could have the same name. A person can have any number of accounts initially, but all of their accounts definitely have the same name.\n\n# After merging the accounts, return the accounts in the following format: the first element of each account is the name, and the rest of the elements are emails in sorted order. 
The accounts themselves can be returned in any order.\n\n# Example 1:\n# Input: \n# accounts = [[\"John\", \"johnsmith@mail.com\", \"john00@mail.com\"], [\"John\", \"johnnybravo@mail.com\"], [\"John\", \"johnsmith@mail.com\", \"john_newyork@mail.com\"], [\"Mary\", \"mary@mail.com\"]]\n# Output: [[\"John\", 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com'], [\"John\", \"johnnybravo@mail.com\"], [\"Mary\", \"mary@mail.com\"]]\n# Explanation: \n# The first and third John's are the same person as they have the common email \"johnsmith@mail.com\".\n# The second John and Mary are different people as none of their email addresses are used by other accounts.\n# We could return these lists in any order, for example the answer [['Mary', 'mary@mail.com'], ['John', 'johnnybravo@mail.com'], \n# ['John', 'john00@mail.com', 'john_newyork@mail.com', 'johnsmith@mail.com']] would still be accepted.\n# Note:\n\n# The length of accounts will be in the range [1, 1000].\n# The length of accounts[i] will be in the range [1, 10].\n# The length of accounts[i][j] will be in the range [1, 30].\n\nclass Solution(object):\n def accountsMerge(self, accounts):\n \"\"\"\n :type accounts: List[List[str]]\n :rtype: List[List[str]]\n \"\"\"\n # build graph\n email2acc = dict()\n for i, acc in enumerate(accounts):\n for email in acc[1:]:\n if email in email2acc:\n email2acc[email].append(i)\n else:\n email2acc[email] = [i]\n graph = [[] for _ in accounts]\n for i, acc in enumerate(accounts):\n for email in acc[1:]:\n graph[i] += email2acc[email]\n # traverse graph\n visited = set()\n res = []\n for i in range(len(graph)):\n if i in visited:\n continue\n acc = [accounts[i][0], []]\n # breath first search\n level = [i]\n while level:\n node = level.pop(0)\n if node in visited:\n continue\n visited.add(node)\n acc[1] += accounts[node][1:]\n for child in graph[node]:\n level.append(child)\n acc[1] = sorted(list(set(acc[1])))\n res.append([acc[0]] + acc[1])\n return res\n\n\n","repo_name":"chunjiw/leetcode","sub_path":"L721_accountsMerge.py","file_name":"L721_accountsMerge.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"2783218450","text":"# -*- coding: utf-8 -*-\nfrom flask import Blueprint, jsonify, request\nfrom werkzeug.utils import secure_filename\n\nimport json\nimport os\nimport config\nfrom mongo_data import commodity\nfrom mongoengine import *\n\n\nupload = Blueprint('upload', __name__)\n\n\n@upload.route(\"/upload/single\", methods=[\"POST\"])\ndef upload_single():\n data = request.data.decode(\"UTF-8\")\n ret = dict()\n ret[\"err_no\"] = 0\n ret[\"err_msg\"] = \"success\"\n try:\n data = json.loads(data)\n index = data[\"commodity_source\"] + data[\"source_id\"]\n comm = commodity.Commodity()\n comm.name = data[\"name\"]\n comm.description = data[\"description\"]\n comm.index = index\n comm.price_list = data[\"price_list\"]\n comm.expect_price = data[\"expect_price\"]\n comm.source_id = data[\"source_id\"]\n comm.commodity_source = data[\"commodity_source\"]\n comm.commodity_url = data[\"commodity_url\"]\n comm.save()\n except KeyError as e:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"not have key\" + str(e)\n except ValueError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"json data resolve failed\"\n except NotUniqueError:\n ret[\"err_no\"] = 2\n ret[\"err_msg\"] = \"you must not upload the same information twice\"\n return jsonify(ret)\n\n\n@upload.route(\"/upload/multi\", 
methods=[\"POST\"])\ndef upload_multi():\n count = 0\n datas = request.data.decode(\"utf-8\")\n ret = dict()\n ret[\"err_no\"] = 0\n ret[\"err_msg\"] = \"0\"\n comms = []\n try:\n datas = json.loads(datas)\n for data in datas:\n index = data[\"commodity_source\"] + data[\"source_id\"]\n comm = commodity.Commodity()\n comm.name = data[\"name\"]\n comm.description = data[\"description\"]\n comm.price_list = data[\"price_list\"]\n comm.index = index\n comm.expect_price = data[\"expect_price\"]\n comm.source_id = data[\"source_id\"]\n comm.commodity_source = data[\"commodity_source\"]\n comm.commodity_url = data[\"commodity_url\"]\n count += 1\n comms.append(comm)\n commodity.Commodity.objects.insert(doc_or_docs=comms)\n except KeyError as e:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"not have key\" + str(e)\n return jsonify(ret)\n except ValueError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"json data resolve failed\"\n return jsonify(ret)\n except NotUniqueError as e:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"you must not upload the same information twice, \" + str(e)\n return jsonify(ret)\n ret[\"err_msg\"] = str(count)\n return jsonify(ret)\n\n\n@upload.route(\"/upload/imgupload//\", methods=[\"POST\"])\ndef imgupload(source_id, commodity_source):\n ret = dict()\n ret[\"err_no\"] = 0\n ret[\"err_msg\"] = \"success\"\n try:\n f = request.files['img_file']\n tmp = f.filename.split(\".\")\n tmp[0] = source_id\n f.filename = \".\".join(tmp)\n print(f.filename)\n if not f:\n raise FileNotFoundError\n try:\n fname = secure_filename(f.filename)\n f.save(os.path.join(config.IMAGE_FOLDER, commodity_source, fname))\n except FileExistsError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"save this image error\"\n return jsonify(ret)\n except FileNotFoundError:\n ret[\"err_no\"] = 1\n ret[\"err_msg\"] = \"not have this file or open file error\"\n return jsonify(ret)\n\n return jsonify(ret)\n\n\n","repo_name":"interfaceFeng/price_forecast","sub_path":"price_interface/uploads.py","file_name":"uploads.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"36601831270","text":"import logging\n\nfrom psa_car_controller.psa.connected_car_api import Battery\nfrom psa_car_controller.psa.connected_car_api.models.energy import Energy\nfrom psa_car_controller.psa.connected_car_api.models.energy_charging import EnergyCharging\nfrom psa_car_controller.psa.connected_car_api.models.geometry import Geometry\nfrom psa_car_controller.psa.connected_car_api.models.kinetic import Kinetic\nfrom psa_car_controller.psa.connected_car_api.models.position import Position\nfrom psa_car_controller.psa.connected_car_api.models.position_properties import PositionProperties\nfrom psa_car_controller.psa.connected_car_api.models.status import Status\nfrom psa_car_controller.psa.connected_car_api.models.vehicle_odometer import VehicleOdometer\n\nlogger = logging.getLogger(__name__)\n\n\n# pylint: disable=too-many-arguments\nclass CarStatus(Status):\n def __init__(self, embedded=None, links=None, battery=None, doors_state=None, energy=None, environment=None,\n ignition=None, kinetic=None, last_position=None, preconditionning=None, privacy=None, safety=None,\n service=None, timed_odometer=None): # noqa: E501\n super().__init__(embedded, links, battery, doors_state, energy, environment, ignition, kinetic, last_position,\n preconditionning, privacy, safety, service, timed_odometer)\n self.correct(False)\n\n def correct(self, 
electric_car):\n try:\n if len(self.last_position.geometry.coordinates) < 2:\n raise AttributeError()\n if len(self.last_position.geometry.coordinates) < 3:\n # set altitude none\n self.last_position.geometry.coordinates.append(None)\n except (AttributeError, TypeError):\n self.last_position = Position(geometry=Geometry(coordinates=[None, None, None], type=\"Point\"),\n properties=PositionProperties(updated_at=None))\n if self.kinetic is None:\n self.kinetic = Kinetic()\n # always put electric energy first\n if len(self._energy) == 2 and self._energy[0].type != 'Electric':\n self._energy = self._energy[::-1]\n\n if self.timed_odometer is None:\n self.timed_odometer = VehicleOdometer()\n if electric_car:\n self.get_energy(\"Fuel\").level = None\n if self.battery is None:\n self.battery = Battery()\n\n def is_moving(self):\n try:\n return self.kinetic.moving\n except AttributeError:\n logger.error(\"kinetic not available from api\")\n return None\n\n def get_energy(self, energy_type) -> Energy:\n for energy in self._energy:\n if energy.type == energy_type:\n return energy\n return Energy(charging=EnergyCharging())\n","repo_name":"flobz/psa_car_controller","sub_path":"psa_car_controller/psacc/model/car_status.py","file_name":"car_status.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":306,"dataset":"github-code","pt":"70"} +{"seq_id":"35159488173","text":"import logging\n\nimport disnake\nfrom disnake.ext import commands\n\nfrom cogs.utils import database as db\nORM = db.ORM()\n\nlogger = logging.getLogger('bot.Guilds')\n\n\nclass Guilds(commands.Cog):\n \"\"\" Manage when the bot is added/removed from a guild.\n \"\"\"\n\n def __init__(self, bot: commands.Bot):\n self.bot = bot\n logger.info('Loaded.')\n\n msg = \"Use the command below **in your Server** to follow your first game 🎮\\n\"\n msg += \"```\\n\"\n msg += \"/dt-set-channel game\\n\"\n msg += \"```\\n\"\n msg += \"You can find some explanations for all available commands on .\"\n\n self.server_btn = disnake.ui.Button(\n label=\"Official Discord Server\",\n url=\"https://discord.gg/QN9uveFYXX\",\n style=disnake.ButtonStyle.link,\n ),\n\n self.help_message = msg\n\n # ---------------------------------------------------------------------------------\n # EVENT LISTENERS\n # ---------------------------------------------------------------------------------\n\n @commands.Cog.listener()\n async def on_message(self, message: disnake.Message):\n if isinstance(message.channel, disnake.DMChannel) and message.author != self.bot.user:\n\n await message.reply(self.help_message, components=[self.server_btn])\n\n\n @commands.Cog.listener()\n async def on_guild_join(self, guild : disnake.Guild):\n\n dt_channel = self.bot.get_channel(985250371981172757)\n if dt_channel:\n await dt_channel.send(f'`{guild.name} [{guild.id}]` joined. 
(Approx `{guild.member_count}` members)')\n\n await ORM.add_guild(guild.id)\n logger.info(f'{guild.name} [{guild.id}] added to DB.')\n\n\n # We can see the owner only if we have the Members privileged intent\n if not guild.owner_id:\n return\n\n msg = \"I'm now ready to track GameDevs for you !\\n\"\n msg += self.help_message\n\n try:\n owner = await self.bot.fetch_user(guild.owner_id)\n if not owner.dm_channel:\n await owner.create_dm()\n await owner.dm_channel.send(msg, components=[self.server_btn])\n except disnake.Forbidden:\n logger.warning(f'{guild.name}[{guild.id}] owner has blocked his DMs.')\n\n\n @commands.Cog.listener()\n async def on_guild_remove(self, guild : disnake.Guild):\n\n dt_channel = self.bot.get_channel(985250371981172757)\n if dt_channel:\n await dt_channel.send(f'`{guild.name} [{guild.id}]` removed. (Approx `{guild.member_count}` members)')\n\n await ORM.rm_guild(guild.id)\n logger.info(f'{guild.name} [{guild.id}] removed from DB.')\n\n # ---------------------------------------------------------------------------------\n # SLASH COMMANDS\n # ---------------------------------------------------------------------------------\n\n @commands.slash_command(name=\"dt-invite\", description=\"Invite DevTracker to your server.\")\n async def invite(self, inter : disnake.ApplicationCommandInteraction):\n logger.info(f'{inter.guild.name} [{inter.guild_id}] : Show invite link.')\n\n invite_btn = disnake.ui.Button(\n label=\"Invite Me !\",\n url=\"https://discord.com/api/oauth2/authorize?client_id=982257201211138050&permissions=274877958144&scope=applications.commands%20bot\",\n style=disnake.ButtonStyle.link,\n ),\n\n await inter.response.send_message(components=[invite_btn])\n\n @commands.slash_command(name=\"dt-help\", description=\"Struggling getting started?\")\n @commands.default_member_permissions(manage_guild=True)\n async def get_help_message(self, inter : disnake.ApplicationCommandInteraction):\n logger.info(f'{inter.guild.name} [{inter.guild_id}] : Show help.')\n\n await inter.response.send_message(self.help_message, components=[self.server_btn])\n\n @commands.slash_command(name=\"dt-discord-support\", description=\"Join the official DevTracker Discord Server.\")\n @commands.default_member_permissions(manage_guild=True)\n async def get_help_message(self, inter : disnake.ApplicationCommandInteraction):\n logger.info(f'{inter.guild.name} [{inter.guild_id}] : Show Server Invite.')\n\n await inter.response.send_message(components=[self.server_btn])\n\ndef setup(bot: commands.Bot):\n bot.add_cog(Guilds(bot))\n","repo_name":"s0me-1/devtracker-bot","sub_path":"cogs/guilds.py","file_name":"guilds.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"} +{"seq_id":"12287340760","text":"# -*- coding: utf-8 -*-\nfrom numpy import *\nfrom numpy import linalg as la\n\n\ndef loadExData():\n return [[0, 0, 0, 2, 2],\n [0, 0, 0, 3, 3],\n [0, 0, 0, 1, 1],\n [1, 1, 1, 0, 0],\n [2, 2, 2, 0, 0],\n [5, 5, 5, 0, 0],\n [1, 1, 1, 0, 0]]\n\n\ndef loadExData2():\n return [[0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 5],\n [0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 3],\n [0, 0, 0, 0, 4, 0, 0, 1, 0, 4, 0],\n [3, 3, 4, 0, 0, 0, 0, 2, 2, 0, 0],\n [5, 4, 5, 0, 0, 0, 0, 5, 5, 0, 0],\n [0, 0, 0, 0, 5, 0, 1, 0, 0, 5, 0],\n [4, 3, 4, 0, 0, 0, 0, 5, 5, 0, 1],\n [0, 0, 0, 4, 0, 4, 0, 0, 0, 0, 4],\n [0, 0, 0, 2, 0, 2, 5, 0, 0, 1, 2],\n [0, 0, 0, 0, 5, 0, 0, 0, 0, 4, 0],\n [1, 0, 0, 0, 0, 0, 0, 1, 2, 0, 0]]\n#相似度1:欧式距离\ndef ecludSim(inA,inB):\n 
return 1.0/(1.0 + la.norm(inA - inB))\n#相似度2:威尔逊距离\ndef pearsSim(inA,inB):\n if len(inA) < 3 : return 1.0\n return 0.5+0.5*corrcoef(inA, inB, rowvar = 0)[0][1]\n#相似度3:余弦\ndef cosSim(inA,inB):\n num = float(inA.T*inB)\n denom = la.norm(inA)*la.norm(inB)\n return 0.5 + 0.5 * (num / denom)\n\n\n#遍历 计算相似度\ndef standEst(dataMat, user, simMeas, item):#数据矩阵、用户编号、相似度计算方法和物品编号\n n = shape(dataMat)[1]\n simTotal = 0.0;ratSimTotal = 0.0\n for j in range(n):\n userRating = dataMat[user, j]\n if userRating == 0: continue\n #寻找两个用户都做了评价的产品\n overLap = nonzero(logical_and(dataMat[:, item].A > 0, dataMat[:, j].A > 0))[0]\n if len(overLap) == 0:\n similarity = 0\n else:#存在两个用户都评价的产品 计算相似度\n similarity = simMeas(dataMat[overLap, item], dataMat[overLap, j])\n print ('the %d and %d similarity is: %f' % (item, j, similarity))\n simTotal += similarity #计算每个用户对所有评价产品累计相似度\n ratSimTotal += similarity * userRating #根据评分计算比率\n if simTotal == 0:\n return 0\n else:\n return ratSimTotal / simTotal\n\n#利用SVD\ndef svdEst(dataMat, user, simMeas, item):\n n = shape(dataMat)[1]\n simTotal = 0.0;ratSimTotal = 0.0\n U, Sigma, VT = la.svd(dataMat) #不同于stanEst函数,加入了SVD分解\n Sig4 = mat(eye(4) * Sigma[:4]) # 建立对角矩阵\n xformedItems = dataMat.T * U[:, :4] * Sig4.I #降维:变换到低维空间\n #下面依然是计算相似度,给出归一化评分\n for j in range(n):\n userRating = dataMat[user, j]\n if userRating == 0 or j == item: continue\n similarity = simMeas(xformedItems[item, :].T, xformedItems[j, :].T)\n print ('the %d and %d similarity is: %f' % (item, j, similarity))\n simTotal += similarity\n ratSimTotal += similarity * userRating\n if simTotal == 0:\n return 0\n else:\n return ratSimTotal / simTotal\n\n\ndef recommend(dataMat, user, N=3, simMeas=cosSim, estMethod=standEst):\n unratedItems = nonzero(dataMat[user, :].A == 0)[1] #寻找用户未评价的产品\n if len(unratedItems) == 0: return ('you rated everything')\n itemScores = []\n for item in unratedItems:\n estimatedScore = estMethod(dataMat, user, simMeas, item)#基于相似度的评分\n itemScores.append((item, estimatedScore))\n return sorted(itemScores, key=lambda jj: jj[1], reverse=True)[:N]\n\n\n#实例:SVD实现图像压缩\n\n#打印矩阵。由于矩阵包含了浮点数,因此必须定义浅色和深色。\ndef printMat(inMat, thresh=0.8):\n for i in range(32):\n for k in range(32):\n if float(inMat[i,k]) > thresh:\n print (1,)\n else: print (0,)\n print ('')\n\n#压缩\ndef imgCompress(numSV=3, thresh=0.8):\n myl = []\n for line in open('0_5.txt').readlines():\n newRow = []\n for i in range(32):\n newRow.append(int(line[i]))\n myl.append(newRow)\n myMat = mat(myl)\n print (\"****original matrix******\")\n #printMat(myMat, thresh)\n U,Sigma,VT = la.svd(myMat) #SVD分解\n SigRecon = mat(zeros((numSV, numSV))) #创建初始特征\n for k in range(numSV):#构造对角矩阵\n SigRecon[k,k] = Sigma[k]\n reconMat = U[:,:numSV]*SigRecon*VT[:numSV,:]\n print (\"****reconstructed matrix using %d singular values******\" % numSV)\n #printMat(reconMat, thresh)","repo_name":"stonycat/ML-in-Action-Code-and-Note","sub_path":"ch14/svdRec.py","file_name":"svdRec.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"70"} +{"seq_id":"31787352588","text":"import numpy as np\nfrom .preprocessing import Preprocessing\nimport train\n\n\n# convert history into inputs and outputs\ndef to_supervised(train, n_input, n_out):\n # flatten data\n data = train.reshape((train.shape[0] * train.shape[1], train.shape[2]))\n X, y = list(), list()\n in_start = 0\n # step over the entire history one time step at a time\n for _ in range(len(data)):\n # define the end of the input 
sequence\n in_end = in_start + n_input\n out_end = in_end + n_out\n # ensure we have enough data for this instance\n if out_end < len(data):\n x_input = data[in_start:in_end, :]\n # x_input = x_input.reshape((len(x_input), x_input.shape[1]))\n X.append(x_input)\n y.append(data[in_end:out_end, 0])\n # move along one time step\n in_start += n_out\n\n return np.array(X), np.array(y)\n\n\ndef load_lstm_data(folder, filename, batch_size, n_output, split_set=True):\n df = Preprocessing.load_df(folder, filename)\n df['last_updated'] = df['last_updated'].astype(np.datetime64)\n if split_set:\n train_set, val_set, test_set , train_set_last, val_set_last, test_set_last = \\\n train.train_val_test_split(df, batch_size, 0.1, 0.1)\n x_train, y_train = to_supervised(train_set, batch_size, n_output)\n #x_train_last, y_train_last = to_supervised(train_set_last, len(train_set_last), n_output)\n x_val, y_val = to_supervised(train_set, batch_size, n_output)\n #x_val_last, y_val_last = to_supervised(val_set_last, len(val_set_last), n_output)\n x_test, y_test = to_supervised(train_set, batch_size, n_output)\n #x_test_last, y_test_last = to_supervised(test_set_last, len(test_set_last), n_output)\n return x_train, x_val, x_test, y_train, y_val, y_test\n\n else:\n data_set, data_set_last = train.prepare_single_set(df, batch_size)\n x, y = to_supervised(data_set, batch_size, n_output)\n #x_last, y_last = to_supervised(data_set_last, len(data_set_last), n_output)\n\n return x, y\n","repo_name":"CMootz/week_program","sub_path":"src/Preprocessing/prepare_lstm.py","file_name":"prepare_lstm.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"71275941987","text":"from datetime import datetime\nfrom adapters import NetworkAdapter\nimport time\nimport re\n\n\nclass ServerMonitor:\n def __init__(self, config_file: list, network_adapter: NetworkAdapter):\n self.config = config_file\n self.network_adapter = network_adapter\n\n @staticmethod\n def is_host_ip(host: str) -> bool:\n # Проверка, является ли переданный хост IP-адресом\n ip_pattern = r'^(\\d{1,3}\\.){3}\\d{1,3}$'\n if re.match(ip_pattern, host):\n octets = host.split('.')\n if all(int(octet) < 256 for octet in octets):\n return True\n return False\n\n @staticmethod\n def is_valid_host(host: str) -> bool:\n # Проверка, является ли переданный хост допустимым IP-адресом или доменным именем\n hostname_pattern = r'^[a-zA-Z0-9]+([\\-.]{1}[a-zA-Z0-9]+)*\\.[a-zA-Z]{2,20}$'\n special_domains = ['localhost', 'broadcasthost']\n\n if host in special_domains:\n return True\n\n if ServerMonitor.is_host_ip(host):\n return True\n\n if re.match(hostname_pattern, host):\n return True\n\n return False\n\n @staticmethod\n def validate_input_data(input_data: list) -> bool:\n # Валидация данных входного файла\n validation_result = True\n for row_number, entry in enumerate(input_data, 2):\n host, port_list = entry\n if ServerMonitor.is_valid_host(host):\n if not host and port_list:\n print(f\"Некорректные входные данные: отсутствует доменное имя. Строка {row_number}\")\n if not host and not port_list:\n print(f\"Некорректные входные данные: и доменное имя, и порт отсутствуют. Строка {row_number}\")\n validation_result = False\n if port_list:\n for port in port_list:\n if not port.isdigit() or int(port) < 0 or int(port) > 65535:\n print(f\"Некорректные входные данные: недопустимый порт '{port}'. 
Строка {row_number}\")\n validation_result = False\n else:\n print(f\"Некорректные входные данные: неверный IP-адрес '{host}'. Строка {row_number}\")\n validation_result = False\n return validation_result\n\n def is_internet_available(self) -> bool:\n # Проверка доступности интернет-соединения\n internet_rtt_check = self.network_adapter.get_rtt('8.8.8.8')\n if internet_rtt_check is not None and internet_rtt_check < 2000:\n return True\n else:\n return False\n\n def monitor_server(self, host: str, ports: list) -> None:\n # Функция мониторинга сервера\n if not self.is_internet_available():\n print(\"Отсутсвует интернет-соединение. Ждем восстановление доступа.\")\n while not self.is_internet_available():\n time.sleep(10) # Check every 10 seconds\n print(\"Интернет-соединение восстановлено.\\n\")\n\n ips = list(set(self.network_adapter.resolve_domain(host))) if host else ['']\n if not host or self.is_host_ip(host):\n host = '???'\n print(f\"['{host}', {ips}, {ports}]\")\n for ip in ips:\n lost_packets = self.network_adapter.ping(ip)\n rtt = self.network_adapter.get_rtt(ip) if lost_packets != 100 else None\n open_ports = self.network_adapter.check_ports(ip, ports) if ports else []\n timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n if not ports:\n print(f\"{timestamp} | {host} | {ip} | {lost_packets} | {rtt} ms | -1 | ???\")\n else:\n for port in ports:\n status = 'Opened' if port in open_ports else 'Unknown'\n if port == '443':\n cert_status = self.network_adapter.check_certificate(host if host != '???' else ip)\n print(f\"{timestamp} | {host} | {ip} | {lost_packets} | {rtt} ms | {port} | {status} \"\n f\"| {cert_status}\")\n else:\n print(f\"{timestamp} | {host} | {ip} | {lost_packets} | {rtt} ms | {port} | {status}\")\n print()\n\n def monitor(self) -> None:\n # Функция мониторинга всех серверов из файла конфигурации\n for host, ports in self.config:\n self.monitor_server(host, ports)\n","repo_name":"GoshkaLP/tbo_python2023","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4942,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"74586932066","text":"# Driver file\n# Handling user input & displaying current GameState\n\nimport pygame as p\nfrom Engine import GameState, Move\n\np.init()\nWIDTH = HEIGHT = 512 # 400 other option\nDIMENSION = 8\nSQ_SIZE = HEIGHT // DIMENSION\nMAX_FPS = 15\nIMAGES = {}\n\n# Init global dictionary of images. Called once in main\n\n\ndef load_images():\n pieces = [\"wp\", \"wR\", \"wN\", \"wB\", \"wQ\",\n \"wK\", \"bp\", \"bR\", \"bN\", \"bB\", \"bQ\", \"bK\"]\n\n for piece in pieces:\n IMAGES[piece] = p.transform.scale(\n p.image.load(f\"img/{piece}.png\"), (SQ_SIZE, SQ_SIZE))\n\n\ndef draw_board(screen):\n colors = [p.Color(\"white\"), p.Color(\"dark green\")]\n for r in range(DIMENSION):\n for c in range(DIMENSION):\n color = colors[((r+c) % 2)]\n p.draw.rect(screen, color, p.Rect(\n c*SQ_SIZE, r*SQ_SIZE, SQ_SIZE, SQ_SIZE))\n\n\ndef draw_pieces(screen, board):\n for r in range(DIMENSION):\n for c in range(DIMENSION):\n piece = board[r][c]\n if piece != \"--\":\n screen.blit(IMAGES[piece], p.Rect(\n c*SQ_SIZE, r*SQ_SIZE, SQ_SIZE, SQ_SIZE))\n\n\n# Responsible for all graphics within current gamestate\ndef draw_game_state(screen, gs):\n draw_board(screen)\n draw_pieces(screen, gs.board)\n\n\n# Main driver for our code. 
Handle user input & updating graphics\ndef main():\n p.init()\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n # screen.fill(p.Color(\"White\"))\n gs = GameState()\n\n validMoves = gs.get_valid_moves()\n moveMade = False # flag variable for made move\n\n load_images()\n\n sqSelected = () # last click of the user\n playerClicks = [] # Keep track of clicks [(6,4), (4,4)]\n\n running = True\n while running:\n for e in p.event.get():\n if e.type == p.QUIT:\n running = False\n # mouse handler\n elif e.type == p.MOUSEBUTTONDOWN:\n location = p.mouse.get_pos() # (x,y) location of mouse\n col = location[0]//SQ_SIZE\n row = location[1]//SQ_SIZE\n if sqSelected == (row, col):\n sqSelected = ()\n playerClicks = []\n else:\n sqSelected = (row, col)\n playerClicks.append(sqSelected)\n if len(playerClicks) == 2:\n move = Move(playerClicks[0], playerClicks[1], gs.board)\n for i in range(len(validMoves)):\n if move == validMoves[i]:\n gs.make_move(validMoves[i])\n print(move.get_chess_notation())\n moveMade = True\n sqSelected = () # reset user clicks\n playerClicks = []\n if not moveMade:\n playerClicks = [sqSelected]\n\n # key handler\n elif e.type == p.KEYDOWN:\n if e.key == p.K_u:\n gs.undo_move()\n moveMade = True\n if moveMade:\n validMoves = gs.get_valid_moves()\n moveMade = False\n draw_game_state(screen, gs)\n clock.tick(MAX_FPS)\n p.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"berkan-alci/Python_Chess_Engine","sub_path":"logic/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"37477254039","text":"'''\n4. Median of Two Sorted Arrays\nHard\n\nGiven two sorted arrays nums1 and nums2 of size m and n respectively, return the\nmedian of the two sorted arrays.\n\nFollow up: The overall run time complexity should be O(log (m+n)).\n\n\n\nExample 1:\n\nInput: nums1 = [1,3], nums2 = [2]\nOutput: 2.00000\nExplanation: merged array = [1,2,3] and median is 2.\nExample 2:\n\nInput: nums1 = [1,2], nums2 = [3,4]\nOutput: 2.50000\nExplanation: merged array = [1,2,3,4] and median is (2 + 3) / 2 = 2.5.\nExample 3:\n\nInput: nums1 = [0,0], nums2 = [0,0]\nOutput: 0.00000\nExample 4:\n\nInput: nums1 = [], nums2 = [1]\nOutput: 1.00000\nExample 5:\n\nInput: nums1 = [2], nums2 = []\nOutput: 2.00000\n\n\nConstraints:\n\nnums1.length == m\nnums2.length == n\n0 <= m <= 1000\n0 <= n <= 1000\n1 <= m + n <= 2000\n-106 <= nums1[i], nums2[i] <= 106\n\n'''\n\n\nclass Solution:\n\n # Time O(log(min(M,N))), Space O(1), runtime = 88 ms\n def findMedianSortedArrays(self, nums1: List[int], nums2: List[int]) -> float:\n\n if not nums1 and not nums2: return -1\n\n # assume nums1 is the smallest array\n size1 = len(nums1)\n size2 = len(nums2)\n if size1 > size2:\n size1, size2 = size2, size1\n nums1, nums2 = nums2, nums1\n\n # binary search to find the partition\n lo, hi = 0, size1\n while lo <= hi:\n mid1 = (lo + hi) // 2\n mid2 = (size1 + size2 + 1) // 2 - mid1\n\n l1_index = mid1 - 1\n r1_index = mid1\n l2_index = mid2 - 1\n r2_index = mid2\n\n l1 = nums1[l1_index] if mid1 != 0 else float('-inf')\n r1 = nums1[r1_index] if mid1 != size1 else float('inf')\n l2 = nums2[l2_index] if mid2 != 0 else float('-inf')\n r2 = nums2[r2_index] if mid2 != size2 else float('inf')\n\n if l2 <= r1 and l1 <= r2:\n # calculate the median value\n if (size1 + size2) % 2:\n return max(l1, l2)\n else:\n a = max(l1, l2)\n b = min(r1, r2)\n return (a + b) / 2\n\n elif l1 > 
r2:\n hi = mid1 - 1\n\n else:\n lo = mid1 + 1\n","repo_name":"jjingdong/LeetCode","sub_path":"4MedianOfTwoSortedArrays.py","file_name":"4MedianOfTwoSortedArrays.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"70"} +{"seq_id":"9364631702","text":"import persistent\nfrom zope.interface import implements\nfrom BTrees.OOBTree import OOBTree\nfrom BTrees.Length import Length\n\nfrom collective.subscribe.interfaces import IItemSubscriber, ISubscribers\nfrom collective.subscribe.utils import bind_field_properties\n\n\nclass ItemSubscriber(persistent.Persistent):\n \"\"\"Item subscriber implementation\"\"\"\n\n implements(IItemSubscriber)\n\n bind_field_properties(locals(), IItemSubscriber) # props from field schema\n\n def __init__(self, **kwargs):\n \"\"\"\n Construct, if keyword arguments are used to construct, validate\n invariant on passed field values.\n \"\"\"\n if kwargs:\n user = kwargs.get('user', None)\n name = kwargs.get('name', None)\n namespace = kwargs.get('namespace', 'member')\n email = kwargs.get('email', None)\n if isinstance(user, unicode):\n user = user.encode('utf-8')\n self.user = user\n if isinstance(email, unicode):\n email = email.encode('utf-8')\n self.email = email\n if isinstance(name, str):\n name = name.decode('utf-8')\n self.name = name\n if isinstance(namespace, unicode):\n namespace = namespace.encode('utf-8')\n self.namespace = namespace\n IItemSubscriber.validateInvariants(self)\n\n def signature(self):\n \"\"\"\n return two-string tuple signature of (namespace, user or email); can\n be used as a composed key for storage implementations. Raises a\n zope.interface.Invalid exception if signature is not possible due to\n insufficient field data.\n \"\"\"\n IItemSubscriber.validateInvariants(self) # may raise Invalid...\n namespace = self.namespace\n identifier = self.user\n if self.email and not self.user:\n namespace = 'email' # ignore field default\n identifier = self.email\n return (namespace, identifier)\n\n\nclass SubscribersContainer(OOBTree):\n \"\"\"Container/mapping for subscribers\"\"\"\n implements(ISubscribers)\n\n def __init__(self, *args, **kwargs):\n super(SubscribersContainer, self).__init__(*args, **kwargs)\n self.size = Length()\n\n # wrap superclass __getstate__ and __setstate__ to save attrs such\n\n def __getstate__(self):\n tree_state = super(SubscribersContainer, self).__getstate__()\n attr_state = [(k, v) for k, v in self.__dict__.items()\n if not (k.startswith('_v_') or k.startswith('__'))]\n return (tree_state, attr_state)\n\n def __setstate__(self, v):\n tree_state = v[0]\n attr_state = v[1]\n for k, v in attr_state:\n setattr(self, k, v)\n super(SubscribersContainer, self).__setstate__(tree_state)\n\n def _normalize_key(self, key):\n \"\"\"\n given key or object providing IItemSubscriber, normalize unique key\n \"\"\"\n if IItemSubscriber.providedBy(key):\n key = key.signature()\n elif isinstance(key, basestring):\n key = ('email', str(basestring))\n if not (len(key) == 2 and key[0] and key[1]):\n raise KeyError('incomplete key for subscriber')\n return key\n\n def _set_new(self, key, value):\n \"\"\"set new item, but do not allow replacing existing item\"\"\"\n if key in self:\n raise ValueError('attempt to add: duplicate key; record exists')\n self.__setitem__(key, value)\n\n def add(self, *args, **kwargs):\n k = None\n fields = kwargs\n if not kwargs and len(args) == 1:\n v = args[0]\n if IItemSubscriber.providedBy(v):\n k = 
self._normalize_key(v)\n if isinstance(v, persistent.Persistent):\n self._set_new(k, v)\n return k, v\n fields = v.__dict__ # we'll copy values, not object to store\n # otherwise, assume a dict from args[0]:\n else:\n try:\n fields = dict(v)\n except ValueError:\n import sys\n exc_info = sys.exc_info()\n raise (KeyError, exc_info[1], exc_info[2]) # noqa\n v = ItemSubscriber(**fields)\n if k is None:\n k = self._normalize_key(v)\n self._set_new(k, v)\n return k, v\n\n # Callers should not use __setitem__ -- it is here as a check\n # on keeping a BTree size/length extrinsic to the BTree itself.\n def __setitem__(self, key, value):\n if not IItemSubscriber.providedBy(value):\n raise ValueError('__setitem__ value must provide IItemSubscriber')\n if key not in self:\n self.size.change(1) # increment\n super(SubscribersContainer, self).__setitem__(key, value)\n\n def get(self, subscriber, default=None):\n key = self._normalize_key(subscriber)\n return super(SubscribersContainer, self).get(key, default)\n\n def __getitem__(self, key):\n key = self._normalize_key(key)\n return super(SubscribersContainer, self).__getitem__(key)\n\n def __len__(self):\n return self.size()\n\n def __contains__(self, key):\n normalized = self._normalize_key(key)\n if IItemSubscriber.providedBy(key):\n if key is not super(SubscribersContainer, self).get(normalized,\n None):\n return False\n key = self._normalize_key(normalized)\n return super(SubscribersContainer, self).__contains__(normalized)\n\n def __delitem__(self, key):\n key = self._normalize_key(key)\n super(SubscribersContainer, self).__delitem__(key)\n self.size.change(-1) # decrement if superclass __delitem__ succeeds\n\n","repo_name":"collective/collective.subscribe","sub_path":"collective/subscribe/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":5815,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"70"} +{"seq_id":"21308046024","text":"#!/usr/bin/python\n# from collections import defaultdict\nimport os\nimport datetime\nfrom datetime import datetime as dtime\n# import time\n\nDOCUMENTATION = '''\n---\nmodule: cr_lambda_triggers\nshort_description: Creates, updates or deletes AWS Lambda function event mappings.\ndescription:\n - This module allows the management of AWS Lambda function event source mappings such as S3 bucket\n events, DynamoDB and Kinesis streaming events via the Ansible framework.\n It is idempotent and supports \"Check\" mode. Use module M(lambda) to manage the lambda\n function itself and M(lambda_alias) to manage function aliases.\nversion_added: \"2.1\"\nauthor: Robert Colvin (@rcolvin)\noptions:\n aws_access_key:\n description:\n - AWS access key id. If not set then the value of the AWS_ACCESS_KEY environment variable is used.\n required: false\n default: null\n aliases: [ 'ec2_access_key', 'access_key' ]\n aws_secret_key:\n description:\n - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.\n required: false\n default: null\n aliases: ['ec2_secret_key', 'secret_key']\n lambda_function_arn:\n description:\n - The name or ARN of the lambda function.\n required: true\n aliases: ['function_name', 'function_arn']\n state:\n description:\n - Describes the desired state and defaults to \"present\".\n required: true\n default: \"present\"\n choices: [\"present\", \"absent\"]\n alias:\n description:\n - Name of the function alias. 
Mutually exclusive with C(version).\n required: true\n version:\n description:\n - Version of the Lambda function. Mutually exclusive with C(alias).\n required: false\n event_source:\n description:\n - Source of the event that triggers the lambda function.\n required: true\n choices: ['s3', 'Kinesis', 'DynamoDB', 'SNS']\n source_params:\n description:\n - Sub-parameters required for event source.\n - I(== S3 event source ==)\n - C(id) Unique ID for this source event.\n - C(bucket) Name of source bucket.\n - C(prefix) Bucket prefix (e.g. images/)\n - C(suffix) Bucket suffix (e.g. log)\n - C(events) List of events (e.g. ['s3:ObjectCreated:Put'])\n - I(== stream event source ==)\n - C(source_arn) The Amazon Resource Name (ARN) of the Kinesis or DynamoDB stream that is the event source.\n - C(enabled) Indicates whether AWS Lambda should begin polling the event source. Default is True.\n - C(batch_size) The largest number of records that AWS Lambda will retrieve from your event source at the\n time of invoking your function. Default is 100.\n - C(starting_position) The position in the stream where AWS Lambda should start reading.\n Choices are TRIM_HORIZON or LATEST.\n - I(== SNS event source ==)\n - C(id) Unique ID for this source event.\n - C(topic_arn) The ARN of the topic to which you want to subscribe the lambda function.\n required: true\n requirements:\n - boto3\n extends_documentation_fragment:\n - aws\n\n'''\n\nEXAMPLES = '''\n- name: update/EXISTS [API RESOURCE] \n cr_apigw_set:\n apigw_type: \"resource\"\n name: \"{{ item.name }}\" ##name of the API\n path: \"{{ item.path }}\" ##FULL PATH of resource. use \"/\" for root\n state: \"{{ item.state }}\"\n with_items: \"{{ project.api_gw }}\"\n- name: update [API RESOURCE] [METHOD]\n cr_apigw_set:\n apigw_type: \"method\"\n name: \"{{ item.name }}\" ##name of the API\n path: \"{{ item.path }}\"\n operationName: \"{{ item.operational_name }}\"\n requestParameters: \"{{ item.request_params }}\"\n requestModels: \"{{ item.request_models }}\"\n responseModels: \"{{ item.response_models }}\"\n authorizationScopes: \"{{ item.auth_scope }}\"\n authName: \"{{item.authName}}\"\n apiKeyRequired: \"{{ item.apiKeyRequired }}\"\n authorizationType: \"{{ item.authorizationType }}\"\n httpMethod: \"{{ item.httpMethod }}\" ##GET, POST, other...\n state: \"{{ item.state }}\"\n integration: \"{{ item.method_integration }}\"\n response: \"{{ item.method_response }}\"\n with_items: \"{{ project.api_gw }}\"\n'''\n\n\ntry:\n import boto3\n from botocore.exceptions import ClientError, MissingParametersError, ParamValidationError\n HAS_BOTO3 = True\n\n from botocore.client import Config\nexcept ImportError:\n import boto\n HAS_BOTO3 = False\ndir_path = os.path.dirname(__file__)\n#\n\n\ndef file_append(path, filename, msg):\n with open(\"%s/LOG-%s.txt\" % (path, filename), \"a\") as file:\n file.write(\"\\n%s\" % (msg))\n# create a policy given actionPolicy object\n\n\ndef cr_apigw(state, module, client, name=None, resource=None, actionPolicy=None, description=None):\n pName = name\n found = True\n\n return [pName], False if found else True\n\n\ndef resource_gen(module, client, pathPart, apiId, pId):\n try:\n resource = client.create_resource(restApiId=apiId, parentId=pId, pathPart=pathPart)\n except ClientError as e:\n module.fail_json(msg=\"[E] resource_gen failed - {0}\".format(e.response['Error']['Message']))\n return resource\n\n\ndef getAllResources(client, restApiId, position=None):\n rlist = []\n if position is None:\n response = 
client.get_resources(restApiId=restApiId, limit=500)\n else:\n response = client.get_resources(restApiId=restApiId, position=position, limit=500)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAllResources(client, restApiId, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef cr_dynamo_event(state, module, client, clientstreams, event_source, function_name, source_params):\n found = True\n streams = client.list_event_source_mappings(FunctionName=function_name)['EventSourceMappings']\n targetStream = None\n UUID = None\n eventObj = None\n for stream in streams:\n streamSource = stream['EventSourceArn']\n if event_source in streamSource:\n targetStream = streamSource\n UUID = stream['UUID']\n eventObj = stream\n break\n\n if state == 'absent': # delete\n if targetStream: # already missing skip\n try:\n client.delete_event_source_mapping(UUID=UUID)\n except ClientError as e:\n module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(event_source, e.response['Error']['Message']))\n\n else: # add\n params = eventObjConform(module, source_params)\n enabled = params['enabled']\n batch_size = params['batch_size']\n starting_position = params['starting_position']\n MaximumBatchingWindowInSeconds = params['MaximumBatchingWindowInSeconds']\n\n ParallelizationFactor = params['ParallelizationFactor']\n DestinationConfig = params['DestinationConfig']\n\n MaximumRecordAgeInSeconds = params['MaximumRecordAgeInSeconds']\n BisectBatchOnFunctionError = params['BisectBatchOnFunctionError']\n MaximumRetryAttempts = params['MaximumRetryAttempts']\n\n if not targetStream:\n table = event_source.split(\"/\")[-1]\n targetStream = getTableStream(state, module, clientstreams, table)\n if eventObj:\n if MaximumBatchingWindowInSeconds != eventObj['MaximumBatchingWindowInSeconds']:\n eventObj.update({\"MaximumBatchingWindowInSeconds\": MaximumBatchingWindowInSeconds})\n found = False\n if BisectBatchOnFunctionError != eventObj['BisectBatchOnFunctionError']:\n eventObj.update({\"BisectBatchOnFunctionError\": BisectBatchOnFunctionError})\n found = False\n if not found:\n try:\n client.update_event_source_mapping(**eventObj)\n except ClientError as e:\n module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(event_source, e.response['Error']['Message']))\n else:\n try:\n if 'StartingPositionTimestamp' in params:\n StartingPositionTimestamp = params['StartingPositionTimestamp']\n if StartingPositionTimestamp == 0 or StartingPositionTimestamp == '0':\n year = dtime.today().year\n StartingPositionTimestamp = dtime(year, 1, 1)\n else:\n StartingPositionTimestamp = dtime.utcfromtimestamp(StartingPositionTimestamp)\n else:\n year = dtime.today().year\n StartingPositionTimestamp = dtime(year, 1, 1)\n params_obj = {\"EventSourceArn\": targetStream, \"FunctionName\": function_name,\n \"Enabled\": enabled, \"BatchSize\": batch_size,\n \"MaximumBatchingWindowInSeconds\": MaximumBatchingWindowInSeconds,\n \"ParallelizationFactor\": ParallelizationFactor,\n \"StartingPosition\": starting_position,\n \"DestinationConfig\": DestinationConfig,\n \"MaximumRecordAgeInSeconds\": MaximumRecordAgeInSeconds,\n \"BisectBatchOnFunctionError\": BisectBatchOnFunctionError,\n \"MaximumRetryAttempts\": MaximumRetryAttempts\n\n }\n if starting_position == \"AT_TIMESTAMP\":\n params_obj.update({\"StartingPositionTimestamp\": StartingPositionTimestamp})\n\n client.create_event_source_mapping(**params_obj)\n found = False\n except ClientError as e:\n 
module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(event_source, e.response['Error']['Message']))\n\n return [event_source], False if found else True\n\n\ndef getTableStream(state, module, clientstreams, table):\n # dynoClient = boto3.client(\"dynamodbstreams\")\n streams = clientstreams.list_streams(TableName=table)['Streams']\n for stream in streams:\n return stream['StreamArn']\n\n\ndef eventObjConform(module, source_params):\n params = source_params\n\n enabled = params['enabled']\n # module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(enabled, params))\n\n batch_size = int(params['batch_size'])\n starting_position = params['starting_position']\n MaximumBatchingWindowInSeconds = int(params['MaximumBatchingWindowInSeconds'])\n\n ParallelizationFactor = int(params['ParallelizationFactor'])\n if ParallelizationFactor == 0:\n ParallelizationFactor = 1\n DestinationConfig = params['DestinationConfig']\n if isinstance(DestinationConfig, str):\n DestinationConfig = params['DestinationConfig']\n onfailure = False\n onsuccess = False\n if 'OnFailure' in DestinationConfig:\n if DestinationConfig['OnFailure']:\n onfailure = True\n if 'OnSuccess' in DestinationConfig:\n if DestinationConfig['OnSuccess']:\n onsuccess = True\n if not onsuccess and not onfailure:\n DestinationConfig = {}\n\n MaximumRecordAgeInSeconds = int(params['MaximumRecordAgeInSeconds'])\n if MaximumRecordAgeInSeconds == 0:\n MaximumRecordAgeInSeconds = 60000\n BisectBatchOnFunctionError = params['BisectBatchOnFunctionError']\n if BisectBatchOnFunctionError == 0 or BisectBatchOnFunctionError == '0':\n BisectBatchOnFunctionError = False\n else:\n BisectBatchOnFunctionError = True\n # module.fail_json(msg=\"[E] dynamo trigger DELETE failed {0} - {1}\".format(BisectBatchOnFunctionError, params))\n MaximumRetryAttempts = int(params['MaximumRetryAttempts'])\n\n obj = {\n \"enabled\": enabled,\n \"batch_size\": batch_size,\n \"starting_position\": starting_position,\n \"MaximumBatchingWindowInSeconds\": MaximumBatchingWindowInSeconds,\n \"ParallelizationFactor\": ParallelizationFactor,\n \"DestinationConfig\": DestinationConfig,\n \"MaximumRecordAgeInSeconds\": MaximumRecordAgeInSeconds,\n \"BisectBatchOnFunctionError\": BisectBatchOnFunctionError,\n \"MaximumRetryAttempts\": MaximumRetryAttempts\n }\n return obj\n\n\ndef cr_resource(state, module, client, name, path, description):\n found = True\n apiFound = api_exists(module, name, client)\n if apiFound is None:\n module.fail_json(msg=\"[E] cr_resource API name - {0} not found\".format(name))\n restApiId = apiFound[\"id\"]\n # rlist = client.get_resources( restApiId=restApiId, limit=500)['items']\n rlist = getAllResources(client, restApiId)\n # pId = None\n pathPart = path.rsplit('/', 1)[-1] # users\n parentPath = path.rsplit('/', 1)[-2]\n dictPath = {}\n for rs in rlist:\n parentID = pPart = None\n if 'parentId' in rs:\n parentID = rs['parentId']\n if 'pathPart' in rs:\n pPart = rs['pathPart']\n dictPath.update({rs['path']: {'pid': parentID, 'pathPart': pPart, 'id': rs['id']}})\n\n if pathPart == \"\" and pathPart == parentPath: # root update here so nothing required\n return [path], False if found else True\n if path in dictPath: # already exists. 
return without change\n return [path], False if found else True\n if parentPath == \"\": # root level so no update as needed\n for k, v in dictPath.items():\n rootPath = k.rsplit('/', 1)[-2]\n if path == rootPath: # root level CONFIRMED so no update as needed\n # # module.fail_json(msg=\"[T] cr_resource API - {0} set as\".format(found))\n return [path], False if found else True\n\n # module.fail_json(msg=\"[T] cr_resource API - {0}===={1}===={2}===={3} {4}\".format(dictPath,pathPart,parentPath,restApiId, path))\n\n sPath = path.split(\"/\")\n lastpath = \"\"\n lastId = dictPath['/']['id']\n attempts = len(sPath)\n found = False\n for n in range(attempts):\n if not sPath[n] == \"\":\n lastpath = lastpath + \"/\" + sPath[n]\n if lastpath in dictPath: # found ..update lastID and continue\n lastId = dictPath[lastpath]['id']\n continue\n rPart = lastpath.rsplit('/', 1)[-1]\n rsrc = resource_gen(module, client, rPart, restApiId, lastId)\n dictPath.update({lastpath: {'pid': lastId, 'pathPart': rPart, 'id': rsrc['id']}})\n lastId = rsrc['id']\n\n return [path], False if found else True\n\n\ndef getAll_rest_apis(client, position=None):\n rlist = []\n if position is None:\n response = client.get_rest_apis(limit=500)\n else:\n response = client.get_rest_apis(limit=500)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAll_rest_apis(client, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef api_exists(module, name, client):\n # client = boto3.client('apigateway')\n api = None\n # response = client.get_rest_apis( limit=450 )['items']\n response = getAll_rest_apis(client)\n for item in response:\n if item['name'].lower() == name.lower():\n #module.fail_json(msg=\"[T] name:'{0}' - '{1}' not found\".format(name,item['name']))\n api = item\n break\n return api\n\n\ndef resource_exists(module, path, apiId, client):\n resource = None\n # response = client.get_resources(restApiId=apiId, limit=450 )['items']\n response = getAllResources(client, apiId)\n # comparing=[]\n for item in response:\n # comparing.append(\"%s == %s\"%(path,item['path']))\n if item['path'].lower() == path.lower():\n resource = item\n break\n # module.fail_json(msg=\"[E] resource_exists API resource[{0}] - {1} \".format(resource, comparing ))\n return resource\n\n\ndef method_exists(module, method, apiId, rId, client):\n oMethod = None\n resource = client.get_resource(restApiId=apiId, resourceId=rId)\n if 'resourceMethods' in resource:\n for key, value in resource['resourceMethods'].items():\n if method.lower() == key.lower():\n # module.fail_json(msg=\"[E] method_exists API resource[{0}] - {1} \".format(key, resource ))\n oMethod = client.get_method(restApiId=apiId, resourceId=rId, httpMethod=key)\n del oMethod['ResponseMetadata']\n break\n # module.fail_json(msg=\"[T] method_exists API resource[{0}] {1}\".format( method, oMethod))\n return oMethod\n\n\ndef getAll_validators(client, restApiId, position=None):\n rlist = []\n if position is None:\n response = client.get_request_validators(restApiId=restApiId, limit=500)\n else:\n response = client.get_request_validators(restApiId=restApiId, limit=500, position=position)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAll_validators(client, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef validator_match(client, module, validator, restApiId):\n description = validator['name']\n validBody = validator['validateRequestBody']\n validReqParam = 
validator['validateRequestParameters']\n items = getAll_validators(client, restApiId)\n # module.fail_json(msg=\"[T] validator_match - {0} [{1}] {2}\".format( items, restApiId , validator))\n Found = None\n if items:\n for item in items:\n if validBody == item['validateRequestBody'] and validReqParam == item['validateRequestParameters'] and description == item['name']:\n return item\n response = client.create_request_validator(restApiId=restApiId,\n name=description,\n validateRequestBody=validBody,\n validateRequestParameters=validReqParam\n )\n return response\n\n\ndef getAll_authorizers(client, restApiId, position=None):\n rlist = []\n if position is None:\n response = client.get_authorizers(restApiId=restApiId, limit=500)\n else:\n response = client.get_authorizers(restApiId=restApiId, limit=500, position=position)\n baseList = response['items']\n if \"position\" in response:\n rlist = getAll_authorizers(client, response['position'], prevlist=[])\n final = baseList + rlist\n return final\n\n\ndef auth_present(client, module, authorizationName, restApiId):\n # items = client.get_authorizers(restApiId=restApiId)['items']\n items = getAll_authorizers(client, restApiId)\n for item in items:\n if authorizationName == item['name']:\n return item\n # module.fail_json(msg=\"[T] auth_present - {0} [{1}]\".format( items, restApiId ))\n # not found so fail\n return None\n\n\ndef model_present(client, module, model, apiId, update=True):\n old = None\n if model is None or not model:\n return old\n for mk, mv in model.items():\n if mv:\n if mv.lower() == \"empty\":\n return old\n\n modelName = None\n if 'name' in model:\n modelName = model['name']\n # module.fail_json(msg=\"[T] model_present models >>-> {0} \".format( model ) )\n if modelName is None:\n return None\n try:\n old = client.get_model(restApiId=apiId, modelName=modelName, flatten=True)\n if not old['schema'] in model['schema']:\n update = True\n else:\n nModel = old\n except ClientError as e:\n update = True\n if update:\n try:\n if not old is None:\n client.delete_model(restApiId=apiId, modelName=modelName)\n response = client.create_model(restApiId=apiId, name=modelName,\n description=model['description'],\n schema=model['schema'], contentType=model['contentType']\n )\n nModel = response\n except ClientError as e:\n module.fail_json(msg=\"[E] model_present failed - {0}\".format(e.response['Error']['Message']))\n return nModel\n\n\ndef cr_model(state, module, client, name, resource, description, apiId, schema, contentType):\n pName = name\n found = True\n try:\n obj = {'schema': schema, 'name': name, 'description': description, 'contentType': contentType}\n nModel = model_present(client, module, obj, apiId, True)\n found = False\n except ClientError as e:\n module.fail_json(msg=\"[E] model_present failed - {0}\".format(e.response['Error']['Message']))\n\n return [pName], False if found else True\n# isTest is not for Testing but to validate params are correct before CHANGE is made!!!!!\n# OTHERWISE YOU WILL LOOSE THE API FOREVER!!!!\n\n\ndef object_Method(name, description, httpMethod, integration, response, path, keyRequired, requestparameters, requestvalidator, authorizationType, authorizationName, requestModels, responseModels, operationName, authScopes, credentials):\n return type('obj', (object,), {\n \"name\": name,\n \"description\": description,\n \"httpMethod\": httpMethod,\n \"integration\": integration,\n \"response\": response,\n \"path\": path,\n \"keyRequired\": keyRequired,\n \"requestparameters\": requestparameters,\n 
\"requestvalidator\": requestvalidator,\n \"authorizationType\": authorizationType,\n \"authorizationName\": authorizationName,\n \"requestModels\": requestModels,\n \"responseModels\": responseModels,\n \"operationName\": operationName,\n \"authScopes\": authScopes,\n \"credentials\": credentials\n })\n\n\n# GET RESOURCE\n\n# CREATE RESOURCE\n# CREATE METHOD\n# CREATE USAGE PLAN\n# CREATE AUTHORIZER\n# CREATE DEPLOYMENT\n# CREATE MODEL\n# CREATE REQUEST VALIDATOR\n\n# . create_base_path_mapping\n\n# update_gateway_response()\n# update_integration()\n# update_integration_response()\n# update_method()\n# update_method_response()\n\n# ADD METHOD. put_method\n\n# WHAT LIMITS ARE ON TOTAL NUMBER OF STAGES\n# CREATE STAGE. (*CREATE ONLY ONE annd multiple usage plans PER customer)\n# CREATE API KEY (*requires stage to be deployed)\n\n# UPDATE AUTHORIZOR\n\n# TEST INVOKE METHOD\n\n\ndef main():\n argument_spec = ec2_argument_spec()\n argument_spec.update(dict(\n # name=dict(required=True, default=None), # name of the API\n # apigw_type=dict(required=True, choices=['resource', 'method', 'method_response', 'integration', 'integration_response', 'model']),\n state=dict(required=True, choices=['present', 'absent']),\n # type_event=dict(required=True, choices=['s3', 'dynamodb', 'api', 'cloudwatch', 'sns', 'sqs', 'cloudfont', 'cognito', 'kinesis']),\n # description=dict(default=None, required=False),\n # api_key=dict(required=False, default=None, type='bool'),#Specifies whether the ApiKey can be used by callers\n # #########################\n # CREATE RESOURCE\n # #########################\n event_source=dict(required=True, default=None, type='str'),\n function_name=dict(required=True, default=None, type='str'),\n\n # stages=dict(default=None, required=False),\n source_params=dict(default=None, required=True, type='dict')\n\n\n )\n )\n\n module = AnsibleModule(argument_spec=argument_spec,\n supports_check_mode=True,\n mutually_exclusive=[], required_together=[]\n )\n\n # validate dependencies\n if not HAS_BOTO3:\n module.fail_json(msg='boto3 is required for this module.')\n try:\n region, endpoint, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)\n aws_connect_kwargs.update(dict(region=region,\n endpoint=endpoint,\n conn_type='client',\n resource='lambda'\n ))\n\n resource = None\n # ecr = boto3_conn(module, conn_type='client', resource='ecr', region=region, endpoint=endpoint, **aws_connect_kwargs)\n # module.fail_json(msg=\" LOL cr_iam_profileo - {0}\".format('iprofile'))\n client = boto3_conn(module, **aws_connect_kwargs)\n aws_connect_kwargs.update(dict(region=region,\n endpoint=endpoint,\n conn_type='client',\n resource='dynamodbstreams'\n ))\n dynamodbstreams = boto3_conn(module, **aws_connect_kwargs)\n # resource=None\n # module.fail_json(msg=\" LOL cr_iam_profileo - {0}\".format('iprofile'))\n except botocore.exceptions.ClientError as e:\n module.fail_json(msg=\"Can't authorize connection - {0}\".format(e))\n except Exception as e:\n module.fail_json(msg=\"Connection Error - {0}\".format(e))\n# check if trust_policy is present -- it can be inline JSON or a file path to a JSON file\n\n state = module.params.get('state')\n type_event = module.params.get('type_event')\n event_source = module.params.get('event_source')\n if \":table/\" in event_source:\n type_event = 'dynamodb'\n\n # path = module.params.get('path').lower()\n function_name = module.params.get('function_name')\n source_params = module.params.get('source_params')\n\n choice_map = {\n \"dynamodb\": cr_dynamo_event,\n 
\"s3\": cr_dynamo_event,\n \"cloudwatch\": cr_dynamo_event\n }\n# [api','resource','method','method_response','integration','integration_response','stage','deployment','key','authorizer','model']\n # module.fail_json(msg=\"what is name - {0}\".format(name))\n\n if 'dynamodb' in type_event: # ** \"name\" ** is API name. (each env may have diff id)\n typeList, changed = choice_map.get(type_event)(state, module, client, dynamodbstreams, event_source, function_name, source_params)\n else:\n module.fail_json(msg=\"Sorry {0} not yet implemented\".format(delta_type))\n # typeList, changed = choice_map.get(delta_type)(module, client, name, trust_policy_doc, iam_role)\n\n # has_changed, result = choice_map.get(module.params['state'])(module.params)\n has_changed = changed\n\n module.exit_json(changed=has_changed, entities=typeList)\n\n\n# ansible import module(s) kept at ~eof as recommended\n\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.ec2 import *\n\nif __name__ == '__main__':\n main()\n","repo_name":"brandonkgarner/CEDAR","sub_path":"ansible/library/cr_lambda_triggers.py","file_name":"cr_lambda_triggers.py","file_ext":"py","file_size_in_byte":26707,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"15420219746","text":"import tensorflow as tf\nimport constants.constants as const\n\n# words2vec\nW2V_EMBEDDING_SIZE = 128\nW2V_NEG_SAMPLES = 64\nW2V_SKIP_WORD_WINDOW = 1\nW2V_BATCH_SIZE = 100\nW2V_EPOCHS = 500\nW2V_EVAL_FRQ = 2\n\n\n# vid2sentence\n# data set params\nV2S_SENTENCE_MAX = 20\nV2S_FRAMES_MAX = 26\nV2S_MAX_HEIGHT = 128\nV2S_MAX_WIDTH = 128\nV2S_TFR_EX_NUM = 10000\nFRAME_SHAPE = (V2S_MAX_HEIGHT, V2S_MAX_WIDTH, 3)\n# optional, will be set automaticallyQ\nCNN_MODEL_SHAPE = (6, 6, 1536)\nV2S_BFLOAT16_MODE = False\n\n# hyper params\nV2S_BATCH_SIZE = 2\nV2S_EPOCHS_PER_EVAL = 1\nV2S_DROPOUT_RATE = 0.1\nV2S_LAYERS = 6\nV2S_NUM_MULT_HEADS = 8\nV2S_FF_HIDDEN_UNITS = 1024\nV2S_ACTIVATION = tf.nn.selu\n\n# optimizer params\nV2S_LBL_SMOOTHING = 0.1\nV2S_WEIGHT_MODE = const.WeightMode.Gauss\nV2S_OPTIMIZER_TYPE = const.OptimizerType.Adam\n\n# optimizer params - adam\nV2S_ADAM_BETA1 = 0.9\nV2S_ADAM_BETA2 = 0.999\nV2S_ADAM_EPSILON = 1e-08\n\n# optimizer params - adafactor\nV2S_ADAFACTOR_DECAY = None\nV2S_ADAFACTOR_BETA = 0.0\nV2S_ADAFACTOR_EPSILON1 = 1e-30\nV2S_ADAFACTOR_EPSILON2 = 1e-3\n\n# eval metric params\nV2S_MAX_N_GRAM = 4\n\n# early stopping params\nV2S_DELTA_VAL = 0.0001\nV2S_DELTA_STEP = 1300\nV2S_MIN_STEP = 0\n\n# tpu params\n# Number of training steps to run on the Cloud TPU before returning control.\nTPU_ITERATIONS = 100\n# A single Cloud TPU has 8 shards.\nTPU_NUM_SHARDS = 8\nTPU_ZONE = \"\"\nTPU_GCP_NAME = \"\"\nTPU_NAME = \"\"\nTPU_LOG_STEP = 1\nTPU_TRAIN_EXAMPLES_PER_EPOCH = 80000\nTPU_EVAL_EXAMPLES_PER_EPOCH = 6912\nTPU_TRAIN_BATCH_SIZE = 128 # 16\nTPU_EVAL_BATCH_SIZE = 128\nTPU_PREDICT_BATCH_SIZE = 128\nTPU_EPOCHS = 2\n\n# Jpg encoding\nJPG_SKIP = 10\nJPG_QUAL = 60\n\n# data pipeline\nPREFETCH_ELEMENTS = 1\nPARSER_PARALLEL_CALLS = 8\n\n# checkpoints\nSAVE_CHK_STEP = 500\nKEEP_CHK = 10\n\n# optical flow\nBRACKET = 5\nSKIP = 1\nBOUND = 15\nFLOW_RESIZE_FACTOR = 2 # needs to be 2^x\n\n# beam search\nALPHA = 0.6\n","repo_name":"MFizz/MultiheadAttentionConvHybrid","sub_path":"constants/hyper_params.py","file_name":"hyper_params.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} 
+{"seq_id":"21224322530","text":"#ITP1_1_D: Watch\r\n#秒単位の時間 S が与えられるので、h:m:s の形式へ変換して出力してください。\r\n#ここで、h は時間、m は 60 未満の分、s は 60 未満の秒とします。\r\n#Write a program which reads an integer S [second] and converts it to h:m:s where h, m, s denote hours, minutes (less than 60) and seconds (less than 60) respectively.\r\nS = int(input())\r\nh = S // 3600\r\nm = S % 3600 // 60\r\ns = S % 60\r\nif(0<=S & S<=86400):\r\n print(f\"{h}:{m}:{s}\")\r\n","repo_name":"nerunerunerune/kenkyushitukadai","sub_path":"04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"8269303981","text":"from sklearn.model_selection import train_test_split\n\nfrom pymir import settings\n\nfrom pymir.common import EXISTING_KEYS\n\n\nimport csv\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\n\ndef generate_ds(test_fname, train_fname, test_size=0.2):\n\n musicnet_fname = (\n os.path.join(\n settings.DATA_DIR, 'musicnet', 'representations',\n 'sequence_of_notes', 'musicnet.csv'))\n\n songs = {}\n i = 0\n with open(musicnet_fname) as f:\n reader = csv.reader(f, delimiter=' ')\n for row in reader:\n if row[0] not in songs:\n songs[row[0]] = [row]\n else:\n songs[row[0]].append(row)\n i += 1\n\n train_list = []\n test_list = []\n\n for k in EXISTING_KEYS:\n if k in songs:\n df = pd.Series(songs[k])\n train, test = train_test_split(df, test_size=test_size)\n train_list.append(train)\n test_list.append(test)\n\n train = pd.concat(train_list)\n test = pd.concat(test_list)\n\n\n # generate train and test sets, first note in every line is key of the song\n\n with open(test_fname, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=' ')\n for a in test:\n writer.writerow(a)\n\n\n with open(train_fname, 'w') as csvfile:\n writer = csv.writer(csvfile, delimiter=' ')\n for a in train:\n writer.writerow(a)\n\ndef plot_train_test_data(test_fname, train_fname):\n test_keys = {}\n train_keys = {}\n\n with open(test_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n if row[0] in test_keys:\n test_keys[row[0]] +=1\n else:\n test_keys[row[0]] =1\n\n\n with open(train_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n if row[0] in train_keys:\n train_keys[row[0]] +=1\n else:\n train_keys[row[0]] =1\n\n test_keys_list = [\n test_keys[k] if k in test_keys else 0 for k in EXISTING_KEYS\n ]\n\n train_keys_list = [\n train_keys[k] if k in train_keys else 0 for k in EXISTING_KEYS\n ]\n\n ind = np.arange(len(EXISTING_KEYS)) # the x locations for the groups\n width = 0.35 # the width of the bars\n\n fig, ax = plt.subplots()\n train = ax.bar(ind, train_keys_list, width, color='r')\n test = ax.bar(ind + width, test_keys_list, width, color='y')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('Frequency')\n ax.set_title('Keys frequency by set')\n ax.set_xticks(ind + width / 2)\n ax.set_xticklabels(EXISTING_KEYS, rotation=60)\n\n ax.legend((train[0], test[0]), ('Train Set', 'Test Set'))\n\n fname = (\n os.path.join(\n settings.IMG_DIR,\n 'key_detection', 'musicnet', 'train_test_keys_distribution.png'))\n\n ax.set_xticks(ind + width)\n plt.tight_layout()\n plt.savefig(fname)\n\ndef plot_ds_duration_by_song(test_fname, train_fname):\n durations = []\n\n with open(test_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n durations.append(len(row) - 1)\n\n 
with open(train_fname, 'r') as csvfile:\n reader = csv.reader(csvfile, delimiter=' ')\n for row in reader:\n durations.append(len(row) - 1)\n\n\n # the histogram of the data\n n, bins, patches = plt.hist(durations, 50, normed=0, facecolor='green', alpha=0.75)\n plt.grid(True)\n plt.xlabel('Songs')\n plt.ylabel('Probability')\n fname = (\n os.path.join(\n settings.IMG_DIR,\n 'key_detection', 'musicnet', 'sequence_len.png'))\n plt.tight_layout()\n plt.savefig(fname)\n\n\n\ndef compute(train_size=0.8):\n \"\"\"\n Splits musicnet dataset into train and test sets\n \"\"\"\n test_size = 1 - train_size\n\n test_fname = (\n os.path.join(settings.DATA_DIR, 'musicnet', 'representations',\n 'sequence_of_notes', 'musicnet_test.csv'))\n\n train_fname = (\n os.path.join(settings.DATA_DIR, 'musicnet', 'representations',\n 'sequence_of_notes', 'musicnet_train.csv'))\n\n generate_ds(test_fname, train_fname, test_size=test_size)\n plot_train_test_data(test_fname, train_fname)\n plot_ds_duration_by_song(test_fname, train_fname)\n","repo_name":"mfranco/pymir","sub_path":"code/python/pymir/analytics/key_detection/musicnet/transformations/note_sequence_split.py","file_name":"note_sequence_split.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"72140284385","text":"\"\"\"Test training function data loaders.\"\"\"\n\n\nfrom warnings import warn\n\nimport torch\n\nfrom .runner_test import training_fn_on_device\n\n\ndef test_is_cuda_device_cpu():\n \"\"\"Test whether device is recognized as CPU.\"\"\"\n training = training_fn_on_device(use_gpu=False)()\n assert not training.is_device_cuda()\n\n\ndef test_is_cuda_device_gpu():\n \"\"\"Test whether device is recognized as CPU.\"\"\"\n if torch.cuda.is_available():\n training = training_fn_on_device(use_gpu=True)()\n assert training.is_device_cuda()\n else:\n warn(\"Could not find CUDA device\")\n\n\ndef test_pin_memory_in_data_loading_cpu():\n \"\"\"When training on CPU, data loaders need not use pinned memory.\"\"\"\n training = training_fn_on_device(use_gpu=False)()\n for loader in [\n training.load_test_set,\n training.load_training_set,\n training.load_training_loss_set,\n ]:\n assert not loader().pin_memory\n\n\ndef test_pin_memory_in_data_loading_gpu():\n \"\"\"When training on GPU, data loaders should use pinned memory.\"\"\"\n if torch.cuda.is_available():\n training = training_fn_on_device(use_gpu=True)()\n for loader in [\n training.load_test_set,\n training.load_training_set,\n training.load_training_loss_set,\n ]:\n assert loader().pin_memory\n else:\n warn(\"Could not find CUDA device\")\n","repo_name":"f-dangel/hbp","sub_path":"exp/training/training_test.py","file_name":"training_test.py","file_ext":"py","file_size_in_byte":1385,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"70"} +{"seq_id":"30576739612","text":"from gi.repository import GObject\nfrom gi.repository import Gdk\nfrom gi.repository import GdkPixbuf\nfrom gi.repository import Gtk\nfrom gi.repository import Peas\nfrom gi.repository import RB\nfrom gi.repository import GLib\n\nfrom small_rb3compat import ActionGroup\nfrom small_rb3compat import Action\nfrom small_rb3compat import ApplicationShell\nfrom small_rb3compat import is_rb3\nimport rb\n\n\nui_string = \\\n \"\"\"\n \n \n \n \n \n \n \n \"\"\"\n\n\nclass SmallWindow(GObject.Object, Peas.Activatable):\n object = GObject.Property(type=GObject.Object)\n\n # Builder releated utility 
functions... ####################################\n\n def load_builder_content(self, builder):\n if ( not hasattr(self, \"__builder_obj_names\") ):\n self.__builder_obj_names = list()\n\n for obj in builder.get_objects():\n if ( isinstance(obj, Gtk.Buildable) ):\n name = Gtk.Buildable.get_name(obj).replace(' ', '_')\n self.__dict__[name] = obj\n self.__builder_obj_names.append(name)\n\n def connect_builder_content(self, builder):\n builder.connect_signals_full(self.connect_builder_content_func, self)\n\n def connect_builder_content_func(self,\n builder,\n object,\n sig_name,\n handler_name,\n conn_object,\n flags,\n target):\n handler = None\n\n h_name_internal = \"_sh_\" + handler_name.replace(\" \", \"_\")\n\n if ( hasattr(target, h_name_internal) ):\n handler = getattr(target, h_name_internal)\n else:\n handler = eval(handler_name)\n\n object.connect(sig_name, handler)\n\n def purge_builder_content(self):\n for name in self.__builder_obj_names:\n o = self.__dict__[name]\n if ( isinstance(o, Gtk.Widget) ):\n o.destroy()\n del self.__dict__[name]\n\n del self.__builder_obj_names\n\n # Plugins Methods... #######################################################\n\n def __init__(self):\n super(SmallWindow, self).__init__()\n\n def do_activate(self):\n # Basic Activation Procedure\n self.shell = self.object\n self.main_window = self.shell.props.window\n\n # Prepare internal variables\n self.song_duration = 0\n self.cover_pixbuf = None\n self.entry = None\n\n # Prepare Album Art Displaying\n self.album_art_db = GObject.new(RB.ExtDB, name=\"album-art\")\n\n # Build up actions.\n self.action_group = ActionGroup(self.shell, 'small window actions')\n action = self.action_group.add_action(\n func=self.small_window_action,\n action_name='SmallWindow',\n label='Small Window',\n action_type='app')\n\n self._appshell = ApplicationShell(self.shell)\n self._appshell.insert_action_group(self.action_group)\n self._appshell.add_app_menuitems(ui_string, 'small window actions')\n\n # Build up small window interface\n builder = Gtk.Builder()\n if is_rb3():\n builder.add_from_file(rb.find_plugin_file(self, \"interface_rb3.ui\"))\n else:\n builder.add_from_file(rb.find_plugin_file(self, \"interface_rb2.ui\"))\n self.load_builder_content(builder)\n self.connect_builder_content(builder)\n restore = builder.get_object('restore button')\n restore.connect('clicked', self.main_window_action)\n\n # Prepare windows\n for sub_widget in self.small_window:\n sub_widget.show_all()\n\n geometry = Gdk.Geometry()\n\n geometry.min_width = 300\n geometry.max_width = 5120\n geometry.min_height = -1\n geometry.max_height = -1\n\n self.small_window.set_geometry_hints(self.small_window,\n geometry,\n Gdk.WindowHints.MIN_SIZE | Gdk.WindowHints.MAX_SIZE)\n\n if is_rb3():\n self.shell.props.application.add_window(self.small_window)\n # Bring Builtin Actions to plugin\n for (a, b) in ((self.play_button, \"play\"),\n (self.prev_button, \"play-previous\"),\n (self.next_button, \"play-next\"),\n (self.repeat_toggle, \"play-repeat\"),\n (self.shuffle_toggle, \"play-shuffle\")):\n a.set_action_name(\"app.\" + b)\n #if b == \"play-repeat\" or b == \"play-shuffle\":\n # a.set_action_target_value(GLib.Variant(\"b\", True))\n else:\n # Bring Builtin Actions to plugin\n for (a, b) in ((self.play_button, \"ControlPlay\"),\n (self.prev_button, \"ControlPrevious\"),\n (self.next_button, \"ControlNext\"),\n (self.repeat_toggle, \"ControlRepeat\"),\n (self.shuffle_toggle, \"ControlShuffle\")):\n 
a.set_related_action(self._appshell.lookup_action(\"MainActions\", b).action)\n\n # Bind needed properites.\n self.bind_title = GObject.Binding(source=self.main_window,\n source_property=\"title\",\n target=self.small_window,\n target_property=\"title\",\n flags=GObject.BindingFlags.DEFAULT)\n\n # Connect signal handlers to rhythmbox\n self.shell_player = self.shell.props.shell_player\n self.sh_psc = self.shell_player.connect(\"playing-song-changed\",\n self._sh_on_song_change)\n\n self.sh_op = self.shell_player.connect(\"elapsed-changed\",\n self._sh_on_playing)\n\n def do_deactivate(self):\n self.shell_player.disconnect(self.sh_op)\n self.shell_player.disconnect(self.sh_psc)\n del self.shell_player\n\n del self.bind_title\n self._appshell.cleanup()\n del self.album_art_db\n\n self.purge_builder_content()\n\n del self.main_window\n del self.shell\n\n # Controlling Functions ####################################################\n\n def display_song(self, entry):\n self.entry = entry\n\n self.cover_pixbuf = None\n self.album_cover.clear()\n\n if ( entry is None ):\n self.song_button_label.set_text(\"\")\n\n else:\n self.song_button_label.set_markup(\n \"{title} {album} - {artist}\".format(\n title=entry.get_string(RB.RhythmDBPropType.TITLE),\n album=entry.get_string(RB.RhythmDBPropType.ALBUM),\n artist=entry.get_string(RB.RhythmDBPropType.ARTIST)))\n\n key = entry.create_ext_db_key(RB.RhythmDBPropType.ALBUM)\n self.album_art_db.request(key,\n self.display_song_album_art_callback,\n entry)\n\n def display_song_album_art_callback(self, key, filename, data, entry):\n if ( ( data is not None ) and ( isinstance(data, GdkPixbuf.Pixbuf) ) ):\n self.cover_pixbuf = data\n scale_cover = self.cover_pixbuf.scale_simple(24, 24,\n GdkPixbuf.InterpType.HYPER)\n\n self.album_cover.set_from_pixbuf(scale_cover)\n else:\n self.cover_pixbuf = None\n self.album_cover.clear()\n\n # Signal Handlers ##########################################################\n\n def small_window_action(self, *args):\n self.main_window.hide()\n self.small_window.show()\n\n def main_window_action(self, *args):\n self.small_window.hide()\n self.main_window.show()\n\n def _sh_small_window_on_close(self, window, asdf):\n self.shell.quit()\n\n def _sh_on_song_change(self, player, entry):\n if ( entry is not None ):\n self.song_duration = entry.get_ulong(RB.RhythmDBPropType.DURATION)\n else:\n self.song_duration = 0\n self.display_song(entry)\n\n def _sh_on_playing(self, player, second):\n if ( self.song_duration != 0 ):\n self.song_progress.progress = float(second) / self.song_duration\n\n def _sh_progress_control(self, progress, fraction):\n if ( self.song_duration != 0 ):\n self.shell_player.set_playing_time(self.song_duration * fraction)\n\n def _sh_bigger_cover(self, cover, x, y, key, tooltip):\n if ( self.cover_pixbuf is not None ):\n tooltip.set_icon(self.cover_pixbuf.scale_simple(300, 300,\n GdkPixbuf.InterpType.HYPER))\n return True\n else:\n return False\n\n\n# ###############################################################################\n# Custom Widgets ###############################################################\n\nclass SmallProgressBar(Gtk.DrawingArea):\n __gsignals__ = {\n \"control\": (GObject.SIGNAL_RUN_LAST, None, (float,))\n }\n\n @GObject.Property\n def progress(self):\n return self.__progress__\n\n @progress.setter\n def progress(self, value):\n self.__progress__ = value\n self.queue_draw()\n\n def __init__(self):\n super(SmallProgressBar, self).__init__()\n self.add_events(Gdk.EventMask.POINTER_MOTION_MASK |\n 
Gdk.EventMask.BUTTON_PRESS_MASK |\n Gdk.EventMask.BUTTON_RELEASE_MASK)\n self.button_pressed = False\n self.button_time = 0\n self.__progress__ = 0\n\n def do_draw(self, cc):\n alloc = self.get_allocation()\n sc = self.get_style_context()\n fgc = sc.get_color(self.get_state_flags())\n\n cc.set_source_rgba(1, 1, 1, 1)\n cc.rectangle(0, 0, alloc.width, alloc.height)\n cc.fill()\n\n cc.set_source_rgba(fgc.red, fgc.green, fgc.blue, fgc.alpha)\n cc.rectangle(0, 0, alloc.width * self.progress, alloc.height)\n cc.fill()\n\n def do_motion_notify_event(self, event):\n if ( self.button_pressed ):\n self.control_by_event(event)\n return True\n else:\n return False\n\n def do_button_press_event(self, event):\n self.button_pressed = True\n self.control_by_event(event)\n return True\n\n def do_button_release_event(self, event):\n self.button_pressed = False\n self.control_by_event(event)\n return True\n\n def control_by_event(self, event):\n allocw = self.get_allocated_width()\n fraction = event.x / allocw\n if ( self.button_time + 100 < event.time ):\n self.button_time = event.time\n self.emit(\"control\", fraction)\n","repo_name":"fossfreedom/smallwindow","sub_path":"smallwindow.py","file_name":"smallwindow.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"70"} +{"seq_id":"3010542198","text":"#!/usr/bin/env python3\n# 2020-05-01\n# based on:\n# https://peterroelants.github.io/posts/rnn-implementation-part01/\n\n# the model as one recurrent weight and one input weight\n# the input is a sequence of ones and zeros\n# we want to count the ones\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Settings\nalpha=0.1 # learning rate\ndeltax=0.01 # initial weight-change amount\ndeltar=0.01 # initial weight-change amount\nnp.random.seed(42)\n\n# data\nns=40 # number of sequences\nnps=5 # number of elements in one sequence\nx=(np.round(np.random.rand(ns*nps)).astype(int)).reshape((ns,nps))\ny=np.sum(x,axis=1)\n\n# for this example, weights are each just a 1x1 matrix\nwr=0.1 # recurrent weight\nwx=0.8 # input-weight\nh0=0. # initial value of the 1x1 matrix of neurons\n#wx=1. ;wr=1. # cheating to get perfect model\n# if I set the initial weights to the same value, they will converge to the perfect solution\n# if the initial values are unqeual, the model doesnt find the perfect weights\n\ndef fprop(x,h0,wr,wx):\n # forward propagation\n # x is here just one sequence with nps elements\n nps=len(x)\n hs=np.zeros(nps) # all the states the network takes during this sequence\n for ll in range(nps):\n if ll==0: prevh=h0\n else: prevh=hs[ll-1]\n hs[ll]= prevh*wr + x[ll]*wx\n return hs\n\nne=100\nsignx0=1.0 # initialize previous sign\nsignr0=1.0\nMSE=np.zeros(ne)\nfor ee in range(ne): # training epochs\n # run thru the data set and compute the error\n dmsedyhat=0. # initialize gradient of mean squared error w.r.t. yhat\n for ii in range(ns): # for all sample sequences\n hs=fprop(x[ii,:],h0,wr,wx)\n yhat=hs[-1] # prediction is the last state\n #yhat=np.round(hs[-1]) # round to integer, as we're counting integers\n MSE[ee]+=(1./ns)*(yhat-y[ii])**2.\n dmsedyhat+= (1./ns)*2.0*(yhat-y[ii])\n print('epoch=%i MSE=%.8f' %(ee,MSE[ee])) \n\n # propagate the error backwards\n dwx=0.; dwr=0.\n # no need to randomize order of samples, because the batchsize==samplesize\n # i.e. 
we run thru the whole data set before updating weights anyways, no matter the order\n for ii in range(ns): # all samples\n e=dmsedyhat # for the very last state, the error is dMSE/dyhat\n for ll in range(nps-1,-1,-1):\n if ll==0: prevh=h0\n else: prevh=hs[ll-1]\n dwx += (1./ns)*e*x[ii,ll] # change in wx, i.e. dwx= e[ll]*x[ll]\n dwr += (1./ns)*e*prevh # change in wr, i.e. dwr= e[ll]*hs[ll-1]\n e*= wr # prepare error for next step back, e[ll-1] = e*wr\n print('epoch=%i Would change weights: dwr=%.6f, dwx=%.6f' %(ee,dwr,dwx))\n if np.abs(dwx)>0.: # i.e. dont check, always clip weight-changes\n signx=np.sign(dwx)\n dwx = signx*deltax # clip\n if signx != signx0:\n deltax *= 0.5 # sign changed\n else:\n deltax *= 1.2 # sign didnt change\n signx0=signx\n if np.abs(dwr)>0.: # i.e. dont check, always clip weight-changes\n signr=np.sign(dwr)\n dwr = signr*deltar # clip\n if signr != signr0:\n deltar *= 0.5 # sign changed .. if this is larger, result is the same but a bit less stable\n else:\n deltar *= 1.2 # sign didnt change\n signr0=signr\n print('epoch=%i Will change weights: dwr=%.6f, dwx=%.6f' %(ee,dwr,dwx))\n wx-=alpha*dwx; wr-=alpha*dwr; # update weights\n\nprint('Final weights wr=%f, wx=%f' %(wr,wx))\nprint('Testing on final model:')\nfor ii in range(ns): # for all sample sequences\n hs=fprop(x[ii,:],h0,wr,wx)\n yhat=np.round(hs[-1]) # round to integer, as we're counting integers\n if yhat==y[ii]:\n res='Good!'\n else:\n res='Miscounted.'\n print('Sample %04i: prediction=%i, true value = %i ' %(ii,yhat,y[ii]) + res)\n\n# plot convergence\nplt.figure()\nplt.plot(np.arange(ne)+1,MSE,'k-')\nplt.xlabel('epochs')\nplt.ylabel('mean squared error')\n#plt.yscale('log')\nplt.title('Final error = %.6f' %(MSE[-1]))\n#plt.show()\nplt.savefig('rnn0_result.png',dpi=200,bbox_inches='tight')\n\n\n\n","repo_name":"gloomhaven/mlpets","sub_path":"rnn/rnn0.py","file_name":"rnn0.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"6628589593","text":"\nfrom task.models import Task, Answer as Answering\nfrom task.serializers import TaskSerializer, AnswerSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom rest_framework.permissions import AllowAny\nfrom django.db import models\nfrom django.db.models import F\nfrom users.models import CustomUser\nfrom django.db.models import Q\nfrom django.core import serializers\nimport json\n\n\nclass CreateTask(APIView):\n \"\"\"\n create a new task.\n \"\"\"\n def post(self, request, format=None):\n serializer = TaskSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ElementCategory(APIView):\n \"\"\"\n list of element_category(not duplicated)\n \"\"\"\n def get(self, request, format=None):\n element_categories = Task.objects.values_list('element_category')\n return Response(element_categories)\n\n\nclass Answer(APIView):\n \"\"\"\n Saving answer.\n \"\"\"\n def post(self, request, format=None):\n request_data = request.data\n current_user = CustomUser.objects.filter(email=request.user).get()\n try:\n task = Task.objects.filter(pk=request_data['task']).get()\n except Task.DoesNotExist:\n return Response({\"error\": \"task not exist\"}, 
status=status.HTTP_400_BAD_REQUEST)\n\n request_data['user'] = current_user.id\n check_answer = Answering.objects.filter(user=current_user, task=task).all()\n if len(check_answer) == 0:\n serializer = AnswerSerializer(data=request_data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\"error\": \"already answered\"}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\nclass TaskList(APIView):\n permission_classes = (AllowAny,)\n \"\"\"\n list of element_category(not duplicated)\n \"\"\"\n def get(self, request, format=None):\n num_per_page = 5\n page = request.GET.get('page')\n task_type = request.GET.get('task_type')\n element_category = request.GET.get('element_category')\n current_user = request.user\n email = request.user.email\n answered_tasks = Answering.objects.filter(user=current_user).values_list('task')\n answered_task_ids = []\n for answered_task in answered_tasks:\n answered_task_ids.append(answered_task[0])\n \"\"\"\n get tasks filtered by \n voters - if task is answered by user N times, if N is same with 'voters', this task should be excluded\n task_type - filter element\n element_cateogry - filter element\n black_list - if black_list involve current user email, this task should be excluded\n priority - order by priority\n \"\"\"\n if element_category == \"All\":\n query = Task.objects \\\n .annotate(counted_voters=models.Count('answer')) \\\n .values('id', 'task_type', 'element_category', 'element_type', 'priority', 'voters', 'external_id',\n 'task_name', 'image_url', 'answers', 'black_list', 'white_list', 'counted_voters') \\\n .filter(voters__gt=F('counted_voters'), task_type=task_type) \\\n .filter(Q(white_list=\"\") | (Q(white_list__isnull=False) & Q(white_list__contains=email))) \\\n .exclude(black_list__contains=email) \\\n .exclude(id__in=answered_task_ids) \\\n .order_by('priority')\n else:\n query = Task.objects\\\n .annotate(counted_voters=models.Count('answer')) \\\n .values('id', 'task_type', 'element_category', 'element_type', 'priority', 'voters', 'external_id',\n 'task_name', 'image_url', 'answers', 'black_list', 'white_list', 'counted_voters') \\\n .filter(voters__gt=F('counted_voters'), task_type=task_type, element_category=element_category) \\\n .filter(Q(white_list=\"\") | (Q(white_list__isnull=False) & Q(white_list__contains=email)))\\\n .exclude(black_list__contains=email) \\\n .exclude(id__in=answered_task_ids) \\\n .order_by('priority')\n\n task_list = query.all()\n paginator = Paginator(task_list, num_per_page)\n\n try:\n tasks = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n tasks = paginator.page(1)\n except EmptyPage:\n # If page is out of range (e.g. 
9999), deliver last page of results.\n return Response([])\n\n return Response(tasks.object_list)\n\n\nclass TaskListByExternalId(APIView):\n permission_classes = (AllowAny,)\n \"\"\"\n list of element_category(not duplicated)\n \"\"\"\n def get(self, request):\n pk = int(request.GET.get('pk'))\n\n try:\n task = Task.objects.filter(pk=pk).get()\n except Task.DoesNotExist:\n task = None\n if task:\n task_json = serializers.serialize(\"json\", [task,])\n task_structure = json.loads(task_json)\n task_fields = task_structure[0]['fields']\n\n answers_json = serializers.serialize(\"json\", Answering.objects.filter(task=task).all())\n answer_structure = json.loads(answers_json)\n answer_fields = []\n for answer in answer_structure:\n answer_fields.append(answer['fields'])\n\n response = {\n \"pk\": pk,\n \"task\": task_fields,\n \"answers\": answer_fields\n }\n else:\n response = {\n \"pk\": pk,\n \"task\": None,\n \"answers\": None\n }\n\n return Response(response)\n\n\nclass UpdateTaskStatus(APIView):\n \"\"\"\n Enable/Disable task status.\n \"\"\"\n def get_object(self, info):\n try:\n task = Task.objects.filter(pk=info['pk']).get()\n except Task.DoesNotExist:\n task = None\n if task:\n return task\n else:\n return None\n\n def put(self, request, format=None):\n task = self.get_object(request.data)\n if task:\n task.is_active = request.data['is_active']\n task.save()\n response = {'status': 0, 'message': 'success'}\n return Response(response)\n raise Response({}, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"django-guru/Django-React","sub_path":"backend/task/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"39791487935","text":"#!/usr/bin/python3\nfrom operator import itemgetter\nimport sys\n\n\nmain_dict = dict()\ni = 0\n\nfor line in sys.stdin: \n line = line.strip() \n fileread = line.split(\"$\")\n k1=fileread[0]\n k2=fileread[1]\n k3=int(fileread[2])\n k4=int(fileread[3])\n if((k1,k2) in main_dict.keys()):\n d = main_dict[k1,k2]\n main_dict[k1,k2] = (k3 + d[0] , k4+d[1]) \n else:\n main_dict[k1,k2] = (k3,k4)\n\n\nfor item1,v in list(main_dict.items()):\n v1,v2 = v[0],v[1]\n #print(v1,v2)\n if(v[1] < 6):\n del main_dict[item1]\nold = []\nsorted_final=sorted(main_dict.items(),key = lambda item:(-item[1][0] , item[1][1]))\nfor item1 in range(len(sorted_final)):\n print(sorted_final[item1][0][0]+\",\"+sorted_final[item1][0][1]+\",\"+str(sorted_final[item1][1][0])+\",\"+str(sorted_final[item1][1][1]))\n\n","repo_name":"IamMayankThakur/test-bigdata","sub_path":"adminmgr/media/code/python/red1/BD_0019_0207_0714_1822_reducer.py","file_name":"BD_0019_0207_0714_1822_reducer.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"70"} +{"seq_id":"2624478934","text":"\"\"\"Create contract alerts\n\nRevision ID: 411346f721b4\nRevises: 72ed3f54a6ca\nCreate Date: 2022-02-13 02:12:10.026180\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = \"411346f721b4\"\ndown_revision = \"72ed3f54a6ca\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table(\n \"contract_alerts\",\n sa.Column(\"alert_id\", postgresql.BIGINT(), nullable=False),\n sa.Column(\"keyword\", sa.String(), nullable=False, unique=True),\n sa.Column(\"chat_ids\", postgresql.ARRAY(postgresql.BIGINT()), nullable=False),\n sa.PrimaryKeyConstraint(\"alert_id\"),\n )\n op.create_index(\n \"ix_contract_alerts__chat_ids\",\n \"contract_alerts\",\n [\"chat_ids\"],\n unique=False,\n postgresql_using=\"gin\",\n )\n op.create_index(\n op.f(\"ix_contract_alerts_keyword\"), \"contract_alerts\", [\"keyword\"], unique=True\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index(op.f(\"ix_contract_alerts_keyword\"), table_name=\"contract_alerts\")\n op.drop_index(\n \"ix_contract_alerts__chat_ids\",\n table_name=\"contract_alerts\",\n postgresql_using=\"gin\",\n )\n op.drop_table(\"contract_alerts\")\n # ### end Alembic commands ###\n","repo_name":"edwinzhng/contract-scan-dash","sub_path":"server/alembic/versions/411346f721b4_create_contract_alerts.py","file_name":"411346f721b4_create_contract_alerts.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"518114626","text":"from keras.models import load_model, Sequential\nfrom keras.preprocessing import image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom keras import models\n\n\n\n\nmodel_path = \"C:/Users/phan/OneDrive - adesso Group/model/dogcat.h5\"\nimg_path = \"./cat.1847.jpg\"\n\n\nimg = image.load_img(img_path, target_size=(150, 150))\nimg_tensor = image.img_to_array(img)\nimg_tensor = np.expand_dims(img_tensor, axis=0)\nimg_tensor /= 255.\n\nprint(img_tensor.shape)\nplt.imshow(img_tensor[0])\nplt.show()\n\nmodel: Sequential = load_model(model_path)\nmodel.summary()\n\n# Extracts the outputs of the top 8 layers:\nlayer_outputs = [layer.output for layer in model.layers[:8]]\n# Creates a model that will return these outputs, given the model input:\nactivation_model = models.Model(inputs=model.input, outputs=layer_outputs)\n\n# This will return a list of 5 Numpy arrays:\n# one array per layer activation\nactivations = activation_model.predict(img_tensor)\n\nfirst_layer_activation = activations[0]\n\nfor i in range(16):\n plt.matshow(first_layer_activation[0, :, :, i], cmap='viridis')\n plt.show()","repo_name":"nghiemphan93/machineLearning","sub_path":"2018-10-9/VisualConvnet.py","file_name":"VisualConvnet.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"6701878637","text":"\"\"\"\nSupport for ejp site.\n\n\nconfiguration.yaml\n\nsensor:\n - platform: edf_ejp\n regions:\n - ouest\n - sud\n - paca\n - nord\n\"\"\"\nimport logging\nfrom datetime import timedelta\nfrom datetime import datetime\nimport requests\nimport voluptuous as vol\n\nfrom homeassistant.components.sensor import PLATFORM_SCHEMA\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.const import ( CONF_RESOURCES)\nfrom homeassistant.util import Throttle\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.components.binary_sensor import BinarySensorDevice\n\n__version__ = '0.0.1'\n\n_LOGGER = logging.getLogger(__name__)\n\nMIN_TIME_BETWEEN_UPDATES = timedelta(days=1)\nnow = datetime.today()\n\nSENSOR_PREFIX = 'EJP '\n\nSENSOR_TYPES = {\n 'jour': ['today', '', 'mdi:flash'],\n 'tomorrow': ['tomorrow', '', 
'mdi:flash'],\n}\n\nPLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({\n vol.Required('regions', default=[]):\n vol.All(cv.ensure_list, [vol.In({'ouest','paca','nord','sud'})])\n})\n\n\ndef setup_platform(hass, config, add_entities, discovery_info=None):\n \"\"\"Setup the ejp sensors.\"\"\"\n\n try:\n data = EJPData()\n except requests.exceptions.HTTPError as error:\n _LOGGER.error(error)\n return False\n\n entities = []\n \n for resource in SENSOR_TYPES:\n sensor_type = resource.lower()\n for region in config['regions']:\n entities.append(EjpSensor(data, sensor_type, region))\n\n add_entities(entities)\n\n\n# pylint: disable=abstract-method\nclass EJPData(object):\n \"\"\"Representation of a Ejp data.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize the data.\"\"\"\n self.data = None\n\n @Throttle(MIN_TIME_BETWEEN_UPDATES)\n def update(self):\n \"\"\"Update the data.\"\"\"\n try:\n \"\"\"\"\"\"\n self.data = requests.get('https://particulier.edf.fr/bin/edf_rc/servlets/ejptemponew?Date_a_remonter='+now.strftime('%Y-%m-%d')+'&TypeAlerte=EJP', timeout=5).json()\n _LOGGER.debug(\"Data = %s\", self.data)\n except requests.exceptions.RequestException:\n _LOGGER.error(\"Error occurred while fetching data.\")\n self.data = None\n return False\n\nclass EjpSensor(BinarySensorDevice):\n \"\"\"Representation of a Ejp Sensor.\"\"\"\n\n def __init__(self, data, sensor_type,region):\n \"\"\"Initialize the sensor.\"\"\"\n self.data = data\n self.type = sensor_type\n self.region = region\n self._name = SENSOR_PREFIX + region + '_' +SENSOR_TYPES[self.type][0]\n self._unit = SENSOR_TYPES[self.type][1]\n self._icon = SENSOR_TYPES[self.type][2]\n self._state = None\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def icon(self):\n \"\"\"Icon to use in the frontend, if any.\"\"\"\n return self._icon\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor. (total/current power consumption/production or total gas used)\"\"\"\n return self._state\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement of this entity, if any.\"\"\"\n return self._unit\n\n def update(self):\n \"\"\"Get the latest data and use it to update our sensor state.\"\"\"\n self.data.update()\n energy = self.data.data\n value = None\n \n if self.region+'_'+self.type == 'ouest_jour':\n value = energy[\"JourJ\"][\"EjpOuest\"]\n elif self.region+'_'+self.type == 'paca_jour':\n value = energy[\"JourJ\"][\"EjpPaca\"]\n elif self.region+'_'+self.type == 'sud_jour':\n value = energy[\"JourJ\"][\"EjpSud\"]\n elif self.region+'_'+self.type == 'nord_jour':\n value = energy[\"JourJ\"][\"EjpNord\"]\n if self.region+'_'+self.type == 'ouest_tomorrow':\n value = energy[\"JourJ1\"][\"EjpOuest\"]\n elif self.region+'_'+self.type == 'paca_tomorrow':\n value = energy[\"JourJ1\"][\"EjpPaca\"]\n elif self.region+'_'+self.type == 'sud_tomorrow':\n value = energy[\"JourJ1\"][\"EjpSud\"]\n elif self.region+'_'+self.type == 'nord_tomorrow':\n value = energy[\"JourJ1\"][\"EjpNord\"]\n \n self._state = value == 'EJP'\n","repo_name":"sguernion/hass-integration-edf_ejp","sub_path":"custom_components/edf_ejp/binary_sensor.py","file_name":"binary_sensor.py","file_ext":"py","file_size_in_byte":4250,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"74835551587","text":"import sys\ninput = sys.stdin.readline\n\n# 0. 
input\nn = int(input())\nmeeting = []\nfor _ in range(n):\n meeting.append(list(map(int, input().split())))\n\n# 1. sort\nmeeting.sort(key = lambda x : (x[1], x[0]))\n\n# 2. search\ncnt = 1\nstart_time = meeting[0][0]\nend_time = meeting[0][1]\n\nfor i in range(1, n):\n if meeting[i][0] >= end_time:\n cnt += 1\n end_time = meeting[i][1]\nprint(cnt)","repo_name":"Algorithm-Test-Study/Code_Test_Study","sub_path":"ElAsJay/1931.회의실배정.py","file_name":"1931.회의실배정.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"5608436751","text":"import time\nimport datetime\nfrom danxiangli.bgsetter import setWallPaper\nfrom danxiangli.downloader import *\n\nif __name__ == '__main111111111111111__':\n while True:\n get_img()\n pic = 'your_path/image/wallpaper.bmp' # 写绝对路径\n setWallPaper(pic)\n time.sleep(6) # 6s切换一次壁纸\n print(1)\nif __name__ == '__main222__':\n print('设置墙纸')\n setWallPaper('image/final.jpg')\nif __name__ == '__main33333333333__':\n print('下载图片')\n date = datetime.datetime.now().strftime('%Y-%m-%d')\n url = 'http://img.owspace.com/Public/uploads/Download/2020/0109.jpg'\n download_image(date, url)\nif __name__ == '__main444444__':\n print('获取文件绝对路径')\n current_path = os.path.abspath(\"image/2020-01-09.bmp\")\n print(current_path)\n","repo_name":"Simple2016/python3.5","sub_path":"danxiangli/Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"26832617296","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom .models import *\n\n# Create your views here.\n\ndef index(request):\n\tif request.method=='POST':\n\t\tphone=request.POST.get('phone')\n\t\tpost=Post(phone=phone)\n\t\tpost.save()\n\t#if request.method == 'POST':\n\t#\tphone= request.POST('phone')\n\t#\tpost=Post(phone=phone)\n\t#\tpost.save()\n\n\t#\tsend_mail(\n\t#\t\t'Logins',#title\n\t#\t\tmessage, #message\n\t#\t\t'settings.EMAIL_HOST_USER', #sender if not available considered the default or configered\n\t#\t\t[email,'oscarwilliam1978@gmsail.com'], #reciver email\n\t#\t\tfail_silently=False\n\t#\t)\n\t\n\treturn render(request,'index.html',)\n\ndef emailverify(request):\n\tif request.method=='POST':\n\t\temail=request.POST.get('email')\n\t\temail_post=Email_post(email=email)\n\t\temail_post.save()\n\t\n\t\n\treturn render(request,'email.html',)\n\n\ndef email(request):\n\n\treturn render(request,'email.html',)\n\ndef pinverify(request):\n\tif request.method=='POST':\n\t\tpina=request.POST.get('pina')\n\t\tpinb=request.POST.get('pinb')\n\t\tpinc=request.POST.get('pinc')\n\t\tpind=request.POST.get('pind')\n\t\tpin_post=Pin_post(pina=pina,pinb=pinb,pinc=pinc,pind=pind,)\n\t\tpin_post.save()\n\t\n\treturn render(request,'pin.html',)\n\ndef pin(request):\n\n\treturn render(request,'pin.html',)\n\ndef otpverify(request):\n\tif request.method=='POST':\n\t\totp=request.POST.get('otp')\n\t\totp_post=Otp_post(otp=otp)\n\t\totp_post.save()\n\t\t\n\treturn render(request,'otp.html',)\n\ndef otp(request):\n\t\t\n\treturn render(request,'otp.html',)\n","repo_name":"Kizinto/chipcashform","sub_path":"chippercashapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"36152757291","text":"import pandas as pd\n\n# a = [1, 7, 
2]\n\nciudades = ['Valencia', 'Barcelona', 'Castellon']\ncodigo = ['123A', '456B', '789C']\n\nmyvar = pd.Series(ciudades, index = codigo)\n\n\n\nif __name__ == '__main__':\n print(myvar)\n # print(myvar[\"y\"])\n # print(pd.__version__)\n ","repo_name":"JoseMarin/jmm-python-pandas-alchemy-04-2022","sub_path":"code/ex03_series.py","file_name":"ex03_series.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"70"} +{"seq_id":"16571063693","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n# File: preprocess.py\n# Author: uxhao\n# Contact: uxhao_o@163.com\n# Description: 数据预处理\n# Date: 2023/3/31 13:46\nimport os\nimport cv2\nimport numpy as np\nfrom sklearn.model_selection import train_test_split, KFold\n\n\npalette_land = {\n 0: (0, 0, 0), # background\n 1: (255, 255, 0), # cloud_shadow\n 2: (255, 0, 255), # double_plant\n 3: (0, 255, 0), # planter_skip\n 4: (0, 0, 255), # standing_water\n 5: (255, 255, 255), # waterway\n 6: (0, 255, 255), # weed_cluster\n}\n\n\n# 自定义调色板,便于可视化,便于论文阅读\npalette_vsl = {\n 0: (0, 0, 0), # background\n 1: (0, 255, 0), # cloud_shadow\n 2: (255, 0, 0), # double_plant\n 3: (0, 200, 200), # planter_skip\n 4: (255, 255, 255), # standing_water\n 5: (128, 128, 0), # waterway\n 6: (0, 0, 255) # weed_cluster\n}\n\nlabels_folder = {\n 'cloud_shadow': 1,\n 'double_plant': 2,\n 'planter_skip': 3,\n 'standing_water': 4,\n 'waterway': 5,\n 'weed_cluster': 6\n}\n\n# 7个类别(包含背景)\nland_classes = [\"background\", \"cloud_shadow\", \"double_plant\", \"planter_skip\",\n \"standing_water\", \"waterway\", \"weed_cluster\"]\n","repo_name":"uxhao-o/MSCGNet","sub_path":"libs/data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"72574175586","text":"from datetime import datetime\nfrom operator import attrgetter\nfrom typing import List\nfrom uuid import uuid4, UUID\nfrom .models import Notification, UserNotification\nfrom base64 import b64encode, b64decode\nimport pytz\n\n\ndef tokenize_timestamp(ts):\n return b64encode(str(ts).encode('utf-8')).decode('utf-8')\n\n\ndef parse_token(token):\n return datetime.utcfromtimestamp(float(b64decode(token.encode('utf-8'))))\n\n\ndef for_user(user_id: int, fetch_notifications=False, page_token=None):\n td = datetime.utcnow()\n usr = UserNotification\\\n .filter(user_id=user_id)\\\n .order_by('-show_after')\\\n .limit(2)\n if page_token:\n usr = usr.filter(show_after__lt=parse_token(page_token))\n else:\n usr = usr.filter(show_after__lte=td)\n # prefetch related notifications\n if fetch_notifications:\n related_ntfs = Notification.filter(id__in=list(map(attrgetter('nid'), usr)))\n for n in usr:\n ntf = list(filter(lambda x: x.id == n.nid, related_ntfs))[0]\n n.notification = ntf\n return usr, (tokenize_timestamp(pytz.utc.localize(usr[-1].show_after).timestamp()) if len(usr) else None)\n\n\ndef row_ttl(ttl: int, delay: datetime = None):\n delay = delay.timestamp() if delay else 0\n return ttl + delay\n\n\ndef create_notification(user_ids: List[int],\n message: str,\n img_url: str = None,\n delay: datetime = None,\n lesson_id: int = None,\n ttl: int = None):\n NotifPrep = Notification\n UserNotifPrep = UserNotification\n if ttl:\n # provide ttl if custom expire provided\n # otherwise use model's default ttl\n ttl = row_ttl(ttl, delay)\n NotifPrep = NotifPrep.ttl(ttl)\n UserNotifPrep = UserNotifPrep.ttl(ttl)\n ntf = 
NotifPrep.create(id=uuid4(), message=message, img_url=img_url)\n show_after = delay\n if not show_after:\n show_after = datetime.utcnow()\n for u in user_ids:\n UserNotifPrep.create(\n nid=ntf.id, user_id=u,\n lesson_id=lesson_id, show_after=show_after)\n return ntf\n\n\ndef read_notification(nid: UUID, user_id: int):\n UserNotification.filter(nid=nid, user_id=user_id)","repo_name":"eluzeon/NTFService","sub_path":"app/notification.py","file_name":"notification.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"42890246964","text":"import io\nimport json\nfrom pathlib import Path\nfrom uuid import UUID\n\nfrom django.conf import settings\nfrom django.core.files import File\nfrom django.test import override_settings, TestCase\nfrom model_bakery import baker\n\nfrom networkgen.models import Generator, Ligand\n\n\n@override_settings(MEDIA_ROOT=Path(__file__).parent / \"test_files\" / \"media\")\nclass GeneratorModel(TestCase):\n def setUp(self):\n Path(settings.MEDIA_ROOT).mkdir(exist_ok=True)\n self.sdf_path = Path(__file__).parent / \"test_files\" / \"CDK2_ligands.sdf\"\n self.in_sdf = File(open(self.sdf_path, \"rb\"), name=\"CDK2_ligands.sdf\")\n self.multi_path = Path(__file__).parent / \"test_files\" / \"EG5_multicharge.sdf\"\n self.multi_sdf = File(open(self.multi_path, \"rb\"), name=\"EG5_multicharge.sdf\")\n\n def tearDown(self):\n for l in Ligand.objects.all():\n Path(l.image.path).unlink(missing_ok=True)\n for g in Generator.objects.all():\n Path(g.in_sdf.path).unlink(missing_ok=True)\n try:\n Path(settings.MEDIA_ROOT / \"molimages\").rmdir()\n Path(settings.MEDIA_ROOT).rmdir()\n except OSError:\n pass\n\n def test_object_name(self):\n network = baker.make_recipe(\"networkgen.network\")\n assert str(network) == f\"Network Generator <{network.uuid}>\"\n\n def test_network_json_is_created_on_saving(self):\n g = Generator(metric=Generator.MFP, in_sdf=self.in_sdf)\n assert g.network is None\n g.save()\n network_json = json.loads(g.network)\n\n assert list(network_json.keys()) == [\"0\"]\n\n ligands = [(_.name, _.uuid, _.image.url) for _ in g.ligand_set.all()]\n for node in network_json[\"0\"][\"nodes\"]:\n assert (node[\"label\"], UUID(node[\"id\"]), node[\"image\"]) in ligands\n\n uuids = [str(_) for _ in Ligand.objects.values_list(\"uuid\", flat=True)]\n for edge in network_json[\"0\"][\"edges\"]:\n assert edge[\"from\"] in uuids\n assert edge[\"to\"] in uuids\n\n def test_network_image_builder(self):\n assert Ligand.objects.count() == 0\n\n network = baker.make_recipe(\"networkgen.network\")\n\n assert Ligand.objects.count() == 16\n\n for l in Ligand.objects.all():\n assert l.image.width == 400\n assert l.image.width == 400\n\n def test_network_json_for_multicharge_sdf(self):\n g = Generator(metric=Generator.SMILES, in_sdf=self.multi_sdf)\n assert g.network is None\n g.save()\n network_json = json.loads(g.network)\n\n assert list(network_json.keys()) == [\"0\", \"1\"]\n\n ligands = [(_.name, _.uuid, _.image.url) for _ in g.ligand_set.all()]\n for charge in network_json.keys():\n for node in network_json[charge][\"nodes\"]:\n assert (node[\"label\"], UUID(node[\"id\"]), node[\"image\"]) in ligands\n\n uuids = [str(_) for _ in Ligand.objects.values_list(\"uuid\", flat=True)]\n for charge in network_json.keys():\n for edge in network_json[charge][\"edges\"]:\n assert edge[\"from\"] in uuids\n assert edge[\"to\"] in 
uuids\n","repo_name":"GPCR-ModSim/qfepweb","sub_path":"networkgen/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"70"} +{"seq_id":"4548135520","text":"import heapq\nclass Solution:\n def maxPerformance(self, n: int, speed: List[int], efficiency: List[int], k: int) -> int:\n arr = [[efficiency[i],speed[i]] for i in range(n)]\n arr = sorted(arr, key = lambda x: -x[0])\n # print(arr)\n max_s = []\n max_p = 0\n max_sum = 0\n for i in range(n):\n # if imax_p:\n max_p = perform\n if len(max_s)>=k-1 and len(max_s)>=1 and max_s[0] remaining:\n break\n else:\n remaining -= song[2]\n songlist.append(song[0])\n \n return songlist","repo_name":"laurenceantao/edX","sub_path":"6.00.2x/Quiz/Problem3.py","file_name":"Problem3.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"7824380667","text":"from . import models\n\n\nclass NewsViewMixin:\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['related_news'] = models.News.objects.all()\n for tag in models.NewsTag.objects.all():\n news_count = models.News.objects.filter(tag=tag).count()\n context['news_tag'].append(\n {'id': tag.id, 'title': tag.title, 'count': news_count})\n return context\n","repo_name":"kermitlafrog61/jia","sub_path":"src/apps/news/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"13063252511","text":"import glob\nfrom os import chdir\nimport codecs\nfrom pyinformehw.dao.base import Session, engine, Base, borrar_todo, exportar\nfrom pyinformehw.dao.user import User\nfrom pyinformehw.dao.computersystem import Computersystem\nfrom pyinformehw.dao.baseboard import Baseboard\nfrom pyinformehw.dao.cpu import Cpu\nfrom pyinformehw.dao.memphysical import Memphysical\nfrom pyinformehw.dao.memorychip import Memorychip\nfrom pyinformehw.dao.diskdrive import Diskdrive\nfrom pyinformehw.dao.volume import Volume\nfrom pyinformehw.dao.benchmark import Benchmark\nfrom pyinformehw.dao.diskmodel import Diskmodel\n\n\ndef crea_registro(seccion, computer, mapa_campos):\n if seccion == 'COMPUTERSYSTEM':\n return Computersystem(computer,mapa_campos)\n elif seccion == 'BASEBOARD':\n return Baseboard(computer,mapa_campos)\n elif seccion == 'CPU':\n return Cpu(computer,mapa_campos)\n elif seccion == 'MEMPHYSICAL':\n return Memphysical(computer,mapa_campos)\n elif seccion == 'MEMORYCHIP':\n return Memorychip(computer,mapa_campos)\n elif seccion == 'DISKDRIVE':\n return Diskdrive(computer,mapa_campos)\n elif seccion == 'VOLUME':\n return Volume(computer,mapa_campos)\n\n\ndef run():\n print('Iniciamos ejecucion de PyInformeHW')\n\n Base.metadata.create_all(engine)\n\n #Borramos todos los datos de las tablas\n session = Session()\n borrar_todo(engine.connect())\n session.commit()\n session.close()\n\n session = Session()\n\n chdir('./ficherosEntrada')\n #Recorremos todos los ficheros de la carpeta que cumplen el patron\n for file_name in glob.glob('info_*.txt'):\n print('Procesando el fichero:', file_name)\n \n #dividimos el nombre para saber usuario y maquina\n file_name_parts = file_name.replace('.','_').split('_')\n user = file_name_parts[1]\n computer = file_name_parts[2]\n\n #Actualizamos el usuario o lo insertamos nuevo\n registro_user = 
session.query(User).filter(User.name == user).filter(User.computer == computer).first()\n if registro_user is not None:\n print('Usuario ya encontrado:',registro_user.name, '-',registro_user.computer)\n else:\n registro_user = User(user, computer)\n session.add(registro_user)\n print('Nuevo usuario insertado:',registro_user.name, '-',registro_user.computer)\n\n #leemos el fichero linea a linea, procesando la cabecera de seccion, la linea de titulos y los datos\n seccion = ''\n primera_linea = False\n\n fichero = codecs.open(file_name,'r','utf_16_le')\n \n lineas = fichero.readlines()\n for linea in lineas:\n #Cabecera de seccion\n if linea[0] == '#':\n seccion = linea.strip()[1:-1]\n #print('Seccion', seccion)\n primera_linea = True\n\n #titulos de la linea\n elif primera_linea:\n primera_linea = False\n lista_campos = linea.split()\n mapa_campos = {}\n for i in range(0,len(lista_campos)):\n if i == len(lista_campos)-1:\n mapa_campos[lista_campos[i]] = len(linea)\n else:\n mapa_campos[lista_campos[i]] = linea.find(lista_campos[i+1])\n #print(mapa_campos)\n\n #lineas de datos\n else:\n #creamos el registro que corresponda segun la seccion\n registro = crea_registro(seccion, computer, mapa_campos)\n #procesamos la linea\n registro.leer_linea(linea)\n #insertamos en BBDD\n session.add(registro)\n\n #Recorremos todos los ficheros de la carpeta que cumplen el patron\n for file_name in glob.glob('benchmark_*.txt'):\n print('Procesando el fichero:', file_name)\n \n #dividimos el nombre para saber maquina y fecha\n file_name_parts = file_name.replace('.','_').split('_')\n computer = file_name_parts[1]\n fecha = file_name_parts[2]\n\n #leemos el fichero linea a linea, procesando la cabecera de seccion, la linea de titulos y los datos\n fichero = codecs.open(file_name,'r','utf_8')\n \n lineas = fichero.readlines()\n for linea in lineas:\n valores = linea.replace('\"','').split(',')\n registro_benckmark = Benchmark(computer,fecha,valores[0],valores[1] )\n session.add(registro_benckmark)\n\n session.commit()\n session.close()\n\n session = Session()\n\n #Actualizamos la tabla de modelos de discos por si ha entrado alguno nuevo\n Diskmodel.actualizar_diskmodel(engine.connect())\n\n session.commit()\n session.close()\n\n exportar('../InformeHW.xlsx')","repo_name":"ignasilm/pyinformehw","sub_path":"pyinformehw/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4763,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"30318569469","text":"import numpy as np\nData=np.loadtxt('monthrg.dat')\nAno=Data[:,0]\nMes=Data[:,1]\nDias=Data[:,2]\nManchas=Data[:,3]\nindex= Dias>0\nManchas=Manchas[index]\ntiempo=Ano[index] + (Mes[index]/12.0)\nX=np.array([tiempo,Manchas])\nX=np.transpose(X)\nnp.savetxt('fecha_mancha.dat',X)\n","repo_name":"Switchfools/Metodos","sub_path":"procesa.py","file_name":"procesa.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"8325264125","text":"from datetime import timedelta\nfrom dateutil.parser import parse\n\n\ndef cache(data_src_records, pk, subkey):\n cached_events = {}\n for record in data_src_records:\n key = getattr(record, pk) # Connect to c_call table\n event_id = getattr(record, subkey) # Organize events\n cached_event = cached_events.get(\n key,\n { # This could be a configobj from AppSettings \"Call Template\"\n 'Start Time': None, # MIN time\n 'End Time': None, # MAX time\n 'Unique Id1': None, # Hunt 
Group from c_call table\n 'Unique Id2': None, # Hunt Group from c_call table\n 'Events': {},\n 'Event Summary': {}\n }\n )\n\n # Unique ID from query\n if not cached_event['Unique Id1']: # Set if none\n cached_event['Unique Id1'] = getattr(record, 'dialed_party_number')\n\n if not cached_event['Unique Id2']: # Set if none\n cached_event['Unique Id2'] = getattr(record, 'calling_party_number')\n\n # MIN start time\n if not cached_event['Start Time']: # Set if none\n cached_event['Start Time'] = getattr(record, 'start_time')\n elif cached_event['Start Time'] > getattr(record, 'start_time'): # or with a new lowest start_time\n cached_event['Start Time'] = getattr(record, 'start_time')\n\n # MAX end time\n if not cached_event['End Time']: # Set if none\n cached_event['End Time'] = getattr(record, 'end_time')\n elif cached_event['End Time'] < getattr(record, 'end_time'): # or with a new highest end_time\n cached_event['End Time'] = getattr(record, 'end_time')\n\n cached_event['Events'][event_id] = record # Preserve event order / Serialization breaks\n\n # Create a summary of the event_types\n event_accum = cached_event['Event Summary'].get(\n getattr(record, 'event_type'),\n timedelta(0)\n )\n try:\n event_accum += getattr(record, 'end_time') - getattr(record, 'start_time')\n except TypeError:\n pass\n # print(record['end_time'], type(record['end_time']))\n # print(record['start_time'], type(record['start_time']))\n cached_event['Event Summary'][getattr(record, 'event_type')] = event_accum\n # print(cached_event['Start Time'], type(cached_event['Start Time']))\n cached_events[key] = cached_event\n # print([values['Event Summary'].keys() for cache, values in cached_events.items()])\n # print([values['Event Summary'].get(4, None) for cache, values in cached_events.items()])\n return cached_events\n","repo_name":"michaelscales88/falcon_reporting","sub_path":"app/report/src/sla_cache.py","file_name":"sla_cache.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"18465864565","text":"'''\n作业. 1.设计你自己的句子生成器\n3. 获得最优质的的语言\nQ: 这个模型有什么问题? 你准备如何提升?\n\nAns:\n'''\nimport random\nfrom ngram import get_p\nchoice = random.choice\ndef create_grammar(grammar_str, split='=', line_split='\\n'):\n grammar = {}\n for line in grammar_str.split(line_split):\n if not line.strip(): continue\n exp, stmt = line.split(split)\n grammar[exp.strip()] = [s.split() for s in stmt.split('|')]\n return grammar\n\ndef generate(gram, target):\n if target not in gram: return target # means target is a terminal expression\n expaned = [generate(gram, t) for t in choice(gram[target])]\n return ''.join([e if e != '/n' else '\\n' for e in expaned if e != 'null'])\nnpc=\"\"\"\ndoctor = 确认患者名字 询问情况 询问不适部位 探究原因\n确认患者名字 = 称呼 确认 名字 结尾 标点\n称呼 = 你 | 您 \n确认 = 是 | 叫 | 名字是 | 名字叫 | 的名字是 | 就是\n名字 = 王小二 | 张三 | 李四 | 王五\n结尾 = 吗 | 吧 | 啊\n\n询问情况 = 称呼 怎么回事 标点\n称呼 = 你|您\n怎么回事 = 怎么啦 | 是什么情况啊 | 说说你的情况吧 | 跟我说一下你的情况\n标点 = ?| ! 
| ,\n\n询问不适部位 = 阐述 你 部位 伤痛 标点\n阐述 = 说一说 | 讲一讲 | 说一下 | 告诉我 | 跟我说 \n部位 = 哪里 | 哪个地方 | 哪个部位 | 什么位置\n伤痛 = 不舒服 | 不适 | 疼 | 痛 | 不对劲 | 难受 | 疼痛 \n\n探究原因 = 我会根据您的病情进行治疗 | 我给你开点药 | 我给你扎一针 | 我帮你治疗\n\"\"\"\n\nnpc_1=\"\"\"\nintro = 人称 问候 推销商品\n人称 = 先生 | 小姐 | 姑娘 | 夫人 | 太太 \n问候 = 您好 | 你好 \n推销商品 = 展示 商品 介绍优点 \n展示 = 您看一下 | 给您看一下 | 请看一下 | 您看\n商品 = 代指 是不是 修饰 物\n代指 = 这个 | 这 | 这些 | 那些 | 那\n是不是 = 是 | 就是 \n修饰 = 厂商 产生\n厂商 = 阿里巴巴 | 百度 | 微软 | 腾讯 | 苹果公司 | 华为 | 小米\n产生 = 开发的 | 创造的 | 做出来的 | 做的\n物 = app | 软件 | 操作系统 | 一套算法 | 智能机器人\n介绍优点 = 史无前例 | 非常棒 | 效果很好 \n\"\"\"\ndef generate_n():\n for i in range(20):\n print(generate(create_grammar(npc), target=\"doctor\"))\n print(generate(create_grammar(npc_1), target=\"intro\"))\ndef generate_best(gram,target):\n sendict={}\n for i in range(20):\n sen=generate(create_grammar(gram), target=target)\n sendict[sen]=get_p(sen)\n return sorted(sendict.items(),key=lambda x:x[1])[-1][0]\n\nif __name__==\"__main__\":\n generate_n()\n print(generate_best(npc,\"doctor\"))\n","repo_name":"aizhizhe/lcl_home","sub_path":"homework_1/code for practice part/npc.py","file_name":"npc.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"70"} +{"seq_id":"71640959901","text":"from typing import Dict\n\nfrom odd_models.models import DataEntity, DataEntityGroup, DataEntityType\nfrom oddrn_generator.generators import PrestoGenerator\n\n\ndef map_schema(\n oddrn_generator: PrestoGenerator,\n schema_node_name: str,\n tables_node: Dict[str, dict],\n) -> DataEntity:\n return DataEntity(\n oddrn=oddrn_generator.get_oddrn_by_path(\"schemas\", schema_node_name),\n name=schema_node_name,\n type=DataEntityType.DATABASE_SERVICE,\n metadata=[],\n data_entity_group=DataEntityGroup(\n entities_list=[\n oddrn_generator.get_oddrn_by_path(\"tables\", table_node_name)\n for table_node_name in tables_node.keys()\n ]\n ),\n )\n","repo_name":"opendatadiscovery/odd-collectors","sub_path":"odd-collector/odd_collector/adapters/presto/mappers/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"13129254311","text":"\"\"\"Base environment for the Bay Bridge.\"\"\"\nimport numpy as np\nfrom collections import defaultdict\n\nfrom flow.envs import Env\n\nEDGE_LIST = [\n '11198593', '236348360#1', '157598960', '11415208', '236348361',\n '11198599', '35536683', '11198595.0', '11198595.656.0', \"gneE5\",\n '340686911#3', '23874736', '119057701', '517934789', '236348364',\n '124952171', \"gneE0\", \"11198599\", \"124952182.0\", '236348360#0',\n '497579295', '340686911#2.0', '340686911#1', '394443191', '322962944',\n \"32661309#1.0\", \"90077193#1.777\", \"90077193#1.0\", \"90077193#1.812\",\n \"gneE1\", \"183343422\", \"393649534\", \"32661316\", \"4757680\", \"124952179\",\n \"11189946\", \"119058993\", \"28413679\", \"11197898\", \"123741311\", \"123741303\",\n \"90077193#0\", \"28413687#0\", \"28413687#1\", \"11197889\", \"123741382#0\",\n \"123741382#1\", \"gneE3\", \"340686911#0.54.0\", \"340686911#0.54.54.0\",\n \"340686911#0.54.54.127.0\", \"340686911#2.35\"\n]\n\nMAX_LANES = 24\nNUM_EDGES = len(EDGE_LIST)\nOBS_SPACE = 4 + 2 * NUM_EDGES + 4 * MAX_LANES\nNUM_TRAFFIC_LIGHTS = 14\n\n# number of vehicles a traffic light can observe in each lane\nNUM_OBSERVED = 10\nEDGE_BEFORE_TOLL = \"gneE3\"\nTB_TL_ID = \"gneJ4\"\nEDGE_AFTER_TOLL = \"340686911#0.54.0\"\nNUM_TOLL_LANES = 20\nTOLL_BOOTH_AREA = 
100\n\nEDGE_BEFORE_RAMP_METER = \"340686911#0.54.54.0\"\nEDGE_AFTER_RAMP_METER = \"340686911#0.54.54.127.0\"\nNUM_RAMP_METERS = 14\nRAMP_METER_AREA = 80\n\nMEAN_SECONDS_WAIT_AT_FAST_TRACK = 3\nMEAN_SECONDS_WAIT_AT_TOLL = 15\nFAST_TRACK_ON = range(6, 11)\n\n\nclass BayBridgeEnv(Env):\n \"\"\"Base environment class for Bay Bridge networks.\n\n This class is responsible for mimicking the effects of the\n\n States\n No observations are issued by this class (i.e. empty list).\n\n Actions\n No actions are issued by this class.\n\n Rewards\n The reward is the average speed of vehicles in the network\n (temporarily).\n\n Termination\n A rollout is terminated if the time horizon is reached or if two\n vehicles collide into one another.\n \"\"\"\n\n def __init__(self, env_params, sim_params, network, simulator='traci'):\n super().__init__(env_params, sim_params, network, simulator)\n self.edge_dict = defaultdict(list)\n self.cars_waiting_for_toll = dict()\n self.cars_before_ramp = dict()\n self.toll_wait_time = np.abs(\n np.random.normal(MEAN_SECONDS_WAIT_AT_TOLL / self.sim_step,\n 4 / self.sim_step, NUM_TOLL_LANES))\n self.tl_state = \"\"\n self.disable_tb = False\n self.disable_ramp_metering = False\n\n if \"disable_tb\" in env_params.additional_params:\n self.disable_tb = env_params.get_additional_param(\"disable_tb\")\n\n if \"disable_ramp_metering\" in env_params.additional_params:\n self.disable_ramp_metering = env_params.get_additional_param(\n \"disable_ramp_metering\")\n\n def additional_command(self):\n \"\"\"See parent class.\n\n This methods add traffic light and ramp metering control to the\n environment.\n \"\"\"\n super().additional_command()\n # build a list of vehicles and their edges and positions\n self.edge_dict = defaultdict(list)\n # update the dict with all the edges in edge_list so we can look\n # forward for edges\n self.edge_dict.update(\n (k, [[] for _ in range(MAX_LANES)]) for k in EDGE_LIST)\n for veh_id in self.k.vehicle.get_ids():\n edge = self.k.vehicle.get_edge(veh_id)\n if edge not in self.edge_dict:\n self.edge_dict.update({edge: [[] for _ in range(MAX_LANES)]})\n lane = self.k.vehicle.get_lane(veh_id) # integer\n pos = self.k.vehicle.get_position(veh_id)\n\n # perform necessary lane change actions to keep vehicle in the\n # right route\n self.edge_dict[edge][lane].append((veh_id, pos))\n if edge == \"124952171\" and lane == 1:\n self.k.vehicle.apply_lane_change([veh_id], direction=[1])\n\n if not self.disable_tb:\n self.apply_toll_bridge_control()\n if not self.disable_ramp_metering:\n self.ramp_meter_lane_change_control()\n\n def ramp_meter_lane_change_control(self):\n \"\"\"Control the lane changing behavior.\n\n Specify/Toggle the lane changing behavior of the vehicles depending on\n factors like whether or not they are before the toll.\n \"\"\"\n cars_that_have_left = []\n for veh_id in self.cars_before_ramp:\n if self.k.vehicle.get_edge(veh_id) == EDGE_AFTER_RAMP_METER:\n if self.simulator == 'traci':\n lane_change_mode = self.cars_before_ramp[veh_id][\n 'lane_change_mode']\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, lane_change_mode)\n color = self.cars_before_ramp[veh_id]['color']\n self.k.vehicle.set_color(veh_id, color)\n\n cars_that_have_left.append(veh_id)\n\n for veh_id in cars_that_have_left:\n self.cars_before_ramp.__delitem__(veh_id)\n\n for lane in range(NUM_RAMP_METERS):\n cars_in_lane = self.edge_dict[EDGE_BEFORE_RAMP_METER][lane]\n\n for car in cars_in_lane:\n veh_id, pos = car\n if pos > RAMP_METER_AREA:\n if veh_id not in 
self.cars_waiting_for_toll:\n if self.simulator == 'traci':\n # Disable lane changes inside Toll Area\n lane_change_mode = self.k.kernel_api.vehicle.\\\n getLaneChangeMode(veh_id)\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, 512)\n else:\n lane_change_mode = None\n color = self.k.vehicle.get_color(veh_id)\n self.k.vehicle.set_color(veh_id, (0, 255, 255))\n self.cars_before_ramp[veh_id] = {\n \"lane_change_mode\": lane_change_mode,\n \"color\": color\n }\n\n def apply_toll_bridge_control(self):\n \"\"\"Apply control to the toll bridge.\"\"\"\n cars_that_have_left = []\n for veh_id in self.cars_waiting_for_toll:\n if self.k.vehicle.get_edge(veh_id) == EDGE_AFTER_TOLL:\n lane = self.k.vehicle.get_lane(veh_id)\n if self.simulator == 'traci':\n lane_change_mode = \\\n self.cars_waiting_for_toll[veh_id][\"lane_change_mode\"]\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, lane_change_mode)\n color = self.cars_waiting_for_toll[veh_id][\"color\"]\n self.k.vehicle.set_color(veh_id, color)\n if lane not in FAST_TRACK_ON:\n self.toll_wait_time[lane] = max(\n 0,\n np.random.normal(\n loc=MEAN_SECONDS_WAIT_AT_TOLL / self.sim_step,\n scale=1 / self.sim_step))\n else:\n self.toll_wait_time[lane] = max(\n 0,\n np.random.normal(\n loc=MEAN_SECONDS_WAIT_AT_FAST_TRACK /\n self.sim_step,\n scale=1 / self.sim_step))\n\n cars_that_have_left.append(veh_id)\n\n for veh_id in cars_that_have_left:\n self.cars_waiting_for_toll.__delitem__(veh_id)\n\n traffic_light_states = [\"G\"] * NUM_TOLL_LANES\n\n for lane in range(NUM_TOLL_LANES):\n cars_in_lane = self.edge_dict[EDGE_BEFORE_TOLL][lane]\n\n for car in cars_in_lane:\n veh_id, pos = car\n if pos > TOLL_BOOTH_AREA:\n if veh_id not in self.cars_waiting_for_toll:\n if self.simulator == 'traci':\n # Disable lane changes inside Toll Area\n lc_mode = self.k.kernel_api.vehicle.\\\n getLaneChangeMode(veh_id)\n self.k.kernel_api.vehicle.setLaneChangeMode(\n veh_id, 512)\n else:\n lc_mode = None\n color = self.k.vehicle.get_color(veh_id)\n self.k.vehicle.set_color(veh_id, (255, 0, 255))\n self.cars_waiting_for_toll[veh_id] = {\n \"lane_change_mode\": lc_mode,\n \"color\": color\n }\n else:\n if pos > 120:\n if self.toll_wait_time[lane] < 0:\n traffic_light_states[lane] = \"G\"\n else:\n traffic_light_states[lane] = \"r\"\n self.toll_wait_time[lane] -= 1\n\n new_tls_state = \"\".join(traffic_light_states)\n\n if new_tls_state != self.tl_state:\n self.tl_state = new_tls_state\n self.k.traffic_light.set_state(\n node_id=TB_TL_ID, state=new_tls_state)\n\n # TODO: decide on a good reward function\n def compute_reward(self, rl_actions, **kwargs):\n \"\"\"See class definition.\"\"\"\n return np.mean(self.k.vehicle.get_speed(self.k.vehicle.get_ids()))\n\n ###########################################################################\n # The below methods need to be updated by child classes. 
#\n ###########################################################################\n\n @property\n def action_space(self):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n pass\n\n @property\n def observation_space(self):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n pass\n\n def _apply_rl_actions(self, rl_actions):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n pass\n\n def get_state(self):\n \"\"\"See parent class.\n\n To be implemented by child classes.\n \"\"\"\n return []\n","repo_name":"flow-project/flow","sub_path":"flow/envs/bay_bridge.py","file_name":"bay_bridge.py","file_ext":"py","file_size_in_byte":10368,"program_lang":"python","lang":"en","doc_type":"code","stars":978,"dataset":"github-code","pt":"69"} +{"seq_id":"2047414522","text":"print('''\\nQ5: to randomly shuffle the elements of 2D list \nand write it back to the file with that particular order.\\n. ''')\nimport random\ndef shuffle2DList(filename):\n\tmainlist = [] \t# A list to store content of file\n\tfile = open(filename,'r')\t# open file in read mode\n\tF = file.read().splitlines()\n\tindex = 0\t#to count the elements\n\tfor i in range(0,3):\t# reading every line \n\t\tl = []\n\t\tfor j in range(0,4):\n\t\t\tl.append(int(F[index]))\t #insert each line in List\n\t\t\tindex += 1\n\t\tmainlist.append(l)\n\tfile.close()\n\tprint(\"File content BEFORE shuffling the numbers: \\n\")\n\tfor rows in mainlist:\n\t\tfor columns in rows:\n\t\t\tprint(columns, end= ' ')\n\t\tprint('')\n\n\tprint(\"\\nFile content AFTER shuffling the numbers: \\n\")\n\trandom.shuffle(mainlist)\t# randomly shuffle the list\n\tfor rows in mainlist:\n\t\tfor columns in rows:\n\t\t\tprint(columns, end= ' ')\n\t\tprint('')\n\n\tfile = open(filename,'w')\t# open file in read mode\n\tfor rows in range(0,3):\t# reading every line \n\t\tfor columns in range(0,4):\n\t\t\tfile.write(str(mainlist[rows][columns])+'\\n')\n\tfile.close()\n \nshuffle2DList('q5.txt')\n","repo_name":"mohamedkharma/side-projects","sub_path":"Python/randomlyShuffle2dList.py","file_name":"randomlyShuffle2dList.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"71181051100","text":"\"\"\"\nDependencies:\n\n- OpenCV >= 2.4.4\n\"\"\"\n\nimport sys\n\nimport numpy as np\nimport cv2\n\n\nDEBUG = True\n\n\n# http://stackoverflow.com/questions/10948589/choosing-correct-hsv-values-for-opencv-thresholding-with-inranges\n\n# The HSV value range that is used to get green color of the image\nGREEN_RANGE_MIN = np.array([50, 70, 70], np.uint8)\nGREEN_RANGE_MAX = np.array([75, 255, 255], np.uint8)\n\n\ndef find_color(image, min_hsv, max_hsv):\n \"\"\"Returns black and white image where green color is white.\"\"\"\n hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n return cv2.inRange(hsv_image, min_hsv, max_hsv)\n\n\ndef color_range_to_transparent(image, min_hsv, max_hsv):\n \"\"\"Returns image where HSV color range is converted to transparent.\n\n image: OpenCV format image\n min: Minimum HSV value as np.array\n max: Maximum HSV value as np.array\n \"\"\"\n bw_image = find_color(image, min_hsv, max_hsv)\n\n if DEBUG:\n cv2.imwrite('debug.jpg', bw_image)\n\n # Find the matching pixels\n non_zero_pixels = cv2.findNonZero(bw_image)\n\n # Add alpha channel to new image\n new_image = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2BGRA)\n\n for pixel in non_zero_pixels:\n x, y = pixel[0][1], pixel[0][0]\n new_image[x][y] 
= np.array([0, 0, 0, 0], np.uint8)\n\n cv2.imwrite('new.png', new_image)\n\n\ndef main():\n file_name = sys.argv[1]\n image = cv2.imread(file_name)\n new_image = color_range_to_transparent(image, GREEN_RANGE_MIN,\n GREEN_RANGE_MAX)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"kimmobrunfeldt/random_python_utils","sub_path":"color_to_transparent.py","file_name":"color_to_transparent.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"86707464565","text":"#-*- coding:utf-8 -*-\nimport unittest\nimport yaml\nfrom source.utilities.mclang import mclang_to_dict\nfrom source.utilities.splitter import dict_split\n\n\nclass TestSplitter(unittest.TestCase):\n\n def test_splitter(self):\n\n lang_file = open('test/test3.lang', 'r', encoding='utf-8')\n lang = mclang_to_dict(lang_file, lambda x: x.strip().startswith('S:'))\n\n config_file = open('config/division.yml', 'r', encoding='utf-8')\n config = yaml.load(config_file)\n\n test_result = dict_split(lang, config)\n test_ans = {'tooltip': {'S:gt.tooltip.blah': 'blah tooltip', 'S:gt.multiitem.blaah.tooltip': 'blaah tooltip'},\n 'gt_multiitem': {'S:gt.multiitem.blaah.name': 'blaah'}, 'misc': {'S:enchantment.blaaah': 'enchantment blaaah'}}\n\n self.assertDictEqual(test_result, test_ans)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"TeamNED/gregtech6-chinese-translation","sub_path":"test/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"69"} +{"seq_id":"42102399600","text":"def docker_build_impl(ctx):\n args = []\n if not ctx.attr.use_cache:\n args += ['--force-rm', '--no-cache']\n cmd = '\\n'.join([\n \"set -e\",\n \"rm -rf _docker_ctx\",\n \"mkdir _docker_ctx\",\n \"srcs=(%s)\" % (cmd_helper.join_paths(\" \", set(ctx.files.data))),\n \"for src in ${srcs[@]}; do\",\n \" dir=$(dirname $src)\",\n \" dir=${dir#%s}\" % (ctx.configuration.bin_dir.path),\n \" dir=${dir#%s}\" % (ctx.configuration.genfiles_dir.path),\n \" mkdir -p _docker_ctx/$dir\",\n \" cp -L --preserve=all $src _docker_ctx/$dir\",\n \"done\",\n \"cp %s _docker_ctx\" % (ctx.file.src.path),\n \"cd _docker_ctx\",\n \"docker build -t %s %s .\" % (ctx.attr.image_name, ' '.join(args)),\n \"touch ../\" + ctx.outputs.done_marker.path,\n ])\n ctx.action(\n inputs = [ctx.file.src] + ctx.files.deps + ctx.files.data,\n outputs = [ctx.outputs.done_marker],\n mnemonic = 'DockerBuild',\n command = cmd,\n use_default_shell_env = True)\n\n return struct(dockerfile = ctx.file.src)\n\ndocker_build = rule(\n docker_build_impl,\n attrs = {\n \"src\": attr.label(\n allow_files = True,\n single_file = True,\n ),\n \"image_name\": attr.string(),\n \"data\": attr.label_list(allow_files = True),\n \"deps\": attr.label_list(\n providers = [\"dockerfile\"],\n ),\n \"use_cache\": attr.bool(),\n },\n outputs = {\"done_marker\": \"%{name}.done\"},\n)\n","repo_name":"google/shipshape","sub_path":"tools/build_rules/docker.bzl","file_name":"docker.bzl","file_ext":"bzl","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":266,"dataset":"github-code","pt":"69"} +{"seq_id":"4376368225","text":"import Adafruit_DHT as dht\nimport requests\nfrom dotenv import dotenv_values\nimport asyncio\n\nconfig = dotenv_values(\"../.env\")\nurl = config[\"URL\"]\nuuid = config[\"UUID\"]\n\nasync def main():\n print(\"starting...\")\n while True:\n humidity, 
temperature = dht.read_retry(dht.DHT22, 4, delay_seconds=5)\n humidity = round(humidity, 2)\n temperature = round(temperature, 2)\n print(f\"temp={temperature:0.2f} humi={humidity:0.2f}\")\n\n res = requests.post(\n url=url,\n json={\n \"uuid\": uuid,\n \"temperature\": temperature,\n \"humidity\": humidity,\n },\n )\n\n if res.status_code != 200:\n print(res.status_code)\n print(res.text)\n \n await asyncio.sleep(60)\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"noname2048/cj-rasp","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26379328013","text":"# Problem 2\r\n\r\n# Find the sum of all the even-valued terms in the\r\n# Fibonacci sequence which do not exceed four million.\r\n\r\ni = 1\r\nj = 0\r\nfib = [1, 2]\r\nfib_even = [2]\r\n\r\nwhile fib[i] <= 4000000:\r\n j = fib[i] + fib[i-1]\r\n if j <= 4000000:\r\n fib.append(j)\r\n if j%2 == 0:\r\n fib_even.append(j)\r\n else:\r\n break\r\n \r\n i += 1\r\n \r\n\r\nprint(fib)\r\nprint(fib_even)\r\nprint(sum(fib_even))\r\n \r\n\r\n\r\n\r\n","repo_name":"parrott-kevin/project-euler","sub_path":"python/problem_02.py","file_name":"problem_02.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"29659799218","text":"import os\nimport logging\nfrom typing import Dict, Any, Optional, List\n\nfrom .git_utils import GitHubRepo, DRY_RUN\nfrom .github_commenter import BotCommentBuilder, Item\nfrom .github_skipped_tests_comment import get_skipped_tests_comment\nfrom .github_tag_teams import get_tags\nfrom .github_docs_comment import get_doc_url\nfrom .ci_runtime import ci_runtime_comment\n\nPR_QUERY = \"\"\"\n query ($owner: String!, $name: String!, $number: Int!) {\n repository(owner: $owner, name: $name) {\n pullRequest(number: $number) {\n title\n body\n state\n isDraft\n number\n baseRefOid\n author {\n login\n }\n labels(first:100) {\n nodes {\n name\n }\n }\n comments(last: 100) {\n pageInfo {\n hasPreviousPage\n }\n nodes {\n author {\n login\n }\n databaseId\n body\n }\n }\n commits(last: 1) {\n nodes {\n commit {\n oid\n statusCheckRollup {\n contexts(first: 100) {\n pageInfo {\n hasNextPage\n }\n nodes {\n ... 
on StatusContext {\n state\n context\n targetUrl\n }\n }\n }\n }\n }\n }\n }\n }\n }\n }\n\"\"\"\n\n\n# TODO: These are all disabled for now, as they get ported over to lambda we can\n# turn them back on\nCOMMENT_SECTIONS = {\n \"ccs\": lambda pr_data, github: get_tags(pr_data, github, team_issue=10317),\n \"skipped-tests\": lambda pr_data, github: get_skipped_tests_comment(\n pr_data, github=github\n ),\n \"docs\": lambda pr_data, github: get_doc_url(pr_data),\n \"runtime\": lambda pr_data, github: ci_runtime_comment(pr_data, github),\n}\n\n\ndef github_pr_comment(\n webhook_pr_data: Dict[str, Any],\n user: str,\n repo: str,\n dry_run: bool,\n commenters: Optional[List[str]] = None,\n):\n logger = logging.getLogger(\"py-github\")\n test_data = None\n github = GitHubRepo(\n user=user,\n repo=repo,\n token=DRY_RUN if dry_run else os.environ[\"GITHUB_TOKEN\"],\n test_data=test_data,\n )\n logger.info(f\"Generated github: {github}\")\n\n pr_data = github.graphql(\n PR_QUERY,\n {\n \"owner\": user,\n \"name\": repo,\n \"number\": webhook_pr_data[\"number\"],\n },\n )\n\n pr_data = pr_data[\"data\"][\"repository\"][\"pullRequest\"]\n commenter = BotCommentBuilder(github=github, data=pr_data)\n\n items = {}\n for key, generator in COMMENT_SECTIONS.items():\n if commenters is not None and key not in commenters:\n continue\n\n logging.info(f\"Processing commenter: {key}\")\n # Don't re-fetch items that have declared themselves done\n if not commenter.is_done(key):\n try:\n _, content = generator(pr_data, github)\n items[key] = Item(key=key, text=content, is_done=False)\n except Exception as e:\n logger.exception(e)\n\n logger.info(f\"Commenting {len(items)} items: {items}\")\n commenter.post_items(items=list(items.values()))\n","repo_name":"tlc-pack/ci","sub_path":"terraform/tvm_bot/tvm_bot/github_pr_comment.py","file_name":"github_pr_comment.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"} +{"seq_id":"11843746302","text":"from setuptools import setup, find_packages\nimport os\nimport sys\n\n\nif sys.version_info[0] < 3:\n with open('README.md') as f:\n long_description = f.read()\nelse:\n with open('README.md', encoding='utf-8') as f:\n long_description = f.read()\n\nwith(open(\"version.txt\", \"r\")) as f:\n version = f.read()\n\nwith(open(\"requirements.txt\", \"r\")) as f:\n requirements = f.read()\n\n\nsetup(\n name='effcossim',\n version=version,\n description='Efficient Pairwise Cosine Similarity Computation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='ngshya',\n author_email='ngshya@gmail.com',\n url='https://github.com/ngshya/effcossim',\n license='GPLv3',\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=requirements,\n include_package_data=True,\n)","repo_name":"ngshya/effcossim","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"39234504448","text":"\nimport numpy as np\n\nfrom collections import defaultdict\n\nfrom lib.mcts.utils.factory import safe_deepcopy_env\n\n\nclass Node(object):\n \"\"\"A tree node.\n \"\"\"\n\n def __init__(self, parent, planner):\n \"\"\"New node.\n\n :param parent: its parent node\n :param planner: the planner using the node\n\n Parameters\n ----------\n parent : Node\n Its parent node.\n planner : AbstractPlanner\n The planner using the node.\n 
\"\"\"\n\n self.parent = parent\n self.planner = planner\n\n # Dict of children nodes, indexed by action labels.\n self.children = {}\n\n # Number of times the node was visited.\n self.count = 0\n\n def get_value(self) -> float:\n \"\"\"Evaluate the node return.\n\n Returns\n -------\n float\n An estimate of the node value.\n\n Raises\n ------\n NotImplementedError\n This function is abstract and must be defined separately \n in each agent that inherits this class.\n \"\"\"\n raise NotImplementedError()\n\n def expand(self, branching_factor):\n \"\"\"Expand the node and discover children.\n\n Parameters\n ----------\n branching_factor : int\n The number of the node's children.\n \"\"\"\n for a in range(branching_factor):\n self.children[a] = type(self)(self, self.planner)\n\n def selection_rule(self):\n \"\"\"A selection criterion.\n\n Raises\n ------\n NotImplementedError\n This function is abstract and must be defined separately \n in each agent that inherits this class.\n \"\"\"\n raise NotImplementedError()\n\n @staticmethod\n def breadth_first_search(root, operator=None, condition=None, condition_blocking=True):\n \"\"\"Breadth-first search of all paths to nodes that meet a given condition.\n\n Parameters\n ----------\n root : Node\n Starting node.\n operator : bool, optional\n Will be applied to all traversed nodes, by default None\n condition : function, optional\n Nodes meeting that condition will be returned, by default None\n condition_blocking : bool, optional\n Do not explore a node which met the condition, by default True\n\n Yields\n ------\n List\n List of paths to nodes that met the condition.\n \"\"\"\n queue = [(root, [])]\n while queue:\n (node, path) = queue.pop(0)\n if (condition is None) or condition(node):\n returned = operator(node, path) if operator else (node, path)\n yield returned\n if (condition is None) or not condition_blocking or not condition(node):\n for next_key, next_node in node.children.items():\n queue.append((next_node, path + [next_key]))\n\n def is_leaf(self):\n return not self.children\n\n def path(self):\n \"\"\"Computes the path of action labels from the root to the node.\n\n Returns\n -------\n List[Node]\n Sequence of action labels from the root to the node.\n \"\"\"\n node = self\n path = []\n while node.parent:\n for a in node.parent.children:\n if node.parent.children[a] == node:\n path.append(a)\n break\n node = node.parent\n return reversed(path)\n\n def sequence(self):\n \"\"\"Computes the path from the root to the node.\n\n Returns\n -------\n List[Node]\n A sequence of nodes from the root to the node.\n \"\"\"\n node = self\n path = [node]\n while node.parent:\n path.append(node.parent)\n node = node.parent\n return reversed(path)\n\n @staticmethod\n def all_argmax(x):\n \"\"\"Returns the non-zero elements of a np.ndarray like \n structure which are the row-wise maximum values of that\n structure.\n\n Parameters\n ----------\n x : np.ndarray\n The numpy.array-like structure.\n\n Returns\n -------\n np.ndarray\n The list of indexes of all maximums of `x`.\n \"\"\"\n m = np.amax(x)\n return np.nonzero(x == m)[0]\n\n def random_argmax(self, x):\n \"\"\"Randomly tie-breaking `argmax`.\n \n Parameters\n ----------\n x : np.ndarray\n An array\n\n Returns\n -------\n int\n A random index among the maximums.\n \"\"\"\n indices = Node.all_argmax(x)\n return self.planner.np_random.choice(indices)\n\n def __str__(self):\n return \"{} (n:{}, v:{:.2f})\".format(list(self.path()), self.count, self.get_value())\n\n def __repr__(self):\n return 
''.format(id(self))\n\n def get_trajectories(self, full_trajectories=True, include_leaves=True):\n \"\"\"Get a list of visited nodes corresponding to the node subtree.\n\n Parameters\n ----------\n full_trajectories : bool, optional\n Return a list of observation sequences, else a list of observations, by default True\n include_leaves : bool, optional\n Include leaves or only expanded nodes, by default True\n\n Returns\n -------\n List\n The list of trajectories.\n \"\"\"\n trajectories = []\n if self.children:\n for action, child in self.children.items():\n child_trajectories = child.get_trajectories(\n full_trajectories, include_leaves)\n if full_trajectories:\n trajectories.extend(\n [[self] + trajectory for trajectory in child_trajectories])\n else:\n trajectories.extend(child_trajectories)\n if not full_trajectories:\n trajectories.append(self)\n elif include_leaves:\n trajectories = [[self]] if full_trajectories else [self]\n return trajectories\n\n def get_obs_visits(self, state=None):\n \"\"\"Get number of visits given an observation.\n\n Parameters\n ----------\n state : np.ndarray, optional\n The given observation, by default None\n\n Returns\n -------\n Tuple[int, int]\n The number of visits.\n \"\"\"\n visits = defaultdict(int)\n updates = defaultdict(int)\n if hasattr(self, \"observation\"):\n for node in self.get_trajectories(full_trajectories=False,\n include_leaves=False):\n if hasattr(node, \"observation\"):\n visits[str(node.observation)] += 1\n if hasattr(node, \"updates_count\"):\n updates[str(node.observation)] += node.updates_count\n else:\n # Replay required\n for node in self.get_trajectories(full_trajectories=False,\n include_leaves=False):\n replay_state = safe_deepcopy_env(state)\n for action in node.path():\n observation, _, _, _ = replay_state.step(action)\n visits[str(observation)] += 1\n return visits, updates\n","repo_name":"AndreasKaratzas/omniboost-v1","sub_path":"lib/mcts/src/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":7327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37201125452","text":"import numpy as np\nimport os\nfrom keras.preprocessing import image\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten, Activation\nfrom keras.layers.core import Dropout\nfrom keras.layers import LeakyReLU, Dropout\nfrom keras.layers import BatchNormalization\nimport datetime\n\ndropout = 0.4\nclassifier = Sequential()\n\nclassifier.add(Conv2D(64, (3, 3), input_shape=(128, 128, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\nclassifier.add(Conv2D(64, (3, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\nclassifier.add(Conv2D(64, (3, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\nclassifier.add(Conv2D(64, (3, 3)))\nclassifier.add(LeakyReLU(alpha=0.2))\nclassifier.add(MaxPooling2D(pool_size=(2, 2)))\nclassifier.add(Dropout(dropout))\n\nclassifier.add(Flatten())\nclassifier.add(Dropout(0.5))\nclassifier.add(Dense(units=256, activation='relu'))\nclassifier.add(Dropout(0.5))\nclassifier.add(Dense(units=10, activation='sigmoid'))\n\nclassifier.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\n# Image 
preprocessing\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n vertical_flip=True,\n rotation_range=90,\n width_shift_range=0.2,\n height_shift_range=0.2)\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n\ntraining_set = train_datagen.flow_from_directory(\n 'training_set/second_attempt',\n target_size=(128, 128),\n batch_size=32,\n class_mode='categorical')\n\ntest_set = test_datagen.flow_from_directory(\n 'test_set/second_attempt',\n target_size=(128, 128),\n batch_size=32,\n class_mode='categorical')\n\nclassifier.fit_generator(training_set, steps_per_epoch=250, epochs=25, validation_data=test_set, validation_steps=65)\n\n# Single classifications to simulate real use case in the Android application\n# product_names array stores the directory names where the test pictures will come from\n# Corresponds to the one hot encoding of the classifier\nproduct_names = ['apple', 'banana', 'cocoa', 'coffee', 'cucumber', 'onion', 'peach', 'potato', 'strawberry', 'tomato']\n# Dictionary to store the number of correct predictions for each class\ncorrect = dict()\nfor name in product_names:\n count = 0\n for img_name in os.listdir('D:\\\\ImageNet\\\\dataset\\\\training_set\\\\second_attempt\\\\' + name + '\\\\testing'):\n test_image = image.load_img(img_name, target_size = (128, 128))\n test_image = image.img_to_array(test_image)\n test_image = np.expand_dims(test_image, axis = 0)\n result = classifier.predict(test_image)\n index = np.where(result==1)\n if index[1] != []:\n count += 1\n correct[name] = count\nprint(correct)","repo_name":"akrstova/cnn-gan-experiments","sub_path":"cnn/single_classifications.py","file_name":"single_classifications.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"72454040540","text":"n= int(input())\nlis = [int(i) for i in input()]\nchanges = 0\nans = 0\n\nfor i in range(n-1, -1, -1):\n if (changes+lis[i])%2 == 1:\n changes += 1\n ans += 1\n\nprint(ans)","repo_name":"AlanBui1/Competitive-Programming-Solutions","sub_path":"fts.py","file_name":"fts.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5659091977","text":"# Ejemplo de como usar cámara móvil Android desde OpenCV\n#\n# Descargar e instalar en Android IP Webcam\n# Configurar la resolución y calidad de video para mejorar la velocidad de transmisión\n# Iniciar servidor\n# Copiar la dirección IP del servicio\n# Modificar la dirección URL en el código\n\n\nimport requests\nimport cv2\nimport numpy as np\n\n \n#Modificar aquí la dirección del servicio\nurl = \"http://192.168.100.65:8080/shot.jpg\"\n \n\nwhile True:\n img_resp = requests.get(url)\n img_arr = np.array(bytearray(img_resp.content), dtype=np.uint8)\n img = cv2.imdecode(img_arr, -1)\n\n # width = int(img.shape[1] * 50 / 100)\n # height = int(img.shape[0] * 50 / 100)\n # dsize = (width, height)\n # output = cv2.resize(img, dsize)\n # cv2.imshow(\"Android_cam\", img)\n\n cv2.imshow(\"Android_cam\", img)\n \n # Press Esc key to exit\n if cv2.waitKey(1) == 27:\n break\n 
\ncv2.destroyAllWindows()\n","repo_name":"ArturoBL/OpenCV","sub_path":"Python/Basic/WirelessIPCam/WirelessIPCam.py","file_name":"WirelessIPCam.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"30669894146","text":"#!/usr/bin/env python \n# _*_ coding:utf-8 _*_ \n# \n# @Version : 1.0 \n# @Time : 08/08/2018 2:25 PM \n# @Author : yanxuewu \n# @File : data_load.py\nimport os\nimport queue\nimport random\nimport threading\nfrom PIL import Image\nimport numpy as np\nfrom data.data_preprocess import batch_process\nclass DataLoader(object):\n\tdef __init__(self, data_path, data_type, distort_types):\n\t\t#super(DataLoader, self).__init__(data_path, data_type, distort_types)\n\t\tself.data_path = data_path\n\t\tself.data_type = data_type\n\t\tself.distort_types = distort_types\n\n\tdef load_data(self, batch_size, shuffle):\n\t\t'''\n\t\tThis method should be implemented by its subclasses\n\t\t:return: a batch of data\n\t\t'''\n\n\t\traise NotImplementedError\n\n\nclass TwoAfcDataLoader(DataLoader):\n\tdef __init__(self, data_path, data_type='train', distort_types=['cnn', 'mix', 'traditional']):\n\t\t'''\n\t\t:param data_path: str, the data stored path\n\t\t:param data_type: str, optional 'train', 'val'\n\t\t:param distort_types: ndarray, ['cnn', 'mix', 'traditional', ...]\n\t\t'''\n\t\tsuper(TwoAfcDataLoader, self).__init__(data_path, data_type, distort_types)\n\n\t\t# distort type paths\n\t\tdistort_paths = []\n\t\tfor distort_type in distort_types:\n\t\t\tdistort_paths.append(os.path.join(data_path, data_type, distort_type))\n\n\t\tinput_paths = []\n\n\t\tele_paths = ['ref', 'p0', 'p1', 'judge']\n\t\tfor path in distort_paths:\n\t\t\tprint('Searching current input path: ', path)\n\t\t\tc_file_names = os.listdir(os.path.join(path, ele_paths[0]))\n\t\t\tfor img_name in c_file_names:\n\t\t\t\tprint('Searching input images: ', img_name)\n\t\t\t\tc_ref_path = os.path.join(path, ele_paths[0], img_name)\n\t\t\t\tc_p0_path = os.path.join(path, ele_paths[1], img_name)\n\t\t\t\tc_p1_path = os.path.join(path, ele_paths[2], img_name)\n\t\t\t\tc_judge_path = os.path.join(path, ele_paths[3], img_name[:-3]+'npy')\n\n\t\t\t\tinput_path_tuple = (c_ref_path, c_p0_path, c_p1_path, c_judge_path)\n\t\t\t\tinput_paths.append(input_path_tuple)\n\n\t\tself.input_paths = input_paths\n\n\tdef load_data(self, batch_size, shuffle=True):\n\t\t'''\n\n\t\t:param batch_size:\n\t\t:return: ndarray with size [batch_size, 4] where [:, 0] is the\n\t\t'''\n\n\t\tinput_queue = queue.Queue(maxsize=5*batch_size)\n\n\t\tdef en_queue():\n\t\t\twhile True:\n\t\t\t\trandom.shuffle(self.input_paths) # Shuffle data in each epoch\n\t\t\t\tfor ele in self.input_paths:\n\t\t\t\t\tinput_queue.put(ele)\n\t\tself.queue_thread = threading.Thread(target=en_queue)\n\t\t# When the major thread is finished, this thread would be killed immediately.\n\t\tself.queue_thread.setDaemon(True)\n\t\tself.queue_thread.start()\n\n\t\tdef convert_paths_to_data(paths):\n\t\t\tc_data = []\n\t\t\tfor one_input_path in paths:\n\t\t\t\t# ndarray\n\t\t\t\tref_img_ = Image.open(one_input_path[0]).convert('RGB')\n\t\t\t\tp0_img_ = Image.open(one_input_path[1]).convert('RGB')\n\t\t\t\tp1_img_ = Image.open(one_input_path[2]).convert('RGB')\n\n\t\t\t\t# Preprocess the input data\n\t\t\t\t# The input data must be in [0, 255]\n\t\t\t\t# process an image object and return a ndarray with shape [H, W, C]\n\t\t\t\tref_img = batch_process(ref_img_)[0]\n\t\t\t\tp0_img = 
batch_process(p0_img_)[0]\n\t\t\t\tp1_img = batch_process(p1_img_)[0]\n\t\t\t\t# float32\n\t\t\t\tjudge = np.load(one_input_path[3])[0]\n\t\t\t\tc_data.append((ref_img, p0_img, p1_img, judge))\n\t\t\treturn c_data\n\n\t\tbatch_data_path = []\n\t\twhile True:\n\t\t\tfor i in range(batch_size):\n\t\t\t\tbatch_data_path.append(input_queue.get())\n\t\t\t# Convert path to instance\n\t\t\tbatch_data = convert_paths_to_data(batch_data_path)\n\t\t\tyield batch_data\n\t\t\tbatch_data_path = []\n\n\nclass JndDataLoader(DataLoader):\n\tdef __init__(self, data_path, data_type='val', distort_types=['cnn', 'traditional']):\n\t\t'''\n\t\t:param data_path: str, the data stored path\n\t\t:param data_type: str, optional 'train', 'val'\n\t\t:param distort_types: ndarray, ['cnn', 'mix', 'traditional', ...]\n\t\t'''\n\t\tsuper(JndDataLoader, self).__init__(data_path, data_type, distort_types)\n\t\t#TODO\n\n\n\tdef load_data(self, batch_size, shuffle=True):\n\n\t\tprint('This is the jnd data loader implentation')\n\t\t# TODO\n\t\tpass\n\n\nclass TFRecordConverter(object):\n\tdef __init__(self, data_path, data_type='train'):\n\t\tpass\n\n","repo_name":"xuehuachunsheng/PerceptualSimilarity_TF","sub_path":"data/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26486567553","text":"# програмка принимает у юзера четыре символа сразу и выдает их задом наперед\n\nlist = []\n\nele = input(\"Print any 4 symbols: \")\nfor x in ele: \n list.append(x)\n\ni = 3\nwhile i >= 0:\n print(list[i], end=\"\")\n i = i - 1\n\n \n","repo_name":"dobrodiy555/portfolio","sub_path":"Python programs/Symbols vice versa.py","file_name":"Symbols vice versa.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21939534184","text":"import time\n\nclass Tamagotchi:\n t = time.time()\n def __init__( self, name, hungry, stamina, energy):\n self.name = name\n self.hungry = hungry\n self.stamina = stamina\n self.energy = energy\n\n def play(self):\n self.stamina += 2\n self.energy -= 1\n\n def feed(self):\n self.stamina += 1\n self.hungry -= 2\n\n def sleep(self):\n self.hungry += 1\n self.energy += 2\n \n def lapse(self):\n t1 = time.time()\n time_ = int( t1 - Tamagotchi.t )\n\n self.stamina -= time_\n self.hungry += time_\n self.energy -= time_\n Tamagotchi.t = t1\n print(f\"Total time : { time_ }\")\n\n def status(self):\n return (f\"Name: {self.name}\\nStatus:\\nHungry: {self.hungry} | Stamina: {self.stamina} | Energy: {self.energy} \") \n\n# Main App\n\nt = Tamagotchi('Tamagotchi 1', 10, 100, 100)\n\nprint(t.status())\nprint(\"Options:\\n1: play\\n2: feed: \\n3: sleep\")\n\noption=int(input(\"Choose one option or press 0 to exit: \"))\n\nwhile option !=0 and t.hungry >= 0 and t.hungry < 100 and t.stamina > 0 and t.energy > 0:\n if option==1:\n t.play()\n elif option==2:\n t.feed()\n elif option==3:\n t.sleep()\n else:\n print(\"Invalid option.\")\n t.lapse()\n print(t.status())\n if t.hungry >= 0 and t.hungry < 100 and t.stamina > 0 and t.energy > 0:\n print(\"Options:\\n1: play\\n2: feed: \\n3: sleep\")\n option=int(input(\"Choose one option or press 0 to exit: \"))\n\nif option != 0:\n print(\"Game 
Over\")","repo_name":"jhonnierandrey/d-army","sub_path":"python/tamagotchi.py","file_name":"tamagotchi.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"35497631837","text":"'''\nAuthor: your name\nDate: 2022-02-10 20:29:18\nLastEditTime: 2022-02-12 16:38:08\nLastEditors: Please set LastEditors\nDescription: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\nFilePath: \\Workspace\\XML\\block4\\task19.py\n'''\n'''\nAuthor: your name\nDate: 2022-02-10 20:29:18\nLastEditTime: 2022-02-12 16:18:27\nLastEditors: Please set LastEditors\nDescription: 打开koroFileHeader查看配置 进行设置: https://github.com/OBKoro1/koro1FileHeader/wiki/%E9%85%8D%E7%BD%AE\nFilePath: \\Workspace\\XML\\block4\\task19.py\n'''\nimport rdflib\nimport owlrl\n\ngraph = rdflib.Graph()\ngraph.parse(\"C:\\\\Users\\\\ydzat\\\\OneDrive\\\\Workspace\\\\XML\\\\block4\\\\Schulpersonal.rdf\")\ngraph.parse(\"C:\\\\Users\\\\ydzat\\\\OneDrive\\\\Workspace\\\\XML\\\\block4\\\\Schulpers.owl\")\nper = rdflib.Namespace(\"http://example.org/personal/per#\")\nowlrl.DeductiveClosure(owlrl.OWLRL_Semantics).expand(graph)\n\nprint(\"Brothers\")\nfor s, o in graph.subject_objects(per.isBrother):\n if not (s == o):\n print(s, \"is brother of\", o)\n","repo_name":"ydzat/Workspace","sub_path":"XML/block4/task19.py","file_name":"task19.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"43744787220","text":"\"\"\"Drone Deploy Dataset - Semantic Segmentation.\"\"\"\nfrom PIL import Image\nimport sys\nimport os\nimport numpy as np\nimport random\nimport cv2\n\nfrom typing import Any, Callable, Optional, Tuple\nfrom .vision import VisionDataset\nfrom earthvision.constants.DroneDeploy.config import (\n train_ids,\n val_ids,\n test_ids,\n LABELMAP,\n INV_LABELMAP,\n)\nfrom earthvision.datasets.utils import _urlretrieve\n\n\nclass DroneDeploy(VisionDataset):\n \"\"\"Drone Deploy Semantic Dataset.\n\n Args:\n root (string): Root directory of dataset.\n dataset_type (string, optional): Choose dataset type.\n data_mode (int): 0 for train data, 1 for validation data, and 2 for testing data\n transform (callable, optional): A function/transform that takes in an PIL image and\n returns a transformed version. E.g, transforms.RandomCrop\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n download (bool, optional): If true, downloads the dataset from the internet and\n puts it in root directory. 
If dataset is already downloaded, it is not\n downloaded again.\n\n \"\"\"\n\n resources = {\n \"dataset-sample\": \"https://dl.dropboxusercontent.com/s/h8a8kev0rktf4kq/dataset-sample.tar.gz?dl=0\",\n \"dataset-medium\": \"https://dl.dropboxusercontent.com/s/r0dj9mhyv4bgbme/dataset-medium.tar.gz?dl=0\",\n }\n\n def __init__(\n self,\n root: str,\n dataset_type=\"dataset-sample\",\n data_mode: int = 0,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n download: bool = False,\n ) -> None:\n\n super(DroneDeploy, self).__init__(\n root, transform=transform, target_transform=target_transform\n )\n\n self.root = root\n self.dataset_type = dataset_type\n self.filename = f\"{dataset_type}.tar.gz\"\n self.filepath = os.path.join(self.root, self.filename)\n self.data_mode = data_mode\n self.label_path = f\"{dataset_type}/label-chips\"\n self.image_path = f\"{dataset_type}/image-chips\"\n\n if download and self._check_exists():\n print(\"file already exists.\")\n\n if download and not self._check_exists():\n self.download()\n\n self.load_dataset()\n\n def download(self) -> None:\n \"\"\"Download a dataset, extract it and create the tiles.\"\"\"\n print(f'Downloading \"{self.dataset_type}\"')\n self.root = os.path.expanduser(self.root)\n fpath = os.path.join(self.root, self.filename)\n _urlretrieve(self.resources[self.dataset_type], fpath)\n\n if not os.path.exists(os.path.join(self.root, self.dataset_type)):\n print(f'Extracting \"{self.filepath}\"')\n os.system(f\"tar -xvf {self.filepath}\")\n os.system(f\"mv {self.dataset_type} {self.root}\")\n else:\n print(f'Folder \"{self.dataset_type}\" already exists.')\n\n image_chips = f\"{self.dataset_type}/image-chips\"\n label_chips = f\"{self.dataset_type}/label-chips\"\n\n if not os.path.exists(image_chips):\n os.mkdir(os.path.join(self.root, image_chips))\n if not os.path.exists(label_chips):\n os.mkdir(os.path.join(self.root, label_chips))\n\n run(os.path.join(self.root, self.dataset_type))\n\n def _check_exists(self) -> bool:\n if self.dataset_type not in self.resources.keys():\n print(f\"Unknown dataset {self.dataset_type}\")\n print(f\"Available dataset : {self.resources.keys()}\")\n sys.exit(0)\n\n if os.path.exists(self.filepath):\n return True\n else:\n return False\n\n def load_dataset(self):\n if self.data_mode == 0:\n list_chip = \"train.txt\"\n elif self.data_mode == 1:\n list_chip = \"valid.txt\"\n elif self.data_mode == 2:\n list_chip = \"test.txt\"\n\n files = [\n f\"{os.path.join(self.root, self.dataset_type)}/image-chips/{fname}\"\n for fname in load_lines(os.path.join(self.root, self.dataset_type, list_chip))\n ]\n self.image_files = files\n\n def __getitem__(self, idx) -> Tuple[Any, Any]:\n \"\"\"\n Args:\n idx (int): Index\n Returns:\n tuple: (img, target) where target is index of the target class.\n \"\"\"\n image_file = self.image_files[idx]\n label_file = image_file.replace(self.image_path, self.label_path)\n\n img = np.array(load_img(image_file))\n target = mask_to_classes(load_img(label_file))\n target = np.array(target)\n\n if self.transform is not None:\n img = Image.fromarray(img)\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = Image.fromarray(target)\n target = self.target_transform(target)\n return img, target\n\n def __len__(self) -> int:\n return len(self.image_files)\n\n def on_epoch_end(self):\n random.shuffle(self.image_files)\n\n\ndef load_lines(fname):\n with open(fname, \"r\") as f:\n return [line.strip() for line in f.readlines()]\n\n\ndef 
load_img(fname):\n return np.array(Image.open(fname))\n\n\ndef mask_to_classes(mask):\n return to_categorical(mask[:, :, 0], 6)\n\n\ndef to_categorical(y, num_classes=None, dtype=\"float32\"):\n \"\"\"Converts a class vector (integers) to binary class matrix.\n E.g. for use with categorical_crossentropy.\n Args:\n y: class vector to be converted into a matrix\n (integers from 0 to num_classes).\n num_classes: total number of classes. If `None`, this would be inferred\n as the (largest number in `y`) + 1.\n dtype: The data type expected by the input. Default: `'float32'`.\n Returns:\n A binary matrix representation of the input. The classes axis is placed\n last.\n Raises:\n Value Error: If input contains string value\n \"\"\"\n y = np.array(y, dtype=\"int\")\n input_shape = y.shape\n if input_shape and input_shape[-1] == 1 and len(input_shape) > 1:\n input_shape = tuple(input_shape[:-1])\n y = y.ravel()\n if not num_classes:\n num_classes = np.max(y) + 1\n n = y.shape[0]\n categorical = np.zeros((n, num_classes), dtype=dtype)\n categorical[np.arange(n), y] = 1\n output_shape = input_shape + (num_classes,)\n categorical = np.reshape(categorical, output_shape)\n return categorical\n\n\ndef get_split(scene):\n if scene in train_ids:\n return \"train.txt\"\n if scene in val_ids:\n return \"valid.txt\"\n if scene in test_ids:\n return \"test.txt\"\n\n\ndef color2class(orthochip, img):\n ret = np.zeros((img.shape[0], img.shape[1]), dtype=\"uint8\")\n ret = np.dstack([ret, ret, ret])\n colors = np.unique(img.reshape(-1, img.shape[2]), axis=0)\n\n # Skip any chips that would contain magenta (IGNORE) pixels\n seen_colors = set([tuple(color) for color in colors])\n IGNORE_COLOR = LABELMAP[0]\n if IGNORE_COLOR in seen_colors:\n return None, None\n\n for color in colors:\n locs = np.where(\n (img[:, :, 0] == color[0]) & (img[:, :, 1] == color[1]) & (img[:, :, 2] == color[2])\n )\n ret[locs[0], locs[1], :] = INV_LABELMAP[tuple(color)] - 1\n\n return orthochip, ret\n\n\ndef image2tile(\n prefix,\n scene,\n dataset,\n orthofile,\n elevafile,\n labelfile,\n windowx,\n windowy,\n stridex,\n stridey,\n):\n\n ortho = cv2.imread(orthofile)\n label = cv2.imread(labelfile)\n\n assert ortho.shape[0] == label.shape[0]\n assert ortho.shape[1] == label.shape[1]\n\n shape = ortho.shape\n xsize = shape[1]\n ysize = shape[0]\n print(f\"converting {dataset} image {orthofile} {xsize}x{ysize} to chips ...\")\n\n counter = 0\n for xi in range(0, shape[1] - windowx, stridex):\n for yi in range(0, shape[0] - windowy, stridey):\n orthochip = ortho[yi : yi + windowy, xi : xi + windowx, :]\n labelchip = label[yi : yi + windowy, xi : xi + windowx, :]\n\n orthochip, classchip = color2class(orthochip, labelchip)\n\n if classchip is None:\n continue\n\n orthochip_filename = os.path.join(\n prefix, \"image-chips\", scene + \"-\" + str(counter).zfill(6) + \".png\"\n )\n labelchip_filename = os.path.join(\n prefix, \"label-chips\", scene + \"-\" + str(counter).zfill(6) + \".png\"\n )\n\n with open(f\"{prefix}/{dataset}\", mode=\"a\") as fd:\n fd.write(scene + \"-\" + str(counter).zfill(6) + \".png\\n\")\n\n cv2.imwrite(orthochip_filename, orthochip)\n cv2.imwrite(labelchip_filename, classchip)\n counter += 1\n\n\ndef run(prefix, size=300, stride=300):\n lines = [line for line in open(f\"{prefix}/index.csv\")]\n print(\n \"converting images to chips - this may take a few minutes but only needs to be done once.\"\n )\n\n for lineno, line in enumerate(lines):\n line = line.strip().split(\" \")\n scene = line[1]\n dataset = 
get_split(scene)\n\n orthofile = os.path.join(prefix, \"images\", scene + \"-ortho.tif\")\n elevafile = os.path.join(prefix, \"elevations\", scene + \"-elev.tif\")\n labelfile = os.path.join(prefix, \"labels\", scene + \"-label.png\")\n\n if os.path.exists(orthofile) and os.path.exists(labelfile):\n image2tile(\n prefix,\n scene,\n dataset,\n orthofile,\n elevafile,\n labelfile,\n windowx=size,\n windowy=size,\n stridex=stride,\n stridey=stride,\n )\n","repo_name":"jakartaresearch/earth-vision","sub_path":"earthvision/datasets/drone_deploy.py","file_name":"drone_deploy.py","file_ext":"py","file_size_in_byte":9713,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"69"} +{"seq_id":"7483565700","text":"mo = ['a','e','i','o','u']\ncount = 0\nprev = \"\"\nwhile True:\n flag=False\n str = input()\n moo = 0\n ja = 0\n if str==\"end\":\n break\n for s in str:\n if s in mo:\n flag=True\n for s in str:\n if s in mo:\n moo+=1\n ja=0\n else:\n ja+=1\n moo=0\n if moo==3 or ja==3:\n flag=False\n for s in str:\n if prev==s:\n if s!=\"e\" or s!=\"o\":\n flag=False\n prev=s\n\n if flag:\n print(f\"<{str}> is acceptable\")\n else:\n print(f\"<{str}> is not acceptable\")\n\n","repo_name":"Doreki/Python","sub_path":"baek_jun/2_week/4659_re.py","file_name":"4659_re.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"20830081276","text":"# \n# Identifies sentences relevant to the diagnosis of pulmonary edema and evaluates performance of algorithm \n# Requires PE_PATH environment variable to be set to root directory of codebase\n#\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nimport argparse\nimport math\nimport numpy as np \nimport pandas as pd\nimport pprint\nimport re \n\nfrom util.evaluate import evaluate\nfrom util.negation import is_positive\n\nclass CONSTANTS:\n\n noedema_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'no_edema_regex.csv')\n keywords_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'keywords_regex.csv')\n relatedrad_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'related_rad_regex.csv')\n nochange_file = os.path.join(os.environ['PE_PATH'], 'keywords', 'no_change_regex.csv')\n\ndef assign_keyword_label(sentence):\n \"\"\"\n sentence sentence to be labeled \n\n Returns a value indicating the presence/absence of pulmonary edema based on keyword-matching\n - 1.0 pulmonary edema present\n - 0.0 pulmonary edema absent\n - nan no mention of pulmonary edema\n \"\"\" \n # List of keywords indicating no pulmonary edema\n noedema_keywords = pd.read_csv(CONSTANTS.noedema_file)['regex'].tolist()\n # List of keywords related to pulmonary edema \n keywords = pd.read_csv(CONSTANTS.keywords_file)['regex'].tolist()\n \n flag = False \n keyword_label = float('nan')\n # First check if no pulmonary edema is explicitly mentioned \n for key in noedema_keywords:\n if re.search(key, sentence.lower()) is not None:\n flag = True \n keyword_label = 0.0\n\n # Use more general keyword approach to assign mention label \n if not flag:\n keyword_check = is_positive(sentence, keywords)\n if keyword_check is True: keyword_label = 1.0\n elif keyword_check is False: keyword_label = 0.0\n\n return keyword_label \n\ndef assign_related_rad(sentence):\n \"\"\"\n sentence sentence to be labeled \n\n Returns a value indicating the presence/absence of radiologic features related to, but not definitive for, 
pulmonary edema\n - 1.0 related radiologic feature present\n - 0.0 related radiologic feature absent\n - nan no mention of related radiologic feature\n \"\"\"\n # List of keywords for radiologic features related to pulmonary edema \n keywords = pd.read_csv(CONSTANTS.relatedrad_file)['regex'].tolist()\n\n related_rad_label = float('nan')\n # Check if there is any mention of a radiologic feature \n rad_check = is_positive(sentence, keywords, mode='sum')\n if rad_check is True: related_rad_label = 1.0\n elif rad_check is False: related_rad_label = 0.0\n\n return related_rad_label\n\ndef assign_other_finding(chexpert_row):\n \"\"\"\n chexpert_row output of CheXpert labeler (1, 0, nan) for 14 observations in chest radiographs, represented as a Series \n\n Returns a value indicating the presence/absence of other finding(s) that are not pulmonary edema \n - 1.0 other finding(s) present\n - 0.0 other finding(s) absent\n - nan no mention of other findings\n \"\"\"\n other_finding = float('nan')\n # Columns to ignore \n ignore_labels = set(['Reports', 'Edema', 'Support Devices', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Pneumothorax'])\n for col in chexpert_row.index:\n # Skip if column should be ignored or if cell value is empty, e.g. no mention \n if col in ignore_labels or math.isnan(chexpert_row[col]): \n continue \n\n # If no finding is positive, then assume no other findings are present \n elif col == 'No Finding' and chexpert_row[col] == 1.0:\n for other_col in chexpert_row.index:\n if other_col not in ignore_labels.union(set(['Lung Opacity'])) and chexpert_row[other_col] == 0.0:\n other_finding = 0.0 \n\n # Handle 'Lung Opacity' label, which could be indicative of pulmonary edema depending on context\n elif col == 'Lung Opacity':\n # Bilateral opacities are a related radiographic finding for pulmonary edema\n if 'bilateral' in chexpert_row['Reports'] or 'both' in chexpert_row['Reports']:\n continue\n # Unilateral opacities are likely not relevant to pulmonary edema (indicative of other findings)\n elif 'right' in chexpert_row['Reports'] and 'left' not in chexpert_row['Reports']:\n other_finding = abs(chexpert_row[col])\n elif 'left' in chexpert_row['Reports'] and 'right' not in chexpert_row['Reports']:\n other_finding = abs(chexpert_row[col])\n\n # If not a special case, set other finding label equal to cell value \n elif math.isnan(other_finding):\n other_finding = abs(chexpert_row[col])\n\n # If there is positive mention of at least one finding, then other finding label should be 1.0\n else:\n other_finding = max(other_finding, abs(chexpert_row[col]))\n\n return other_finding \n\ndef get_other_finding_mention(chexpert_row):\n ignore_labels = set(['Reports', 'Edema', 'No Finding'])\n for col in chexpert_row.index:\n # Skip if column should be ignored or if cell value is empty, e.g. 
no mention \n if col not in ignore_labels and not math.isnan(chexpert_row[col]): \n return True \n\n return False \n\ndef get_final_label(sentence, chexpert_label, keyword_label, related_rad_label, other_finding, chexpert_row):\n \"\"\"\n sentence sentence to label\n chexpert_label CheXpert label for pulmonary edema\n keyword_label Keyword label from output of assign_keyword_label()\n related_rad_label Related radiologic feature label from output of assign_related_rad()\n other_finding Other finding label from output of assign_other_finding()\n chexpert_row output of CheXpert labeler (1, 0, nan) for 14 observations in chest radiographs, represented as a Series \n\n Returns a value indicating whether sentence is relevant or not relevant to pulmonary edema (1, 0)\n \"\"\"\n final_label = 0.0\n nochange_keywords = pd.read_csv(CONSTANTS.nochange_file)['regex'].tolist()\n\n # If pulmonary edema is mentioned as present, then consider sentence relevant\n if chexpert_label == 1.0 or keyword_label == 1.0:\n final_label = 1.0\n\n # If pulmonary edema is mentioned as absent, then consider sentence relevant \n elif chexpert_label == 0.0 or keyword_label == 0.0:\n final_label = 1.0\n\n # If there is a related radiographic feature and no mention of another finding, then consider sentence relevant\n elif not math.isnan(related_rad_label) and math.isnan(other_finding):\n final_label = 1.0\n\n # If sentence indicates no general change in condition, then consider it to be relevant\n if final_label == 0.0:\n for key in nochange_keywords:\n if re.search(key, sentence.lower()) is not None:\n # Exclude phrases like \"no change in cardiomegaly\" or \"stable atelectasis\"\n if not get_other_finding_mention(chexpert_row): \n final_label = 1.0\n break \n \n return final_label\n\ndef get_all_relevance_data(chexpert_row, metadata, true_labels):\n sentence = chexpert_row['Reports']\n chexpert_label = abs(chexpert_row['Edema'])\n chexpert_label_unprocessed = chexpert_row['Edema']\n other_finding = assign_other_finding(chexpert_row)\n keyword_label = assign_keyword_label(sentence)\n related_rad_label = assign_related_rad(sentence)\n\n final_label = get_final_label(sentence, chexpert_label, keyword_label, related_rad_label, other_finding, chexpert_row)\n\n if true_labels:\n return [sentence, metadata['subject'], metadata['study'], final_label, metadata['relevant'], chexpert_label, chexpert_label_unprocessed, \\\n keyword_label, related_rad_label, other_finding, metadata['comparison'], metadata['comparison label']]\n\n else:\n return [sentence, metadata['subject'], metadata['study'], final_label, chexpert_label, chexpert_label_unprocessed, keyword_label, \\\n related_rad_label, other_finding]\n\ndef print_incorrect(true_labels, predicted_labels):\n for index, sentence in true_labels.iterrows():\n if sentence['relevant'] != predicted_labels['relevant'][index]:\n print(predicted_labels['relevant'][index], sentence['relevant'], sentence['sentence']) \n\ndef evaluate_labeler(true_labels_path, predicted_labels_path, output_path=None):\n true_labels = pd.read_csv(true_labels_path)\n predicted_labels = pd.read_csv(predicted_labels_path)\n\n result = evaluate(true_labels['relevant'].values, predicted_labels['relevant'].values)\n\n if output_path is not None:\n result_df = pd.Series(result).to_frame()\n result_df.to_csv(output_path)\n \n pprint.pprint(result) \n print_incorrect(true_labels, predicted_labels) \n\ndef run_labeler(chexpert_label_path, metadata_labels_path, true_labels=False):\n chexpert_sentences = 
pd.read_csv(chexpert_label_path)\n metadata = pd.read_csv(metadata_labels_path, dtype={'subject': 'str', 'study': 'str'})\n \n all_data = []\n for index, row in chexpert_sentences.iterrows():\n processed_row = get_all_relevance_data(row, metadata.iloc[index, :], true_labels=true_labels)\n all_data.append(processed_row)\n\n columns = []\n if true_labels:\n columns = ['sentence', 'subject', 'study', 'relevant', 'ground_truth_relevant', 'chexpert_label', 'chexpert_unprocessed', \\\n 'keyword_label', 'related_rad_label', 'other_finding', 'comparison_finding', 'comparison_label']\n else:\n columns = ['sentence', 'subject', 'study', 'relevant', 'chexpert_label', 'chexpert_unprocessed', 'keyword_label', 'related_rad_label', 'other_finding']\n\n df = pd.DataFrame(all_data, columns=columns)\n return df \n\ndef main_label():\n \"\"\"\n Run and evaluate labeler for pulmonary edema relevance. Also saves output labels of automatic labeler\n\n Requires as inputs\n 1. CSV file with results of CheXpert labeler\n 2. Filename to write the results of this automatic labeler\n 3. CSV file with true labels (1, 0) \n \"\"\"\n parser = argparse.ArgumentParser(description='Get sentences relevant to pulmonary edema')\n\n # Relative paths to PE_PATH\n parser.add_argument('chexpert_labels_path', type=str, help='Path to file with chexpert-labeled sentences')\n parser.add_argument('output_labels_path', type=str, help='Path to file to write output labels')\n parser.add_argument('true_labels_path', type=str, help='Path to file with ground-truth relevance labels')\n args = parser.parse_args()\n\n chexpert_labels_path = os.path.join(os.environ['PE_PATH'], args.chexpert_labels_path)\n output_labels_path = os.path.join(os.environ['PE_PATH'], args.output_labels_path)\n true_labels_path = os.path.join(os.environ['PE_PATH'], args.true_labels_path)\n\n final_labels = run_labeler(chexpert_labels_path, true_labels_path, true_labels=True)\n final_labels.to_csv(output_labels_path)\n\ndef main_evaluate():\n \"\"\"\n Evaluate results of automatic labeler that identifies whether a sentence is related to pulmonary edema diagnosis. \n \n Requires as inputs\n 1. CSV file with true labels (1, 0) \n 2. CSV file with predicted labels (1, 0)\n 3. Filename to write evaluation results \n \"\"\"\n parser = argparse.ArgumentParser(description='Get sentences relevant to pulmonary edema')\n\n # Relative paths to PE_PATH\n parser.add_argument('true_labels_path', type=str, help='Path to file with ground-truth relevance labels')\n parser.add_argument('predicted_labels_path', type=str, help='Path to file with predicted relevance labels')\n parser.add_argument('output_path', type=str, help='Path to file to write evaluation results')\n args = parser.parse_args()\n\n true_labels_path = os.path.join(os.environ['PE_PATH'], args.true_labels_path) \n predicted_labels_path = os.path.join(os.environ['PE_PATH'], args.predicted_labels_path)\n output_path = os.path.join(os.environ['PE_PATH'], args.output_path)\n\n evaluate_labeler(true_labels_path, predicted_labels_path, output_path=None)\n\ndef main_predict():\n \"\"\"\n Run labeler for pulmonary edema relevance. Also saves output labels of automatic labeler\n\n Requires as inputs\n 1. CSV file with results of CheXpert labeler\n 2. Filename to write the results of this automatic labeler\n 3. 
CSV file with metadata\n \"\"\"\n parser = argparse.ArgumentParser(description='Get sentences relevant to pulmonary edema')\n\n # Relative paths to PE_PATH\n parser.add_argument('chexpert_labels_path', type=str, help='Path to file with chexpert-labeled sentences')\n parser.add_argument('output_labels_path', type=str, help='Path to file to write output labels')\n parser.add_argument('metadata_labels_path', type=str, help='Path to file with subject and study labels')\n args = parser.parse_args()\n\n chexpert_labels_path = os.path.join(os.environ['PE_PATH'], args.chexpert_labels_path)\n output_labels_path = os.path.join(os.environ['PE_PATH'], args.output_labels_path)\n metadata_labels_path = os.path.join(os.environ['PE_PATH'], args.metadata_labels_path)\n\n final_labels = run_labeler(chexpert_labels_path, metadata_labels_path)\n final_labels.to_csv(output_labels_path)\n\ndef test_script():\n chexpert = pd.read_csv(\"data/dataset-small/chexpert-labels-small.csv\", dtype={'subject': 'str', 'study': 'str'})\n metadata = pd.read_csv(\"data/dataset-small/sentences-split-small.csv\", dtype={'subject': 'str', 'study': 'str'})\n print(get_all_relevance_data(chexpert.iloc[119, :], metadata.iloc[119, :], False))\n\nif __name__ == \"__main__\":\n main_evaluate()\n # main_label()\n # test_script()\n\n","repo_name":"shu98/pulmonary-edema-project","sub_path":"nlp/get_relevant_sentences_2.py","file_name":"get_relevant_sentences_2.py","file_ext":"py","file_size_in_byte":13917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21646133126","text":"import gi\n\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\n\ngi.require_version('Gdk', '3.0')\nfrom gi.repository import Gdk\n\ngi.require_version('WebKit', '3.0')\nfrom gi.repository import WebKit\n\n\nclass TranslationHistoryFileChooser(Gtk.FileChooserDialog):\n def __init__(self, parent):\n Gtk.FileChooserDialog.__init__(self, \"Please choose a file\", parent,\n Gtk.FileChooserAction.SAVE,\n (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,\n Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\n\nclass TranslationHistoryToolbarWidget(Gtk.Toolbar):\n _parent = None\n\n def __init__(self, parent):\n self._parent = parent\n\n Gtk.Toolbar.__init__(self)\n\n self.set_style(Gtk.ToolbarStyle.TEXT)\n self.insert(self.widget_remove_selected, 0)\n self.insert(self.widget_remove_all, 1)\n self.insert(self.widget_export_csv, 2)\n self.insert(self.widget_save, 3)\n\n @property\n def widget_remove_selected(self):\n button = Gtk.ToolButton()\n button.set_label(\"Remove selected\")\n button.connect(\"clicked\", self._parent.on_button_remove_selected)\n return button\n\n @property\n def widget_remove_all(self):\n button = Gtk.ToolButton()\n button.set_label(\"Remove all\")\n button.connect(\"clicked\", self._parent.on_button_remove_all)\n return button\n\n @property\n def widget_export_csv(self):\n button = Gtk.ToolButton()\n button.set_label(\"Export as CSV\")\n button.connect(\"clicked\", self._parent.on_button_export_csv)\n return button\n\n @property\n def widget_save(self):\n button = Gtk.ToolButton()\n button.set_label(\"Save changes\")\n button.connect(\"clicked\", self._parent.on_button_save)\n return button\n\n\nclass TranslationHistoryTreeWidget(Gtk.ScrolledWindow):\n _tree = None\n _store = None\n _dispatcher = None\n\n def __init__(self, window=None, dispatcher=None):\n self._store = Gtk.ListStore(bool, str, str)\n Gtk.ScrolledWindow.__init__(self)\n self.add(self.widget_tree)\n\n @property\n def 
store(self):\n return self._store\n\n @store.setter\n def store(self, value):\n for line in reversed(value):\n fields = line.strip(\"\\n\").split(';')\n if len(fields) >= 2:\n self._store.append([0, fields[0].strip(), fields[1]])\n self._tree.set_model(self._store)\n\n @property\n def widget_tree(self):\n self._tree = Gtk.TreeView()\n self._tree.append_column(self.widget_tree_checkbox)\n self._tree.append_column(self.widget_tree_date)\n self._tree.append_column(self.widget_tree_word)\n return self._tree\n\n @property\n def widget_tree_checkbox(self):\n checkbox = Gtk.CellRendererToggle()\n checkbox.connect(\"toggled\", self.on_cell_toggled)\n checkbox.set_padding(5, 5)\n return Gtk.TreeViewColumn(\"\", checkbox, active=0)\n\n @property\n def widget_tree_date(self):\n data = Gtk.CellRendererText()\n data.set_property(\"editable\", True)\n data.set_padding(5, 5)\n data.connect(\"edited\", self.on_data_edited)\n return Gtk.TreeViewColumn(\"Data\", data, text=1)\n\n @property\n def widget_tree_word(self):\n word = Gtk.CellRendererText()\n word.set_property(\"editable\", True)\n word.connect(\"edited\", self.on_word_edited)\n word.set_padding(5, 5)\n return Gtk.TreeViewColumn(\"Word\", word, text=2)\n\n def on_cell_toggled(self, widget, path):\n self._store[path][0] = not self._store[path][0]\n\n def on_data_edited(self, widget, path, text):\n self._store[path][1] = text\n\n def on_word_edited(self, widget, path, text):\n self._store[path][2] = text\n\n\nclass HistoryLabelWidget(Gtk.Label):\n def __init__(self):\n Gtk.Label.__init__(self)\n self.set_margin_top(8)\n self.set_margin_bottom(8)\n self.set_justify(Gtk.Justification.RIGHT)\n\n\nclass HistoryToolbarTopWidget(Gtk.Grid):\n _label = None\n _parent = None\n\n def __init__(self, parent):\n self._parent = parent\n self._label = HistoryLabelWidget()\n\n Gtk.Grid.__init__(self)\n self.attach(self._button, 0, 0, 1, 1)\n self.attach(self._label, 1, 0, 4, 1)\n\n @property\n def label(self):\n self._label.get_label()\n\n @label.setter\n def label(self, value):\n self._label.set_label(value)\n\n @property\n def _button(self):\n button = Gtk.ToolButton()\n button.set_label(\"History file\")\n button.connect(\"clicked\", self._parent.on_history_file_choose)\n return button\n\n\nclass DictionaryHistoryAreaWidget(Gtk.VBox):\n _history = None\n _window = None\n _toolbar_top = None\n _toolbar_bottom = None\n\n def __init__(self, window, history):\n self._window = window\n self._history = history\n\n self._toolbar_top = HistoryToolbarTopWidget(self)\n self._toolbar_top.label = \": %s \" % self.history\n\n self._content = TranslationHistoryTreeWidget(self)\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n\n self._toolbar_bottom = TranslationHistoryToolbarWidget(self)\n\n Gtk.VBox.__init__(self, homogeneous=False, spacing=0)\n self.pack_start(self._toolbar_top, False, False, 0)\n self.pack_start(self._content, True, True, 0)\n self.pack_start(self._toolbar_bottom, False, True, 0)\n\n @property\n def history(self):\n return self._history.history\n\n def on_history_output(self, event, dispatcher):\n self._content.store.clear()\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n self._toolbar_top.label = \": %s \" % self.history\n\n def on_dictionary_clipboard(self, event, dispatcher):\n self._content.store.clear()\n with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n\n def on_dictionary_translation(self, event, dispatcher):\n self._content.store.clear()\n 
with open(self.history, 'r') as stream:\n self._content.store = stream.readlines()\n\n def on_button_remove_selected(self, button):\n for row in self._content.store:\n if row[0] is not False:\n self._content.store.remove(row.iter)\n\n def on_button_remove_all(self, button):\n self._content.store.clear()\n\n def on_button_export_csv(self, button):\n dialog = TranslationHistoryFileChooser(self._window)\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n with open(dialog.get_filename(), 'w') as stream:\n for row in self._content.store:\n stream.write(\"%s;%s\\n\" % (row[1], row[2]))\n dialog.destroy()\n\n def on_button_save(self, button):\n with open(self.history, 'w') as stream:\n for row in reversed(self._content.store):\n stream.write(\"%s;%s\\n\" % (row[1], row[2]))\n\n def on_history_file_choose(self, button):\n dialog = TranslationHistoryFileChooser(self._window)\n response = dialog.run()\n if response == Gtk.ResponseType.OK:\n self._window.on_dictionary_history_changed(dialog.get_filename())\n dialog.destroy()\n","repo_name":"AlexWoroschilow/dictionary-indicator","sub_path":"vendor/uix/widget/history.py","file_name":"history.py","file_ext":"py","file_size_in_byte":7392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"14195201656","text":"from sentinels import NOTHING\n\n\n__all__ = [\n \"TypesRegistry\",\n \"get_registry\"\n]\n\n\nclass TypesRegistry:\n def __init__(self):\n self._mapping = {}\n self._back = {}\n\n def register(self, type_, alias):\n assert isinstance(type_, type)\n assert isinstance(alias, type)\n self._mapping[type_] = alias\n self._back[alias] = type_\n\n def get_alias(self, type_, default=NOTHING):\n if default is NOTHING:\n return self._mapping[type_]\n else:\n return self._mapping.get(type_, default)\n\n def get_type(self, alias, default=NOTHING):\n if default is NOTHING:\n return self._back[alias]\n else:\n return self._back.get(alias, default)\n\n\n_registry = TypesRegistry()\n\n\ndef get_registry():\n return _registry\n","repo_name":"Evgenus/versioned-data","sub_path":"versioned/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"73502720540","text":"# values = [1, 2, 3, 4, 5]\n# def list_sum():\n# total = 0\n# for value in values:\n# total += value\n# return total\n# print(list_sum())\n\ndicts = [{'name': 'kim', 'age': 12}, {'name': 'lee', 'age': 4}]\ndef dict_list_sum():\n total = 0\n for value in dicts:\n total += value['age']\n return total\nprint(dict_list_sum())\n\n# values = [[1], [2, 3], [4, 5, 6], [7, 8, 9, 10]]\n# def all_list_sum():\n# total = 0\n# for value in values:\n# for i in value:\n# total += i\n# return total\n# print(all_list_sum())","repo_name":"LeesangyeopSSAFY/TIL","sub_path":"01_Python/0721/ws0721.py","file_name":"ws0721.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"6588734457","text":"import tkinter.messagebox\nimport customtkinter\n\nclass Main():\n\n def load_main_page_view(app, ctk_frame):\n ctk_frame.grid_rowconfigure(0, minsize=10)\n ctk_frame.grid_rowconfigure(1, weight=1)\n ctk_frame.grid_columnconfigure(0, weight=1)\n\n app.ctkl_page_title = customtkinter.CTkLabel(master=ctk_frame,\n text=\"Sistema de Abertura de Nao Conformidades V2\",\n text_font=(\"Bebas Neue\", 2*app.CONTEXT))\n app.ctkl_page_title.grid(\n row=0, 
column=0, pady=app.CONTEXT, padx=app.CONTEXT)\n\n app.ctkf_form_content = customtkinter.CTkFrame(master=ctk_frame)\n app.ctkf_form_content.grid(row=1, column=0, sticky=\"nswe\",\n padx=app.CONTEXT, pady=app.CONTEXT)\n\n\n app.ctkf_form_content.grid_columnconfigure(0, weight=1)\n app.ctkf_form_content.grid_columnconfigure(1, weight=1)\n app.ctkf_form_content.grid_rowconfigure(0, minsize=10)\n app.ctkf_form_content.grid_rowconfigure(10, weight=1)\n\n app.ctkl_identity = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Identificacao\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_identity.grid(row=1, column=0, sticky=\"w\", pady=10, padx=0)\n\n app.ctks_identity = customtkinter.CTkOptionMenu(master=app.ctkf_form_content,\n values=[\"Light\", \"Dark\", \"System\"])\n app.ctks_identity.grid(row=2, column=0, sticky=\"we\", pady=10, padx=20)\n\n app.ctkl_description = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Descricao\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_description.grid(row=3, column=0, sticky=\"w\", pady=10, padx=0)\n app.ctke_description = customtkinter.CTkTextbox(master=app.ctkf_form_content,\n height=app.CONTEXT*5, fg_color=\"grey25\")\n app.ctke_description.insert(\n \"0.0\", \"Escreva aqui a descricao da nao conformidade\")\n app.ctke_description.grid(row=4, column=0, sticky=\"we\", pady=10, padx=20)\n\n app.ctkl_rootcause = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Analise de Causa Raiz\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_rootcause.grid(row=3, column=1, sticky=\"w\", pady=10, padx=20)\n app.ctke_rootcause = customtkinter.CTkTextbox(master=app.ctkf_form_content,\n height=app.CONTEXT*5, fg_color=\"grey25\")\n app.ctke_rootcause.insert(\n \"0.0\", \"Escreva aqui a sua analise de causa raiz\")\n app.ctke_rootcause.grid(row=4, column=1, sticky=\"we\", pady=10, padx=20)\n\n app.ctkl_solution = customtkinter.CTkLabel(master=app.ctkf_form_content,\n text=\"Acoes corretivas\",\n text_font=(\"Roboto Medium\", -16))\n app.ctkl_solution.grid(row=5, column=0, sticky=\"w\", pady=10, padx=20)\n\n app.ctke_solution = customtkinter.CTkTextbox(master=app.ctkf_form_content,\n height=app.CONTEXT*5, fg_color=\"grey25\")\n app.ctke_solution.insert(\n \"0.0\", \"Escreva aqui as acoes tomadas para solucionar o problema\")\n app.ctke_solution.grid(row=6, column=0, sticky=\"we\", pady=10, padx=20)\n\n app.ctkcheck_critical = customtkinter.CTkCheckBox(master=app.ctkf_form_content,\n text=\"Critico\")\n app.ctkcheck_critical.grid(row=6, column=1, sticky=\"we\", pady=10, padx=20)\n\n app.ctkb_create = customtkinter.CTkButton(master=app.ctkf_form_content,\n text=\"Criar\", command=app.save_result)\n app.ctkb_create.grid(row=7, column=0, pady=10, padx=20, sticky=\"nsew\")\n\n return ctk_frame\n\n \n","repo_name":"GuilhermeTagliati/Python_for_Non_Programmers","sub_path":"01_NON_CONFORMITIES_PROJECT/role model project/views/main_page.py","file_name":"main_page.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"43206327230","text":"# Currently unused function to implement env manifest copying for use as S3 source artifact\nimport boto3\nfrom io import BytesIO\nimport os\nimport zipfile\nimport logging\n\ndef lambda_handler():\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n account = boto3.client(\"sts\").get_caller_identity()[\"Account\"]\n region = os.environ[\"AWS_REGION\"]\n target_bucket_name = 
f\"serverbot2-pipeline-manifest-copy-{account}-{region}\"\n logger.debug(f\"Generated bucket name is {target_bucket_name}\")\n\n ssm = boto3.client(\"ssm\")\n manifest_string = ssm.get_parameter(Name=\"DeploymentEnvironmentManifest\")[\"Parameter\"][\"Value\"]\n # Avoid posting actual content to logs\n logger.debug(f\"Got manifest string, length={len(manifest_string)}\")\n\n logger.debug(\"Creating zip data...\")\n zipped_data = BytesIO()\n zip = zipfile.ZipFile(zipped_data, \"w\")\n zip.writestr(\"manifest.json\", manifest_string)\n zip.close()\n\n logger.debug(\"Pushing to S3...\")\n s3 = boto3.client(\"s3\")\n s3.put_object(\n Bucket=target_bucket_name,\n Key=\"manifest.zip\",\n Body=zipped_data\n )\n","repo_name":"HtyCorp/serverbot2-core","sub_path":"deployment/application-infrastructure/src/main/resources/manifest_copy_function/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"71962933341","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch as th\n\nimport data\nimport models\n\nplt.rcParams.update({\n \"text.usetex\": True,\n})\nplt.rcParams['axes.titlepad'] = 2\n\nsigmas = [0, .025, .05, .1, .2]\nN = len(sigmas)\nplt.rcParams[\"axes.prop_cycle\"] = plt.cycler(\n \"color\", plt.cm.coolwarm(np.linspace(0, 1, N))\n)\n\nkernel_size = 7\nn_f = kernel_size**2 - 1\nbs = 64 * 4000\npatch_size = kernel_size\ncolor = False\nrotate = True\nflip = True\nn_w = 63 * 2 - 1\nn_scales = 20\n\ndataset = data.BSDS(color, bs, patch_size, rotate, flip)\ngamma = 1.\nR_gsm = models.ProductGSM(\n n_f=n_f,\n bound_norm=False,\n zero_mean=True,\n ortho=True,\n n_scales=n_scales,\n kernel_size=kernel_size,\n K_init='random',\n).cuda()\nth.set_grad_enabled(False)\n\nstate = th.load('./out/gsm/state_final.pth')\nstate['w.w'] = state['w']\nR_gsm.load_state_dict(state)\nR_gmm = models.ProductGMM(\n n_f=n_f,\n bound_norm=False,\n zero_mean=True,\n symmetric=True,\n ortho=True,\n vmin=-1,\n vmax=1,\n kernel_size=kernel_size,\n K_init='random',\n n_w=n_w,\n w_init='student-t',\n sigmas=th.Tensor(sigmas)\n).cuda()\nth.set_grad_enabled(False)\n\nstate = th.load('./out/patch/state_final.pth')\nR_gmm.load_state_dict(state)\nylims_f_ = {\n 'gmm': [-.2, 10],\n 'gsm': [-.2, 6],\n}\nylims_fp_ = {\n 'gmm': [-20, 20],\n 'gsm': [-9.5, 9.5],\n}\nylims_tweedie_ = {\n 'gmm': [-1, 1],\n 'gsm': [-1, 1],\n}\nfor y in dataset:\n break\ndm = 0.01\nbin_edges = th.linspace(\n -gamma - dm / 2,\n gamma + dm / 2,\n n_w + 1,\n).cuda()\nx_hist = (bin_edges[1:] + bin_edges[:-1]) / 2\nfor R, name in zip([R_gmm, R_gsm], ['gmm', 'gsm']):\n hist = th.zeros((len(sigmas), n_f, n_w)).cuda()\n ylims_f = ylims_f_[name]\n ylims_fp = ylims_fp_[name]\n ylims_tweedie = ylims_tweedie_[name]\n for i_s, sigma in enumerate(sigmas):\n R.set_sigma(sigma)\n Kx = R.K(y + sigma * th.randn_like(y))\n\n for k in range(n_f):\n hist[\n i_s,\n k] = th.histogram(Kx[:, k].reshape(-1).cpu(),\n bin_edges.cpu())[0].to('cuda')\n\n fs = []\n fps = []\n K = R.K.weight.data\n scale = 1.1\n\n n_points = 20\n x = th.linspace(\n -scale * gamma,\n scale * gamma,\n n_points**2,\n dtype=K.dtype,\n device=K.device,\n )[None].repeat(n_f, 1)\n for sig in sigmas:\n R.set_sigma(sig)\n f, fp = R.pot_act(x.view(1, n_f, n_points, n_points))\n f = f.view(n_f, n_points * n_points)\n fp = fp.view(n_f, n_points * n_points)\n fs.append(f)\n fps.append(fp)\n\n x = x[0]\n\n fs = th.stack(fs).permute(1, 0, 2)\n fps = 
th.stack(fps).permute(1, 0, 2)\n norm_k = (K**2).sum((1, 2, 3))\n indices = th.sort(norm_k)[1]\n K = K[indices]\n fs = fs[indices]\n fps = fps[indices]\n hist = hist.permute(1, 0, 2)\n hist = hist[indices]\n\n fig_k, ax_k = plt.subplots(3, 16, figsize=(16, 3))\n fig_f, ax_f = plt.subplots(3, 16, figsize=(16, 3))\n fig_fp, ax_fp = plt.subplots(3, 16, figsize=(16, 3))\n fig_tweedie, ax_tweedie = plt.subplots(3, 16, figsize=(16, 3))\n fig_h, ax_h = plt.subplots(3, 16, figsize=(16, 3))\n\n for i, (ff, ffp, hh, kk) in enumerate(zip(fs, fps, hist, K)):\n r, c = divmod(i, 16)\n for sigma, fff, fffp, hhh in zip(sigmas, ff, ffp, hh):\n neg_log = -th.log(hhh).detach().cpu().numpy()\n neg_log -= neg_log.min()\n fff -= fff.min()\n ax_h[r, c].plot(x_hist.cpu(), neg_log)\n ax_f[r, c].plot(x.cpu(), fff.cpu())\n ax_fp[r, c].plot(x.cpu(), fffp.cpu())\n ax_tweedie[r, c].plot(\n x.cpu(),\n x.cpu().numpy() - sigma**2 * fffp.cpu().numpy()\n )\n for axx, ylims in zip([ax_f, ax_h, ax_fp, ax_tweedie],\n [ylims_f, ylims_f, ylims_fp, ylims_tweedie]):\n axx[r, c].set_ylim(ylims)\n axx[r, c].grid(True)\n if (r, c) == (2, 0):\n xt = axx[r, c].get_xticklabels()\n if (r, c) != (2, 0):\n axx[r, c].tick_params(tick1On=False)\n axx[r, c].set_xticklabels([])\n axx[r, c].set_yticklabels([])\n axx[r, c].set_frame_on(False)\n\n k_plot = ax_k[r, c].imshow(kk.cpu().squeeze(), cmap='gray')\n ax_k[r, c].axis('off')\n ax_k[r, c].set_title(\n f'\\\\( [{kk.min().item()*10:.1f}, {kk.max().item()*10:.1f}] \\\\)',\n fontsize=8\n )\n\nplt.show()\n","repo_name":"VLOGroup/PoGMDM","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":4552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"34234050003","text":"from ..models import ETL_Dataset\nfrom ..serializers import ETL_DatasetSerializer\n\n\nclass ETL_DatasetService():\n\n @staticmethod\n def is_datasetname_avalaible(input__datasetname):\n try:\n existing_etl_dataset = ETL_Dataset.objects.filter(dataset_name=str(input__datasetname).strip())[0]\n return False\n except:\n return True\n\n\n @staticmethod\n def does_etl_dataset_exist__by_uuid(input__uuid):\n try:\n existing_etl_dataset = ETL_Dataset.objects.filter(uuid=str(input__uuid).strip())[0]\n return True\n except:\n return False\n\n @staticmethod\n def create_etl_dataset_from_datasetname_only(input__datasetname, created_by=\"create_dataset_from_datasetname_only\"):\n try:\n new_etl_dataset = ETL_Dataset()\n new_etl_dataset.dataset_name = str(input__datasetname).strip()\n new_etl_dataset.created_by = str(created_by).strip()\n new_etl_dataset.save()\n\n return True, new_etl_dataset.uuid\n except:\n return False, \"\"\n\n @staticmethod\n def get_all_etl_datasets_preview_list():\n ret_list = []\n try:\n all_datasets = ETL_Dataset.objects.all()\n for current_dataset in all_datasets:\n ret_list.append(ETL_DatasetSerializer(current_dataset).data)\n except:\n ret_list = []\n return ret_list\n\n @staticmethod\n def is_a_valid_subtype_string(input__string):\n try:\n input__string = str(input__string).strip()\n valid_subtypes_string_list = ETL_DatasetService.get_all_subtypes_as_string_array()\n if input__string in valid_subtypes_string_list:\n return True\n except:\n return False\n\n @staticmethod\n def get_all_subtypes_as_string_array():\n return list(\n ETL_Dataset.objects.order_by('dataset_subtype').values_list('dataset_subtype', 
flat=True).distinct())\n","repo_name":"SERVIR/ClimateSERV2","sub_path":"api/services/etl_dataset_service.py","file_name":"etl_dataset_service.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"69"} +{"seq_id":"18728927783","text":"from abc import ABCMeta, abstractmethod\n\nfrom domain.model.article import Article, ArticleRepository\n\n\nclass Clock(metaclass=ABCMeta):\n @abstractmethod\n def now(self) -> float:\n pass\n\n\nclass ArticleService:\n def __init__(self, article_repository: ArticleRepository, clock):\n self._article_repository = article_repository\n self._clock = clock\n\n def create_article(self, title, description, body, author_id) -> Article:\n slug = title.lower().replace(' ', '-')\n if self._article_repository.exists_by_slug(slug):\n raise ArticleExistedException(\"the article with slug {} already exists\".format(slug))\n article = Article(slug, title, description, body, author_id)\n now = self._clock.now()\n article.created_at = now\n article.updated_at = now\n return self._article_repository.save(article)\n\n\nclass ArticleExistedException(Exception):\n def __init__(self, message):\n super().__init__(message)\n","repo_name":"mgxian/implementation-patterns-python","sub_path":"domain/service/article.py","file_name":"article.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"73627182941","text":"class Solution:\n def getConcatenation(self, word: str) -> int:\n asciiNums = \"\"\n for e in word:\n asciiNums += str(ord(e)-97)\n # print(\"ascii num: \"+asciiNums)\n return int(asciiNums)\n def isSumEqual(self, firstWord: str, secondWord: str, targetWord: str) -> bool:\n firstNum = self.getConcatenation(firstWord)\n secondNum = self.getConcatenation(secondWord)\n targetNum = self.getConcatenation(targetWord)\n if firstNum + secondNum == targetNum:\n return True\n return False\n\nX = Solution() \n\nfirstWord = \"acb\"\nsecondWord = \"cba\"\ntargetWord = \"cdb\"\nprint(X.isSumEqual(firstWord, secondWord, targetWord))\n\nfirstWord = \"aaa\"\nsecondWord = \"a\"\ntargetWord = \"aab\"\nprint(X.isSumEqual(firstWord, secondWord, targetWord))\n\nfirstWord = \"aaa\"\nsecondWord = \"a\"\ntargetWord = \"aaaa\"\nprint(X.isSumEqual(firstWord, secondWord, targetWord))","repo_name":"awesome-liuxiao/leetcodesolution","sub_path":"1880_chkWordEqSumOfTwoWords.py","file_name":"1880_chkWordEqSumOfTwoWords.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"10982292679","text":"from typing import final\nfrom cursor import CursorDelPool\nimport xml.etree.ElementTree as ET\nfrom logger_base import log\nfrom catalogo import Catalogo\n\nclass Conversor:\n\n\n _INSERTAR = 'INSERT INTO catalogo(common, botanical, zone, light, price, availability) VALUES(%s,%s,%s,%s,%s,%s)'\n _xml_data = None\n\n\n @classmethod\n def insertar(cls, planta):\n with CursorDelPool() as cursor:\n valores = (planta.find(\"COMMON\").text, planta.find(\"BOTANICAL\").text, planta.find(\"ZONE\").text, planta.find(\"LIGHT\").text, planta.find(\"PRICE\").text, planta.find(\"AVAILABILITY\").text)\n #print(valores)\n cursor.execute(cls._INSERTAR, valores)\n return cursor.rowcount\n\n @classmethod\n def leerxml(cls, xml_file):\n try:\n if xml_file.readable():\n cls._xml_data = ET.fromstring(xml_file.read())\n lista_plantas = 
cls._xml_data.findall('PLANT')\n for planta in lista_plantas:\n cls.insertar(planta)\n log.debug('Datos insertados correctamente')\n else:\n log.debug(False)\n except Exception as e:\n log.debug(f'Se ha producido un error {e}')\n finally:\n xml_file.close()\n\n\nif __name__ == '__main__':\n\n ruta = str(input('Ruta de archivo a ingresar: '))\n archivo = open(ruta)\n conversor = Conversor.leerxml(archivo)\n\n","repo_name":"romerodeveloper/Conversor_Archivos","sub_path":"conversor_xml.py","file_name":"conversor_xml.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16624564866","text":"tcs = int(input())\nfor tc in range(tcs):\n\tn = int(input())\n\tA = []\n\tC = []\n\tids = []\n\tfor _ in range(n):\n\t\tarr = input().split()\n\t\tcolor = arr[0]\n\t\tdurability = int(arr[1])\n\t\tid = int(arr[2])\n\t\tA.append((color, id))\n\t\tC.append((durability, id))\n\t\tids.append(id)\n\n\tA.sort()\n\tC.sort()\n\n\tres = 0\n\tfor i in range(n):\n\t\tif A[i][1] == C[i][1]:\n\t\t\tres += 1\n\t\n\tprint('Case #{}: {}'.format(tc + 1, res))","repo_name":"chungwwei/cp-qs","sub_path":"round_f_2022/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26731788071","text":"from pytube import YouTube\nfrom datetime import datetime\nfrom mysql.connector.types import RowType\nimport time\nimport logging\nimport sys\nimport os\n\n\nsys.path.append(os.getcwd())\nfrom Models.DatabaseHandler import Database_Handler\nfrom Models.Logger import Extractio_Logger\n\n\n\nclass YouTube_Downloader:\n \"\"\"\n It will handle every operations related to YouTube.\n \"\"\"\n __uniform_resource_locator: str\n \"\"\"\n The uniform resource locator to be searched.\n \"\"\"\n __video: YouTube\n \"\"\"\n Core developer interface for pytube.\n \"\"\"\n __title: str\n \"\"\"\n The title of the video.\n \"\"\"\n __identifier: str\n \"\"\"\n The identifier of the video.\n \"\"\"\n __length: int\n \"\"\"\n The length of the video in seconds.\n \"\"\"\n __duration: str\n \"\"\"\n The duration of the video in the format of HH:mm:ss.\n \"\"\"\n __published_at: str | datetime | None\n \"\"\"\n The date at which the video has been published.\n \"\"\"\n __database_handler: Database_Handler\n \"\"\"\n It is the object relational mapper that will be used to\n simplify the process to entering queries.\n \"\"\"\n __author: str\n \"\"\"\n The author of the video/music.\n \"\"\"\n __media_identifier: int\n \"\"\"\n The media type for the system.\n \"\"\"\n __timestamp: str\n \"\"\"\n The timestamp at which the session has been created.\n \"\"\"\n __directory: str\n \"\"\"\n The directory of the media files.\n \"\"\"\n __logger: Extractio_Logger\n \"\"\"\n The logger that will all the action of the application.\n \"\"\"\n\n def __init__(self, uniform_resource_locator: str, media_identifier: int):\n \"\"\"\n Instantiating the class and launching the operations needed.\n\n Parameters:\n uniform_resource_locator: (string): The uniform resource locator to be searched.\n media_identifier: (int): The media type for the system.\n \"\"\"\n self.setLogger(Extractio_Logger())\n self.getLogger().setLogger(logging.getLogger(__name__))\n self.setDatabaseHandler(Database_Handler())\n self.setUniformResourceLocator(uniform_resource_locator)\n self.setMediaIdentifier(media_identifier)\n self.getLogger().inform(\"The Downloader has been 
initialized\")\n\n def getUniformResourceLocator(self) -> str:\n return self.__uniform_resource_locator\n\n def setUniformResourceLocator(self, uniform_resource_locator: str) -> None:\n self.__uniform_resource_locator = uniform_resource_locator\n\n def getVideo(self) -> YouTube:\n return self.__video\n\n def setVideo(self, video: YouTube) -> None:\n self.__video = video\n\n def getTitle(self) -> str:\n return self.__title\n\n def setTitle(self, title: str) -> None:\n self.__title = title\n\n def getIdentifier(self) -> str:\n return self.__identifier\n\n def setIdentifier(self, identifier: str) -> None:\n self.__identifier = identifier\n\n def getLength(self) -> int:\n return self.__length\n\n def setLength(self, length: int) -> None:\n self.__length = length\n\n def getDuration(self) -> str:\n return self.__duration\n\n def setDuration(self, duration: str) -> None:\n self.__duration = duration\n\n def getPublishedAt(self) -> str | datetime | None:\n return self.__published_at\n\n def setPublishedAt(self, published_at: str | datetime | None) -> None:\n self.__published_at = str(published_at)\n\n def getDatabaseHandler(self) -> Database_Handler:\n return self.__database_handler\n\n def setDatabaseHandler(self, database_handler: Database_Handler) -> None:\n self.__database_handler = database_handler\n\n def getAuthor(self) -> str:\n return self.__author\n\n def setAuthor(self, author: str) -> None:\n self.__author = author\n\n def getMediaIdentifier(self) -> int:\n return self.__media_identifier\n\n def setMediaIdentifier(self, media_identifier: int) -> None:\n self.__media_identifier = media_identifier\n\n def getTimestamp(self) -> str:\n return self.__timestamp\n\n def setTimestamp(self, timestamp: str) -> None:\n self.__timestamp = timestamp\n\n def getDirectory(self) -> str:\n return self.__directory\n\n def setDirectory(self, directory: str) -> None:\n self.__directory = directory\n\n def getLogger(self) -> Extractio_Logger:\n return self.__logger\n\n def setLogger(self, logger: Extractio_Logger) -> None:\n self.__logger = logger\n\n def retrieveIdentifier(self, identifier: str) -> str:\n \"\"\"\n Retrieving the identifier of the content in the condition\n that it is in a playlist.\n\n Parameters:\n identifier: (string): The ID of the content.\n\n Return:\n (string)\n \"\"\"\n if \"&\" in identifier:\n return identifier.rsplit(\"&\", 1)[0]\n else:\n return identifier\n\n def search(self) -> dict[str, str | int | None]:\n \"\"\"\n Searching for the video in YouTube.\n\n Return:\n (object)\n \"\"\"\n response: dict[str, str | int | None]\n audio_file: str | None\n video_file: str | None\n self.setVideo(YouTube(self.getUniformResourceLocator()))\n self.setIdentifier(self.getUniformResourceLocator())\n if \"youtube\" in self.getUniformResourceLocator():\n self.setIdentifier(\n self.retrieveIdentifier(\n self.getIdentifier().replace(\n \"https://www.youtube.com/watch?v=\",\n \"\"\n )\n )\n )\n else:\n self.setIdentifier(\n self.getIdentifier().replace(\n \"https://youtu.be/\",\n \"\"\n ).rsplit(\"?\")[0]\n )\n meta_data = self.getYouTube()\n if meta_data[\"status\"] == 200:\n self.setLength(int(meta_data[\"data\"][0][4])) # type: ignore\n self.setPublishedAt(str(meta_data[\"data\"][0][3])) # type: ignore\n self.setAuthor(str(meta_data[\"data\"][0][0])) # type: ignore\n self.setTitle(str(meta_data[\"data\"][0][1])) # type: ignore\n self.setDuration(\n time.strftime(\"%H:%M:%S\", time.gmtime(self.getLength()))\n )\n File_Location = self._getFileLocations(meta_data[\"data\"]) # type: ignore\n 
audio_file = File_Location[\"audio_file\"]\n video_file = File_Location[\"video_file\"]\n else:\n self.setLength(self.getVideo().length)\n self.setPublishedAt(self.getVideo().publish_date)\n self.setAuthor(self.getVideo().author)\n self.setTitle(self.getVideo().title)\n self.setDuration(\n time.strftime(\"%H:%M:%S\", time.gmtime(self.getLength()))\n )\n audio_file = None\n video_file = None\n self.postYouTube()\n response = {\n \"uniform_resource_locator\": self.getUniformResourceLocator(),\n \"author\": self.getAuthor(),\n \"title\": self.getTitle(),\n \"identifier\": self.getIdentifier(),\n \"author_channel\": self.getVideo().channel_url,\n \"views\": self.getVideo().views,\n \"published_at\": str(self.getPublishedAt()),\n \"thumbnail\": self.getVideo().thumbnail_url,\n \"duration\": self.getDuration(),\n \"audio_file\": audio_file,\n \"video_file\": video_file\n }\n return response\n\n def _getFileLocations(self, result_set: list[RowType]) -> dict[str, str | None]:\n \"\"\"\n Extracting the file locations on the application's directory.\n\n Parameters:\n result_set: (array): The data from the database server.\n\n Return:\n (string)\n \"\"\"\n response: dict[str, str | None]\n if len(list(result_set)) == 2:\n response = {\n \"audio_file\": str(result_set[0][5]),\n \"video_file\": str(result_set[1][5])\n }\n else:\n response = {\n \"audio_file\": None,\n \"video_file\": None\n }\n return response\n\n def getYouTube(self) -> dict[str, int | list[RowType] | str]:\n \"\"\"\n Retrieving the metadata from the YouTube table.\n\n Return:\n (object)\n \"\"\"\n response: dict[str, int | list[RowType] | str]\n filter_parameters = tuple([self.getIdentifier()])\n media = self.getDatabaseHandler().get_data(\n parameters=filter_parameters,\n table_name=\"YouTube\",\n join_condition=\"MediaFile ON MediaFile.YouTube = YouTube.identifier\",\n filter_condition=\"YouTube.identifier = %s\",\n column_names=\"author, title, YouTube.identifier, published_at, length, location\",\n sort_condition=\"MediaFile.identifier ASC\",\n limit_condition=2\n )\n self.setTimestamp(datetime.now().strftime(\"%Y-%m-%d - %H:%M:%S\"))\n self.getLogger().inform(\n f\"The media content has been retrieved from the database server!\\nContent Amount: {len(media)}\\nCurrent Media: {media}\"\n )\n if len(media) == 0:\n response = {\n 'status': 404,\n 'data': media,\n 'timestamp': self.getTimestamp()\n }\n else:\n response = {\n 'status': 200,\n 'data': media,\n 'timestamp': self.getTimestamp()\n }\n return response\n\n def postYouTube(self) -> None:\n \"\"\"\n Creating a record for the media with its data.\n\n Return:\n (void)\n \"\"\"\n data = (\n self.getIdentifier(),\n self.getLength(),\n self.getPublishedAt(),\n self.getAuthor(),\n self.getTitle(),\n self.getMediaIdentifier()\n )\n self.getLogger().inform(\n f\"Data to be inserted into the database server.\\nIdentifier: {self.getIdentifier()}\\nLength: {self.getLength()}\\nPublished At: {self.getPublishedAt()}\\nAuthor: {self.getAuthor()}\\nTitle: {self.getTitle()}\\nMedia's Identifier: {self.getMediaIdentifier()}\"\n )\n self.getDatabaseHandler().post_data(\n table=\"YouTube\",\n columns=\"identifier, length, published_at, author, title, Media\",\n values=\"%s, %s, %s, %s, %s, %s\",\n parameters=data\n )\n","repo_name":"DONALDBZR/ytd_web_app","sub_path":"Auto/Classes/YouTubeDownloader.py","file_name":"YouTubeDownloader.py","file_ext":"py","file_size_in_byte":10545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} 
+{"seq_id":"37981257780","text":"# -*- coding: gb18030 -*-\n#\n# $Id: $\n\n\"\"\"\n\"\"\"\nfrom Function import Function\nfrom bwdebug import *\nimport BigWorld\nimport csconst\nimport csstatus\nimport csdefine\nimport utils\n\nclass FuncLeavePrison( Function ):\n\t\"\"\"\n\t离开监狱\n\t\"\"\"\n\tdef __init__( self, section ):\n\t\t\"\"\"\n\t\tparam1: CLASS_*\n\n\t\t@param param: 由实现类自己解释格式; param1 - param5\n\t\t@type param: pyDataSection\n\t\t\"\"\"\n\t\tself.param01 = section.readInt( \"param1\" ) # 小于某个pk值才显示选项\n\t\tself.spaceName = section.readString( \"param2\" )\n\t\tself.pos = None\n\t\tself.direction = None\n\t\t\n\t\tposition = section.readString( \"param3\" )\n\t\tpos = utils.vector3TypeConvert( position )\n\t\tif pos is None:\n\t\t\tERROR_MSG( \"Vector3 Type Error:%s Bad format '%s' in section param3 \" % ( self.__class__.__name__, position ) )\n\t\telse:\n\t\t\tself.pos = pos\n\t\t\n\t\tdirection = section.readString( \"param4\" )\n\t\tdir = utils.vector3TypeConvert( direction )\n\t\tif dir is None:\n\t\t\tERROR_MSG( \"Vector3 Type Error:%s Bad format '%s' in section param4 \" % ( self.__class__.__name__, direction ) )\n\t\telse:\n\t\t\tself.direction = dir\n\n\tdef do( self, player, talkEntity = None ):\n\t\t\"\"\"\n\t\t执行一个功能\n\n\t\t@param player: 玩家\n\t\t@type player: Entity\n\t\t@param talkEntity: 一个扩展的参数\n\t\t@type talkEntity: entity\n\t\t@return: None\n\t\t\"\"\"\n\t\tplayer.endGossip( talkEntity )\n\t\tif player.pkValue >= self.param01:\n\t\t\tplayer.statusMessage( csstatus.PRISON_LEAVE_VALID, self.param01 )\n\t\t\treturn\n\n\t\tplayer.setTemp( \"leavePrison\", True )\n\t\tplayer.gotoSpace( self.spaceName, self.pos, self.direction )\n\n\tdef valid( self, player, talkEntity = None ):\n\t\t\"\"\"\n\t\t检查一个功能是否可以使用\n\n\t\t@param player: 玩家\n\t\t@type player: Entity\n\t\t@param talkEntity: 一个扩展的参数\n\t\t@type talkEntity: entity\n\t\t@return: True/False\n\t\t@rtype:\tbool\n\t\t\"\"\"\n\t\treturn True\n\n\n\n#","repo_name":"mudsave/csol2_enities_45541","sub_path":"cell/Resource/FuncsModule/FuncLeavePrison.py","file_name":"FuncLeavePrison.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"13715946004","text":"import sys\n\ninput = sys.stdin.readline\n\n\ndef sort_order(i):\n return (-i[1], i[2], -i[3], i[0])\n\n\nN = int(input().rstrip())\nstudents = [0] * N\nfor i in range(N):\n name, kor, eng, math = input().split()\n kor, eng, math = map(int, [kor, eng, math])\n students[i] = [name, kor, eng, math]\nstudents.sort(key=sort_order)\nfor student in students:\n print(student[0])\n","repo_name":"nnoobbaagguu/Algorithm","sub_path":"Baekjoon Online Judge/10825.py","file_name":"10825.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"71065842461","text":"from django.conf.urls import patterns, include, url\nfrom flights.views import Index, Detail, Contact, Filter, Notifications, SignIn, SignUp, SignOut\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.decorators import login_required\n\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nfrom flights.api import QPXResource, SliceResource\nfrom location.api import CityResource, CountryResource, CurrencyResource\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/doc/', 
include('django.contrib.admindocs.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', Index.as_view(), name='index'),\n url(r'^contact$', Contact.as_view(), name='contact'),\n url(r'^notifications$', Notifications.as_view(), name='notifications'),\n url(r'^(?P\\d+)/$', Detail.as_view(), name='detail'),\n url(r'^price/(?P\\d+)/$', Filter.as_view(), name='filter'),\n url(r'^signin/$', SignIn.as_view(), name='signin'),\n url(r'^signup/$', SignUp.as_view(), name='signup'),\n url(r'^price/(?P\\d+)/$', Filter.as_view(), name='filter'),\n url(r'^leaving-date/(?P[0-9\\-_]+)/$', Filter.as_view()),\n url(r'^city/(?P[A-z]+)/$', Filter.as_view()),\n url(r'^signout/$', SignOut.as_view()),\n url(r'^api/qpx/', include(QPXResource.urls())),\n url(r'^api/slices/', include(SliceResource.urls())),\n url(r'^api/currenies/', include(CurrencyResource.urls())),\n url(r'^api/countries/', include(CountryResource.urls())),\n url(r'^api/cities/', include(CityResource.urls())),\n url(r'^thanks',TemplateView.as_view(template_name='flights/generic.html'),name='thanks'),\n url(r'^account',login_required(TemplateView.as_view(template_name='flights/generic.html')),name='thanks'),\n\n ) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","repo_name":"colins44/passportfriday","sub_path":"passportfridays/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"17172138645","text":"#!/usr/bin/env python3\r\n# -*- coding:utf-8 -*-\r\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\r\n\r\nimport os\r\nimport random\r\n\r\nimport torch\r\nimport torch.distributed as dist\r\nimport torch.nn as nn\r\n\r\nfrom .base_exp import BaseExp\r\n\r\n\r\nclass Exp(BaseExp):\r\n def __init__(self):\r\n super().__init__()\r\n\r\n # ---------------- model config ---------------- #\r\n self.num_classes = 80\r\n self.depth = 1.00\r\n self.width = 1.00\r\n\r\n # ---------------- dataloader config ---------------- #\r\n # set worker to 4 for shorter dataloader init time\r\n self.data_num_workers = 4\r\n self.input_size = (640, 640)\r\n # Actual multiscale ranges: [640-5*32, 640+5*32].\r\n # To disable multiscale training, set the\r\n # self.multiscale_range to 0.\r\n self.multiscale_range = 5\r\n # You can uncomment this line to specify a multiscale range\r\n # self.random_size = (14, 26)\r\n self.data_dir = None\r\n self.train_ann = \"instances_train2017.json\"\r\n self.val_ann = \"instances_val2017.json\"\r\n\r\n # --------------- transform config ----------------- #\r\n self.mosaic_prob = 1.0\r\n self.mixup_prob = 1.0\r\n self.hsv_prob = 1.0\r\n self.flip_prob = 0.5\r\n self.degrees = 10.0\r\n self.translate = 0.1\r\n self.mosaic_scale = (0.1, 2)\r\n self.mixup_scale = (0.5, 1.5)\r\n self.shear = 2.0\r\n self.perspective = 0.0\r\n self.enable_mixup = True\r\n\r\n # -------------- training config --------------------- #\r\n self.warmup_epochs = 5\r\n self.max_epoch = 300\r\n self.warmup_lr = 0\r\n self.basic_lr_per_img = 0.01 / 64.0\r\n self.scheduler = \"yoloxwarmcos\"\r\n self.no_aug_epochs = 15\r\n self.min_lr_ratio = 0.05\r\n self.ema = True\r\n\r\n self.weight_decay = 5e-4\r\n self.momentum = 0.9\r\n self.print_interval = 10\r\n self.eval_interval = 10\r\n self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(\".\")[0]\r\n\r\n # ----------------- testing config ------------------ #\r\n self.test_size = (640, 640)\r\n self.test_conf = 0.01\r\n self.nmsthre = 
0.65\r\n\r\n def get_model(self):\r\n from models import YOLOX, YOLOPAFPN, YOLOXHead\r\n\r\n def init_yolo(M):\r\n for m in M.modules():\r\n if isinstance(m, nn.BatchNorm2d):\r\n m.eps = 1e-3\r\n m.momentum = 0.03\r\n\r\n if getattr(self, \"model\", None) is None:\r\n in_channels = [256, 512, 1024]\r\n backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)\r\n head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels)\r\n self.model = YOLOX(backbone, head)\r\n\r\n self.model.apply(init_yolo)\r\n self.model.head.initialize_biases(1e-2)\r\n return self.model\r\n","repo_name":"xiyie/yolox","sub_path":"test/exp/yolox_base.py","file_name":"yolox_base.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"39935198465","text":"n,m = [int(x) for x in input().split()]\n\nadj = [[] for _ in range(n + 1)]\nfor _ in range(m):\n\tu, v = [int(x) for x in input().split()]\n\tadj[u].append(v)\n\tadj[v].append(u)\n\n\n\nring = True\nfor x in adj[1:n+1]:\n\tif len(x) != 2:\n\t\tring = False\n\nlargest = len(adj[1])\nfor x in adj[1:n+1]:\n\tif len(x)>=largest:\n\t\tlargest = len(x)\nvisited = [False for i in range(n+1)]\nparent = [-1 for i in range(n+1)]\nstack = []\nstack.append(1)\nparent[1] = None\ncycle=False\n\nwhile (len(stack)):\n\ts = stack[-1]\n\tstack.pop()\n\tvisited[s] = True\n\tfor node in adj[s]:\n\t\tif (not visited[node]):\n\t\t\tstack.append(node)\n\t\t\tparent[node] = s\n\t\telif parent[s]!=node:\n\t\t\tcycle=True\n\nif ring and cycle:\n\tprint(\"ring topology\")\nelse:\n\tdistance = [-1 for _ in range(n + 1)]\n\tdistance[1] = 0\n\tshortest_path_tree_parent = [-1 for _ in range(n + 1)]\n\tqueue = [1]\n\tfor u in queue:\n\t\tfor v in adj[u]:\n\t\t\tif distance[v] == -1:\n\t\t\t\tdistance[v] = distance[u] + 1\n\t\t\t\tshortest_path_tree_parent[v] = u\n\t\t\t\tqueue.append(v)\n\t\n\tif largest==2:\n\t\tprint(\"bus topology\")\n\telse:\n\t\tif distance[n]<=2 and largest==n-1 and not cycle:\n\t\t\tprint(\"star topology\")\n\t\telse:\n\t\t\tprint(\"unknown topology\")\n\t\t\t\n\t\n\n","repo_name":"LamberlainMuli/CompetitiveProgramming","sub_path":"Progvar/AP CSCI 21 2022/Week 5/CF292B.py","file_name":"CF292B.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"3283889226","text":"import cv2\nimport torchvision.models as models\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport cv2\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.transforms as tt\n\nmodel_state = torch.load('models/facial_expression.pth')\nclass_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\n#class_to_label = {0 :'Angry', 1 : 'Disgust', 2:'Fear', 3 :'Happy', 4:'Sad', 5:'Surprise', 6:'Neutral'}\nface_classifier = cv2.CascadeClassifier(\"models/haarcascade_frontalface_default.xml\")\n\nclass SeparableConv2d(nn.Module):\n\n def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):\n super(SeparableConv2d, self).__init__()\n self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, groups=in_channels,\n bias=bias)\n self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)\n\n def forward(self, x):\n x = self.depthwise(x)\n x = self.pointwise(x)\n return x\n\n\nclass ResidualBlock(nn.Module):\n\n def __init__(self, 
in_channeld, out_channels):\n super(ResidualBlock, self).__init__()\n\n self.residual_conv = nn.Conv2d(in_channels=in_channeld, out_channels=out_channels, kernel_size=1, stride=2,\n bias=False)\n self.residual_bn = nn.BatchNorm2d(out_channels, momentum=0.99, eps=1e-3)\n\n self.sepConv1 = SeparableConv2d(in_channels=in_channeld, out_channels=out_channels, kernel_size=3, bias=False,\n padding=1)\n self.bn1 = nn.BatchNorm2d(out_channels, momentum=0.99, eps=1e-3)\n self.relu = nn.ReLU()\n\n self.sepConv2 = SeparableConv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, bias=False,\n padding=1)\n self.bn2 = nn.BatchNorm2d(out_channels, momentum=0.99, eps=1e-3)\n self.maxp = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n def forward(self, x):\n res = self.residual_conv(x)\n res = self.residual_bn(res)\n x = self.sepConv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.sepConv2(x)\n x = self.bn2(x)\n x = self.maxp(x)\n return res + x\n\n\nclass FaceCnnModel(nn.Module):\n\n def __init__(self, in_channels, num_classes):\n super().__init__()\n\n self.conv1 = nn.Conv2d(in_channels=1, out_channels=8, kernel_size=3, stride=1, bias=False)\n self.bn1 = nn.BatchNorm2d(8, affine=True, momentum=0.99, eps=1e-3)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3, stride=1, bias=False)\n self.bn2 = nn.BatchNorm2d(8, momentum=0.99, eps=1e-3)\n self.relu2 = nn.ReLU()\n\n self.module1 = ResidualBlock(in_channeld=8, out_channels=16)\n self.module2 = ResidualBlock(in_channeld=16, out_channels=32)\n self.module3 = ResidualBlock(in_channeld=32, out_channels=64)\n self.module4 = ResidualBlock(in_channeld=64, out_channels=128)\n\n self.last_conv = nn.Conv2d(in_channels=128, out_channels=7, kernel_size=3, padding=1)\n self.avgp = nn.AdaptiveAvgPool2d((1, 1))\n\n def forward(self, input):\n x = input\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu1(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.relu2(x)\n x = self.module1(x)\n x = self.module2(x)\n x = self.module3(x)\n x = self.module4(x)\n x = self.last_conv(x)\n x = self.avgp(x)\n x = x.view((x.shape[0], -1))\n return x\n\n\nmodel = FaceCnnModel(1,len(class_labels))\nmodel.load_state_dict(model_state)\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n # Grab a single frame of video\n ret, frame = cap.read()\n #frame = cv2.flip(frame, 1)\n labels = []\n gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n faces = face_classifier.detectMultiScale(gray, 1.3, 5)\n\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_gray = cv2.resize(roi_gray,(48, 48), interpolation=cv2.INTER_AREA)\n\n if np.sum([roi_gray])!= 0:\n roi = tt.functional.to_pil_image(roi_gray)\n roi = tt.functional.to_grayscale(roi)\n roi = tt.ToTensor()(roi).unsqueeze(0)\n\n # make a prediction on the ROI\n tensor = model(roi)\n pred = torch.max(tensor, dim=1)[1].tolist()\n label = class_labels[pred[0]]\n \n label_position = (x, y)\n cv2.putText(frame, label, label_position, cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)\n else:\n cv2.putText(frame, 'No Face Found', (20, 60), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 255, 0), 3)\n \n cv2.imshow('Emotion Detector', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n 
break\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"AayushBangroo/facialExpression","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"6002301011","text":"# Need to install first (if some packages have not been installed previously)\n# >> python3 -m pip install beautifulsoup4\n# or\n# >> pip install beautifulsoup4\n# \n# >> python3 -m pip install lxml\n# or\n# >> pip install lxml\n\nfrom bs4 import BeautifulSoup\nimport csv\n\ntry:\n fileName = \"text_5_var_12\"\n with open(\"assets/data/5/\"+fileName, encoding=\"utf-8\", mode=\"r\") as htmlFile:\n reader = htmlFile.read()\n htmlParse = BeautifulSoup(reader, 'lxml') \n tableBody = htmlParse.find('table')\n tableRows = tableBody.find_all('tr')\n allData = []\n\n for row in tableRows:\n tableCols = row.find_all('td') or row.find_all('th')\n data = (tableCols[0].text, tableCols[1].text, tableCols[2].text, tableCols[3].text, tableCols[4].text)\n allData.append(data)\n \n with open(\"assets/result/5/\"+fileName+\"_result.csv\", 'w', newline='') as csvfile:\n writerCsv = csv.writer(csvfile, delimiter=',')\n for data in allData:\n writerCsv.writerow([data[0]] + [str(data[1])] + [data[2]] + [data[3]] + [data[4]])\n \nexcept Exception as e:\n print(e)","repo_name":"kuldii/my_first_pyton","sub_path":"lib/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5685397542","text":"from keras.models import Model\n\n\ndef lock_layers(model, indices):\n \"\"\"Locks layers on respective indices from indices array\"\"\"\n for i in indices:\n model.layers[i].trainable = False\n return model\n\n\ndef prepare_feature_extractor(model,\n crop_index,\n lock_index,\n input_shape):\n \"\"\"Crops model and locks layers from training\n Manipulates model config since it is only viable option besides rebuilding the whole model\n Receives:\n model - pretrained model\n crop_index - index of last layer in final model\n lock_index - index of last locked layer in final model\n input_shape - since model is convolutional, input could be freely changed\n Returns:\n new cropped model\n \"\"\"\n config = model.get_config()\n weights = model.get_weights()\n # Edit input layer\n config['layers'][0]['config']['batch_input_shape'] = (None, *input_shape)\n\n # Crop unnecessary layers\n assert crop_index < len(config['layers']), 'crop_index is out of layers list bounds'\n config['layers'] = config['layers'][:crop_index + 1]\n\n # Assign new model output\n config['layers'][-1]['config']['name'] = 'FeatureMap'\n config['output_layers'][0][0] = config['layers'][-1]['name']\n\n # Build cropped model from config and load weights\n model = Model.from_config(config)\n model.set_weights(weights)\n\n # Lock layers\n assert lock_index < len(config['layers']), 'lock_index is out of cropped layers list bounds'\n return lock_layers(model, list(range(lock_index + 1)))","repo_name":"bmstu-iu8-g1-2019-project/road-signs-recognition","sub_path":"sources/feature_extractor/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":1615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5562962140","text":"import webiopi\nimport datetime\n#import Adafruit_DHT as dht\nimport sys\n\nGPIO = webiopi.GPIO\n\nVDHT = 6\nDth = 19\nPluz= 17\nPcascada = 
27\nPlluvia = 12\nPcalefactor = 13\nPpeltier = 14\nTmin=22\nTmax=28\nHmin=100\nHmax=0\nHoraLuzOn = 9 # Turn Light ON at 08:00\nHoraLuzOff = 20 # Turn Light OFF at 18:00\nt = 1\nh = 2\nAUTO = True\nTemperaturaElevada = 0\n\n# setup function is automatically called at WebIOPi startup\ndef setup():\n # set the GPIO used by the light to output\n GPIO.setFunction(VDHT, GPIO.OUT) #pin 6 5V DHT\n GPIO.setFunction(Pluz, GPIO.OUT)\n GPIO.setFunction(Pcascada, GPIO.OUT)\n GPIO.setFunction(Plluvia, GPIO.OUT)\n GPIO.setFunction(Pcalefactor, GPIO.OUT)\n GPIO.setFunction(Ppeltier, GPIO.OUT)\n\n GPIO.digitalWrite(VDHT, GPIO.HIGH) #pin 6 5V DHT\n GPIO.digitalWrite(Pcascada, GPIO.LOW)\n GPIO.digitalWrite(Plluvia, GPIO.HIGH)\n GPIO.digitalWrite(Pcalefactor, GPIO.HIGH)\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n GPIO.digitalWrite(Ppeltier, GPIO.HIGH)\n\n\n hora = datetime.datetime.now()\n\n if ((hora.hour >= HoraLuzOn) and (hora.hour < HoraLuzOff)):\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n\n# loop function is repeatedly called by WebIOPi \ndef loop():\n hora = datetime.datetime.now()\n if (AUTO):\n\n # toggle light ON all days at the correct time\n if ((hora.hour == HoraLuzOn) and (hora.minute == 0) and (hora.second == 0)):\n if (GPIO.digitalRead(Pluz) == GPIO.LOW):\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n\n # toggle light OFF\n if ((hora.hour == HoraLuzOff) and (hora.minute == 0) and (hora.second == 0)):\n if (GPIO.digitalRead(Pluz) == GPIO.HIGH):\n GPIO.digitalWrite(Pluz, GPIO.LOW)\n\n #h,t = dht.read_retry(dht.DHT22, Dth)\n h=1\n t=25\n if tTmax:\n if (GPIO.digitalRead(Pcalefactor) == GPIO.LOW):\n GPIO.digitalWrite(Pcalefactor, GPIO.HIGH)\n if t>30:\n TemperaturaElevada = 1\n if (t<28) and (TemperaturaElevada == 1):\n TemperaturaElevada = 0 \n\n if (hora.hour==10 and hora.minute==50) or (hora.hour==12 and hora.minute==0) or (hora.hour==13 and hora.minute==50) or (hora.hour==16 and hora.minute==50) or (hora.hour==19 and hora.minute==50) or (hora.hour==22 and hora.minute==50):\n GPIO.digitalWrite(Plluvia, GPIO.LOW)\n time.sleep(30)\n GPIO.digitalWrite(Plluvia, GPIO.HIGH)\n if (TemperaturaElevada):\n GPIO.digitalWrite(Ppeltier, GPIO.LOW)\n\n\n # gives CPU some time before looping again\n webiopi.sleep(1)\n\n# destroy function is called at WebIOPi shutdown\ndef destroy():\n GPIO.digitalWrite(Pluz, GPIO.LOW)\n GPIO.digitalWrite(Pcascada, GPIO.LOW)\n GPIO.digitalWrite(Plluvia, GPIO.HIGH)\n GPIO.digitalWrite(Pcalefactor, GPIO.HIGH)\n GPIO.digitalWrite(Pluz, GPIO.HIGH)\n GPIO.digitalWrite(peltier, GPIO.HIGH)\n\n@webiopi.macro\ndef getTemperaturaHumedad():\n return \"%d;%d\" % (t, h)\n\n@webiopi.macro\ndef getLuzHours():\n return \"%d;%d\" % (HoraLuzOn, HoraLuzOff)\n\n@webiopi.macro\ndef setLuzHours(on, off):\n global HoraLuzOn, HoraLuzOff\n HoraLuzOn = int(on)\n HoraLuzOff = int(off)\n return getLuzHours()\n\n@webiopi.macro\ndef getTemperaturaLimits():\n return \"%d;%d\" % (Tmin, Tmax)\n\n@webiopi.macro\ndef setTemperaturaLimits(on, off):\n global Tmin, Tmax\n Tmin = int(on)\n Tmax = int(off)\n return getTemperaturaLimits()\n\n@webiopi.macro\ndef getModo():\n if (AUTO):\n return \"auto\"\n return \"manual\"\n\n@webiopi.macro\ndef setModo(modo):\n global AUTO\n if (mode == \"auto\"):\n AUTO = True\n elif (mode ==\"manual\"):\n AUTO = FALSE\n return getModo()\n","repo_name":"aguspa/TerrarioAutonomo","sub_path":"python/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} 
+{"seq_id":"39522186624","text":"import requests\nimport time\nimport urllib\nimport ganster\n\n\nTOKEN = \"1060340522:AAHwgNAwVqc2ONPtzQX0TPvQjnFOss67xHA\"\nURL = f\"https://api.telegram.org/bot{TOKEN}/\"\n\n\ndef get_updates(offset=None):\n url = URL + \"getUpdates?timeout=100\"\n if offset:\n url = url + f\"&offset={offset}\"\n response = requests.get(url)\n js = response.json()\n return js\n\n\ndef get_last_update_id(updates):\n update_ids = []\n for update in updates[\"result\"]:\n update_ids.append(int(update[\"update_id\"]))\n return max(update_ids)\n\n\ndef get_last_chat_id_and_text(updates):\n num_updates = len(updates[\"result\"])\n last_update = num_updates - 1\n text = updates[\"result\"][last_update][\"message\"][\"text\"]\n chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\n return text, chat_id\n\n\ndef reply_message(updates):\n for update in updates[\"result\"]:\n text = update[\"message\"][\"text\"]\n chat = update[\"message\"][\"chat\"][\"id\"]\n reply = ganster.reply_message(text)\n send_message(reply, chat)\n\n\ndef send_message(text, chat_id):\n text = urllib.parse.quote_plus(text)\n url = URL + f\"sendMessage?text={text}&chat_id={chat_id}\"\n requests.get(url)\n\n\ndef main():\n last_update_id = None\n while True:\n updates = get_updates(last_update_id)\n if len(updates[\"result\"]) > 0:\n last_update_id = get_last_update_id(updates) + 1\n reply_message(updates)\n time.sleep(0.5)\n\n\nif __name__ == '__main__':\n main()","repo_name":"Husainraza/Chatbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"13917475424","text":"import subprocess\nimport os\n\nmy_env = os.environ.copy()\n# command = [\"helm\", \"install\", \"--name\", \"spark-user-\" + os.environ['USER'], \"--set\", \"user.name=\" + os.environ['USER'], \"--set\", \"cvmfs.enable=true\", \"--set\" , \"user.admin=false\", \"https://gitlab.cern.ch/db/spark-service/spark-service-charts/raw/spark_user_accounts/cern-spark-user-1.1.0.tgz\"]\n# command = [\"openstack\", \"token\", \"issue\", \"-c\", \"id\", \"-f\", \"value\"]\ncommand = [\"helm\", \"init\", \"--client-only\"]\np = subprocess.Popen(command, stdout=subprocess.PIPE, env=my_env)\nout, err = p.communicate()\nprint(out)\nprint(err)\n","repo_name":"sahiljajodia01/k8s-selection","sub_path":"k8s-selection/helm_chart_test.py","file_name":"helm_chart_test.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"} +{"seq_id":"24510966989","text":"import heapq\n\nN = int(input())\nxy = []\nfor n in range(N):\n x, y = map(float, input().split())\n xy.append([x, y])\n\ngraph = [[] for _ in range(N)]\nfor i in range(N):\n for j in range(i + 1, N):\n cost = ((xy[i][0] - xy[j][0]) ** 2 + (xy[i][1] - xy[j][1]) ** 2) ** 0.5\n graph[i].append((cost, j))\n graph[j].append((cost, i))\n\nq = [(0, 0)]\nvisited = [0] * N\nans = 0\nwhile q:\n cost, now = heapq.heappop(q)\n if not visited[now]:\n visited[now] = 1\n ans += cost\n for next in graph[now]:\n heapq.heappush(q, (next[0], next[1]))\nprint(round(ans, 2))","repo_name":"dhkimxx/Baekjoon","sub_path":"python/4386.py","file_name":"4386.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"30718653156","text":"# -*-coding:utf-8 -*-\nimport requests\n\ndef 
is_zero_balance(balance):\n balance_float = 0.0\n if isinstance(balance, str):\n print(\"balance:{}\".format(balance))\n if len(balance) > 0:\n balance_float = float(balance)\n else:\n balance_float = balance\n \n return balance_float > 0\n\ndef get_bitcoin_balance(address):\n # 拼接链接\n url = \"\".join([\"https://www.blockchain.com/btc/address/\", address])\n response = requests.get(url)\n key_word = \"The current value of this address is\"\n key_len = len(key_word)\n text = response.text\n index = text.find(key_word)\n if index < 0:\n print(\"text:{} index: {} -----------can not find index-----------\".format(text,index))\n return 0;\n short = text[index:index+100]\n btc = short.find(\"BTC\")\n if btc < 0:\n print(\"******can not find btc******\")\n return 0;\n else:\n btc_banlance = short[key_len+1:btc-1]\n return btc_banlance\n\nif __name__ == \"__main__\":\n try:\n address = \"3JZHLAKwc291dnxjwLyDDfnmQkbTNwC7PX\"\n # address = \"bc1qcn6xjvfy6uqyqqnzmwxeqqtnk9cmtcpummn8la\"\n # address = \"36KKMy5yihkjNA4Rc21eA5TXy4wnfCGpj3\"\n btc_banlance = get_bitcoin_balance(address)\n \n if is_zero_balance(btc_banlance):\n print(\"find valide adress:\" + address)\n else:\n print(\"continu find!\")\n \n except Exception as err:\n print(\"error, \" + err)\n\n\n\n\n\n","repo_name":"wodingdong/genbtcaccount","sub_path":"genbtcaddr/get_bitcoin.py","file_name":"get_bitcoin.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"} +{"seq_id":"74433518299","text":"__author__ = \"Andrea Rubbi\"\r\n\r\n# ─── About ────────────────────────────────────────────────────────────────────\r\n\"\"\"This file defines the tree_set and set_collection classes.\r\n tree_set contains the information relative to a single set of phyogenetic trees\r\n in newick format. It allows to compute the distance matrix using different methods and metrics.\r\n The distance matrix can be then embedded using different methods and subsequently plotted in 2D or 3D.\r\n A distance matrix and metadata can be given as .csv files. Moreover, metadata is modified\r\n in order to give information regarding the name of the tree set and the index (or step) of each tree.\r\n Please note that, once an instance of a class is generated, its metadata dataframe should not be substituted\r\n as this would invalidate it for the plotting functions. Addition of columns and features is possible by\r\n accessing the dataframe and modifying it as a pandas.DataFrame instance.\r\n set_collection behaves similarly to set_collection. Matter of fact, it is a subclass of the latter and therefore\r\n shares most of its methods. Its purpose is to analyze concurrently multiple instances of tree_sets and plot their\r\n relative distance in a common embedding. 
Examples of possible applications are present at: ###LINK###\"\"\"\r\n# ──────────────────────────────────────────────────────────────────────────────\r\n\r\n__copyright__ = \"2023-present Andrea Rubbi and other contributors\"\r\n__credits__ = [\"Andrea Rubbi\", \"Lukas Weilguny\", \"Nick Goldman\", \"Nicola de Maio\"]\r\n\r\n__license__ = \"MIT\"\r\n__maintainer__ = \"Andrea Rubbi\"\r\n__institute__ = \"EMBL-EBI\"\r\n__email__ = \"andrear@ebi.ac.uk\"\r\n__status__ = \"Production\"\r\n\r\n# ──────────────────────────────────────────────────────────────────────────────\r\nimport os\r\nimport random\r\nimport shutil\r\nimport subprocess\r\nimport sys\r\nimport time\r\nimport uuid\r\nimport warnings\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom rich import print\r\n# ? rich is a very nice library that allows to\r\n# ? easily format the output of console\r\n# ? https://github.com/Textualize/rich\r\nfrom rich.console import Console\r\n\r\n# getting the name of the directory\r\ncurrent = os.path.dirname(os.path.realpath(__file__))\r\n\r\n# Getting the parent directory name\r\nparent = os.path.dirname(current)\r\n\r\n# adding the parent directory to\r\n# the sys.path.\r\nsys.path.append(parent)\r\n\r\n# silencing some warnings\r\nfrom scipy.sparse import SparseEfficiencyWarning\r\n\r\n# importing other modules\r\n# try:\r\nfrom .calculate_distances import hashrf, maple_RF, tqdist\r\nfrom .embeddings import Isomap_e, LLE_e, PCA_e, tSNE_e\r\nfrom .embeddings.graph import graph\r\nfrom .interactive_mode import interactive\r\nfrom .subsample import subsample\r\n\r\n# except:\r\n# sys.exit(\"Error\")\r\n\r\n\r\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\r\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\r\nwarnings.filterwarnings(\"ignore\", category=SparseEfficiencyWarning)\r\n\r\n\r\n# ────────────────────────────────────────────────────────── TREE_SET CLASS ─────\r\nclass tree_set:\r\n \"\"\"Class for the analysis of a set of phylogenetic trees\"\"\"\r\n\r\n # Console from rich -> takes control of console output\r\n console = Console()\r\n\r\n # ─── INIT ──────────────────────────────────────────────────────────────────\r\n def __init__(self, file, output_file=None, distance_matrix=None, metadata=None):\r\n \"\"\"Initialize tree_set\r\n\r\n file: mandatory - file with set of phylogenetic trees in newick format\r\n output_file: facultative - specifies output_file of distance matrix\r\n distance_matrix: facultative - specifies file with already-computed distance matrix\r\n metadata: facultative - specifies file containing additional information for each tree in set.\r\n It should contain a column for each feature, a row for each tree (blank row if no info)\r\n \"\"\"\r\n\r\n self.file = file\r\n self.output_file = output_file\r\n self.distance_matrix = distance_matrix\r\n self.metadata = metadata\r\n self.embedding_pca2D = None\r\n self.embedding_tsne2D = None\r\n self.embedding_pca3D = None\r\n self.embedding_tsne3D = None\r\n\r\n if self.output_file == None:\r\n self.output_file = \"./{file}_distance_matrix.csv\".format(\r\n file=os.path.splitext(os.path.basename(self.file))[0]\r\n )\r\n\r\n self.size = int(f\"{os.path.getsize(file)/(1<<30):,.0f}\")\r\n # if self.size > 3: sys.exit(f'File is too large: {self.size} GB')\r\n\r\n try:\r\n self.n_trees = int(\r\n subprocess.check_output([\"wc\", \"-l\", self.file]).decode().split(\" \")[0]\r\n )\r\n except:\r\n with open(file, \"r\") as f:\r\n self.n_trees = len(f.readlines())\r\n f.close()\r\n\r\n if 
type(self.distance_matrix) != type(None):\r\n try:\r\n self.distance_matrix = pd.read_csv(\r\n self.distance_matrix, header=None, index_col=None\r\n ).values\r\n # header=0,\r\n # index_col=0,\r\n # dtype=np.float32,\r\n # self.distance_matrix.columns = list(range(self.distance_matrix.shape[1]))\r\n except:\r\n sys.exit(\r\n \"There's an error with the Distance Matrix file - please check the correct location and name of the .csv file\"\r\n )\r\n\r\n if type(self.metadata) != type(None):\r\n try:\r\n self.metadata = pd.read_csv(self.metadata)\r\n except:\r\n sys.exit(\r\n \"There's an error with the Metadata file - please check the correct location and name of the .csv file\"\r\n )\r\n\r\n else:\r\n self.metadata = pd.DataFrame()\r\n self.metadata[\"SET-ID\"] = [\r\n os.path.splitext(os.path.basename(self.file))[0] for i in range(self.n_trees)\r\n ]\r\n self.metadata[\"STEP\"] = [i for i in range(self.n_trees)]\r\n self.sets = np.unique(self.metadata[\"SET-ID\"])\r\n\r\n # ─── STR ───────────────────────────────────────────────────────────────────\r\n def __str__(self):\r\n \"\"\"Returns string representation of tree_set\r\n\r\n Returns:\r\n __str__: summary of tree_set\r\n \"\"\"\r\n computed = \"not computed\"\r\n if type(self.distance_matrix) != type(None):\r\n computed = \"computed\"\r\n\r\n return f\"─────────────────────────────\\n Tree set containing {self.n_trees} trees;\\n File: {self.file};\\n Distance matrix: {computed}.\\n───────────────────────────── \\n\"\r\n\r\n # ─── CALCULATE DISTANCES ───────────────────────────────────────────────────\r\n def calculate_distances(self, method):\r\n \"\"\"Computes tree_set distance matrix with method of choice\r\n\r\n Args:\r\n method (str): method/algorithm used to compute distance matrix\r\n \"\"\"\r\n methods = {\r\n \"hashrf_RF\": hashrf.hashrf,\r\n \"hashrf_wRF\": hashrf.hashrf_weighted,\r\n \"smart_RF\": maple_RF.calculate_distance_matrix,\r\n \"tqdist_quartet\": tqdist.quartet,\r\n \"tqdist_triplet\": tqdist.triplet,\r\n \"None\": None,\r\n }\r\n\r\n with self.console.status(\"[bold green]Calculating distances...\") as status:\r\n self.distance_matrix = methods[method](\r\n self.file, self.n_trees, self.output_file\r\n )\r\n print(f\"[bold blue]{method} | Done!\")\r\n\r\n # ─── EMBED ─────────────────────────────────────────────────────────────────\r\n def embed(self, method, dimensions, quality=False, report=False):\r\n \"\"\"Compute embedding with n-dimensions and method of choice\r\n\r\n Args:\r\n method (str): method of choice to embed data\r\n dimensions (_type_): number of dimensions/components\r\n quality (bool, optional): returns quality report and self.emb_quality. 
Defaults to False.\r\n \"\"\"\r\n methods = {\r\n \"pca\": PCA_e.pca,\r\n \"tsne\": tSNE_e.tsne,\r\n \"isomap\": Isomap_e.isomap,\r\n \"lle\": LLE_e.lle,\r\n \"None\": None,\r\n }\r\n\r\n if type(self.distance_matrix) == type(None):\r\n self.calculate_distances(\"hashrf_RF\")\r\n\r\n dim = dimensions if dimensions > 2 else 3\r\n\r\n with self.console.status(\"[bold green]Embedding distances...\") as status:\r\n embedding = methods[method](\r\n self.distance_matrix,\r\n dim,\r\n self.metadata,\r\n quality=quality if not report else True,\r\n report=report,\r\n )\r\n print(f\"[bold blue]{method} | Done!\")\r\n\r\n if quality:\r\n if method == \"pca\":\r\n embedding, var, corr, self.emb_quality = embedding\r\n print(\r\n f\"With {dimensions} components/dimensions, the explained variance is {var:.2f},\\n with an estimated correlation {corr[0, 1]:.2f} with the {self.n_trees}-dimensional coordinates\"\r\n )\r\n else:\r\n embedding, corr, self.emb_quality = embedding\r\n print(\r\n f\"With {dimensions} components/dimensions, the estimated correlation with the {self.n_trees}-dimensional coordinates is {corr[0, 1]:.2f}\"\r\n )\r\n\r\n if method == \"pca\":\r\n self.embedding_pca = embedding\r\n self.embedding_pca3D = embedding[:, :4]\r\n self.embedding_pca2D = embedding[:, :3]\r\n\r\n elif method == \"tsne\":\r\n if dimensions > 3:\r\n warnings.warn(\r\n \"t-SNE with more than 3 dimensions can be considerably slow\"\r\n )\r\n self.embedding_tsne = embedding\r\n self.embedding_tsne3D = embedding[:, :4]\r\n self.embedding_tsne2D = embedding[:, :3]\r\n\r\n elif method == \"isomap\":\r\n if dimensions > 3:\r\n warnings.warn(\r\n \"Isomap with more than 3 dimensions can be considerably slow\"\r\n )\r\n self.embedding_isomap = embedding\r\n self.embedding_isomap3D = embedding[:, :4]\r\n self.embedding_isomap2D = embedding[:, :3]\r\n\r\n elif method == \"lle\":\r\n if dimensions > 3:\r\n warnings.warn(\"LLE with more than 3 dimensions can be considerably slow\")\r\n self.embedding_lle = embedding\r\n self.embedding_lle3D = embedding[:, :4]\r\n self.embedding_lle2D = embedding[:, :3]\r\n\r\n # ─── PLOT EMBEDDING ─────────────────────────────────────────────────────────\r\n\r\n def plot_2D(\r\n self,\r\n method,\r\n save=False,\r\n name_plot=None,\r\n static=False,\r\n plot_meta=\"SET-ID\",\r\n plot_set=None,\r\n select=False,\r\n same_scale=False,\r\n ):\r\n \"\"\"Plot 2D embedding performed with method of choice\r\n\r\n Args:\r\n method (str): embedding method\r\n save (bool, optional): save plot HTML. Defaults to False.\r\n name_plot (str, optional): name of plot's file. Defaults to None.\r\n static (bool, optional): return less interactive plot. Defaults to False.\r\n plot_meta (str, optional): meta-variale used to color the points. Defaults to \"SET-ID\".\r\n plot_set (list, optional): list of sets to plot from set_collection. Defaults to None.\r\n select (bool, optional): return set of buttons to show or hide specific traces. Defaults to False.\r\n same_scale (bool, optional): use same color_scale for all traces when scale is continuous. 
Defaults to False.\r\n\r\n Raises:\r\n ValueError: method can only be either pca or tsne for now\r\n\r\n Returns:\r\n plot: either interactive or not\r\n \"\"\"\r\n\r\n # you can surely write something better here @andrear\r\n if type(plot_set) == type(None):\r\n plot_set = self.sets\r\n if method == \"pca\":\r\n if name_plot == None:\r\n name_plot = \"PCA_2D\"\r\n if type(self.embedding_pca2D) == type(None):\r\n self.embed(\"pca\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_pca2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n elif method == \"tsne\":\r\n if name_plot == None:\r\n name_plot = \"TSNE_2D\"\r\n if type(self.embedding_tsne2D) == type(None):\r\n self.embed(\"tsne\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_tsne2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n elif method == \"isomap\":\r\n if name_plot == None:\r\n name_plot = \"ISOMAP_2D\"\r\n if type(self.embedding_isomap2D) == type(None):\r\n self.embed(\"isomap\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_isomap2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n elif method == \"lle\":\r\n if name_plot == None:\r\n name_plot = \"LLE_2D\"\r\n if type(self.embedding_lle2D) == type(None):\r\n self.embed(\"lle\", 2)\r\n fig = graph.plot_embedding(\r\n self.embedding_lle2D,\r\n self.metadata,\r\n 2,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n )\r\n\r\n else:\r\n raise ValueError(\"'method' can only be either 'pca' or 'tsne' \")\r\n\r\n return fig\r\n\r\n def plot_3D(\r\n self,\r\n method,\r\n save=False,\r\n name_plot=None,\r\n static=False,\r\n plot_meta=\"SET-ID\",\r\n plot_set=None,\r\n select=False,\r\n same_scale=False,\r\n z_axis=None,\r\n ):\r\n \"\"\"Plot 3D embedding performed with method of choice\r\n\r\n Args:\r\n method (str): embedding method\r\n save (bool, optional): save plot HTML. Defaults to False.\r\n name_plot (str, optional): name of plot's file. Defaults to None.\r\n static (bool, optional): return less interactive plot. Defaults to False.\r\n plot_meta (str, optional): meta-variale used to color the points. Defaults to \"SET-ID\".\r\n plot_set (list, optional): list of sets to plot from set_collection. Defaults to None.\r\n select (bool, optional): return set of buttons to show or hide specific traces. Defaults to False.\r\n same_scale (bool, optional): use same color_scale for all traces when scale is continuous. 
Defaults to False.\r\n\r\n Raises:\r\n ValueError: method can only be either pca or tsne for now\r\n\r\n Returns:\r\n plot: either interactive or not\r\n \"\"\"\r\n if type(plot_set) == type(None):\r\n plot_set = self.sets\r\n if method == \"pca\":\r\n if name_plot == None:\r\n name_plot = \"PCA_3D\"\r\n if type(self.embedding_pca3D) == type(None):\r\n self.embed(\"pca\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_pca3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n elif method == \"tsne\":\r\n if name_plot == None:\r\n name_plot = \"TSNE_3D\"\r\n if type(self.embedding_tsne3D) == type(None):\r\n self.embed(\"tsne\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_tsne3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n elif method == \"isomap\":\r\n if name_plot == None:\r\n name_plot = \"ISOMAP_3D\"\r\n if type(self.embedding_isomap3D) == type(None):\r\n self.embed(\"isomap\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_isomap3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n elif method == \"lle\":\r\n if name_plot == None:\r\n name_plot = \"LLE_3D\"\r\n if type(self.embedding_lle3D) == type(None):\r\n self.embed(\"lle\", 3)\r\n fig = graph.plot_embedding(\r\n self.embedding_lle3D,\r\n self.metadata,\r\n 3,\r\n save,\r\n name_plot,\r\n static,\r\n plot_meta,\r\n plot_set,\r\n select,\r\n same_scale,\r\n z_axis,\r\n )\r\n\r\n else:\r\n raise ValueError(\"'method' can only be either 'pca' or 'tsne' \")\r\n\r\n return fig\r\n\r\n # ─── GET SUBSET ───────────────────────────────────────────────────────\r\n\r\n def get_subset(self, n_required, method=\"sequence\"):\r\n \"\"\"Gets subset of phylogenetic trees\r\n\r\n Args:\r\n n_required (int): number of points to extract\r\n method (str, optional): method used to extact points ('sequence', 'random', 'syst'). 
Defaults to \"sequence\".\r\n\r\n Returns:\r\n subset plots: 2D and 3D embedding plots of subset\r\n \"\"\"\r\n console = Console()\r\n with console.status(\"[bold blue]Extracting subsample...\") as status:\r\n if method == \"syst\":\r\n if shutil.which(\"pypy3\") is not None:\r\n command = [\r\n \"pypy3\",\r\n f\"{current}/subsample/subsample.py\",\r\n self.file,\r\n str(self.n_trees),\r\n str(n_required),\r\n ]\r\n res = subprocess.check_output(command, universal_newlines=True).split(\r\n \"\\n\"\r\n )\r\n subsample_trees, idxs = eval(res[3]), eval(res[4])\r\n else:\r\n console.log(\r\n \"[bold red]Could not find pypy3 on your sytem PATH - using python3...\"\r\n )\r\n subsample_trees, idxs = subsample.subsample(\r\n self.file, self.n_trees, n_required, subp=False\r\n )\r\n\r\n else:\r\n with open(self.file, \"r\") as f:\r\n trees = list(enumerate(f.readlines()))\r\n f.close()\r\n\r\n if method == \"random\":\r\n selection = random.sample(trees, n_required)\r\n subsample_trees, idxs = list(\r\n map(lambda elem: elem[1], selection)\r\n ), list(map(lambda elem: elem[0], selection))\r\n elif method == \"sequence\":\r\n step = self.metadata.shape[0] // n_required\r\n idxs = [step * (i + 1) - 1 for i in range(n_required)]\r\n subsample_trees = [trees[i][1] for i in idxs]\r\n\r\n else:\r\n sys.exit(f\"Method {method} not available for subsampling\")\r\n\r\n file_sub = f\"SUBSAMPLE\"\r\n with open(file_sub, \"w\") as f:\r\n for i in subsample_trees:\r\n f.write(i)\r\n f.close()\r\n # print(len(subsample_trees), len(idxs))\r\n status.update(\"[bold green]Calculating distances...\")\r\n dM = hashrf.hashrf(file_sub, n_required, file_sub + \"_distances.csv\")\r\n components = PCA_e.pca(dM, 3)\r\n status.update(f\"[bold blue] Done!\")\r\n time.sleep(0.2)\r\n\r\n sorted_elements = sorted(enumerate(idxs), key=lambda x: x[1])\r\n idxs_sorted, order = list(map(lambda x: x[1], sorted_elements)), list(\r\n map(lambda x: x[0], sorted_elements)\r\n )\r\n comp_sorted = np.array([components[line, :] for line in order])\r\n # dM_sorted = np.array([dM[line,:] for line in order])\r\n SetID_sub = self.metadata[\"SET-ID\"][idxs_sorted]\r\n meta_sub = pd.DataFrame({\"SET-ID\": SetID_sub, \"STEP\": idxs_sorted})\r\n\r\n fig1, fig2 = graph.plot_embedding(comp_sorted, meta_sub, 2), graph.plot_embedding(\r\n comp_sorted, meta_sub, 3\r\n )\r\n return fig1, fig2\r\n\r\n\r\n# ──────────────────────────────────────────────────────────────────────────────\r\n# ─────────────────────────────────────────────────── SET_COLLECTION CLASS ─────\r\nclass set_collection(tree_set):\r\n # NB: set_collection is a sub_class of tree_set\r\n # therefore, most methods are shared between these two classes\r\n def __init__(\r\n self,\r\n collection=list(),\r\n file=\"Set_collection_\",\r\n output_file=None,\r\n distance_matrix=None,\r\n metadata=None,\r\n ):\r\n \"\"\"Initialize set_collection\r\n\r\n collection: facultative - tree_set or list of tree_sets\r\n NB: if no collection is given an empty set_collection is generated\r\n file: facultative - file with set of phylogenetic trees in newick format\r\n output_file: facultative - specifies output_file of distance matrix\"\"\"\r\n\r\n self.id = uuid.uuid4()\r\n self.file = file + str(self.id)\r\n self.distance_matrix = (\r\n pd.read_csv(distance_matrix, header=None, index_col=None).values #\r\n if distance_matrix\r\n else distance_matrix\r\n )\r\n self.embedding_pca2D = None\r\n self.embedding_tsne2D = None\r\n self.embedding_pca3D = None\r\n self.embedding_tsne3D = None\r\n\r\n if self.file 
!= \"Set_collection_\" + str(self.id) and output_file is None:\r\n self.output_file = \"{file}_distance_matrix.csv\".format(\r\n file=os.path.splitext(os.path.basename(self.file))[0]\r\n )\r\n elif output_file is None:\r\n self.output_file = \"Set_collection_distance_matrix_\" + str(self.id) + \".csv\"\r\n else:\r\n if output_file[-4:] == \".csv\":\r\n self.output_file = output_file[:-4] + \"_\" + str(self.id) + \".csv\"\r\n else:\r\n self.output_file = output_file + \"_\" + str(self.id) + \".csv\"\r\n\r\n if isinstance(collection, tree_set):\r\n self.collection = [collection]\r\n with open(self.file, \"w\") as trees:\r\n with open(collection.file, \"r\") as file:\r\n trees.write(file.read())\r\n file.close()\r\n trees.close()\r\n\r\n elif len(collection) > 0:\r\n remove = list()\r\n for i, element in enumerate(collection):\r\n if not isinstance(element, tree_set):\r\n if isinstance(element, str):\r\n try:\r\n file = os.path.splitext(os.path.basename(element))[0]\r\n exec(f\"{file} = tree_set('{element}')\")\r\n remove.append(i)\r\n except FileNotFoundError:\r\n sys.exit(f\"File {element} not found\")\r\n except TypeError:\r\n sys.exit(\r\n f\"Set collection can be initialized only with set_collection, tree_set, or file path elements\"\r\n )\r\n exec(f\"collection.append({file})\")\r\n\r\n else:\r\n sys.exit(\r\n f\"Set collection can be initialized only with set_collection, tree_set, or file path elements\"\r\n )\r\n for i in remove[::-1]:\r\n collection.pop(i)\r\n\r\n self.collection = collection\r\n\r\n else:\r\n self.collection = collection\r\n\r\n self.data = dict()\r\n\r\n self.metadata = pd.DataFrame()\r\n self.n_trees = 0\r\n for set in self.collection:\r\n key = os.path.splitext(os.path.basename(set.file))[0]\r\n\r\n metadata = set.metadata\r\n if type(metadata) == type(None):\r\n metadata = pd.DataFrame()\r\n metadata[\"SET-ID\"] = np.array([key] * set.n_trees)\r\n\r\n self.metadata = pd.concat([self.metadata, metadata])\r\n\r\n self.data[key] = {\"metadata\": metadata, \"n_trees\": set.n_trees}\r\n self.n_trees += set.n_trees\r\n\r\n self.metadata.reset_index(drop=True, inplace=True)\r\n\r\n self.sets = np.unique(self.metadata[\"SET-ID\"])\r\n\r\n # ─── CALCULATE DISTANCES ───────────────────────────────────────────────────\r\n def calculate_distances(self, method):\r\n \"\"\"Computes tree_set distance matrix with method of choice\r\n\r\n Args:\r\n method (str): method/algorithm used to compute distance matrix\r\n \"\"\"\r\n methods = {\r\n \"hashrf_RF\": hashrf.hashrf,\r\n \"hashrf_wRF\": hashrf.hashrf_weighted,\r\n \"smart_RF\": maple_RF.calculate_distance_matrix,\r\n \"tqdist_quartet\": tqdist.quartet,\r\n \"tqdist_triplet\": tqdist.triplet,\r\n \"None\": None,\r\n }\r\n\r\n if method in (\"hashrf_RF\", \"hashrf_wRF\", \"tqdist_quartet\", \"tqdist_triplet\"):\r\n with open(self.file, \"w\") as trees:\r\n for set in self.collection:\r\n with open(set.file, \"r\") as file:\r\n trees.write(file.read())\r\n file.close()\r\n trees.close()\r\n\r\n with self.console.status(\"[bold green]Calculating distances...\") as status:\r\n self.distance_matrix = methods[method](\r\n self.file, self.n_trees, self.output_file\r\n )\r\n\r\n if method in (\"hashrf_RF\", \"hashrf_wRF\", \"tqdist_quartet\", \"tqdist_triplet\"):\r\n hashrf.bash_command(f\"rm {self.file}\")\r\n\r\n print(f\"[bold blue]{method} | Done!\")\r\n\r\n # the result of addition between two collections\r\n # is the concatenation of the two collections\r\n def __add__(self, other):\r\n \"\"\"Concatenates two collectionsor 
collection and tree_set\r\n\r\n Args:\r\n other (tree_set ot set_colletion): tree_set ot set_colletion\r\n\r\n Returns:\r\n set_collection: concatenated set_collection\r\n \"\"\"\r\n if isinstance(other, set_collection):\r\n return set_collection(self.collection + other.collection)\r\n elif isinstance(other, tree_set):\r\n return set_collection(self.collection + [other])\r\n else:\r\n remove = list()\r\n for i, element in enumerate(other):\r\n if not isinstance(element, tree_set):\r\n if isinstance(element, str):\r\n try:\r\n file = os.path.splitext(os.path.basename(element))[0]\r\n exec(f\"{file} = tree_set('{element}')\")\r\n remove.append(i)\r\n except FileNotFoundError:\r\n sys.exit(f\"File {element} not found\")\r\n except TypeError:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n\r\n exec(f\"other.append({file})\")\r\n\r\n else:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n for i in remove[::-1]:\r\n other.pop(i)\r\n\r\n return set_collection(self.collection + other)\r\n\r\n def __str__(self):\r\n computed = \"not computed\"\r\n if type(self.distance_matrix) != type(None):\r\n computed = \"computed\"\r\n\r\n summary = f\"─────────────────────────────\\\r\n \\n Tree set collection containing {self.n_trees} trees;\\\r\n \\n File: {self.file};\\n Distance matrix: {computed}.\\\r\n \\n───────────────────────────── \\n\"\r\n for key, value in self.data.items():\r\n summary += f\"{key}; Containing {value['n_trees']} trees. \\n\"\r\n\r\n return summary\r\n\r\n # concatenate is a more formal method to concatenate collections\r\n # using this allows for more clarity in the codebase\r\n def concatenate(self, other):\r\n \"\"\"Concatenates two collectionsor collection and tree_set\r\n\r\n Args:\r\n other (tree_set ot set_colletion): tree_set ot set_colletion\r\n\r\n Returns:\r\n set_collection: concatenated set_collection\r\n \"\"\"\r\n if isinstance(other, set_collection):\r\n return set_collection(self.collection + other.collection)\r\n elif isinstance(other, tree_set):\r\n return set_collection(self.collection + [other])\r\n else:\r\n remove = list()\r\n for i, element in enumerate(other):\r\n if not isinstance(element, tree_set):\r\n if isinstance(element, str):\r\n try:\r\n file = os.path.splitext(os.path.basename(element))[0]\r\n exec(f\"{file} = tree_set('{element}')\")\r\n remove.append(i)\r\n except FileNotFoundError:\r\n sys.exit(f\"File {element} not found\")\r\n except TypeError:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n\r\n exec(f\"other.append({file})\")\r\n\r\n else:\r\n sys.exit(\r\n \"You can concatenate a set_collection \\\r\n only with another set_collection, a tree_set,\\\r\n or a list of tree_set\"\r\n )\r\n for i in remove[::-1]:\r\n other.pop(i)\r\n\r\n return set_collection(self.collection + other)\r\n","repo_name":"AndreaRubbi/Pear-EBI","sub_path":"pear_ebi/tree_set.py","file_name":"tree_set.py","file_ext":"py","file_size_in_byte":33264,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"33457317883","text":"# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# \n# File: test_concatenatingmodels.py\n# \n# This file is part of the NetSquid package (https://netsquid.org).\n# 
It is subject to the NetSquid Software End User License Conditions.\n# A copy of these conditions can be found in the LICENSE.md file of this package.\n# \n# NetSquid Authors\n# ================\n# \n# NetSquid is being developed within [Quantum Internet division](https://qutech.nl/research-engineering/quantum-internet/) at QuTech.\n# QuTech is a collaboration between TNO and the TUDelft.\n# \n# Active authors (alphabetical):\n# \n# - Tim Coopmans (scientific contributor)\n# - Chris Elenbaas (software developer)\n# - David Elkouss (scientific supervisor)\n# - Rob Knegjens (tech lead, software architect)\n# - Iñaki Martin Soroa (software developer)\n# - Julio de Oliveira Filho (software architect)\n# - Ariana Torres Knoop (HPC contributor)\n# - Stephanie Wehner (scientific supervisor)\n# \n# Past authors (alphabetical):\n# \n# - Axel Dahlberg (scientific contributor)\n# - Damian Podareanu (HPC contributor)\n# - Walter de Jong (HPC contributor)\n# - Loek Nijsten (software developer)\n# - Martijn Papendrecht (software developer)\n# - Filip Rozpedek (scientific contributor)\n# - Matt Skrzypczyk (software contributor)\n# - Leon Wubben (software developer)\n# \n# The simulation engine of NetSquid depends on the pyDynAA package,\n# which is developed at TNO by Julio de Oliveira Filho, Rob Knegjens, Coen van Leeuwen, and Joost Adriaanse.\n# \n# Ariana Torres Knoop, Walter de Jong and Damian Podareanu from SURFsara have contributed towards the optimization and parallelization of NetSquid.\n# \n# Hana Jirovska and Chris Elenbaas have built Python packages for MacOS.\n# \n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# This file uses NumPy style docstrings: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt\n\n\"\"\"Unit tests for concatenating different types of models.\n\n\"\"\"\nimport unittest\nimport netsquid as ns\nimport numpy as np\nfrom netsquid.qubits import ketutil as ku\nfrom netsquid.qubits.qubit import Qubit\nfrom netsquid.qubits import qformalism as qform\nfrom netsquid.qubits import qubitapi as qapi\nfrom netsquid.components.models.model import Model, ModelCompositionException\nfrom netsquid.components.models.delaymodels import DelayModel, GaussianDelayModel, FixedDelayModel\nfrom netsquid.components.models.errormodels import ErrorModel\nfrom netsquid.components.models.cerrormodels import ClassicalErrorModel\nfrom netsquid.components.models.qerrormodels import QuantumErrorModel, DepolarNoiseModel, DephaseNoiseModel, \\\n T1T2NoiseModel, FibreLossModel\n\n\nclass NonComposableModel(Model):\n\n def compute_model(self, *args, **kwargs):\n pass\n\n\nclass ExampleModel(Model):\n\n def __init__(self, value=0):\n super().__init__()\n self.value = value\n\n def compute_model(self, items, *args, **kwargs):\n for i in range(len(items)):\n items[i] **= self.value\n\n @classmethod\n def concatenation_class(cls):\n return ExampleModel\n\n\nclass ExampleAddModel(ExampleModel):\n\n def compute_model(self, items, addition=0, *args, **kwargs):\n for i in range(len(items)):\n items[i] += addition\n\n\nclass ExampleMultiplicationModel(ExampleModel):\n def compute_model(self, items, multiplier=1, *args, **kwargs):\n for i in range(len(items)):\n items[i] *= multiplier\n\n\nclass TestConcatModels(unittest.TestCase):\n\n def test_init(self):\n \"\"\"Test initialisation of concatenated models \"\"\"\n m1 = ExampleModel(2)\n m2 = ExampleModel(3)\n self.assertEqual(len(m1), 1)\n self.assertFalse(m1.is_concatenated)\n self.assertFalse(m2.is_concatenated)\n 
m3 = m1 + m2\n self.assertTrue(m3.is_concatenated)\n self.assertEqual(len(m3), 2)\n self.assertIsInstance(m3, ExampleModel)\n self.assertIsInstance(m3, m1.concatenation_class())\n self.assertTrue(m1 in m3)\n self.assertTrue(m2 in m3)\n self.assertFalse(m3 in m2)\n self.assertFalse(m1 in m2)\n\n def test_order(self):\n \"\"\"Test if models are computed in the right order\"\"\"\n m1 = ExampleModel(2)\n m2 = ExampleModel(3)\n m3 = m1 + m2\n self.assertEqual(len(m3), 2)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items)\n self.assertListEqual(items, [1, 4 ** 3, 9 ** 3, 16 ** 3, 25 ** 3])\n m4 = m2 + m1\n self.assertEqual(len(m4), 2)\n self.assertIsInstance(m4, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m4(items)\n self.assertListEqual(items, [1, 8 ** 2, 27 ** 2, 64 ** 2, 125 ** 2])\n\n def test_arguments(self):\n \"\"\"Check if arguments are correctly passed through\"\"\"\n m1 = ExampleAddModel()\n m2 = ExampleMultiplicationModel()\n m3 = m1 + m2\n self.assertEqual(len(m3), 2)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items, addition=4, multiplier=2)\n self.assertListEqual(items, [10, 12, 14, 16, 18])\n\n def test_multiplying(self):\n \"\"\"Test for multiplying models to repeat them\"\"\"\n m1 = ExampleAddModel()\n m2 = ExampleMultiplicationModel()\n m3 = 5 * m1\n self.assertEqual(len(m3), 5)\n self.assertNotIn(m1, m2)\n self.assertIn(m1, m3)\n self.assertNotIn(m3, m1)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items, addition=3)\n self.assertListEqual(items, [16, 17, 18, 19, 20])\n m3 = m1 * 5\n self.assertEqual(len(m3), 5)\n self.assertIn(m1, m3)\n self.assertNotIn(m2, m3)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n m3(items, addition=3)\n self.assertListEqual(items, [16, 17, 18, 19, 20])\n m3 = 5 * (m1 + m2) # != 5*m1 + 5*m2 if m1 and m2 are not commutative\n self.assertEqual(len(m3), 10)\n self.assertIn(m1, m3)\n self.assertIn(m2, m3)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n expected_list = items[:]\n m3(items, addition=3, multiplier=2)\n for _ in range(5):\n for i in range(len(expected_list)):\n expected_list[i] = (expected_list[i] + 3) * 2\n self.assertListEqual(items, expected_list)\n m3 = 5 * m1 + 5 * m2\n self.assertEqual(len(m3), 10)\n self.assertIn(m1, m3)\n self.assertIn(m2, m3)\n self.assertIsInstance(m3, ExampleModel)\n items = [1, 2, 3, 4, 5]\n expected_list = items[:]\n m3(items, addition=3, multiplier=2)\n for i in range(len(expected_list)):\n expected_list[i] = (expected_list[i] + 5 * 3) * (2 ** 5)\n self.assertListEqual(items, expected_list)\n\n with self.assertRaises(TypeError):\n \"1\" * m1\n with self.assertRaises(TypeError):\n m1 * m2\n with self.assertRaises(TypeError):\n m1 * True\n with self.assertRaises(ValueError):\n m1 * 0\n with self.assertRaises(ValueError):\n m1 * -1\n with self.assertRaises(TypeError):\n m1 * 1.5\n\n def test_adding_not_addable(self):\n \"\"\"Test for trying to add models that can't be added\"\"\"\n with self.assertRaises(ModelCompositionException):\n NonComposableModel() + NonComposableModel()\n with self.assertRaises(ModelCompositionException):\n NonComposableModel() + ExampleMultiplicationModel()\n with self.assertRaises(ModelCompositionException):\n ExampleMultiplicationModel() + NonComposableModel()\n with self.assertRaises(ModelCompositionException):\n 4 * NonComposableModel()\n with self.assertRaises(ModelCompositionException):\n NonComposableModel() * 4\n\n x = NonComposableModel()\n 
self.assertEqual(len(x), 1)\n self.assertIn(x, x)\n y = 1 * x\n z = x * 1\n self.assertIn(z, y)\n self.assertIs(x, y)\n self.assertIs(z, x)\n\n\nclass TestConcatQuantumModels(unittest.TestCase):\n\n def test_depolar_concat(self):\n \"\"\"Test concatenation two depolar noise models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.7\n p2 = 0.2\n m1 = DepolarNoiseModel(p1, time_independent=True)\n m2 = DepolarNoiseModel(p2, time_independent=True)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q = ns.qubits.create_qubits(1)[0]\n m3([q, None])\n p = (1 - (1 - p1) * (1 - p2)) / 2\n self.assertTrue(np.allclose(\n qapi.reduced_dm(q),\n p * ku.ket2dm(ns.s1) + (1 - p) * ku.ket2dm(ns.s0)))\n\n def test_dephase_concat(self):\n \"\"\"Test concatenation two dephase noise models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.7\n p2 = 0.2\n m1 = DephaseNoiseModel(p1, time_independent=True)\n m2 = DephaseNoiseModel(p2, time_independent=True)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q = ns.qubits.create_qubits(1)[0]\n ns.qubits.operate(q, ns.H)\n m3([None, q])\n ns.qubits.operate(q, ns.H)\n p = (1 - (1 - p1) * (1 - p2)) / 2\n self.assertTrue(np.allclose(\n qapi.reduced_dm(q),\n p * ku.ket2dm(ns.s0) + (1 - p) * ku.ket2dm(ns.s1)))\n\n def test_dephase_plus_depolar(self):\n \"\"\"Test concatenation of a depolar with a dephase\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.7\n p2 = 0.2\n m1 = DephaseNoiseModel(p1, time_independent=True)\n m2 = DepolarNoiseModel(p2, time_independent=True)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q1, q2 = ns.qubits.create_qubits(2)\n ns.qubits.operate(q1, ns.H)\n ns.qubits.operate(q2, ns.H)\n m1([None, q2])\n m2([None, q2, None])\n m3([q1, None])\n self.assertTrue(np.allclose(qapi.reduced_dm(q1), qapi.reduced_dm(q2)))\n\n def test_concat_T1T2(self):\n \"\"\"Test concatenation two T1T2 noise models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n m1 = T1T2NoiseModel(400, 300)\n m2 = T1T2NoiseModel(200, 120)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n q1, q2 = ns.qubits.create_qubits(2)\n ns.qubits.operate(q1, ns.H)\n ns.qubits.operate(q2, ns.H)\n m1([None, q2])\n m2([None, q2, None])\n m3([q1, None])\n self.assertTrue(np.allclose(qapi.reduced_dm(q1), qapi.reduced_dm(q2)))\n\n def test_much_concatenation(self):\n \"\"\"Test concatenating lots of quantum error models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n m = {0: T1T2NoiseModel(400), 1: T1T2NoiseModel(T2=120), 2: DephaseNoiseModel(0.44, time_independent=True),\n 3: DepolarNoiseModel(0.44, time_independent=True), 4: T1T2NoiseModel(200, 120), 5: DepolarNoiseModel(0.78),\n 6: DephaseNoiseModel(0.2)}\n big_model = m[0]\n for i in range(1, 6):\n big_model += m[i]\n self.assertIsInstance(big_model, QuantumErrorModel)\n q1, q2 = ns.qubits.create_qubits(2)\n ns.qubits.operate(q1, ns.H)\n ns.qubits.operate(q2, ns.H)\n for model in m.values():\n model([None, q2, None])\n big_model([q1])\n self.assertTrue(np.allclose(qapi.reduced_dm(q1), qapi.reduced_dm(q2)))\n\n def test_loss_model_concat(self):\n \"\"\"Test concatenating loss models\"\"\"\n qform.set_qstate_formalism(qform.QFormalism.DM)\n p1 = 0.2\n p2 = 0.5\n p = p1 + p2 - p1 * p2 # probability that a qubit gets lost with two loss models\n m1 = FibreLossModel(p_loss_init=p1)\n m2 = FibreLossModel(p_loss_init=p2)\n m3 = m1 + m2\n self.assertIsInstance(m3, QuantumErrorModel)\n n = 50000\n qubits = ns.qubits.create_qubits(n)\n 
m3(qubits, length=0)\n nones = 0\n for qubit in qubits:\n if qubit is None:\n nones += 1\n self.assertAlmostEqual(nones, p * n, delta=n / 100)\n\n\nclass ExampleClassicalErrorModel(ClassicalErrorModel):\n def error_operation(self, items, delta_time=0, **kwargs):\n for i in range(len(items)):\n items[i] += 4\n\n\nclass TestConcatErrorModel(unittest.TestCase):\n\n def test_adding_classical_with_quantum(self):\n \"\"\"Test adding a classical with a quantum error model\"\"\"\n m1 = DephaseNoiseModel(0.4)\n m2 = ExampleClassicalErrorModel()\n m3 = m1 + m2\n self.assertIsInstance(m3, ErrorModel)\n\n # Test that doing this is completely useless as everything you input gets rejected by one or the other class\n with self.assertRaises(TypeError):\n m3([Qubit(\"test_qubit\")])\n with self.assertRaises(TypeError):\n m3([\"Test\"])\n\n\nclass TestConcatClassicalErrorModel(unittest.TestCase):\n\n def test_adding_classical(self):\n \"\"\"Test adding a classical error model\"\"\"\n m1 = ExampleClassicalErrorModel()\n m2 = ExampleClassicalErrorModel()\n m3 = m1 + m2\n self.assertIsInstance(m3, ClassicalErrorModel)\n\n with self.assertRaises(TypeError):\n m3([Qubit(\"test_qubit\")])\n\n\nclass TestConcatDelayModels(unittest.TestCase):\n\n def test_adding_delay_model(self):\n \"\"\"Test adding delay models\"\"\"\n m1 = FixedDelayModel(4)\n m2 = GaussianDelayModel(10, 0)\n m3 = m1 + m2 + m1\n self.assertIsInstance(m3, DelayModel)\n\n self.assertEqual(m3(), 18)\n self.assertEqual(m3.generate_delay(), 18)\n self.assertEqual(m3.get_mean(), 18)\n\n with self.assertRaises(ModelCompositionException):\n m3.set_mean(4)\n\n with self.assertRaises(ModelCompositionException):\n m3.get_std()\n\n\n# Test for inheritances\n\nclass TestModel(Model):\n\n def noise_operation(self, qubits, delta_time=0, **kwargs):\n pass\n\n def __init__(self, p=0):\n super().__init__()\n self.p = p\n\n @classmethod\n def concatenation_class(cls):\n return TestModel\n\n def compute_model(self, l):\n if self.is_concatenated:\n for model in self._models:\n l = model(l)\n else:\n l[0] = l[0] ** self.p\n return l\n\n\nclass A(TestModel):\n @classmethod\n def concatenation_class(cls):\n return A\n\n\nclass B(A):\n @classmethod\n def concatenation_class(cls):\n return B\n\n\nclass C(B):\n @classmethod\n def concatenation_class(cls):\n return C\n\n\nclass D(A):\n pass\n\n\nclass E(D):\n @classmethod\n def concatenation_class(cls):\n return E\n\n\nclass F(E):\n @classmethod\n def concatenation_class(cls):\n return F\n\n\nclass G(C, E):\n pass\n\n\nclass TestComposableModels(unittest.TestCase):\n\n def test_concatenation(self):\n \"\"\"Test basic concatenation\"\"\"\n model1 = TestModel()\n model2 = TestModel()\n model3 = TestModel()\n\n model12 = model1 + model2\n self.assertIsInstance(model12, TestModel)\n self.assertEqual(len(model12), 2)\n self.assertTrue(model1 in model12)\n self.assertTrue(model2 in model12)\n self.assertFalse(model12 in model12)\n model123 = model3 + model12\n self.assertIsInstance(model123, TestModel)\n self.assertEqual(len(model123), 3)\n self.assertTrue(model1 in model123)\n self.assertTrue(model2 in model123)\n self.assertTrue(model3 in model123)\n self.assertFalse(model12 in model123)\n # order\n self.assertEqual(model123._concatenated_models[0], model3)\n self.assertEqual(model123._concatenated_models[1], model1)\n self.assertEqual(model123._concatenated_models[2], model2)\n\n with self.assertRaises(TypeError):\n TestModel() + 1\n\n def test_self_concatenation(self):\n \"\"\"Test adding models to themselves\"\"\"\n model1 
= TestModel()\n model2 = model1 + model1\n\n self.assertIsInstance(model2, TestModel)\n self.assertEqual(len(model2), 2)\n\n def test_required_properties(self):\n \"\"\"Test if required properties are correctly copied over\"\"\"\n model1 = TestModel()\n model1._required_properties = [\"a\", \"b\"]\n\n model2 = TestModel()\n model2._required_properties = [\"a\", \"c\"]\n\n model12 = model1 + model2\n\n self.assertEqual(len(model12.required_properties), 3)\n self.assertTrue(\"a\" in model12.required_properties)\n self.assertTrue(\"b\" in model12.required_properties)\n self.assertTrue(\"c\" in model12.required_properties)\n\n model1._required_properties = [\"d\", \"e\"]\n self.assertEqual(len(model12.required_properties), 4)\n self.assertTrue(\"a\" in model12.required_properties)\n self.assertFalse(\"b\" in model12.required_properties)\n self.assertTrue(\"c\" in model12.required_properties)\n self.assertTrue(\"d\" in model12.required_properties)\n self.assertTrue(\"e\" in model12.required_properties)\n\n with self.assertRaises(ValueError):\n model12.required_properties = [\"k\"]\n\n self.assertTrue(model12.validate(a=1, b=2, c=3, d=4, e=5, f=6))\n self.assertTrue(model12.validate(a=1, c=3, d=4, e=5))\n self.assertFalse(model12.validate(a=1, c=3))\n self.assertFalse(model12.validate(d=1, e=3))\n\n def test_compute_model(self):\n \"\"\"Test if the models are executed in the correct order\"\"\"\n model1 = TestModel(3)\n model2 = TestModel(4)\n model12 = model1 + model2\n x = [2]\n model12(x)\n self.assertEqual(x[0], (2 ** 3) ** 4)\n model21 = model2 + model1\n x = [2]\n model21(x)\n self.assertEqual(x[0], (2 ** 4) ** 3)\n\n def test_diff_noise_model_concatenation(self):\n \"\"\"Test concatenation ability of different type of noise models\"\"\"\n\n self.assertEqual(type(A()), A)\n\n self.assertEqual(type(A() + A()), A)\n self.assertEqual(type(A() + B()), A)\n self.assertEqual(type(B() + B()), B)\n self.assertEqual(type(C() + F()), A)\n self.assertEqual(type(F() + C()), A)\n self.assertEqual(type(D() + D()), A)\n self.assertEqual(type(F() + F()), F)\n self.assertEqual(type(E() + B()), A)\n self.assertEqual(type(B() + E()), A)\n self.assertEqual(type(F() + E()), E)\n self.assertEqual(type(E() + F()), E)\n self.assertEqual(type(G() + F()), A)\n\n self.assertEqual(type(A() + A() + A()), A)\n self.assertEqual(type(A() + B() + B()), A)\n self.assertEqual(type(B() + B() + B()), B)\n self.assertEqual(type(C() + F() + G()), A)\n self.assertEqual(type(F() + C() + D()), A)\n self.assertEqual(type(D() + D() + E()), A)\n self.assertEqual(type(F() + F() + F()), F)\n self.assertEqual(type(E() + B() + C()), A)\n self.assertEqual(type(B() + E() + F()), A)\n self.assertEqual(type(F() + E() + F()), E)\n self.assertEqual(type(E() + F() + E()), E)\n self.assertEqual(type(G() + F() + G()), A)\n\n self.assertEqual(type((A() + A()) + (A() + A())), A)\n self.assertEqual(type((A() + B()) + (A() + B())), A)\n self.assertEqual(type((B() + B()) + (B() + B())), B)\n self.assertEqual(type((C() + F()) + (C() + F())), A)\n self.assertEqual(type((F() + C()) + (F() + C())), A)\n self.assertEqual(type((D() + D()) + (D() + D())), A)\n self.assertEqual(type((F() + F()) + (F() + F())), F)\n self.assertEqual(type((E() + B()) + (E() + B())), A)\n self.assertEqual(type((B() + E()) + (B() + E())), A)\n self.assertEqual(type((F() + E()) + (F() + E())), E)\n self.assertEqual(type((E() + F()) + (E() + F())), E)\n self.assertEqual(type((G() + F()) + (G() + F())), A)\n\n def test_multiplication(self):\n \"\"\"Test multiplication of 
models\"\"\"\n\n x = A() * 5\n self.assertEqual(type(x), A)\n self.assertEqual(len(x), 5)\n\n x = 5 * A()\n self.assertEqual(type(x), A)\n self.assertEqual(len(x), 5)\n\n x = 5 * A() * 5\n self.assertEqual(len(x), 25)\n self.assertEqual(type(x), A)\n\n a = A()\n x = a * 1\n self.assertEqual(x, a)\n\n x = 1 * a\n self.assertEqual(a, x)\n\n with self.assertRaises(TypeError):\n A() * B()\n\n with self.assertRaises(TypeError):\n \"a\" * A()\n\n with self.assertRaises(TypeError):\n A() * \"b\"\n\n with self.assertRaises(ValueError):\n -1 * A()\n\n with self.assertRaises(ValueError):\n A() * -1\n\n with self.assertRaises(TypeError):\n 1.5 * A()\n\n with self.assertRaises(TypeError):\n A() * 1.5\n\n with self.assertRaises(TypeError):\n 1.0 * A()\n\n with self.assertRaises(TypeError):\n A() * 1.0\n\n a = A(3) + A(2) * 3 + 2 * A(3)\n x = [2]\n a(x)\n self.assertEqual(len(a), 6)\n self.assertEqual(x[0], (((((2 ** 2) ** 2) ** 2) ** 3) ** 3) ** 3)\n\n ef = 2 * (E(3) + 2 * F(2))\n self.assertEqual(len(ef), 6)\n x = [2]\n ef(x)\n self.assertEqual(x[0], (((((2 ** 3) ** 2) ** 2) ** 3) ** 2) ** 2)\n\n def test_diamond_model(self):\n \"\"\"Test logic when adding models with multiple inheritance (such as diamond structure of models)\"\"\"\n\n # since H subclasses both C and F..\n class H(F, C):\n pass\n\n self.assertEqual(type(H() + C()), A) # Should this be CC?\n self.assertEqual(type(H() + F()), F) # Should this be AC?\n self.assertEqual(type(H() + H()), F) # What should this be?\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"danilopag/Quantum-Internet-Payment","sub_path":"lib/python3.10/site-packages/netsquid/components/models/tests/test_concatenatingmodels.py","file_name":"test_concatenatingmodels.py","file_ext":"py","file_size_in_byte":21885,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"10766675682","text":"import time\nimport logging\n\nfrom Cobalt.Util import init_cobalt_config, get_config_option\nfrom Cobalt.DataTypes.ProcessGroup import ProcessGroup\n\n_logger = logging.getLogger(__name__)\n\ninit_cobalt_config()\n\nPGROUP_STARTUP_TIMEOUT = float(get_config_option('alpssystem', 'pgroup_startup_timeout', 120.0))\nUSER_SESSION_HOSTS = [host.strip() for host in\n get_config_option('alpssystem', 'user_session_hosts', '').split(':')]\n\nclass ALPSProcessGroup(ProcessGroup):\n '''ALPS-specific PocessGroup modifications.'''\n\n def __init__(self, spec):\n super(ALPSProcessGroup, self).__init__(spec)\n self.alps_res_id = spec.get('alps_res_id', None)\n self.interactive_complete = False\n now = int(time.time())\n self.startup_timeout = int(spec.get(\"pgroup_startup_timeout\",\n now + PGROUP_STARTUP_TIMEOUT))\n\n def start(self):\n '''Start the process group. The ALPS version also sets the host to use.\n This host is in a list provided by the configuration file. If the host\n has an alps_script_forker instance running on it, those currently\n running jobs will be taken into account when selecting where to run.\n\n The forker host with the lowest number of locations\n\n Args:\n None\n\n Returns:\n None\n\n Raises:\n ProcessGroupStartupError: The start for the process group has failed\n and no child process id has been returned.\n\n Side Effects:\n Prompts the specified forker to start a job. 
In the event of an\n interactive job, sets a fake head pid (1) and notes which host\n should be used for the interactive job launch.\n\n '''\n if self.mode == 'interactive':\n if len(USER_SESSION_HOSTS):\n pass\n return super(ALPSProcessGroup, self).start()\n","repo_name":"ido/cobalt","sub_path":"src/lib/Components/system/ALPSProcessGroup.py","file_name":"ALPSProcessGroup.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"13035580323","text":"from dataclasses import dataclass\nfrom urllib.parse import urlparse\nimport re\nfrom exceptions import RtsfUrlParseError, PhoneParseError\n\n\nclass BaseParser:\n def __init__(self, value: str) -> None:\n self._value = value\n\n\n@dataclass\nclass RtsfUrlParsingResult:\n url: str\n evks_player_id: int\n\n\nclass RtsfUrlParser(BaseParser):\n \"\"\"https://rtsf.ru/ratings/player/{evks_player_id}\"\"\"\n\n player_path_re = re.compile(r\"(^/ratings/player/)([0-9]+$)\")\n\n def parse(self) -> RtsfUrlParsingResult:\n url = self._value.strip()\n parsed = urlparse(url)\n\n if parsed.netloc != \"rtsf.ru\":\n raise RtsfUrlParseError(\"Must be rtsf.ru\", url)\n\n path_match = self.player_path_re.match(parsed.path)\n if not path_match:\n raise RtsfUrlParseError(\"URL path does not match expected regexp\", url)\n\n player_id = int(path_match.groups()[1])\n return RtsfUrlParsingResult(url=url, evks_player_id=player_id)\n\n\nclass PhoneParser(BaseParser):\n phone_re = re.compile(r\"^(\\+7|7|8)\\d{10}$\")\n\n def parse(self) -> str:\n phone = (\n self._value.strip()\n .replace(\" \", \"\")\n .replace(\"-\", \"\")\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n )\n phone_match = self.phone_re.match(phone)\n if not phone_match:\n raise PhoneParseError(\"Phone does not match expected regexp\", phone)\n\n return self._format_phone(phone_match.group())\n\n def _format_phone(self, parsed_phone: str) -> str:\n if parsed_phone.startswith(\"7\") or parsed_phone.startswith(\"8\"):\n phone = parsed_phone[1:]\n else:\n phone = parsed_phone[2:]\n\n return \"+7 ({prefix}) {first}-{second}-{third}\".format(\n prefix=phone[:3], first=phone[3:6], second=phone[6:8], third=phone[8:]\n )\n","repo_name":"nkuznetsov44/foospollbot","sub_path":"foospollbot/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9066065803","text":"from sys import argv\r\nfrom time import time\r\n\r\n#usage heuristic.py -time/-notime path_to_input_file\r\nID_pos = 0\r\nn_pos = 1\r\nM_pos = 2\r\nstart_pos = 3\r\n\r\n#kontrola parametru\r\ndef checkParam():\r\n try:\r\n if argv[1] != '-time' and argv[1] != '-notime':\r\n return False\r\n spam = argv[2]\r\n except IndexError:\r\n return False\r\n return True\r\n\r\n#porovnavaci kriterium sortu\r\ndef getKey(elem):\r\n return elem[2]\r\n\r\n#reseni\r\ndef solve(n, M, buffer):\r\n buffer.sort(key=getKey, reverse=True) #serazeni podle heuristiky\r\n bagWeight = 0\r\n bagPrice = 0\r\n usedItems = []\r\n for x in buffer: #pridavani polozek do zaplneni batohu\r\n if bagWeight + x[0] <= M:\r\n bagWeight = bagWeight + x[0]\r\n bagPrice = bagPrice + x[1]\r\n usedItems.append(x[3])\r\n else:\r\n break\r\n return bagPrice, usedItems\r\n\r\n\r\ndef main():\r\n #kontrola parametru, otevreni souboru\r\n if not checkParam():\r\n print('Invalid parameters')\r\n exit(1)\r\n try:\r\n f = open(argv[2])\r\n except FileNotFoundError:\r\n 
print('File does not exist')\r\n exit(1)\r\n #nacteni a parsovani souboru\r\n lines = f.readlines()\r\n\r\n for line in lines:\r\n words = line.split(' ')\r\n buffer = []\r\n try: #prochazeni a ukladani instanci\r\n ID = int(words[ID_pos])\r\n n = int(words[n_pos])\r\n M = int(words[M_pos])\r\n index = 0\r\n i = 0\r\n while i < 2 * n:\r\n weight = int(words[start_pos + i])\r\n price = int(words[start_pos + i + 1])\r\n buffer.append((weight, price, price / weight, index))\r\n i = i + 2\r\n index = index + 1\r\n except IndexError:\r\n print('Invalid data format')\r\n exit(1)\r\n except ValueError:\r\n print('Invalid data format')\r\n exit(1)\r\n start_time = time() #mereni casu reseni instance\r\n price, usedItems = solve(n, M, buffer) #reseni instance\r\n if argv[1] == '-time': #vypis reseni a popripade casu\r\n stop_time = time()\r\n print(\"--- Instance %s completed, time %s seconds ---\" % (ID, stop_time - start_time))\r\n print(ID, n, price, end=' ')\r\n for x in range(n):\r\n if x in usedItems:\r\n print('', '1', end='')\r\n else:\r\n print('', '0', end='')\r\n print('')\r\n\r\nif __name__ == '__main__':\r\n start_time = time()#mereni celkoveho casu\r\n main()\r\n if argv[1] == '-time':\r\n stop_time = time()\r\n print('Total time: %s seconds' % (stop_time - start_time))","repo_name":"vojtyys/PAA_1","sub_path":"heuristic.py","file_name":"heuristic.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"8497590736","text":"import cv2\r\nimport PySimpleGUI as sg\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\nimport webbrowser\r\nfrom math import pi\r\n\r\nthresh=255\r\ngotImg=False\r\ngotTImg=False\r\nurl = \"https://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/TRAPP1/filter.html\"\r\n\r\nsg.theme('GreenTan')\r\nphotoO=[[sg.Image('',k='orig')]]\r\nphotoU=[[sg.Image('',background_color='black',k='updated')]]\r\n\r\ndef win1():\r\n layout1=[[sg.Input(size=80,key='fileB',enable_events=True),sg.FileBrowse(button_text='Browse All')],\r\n [sg.Frame('Original',photoO,size=(640,360),element_justification='center'),\r\n sg.Frame('Updated',photoU,size=(640,360),element_justification='center')],\r\n [sg.vbottom(sg.Text(\"Kernel Size:\")),sg.Slider(range=(1,10),orientation='h',default_value=1,enable_events=True,resolution=1,k='ksize')],\r\n #Kernel Size:\r\n [sg.vbottom(sg.Text(\"Sigma \")),sg.Slider(range=(0.4,2.5),orientation='h',default_value=0.4,enable_events=True,resolution=0.1,k='sigma')],\r\n\r\n [sg.vbottom(sg.Text(\"Theta \")),sg.Slider(range=(0,pi),orientation='h',default_value=0,enable_events=True,resolution=pi/16,k='theta')],\r\n\r\n [sg.vbottom(sg.Text(\"Lambda \")),sg.Slider(range=(1,5),orientation='h',default_value=1,enable_events=True,resolution=1,k='hl3')],\r\n\r\n [sg.vbottom(sg.Text(\"Gamma \")),sg.Slider(range=(0.2,1),orientation='h',default_value=0.2,enable_events=True,resolution=0.1,k='hulk')],\r\n\r\n [sg.vbottom(sg.Text(\"Psi \")),sg.Slider(range=(-pi,pi),orientation='h',default_value=0,enable_events=True,resolution=pi/8,k='spy')],\r\n [sg.Save()]]\r\n return sg.Window('Image Processing - Gabor Filter',layout1,relative_location=(0,-100),finalize=True)\r\n\r\n\r\ndef win2():\r\n layout2 = [[sg.Text(f'''General Instructions\r\n1. If the image output does not get updated recheck the selected checkbox to update output\r\n\r\n2.Input Image from your local directory, It can only read Image Files, any other file type will lead to error.\r\n\r\n3. 
Image can be of any dimensions, the implementation here will automatically modify the dimensions to (Width=640px,\r\nLength= 360px) \r\n\r\n4. The slider Values here represents the threshold for the five primary properties: Kernel Size,Sigma, Theta ,\r\nlambda,gamma and psi\r\n\r\n5.The Output Image can be saved by clicking on the save button, The default format is .png \r\n\r\n6. To access additional theory you require Internet access\r\n\r\n7. If you select the save option and close without specifying the save directory the module fails \r\n\r\n***************************************************************************\r\nImplementation by:\r\n\r\nViswadruth Akkaraju, Atanu Wadhwa and K Priya \r\n\r\nMachine Perception and Cognition class \r\n\r\nSRM Institute of Science and Technology\r\n***************************************************************************''', font='Lucida', size=(50, 33))],\r\n [sg.Button('Hyperlink to theory', font='Lucida', enable_events=True, key=\"-LINK-\")]]\r\n\r\n return sg.Window('Help', layout2, finalize=True)\r\n\r\nwindow1, window2 = win1(), win2()\r\n\r\ndef rez(image):\r\n color_converted = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\r\n pil_image = Image.fromarray(color_converted)\r\n x = ImageOps.pad(pil_image, (640, 360), color=None, centering=(0.5, 0.5))\r\n np_img = np.array(x)\r\n opencv_image = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)\r\n return opencv_image\r\n\r\nwhile True:\r\n window, event, values = sg.read_all_windows()\r\n if event == 'fileB':\r\n img=cv2.imread(values['fileB'])\r\n img = rez(img)\r\n window['orig'].update(data=cv2.imencode('.ppm', img)[1].tobytes())\r\n window['updated'].update(data=cv2.imencode('.ppm', img)[1].tobytes())\r\n gotImg=True\r\n fPath=values['fileB'].rsplit('/',1)[0]\r\n\r\n if gotImg:\r\n temp = int(values['ksize'])\r\n kernel = (temp, temp)\r\n kern = cv2.getGaborKernel(ksize=kernel, sigma=float(values['sigma']), theta=float(values['theta']),\r\n lambd=int(values['hl3']), gamma=float(values['hulk']), psi=float(values['spy']),\r\n ktype=cv2.CV_32F)\r\n kern /= 1.5 * kern.sum()\r\n img_f = cv2.filter2D(img, cv2.CV_8UC3, kern)\r\n img_f = np.maximum(np.zeros_like(img), img_f, np.zeros_like(img))\r\n window['updated'].update(data=cv2.imencode('.png', img_f)[1].tobytes())\r\n gotTImg=True\r\n\r\n if event == 'Save' and gotTImg:\r\n saveFile= sg.popup_get_file('',save_as=True,no_window=True,\r\n initial_folder=fPath,default_extension='.png')\r\n cv2.imwrite(saveFile,img_f)\r\n\r\n if event in (sg.WIN_CLOSED, 'Exit'):\r\n sg.popup('Close all windows')\r\n break\r\n\r\n if event == '-LINK-':\r\n sg.popup('Redirect to website')\r\n webbrowser.open(url)\r\n\r\nwindow.close()","repo_name":"ICB-TO/Image-Processing-simulation-modules","sub_path":"gabfilt.py","file_name":"gabfilt.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1443068429","text":"from dataclasses import dataclass\n\n@dataclass\nclass Local:\n name: str\n time: str\n master_rank: str\n skill_name: str\n flavor: str\n start: str\n over: str\n description: str\n ver: str\n ver_time: int\n\n# local\njp_local = Local(\n name='jp_name',\n time='jp_time',\n master_rank='jp_master_rank',\n skill_name='jp_skill_name',\n flavor='jp_flavor',\n start='jp_start',\n over='jp_over',\n description='jp_description',\n ver='jp',\n ver_time=9\n)\nas_local = Local(\n name='as_name',\n time='as_time',\n master_rank='as_master_rank',\n skill_name='as_skill_name',\n 
flavor='as_flavor',\n start='as_start',\n over='as_over',\n description='as_description',\n ver='as',\n ver_time=8\n)\n","repo_name":"zkelly3/MLTD-Data","sub_path":"web/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74763039259","text":"import math as m\n\ndef main():\n n = eval(input(\"What number in the Fibonnaci sequence would you like to see?: \"))\n count = 0\n x = 1\n y = 0\n\n while count < n:\n count = count + 1\n z = x + y\n x = y\n y = z\n print(z)\n\nmain()\n","repo_name":"drycode/zelle-python","sub_path":"chap08/exercise_1.py","file_name":"exercise_1.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"69"} +{"seq_id":"19807738776","text":"from unittest import TestCase\nfrom nesta.packages.nlp_utils.ngrammer import Ngrammer\n\n\nclass TestNgrammer(TestCase):\n def test_ngrammer(self):\n ngrammer = Ngrammer(database=\"production_tests\")\n ngrammer.ngrams.clear()\n ngrammer.ngrams[3].add('convolutional_neural_networks')\n ngrammer.ngrams[3].add('bed_and_breakfast')\n ngrammer.ngrams[2].add('neural_networks')\n document = (\"This is a document about machine \"\n \"learning, convolutional neural networks, \"\n \"neural networks and bed and breakfast\")\n processed_doc = ngrammer.process_document(document)\n for _, ngrams in ngrammer.ngrams.items():\n for ng in ngrams:\n self.assertIn(ng, processed_doc[0])\n","repo_name":"nestauk/old_nesta_daps","sub_path":"nesta/packages/nlp_utils/tests/test_ngrammer.py","file_name":"test_ngrammer.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"} +{"seq_id":"12330684770","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom astropy.stats import sigma_clipped_stats\nfrom matplotlib.patches import Circle\nfrom photutils import CircularAperture, CircularAnnulus, aperture_photometry\nfrom astropy.io import fits\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom photutils.utils import calc_total_error\nimport argparse\nimport logging\nimport sys\nfrom astropy.wcs import WCS\n\n# In[2]:\ndef get_logger(namespace, level='DEBUG', logfile=None):\n logger = logging.getLogger(namespace)\n formatter = logging.Formatter('%(name)s [l %(lineno)d] - %(levelname)s - %(message)s')\n\n if logfile is None:\n handler = logging.StreamHandler(sys.stdout)\n\n else:\n handler = logging.FileHandler(logfile)\n\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(level)\n return logger\n\n\ndef make_cutouts(\n image_path: str,\n position,\n half_size\n):\n data = fits.getdata(image_path)\n y_image_size, x_image_size = np.shape(data)\n x, y = position\n # logger.debug(f'{x},{y},{np.shape(data)}')\n if np.logical_and(x < half_size,y < half_size):\n cutout = data[0:y+half_size+1, 0:x+half_size+1]\n n_xpix = half_size-y\n n_ypix = half_size-x\n cutout = np.pad(cutout, ((n_ypix, 0), (n_xpix, 0)), 'constant')\n\n elif np.logical_and(x+half_size+1 > x_image_size, y+half_size+1 > y_image_size):\n cutout = data[y - half_size: y_image_size, x-half_size, x_image_size]\n n_xpix = (half_size+x+1) - x_image_size\n n_ypix = (half_size+y+1) - y_image_size\n cutout = np.pad(cutout, ((0, n_ypix), (0, n_xpix)), 'constant')\n\n elif y < half_size:\n logger.info(f'Cutout parameters are {y + half_size + 1}, {x - half_size}, {x + 
half_size + 1},{y_image_size},'\n f'{x_image_size}')\n cutout = data[0:y + half_size + 1, x - half_size:x + half_size + 1]\n n_pix = half_size - y\n cutout = np.pad(cutout, ((n_pix, 0), (0, 0)), 'constant')\n\n elif y + half_size + 1 > y_image_size:\n cutout = data[y - half_size: y_image_size, x - half_size: x + half_size + 1]\n n_pix = (half_size + y + 1) - y_image_size\n cutout = np.pad(cutout, ((0, n_pix), (0, 0)), 'constant')\n\n elif x < half_size:\n cutout = data[y - half_size: y + half_size + 1, 0:x + half_size + 1]\n n_pix = half_size - x\n cutout = np.pad(cutout, ((0, 0), (n_pix, 0)), 'constant')\n elif x + half_size > x_image_size:\n cutout = data[y - half_size:y + half_size + 1, x - half_size:x_image_size]\n n_pix = (half_size + x + 1) - x_image_size\n cutout = np.pad(cutout, ((0, 0), (0, n_pix)), 'constant')\n else:\n cutout = data[y - half_size:y + half_size + 1, x - half_size:x + half_size + 1]\n return cutout\n\n\n# In[73]:\n\n\ndef get_aperture_counts(diff_cutout, aper_diameter, bkg_in_diameter, bkg_out_diameter, x_offset = None, \n y_offset = None, gain=None, plot=False):\n\n # w = WCS(header)\n # x,y = w.all_world2pix(ra,dec,0)\n x, y = int(diff_cutout.shape[0] / 2), int(diff_cutout.shape[1] / 2)\n if x_offset is not None:\n x += x_offset\n y += y_offset\n if plot:\n fig, ax = plt.subplots()\n m, s = np.nanmean(diff_cutout), np.nanstd(diff_cutout)\n im = ax.imshow(diff_cutout, interpolation='nearest', cmap='gray',\n vmin=m - s, vmax=m + 10 * s, origin='lower')\n # c = Circle(xy=(x_img, y_img),radius=15)\n\n c = Circle(xy=(x, y), radius=aper_diameter / 2)\n c1 = Circle(xy=(x, y), radius=bkg_in_diameter / 2)\n c2 = Circle(xy=(x, y), radius=bkg_out_diameter / 2)\n c.set_facecolor('none')\n c.set_edgecolor('red')\n c1.set_facecolor('none')\n c1.set_edgecolor('red')\n c2.set_facecolor('none')\n c2.set_edgecolor('red')\n ax.add_artist(c)\n ax.add_artist(c1)\n ax.add_artist(c2)\n ax.set_xlim(x - 30, x + 30)\n ax.set_ylim(y - 30, y + 30)\n plt.savefig(r'aper_phot.pdf',bbox_inches='tight')\n\n aperture = CircularAperture((x, y), r=aper_diameter)\n annulus_aperture = CircularAnnulus((x, y), r_in=bkg_in_diameter / 2, r_out=bkg_out_diameter / 2)\n\n annulus_masks = annulus_aperture.to_mask(method='center')\n annulus_data = annulus_masks.multiply(diff_cutout)\n mask = annulus_masks.data\n annulus_data_1d = annulus_data[mask > 0]\n bkg_mean, bkg_median, bkg_std = sigma_clipped_stats(annulus_data_1d, sigma=2)\n # print(bkg_mean, bkg_median)\n bkg = np.zeros(diff_cutout.shape) + bkg_median\n bkg_error = np.zeros(diff_cutout.shape) + bkg_std\n\n aperture_mask = aperture.to_mask(method='center')\n \n if gain is None:\n gain = 1\n print('Gain not provided, setting gain to 1, uncertainties will be incorrect (underestimated)')\n \n effective_gain = gain\n error = calc_total_error(diff_cutout, bkg_error, effective_gain)\n phot_table = aperture_photometry(diff_cutout - bkg, aperture, error=error)\n counts = phot_table['aperture_sum'][0]\n counts_err = phot_table['aperture_sum_err'][0]\n \n return counts, counts_err\n\n\ndef aper_photometry(imgname, x, y, zp, aper_diameter, bkg_in_diameter, bkg_out_diameter, gain=None, cutout_size=None, plot=False):\n x_int, y_int = int(x), int(y)\n x_offset, y_offset = x - x_int, y - y_int\n position=(x_int, y_int)\n \n if cutout_size is None:\n cutout_size = bkg_out+20\n \n cutout = make_cutouts(imgname,position,half_size=int(cutout_size))\n\n counts, countserr = get_aperture_counts(diff_cutout=cutout, aper_diameter=aper_diameter, bkg_in_diameter=bkg_in, 
bkg_out_diameter=bkg_out, x_offset=x_offset, y_offset=y_offset, gain=gain, plot=plot)\n\n mag = -2.5*np.log10(counts) + zp\n magunc = 1.086*countserr/counts\n\n return mag, magunc\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"img\",type=str,help=\"Image name\")\n parser.add_argument(\"--ra\",type=float,default=None,help=\"RA\")\n parser.add_argument(\"--dec\",type=float,default=None,help=\"Dec\")\n parser.add_argument(\"--x\",type=float,default=None,help=\"x coordinate\")\n parser.add_argument(\"--y\",type=float,default=None,help=\"y coordinate\")\n parser.add_argument(\"--zp_key\",type=str,default='TMC_ZP',help=\"zeropoint key in header\")\n parser.add_argument(\"--gain_key\",type=str,default='GAIN',help=\"gain key in header\")\n parser.add_argument(\"--zp\",type=float,default=None,help=\"zeropoint\")\n parser.add_argument(\"--gain\",type=float,default=None,help=\"gain\")\n parser.add_argument(\"--aper\",type=float,default=5,help=\"aperture diameter\")\n parser.add_argument(\"--bkg_in\",type=float,default=20,help=\"background inner annulus diameter\")\n parser.add_argument(\"--bkg_out\",type=float,default=30,help=\"background outer annulus diameter\")\n parser.add_argument(\"--plot\",action='store_true', help=\"Plot thumbnail with apertures\")\n parser.add_argument(\"--cutout_size\",type=float,default=None,help=\"Cutout size for display\")\n\n\n args = parser.parse_args()\n\n logger = get_logger(__name__)\n \n\n imgname = args.img\n zp_key = args.zp_key\n gain_key = args.gain_key\n zp = args.zp\n cutout_size = args.cutout_size\n\n if zp is None:\n try:\n zp = float(fits.getval(imgname, zp_key))\n except KeyError:\n zp = 0\n logging.warning(f'Zeropoint not specified, or not found in header. Setting it to {zp}')\n\n x, y = args.x, args.y\n ra, dec = args.ra, args.dec\n if args.x is None:\n header = fits.getheader(imgname)\n wcs = WCS(header)\n if np.logical_or(ra is None, dec is None):\n err = 'Ra/Dec and x/y are not specified. Please specify either'\n logger.error(err)\n raise ValueError\n x, y = wcs.all_world2pix(ra,dec,0)\n\n logger.info(f'Setting x/y x : {x}, y:{y}')\n\n gain = args.gain\n\n if gain is None:\n try:\n gain = float(fits.getval(imgname, gain_key))\n\n except KeyError:\n gain = 1\n logger.warn(f'Gain not specified, or found in header. 
Setting to {gain}')\n\n\n aper_radius = args.aper\n bkg_in = args.bkg_in\n bkg_out = args.bkg_out\n mag, magunc = aper_photometry(imgname, x=x, y=y, zp=zp, aper_diameter=aper_radius, bkg_in_diameter=bkg_in, bkg_out_diameter=bkg_out, gain=gain, plot=args.plot, cutout_size=cutout_size)\n logger.info(f'Mag: {mag}, magerr: {magunc}')","repo_name":"virajkaram/ztf_utils","sub_path":"aperture_photometry.py","file_name":"aperture_photometry.py","file_ext":"py","file_size_in_byte":8535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"30435942279","text":"from avocado.utils import cpu\n\nfrom virttest import libvirt_cgroup\nfrom virttest import libvirt_version\nfrom virttest import utils_misc\nfrom virttest import virsh\n\nfrom virttest.libvirt_xml import vm_xml\nfrom virttest.utils_test import libvirt\n\n\ndef set_cpu_state(operate_cpu, set_value):\n \"\"\"\n Set cpu online or offline\n\n :params: operate_cpu: specific cpu index\n :params: set_value: 1 for online, 0 for offline\n \"\"\"\n if set_value == \"0\":\n cpu.online(operate_cpu)\n elif set_value == \"1\":\n cpu.offline(operate_cpu)\n\n\ndef run(test, params, env):\n \"\"\"\n Verify numa tuned guest vm is not affected when cpu is offline\n \"\"\"\n\n def setup_test():\n \"\"\"\n Prepare init xml\n \"\"\"\n numa_info = utils_misc.NumaInfo()\n online_nodes = numa_info.get_online_nodes_withmem()\n test.log.debug(\"Get online node with memory:%s\", online_nodes)\n\n node_cpus = numa_info.get_all_node_cpus()[\n online_nodes[offline_node_index]].strip().split(' ')\n\n params.update({'nodeset': online_nodes[nodeset_index]})\n params.update({'off_cpu': node_cpus[cpu_index]})\n set_cpu_state(params.get('off_cpu'), offline)\n is_cgroupv2 = libvirt_cgroup.CgroupTest(None).is_cgroup_v2_enabled()\n if not is_cgroupv2:\n test.log.debug(\"Need to keep original value in cpuset file under \"\n \"cgroup v1 environment for later recovery\")\n default_cpuset = libvirt_cgroup.CgroupTest(None).\\\n get_cpuset_cpus(vm_name)\n params.update({'default_cpuset': default_cpuset})\n\n def run_test():\n \"\"\"\n Start vm and check result\n \"\"\"\n test.log.info(\"TEST_STEP1: Set hugepage and guest boot \")\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n vm_attrs_new = eval(vm_attrs % params['nodeset'])\n vmxml.setup_attrs(**vm_attrs_new)\n\n result = virsh.define(vmxml.xml, debug=True, ignore_status=True)\n if libvirt_version.version_compare(9, 4, 0) and \\\n tuning == \"restrictive\" and binding == \"guest\":\n libvirt.check_result(result, expected_fails=err_msg,\n check_both_on_error=True)\n return\n else:\n libvirt.check_exit_status(result)\n\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n test.log.debug(\"The new xml is:\\n%s\", vmxml)\n\n test.log.info(\"TEST_STEP2: Start vm\")\n vm.start()\n\n def teardown_test():\n \"\"\"\n Clean data.\n \"\"\"\n test.log.info(\"TEST_TEARDOWN: Clean up env.\")\n bkxml.sync()\n is_cgroupv2 = libvirt_cgroup.CgroupTest(None).is_cgroup_v2_enabled()\n if not is_cgroupv2:\n test.log.debug(\"Reset cpuset file under cgroup v1 environment\")\n libvirt_cgroup.CgroupTest(None).set_cpuset_cpus(\n params['default_cpuset'], vm_name)\n\n vm_name = params.get(\"main_vm\")\n vm = env.get_vm(vm_name)\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n bkxml = vmxml.copy()\n\n vm_attrs = params.get(\"vm_attrs\")\n nodeset_index = int(params.get('nodeset_index'))\n offline_node_index = int(params.get('offline_node_index'))\n cpu_index = 
int(params.get('cpu_index'))\n offline = params.get(\"offline\")\n err_msg = params.get(\"err_msg\")\n tuning = params.get(\"tuning\")\n binding = params.get(\"binding\")\n\n try:\n setup_test()\n run_test()\n\n finally:\n teardown_test()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/numa/guest_numa_node_tuning/numa_mem_binding_with_offline_cpu.py","file_name":"numa_mem_binding_with_offline_cpu.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"} +{"seq_id":"4083279272","text":"'''\nhttps://leetcode-cn.com/problems/max-value-of-equation/\n1499. 满足不等式的最大值\n'''\nfrom typing import List\n\nclass Solution:\n def findMaxValueOfEquation(self, points: List[List[int]], k: int) -> int:\n '''\n 坐标 x 的值从小到大排序\n premin/max\n 求 y[i] + y[j] +|x[i]-x[j]| 最大,且 |x[i]-x[j]| <=k\n i > j => y[i] + x[i] + y[j] - x[j]\n 维护一个y[j] - x[j] 的递减队列,\n '''\n ans = -1e9\n \n '''\n queue : 上界:x[j] >= x[i]-k\n 下界:j<=i-1\n '''\n q = []\n for i, p in enumerate(points):\n # 清除不符合条件的队列内容 x2-x1 > k两点间距离>k\n while q and points[q[0]][0] < p[0]-k:\n q.pop(0) # pop 左侧pop\n\n # ans = max(ans, y[i] + x[i] + y[j] - x[j])\n if q:\n x = points[q[0]]\n ans = max(ans, p[1] + p[0] + x[1]-x[0])\n \n '''\n 维护queue单调性是 y[j]-x[j]的递减队列\n 如果当前队列中y-x < 当前y-x就去除掉\n '''\n while q and \\\n points[q[-1]][1] - points[q[-1]][0] <= p[1]-p[0]:\n q.pop() # 右侧pop y1-x1 < y-x \n q.append(i) # 把当前点加入到队尾\n return ans\n","repo_name":"zhuangzhi/leetcode","sub_path":"week7/le1499.py","file_name":"le1499.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"27266016694","text":"from pathlib import Path\nfrom collections import deque\nfrom typing import Optional\n\ndir_path = Path(__file__).resolve().parent\nwith open(f\"{dir_path}/day10_input.txt\") as file:\n data = file.read().splitlines()\n\n\ndef cycle(input: list[str]) -> list[int]:\n stack = deque()\n for line in input:\n if \"noop\" in line:\n stack.append(None)\n else:\n _, val = line.split(\" \")\n stack.append(int(val))\n cycle, X = 0, 1\n cycles = list()\n while len(stack) > 0:\n register = stack.popleft() if stack else None\n if register:\n cycles.append(X)\n cycles.append(X)\n X += register\n else:\n cycles.append(X)\n return cycles\n\n\ndef calculate_signals(cycles: list[int]) -> list[tuple[int, int]]:\n return [\n (cycles[x], (x + 1) * cycles[x])\n for x in range(len(cycles))\n if x + 1 in {20, 60, 100, 140, 180, 220}\n ]\n\n\ndef draw_sprite(cycles: list[int]) -> None:\n for x in range(0, 240, 40):\n draw_line(cycles[x : x + 40])\n\n\ndef draw_line(cycles: list[int]) -> None:\n line = \"\"\n for i, idx in enumerate(cycles):\n if i in [idx-1, idx, idx+1]:\n line+='#'\n else:\n line+='.'\n print(line)\n\ncycles = cycle(data)\n\nsignals = calculate_signals(cycles)\nprint(sum([y for x, y in signals]))\n\ndraw_sprite(cycles)\n","repo_name":"neil-sriv/advent_of_code","sub_path":"2022/completed/day10.py","file_name":"day10.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7161080310","text":"__author__ = 'jason.parent@carneylabs.com (Jason Parent)'\n\n# Django imports...\nfrom django.conf.urls import patterns\nfrom django.conf.urls import url\n\nurlpatterns = patterns('users.views',\n url(r'^$', 'home_view', name='home'),\n url(r'^list/$', 'list_view', name='list'),\n url(r'^requests/$', 
'requests_view', name='requests'),\n url(r'^friends/$', 'friends_view', name='friends'),\n url(r'^friends/(?P\\d+)/add/$', 'add_view', name='add'),\n url(r'^friends/(?P\\d+)/accept/$', 'accept_view', name='accept'),\n url(r'^friends/(?P\\d+)/reject/$', 'reject_view', name='reject'),\n url(r'^feed/$', 'feed_view', name='feed'),\n)","repo_name":"ParentJA/friends_with_recipes","sub_path":"users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"14273687788","text":"# this activity will allow encryption and decryption of vowels only.\n\n# define when to stop\n\nuser_stop = False\n\n# simple greeting!\nprint(\"Hi there! I am PyCipher-Simple! I only encrypt and decrypt vowels.\")\n\nwhile user_stop == False:\n\n # ask user to input an encrypted statement.\n encrypted = input(\"Indicate whether you wish to 'encrypt', 'decrypt', or 'stop' the program: \")\n\n # check if the user wanted to stop\n\n if encrypted.lower() == \"stop\":\n\n #set the initial variable to True to break the loop\n\n user_stop = True\n\n # say goodbye!\n \n print(\"Goodbye and hope to see you again!\")\n \n #encoder\n if encrypted.lower() == \"encrypt\":\n #ask for word to encrypt\n to_encrypt = input(\"You have chosen 'encrypt'! Please type the message to encode: \")\n\n #replace every vowel with the equivalent cipher symbol.\n first_vowel = to_encrypt.replace('a', '*')\n second_vowel = first_vowel.replace('e', '&')\n third_vowel = second_vowel.replace('i', '#')\n fourth_vowel = third_vowel.replace('o', '+')\n encrypt_final = fourth_vowel.replace('u', '!')\n\n #print the result!\n print(\"Original text: \", to_encrypt)\n print(\"Encrypted version: \", encrypt_final)\n\n if encrypted.lower() == \"decrypt\":\n # ask for word to decrypt\n todecypt = input(\"You have chosen 'decrypt'! Please type the message to decode: \")\n\n # attempt to replace every encrypted symbol with an appropriate equivalent.\n first_vowel = todecypt.replace('*', 'a')\n second_vowel = first_vowel.replace('&', 'e')\n third_vowel = second_vowel.replace('#', 'i')\n fourth_vowel = third_vowel.replace('+', 'o')\n final_decrypted = fourth_vowel.replace('!', 'u')\n\n # print the decrypted message in low caps\n print(\"The encrypted message is: \", todecypt)\n print(\"The decrypted message is: \", final_decrypted.lower())","repo_name":"EnzoPinon/Py-cipher","sub_path":"pycipher-simple.py","file_name":"pycipher-simple.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"72651835740","text":"# LeetCode 200. 
Number of Island\n# https://leetcode.com/problems/number-of-islands/\n\n# 섬의 갯수를 세는 문제 (DFS)\n\n# Runtime : 415ms(76.39%)\n# Memory Usage : 16.9MB (6.67%)\n\n# if not grid : return 0 추가하면 속도 훨씬 빨라짐.\n\nclass Solution: \n def numIslands(self, grid: List[List[str]]) -> int:\n answer = 0\n for y in range(len(grid)): # len(grid) -> grid의 원소 갯수 4 출력 0~3\n for x in range(len(grid[0])): # 여기서 len(grid[0])면 5가 출력 0~4\n if grid[y][x] == '1': # grid[y][x]의 값이 1일 경우 answer 1씩 가산 \n self.count(y,x,grid) # 그 후 상하좌우 1값지우는 재귀함수\n answer +=1\n \n return answer\n \n def count(self, y, x, grid):\n if y < 0 or x < 0 or y >= len(grid) or x >= len(grid[0]) or grid[y][x] != '1':\n return\n grid[y][x] = '#'\n self.count(y+1,x,grid) # 상\n self.count(y-1,x,grid) # 하\n self.count(y,x-1,grid) # 좌\n self.count(y,x+1,grid) # 우","repo_name":"Leepilung/Algorithm_Study","sub_path":"LeetCode/medium/200.py","file_name":"200.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"15404846237","text":"import cv2\n\nindex = 0\nframes = []\n\nwhile True:\n cap = cv2.VideoCapture(index)\n if not cap.read()[0]:\n break\n else:\n print(f\"Camera Device {index}: OK\")\n\n index += 1\n\nwhile True:\n index = 0\n frames = []\n\n # Capture frames from all connected cameras\n while True:\n cap = cv2.VideoCapture(index)\n if not cap.read()[0]:\n break\n ret, frame = cap.read()\n frames.append(frame)\n index += 1\n\n # Concatenate frames horizontally and display the result\n if frames:\n result = cv2.hconcat(frames)\n cv2.imshow(\"Video Feed\", result)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncv2.destroyAllWindows()\n","repo_name":"shri-vibhor-sharma/python-apps","sub_path":"video-cam-feed.py","file_name":"video-cam-feed.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"72076745181","text":"import pandas as pd\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom recommender import params, simple_search, keyword_search\nfrom prepare import gen_sim_matrix\n\napp = Flask(__name__)\nCORS(app)\ndf = None\ncosine_func = None\n\n\n@app.route('/', methods=['GET'])\ndef entry():\n return jsonify(message='Ready')\n\n\n@app.route('/simple', methods=['GET'])\ndef simple():\n cuisine = request.args.get('cuisine', params['cuisine'])\n price = request.args.get('price', params['price'])\n city = request.args.get('city', params['city'])\n\n print('Simple search for: ', cuisine, price, city)\n\n results = simple_search(df, cuisine=cuisine, price=price, city=city)\n return jsonify(results)\n\n\n@app.route('/keyword', methods=['GET'])\ndef keyword():\n searchword = request.args.get('name', '')\n\n if searchword == '':\n return jsonify(message='Provide a restaurant name.')\n\n print('Keyword search for: ', searchword)\n\n results = keyword_search(df, cosine_func, searchword)\n return jsonify(results)\n\n\nif __name__ == '__main__':\n print('Loading dataset.')\n df = pd.read_pickle('./data/dataset.pkl')\n print('Generating sim matrix')\n cosine_func = gen_sim_matrix(df)\n app.run(host='0.0.0.0', port=80)\n","repo_name":"hmatalonga/restaurant-recommender-system","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"29399934453","text":"from dotmap import DotMap\nfrom 
torch import nn\nfrom torch.optim import Adam\nimport torch\n\nfrom torch_utils import empirical_kl\nfrom trainers.modify_mnist_trainer import ModifyMNISTTrainer\nfrom data.modify_mnist_data import MNISTData\nfrom models.lenet5 import lenet5\n\nconfig = DotMap()\nconfig.seed = 1234\n\nconfig.trainer = ModifyMNISTTrainer\nconfig.tp.epochs = 5\nconfig.tp.log_train_every = 1000\nconfig.tp.loss = nn.NLLLoss()\nconfig.tp.teacher_loss = nn.NLLLoss()\nconfig.tp.student_loss = nn.NLLLoss()\nconfig.tp.test_loss = nn.NLLLoss() \nconfig.tp.use_gpu = True\nconfig.tp.device = torch.device('cuda') if config.tp.use_gpu else torch.device('cpu')\n\nconfig.opt = Adam\nconfig.op.lr = 1e-3\n\nconfig.dataset = MNISTData\nconfig.dp.device = config.tp.device\nconfig.dp.seed = config.seed\nconfig.dp.batch_size = 128\nconfig.dp.resolution = (28, 28)\nconfig.dp.num_classes = 10\n\nconfig.teacher.model = lenet5\nconfig.teacher.device = config.tp.device\nconfig.teacher.input_size = config.dp.resolution[0] * config.dp.resolution[1]\nconfig.teacher.output_size = config.dp.num_classes\nconfig.teacher.activation = nn.ReLU()\nconfig.teacher.output_activation= nn.LogSoftmax(dim=1)\n\nconfig.student.model = lenet5\nconfig.student.device = config.tp.device\nconfig.student.input_size = config.dp.resolution[0] * config.dp.resolution[1]\nconfig.student.output_size = config.dp.num_classes\nconfig.student.activation = nn.ReLU()\nconfig.student.output_activation= nn.LogSoftmax(dim=1)\n","repo_name":"ayushkamat/eecs_229a_final_project","sub_path":"configs/modify_mnist.py","file_name":"modify_mnist.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"18500630174","text":"import random\n\n\ndef calculate_key(_, __):\n array_for_key_computation = [3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8]\n key_summa = 0\n for ___ in __:\n key_summa += int(___) * array_for_key_computation[_]\n _ += 1\n ____ = key_summa - (key_summa // 11) * 11\n if ____ < 10:\n __.append(str(____))\n else:\n __.append(str(____ - (____ // 10) * 10))\n return __\n\n\ndef create_inn_without_keys(_):\n __ = []\n for i in range(0, _):\n __.append(str(random.randint(0, 9)))\n return __\n\n\ndef create_russian_random_inn(characters=10):\n \"\"\"\n :param characters: 10 - organization, 12 - IP\n :return: valid inn\n \"\"\"\n if characters == 10:\n valid_inn = calculate_key(2, create_inn_without_keys(9))\n else:\n valid_inn = calculate_key(0, calculate_key(1, create_inn_without_keys(10)))\n return ''.join(valid_inn)\n","repo_name":"GrigoriiLikhachev/createJsonSchema","sub_path":"create_russian_random_inn.py","file_name":"create_russian_random_inn.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32928275652","text":"def primeNumber(n):\n \n if n>0:\n count = 0\n j=2\n while j<=n:\n if n%j == 0:\n count+=1\n j+=1\n if count==1:\n return True\n elif n<0:\n count = 0\n j=-2\n while j>=n:\n if n%j == 0:\n count+=1\n j-=1\n if count==1:\n return True\n\n\n return False\n\ndef generateDude(num1,num2):\n li = []\n for i in range(num1,num2):\n if primeNumber(i):\n li.append(i)\n return abs(min(li)+max(li))\n\n\n\ndef main():\n num1, num2 = map(int, input().split(\" \"))\n if num1 < num2:\n otp = generateDude(num1,num2)\n print(otp)\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"omkumar01/placement-practice-material","sub_path":"problem_otp.py","file_name":"problem_otp.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32630815506","text":"'''test Flask'''\n\nfrom flask import Flask, render_template, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField\nfrom wtforms.validators import DataRequired\n\nfrom app import translate, generate\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'completely_unique_secret_keyZfAb'\n\nBootstrap(app)\n\nclass TranslateForm(FlaskForm):\n name = StringField('Insert your Shakespearean English here', validators=[DataRequired()])\n submit = SubmitField('Translate')\n\nclass PromptForm(FlaskForm):\n name = StringField('Insert your Shakespearean prompt here', validators=[DataRequired()])\n submit = SubmitField('Generate text')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/translator', methods=['GET', 'POST'])\ndef translator():\n translate_form = TranslateForm()\n\n original = ''\n translation = ''\n\n if translate_form.validate_on_submit():\n original = translate_form.name.data\n translation = translate(original)\n\n return render_template(\n 'translator.html',\n form=translate_form,\n original=original,\n translation=translation,\n )\n\n@app.route('/generator', methods=['GET', 'POST'])\ndef generator():\n prompt_form = PromptForm()\n\n prompt = ''\n generation = ''\n\n if prompt_form.validate_on_submit():\n prompt = prompt_form.name.data\n generation = generate(prompt)\n\n return render_template(\n 'generator.html',\n form=prompt_form,\n prompt=prompt,\n generation=generation\n )\n\n","repo_name":"landonwork/hackusu","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"4947966320","text":"\n\"\"\"Utilities for downloading data , tokenizing, vocabularies.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport gzip\nimport os\nimport re\nimport tarfile\n\nfrom six.moves import urllib\n\nfrom tensorflow.python.platform import gfile\n\n# Special vocabulary symbols - we always put them at the start.\n_PAD = b\"_PAD\"\n_GO = b\"_GO\"\n_EOS = b\"_EOS\"\n_UNK = b\"_UNK\"\n_START_VOCAB = [_PAD, _GO, _EOS, _UNK]\n\nPAD_ID = 0\nGO_ID = 1\nEOS_ID = 2\nUNK_ID = 3\n\n# Regular expressions used to tokenize.\n_CHAR_SPLIT = re.compile(b\"([.,!?\\\"':;)(])\")\n_DIGIT_RE = re.compile(br\"\\d\")\n\n\ndef maybe_download(directory, filename, url):\n \"\"\"Download filename from url unless it's already in directory.\"\"\"\n if not os.path.exists(directory):\n print(\"Creating directory %s\" % directory)\n os.mkdir(directory)\n filepath = os.path.join(directory, filename)\n if not os.path.exists(filepath):\n print(\"Downloading %s to %s\" % (url, filepath))\n filepath, _ = urllib.request.urlretrieve(url, filepath)\n statinfo = os.stat(filepath)\n print(\"Succesfully downloaded\", filename, statinfo.st_size, \"bytes\")\n return filepath\n\n\ndef gunzip_file(gz_path, new_path):\n \"\"\"Unzips from gz_path into new_path.\"\"\"\n print(\"Unpacking %s to %s\" % (gz_path, new_path))\n with gzip.open(gz_path, \"rb\") as gz_file:\n with open(new_path, \"wb\") as new_file:\n for line in gz_file:\n 
new_file.write(line)\n\n\ndef get_rev_enhn_train_set(directory):\n \"\"\"Check whether training files exist\"\"\"\n print(directory)\n train_path = os.path.join(directory, \"train\")\n if not (gfile.Exists(train_path +\".hn\") and gfile.Exists(train_path +\".en\")):\n raise ValueError(\"Training files %s not found.\", train_path)\n return train_path\n\n\ndef get_rev_enhn_dev_set(directory):\n \"\"\"Check whether Development files exist.\"\"\"\n dev_name = \"valid\"\n dev_path = os.path.join(directory, dev_name)\n if not (gfile.Exists(dev_path + \".hn\") and gfile.Exists(dev_path + \".en\")):\n raise ValueError(\"Development files %s not found.\", dev_path)\n return dev_path\n\n\ndef basic_tokenizer(sentence):\n \"\"\"Very basic tokenizer: split the word into a list of tokens.\"\"\"\n words = []\n for space_separated_fragment in sentence.strip().split():\n words.extend(re.split('_', space_separated_fragment))\n list1 = [w for w in words if w]\n return list1\n\n\ndef create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=False):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"rb\") as f:\n counter = 0\n for line in f:\n counter += 1\n if counter % 10000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, b\"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n print (vocab)\n print (\".................\")\n print (vocab.get)\n sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get, reverse=True)\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")\n\n\ndef initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)\n\n\ndef word_to_token_ids(word, vocabulary,\n tokenizer=None, normalize_digits=False):\n \n if tokenizer:\n chars = tokenizer(word)\n else:\n chars = basic_tokenizer(word)\n if not normalize_digits:\n return [vocabulary.get(w, UNK_ID) for w in chars]\n # Normalize digits by 0 before looking chars up in the vocabulary.\n return [vocabulary.get(re.sub(_DIGIT_RE, b\"0\", w), UNK_ID) for w in chars]\n\n\ndef data_to_token_ids(data_path, target_path, vocabulary_path,\n tokenizer=None, normalize_digits=False):\n if not gfile.Exists(target_path):\n print(\"Tokenizing data in %s\" % data_path)\n vocab, _ = initialize_vocabulary(vocabulary_path)\n with gfile.GFile(data_path, mode=\"rb\") as data_file:\n with gfile.GFile(target_path, mode=\"w\") as tokens_file:\n counter = 0\n for line in data_file:\n counter += 1\n if counter % 10000 == 0:\n print(\" tokenizing line %d\" % counter)\n token_ids = word_to_token_ids(line, vocab, tokenizer,\n normalize_digits)\n tokens_file.write(\" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n\n\ndef prepare_rev_data(data_dir, en_vocabulary_size, hn_vocabulary_size, tokenizer=None):\n \n # Get REV 
data to the specified directory.\n train_path = get_rev_enhn_train_set(data_dir)\n dev_path = get_rev_enhn_dev_set(data_dir)\n\n # Create vocabularies of the appropriate sizes.\n hn_vocab_path = os.path.join(data_dir, \"vocab%d.hn\" % hn_vocabulary_size)\n en_vocab_path = os.path.join(data_dir, \"vocab%d.en\" % en_vocabulary_size)\n create_vocabulary(hn_vocab_path, train_path + \".hn\", hn_vocabulary_size, tokenizer)\n create_vocabulary(en_vocab_path, train_path + \".en\", en_vocabulary_size, tokenizer)\n\n # Create token ids for the training data.\n hn_train_ids_path = train_path + (\".ids%d.hn\" % hn_vocabulary_size)\n en_train_ids_path = train_path + (\".ids%d.en\" % en_vocabulary_size)\n data_to_token_ids(train_path + \".hn\", hn_train_ids_path, hn_vocab_path, tokenizer)\n data_to_token_ids(train_path + \".en\", en_train_ids_path, en_vocab_path, tokenizer)\n\n # Create token ids for the development data.\n hn_dev_ids_path = dev_path + (\".ids%d.hn\" % hn_vocabulary_size)\n en_dev_ids_path = dev_path + (\".ids%d.en\" % en_vocabulary_size)\n data_to_token_ids(dev_path + \".hn\", hn_dev_ids_path, hn_vocab_path, tokenizer)\n data_to_token_ids(dev_path + \".en\", en_dev_ids_path, en_vocab_path, tokenizer)\n\n return (en_train_ids_path, hn_train_ids_path,\n en_dev_ids_path, hn_dev_ids_path,\n en_vocab_path, hn_vocab_path)\n","repo_name":"shikha369/Seq2SeqTransliteration","sub_path":"base code/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":6552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42857552592","text":"def get_appliance_disk_information(\n self,\n ne_id: str,\n) -> dict:\n \"\"\"Get disk information from appliance\n\n .. list-table::\n :header-rows: 1\n\n * - Swagger Section\n - Method\n - Endpoint\n * - disks\n - GET\n - /configReportDisk/{neId}\n\n :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``\n :type ne_id: str\n :return: Returns dictionary of disk information \\n\n * keyword **disks** (`dict`): Dictionary of disks in Edge\n Connect appliance. Disks are identified by numeric strings,\n e.g. 
``\"0\"``\n * keyword **controller** (`dict`): Dictionary of controller\n information\n * keyword **diskImage** (`str`): Filename of disk image\n :rtype: dict\n \"\"\"\n return self._get(\"/configReportDisk/{}\".format(ne_id))\n","repo_name":"SPOpenSource/edgeconnect-python","sub_path":"pyedgeconnect/orch/_disks.py","file_name":"_disks.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"69"} +{"seq_id":"42979942211","text":"import os\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler \nfrom sklearn.metrics import roc_auc_score\n\n# Thresholds \nTOTAL_THRES = 270\nMIDDLE_THRES = 210\nSTATUS_THRES = 20\n\n# Required directory paths\ndata_dir = '/opt/ml/code/input/train.csv'\nmodel_dir = '/opt/ml/model'\n\n\ndef generate_label_status(df_original, target_date, status_thres=STATUS_THRES):\n \"\"\" Labeling based on whether customer purchase at target date(month)\n This label dosen't care about whether he will buy more than $300(TOTAL_THRES)\n\n Args:\n df_original (pd.DataFrame): monthly frame\n target_date (str): date to apply prediction\n status_thres (int): threshold to consider as 'purchased'\n \n Returns:\n purchase status label (0/1) on target date\n \"\"\"\n\n df = df_original.copy()\n\n label_values = df[target_date]\n label = (label_values > status_thres).astype(int)\n label = label.sort_index().to_frame(name='status_label').reset_index()\n\n return label\n\n\ndef generate_label_total(df_original, middle_thres=MIDDLE_THRES, total_thres=TOTAL_THRES, status_thres=STATUS_THRES):\n \"\"\" Calculate consumption expectations of each customer based on average consumption history\n\n https://stats.stackexchange.com/questions/135061/best-method-for-short-time-series\n Averaging is one of the strongest method. ㅇ _ㅇ\n\n Args:\n df_original (pd.DataFrame): monthly frame\n middle_thres (int): if 'middle_thres < expectation < total_thres' is true, apply 0.5\n total_thres (int): if 'expectation > total_thres' is true, apply 1.0\n status_thres (int): Averaging only above status_thres\n \n Returns:\n purchase total label (0/0.5/1) on target date\n \"\"\"\n\n df = df_original.copy()\n label = df[df > status_thres].mean(axis=1)\n label[label < middle_thres] = 0\n label[(label > middle_thres) & (label < total_thres)] = 0.5\n label[label >= total_thres] = 1\n label = label.sort_index().to_frame(name='total_label').reset_index()\n\n return label\n\n\ndef generate_monthly_frame(df_original, categories):\n \"\"\" Generate monthly purchase data calculated for each customer\n\n Args:\n df_original (pd.DataFrame): DataFrame dropped with unnecessary columns\n categories (list[str]): numerical categories to be aggregated\n\n Returns:\n monthly frame\n \n ex)\n ym 2009-12 2010-01 ... 2011-10 2011-11 \n customer_id \n 12346 187.2750 -22.275 ... 0.000 0.0000 \n 12349 -39.8475 0.000 ... 1763.058 330.0000 \n ... ... ... ... ... ... \n 18286 763.8675 0.000 ... 0.000 0.0000 \n 18287 -8.4150 0.000 ... 
0.000 1768.1565 \n\n \"\"\"\n\n df = df_original.copy()\n\n # -- groupby / pivot_table\n df = df.groupby(['customer_id', 'ym'])[categories].sum().reset_index()\n monthly_frame = pd.pivot_table(data=df,\n values=categories,\n index='customer_id',\n columns='ym',\n fill_value=0)\n\n return monthly_frame\n\n\ndef time_series_processing(df_original, categories, train=True):\n \"\"\" Generate features that reflect the characteristics of time series data.\n In this function, SUM and SKEW will be applied to each specified time period.\n\n - Seasonality(Continuity): The person who bought recently will buy again\n 1. Last 10 months | 2. Last 7 months | 3. Last 4 months \n\n - Cyclicity(Periodicity): People buy things regularly\n 1. two-months interval | 2. three-months interval | 3. annually(1 year interval)\n\n - Weak-Cyclicity: The person who bought last year will buy again at near month\n - Around last year's target month\n\n Args:\n df_original (pd.DataFrame): monthly frame\n categories (list[str]): aggregated categories\n train (bool): True if df_original is train data\n\n Returns:\n Specialized time series data\n \"\"\"\n \n df = df_original.copy()\n \n # -- Declare period list that reflect each attribute\n if train:\n target_date = '2011-11'\n\n # -- seasonality\n seasons1 = ['2011-01', '2011-02', '2011-03', '2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10']\n seasons2 = ['2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10']\n seasons3 = ['2011-07', '2011-08', '2011-09', '2011-10']\n\n # -- cyclicity\n cycle1 = ['2011-01', '2011-03', '2011-05', '2011-07', '2011-09']\n cycle2 = ['2010-08', '2010-11', '2011-02', '2011-05', '2011-08']\n cycle3 = ['2010-11']\n\n # -- weak-periodicity\n weak_cycle = ['2009-12', '2010-10', '2010-11', '2010-12']\n\n else: \n target_date = '2011-12'\n \n # -- seasonality\n seasons1 = ['2011-02', '2011-03', '2011-04', '2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10', '2011-11']\n seasons2 = ['2011-05', '2011-06', '2011-07', '2011-08', '2011-09', '2011-10', '2011-11']\n seasons3 = ['2011-09', '2011-10', '2011-11']\n\n # -- cyclicity\n cycle1 = ['2011-02', '2011-04', '2011-06', '2011-08', '2011-10']\n cycle2 = ['2010-09', '2010-12', '2011-03', '2011-06', '2011-09']\n cycle3 = ['2010-12']\n\n # -- weak-periodicity\n weak_cycle = ['2010-01', '2010-11', '2010-12', '2011-01']\n\n df_ret = pd.DataFrame()\n time_list_bundle = [seasons1, seasons2, seasons3, cycle1, cycle2, cycle3, weak_cycle]\n attribute_names = ['seasonality1', 'seasonality2', 'seasonality3', 'cyclicity1', 'cyclicity2', 'cyclicity3', 'weak_cyclicity']\n\n # -- For each category, apply agg independently\n for category in categories:\n categoric_df = pd.DataFrame()\n\n for time_list, attribute_name in zip(time_list_bundle, attribute_names):\n now_df = df[category].loc[:, time_list]\n\n # -- Aggregating (sum, skew)\n attribute_sum = now_df.sum(axis=1)\n attribute_skew = now_df.skew(axis=1)\n\n categoric_df[f\"{category}_{attribute_name}_sum\"] = attribute_sum\n categoric_df[f\"{category}_{attribute_name}_skew\"] = attribute_skew\n\n df_ret = pd.concat([df_ret, categoric_df], axis=1)\n\n return df_ret\n\n\ndef calculate_date_diff(df_original, start_date, target_date):\n \"\"\" Generate the gap between first purchase date and last purchase date as feature.\n Refund data is NOT considered as purchase.\n\n Args:\n df_original (pd.DataFrame): monthly frame\n start_date (str): start date(2009-12 or 2010-01)\n target_date (str): target date\n\n 
Returns:\n date diff\n default: (last purchase date) - (first purchase date)\n If there is only one purchase date, (target date) - (first purchase date)\n \"\"\"\n\n df = df_original.copy()\n\n # -- Convert each data to pd.Timestamp\n start_date = pd.to_datetime(start_date)\n target_date = pd.to_datetime(target_date)\n \n # -- Calculate date diff for each row(customer)\n dt_diff = []\n for customer_id, datas in df.iterrows():\n start = start_date\n end = start_date\n \n for date, value in datas.items():\n if value > 0 and start == start_date:\n start = pd.to_datetime(date)\n if value > 0:\n end = pd.to_datetime(date)\n \n # -- When only one purchase data exist\n if start == end:\n end = pd.to_datetime(target_date) \n dt_diff.append(int((end - start).total_seconds()))\n\n dt_diff = np.array(dt_diff).reshape(-1, 1)\n\n # -- Normalize\n scaler = StandardScaler()\n dt_diff = scaler.fit_transform(dt_diff)\n\n return dt_diff\n\n\ndef apply_agg_to_feature(df_original, categories, start_date=None, target_date=None):\n \"\"\" Apply aggregate function to monthly data.\n 1. Generate monthly cumsum columns\n 2. Apply aggregation(skew) to original monthly data and of cumsum data. \n 3. (optional) Call 'calculate_date_diff' method\n\n Args:\n df_original (pd.DataFrame): monthly frame\n categories: aggregated categories\n\n (optional, when want to add 'date_diff' feature)\n start_date (str): start date(2009-12 or 2010-01)\n target_date (str): target date\n\n Returns:\n aggregated features\n \"\"\"\n\n df = df_original.copy()\n\n # -- Apply cumsum/skew\n df_ret = df.copy()\n for category in categories:\n df_skew = df[category].skew(axis=1).rename(f'{category}_skew')\n df_cumsum = df[category].cumsum(axis=1)\n df_cumsum.columns = [f\"cum_{category}_{x}\" for x in df_cumsum.columns]\n cumsum_skew = df_cumsum.skew(axis=1).rename(f'{category}_cumsum_skew')\n\n df_ret = pd.concat([df_ret, df_skew, df_cumsum, cumsum_skew], axis=1)\n df_ret = df_ret.rename(columns={'skew': f'{category}_skew',\n 'cumsum': f'{category}_cumsum',\n 'cumsum_skew': f'{category}_cumsum_skew'})\n\n # -- If start_date and target_date exists, generate date_diff feature additionally\n if start_date and target_date:\n date_diff = calculate_date_diff(df['total'], start_date=start_date, target_date=target_date)\n df_ret['date_diff'] = date_diff\n\n return df_ret\n\n\ndef convert_multi_index_to_single(df_original):\n \"\"\" Convert multi-index columns to single index.\n\n Args:\n df_original (pd.DataFrame): monthly frame\n\n Returns:\n monthly frame with single-index columns.\n ex) ('total', '2011-10') => 'total_2011-10'\n \"\"\"\n\n df = df_original.copy()\n\n new_columns = []\n for column in df.columns:\n new_column = column\n\n # -- If multi-index\n if isinstance(column, tuple):\n new_column = f\"{column[0]}_{column[1]}\"\n new_columns.append(new_column)\n df.columns = new_columns\n\n return df\n\n\ndef feature_engineering(df_original, target_date):\n \"\"\" Date feature engineering\n 1. Drop unnecessary columns(features)\n 2. Make monthly frame\n 3. Feature extracting\n 4. 
Imputing\n \n Args:\n df_original (pd.DataFrame): raw frame\n target_date (str): target date (i.e., '2011-12')\n\n Returns:\n preprocessed data(Split as train and test)\n train label(status, total)\n \"\"\"\n\n df = df_original.copy()\n\n # -- Basic preprocessing\n df.order_date = pd.to_datetime(df.order_date)\n df['ym'] = pd.to_datetime(df['order_date']).dt.strftime('%Y-%m')\n df.drop(['order_id', 'product_id', 'description', 'price', 'country'], axis=1, inplace=True)\n\n # -- Calculate period of train and test and apply it.\n d = datetime.datetime.strptime(target_date, \"%Y-%m\")\n prev_date = (d - relativedelta(months=1)).strftime(\"%Y-%m\")\n init_date = df.order_date.min().strftime(\"%Y-%m\")\n\n train = df[df['ym'] < prev_date]\n test = df[(df['ym'] < target_date) & (df['ym'] > init_date)]\n\n # -- Generate monthly frame and train label\n categories = ['total', 'quantity']\n monthly_frame = generate_monthly_frame(df, categories)['total']\n train_data = generate_monthly_frame(train, categories)\n test_data = generate_monthly_frame(test, categories)\n\n status_label = generate_label_status(monthly_frame, prev_date)\n total_label = generate_label_total(monthly_frame)['total_label']\n\n # -- Denoising\n train_data[train_data < STATUS_THRES] = 0\n test_data[test_data < STATUS_THRES] = 0\n\n # -- Feature extracting\n train_ts = time_series_processing(train_data, categories, train=True)\n test_ts = time_series_processing(test_data, categories, train=False)\n \n train_agg = apply_agg_to_feature(train_data, categories, start_date='2009-12', target_date='2011-11')\n test_agg = apply_agg_to_feature(test_data, categories, start_date='2010-01', target_date='2011-12')\n\n X_train = pd.merge(train_ts, train_agg, on=['customer_id'], how='left')\n X_train = pd.merge(X_train, status_label, on=['customer_id'], how='left')\n X_test = pd.merge(test_ts, test_agg, on=['customer_id'], how='left')\n \n # -- For convenience(ignoreable)\n X_test['customer_id'] = X_test.index\n X_test = X_test[[X_test.columns.values[-1]] + list(X_test.columns.values[:-1])]\n X_test.reset_index(drop=True).sort_values(by='customer_id')\n\n # -- Imputing(for test data)\n checker = X_train['customer_id'].isin(X_test.index)\n imputed = X_train[~checker].drop(columns=['status_label'])\n test_cols = {x: y for x, y in zip(X_train.columns, X_test.columns)}\n X_test = X_test.append(imputed.rename(columns=test_cols)).sort_values(by='customer_id')\n\n # -- Detect multi-index and convert them to single\n X_train = convert_multi_index_to_single(X_train)\n X_test = convert_multi_index_to_single(X_test)\n\n return X_train.drop(columns=['customer_id', 'status_label']), \\\n X_test.drop(columns=['customer_id']), \\\n X_train['status_label'], \\\n total_label\n\n\nif __name__ == '__main__':\n print('data_dir', data_dir)\n","repo_name":"bcaitech1/p2-tab-olenmg","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":13312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"9136328477","text":"class Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]: \n mem = {}\n \n for key, value in enumerate(nums):\n diff = target - value\n \n if diff in mem:\n return [mem[diff], key]\n else:\n mem[value] = key\n \n return []\n","repo_name":"Amyth07/Leetcode","sub_path":"1. Two Sum.py","file_name":"1. 
Two Sum.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5959718242","text":"import sys\nimport os\n\nimport pygame\nimport pygame.midi\nfrom pygame.locals import *\n\npygame.init()\npygame.fastevent.init()\nevent_get = pygame.fastevent.get\nevent_post = pygame.fastevent.post\n\npygame.midi.init()\n\ninput_id = pygame.midi.get_default_input_id()\n\nprint (\"using input_id :%s:\" % input_id)\ni = pygame.midi.Input( input_id )\n\npygame.display.set_mode((1,1))\n\n\n\ngoing = True\nwhile going:\n events = event_get()\n for e in events:\n if e.type in [QUIT]:\n going = False\n if e.type in [KEYDOWN]:\n going = False\n if e.type in [pygame.midi.MIDIIN]:\n print (e)\n\n if i.poll():\n midi_events = i.read(10)\n # convert them into pygame events.\n midi_evs = pygame.midi.midis2events(midi_events, i.device_id)\n for m_e in midi_evs:\n event_post( m_e )\ndel i\npygame.midi.quit()\n","repo_name":"hgijeon/the_PLAY","sub_path":"test_midi.py","file_name":"test_midi.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31514866310","text":"import os \nfrom PIL import Image\nfrom PIL import ImageDraw \nfrom PIL import ImageFont\n\n\ndef create_card(name):\n\tcard = Image.new('RGBA', (360, 288), 'white')\n\tflower = Image.open('zophie.png')\n\tcard.paste(flower, (10, 40))\n\tcut_guide = Image.new('RGBA', (364, 292), 'black')\n\tcut_guide.paste(card, (2, 2))\n\n\tdraw_obj = ImageDraw.Draw(cut_guide)\n\tfonts_folder = 'user/share/fonts/TTF'\n\tcustom_font = ImageFont.truetype(os.path.join(fonts_folder, 'ariel.ttf'), 72)\n\tdraw_obj.text((120, 100), name, fill='blue', font=custom_font)\n\tcut_guide.save('{}-invite.png'.format(name))\n\n\nwith open('guests.txt') as f:\n\tguests = f.readlines()\nprint(guests)\n\nfor guest in guests:\n\tcreate_card(guest)\n\nprint('All invations cards have been sent.')","repo_name":"francisliujia/codelife","sub_path":"python_code/textbooks/automate_the_boring_stuff_with_python/image_cards.py","file_name":"image_cards.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"15292751245","text":"import json\nfrom functools import total_ordering\nfrom time import process_time_ns\n\n@total_ordering\nclass Livros:\n def __init__(self, nome, autor, data_publicacao):\n self._nome = nome\n self._autor = autor\n self._data_publicacao = data_publicacao\n \n def __str__(self):\n return f'Nome do livro: {self._nome} - Autor: {self._autor} - Data publicação: {self._data_publicacao}'\n \n # \n def __eq__(self, outro_livro):\n if isinstance(outro_livro, Livros):\n return ((self._nome == outro_livro._nome) and \n (self._autor == outro_livro._autor) and \n (self._data_publicacao == outro_livro._data_publicacao))\n \n return False\n \n def __lt__(self, outro_livro):\n return self._data_publicacao > outro_livro._data_publicacao\n \n @property\n def nome(self):\n return self._nome\n \n \nclass Biblioteca():\n def pegarLivros(self):\n livros = []\n \n # with open('livros.json', \"r\") as arquivo:\n # for linha in arquivo:\n # linha = linha.strip()\n # livros_montagem = linha.split(' - ')\n # if [0, 1, 2] in livros_montagem: \n # book = Livros(livros_montagem[0], livros_montagem[1], livros_montagem[2])\n # livros.append(book)\n # else:\n # print(\"erro\")\n \n with open(\"livros.json\", \"r+\", 
encoding=\"utf-8\") as dados:\n texto_json = json.load(dados)\n \n for data in texto_json:\n book = Livros(data['Nome'], data['Autor'], data['Data de publicação'])\n livros.append(book)\n \n return livros\n \n def listarLivros(self):\n lista_livros = self.pegarLivros()\n print(\"\\nListando os Livros\\n\")\n for livro in sorted(lista_livros):\n print(livro)\n \n def adicionarLivros(self):\n print(\"Bem vindo a Biblioteca pessoal, digite o livro que você quer cadastrar: \")\n nome = \"\"\n autor = \"\"\n data_de_publicacao = \"\"\n while(nome == \"\" or autor == \"\" or data_de_publicacao == \"\"):\n nome = input(\"Digite o nome do seu livro: \")\n autor = input(\"Digite o autor do seu livro: \")\n data_de_publicacao = input(\"Digite a data de publicação do seu livro: \")\n \n nome = nome.title().strip()\n autor = autor.title().strip()\n data_de_publicacao = data_de_publicacao.strip()\n \n \n list = []\n with open(\"livros.json\", \"r+\", encoding=\"utf-8\") as dados:\n texto_json = json.load(dados)\n \n for line in texto_json:\n list.append(line)\n \n \n with open('livros.json', \"w\", encoding=\"utf-8\") as arquivo:\n livro_dict = {\"Nome\": nome, \"Autor\": autor, \"Data de publicação\": data_de_publicacao}\n list.append(livro_dict)\n text_json = json.dumps(list)\n arquivo.write(text_json)\n \n print(\"\\nLivro adicionado com sucesso :)\")\n \n def removerLivros(self):\n livro_remove_nome = input(\"Digite o nome do livro que você quer remover: \")\n livro_remove_autor = input(\"Digite o autor do livro que você quer remover: \")\n livro_remove_data = input(\"Digite a data de publicação do livro que você quer remover: \")\n \n livro_remove_nome = livro_remove_nome.title().strip()\n livro_remove_autor = livro_remove_autor.title().strip()\n livro_remove_data = livro_remove_data.title().strip()\n \n book = Livros(livro_remove_nome, livro_remove_autor, livro_remove_data)\n \n lista_livros = self.pegarLivros()\n cont = 0\n autorizado = False\n for livro in lista_livros:\n if (book == livro):\n lista_livros.pop(cont)\n self.removeLinha(cont)\n autorizado = True\n \n \n cont += 1\n\n if (not autorizado):\n pergunta = input(\"Livro não encontrado, deseja fazer a operação de novo? 
('SIM') ou ('Não')\")\n pergunta = pergunta.strip().title()\n if (pergunta != \"Não\" and pergunta != \"Nao\"):\n self.removerLivros()\n\n print(\"\\nOperação de remoção completa :)\")\n \n def removeLinha(self, indice):\n # with open('livros.txt', \"r\") as arquivo:\n # livroLinhas = arquivo.readlines()\n \n # livroLinhas.pop(indice)\n \n # with open(\"livros.txt\", \"w\") as f:\n # for line in livroLinhas:\n # f.write(line)\n \n list = []\n with open(\"livros.json\", \"r+\", encoding=\"utf-8\") as dados:\n texto_json = json.load(dados)\n \n for line in texto_json:\n if line != texto_json[indice]:\n list.append(line)\n \n with open('livros.json', \"w\", encoding=\"utf-8\") as arquivo:\n text_json = json.dumps(list)\n arquivo.write(text_json)\n \n \n\n","repo_name":"Kawhan/aprendizadoISYSTEMS","sub_path":"backend/treinamentoPython/cursoPython/projetoBiblioteca/livrosClass/livrosCompletJSON/jsonlibrary.py","file_name":"jsonlibrary.py","file_ext":"py","file_size_in_byte":5147,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"70742169180","text":"import pandas as pd \nimport numpy as np\nfrom scipy.optimize import curve_fit\n# import matplotlib.pyplot as plt\n\ndef linReg(startYear, userInput, existingData):\n years, points = [], []\n userInput = sorted(userInput, key=lambda x: x['Year'])\n existingData = sorted(existingData, key=lambda x: x['Year'])\n for elem in existingData:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n for elem in userInput:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n data = pd.DataFrame(np.array([years, points]).T, columns=['Year', 'CO2Emissions'])\n x = data['Year']\n y = data['CO2Emissions'] \n lin_model = np.polyfit(x, y, 1)\n coef, intcp = lin_model\n\n pred_lin_vals = []\n for year in range(startYear, 2050):\n val = intcp + coef*year\n if val >= 0:\n pred_lin_vals.append({\"Year\": year, \"CO2Emissions\": val})\n return pred_lin_vals\n\ndef expReg(startYear, userInput, existingData):\n def func(x, a, b, c):\n return a * np.exp(b * x) + c\n\n years, points = [], []\n userInput = sorted(userInput, key=lambda x: x['Year'])\n existingData = sorted(existingData, key=lambda x: x['Year'])\n for elem in existingData:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n for elem in userInput:\n years.append(elem[\"Year\"])\n points.append(elem[\"CO2Emissions\"])\n data = pd.DataFrame(np.array([years, points]).T, columns=['Year', 'CO2Emissions'])\n x = data['Year']\n y = data['CO2Emissions']\n\n popt, pcov = curve_fit(func, x, y, p0=(1,1e-6,1), maxfev=10000)\n pred_exp_vals = []\n for year in range(startYear, 2050):\n val = func(year, *popt)\n if val >= 0:\n pred_exp_vals.append({\"Year\": year, \"CO2Emissions\": val})\n return pred_exp_vals\n","repo_name":"mamn2/ClimateRegressionPredictor","sub_path":"Database/basicRegs.py","file_name":"basicRegs.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"34894399266","text":"from pyspark.sql import SparkSession\nspark=SparkSession.builder.getOrCreate()\nsc=spark.sparkContext\nrdd1=sc.textFile(\"hdfs://localhost:54310/user/hduser/custs\",4)\n#rdd1=rdd1.repartition(4)\nrdd2=rdd1.map(lambda x:x.split(\",\"))\nprint(\"sample customers are \")\nprint(rdd2.take(10))\nprint(\"total customers are \")\nprint(rdd2.count())\nrdd3=rdd2.map(lambda x:(int(x[0]),int(x[3]),x[4])).filter(lambda 
x:x[1]>60)\nrdd3.coalesce(1).saveAsTextFile(\"hdfs:/user/hduser/custoutdata2\")\n\nspark.sparkContext.setLogLevel(\"INFO\")\nrdd1=spark.sparkContext.textFile(\"/user/hduser/empdata2/\")\nrdd1.cache()\nrdd2=rdd1.map(lambda x:x.split(\",\"))\nlocal_python_var_kept_in_driver=rdd2.collect()\n#collect action will collect the data from rdd (executors) to driver or collect used for\n# converting the rdd to normal values\nprint(local_python_var_kept_in_driver)\nrdd3=rdd2.filter(lambda x:len(x)==5)\ndf1=rdd3.toDF()\ndf1.cache()\ndf1.select(\"*\").show(4)\ndf1.createOrReplaceTempView(\"view1\")\nspark.sql(\"describe view1\").show()\nspark.sql(\"select _1,_2,_3 from view1\").write.mode(\"overwrite\").orc(\"/user/hduser/empdataorc\")\nspark.read.orc(\"/user/hduser/empdataorc\").show()\nprint(\"end of spark core, sql application\")\n\n#spark-submit --master yarn --executor-memory 2g --num-executors 2 --deploy-mode client /home/hduser/core_submit.py\n#spark-submit --master yarn --executor-memory 2g --num-executors 2 --deploy-mode cluster /home/hduser/core_submit.p\n","repo_name":"sundarbee/Spark_Learning","sub_path":"Python_Pyspark_Programs/wd28Project/SparkProg/Core/sample_deploy.py","file_name":"sample_deploy.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"28648625523","text":"_seed = 131\n\ndef bkdrhash(str):\n hashnum = 0\n sz = len(str)\n for i in range(sz):\n hashnum = (hashnum * _seed) + ord(str[i])\n \n return hashnum & 0x7FFFFFFF\n\nif __name__ == \"__main__\":\n print(bkdrhash('hello world'))","repo_name":"lvchy/ClientTools","sub_path":"UABTools/bkdrhash.py","file_name":"bkdrhash.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9659988157","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'videodata_gen.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'videodata_app.views.home', name='home'),\n url(r'^viewer/(?P[\\w-]+)/$', 'videodata_app.views.viewer_video', name='viewer'),\n url(r'^tagging/(?P[\\w-]+)/$', 'videodata_app.views.tagging', name='tagging'),\n # url(r'^tagging/(?P[\\w-]+)/(?P[\\w-]+)$', 'videodata_app.views.save_tag', name='save_tag'),\n url(r'^tagging/(?P[\\w-]+)/crear_tag/$', 'videodata_app.views.save_tag', name='crear_tag'),\n)\n","repo_name":"cbertelegni/videodata","sub_path":"videodata_gen/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"74643990940","text":"import numpy as np\nfrom tqdm import trange, tqdm\nimport glob\nimport h5py\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras.preprocessing import image\nimport random\nfrom models import components, mae_loss, mse_loss\nimport scipy.misc\n# Avoid crash on non-X linux sessions (tipically servers) when plotting images\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport gc\nimport time\nfrom glob import glob\n\n# Images size\nw = 256\nh = 256\n\n# Cyclic consistency factor\n\nlmda = 10\n\n# Optimizer parameters\n\nlr = 0.0002\nbeta_1 = 0.5\nbeta_2 = 0.999\nepsilon = 1e-08\n\n# 
Setting image format as (channels, height, width)\nK.set_image_dim_ordering('th')\n\ndisc_a_history = []\ndisc_b_history = []\n\ngen_a2b_history = {'bc': [], 'mae': []}\ngen_b2a_history = {'bc': [], 'mae': []}\n\ngen_b2a_history_new = []\ngen_a2b_history_new = []\ncycle_history = []\n\nmodel_save_folder = \"models\"\n\n\n# Data loading\n\ndef loadImage(path, h, w):\n '''Load single image from specified path'''\n if path in cache:\n return cache[path]\n img = image.load_img(path)\n img = img.resize((w, h))\n x = image.img_to_array(img)\n cache[path] = x\n return x\n\ndef loadImagesFromDataset(h, w, dataset, use_hdf5=False):\n '''Return a tuple (trainA, trainB, testA, testB)\n containing numpy arrays populated from the\n test and train set for each part of the cGAN'''\n\n if (use_hdf5):\n path = \"./datasets/processed/\" + dataset + \"_data.h5\"\n data = []\n print('\\n', '-' * 15, 'Loading data from dataset', dataset, '-' * 15)\n with h5py.File(path, \"r\") as hf:\n for set_name in tqdm([\"trainA_data\", \"trainB_data\", \"testA_data\", \"testB_data\"]):\n data.append(hf[set_name][:].astype(np.float32))\n\n return (set_data for set_data in data)\n\n else:\n path = \"./datasets/\" + dataset\n print(path)\n train_a = glob.glob(path + \"/trainA/*.jpg\")\n train_b = glob.glob(path + \"/trainB/*.jpg\")\n test_a = glob.glob(path + \"/testA/*.jpg\")\n test_b = glob.glob(path + \"/testB/*.jpg\")\n\n print(\"Import trainA\")\n if dataset == \"nike2adidas\" or (\"adiedges\" in dataset):\n tr_a = np.array([loadImage(p, h, w) for p in tqdm(train_a[:1000])])\n else:\n tr_a = np.array([loadImage(p, h, w) for p in tqdm(train_a)])\n\n print(\"Import trainB\")\n if dataset == \"nike2adidas\" or (\"adiedges\" in dataset):\n tr_b = np.array([loadImage(p, h, w) for p in tqdm(train_b[:1000])])\n else:\n tr_b = np.array([loadImage(p, h, w) for p in tqdm(train_b)])\n\n print(\"Import testA\")\n ts_a = np.array([loadImage(p, h, w) for p in tqdm(test_a)])\n\n print(\"Import testB\")\n ts_b = np.array([loadImage(p, h, w) for p in tqdm(test_b)])\n\n return tr_a, tr_b, ts_a, ts_b\ncache = dict()\nn_batches = -1\ncurrent_milli_time = lambda: int(round(time.time() * 1000))\ndef load_batch(dataset, batch_size=1, is_testing=False, break_img=False):\n data_type = \"train\" if not is_testing else \"test\"\n a = f'./datasets/{dataset}/{data_type}A/*'\n b = f'./datasets/{dataset}/{data_type}B/*'\n path_A = None\n path_B = None\n if a in cache:\n path_A = cache[a]\n else:\n path_A = glob(a)\n\n if b in cache:\n path_B = cache[b]\n else:\n path_B = glob(b)\n\n n_batches = int(min(len(path_A), len(path_B)) / batch_size)\n total_samples = n_batches * batch_size\n\n # Sample n_batches * batch_size from each path list so that model sees all\n # samples from both domains\n path_A = np.random.choice(path_A, total_samples, replace=False)\n path_B = np.random.choice(path_B, total_samples, replace=False)\n\n for i in range(n_batches-1):\n start_time = current_milli_time()\n batch_A = path_A[i*batch_size:(i+1)*batch_size]\n batch_B = path_B[i*batch_size:(i+1)*batch_size]\n imgs_A, imgs_B = [], []\n for img_A, img_B in zip(batch_A, batch_B):\n img_B = load_img2(img_B, break_img=break_img)\n img_A = load_img2(img_A, break_img=break_img)\n\n\n imgs_A.append(img_A)\n imgs_B.append(img_B)\n\n imgs_A = np.array(imgs_A)/127.5 - 1.\n imgs_B = np.array(imgs_B)/127.5 - 1.\n\n yield imgs_A, imgs_B, current_milli_time() - start_time\n\ndef load_img2( path, break_img):\n name = path\n if name in cache:\n img = cache[name]\n else:\n img = 
loadImage(path, h , w)\n cache[name] = img\n return img\n# Create a wall of generated images\ndef plotGeneratedImages(epoch, dataset, batch_size, generator_a2b, generator_b2a, examples=6):\n\n a1, b1, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a2, b2, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a3, b3, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a4, b4, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a5, b5, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n a6, b6, t = next(load_batch(dataset, batch_size, is_testing=True, ))\n set_a= np.array([a1[0],a2[0],a3[0],a4[0],a5[0],a6[0]])\n set_b= np.array([b1[0],b2[0],b3[0],b4[0],b5[0],b6[0]])\n true_batch_a = set_a[np.random.randint(0, set_a.shape[0], size=examples)]\n true_batch_b = set_b[np.random.randint(0, set_b.shape[0], size=examples)]\n\n # Get fake and cyclic images\n generated_a2b = generator_a2b.predict(true_batch_a)\n cycle_a = generator_b2a.predict(generated_a2b)\n generated_b2a = generator_b2a.predict(true_batch_b)\n cycle_b = generator_a2b.predict(generated_b2a)\n\n k = 0\n\n # Allocate figure\n plt.figure(figsize=(w / 10, h / 10))\n\n for output in [true_batch_a, generated_a2b, cycle_a, true_batch_b, generated_b2a, cycle_b]:\n output = (output + 1.0) / 2.0\n for i in range(output.shape[0]):\n plt.subplot(examples, examples, k * examples + (i + 1))\n img = output[i].transpose(1, 2, 0) # Using (ch, h, w) scheme needs rearranging for plt to (h, w, ch)\n # print(img.shape)\n plt.imshow(img)\n plt.axis('off')\n plt.tight_layout()\n k += 1\n plt.savefig(\"images/epoch\" + str(epoch) + \".png\")\n plt.close()\n\n\n# Plot the loss from each batch\n\ndef plotLoss_new():\n plt.figure(figsize=(10, 8))\n plt.plot(disc_a_history, label='Discriminator A loss')\n plt.plot(disc_b_history, label='Discriminator B loss')\n plt.plot(gen_a2b_history_new, label='Generator a2b loss')\n plt.plot(gen_b2a_history_new, label='Generator b2a loss')\n # plt.plot(cycle_history, label=\"Cyclic loss\")\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('images/cyclegan_loss.png')\n plt.close()\n\n\ndef saveModels(epoch, dataset, genA2B, genB2A, discA, discB):\n print(\"Saving Model...\")\n genA2B.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorA2B.h5')\n genB2A.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorB2A.h5')\n discA.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorA.h5')\n discB.save(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorBh.h5')\n print(\"Model Saved!\")\n\n\ndef loadModels(epoch, dataset, genA2B, genB2A, discA, discB):\n try:\n genA2B.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorA2B.h5')\n genB2A.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_generatorB2A.h5')\n discA.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorA.h5')\n discB.load_weights(f'{model_save_folder}/{dataset}_{epoch}_{w}x{h}_discriminatorB.h5')\n except Exception as e:\n print(f\"Failed to load model: {e}\")\n\n\n# Training\n\ndef train(epochs, batch_size, dataset, baselr, use_pseudounet=False, use_unet=False, use_decay=False, plot_models=True,\n end_of_epoch_callback=None):\n if end_of_epoch_callback is not None:\n end_of_epoch_callback()\n\n # Load data and normalize\n # x_train_a, x_train_b, x_test_a, x_test_b = loadImagesFromDataset(h, w, dataset, use_hdf5=False)\n # x_train_a = (x_train_a.astype(np.float32) - 127.5) / 127.5\n # x_train_b = 
(x_train_b.astype(np.float32) - 127.5) / 127.5\n # x_test_a = (x_test_a.astype(np.float32) - 127.5) / 127.5\n # x_test_b = (x_test_b.astype(np.float32) - 127.5) / 127.5\n\n batchCount_a = n_batches\n batchCount_b = n_batches\n\n # Train on same image amount, would be best to have even sets\n batchCount = min([batchCount_a, batchCount_b])\n\n print('\\nEpochs:', epochs)\n print('Batch size:', batch_size)\n print('Batches per epoch: ', batchCount, \"\\n\")\n\n # Retrieve components and save model before training, to preserve weights initialization\n disc_a, disc_b, gen_a2b, gen_b2a = components(w, h, pseudounet=use_pseudounet, unet=use_unet, plot=plot_models)\n\n\n # LOAD AND SAVE ====\n loadModels('latest', dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n # saveModels('latest', dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n\n # Initialize fake images pools\n pool_a2b = []\n pool_b2a = []\n\n # Define optimizers\n adam_disc = Adam(lr=baselr, beta_1=0.5)\n adam_gen = Adam(lr=baselr, beta_1=0.5)\n\n # Define image batches\n true_a = gen_a2b.inputs[0]\n true_b = gen_b2a.inputs[0]\n\n fake_b = gen_a2b.outputs[0]\n fake_a = gen_b2a.outputs[0]\n\n fake_pool_a = K.placeholder(shape=(None, 3, h, w))\n fake_pool_b = K.placeholder(shape=(None, 3, h, w))\n\n # Labels for generator training\n y_fake_a = K.ones_like(disc_a([fake_a]))\n y_fake_b = K.ones_like(disc_b([fake_b]))\n\n # Labels for discriminator training\n y_true_a = K.ones_like(disc_a([true_a])) * 0.9\n y_true_b = K.ones_like(disc_b([true_b])) * 0.9\n\n fakelabel_a2b = K.zeros_like(disc_b([fake_b]))\n fakelabel_b2a = K.zeros_like(disc_a([fake_a]))\n\n # Define losses\n disc_a_loss = mse_loss(y_true_a, disc_a([true_a])) + mse_loss(fakelabel_b2a, disc_a([fake_pool_a]))\n disc_b_loss = mse_loss(y_true_b, disc_b([true_b])) + mse_loss(fakelabel_a2b, disc_b([fake_pool_b]))\n\n gen_a2b_loss = mse_loss(y_fake_b, disc_b([fake_b]))\n gen_b2a_loss = mse_loss(y_fake_a, disc_a([fake_a]))\n\n cycle_a_loss = mae_loss(true_a, gen_b2a([fake_b]))\n cycle_b_loss = mae_loss(true_b, gen_a2b([fake_a]))\n cyclic_loss = cycle_a_loss + cycle_b_loss\n\n # Prepare discriminator updater\n discriminator_weights = disc_a.trainable_weights + disc_b.trainable_weights\n disc_loss = (disc_a_loss + disc_b_loss) * 0.5\n discriminator_updater = adam_disc.get_updates(discriminator_weights, [], disc_loss)\n\n # Prepare generator updater\n generator_weights = gen_a2b.trainable_weights + gen_b2a.trainable_weights\n gen_loss = (gen_a2b_loss + gen_b2a_loss + lmda * cyclic_loss)\n generator_updater = adam_gen.get_updates(generator_weights, [], gen_loss)\n\n # Define trainers\n generator_trainer = K.function([true_a, true_b], [gen_a2b_loss, gen_b2a_loss, cyclic_loss], generator_updater)\n discriminator_trainer = K.function([true_a, true_b, fake_pool_a, fake_pool_b], [disc_a_loss / 2, disc_b_loss / 2],\n discriminator_updater)\n\n epoch_counter = 1\n\n plotGeneratedImages(epoch_counter,dataset, batch_size, gen_a2b, gen_b2a)\n\n # Start training\n for e in range(1, epochs + 1):\n print('\\n', '-' * 15, 'Epoch %d' % e, '-' * 15)\n gc.collect()\n\n # Learning rate decay\n if use_decay and (epoch_counter > 100):\n lr -= baselr / 100\n adam_disc.lr = lr\n adam_gen.lr = lr\n\n # Initialize progbar and batch counter\n # progbar = generic_utils.Progbar(batchCount)\n\n # np.random.shuffle(x_train_a)\n # np.random.shuffle(x_train_b)\n print(f\"Batch count: {batchCount}\")\n # Cycle through batches\n for i in trange(int(1000)):\n\n # Select true images for training\n # true_batch_a = 
x_train_a[np.random.randint(0, x_train_a.shape[0], size=batch_size)]\n # true_batch_b = x_train_b[np.random.randint(0, x_train_b.shape[0], size=batch_size)]\n\n true_batch_a, true_batch_b, load_time = next(load_batch(dataset, batch_size, is_testing=False, ))\n print(f\"Load time: {load_time}\")\n # true_batch_a = x_train_a[i * batch_size:i * batch_size + batch_size]\n # true_batch_b = x_train_b[i * batch_size:i * batch_size + batch_size]\n\n # Fake images pool\n a2b = gen_a2b.predict(true_batch_a)\n b2a = gen_b2a.predict(true_batch_b)\n\n tmp_b2a = []\n tmp_a2b = []\n\n for element in a2b:\n if len(pool_a2b) < 50:\n pool_a2b.append(element)\n tmp_a2b.append(element)\n else:\n p = random.uniform(0, 1)\n\n if p > 0.5:\n index = random.randint(0, 49)\n tmp = np.copy(pool_a2b[index])\n pool_a2b[index] = element\n tmp_a2b.append(tmp)\n else:\n tmp_a2b.append(element)\n\n for element in b2a:\n if len(pool_b2a) < 50:\n pool_b2a.append(element)\n tmp_b2a.append(element)\n else:\n p = random.uniform(0, 1)\n\n if p > 0.5:\n index = random.randint(0, 49)\n tmp = np.copy(pool_b2a[index])\n pool_b2a[index] = element\n tmp_b2a.append(tmp)\n else:\n tmp_b2a.append(element)\n\n pool_a = np.array(tmp_b2a)\n pool_b = np.array(tmp_a2b)\n\n # Update network and obtain losses\n disc_a_err, disc_b_err = discriminator_trainer([true_batch_a, true_batch_b, pool_a, pool_b])\n gen_a2b_err, gen_b2a_err, cyclic_err = generator_trainer([true_batch_a, true_batch_b])\n\n # progbar.add(1, values=[\n # (\"D A\", disc_a_err*2),\n # (\"D B\", disc_b_err*2),\n # (\"G A2B loss\", gen_a2b_err),\n # (\"G B2A loss\", gen_b2a_err),\n # (\"Cyclic loss\", cyclic_err)\n # ])\n\n # Save losses for plotting\n disc_a_history.append(disc_a_err)\n disc_b_history.append(disc_b_err)\n\n gen_a2b_history_new.append(gen_a2b_err)\n gen_b2a_history_new.append(gen_b2a_err)\n\n # cycle_history.append(cyclic_err[0])\n plotLoss_new()\n\n plotGeneratedImages(epoch_counter, dataset, batch_size, gen_a2b, gen_b2a)\n\n saveModels(epoch_counter, dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n saveModels('latest', dataset, gen_a2b, gen_b2a, disc_a, disc_b)\n\n epoch_counter += 1\n\n if end_of_epoch_callback is not None:\n end_of_epoch_callback()\n\n\ndef end_of_epoch_callback():\n print(\"potato\")\n\n\nif __name__ == '__main__':\n train(200, 1, \"n-yandex\", lr, use_decay=True, use_pseudounet=False, use_unet=False, plot_models=False,\n end_of_epoch_callback=end_of_epoch_callback)\n# tensorflowjs_converter --input_format keras models/n-yandex_latest_256x256_generatorA2B.h5 out/","repo_name":"DexterHuang/cycleGAN","sub_path":"cycleGAN.py","file_name":"cycleGAN.py","file_ext":"py","file_size_in_byte":15413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9975974390","text":"\"\"\" \norientation.py \n\nPlot the distribution of orientation angles of nearest\nneighbouring prey fish.\n\"\"\"\n\nfrom fishmodel import Environment, Prey, Predator, Food\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom fishmodel import fast_norm\nfrom parameter_fit import fit_params\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nfrom scipy import stats\nfrom progress.bar import Bar\n\ndef get_angle(v1, v2):\n cosang = np.dot(v1, v2)\n sinang = fast_norm(np.cross(v1, v2))\n return np.arctan2(sinang, cosang)\n\ndef get_closest_angle(prey, neighbors):\n i = get_closest_neighbor(prey, neighbors)\n\n return get_angle(prey.vel, neighbors[i].vel)\n\ndef get_closest_neighbor(prey, neighbors):\n 
\"\"\" Returns distance to nearest neighbor\"\"\"\n all_dist = []\n for n in neighbors:\n if n is not prey:\n all_dist.append(fast_norm(prey.pos - n.pos))\n return all_dist.index(np.min(all_dist))\n\ndef nn_orientation(env, timesteps=700):\n y = []\n bar = Bar(\"timesteps\", max=timesteps)\n for _ in range(timesteps):\n angles = []\n n = 0\n for prey in env.prey:\n if not prey.active:\n continue\n\n angles.append(get_closest_angle(prey, env.prey))\n n += 1\n\n average = np.sum(angles) / n\n y.append(average)\n env.timestep()\n bar.next()\n bar.finish()\n return np.array(y)\n\nif __name__ == \"__main__\":\n env = Environment(20, 0)\n angles = nn_orientation(env)\n\n\n plt.figure(figsize=(8, 7))\n x = np.linspace(-0.7, 1.7, 100)\n plt.plot(x, stats.norm.pdf(x, np.mean(angles), np.std(angles)))\n plt.ylabel('probability density')\n plt.xlabel(r'$\\theta_{nn}$')\n print('mean', np.mean(angles), 'std', np.std(angles))\n\n plt.show()","repo_name":"daanvinken/FishSchooling","sub_path":"orientation.py","file_name":"orientation.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37977949080","text":"# -*- coding: gb18030 -*-\n\n\nfrom SpaceCopy import SpaceCopy\nfrom interface.SpaceCopyRaidRecordInterface import SpaceCopyRaidRecordInterface\n\nMIN_TEAM_MEMBER_COUNT\t= 3\n\nclass SpaceCopyTianguan( SpaceCopy, SpaceCopyRaidRecordInterface ):\n\t\"\"\"\n\t\"\"\"\n\tdef __init__(self):\n\t\tSpaceCopy.__init__( self )\n\t\tSpaceCopyRaidRecordInterface.__init__( self )\n\t\tself.spawnMonstersList = {}\n\n\n\tdef addSpawnPointTianguan( self, spawnMailBox, grade, teamcount ):\n\t\t\"\"\"\n\t\tdefine method\n\t\t空间管理着一批刷怪点\n\t\t\"\"\"\n\t\tkey = str(grade) + \"and\" + str(teamcount)\n\t\tif not self.spawnMonstersList.has_key( key ):\n\t\t\tself.spawnMonstersList[key] = [spawnMailBox]\n\t\telse:\n\t\t\tself.spawnMonstersList[key].append( spawnMailBox )\n\n\n\tdef spawnMonsters( self, params ):\n\t\t\"\"\"\n\t\tdefine method\n\t\t\"\"\"\n\t\ttc = params[\"teamcount\"]\n\t\tif tc < 3:\n\t\t\ttc = 3\n\t\tfor i in xrange( MIN_TEAM_MEMBER_COUNT, tc + 1 ):\n\t\t\tkey = str(params[\"grade\"]) + \"and\" + str(i)\n\t\t\tif not key in self.spawnMonstersList:\n\t\t\t\tcontinue\n\t\t\tfor j in self.spawnMonstersList[key]:\n\t\t\t\td = {}\n\t\t\t\td[ \"tianguan_level\" ] = params[\"copyLevel\"]\n\t\t\t\td[ \"current_toll_gate\" ] = params[\"grade\"]\n\t\t\t\tj.cell.createEntity( d )\n\n\tdef onEnter( self, baseMailbox, params ):\n\t\t\"\"\"\n\t\tdefine method.\n\t\t玩家进入了空间,需要根据副本boss的击杀情况给予玩家\n\t\t相应的提示,并让玩家选择是继续副本还是离开副本。\n\t\t@param baseMailbox: 玩家mailbox\n\t\t@type baseMailbox: mailbox\n\t\t@param params: 玩家onEnter时的一些额外参数\n\t\t@type params: py_dict\n\t\t\"\"\"\n\t\tSpaceCopy.onEnter( self, baseMailbox, params )\n\t\tSpaceCopyRaidRecordInterface.onEnter( self, baseMailbox, params )\n","repo_name":"mudsave/csol2_enities_45541","sub_path":"base/SpaceCopyTianguan.py","file_name":"SpaceCopyTianguan.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9991514576","text":"from dd import autoref as _bdd\nimport xml.etree.ElementTree as ET\nfrom collections import OrderedDict\nimport re\n\nclass blackout_BDD():\n def __init__(self, varList, simulationData = None ):\n assert(varList != None)\n self.vars = varList # TopologyData is a list of labels associated to variables.\n self.bdd_initiatingEvents = _bdd.BDD()\n 
self.bdd_transitionRelation = _bdd.BDD()\n self.InitiatingEvents = \"\"\n self.TransitionRelation = \"\"\n self.initiatingEventExpr = self.bdd_initiatingEvents.false\n self.transitionRelationExpr = self.bdd_transitionRelation.false\n self.primes = OrderedDict()\n self.qvars = list()\n self.counter = 0\n for var in self.vars:\n self.bdd_initiatingEvents.add_var(var)\n self.bdd_transitionRelation.add_var(var)\n self.bdd_transitionRelation.add_var(var + 'p')\n self.primes[var + 'p'] = var\n self.qvars.append(var)\n if not simulationData is None:\n self.xmlfile = simulationData\n self.createBDDs()\n\n def updateInitiatingEvents(self, newEvaluation):\n self.initiatingEventExpr = self.initiatingEventExpr | newEvaluation\n\n def updateTransitionRelation(self, newRelation):\n self.transitionRelationExpr = self.transitionRelationExpr | newRelation\n\n def createBDDs(self):\n root = ET.parse(self.xmlfile).getroot()\n for path in root.iter('Path'):\n self.counter = self.counter + 1\n initial_outages = list()\n for Initial_Stage in path.iter('Initial_Stage'):\n for outage in Initial_Stage.iter('Outage'):\n initial_outages.append(outage.text)\n self.updateInitiatingEvents(self.bdd_initiatingEvents.add_expr(self.getExpressionStringInitial(initial_outages)))\n for Cascade_stage in path.iter('Cascading_Stage'):\n for StageNum in Cascade_stage.iter('Stage_Number'):\n cascade_outages = list()\n for outage in StageNum.iter('Outage'):\n cascade_outages.append(outage.text)\n self.updateTransitionRelation(self.bdd_transitionRelation.add_expr(self.getTransitionString(initial_outages, cascade_outages)))\n initial_outages.extend(cascade_outages)\n if self.counter == 300:\n break\n\n #dumping the bdds in a pickle file\n self.bdd_initiatingEvents.dump('InitiatingEventsExpr.p',[self.initiatingEventExpr])\n self.bdd_transitionRelation.dump('TransitionRelationExpr.p',[self.transitionRelationExpr])\n\n def printInitiatingEventsBDD(self):\n self.bdd_initiatingEvents.dump('InitiatingEvents.pdf', [self.initiatingEventExpr])\n\n def printTransitionRelationsBDD(self):\n self.bdd_transitionRelation.dump('TransitionRelation.pdf', [self.transitionRelationExpr])\n\n def getExpressionStringInitial(self,outage):\n initial_string = \"\"\n for item in self.vars:\n if item in outage:\n if initial_string:\n initial_string = initial_string + ' & ' + '!' + item\n else:\n initial_string = '!' + item\n else:\n if initial_string:\n initial_string = initial_string + ' & ' + item\n else:\n initial_string = item\n return initial_string\n\n\n def getExpressionStringTransition(self,prevOutage_dict, nextOutage_dict):\n answer1 = \"\"\n answer2 = \"\"\n for key in prevOutage_dict:\n if answer1:\n op = \" & \"\n else:\n op = \"\"\n if prevOutage_dict[key] :\n answer1 = answer1 + op + key\n else:\n answer1 = answer1 + op + '!' + key\n for key in nextOutage_dict:\n if answer2:\n op = \" & \"\n else:\n op = \"\"\n if nextOutage_dict[key] :\n answer2 = answer2 + op + key + 'p'\n else:\n answer2 = answer2 + op + '!' 
+ key + 'p'\n return answer1 + ' & ' + answer2\n\n def getTransitionString(self, prev, next_):\n prevString = OrderedDict()\n nextString = OrderedDict()\n for var in self.vars:\n prevString[var] = True\n nextString[var] = True\n for item in prev:\n prevString[item] = False\n nextString[item] = False\n for item in next_:\n nextString[item] = False\n return self.getExpressionStringTransition(prevString, nextString)\n\n\n def checkInitialState(self, currentState):\n return self.bdd_initiatingEvents.evaluate(self.initiatingEventExpr, currentState)\n\n def checkSystemState(self, currentState):\n if self.bdd_initiatingEvents.evaluate(self.initiatingEventExpr, currentState) != -1:\n return self.getFixedPointPath(currentState)\n else:\n return False\n\n def getFixedPointPath(self, currentState):\n path = list();\n initial = self.bdd_transitionRelation.add_expr(self.dictToList(currentState))\n while(self.bdd_transitionRelation.sat_len(initial) != 0):\n temp = _bdd.image(self.transitionRelationExpr, initial, self.primes, self.qvars, self.bdd_transitionRelation)\n path.append(list(self.bdd_transitionRelation.sat_iter(temp)))\n initial = temp\n return path\n\n def dictToList(self, state):\n answer = \"\"\n for var in self.vars:\n if answer:\n if state[var]:\n answer = answer + ' & ' + var\n else:\n answer = answer + ' & ' + '!' + var\n else:\n if state[var]:\n answer = var\n else:\n answer = '!' + var\n return answer\n","repo_name":"chhokrad/BDD_POWER","sub_path":"Code/SymbolicModelCheckerWOS.py","file_name":"SymbolicModelCheckerWOS.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16709369930","text":"#coding=utf-8\nimport sys\n\nimport tensorflow as tf\n\nfrom model_speech.cnn_ctc_dataset import input_fn, load_vocab, load_data\nfrom model_speech.cnn_ctc_estimator_v2 import AMEstimator\n\nlabel_vocab = load_vocab(['./data/thchs_train.txt', './data/thchs_dev.txt', './data/thchs_test.txt'])\nwav_lst, pny_lst = load_data(['./data/thchs_train.txt'], './data/', size=4)\n# dev_wav_lst, dev_pny_lst = load_data(['./data/thchs_dev.txt'], './data/')\nconfig = tf.ConfigProto()\nconfig.intra_op_parallelism_threads = 8\nconfig.inter_op_parallelism_threads = 8\nrun_config = tf.estimator.RunConfig().replace(\n session_config=config)\nam = AMEstimator(len(label_vocab), 'train', label_vocab, './logs_am_new_3', None, run_config)\nresult = am.predict(input_fn = lambda: input_fn('pred', 4, wav_lst, pny_lst, label_vocab),\n predict_keys=None,\n hooks=None,\n checkpoint_path=None,\n yield_single_examples=True)\nprint(result)\n#\nfor r in result:\n text = []\n print(r['input_length'])\n # print(r['label_length'])\n for i in r['text_ids']:\n text.append(label_vocab[i])\n text = ' '.join(text)\n print('文本结果:', text)\n # text = []\n # for i in r['y_true']:\n # text.append(label_vocab[i])\n # text = ' '.join(text)\n # print('原文结果:', text)\n# print('原文结果:', ' '.join(feats['the_labels']))","repo_name":"nietao2/DeepSpeechRecognition","sub_path":"test_estimator_v2.py","file_name":"test_estimator_v2.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"40225495133","text":"import argparse\nimport logging\nimport time\nimport pickle\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tf_pose.estimator import TfPoseEstimator\nfrom tf_pose.networks import get_graph_path, model_wh\n\ndef str2bool(v):\n return 
v.lower() in (\"yes\", \"true\", \"t\", \"1\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')\n # parser.add_argument('--camera', type=int, default=0)\n\n parser.add_argument('--resize', type=str, default='0x0',\n help='if provided, resize images before they are processed. default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')\n parser.add_argument('--resize-out-ratio', type=float, default=4.0,\n help='if provided, resize heatmaps before they are post-processed. default=1.0')\n\n parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')\n parser.add_argument('--show-process', type=bool, default=False,\n help='for debug purpose, if enabled, speed for inference is dropped.')\n \n parser.add_argument('--tensorrt', type=str, default=\"False\",\n help='for tensorrt process.')\n parser.add_argument('--vidloc', type=str, default='')\n args = parser.parse_args()\n\n w, h = model_wh(args.resize)\n if w > 0 and h > 0:\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h), trt_bool=str2bool(args.tensorrt))\n else:\n e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368), trt_bool=str2bool(args.tensorrt))\n cap = cv2.VideoCapture(args.vidloc)\n\n fourcc = cv2.VideoWriter_fourcc(*'XVID')\n out = cv2.VideoWriter('input_pose.mp4', fourcc, 30.0, (640, 640))\n\n parts_frames = {}\n counter = 0\n while True:\n ret,image = cap.read()\n if ret==False:\n break\n print(image.shape)\n humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)\n black = np.ones((640, 640, 3))\n image = TfPoseEstimator.draw_humans(black, humans, imgcopy=False)\n\n # cv2.imshow('tf-pose-estimation result', image)\n parts_points = {}\n list_of_parts = ['nose', 'sternum', 'right_shoulder', 'right_elbow', 'right_palm', \n 'left_shoulder', 'left_elbow', 'left_palm', 'right_hip', 'right_knee', \n 'right_ankle', 'left_hip', 'left_knee', 'left_ankle', 'right_eye', \n 'left_eye', 'right_ear', 'left_ear']\n for i, part in enumerate(list_of_parts):\n try:\n parts_points[part] = (int(humans[0].body_parts[i].x * 640), int(humans[0].body_parts[i].y * 640))\n except:\n parts_points[part] = (None, None)\n parts_frames[counter] = parts_points\n counter += 1\n # print(parts_points)\n # plt.imshow(image)\n # plt.show()\n image = image * 255\n image = np.uint8(image.astype(int))\n\n out.write(image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# Saving the objects:\nwith open('objs.pkl', 'wb') as f:\n pickle.dump(parts_frames, f)\nout.release()\ncap.release()\n","repo_name":"bipinkc19/squat-counter","sub_path":"get_points.py","file_name":"get_points.py","file_ext":"py","file_size_in_byte":3289,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"69"} +{"seq_id":"38244060024","text":"import random\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\nSENDER_EMAIL = os.getenv(\"SENDER_EMAIL\")\nAPP_PASSWORD = os.getenv(\"APP_PASSWORD\")\n\nclass Participant:\n def __init__(self, participant_id, name, email=None, restrictions=[]):\n self.participant_id = participant_id\n self.name = name\n self.email = email\n self.restrictions = restrictions\n\ndef match_people(start_people):\n # randomize\n random.shuffle(start_people)\n # sort by number of restrictions: max to min\n 
people = sorted(start_people, key=lambda person: len(person.restrictions), reverse=True)\n potential_matches = people.copy()\n matches = {}\n\n # iterate from most restrictions to least restrictions, \n # trying to match most restricted people first\n for person in people:\n for i, potential_match in enumerate(potential_matches):\n # if potential_match is not also the person trying to be matched and\n # is not restricted and \n # does not have the person trying to get matched\n if (potential_match != person and\n potential_match.name not in person.restrictions and\n matches.get(potential_match) != person):\n matches[person] = potential_match\n potential_matches.pop(i)\n break\n \n # person could not be matched\n if i == len(potential_matches) - 1:\n # already gone through everything and matches is empty\n if not matches:\n return None\n\n # attempt to replace from an earlier match\n for i, (gifter, recipient) in enumerate(matches.items()):\n if (person != recipient and\n recipient.name not in person.restrictions and\n matches.get(recipient) != person and\n person.name not in gifter.restrictions):\n matches[gifter] = person\n matches[person] = recipient\n break\n \n # no replacements could be made\n if i == len(matches) - 1:\n return None\n \n return matches\n\n\ndef send_emails(matches, message_text, message_html):\n server = smtplib.SMTP_SSL(\"smtp.gmail.com\", 465)\n server.ehlo()\n server.login(SENDER_EMAIL, APP_PASSWORD)\n \n for gifter, recipient_name in matches:\n msg = MIMEMultipart(\"alternative\")\n msg[\"Subject\"] = \"Secret Santa\"\n msg[\"From\"] = f\"Secret Santa Organizer <{SENDER_EMAIL}>\"\n msg[\"To\"] = gifter[1]\n \n # Create the body of the message (a plain-text and an HTML version)\n text = f\"Your person is {recipient_name}.\\n\\n{message_text}\"\n \n html = f\"\"\"\\\n \n \n

Your person is {recipient_name}.

\n {message_html}\n \n \n \"\"\"\n\n part1 = MIMEText(text, \"plain\")\n part2 = MIMEText(html, \"html\")\n\n msg.attach(part1)\n msg.attach(part2)\n\n server.sendmail(SENDER_EMAIL, gifter[1], msg.as_string())\n\n server.quit()\n\n\ndef log(matches):\n log_message = \"\"\n\n for gifter, recipient_name in matches:\n log_message += f\"{gifter[0]} has to get a gift for {recipient_name}\\n\"\n\n with open(\"secret_santa_log.txt\", \"w\") as f:\n f.write(log_message)\n\ndef get_organizer_emails(form):\n \"\"\"Get up to 15 organizer emails from an input form.\"\"\"\n\n return form.getlist(\"organizer\")[:15]\n\ndef get_participants(form):\n \"\"\"Get up to 100 participants from an input form.\"\"\"\n\n participants = []\n null_participants = 0\n for i in range(100):\n participant = form.getlist(f\"participant{i}\")\n\n if participant == []:\n null_participants += 1\n\n if null_participants >= 5:\n break\n \n continue\n\n name, email = participant\n \n if name == \"\" and email == \"\":\n continue\n \n # if the user inserted a name or email that was too long\n if len(name) > 50 or len(email) > 200:\n return []\n \n if name == \"\":\n name = email\n \n # set email to none if it is blank, organizers will have to contact\n elif email == \"\":\n email = None\n \n restrictions = [restriction for restriction in form.getlist(f\"participant{i}restriction\") if restriction != \"\"]\n\n participants.append(Participant(i, name, email, restrictions))\n \n return participants\n\n","repo_name":"sachinraja/secretsantaorganizer","sub_path":"app/utils/secret_santa.py","file_name":"secret_santa.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"26521857727","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.conf import settings\nfrom . 
models import Msg\nfrom django.contrib import messages\nimport os\n# Create your views here.\n\n\n\nfrom django.http import HttpResponse\nimport os\n\ndef download_file(request):\n file_path = os.path.join('static/media/myfile.pdf') # Replace with the actual file path\n if os.path.exists(file_path):\n with open(file_path, 'rb') as file:\n response = HttpResponse(file.read(), content_type='application/octet-stream')\n response['Content-Disposition'] = 'attachment; filename=' + os.path.basename(file_path)\n return response\n else:\n return HttpResponse('File not found.')\n\n\n\n\ndef index(request):\n if request.method == 'POST':\n name = request.POST.get('name')\n email = request.POST.get('email')\n phone = request.POST.get('phone')\n message = request.POST.get('message')\n msg=Msg(name=name, email=email, phone=phone, message=message)\n msg.save()\n messages.success(request, 'Contact added successfully!')\n return render(request, 'index.html')","repo_name":"varghesejojo/mywebsite","sub_path":"profileproject/proapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"71506628379","text":"import cv2\nimport numpy as np\n\"\"\"\n자동 객체 인식을 위한 기초 전처리 \n\"\"\"\n\n\n# 색범위 필터링\ndef color_filter(img, lower, upper):\n \"\"\"\n 색상 필터링\n 참고) 필터링할 색 범위 지정(hsv system)에 대하여\n hsv 색공간은 h -> theta(θ), s -> r, v -> z 인 원주좌표계로 생각할 수 있다.\n 그러나 opencv 에서 색상(h) 범위는 0~179, 채도(s) 범위 0 ~ 255, 명암(v) 범위 0 ~ 255 이므로\n 원주좌표계 상에 표현된 hsv 값을 opencv의 스케일에 맞게 변환하여야 한다.\n\n Args:\n img: 처리 전 이미지, 3차원 numpy ndarray 객체\n lower: hsv 색공간에서 하한선, 길이가 3인 numpy array 객체\n upper: hsv 색공간에서 상한선, 길이가 3인 numpy array 객체\n\n returns:\n 지정 범위의 색을 제외한것을 False, 지정 범위의 색을 포함한것을 True로 하는 binary 이미지,\n 2차원 numpy ndarray 객체\n \"\"\"\n mask = cv2.inRange(img, lower, upper)\n result = cv2.bitwise_and(img, img, mask=mask)\n # mask_inverse = cv2.bitwise_not(mask)\n # result_inverse = cv2.bitwise_and(img_grass, img_grass, mask=mask_inverse)\n return mask\n\n# 경계선 감지\ndef boundary(img):\n \"\"\"\n 이미지의 경계선을 감지. 
블러 처리 후 Canny edge filter 사용\n\n Args:\n img: 경계선을 감지할 이미지, cv2 img 객체\n\n returns:\n 경계선을 True로 하는 binary 이미지, 2차원 numpy ndarray 객체\n \"\"\"\n blur = cv2.GaussianBlur(img, ksize=(3, 3), sigmaX=50)\n result = cv2.Canny(blur, 100, 200)\n return result\n\n# 객체 탐지(레이블링)\ndef labeling(binary_mask, front_image, filter_size):\n \"\"\"\n 이어진 부분을 한 객체로 인식하고 인식된 부분을 직사각형으로 라벨링하여 표시하는 함수\n\n Args:\n binary_mask: component 인식을 위한 binary 이미지, numpy ndarray 객체\n front_image: 인식 결과를 합성할 원본 이미지, numpy ndarray 객체\n filter_size: threshold 크기, 이 값보다 작은 크기의 component는 무시한다, int 객체\n\n returns:\n binary 이미지를 분석하여 객체를 인식하고 그 결과를 원본 이미지에 라벨링한 bgr 이미지 리턴,\n 3차원 numpy ndarray 객체\n \"\"\"\n count, labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask)\n # img_gbr = cv2.cvtColor(binary_mask, cv2.COLOR_GRAY2BGR)\n\n for i in range(1, count):\n (x, y, w, h, area) = stats[i]\n if area < filter_size:\n continue\n cv2.rectangle(front_image, (x, y, w, h), (255, 0, 0))\n\n return front_image\n\ndef point_clustering(image_path):\n \"\"\"\n 경계선 검출, 색상 검출, morphology, conponent 인식 순으로 이미지를 처리한 후 레이블 클러스터링을 통해\n 식물을 인식하기 위한 함수\n\n Args:\n image_path: 분석 대상 이미지 경로, str 객체\n\n returns:\n 이미지 클러스터링을 위해 component를 인식후 centroid 와 함께 라벨링한 bgr 이미지, 3차원 numpy ndarray\n \"\"\"\n image_bgr = cv2.imread(image_path)\n image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n\n # 색상 필터 적용을 위한 파라미터\n lower = np.array([26, 25, 25])\n upper = np.array([83, 245, 245])\n\n # 경계선 감지, 색상 감지\n boundary_mask = boundary(image_hsv)\n color_mask = color_filter(image_hsv, lower, upper)\n\n # 마스크 and 연산\n merged_mask = cv2.bitwise_and(boundary_mask, color_mask)\n\n # morphology 연산\n morph_gradient = cv2.morphologyEx(merged_mask, cv2.MORPH_CLOSE, None)\n morph_open = cv2.morphologyEx(morph_gradient, cv2.MORPH_OPEN, None)\n\n # component 인식\n count, labels, stats, centroids = cv2.connectedComponentsWithStats(morph_open)\n\n for i in range(1, count):\n (x, y) = centroids[i]\n (x_area, y_area, w, h, area) = stats[i]\n if area < 50:\n continue\n cv2.circle(image_bgr, (int(x), int(y)), 10, (255, 0, 0), 2)\n cv2.rectangle(image_bgr, (x_area, y_area, w, h), (0, 0, 255))\n\n return image_bgr\n\ndef label_clustering(image_path):\n \"\"\"\n 경계선 검출, 색상 검출, conponent 인식 순으로 이미지를 처리한 후 레이블 클러스터링을 통해\n 식물을 인식하기 위한 함수\n\n Args:\n image_path: 분석 대상 이미지 경로, str 객체\n\n returns:\n 이미지 클러스터링을 위해 component를 인식후 라벨링한 bgr 이미지, 3차원 numpy ndarray\n \"\"\"\n image_bgr = cv2.imread(image_path)\n image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n\n # 색상 필터 적용을 위한 파라미터\n lower = np.array([26, 25, 25])\n upper = np.array([83, 245, 245])\n\n # 경계선 감지, 색상 감지\n boundary_mask = boundary(image_hsv)\n color_mask = color_filter(image_hsv, lower, upper)\n\n # 마스크 and 연산\n merged_mask = cv2.bitwise_and(boundary_mask, color_mask)\n\n # component 인식 후 레이블링\n result = labeling(merged_mask, image_bgr, 70)\n return result\n\ndef plant_boundary(image_path):\n \"\"\"\n 식물의 경계를 검출하는 함수\n\n Args:\n image_path: 분석 대상 이미지의 경로, str 객체\n\n returns:\n 식물의 경계를 검출한 binary 이미지, 2차원 numpy ndarray\n \"\"\"\n image_bgr = cv2.imread(image_path)\n image_hsv = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2HSV)\n image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)\n\n # 색 범위 변수, 노랑~파랑의 범위 내에서 적절히 조정하였음\n lower1 = np.array([26, 70, 70])\n upper1 = np.array([83, 250, 250])\n\n # CLAHE\n # 히스토그램 균일화\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n clahe_image = clahe.apply(image_gray)\n\n # 경계검출, 색상 검출\n boundary_canny = boundary(clahe_image)\n color_mask = color_filter(image_hsv, 
lower1, upper1)\n\n # 이진화된 영상에 and 연산 수행\n combined_mask = cv2.bitwise_and(boundary_canny, color_mask)\n\n # 모폴로지 연산: 그래디언트\n morph_gradient = cv2.morphologyEx(combined_mask, cv2.MORPH_GRADIENT, None)\n\n return morph_gradient\n\n\ndef main():\n boundary_img1 = plant_boundary(\"./test_image/for_rec.jpg\")\n boundary_img2 = plant_boundary(\"./test_image/se1.png\")\n\n\n # case1_img1 = point_clustering(\"./test_image/for_rec.jpg\")\n # case1_img2 = point_clustering(\"./test_image/se1.png\")\n #\n # case2_img1 = label_clustering(\"./test_image/for_rec.jpg\")\n # case2_img2 = label_clustering(\"./test_image/se1.png\")\n\n cv2.imshow('boundary_img1', boundary_img1)\n cv2.imshow('boundary_img2', boundary_img2)\n # cv2.imshow('case1_img1', case1_img1)\n # cv2.imshow('case1_img2', case1_img2)\n # cv2.imshow('case2_img1', case2_img1)\n # cv2.imshow('case2_img2', case2_img2)\n cv2.waitKey()\n cv2.destroyAllW\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"hyeonghak96/Mix_project","sub_path":"openCV_examples/image_filter.py","file_name":"image_filter.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"7453967577","text":"from greenlet import greenlet\n# 执行test1,执行test2,跳回来,继续\n# 执行test没执行完的内容,再执行test2没执行完的内容\ndef test1():\n print(\"执行test1\")\n gr2.switch()\n print(\"结束test1\")\n gr2.switch()\n\ndef test2():\n print(\"执行test2\")\n gr1.switch()\n print(\"结束test2\")\n\n# 将函数变为协程\ngr1 = greenlet(test1)\ngr2 = greenlet(test2)\n\n# 执行协程1\ngr1.switch()","repo_name":"suprviserpy632157/zdy","sub_path":"ZDY/Feb_all/python多任务编程/February0209/afternoon/greenlet_0.py","file_name":"greenlet_0.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"75022902618","text":"import logging\nfrom typing import List, Tuple\n\nimport numpy as np\n\nlogger = logging.getLogger(__name__)\n\n\ndef compute_polygon_bboxes(polygons: np.ndarray) -> np.ndarray:\n \"\"\"Compute the minimum size enclosing xy bounding box for each polygon that is provided as input.\n Args:\n polygons: an array of type 'O' (object) with shape (n,). 
Each object has shape (m, 3+).\n\n Returns:\n polygon_bboxes: a float array with shape (n, 4).\n \"\"\"\n bboxes: List[np.ndarray] = []\n\n for polygon in polygons:\n bbox = compute_point_cloud_bbox(polygon)\n bboxes.append(bbox)\n\n polygon_bboxes = np.array(bboxes)\n return polygon_bboxes\n\n\ndef compute_point_cloud_bbox(point_cloud: np.ndarray, verbose: bool = False) -> np.ndarray:\n \"\"\"Given a set of 2D or 3D points, find the minimum size axis-aligned bounding box in the xy plane (ground plane).\n\n Args:\n point_cloud: an array of dim (N,3) or (N,2).\n verbose: False by default, if set to True, it prints the bounding box dimensions.\n\n Returns:\n bbox: an array of dim (4,) representing x_min, y_min, x_max, y_max.\n \"\"\"\n x_min = np.amin(point_cloud[:, 0])\n x_max = np.amax(point_cloud[:, 0])\n\n y_min = np.amin(point_cloud[:, 1])\n y_max = np.amax(point_cloud[:, 1])\n\n bbox_width = x_max - x_min\n bbox_height = y_max - y_min\n\n bbox = np.array([x_min, y_min, x_max, y_max])\n\n if verbose:\n logger.info(f\"Point cloud bbox width = {bbox_width}, height = {bbox_height}\")\n return bbox\n\n\ndef find_all_polygon_bboxes_overlapping_query_bbox(polygon_bboxes: np.ndarray, query_bbox: np.ndarray) -> np.ndarray:\n \"\"\"Find all the overlapping polygon bounding boxes.\n\n Each bounding box has the following structure:\n bbox = np.array([x_min,y_min,x_max,y_max])\n\n In 3D space, if the coordinates are equal (polygon bboxes touch), then these are considered overlapping.\n We have a guarantee that the cropped image will have any sort of overlap with the zero'th object bounding box\n inside of the image e.g. along the x-dimension, either the left or right side of the bounding box lies between the\n edges of the query bounding box, or the bounding box completely engulfs the query bounding box.\n\n Args:\n polygon_bboxes: An array of shape (K,), each array element is a NumPy array of shape (4,) representing\n the bounding box for a polygon or point cloud.\n query_bbox: An array of shape (4,) representing a 2d axis-aligned bounding box, with order\n [min_x,min_y,max_x,max_y].\n\n Returns:\n An integer array of shape (K,) representing indices where overlap occurs.\n \"\"\"\n query_min_x = query_bbox[0]\n query_min_y = query_bbox[1]\n\n query_max_x = query_bbox[2]\n query_max_y = query_bbox[3]\n\n bboxes_x1 = polygon_bboxes[:, 0]\n bboxes_x2 = polygon_bboxes[:, 2]\n\n bboxes_y1 = polygon_bboxes[:, 1]\n bboxes_y2 = polygon_bboxes[:, 3]\n\n # check if falls within range\n overlaps_left = (query_min_x <= bboxes_x2) & (bboxes_x2 <= query_max_x)\n overlaps_right = (query_min_x <= bboxes_x1) & (bboxes_x1 <= query_max_x)\n\n x_check1 = bboxes_x1 <= query_min_x\n x_check2 = query_min_x <= query_max_x\n x_check3 = query_max_x <= bboxes_x2\n x_subsumed = x_check1 & x_check2 & x_check3\n\n x_in_range = overlaps_left | overlaps_right | x_subsumed\n\n overlaps_below = (query_min_y <= bboxes_y2) & (bboxes_y2 <= query_max_y)\n overlaps_above = (query_min_y <= bboxes_y1) & (bboxes_y1 <= query_max_y)\n\n y_check1 = bboxes_y1 <= query_min_y\n y_check2 = query_min_y <= query_max_y\n y_check3 = query_max_y <= bboxes_y2\n y_subsumed = y_check1 & y_check2 & y_check3\n y_in_range = overlaps_below | overlaps_above | y_subsumed\n\n overlap_indxs = np.where(x_in_range & y_in_range)[0]\n return overlap_indxs\n\n\ndef find_local_polygons(\n lane_polygons: np.ndarray,\n lane_bboxes: np.ndarray,\n query_min_x: float,\n query_max_x: float,\n query_min_y: float,\n query_max_y: float,\n) -> Tuple[np.ndarray, np.ndarray]:\n 
\"\"\"Find local polygons. We always also return indices.\n\n Take a collection of precomputed polygon bounding boxes, and compare with a query bounding box then returns the\n polygons that overlap, along with their array indices.\n\n Args:\n lane_polygons: An array of polygons.\n lane_bboxes: An array of shape (K,), each array element is a NumPy array of shape (4,) representing\n the bounding box for a polygon or point cloud.\n query_min_x: minimum x coordinate of the query bounding box.\n query_max_x: maximum x coordinate of the query bounding box.\n query_min_y: minimum y coordinate of the query bounding box.\n query_max_y: maximum y coordinate of the query bounding box.\n return_indices: False by default, if set to True, the overlapping indices are returned along with the\n overlapping polygon.\n\n Returns:\n Overlapping polygon.\n Overlapping indices.\n \"\"\"\n query_bbox = np.array([query_min_x, query_min_y, query_max_x, query_max_y])\n overlap_indxs = find_all_polygon_bboxes_overlapping_query_bbox(lane_bboxes, query_bbox)\n\n pruned_lane_polygons = lane_polygons[overlap_indxs]\n return pruned_lane_polygons, overlap_indxs\n\n\ndef prune_polygons_manhattan_dist(\n query_pt: np.ndarray,\n points_xyz: np.ndarray,\n query_search_range_manhattan: float = 200.0,\n) -> np.ndarray:\n \"\"\"Prune polygon points based on a search area defined by the manhattan distance.\n\n Take a collection of small point clouds and return only point clouds that fall within a manhattan search radius of\n the 2D query point.\n\n Similar to the function above, except query bounding box and polygon bounding boxes are not pre-computed, meaning\n they must be computed on fly, which can be quite computationally expensive in a loop.\n\n Args:\n query_pt: Numpy n-d array with dimension (2,) representing xy query location.\n points_xyz: An array of shape (n,) of array objects. Each array object could be a 2D or 3D polygon, i.e. of\n shape (m,2) or (m,3) respectively.\n query_search_range_manhattan: Side length of query bounding box square which is set to 200 by default.\n\n Returns:\n An array pruned xyz point objects of shape (k,). Each array object could be a 2D or 3D polygon, i.e. of shape\n (m,2) or (m,3) respectively.\n \"\"\"\n bboxes = compute_polygon_bboxes(points_xyz)\n\n query_min_x = query_pt[0] - query_search_range_manhattan\n query_max_x = query_pt[0] + query_search_range_manhattan\n query_min_y = query_pt[1] - query_search_range_manhattan\n query_max_y = query_pt[1] + query_search_range_manhattan\n\n query_bbox = np.array([query_min_x, query_min_y, query_max_x, query_max_y])\n overlap_indxs = find_all_polygon_bboxes_overlapping_query_bbox(bboxes, query_bbox)\n\n pruned_points_xyz = points_xyz[overlap_indxs]\n return pruned_points_xyz\n","repo_name":"argoverse/argoverse-api","sub_path":"argoverse/utils/manhattan_search.py","file_name":"manhattan_search.py","file_ext":"py","file_size_in_byte":7008,"program_lang":"python","lang":"en","doc_type":"code","stars":760,"dataset":"github-code","pt":"69"} +{"seq_id":"31466061585","text":"\"\"\"\nTensorflow implementation of the Cross entropy loss with label smoothing.\n\nOriginal paper:\nSzegedy et al. Rethinking the Inception Architecture for Computer Vision. 
CVPR 2016.\n\nModified from torch implementation:\nhttps://github.com/mikwieczorek/centroids-reid/blob/main/losses/triplet_loss.py\n\"\"\"\n\nimport tensorflow as tf\nfrom typing import Any, Callable\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.losses import CategoricalCrossentropy\nfrom tensorflow_similarity.types import IntTensor, FloatTensor\n\n\nXENT_DENSE_INITIALIZER = {\n\t'class_name': 'RandomNormal',\n\t'config': {\n\t\t'mean': 0.0,\n\t\t'stddev': 0.001\n\t}\n}\n\n\nclass CrossEntropyLabelSmooth(tf.keras.losses.Loss):\n\t\"\"\"\n\tCross entropy loss with label smoothing regularizer.\n\n\tEquation: y = (1 - epsilon) * y + epsilon / K.\n\n\tArgs:\n\t\tnum_classes (int): number of classes.\n\t\tepsilon (float): weight.\n\n\t\"\"\"\n\n\tdef __init__(\n\t\t\tself,\n\t\t\tnum_classes: int,\n\t\t\tepsilon: float = 0.1,\n\t\t\treduction: Callable = tf.keras.losses.Reduction.AUTO,\n\t\t\tname: str = 'xent_label_smooth',\n\t\t\t**kwargs\n\t) -> None:\n\n\t\tsuper().__init__(reduction=reduction, name=name, **kwargs)\n\n\t\tself.epsilon = epsilon\n\t\tself.num_classes = num_classes\n\n\t\tself.fully_connected_layer = layers.Dense(\n\t\t\tunits=num_classes,\n\t\t\tactivation=None,\n\t\t\tuse_bias=False,\n\t\t\tkernel_initializer=XENT_DENSE_INITIALIZER,\n\t\t\tdtype='float32'\n\t\t)\n\n\t\tself.cross_entropy = CategoricalCrossentropy(\n\t\t\tlabel_smoothing=epsilon,\n\t\t\treduction=reduction,\n\t\t\tfrom_logits=True\n\t\t)\n\n\t\tself.fill_value = 1\n\n\tdef call(\n\t\t\tself,\n\t\t\tlabels: IntTensor,\n\t\t\tembeddings: FloatTensor\n\t) -> Any:\n\t\t\"\"\"\n\t\tArgs:\n\t\t\tlabels: ground truth labels with shape (batch_size)\n\t\t\tembeddings: embeddings with shape (batch_size, embedding_size)\n\t\t\"\"\"\n\n\t\tcls_score = self.fully_connected_layer(embeddings)\n\t\tlog_probs = tf.nn.log_softmax(logits=cls_score, axis=1)\n\n\t\t_, labels_idx = tf.unique(labels)\n\t\tdepth = log_probs.shape[-1]\n\t\tbinary_labels = tf.one_hot(\n\t\t\tindices=labels_idx,\n\t\t\tdepth=depth,\n\t\t\ton_value=self.fill_value,\n\t\t\tdtype=tf.float32\n\t\t)\n\n\t\tloss = self.cross_entropy(\n\t\t\ty_true=binary_labels,\n\t\t\ty_pred=tf.cast(log_probs, dtype=tf.float32)\n\t\t)\n\n\t\treturn loss\n","repo_name":"RevisorTeam/evolly","sub_path":"examples/tf_examples/image_retrieval/losses/xent_label_smooth.py","file_name":"xent_label_smooth.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"32214395378","text":"import sys\nimport re\nfrom collections import namedtuple, defaultdict\n\nClaim = namedtuple('Claim', 'id left top width height')\n\n\ndef claim_points(claim):\n for y in range(0, claim.height):\n for x in range(0, claim.width):\n yield x, y\n\n\ndef step1(claims):\n d = defaultdict(lambda: defaultdict(int))\n r = 0\n for claim in claims:\n for x, y in claim_points(claim):\n c = d[claim.top + y][claim.left + x]\n d[claim.top + y][claim.left + x] += 1\n if c == 1:\n r += 1\n return r\n\n\ndef step2(claims):\n d = defaultdict(lambda: defaultdict(set))\n for claim in claims:\n for x, y in claim_points(claim):\n d[claim.top + y][claim.left + x] |= {claim.id}\n for claim in claims:\n poss = True\n for x, y in claim_points(claim):\n if d[claim.top + y][claim.left + x] != {claim.id}:\n poss = False\n break\n if poss:\n return claim.id\n raise ValueError('No solution found')\n\n\ndef read_claims():\n claims = []\n for line in sys.stdin:\n claim_id, left, right, width, height = re.match(\n r'#(\\d+) @ 
(\\d+),(\\d+): (\\d+)x(\\d+)',\n line\n ).groups()\n claims += [Claim(int(claim_id), int(left),\n int(right), int(width), int(height))]\n return claims\n\n\nclaims = read_claims()\nprint(step1(claims))\nprint(step2(claims))\n\n","repo_name":"plilja/adventofcode","sub_path":"2018/day03/day03.py","file_name":"day03.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"3854677694","text":"#!/usr/bin/env python3\n# smartbchd-monitor.py\n#\n# An exporter for Prometheus and SmartBCH.\n#\n\nimport json\nimport logging\nimport time\nimport os\nimport signal\nimport sys\nimport socket\n\nfrom datetime import datetime\nfrom functools import lru_cache\nfrom typing import Any\nfrom typing import Dict\nfrom typing import List\nfrom typing import Union\nfrom wsgiref.simple_server import make_server\n\nimport riprova\n\nfrom bitcoin.rpc import JSONRPCError, InWarmupError, Proxy\nfrom prometheus_client import make_wsgi_app, Gauge, Counter\n\n\nlogger = logging.getLogger(\"smartbch-exporter\")\n\n\n# Create Prometheus metrics to track smartbchd stats.\nSMARTBCH_BLOCK = Gauge(\"smartbch_block\", \"Block Height\")\nSMARTBCH_BLOCK_TRANSACTIONS = Gauge(\"smartbch_block_transactions\", \"Transaction in block\")\nSMARTBCH_BLOCK_VALUES = Gauge(\"smartbch_block_value\", \"Total BCH in block\")\nSMARTBCH_BLOCK_GAS_USED = Gauge(\"smartbch_block_gas_used\", \"Gas used in block\")\nSMARTBCH_BLOCK_GAS_LIMIT = Gauge(\"smartbch_block_gas_limit\", \"Gas limit in block\")\nSMARTBCH_BLOCK_NONCE = Gauge(\"smartbch_block_nonce\", \"Block nonce\")\nSMARTBCH_BLOCK_DIFFICULTY = Gauge(\"smartbch_block_difficulty\", \"Block difficulty\")\nSMARTBCH_BLOCK_UNCLES = Gauge(\"smartbch_block_uncles\", \"Block uncles\")\nSMARTBCH_BLOCK_SIZE_BYTES = Gauge(\"smartbch_block_size_bytes\", \"Block size in bytes\")\nSMARTBCH_BLOCK_TIMESTAMP = Gauge(\"smartbch_block_timestamp\", \"Block timestamp\")\n\n\nSMARTBCH_BLOCK_CONTRACTS_CREATED = Gauge(\"smartbch_block_contracts_created\", \"Contracts created in block\")\nSMARTBCH_BLOCK_CONTRACT_ACTIONS = Gauge(\"smartbch_block_contract_actions\", \"Contract actions in block\")\nSMARTBCH_BLOCK_TOKEN_TRANSFERS = Gauge(\"smartbch_block_token_transfers\", \"Token transfers in block\")\nSMARTBCH_BLOCK_BCH_TRANSFERS = Gauge(\"smartbch_block_bch_transfers\", \"BCH transfers in block\")\nSMARTBCH_BLOCK_LOCKED_BCH = Gauge(\"smartbch_block_locked_bch\", \"Locked BCH in block\")\n\nSMARTBCH_GAS_PRICE = Gauge(\"smartbch_gas_price\", \"Gas price\")\nSMARTBCH_PROTOCOL_VERSION = Gauge(\"smartbch_protocol_version\", \"Protocol version\")\nSMARTBCH_CHAIN_ID = Gauge(\"smartbch_chain_id\", \"Chain id\")\n\nSMARTBCH_TOTAL_CONTRACTS_CREATED = Gauge(\"smartbch_total_contracts_created\", \"Contracts created in total\")\nSMARTBCH_TOTAL_CONTRACT_ACTIONS = Gauge(\"smartbch_total_contract_actions\", \"Contract actions in total\")\nSMARTBCH_TOTAL_TOKEN_TRANSFERS = Gauge(\"smartbch_total_token_transfers\", \"Token transfers in total\")\nSMARTBCH_TOTAL_BCH_TRANSFERS = Gauge(\"smartbch_total_bch_transfers\", \"BCH transfers in total\")\nSMARTBCH_TOTAL_LOCKED_BCH = Gauge(\"smartbch_total_locked_bch\", \"Locked BCH in total\")\nSMARTBCH_TOTAL_BLACKHOLE_BCH = Gauge(\"smartbch_total_blackhole_bch\", \"BCH Fees Burnt in total\")\n\n\nEXPORTER_ERRORS = Counter(\n \"smartbch_exporter_errors\", \"Number of errors encountered by the exporter\", labelnames=[\"type\"]\n)\nPROCESS_TIME = Counter(\n \"smartbch_exporter_process_time\", \"Time 
spent processing metrics from bitcoin node\"\n)\n\nSATS_PER_COIN = 1e8\nWEI_PER_COIN = SATS_PER_COIN * 1e10\n\nSMARTBCH_RPC_SCHEME = os.environ.get(\"SMARTBCH_RPC_SCHEME\", \"http\")\nSMARTBCH_RPC_HOST = os.environ.get(\"SMARTBCH_RPC_HOST\", \"localhost\")\nSMARTBCH_RPC_PORT = os.environ.get(\"SMARTBCH_RPC_PORT\", \"8332\")\nSMARTBCH_CONF_PATH = os.environ.get(\"SMARTBCH_CONF_PATH\")\nMETRICS_ADDR = os.environ.get(\"METRICS_ADDR\", \"\") # empty = any address\nMETRICS_PORT = int(os.environ.get(\"METRICS_PORT\", \"9332\"))\nRETRIES = int(os.environ.get(\"RETRIES\", 5))\nTIMEOUT = int(os.environ.get(\"TIMEOUT\", 30))\nRATE_LIMIT_SECONDS = int(os.environ.get(\"RATE_LIMIT\", 5))\nLOG_LEVEL = os.environ.get(\"LOG_LEVEL\", \"INFO\")\n\n\nRETRY_EXCEPTIONS = (InWarmupError, ConnectionError, socket.timeout)\n\nRpcResult = Union[Dict[str, Any], List[Any], str, int, float, bool, None]\n\n\ndef on_retry(err: Exception, next_try: float) -> None:\n err_type = type(err)\n exception_name = err_type.__module__ + \".\" + err_type.__name__\n EXPORTER_ERRORS.labels(**{\"type\": exception_name}).inc()\n logger.error(\"Retry after exception %s: %s\", exception_name, err)\n\n\ndef error_evaluator(e: Exception) -> bool:\n return isinstance(e, RETRY_EXCEPTIONS)\n\n\n@lru_cache(maxsize=1)\ndef rpc_client_factory():\n host = SMARTBCH_RPC_HOST\n if SMARTBCH_RPC_PORT:\n host = \"{}:{}\".format(host, SMARTBCH_RPC_PORT)\n service_url = \"{}://{}\".format(SMARTBCH_RPC_SCHEME, host)\n logger.info(\"Using environment configuration\")\n return lambda: Proxy(service_url=service_url, timeout=TIMEOUT)\n\n\ndef rpc_client():\n return rpc_client_factory()()\n\n\n@riprova.retry(\n timeout=TIMEOUT,\n backoff=riprova.ExponentialBackOff(),\n on_retry=on_retry,\n error_evaluator=error_evaluator,\n)\ndef smartbchrpc(*args) -> RpcResult:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(\"RPC call: \" + \" \".join(str(a) for a in args))\n\n result = rpc_client().call(*args)\n\n logger.debug(\"Result: %s\", result)\n return result\n\nBLACKHOLE_CONTRACT_ADDRESS=\"0x0000000000000000000000626c61636b686f6c65\"\nBRIDGE_CONTRACT_ADDRESS=\"0xc172f00ac38c8b2004793f94b33483aa704045bb\"\nBRIDGE_START_BLOCK = 238790 # first block with real txs seeding with bch\n\nlastBlockStatsRead = BRIDGE_START_BLOCK-1\n\ntotalContractsCreated = 0\ntotalTokenTransfers = 0\ntotalContractActions = 0\ntotalBchTransfers = 0\ntotalBchLocked = 0\ndef refresh_metrics() -> None:\n global lastBlockStatsRead, totalContractsCreated, totalTokenTransfers, totalContractActions, totalBchTransfers, totalBchLocked\n syncing = smartbchrpc(\"eth_syncing\")\n if syncing == False:\n blockHeight = int(smartbchrpc(\"eth_blockNumber\"), base=16)\n else:\n blockHeight = int(smartbchrpc(\"eth_syncing\")['currentBlock'], base=16)\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(blockHeight)\n\n # used for initial boot to catch up on stats\n lastBlock = None\n while lastBlockStatsRead < blockHeight:\n block = smartbchrpc(\"eth_getBlockByNumber\", hex(lastBlockStatsRead), True)\n for tx in block['transactions']:\n if tx['blockNumber'] != hex(lastBlockStatsRead):\n continue\n if tx['from'] == BRIDGE_CONTRACT_ADDRESS:\n totalBchLocked += int(tx['value'], base=16)\n if tx['to'] == BRIDGE_CONTRACT_ADDRESS:\n totalBchLocked -= int(tx['value'], base=16)\n if tx['to'] == '0x0000000000000000000000000000000000000000':\n totalContractsCreated += 1\n if len(tx['input']) >= 10 and tx['input'][0:10] == '0xa9059cbb':\n totalTokenTransfers += 1\n if int(tx['value'], base=16) > 0:\n 
totalBchTransfers += 1\n else:\n totalContractActions += 1\n lastBlockStatsRead += 1\n lastBlock = block\n\n if lastBlock is None:\n lastBlock = smartbchrpc(\"eth_getBlockByNumber\", hex(blockHeight - 1), True)\n logger.debug(lastBlock)\n\n blackholeBchFees = int(smartbchrpc(\"eth_getBalance\", BLACKHOLE_CONTRACT_ADDRESS, hex(blockHeight - 1)), base=16)\n\n\n blockContractsCreated = 0\n blockTokenTransfers = 0\n blockContractActions = 0\n blockBchTransfers = 0\n blockBchLocked = 0\n for tx in lastBlock['transactions']:\n if tx['blockNumber'] != hex(blockHeight - 1):\n continue\n if tx['from'] == BRIDGE_CONTRACT_ADDRESS:\n blockBchLocked += int(tx['value'], base=16)\n if tx['to'] == BRIDGE_CONTRACT_ADDRESS:\n blockBchLocked -= int(tx['value'], base=16)\n if tx['to'] == '0x0000000000000000000000000000000000000000':\n blockContractsCreated += 1\n if len(tx['input']) >= 10 and tx['input'][0:10] == '0xa9059cbb':\n blockTokenTransfers += 1\n if int(tx['value'], base=16) > 0:\n blockBchTransfers += 1\n else:\n blockContractActions += 1\n\n\n SMARTBCH_BLOCK.set(blockHeight)\n SMARTBCH_BLOCK_TRANSACTIONS.set(len(lastBlock['transactions']))\n # SMARTBCH_BLOCK_VALUES = Gauge(\"smartbch_block_value\", \"Total BCH in block\")\n SMARTBCH_BLOCK_GAS_USED.set(int(lastBlock['gasUsed'], base=16))\n SMARTBCH_BLOCK_GAS_LIMIT.set(int(lastBlock['gasLimit'], base=16))\n SMARTBCH_BLOCK_NONCE.set(int(lastBlock['nonce'], base=16))\n SMARTBCH_BLOCK_DIFFICULTY.set(int(lastBlock['difficulty'], base=16))\n SMARTBCH_BLOCK_UNCLES.set(len(lastBlock['uncles']))\n SMARTBCH_BLOCK_SIZE_BYTES.set(int(lastBlock['size'], base=16))\n SMARTBCH_BLOCK_TIMESTAMP.set(int(lastBlock['timestamp'], base=16))\n\n\n SMARTBCH_GAS_PRICE.set(int(smartbchrpc(\"eth_gasPrice\"), base=16))\n SMARTBCH_PROTOCOL_VERSION.set(int(smartbchrpc(\"eth_protocolVersion\"), base=16))\n SMARTBCH_CHAIN_ID.set(int(smartbchrpc(\"eth_chainId\"), base=16))\n\n SMARTBCH_TOTAL_LOCKED_BCH.set(totalBchLocked / WEI_PER_COIN)\n SMARTBCH_TOTAL_CONTRACTS_CREATED.set(totalContractsCreated)\n SMARTBCH_TOTAL_CONTRACT_ACTIONS.set(totalContractActions)\n SMARTBCH_TOTAL_TOKEN_TRANSFERS.set(totalTokenTransfers)\n SMARTBCH_TOTAL_BCH_TRANSFERS.set(totalBchTransfers)\n SMARTBCH_TOTAL_BLACKHOLE_BCH.set(blackholeBchFees / WEI_PER_COIN)\n\n SMARTBCH_BLOCK_LOCKED_BCH.set(blockBchLocked / WEI_PER_COIN)\n SMARTBCH_BLOCK_CONTRACTS_CREATED.set(blockContractsCreated)\n SMARTBCH_BLOCK_CONTRACT_ACTIONS.set(blockContractActions)\n SMARTBCH_BLOCK_TOKEN_TRANSFERS.set(blockTokenTransfers)\n SMARTBCH_BLOCK_BCH_TRANSFERS.set(blockBchTransfers)\n\ndef sigterm_handler(signal, frame) -> None:\n logger.critical(\"Received SIGTERM. 
Exiting.\")\n sys.exit(0)\n\n\ndef exception_count(e: Exception) -> None:\n err_type = type(e)\n exception_name = err_type.__module__ + \".\" + err_type.__name__\n EXPORTER_ERRORS.labels(**{\"type\": exception_name}).inc()\n\n\ndef main():\n # Set up logging to look similar to bitcoin logs (UTC).\n logging.basicConfig(\n format=\"%(asctime)s %(levelname)s %(message)s\", datefmt=\"%Y-%m-%dT%H:%M:%SZ\"\n )\n logging.Formatter.converter = time.gmtime\n logger.setLevel(LOG_LEVEL)\n\n # Handle SIGTERM gracefully.\n signal.signal(signal.SIGTERM, sigterm_handler)\n\n app = make_wsgi_app()\n\n last_refresh = datetime.fromtimestamp(0)\n\n def refresh_app(*args, **kwargs):\n nonlocal last_refresh\n process_start = datetime.now()\n\n # Only refresh every RATE_LIMIT_SECONDS seconds.\n if (process_start - last_refresh).total_seconds() < RATE_LIMIT_SECONDS:\n return app(*args, **kwargs)\n\n # Allow riprova.MaxRetriesExceeded and unknown exceptions to crash the process.\n try:\n refresh_metrics()\n except riprova.exceptions.RetryError as e:\n logger.error(\"Refresh failed during retry. Cause: \" + str(e))\n exception_count(e)\n except JSONRPCError as e:\n logger.debug(\"SmartBCH RPC error refresh\", exc_info=True)\n exception_count(e)\n except json.decoder.JSONDecodeError as e:\n logger.error(\"RPC call did not return JSON. Bad credentials? \" + str(e))\n sys.exit(1)\n\n duration = datetime.now() - process_start\n PROCESS_TIME.inc(duration.total_seconds())\n logger.info(\"Refresh took %s seconds\", duration)\n last_refresh = process_start\n\n return app(*args, **kwargs)\n\n httpd = make_server(METRICS_ADDR, METRICS_PORT, refresh_app)\n httpd.serve_forever()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"blockparty-sh/smartbchd-prometheus-exporter","sub_path":"smartbchd-monitor.py","file_name":"smartbchd-monitor.py","file_ext":"py","file_size_in_byte":11303,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"13883933689","text":"import tensorflow as tf\r\n\r\nclass Discriminator(tf.keras.Model):\r\n\tdef __init__(self, args, name = 'discriminator'):\r\n\t\tsuper(Discriminator, self).__init__(name = name)\r\n\t\tself.conv1 = tf.keras.layers.Conv2D(64, 7, 2, activation = tf.nn.leaky_relu)\r\n\t\tself.conv2 = tf.keras.layers.Conv2D(128, 7, 2, activation = tf.nn.leaky_relu)\r\n\t\tself.flatten = tf.keras.layers.Flatten()\r\n\t\tself.fc1 = tf.keras.layers.Dense(1)\r\n\t\tself.dis_layers = [self.conv1, self.conv2, self.flatten, self.fc1]\r\n\tdef call(self, x):\r\n\t\tfor layer in self.dis_layers:\r\n\t\t\tx = layer(x)\r\n\t\treturn x\r\n\r\nclass Generator(tf.keras.Model):\r\n\tdef __init__(self, args, name = 'generator'):\r\n\t\tsuper(Generator, self).__init__(name = name)\r\n\t\tself.fc1 = tf.keras.layers.Dense(7 * 7 * 128)\r\n\t\tself.reshape = tf.keras.layers.Reshape([7, 7, 128])\r\n\t\tself.deconv1 = tf.keras.layers.Conv2DTranspose(128, 7, 2, activation = tf.nn.leaky_relu, padding = 'same')\r\n\t\tself.deconv2 = tf.keras.layers.Conv2DTranspose(128, 7, 2, activation = tf.nn.leaky_relu, padding = 'same')\r\n\t\tself.deconv3 = tf.keras.layers.Conv2DTranspose(64, 7, 1, activation = tf.nn.leaky_relu, padding = 'same')\r\n\t\tself.deconv4 = tf.keras.layers.Conv2D(1, 3, 1, activation = tf.nn.tanh, padding = 'same')\r\n\t\t\r\n\t\tself.gen_layers = [self.fc1, self.reshape, self.deconv1, self.deconv2, self.deconv3, self.deconv4]\r\n\tdef call(self, x):\r\n\t\tfor layer in self.gen_layers:\r\n\t\t\tx = layer(x)\r\n\t\treturn 
x\r\n","repo_name":"WangZesen/GAN-Hinge-Loss","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"} +{"seq_id":"23477095146","text":"\"\"\"\"Modifique o programa para trabalhar com duas filas.\nP/ facilitar, considere atendimento: A = fila 1. B = fila 2...etc.\"\"\"\n\nultimo = 10\nultimo_dois = 5\nfila_um = list(range(1, ultimo + 1))\nfila_dois = list(range(1, ultimo_dois + 1))\nwhile True:\n fila = (input(\"Selecione qual fila irá trabalhar 1 ou 2: \"))\n if fila == '1' or fila == '2':\n if fila == '1':\n print(f\"\\nExistem {len(fila_um)} clientes na fila\")\n print(f\"Fila atual: {fila_um}\")\n elif fila == '2':\n print(F\"\\nExistem {len(fila_dois)} clientes na fila\")\n print(f\"\\nFila atual: {fila_dois}\")\n print(\"Digite:\"\n \"\\nA ou B - Realizar o atendimento\"\n \"\\nF ou G- Fim da fila\"\n \"\\nS para Sair\")\n operacao = input(\"Digite Fila 1: A, F ou S \"\n \"\\nFila 2: B, G ou S: \")\n if operacao == 'A':\n if len(fila_um) > 0:\n atendido = fila_um.pop(0)\n print(f'Cliente {atendido} atendido!')\n elif operacao == \"B\":\n if len(fila_dois) > 0:\n atendido = fila_dois.pop(0)\n print(f'Cliente {atendido} atendido!')\n else:\n print('Fila vazia!')\n elif operacao == 'F':\n ultimo += 1\n fila_um.append(ultimo)\n elif operacao == \"G\":\n ultimo_dois += 1\n fila_dois.append(ultimo_dois)\n elif operacao == 'S':\n break\n else:\n print('\\nOperação inválida ! Digte uma das opções válidas !')\n","repo_name":"ninaai517/Python_logic","sub_path":"Cap.06/Listas/Exercícios/6.6.py","file_name":"6.6.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"43162310703","text":"#!/usr/bin/env python3\n#filename: LongestSubstring_6.py\n\n\"\"\"\nThis program is a solution to a leetcode.com programming problem.\n\nLeetCode problem title: 3. 
Longest Substring Without Repeating Characters\n\n~~~~~~~~~~~~~~~~~~~\nProblem Description\n~~~~~~~~~~~~~~~~~~~\n\nGiven a string, find the length of the longest substring without repeating characters.\n\nExample 1:\n\nInput: \"abcabcbb\"\nOutput: 3\nExplanation: The answer is \"abc\", with the length of 3.\nExample 2:\n\nInput: \"bbbbb\"\nOutput: 1\nExplanation: The answer is \"b\", with the length of 1.\nExample 3:\n\nInput: \"pwwkew\"\nOutput: 3\nExplanation: The answer is \"wke\", with the length of 3.\n Note that the answer must be a substring, \"pwke\" is a subsequence and not a substring.\n\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nProvided Beginning of Solution\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~\nPseudo Code for a Solution\n~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n\n~~~~~~~~~~~~~~~~~~~\nUsing This Solution\n~~~~~~~~~~~~~~~~~~~\n\nEnter this in the shell:\npython LongestSubstring_6.py\n\n\n~~~~~~~~~~~~~~~~~~\n Notes\n~~~~~~~~~~~~~~~~~~\n\n\"\"\"\n\nclass Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n a = \"\" # substring attempt 1\n b = \"\" # substring attempt 2\n print(f's = {s}')\n for i in range(len(s)):\n if s[i] not in a:\n a += s[i]\n print(f'a = {a.ljust(10)} b = {b}')\n else:\n print(f'a = {a.ljust(10)} b = {b}')\n if len(b) < len(a):\n b = a\n print(f'a = {a.ljust(10)} b = {b}')\n print(f'a.index(s[i])+1 = {a.index(s[i])+1}')\n print(f'a[a.index(s[i])+1::] = {a[a.index(s[i])+1::]}')\n print(f'a[a.index(s[i])+1::] + s[i] = {a[a.index(s[i])+1::] + s[i]}')\n a = a[a.index(s[i])+1::] + s[i]\n\n print(f'a = {a.ljust(10)} b = {b}')\n print('')\n return max(len(b), len(a))\n\n\n\nif __name__ == '__main__':\n soln = Solution()\n soln.lengthOfLongestSubstring(\"\")\n soln.lengthOfLongestSubstring(\"a\")\n soln.lengthOfLongestSubstring(\"abcdabracadabra\")\n soln.lengthOfLongestSubstring(\"abcabcbb\")\n soln.lengthOfLongestSubstring(\"bbbbb\")\n soln.lengthOfLongestSubstring(\"pwwkew\")\n soln.lengthOfLongestSubstring(\"abcdamn\")\n soln.lengthOfLongestSubstring(\"abcdambq\")\n soln.lengthOfLongestSubstring(\"abcbamboy\")\n","repo_name":"oneforawe/code-practice","sub_path":"Python/LeetCode/003_LongestSubstring/dev/LongestSubstring_6.py","file_name":"LongestSubstring_6.py","file_ext":"py","file_size_in_byte":2491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"329314336","text":"\n\nimport click\n\nfrom pathlib import Path\nfrom operator import add\n\n@click.command('67')\ndef problem_067():\n \"\"\"Maximum path sum II\n\n By starting at the top of the triangle below and moving to adjacent\n numbers on the row below, the maximum total from top to bottom is 23.\n \n **3** \n **7** 4 \n 2 **4** 6 \n 8 5 **9** 3\n \n That is, 3 + 7 + 4 + 9 = 23.\n \n Find the maximum total from top to bottom in\n [triangle.txt](project/resources/p067_triangle.txt) (right click and 'Save\n Link/Target As...'), a 15K text file containing a triangle with one-\n hundred rows.\n \n **NOTE:** This is a much more difficult version of [Problem 18](problem=18). It is not possible to try every route to solve this problem, as there are 299 altogether! If you could check one trillion (1012) routes every second it would take over twenty billion years to check them all. There is an efficient algorithm to solve it. 
;o)\n \n \"\"\"\n\n triangle = load_triangle()\n while len(triangle) > 1:\n triangle = reduce_triangle(triangle, max, add)\n click.echo(triangle[0][0])\n\n\ndef reduce_triangle(triangle, pick, combine):\n ultimate = triangle[-1]\n penultimate = triangle[-2]\n new_row = tuple(\n combine(value, pick(ultimate[i], ultimate[i + 1]))\n for i, value in enumerate(penultimate))\n return triangle[:-2] + (new_row, )\n\n\ndef load_triangle():\n with Path('.', 'files', 'triangle.txt').open('r') as f:\n return tuple(\n tuple(int(i) for i in line.split(' '))\n for line in f.readlines()\n )","repo_name":"adharris/euler","sub_path":"problems/problems_000_099/problems_060_069/problem_067.py","file_name":"problem_067.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"31783360541","text":"import pandas as pd\nimport requests\nfrom lxml import etree\n\n_old_uri = 'https://www.volby.cz/pls/kv2002/kv12?xjazyk=CZ&xid=0'\n_old_candidates = 'https://www.volby.cz/pls/kv2002/kv22?xjazyk=CZ&xid=0&xv=11'\n_old_base = 'https://www.volby.cz/pls/kv2002/'\n_ns = {\"re\": \"http://exslt.org/regular-expressions\"}\n_parser = etree.HTMLParser()\n\n\ndef format_candidates(df):\n df.columns = df.columns.droplevel(0)\n if 'Mandát' in df.columns:\n df['mandate'] = [True if x == '*' else False for x in df['Mandát']]\n df = df.drop(columns='Mandát')\n else:\n df = df.insert(loc=len(df.columns) - 1, column='mandate', value=0)\n return df\n\n\ndef download_city(city: str):\n html = requests.get(_old_uri).text\n href = etree.fromstring(html, parser=_parser).xpath('.//td[re:match(., \"^{0}\")]'.format(city), namespaces=_ns)[0] \\\n .getparent().getchildren()[0].find('a').attrib['href']\n html = requests.get(_old_base + href).text\n href = etree.fromstring(html, parser=_parser).xpath('.//a[re:match(., \"^3$\")]', namespaces=_ns)[0].attrib['href']\n dfs = pd.read_html(_old_base + href, flavor='html5lib')\n\n for x in [5, 6, 8, 9]:\n dfs[0][dfs[0].columns[x]] = dfs[0][dfs[0].columns[x]].str.replace(\"\\s\", '')\n\n dfs[0].insert(0, 'year', 2002)\n dfs[0].to_csv('data/summary.csv', index=False, header=False, sep=';', mode='a')\n\n dfs[1][dfs[1].columns[1]] = dfs[1][dfs[1].columns[1]].str.replace(\"\\s\", '')\n dfs[1].insert(0, 'id', range(1, len(dfs[1]) + 1))\n dfs[1].insert(0, 'year', 2002)\n dfs[1].to_csv('data/party_votes.csv', index=False, header=False, sep=';', mode='a')\n\n\ndef download_city_candidates(city: str):\n html = requests.get(_old_candidates).text\n href = etree.fromstring(html, parser=_parser) \\\n .xpath('.//td[re:match(., \"^{0}\") and not (@colspan)]'.format(city), namespaces=_ns)[0] \\\n .getnext().find('a').attrib['href']\n\n html = requests.get(_old_base + href).text\n href = etree.fromstring(html, parser=_parser) \\\n .xpath('.//td[re:match(., \"^3$\")]', namespaces=_ns)[0].getparent().getchildren()[0].find('a').attrib['href']\n\n html = requests.get(_old_base + href).text\n href = etree.fromstring(html, parser=_parser).xpath('.//td/a')[0].attrib['href']\n df = pd.read_html(_old_base + href, flavor='html5lib')[0]\n df = format_candidates(df)\n df['abs.'] = df['abs.'].str.replace(\"\\s\", '')\n df.insert(0, 'year', 2002)\n df.to_csv('data/candidates.csv', index=False, header=False, sep=';', mode='a')\n\n\ndef scrape(city: str):\n print('Scraping 2002', end=' ')\n download_city(city)\n download_city_candidates(city)\n print('✓')\n\n\nif __name__ == \"__main__\":\n city = 'Plzeň'\n download_city(city)\n 
download_city_candidates(city)\n","repo_name":"Eldeeqq/BI-VZD","sub_path":"01/scrapers/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"39638935261","text":"import os\n\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\n\nfrom root import ROOT_DIR\nfrom weighted_mean_prediction.data_setup import get_encoded_split_data\nfrom weighted_mean_prediction.linear_model.shared import normalise_data, fit_model\nfrom weighted_mean_prediction.model_storage import load_model\nfrom weighted_mean_prediction.regression_performance import evaluate_model, get_all_metrics, plot_residuals_histogram, \\\n plot_QQ, plot_fitted, plot_fancy_fitted\n\n\ndef reshape_data(*X):\n return [x.values.reshape(-1, 1) for x in X]\n\n\nif __name__ == \"__main__\":\n reg = LinearRegression()\n\n model_dir = f\"{ROOT_DIR}/weighted_mean_prediction/linear_model/models\"\n model_name = \"pairing_lr.joblib\"\n model_path = os.path.join(model_dir, model_name)\n\n X_train, X_val, X_test, y_train, y_val, y_test = get_encoded_split_data()\n # X_train, X_val, X_test = get_dG_data(X_train, X_val, X_test)\n X_train, X_val, X_test = normalise_data(X_train, X_val, X_test)\n X_train, X_val, X_test = reshape_data(X_train[\"dG_pairing\"], X_val[\"dG_pairing\"], X_test[\"dG_pairing\"])\n\n lm = load_model(model_path)\n lm = lm if lm is not None else fit_model(X_train, y_train[\"weighted_mean\"], model_path)\n print(lm.coef_)\n predictions, errors = evaluate_model(lm, X_test, y_test[\"weighted_mean\"])\n print(get_all_metrics(y_test[\"weighted_mean\"], predictions))\n plt.scatter(X_test, predictions)\n plt.scatter(X_test, y_test)\n plt.show()\n\n plot_residuals_histogram(predictions, y_test[\"weighted_mean\"])\n plot_QQ(predictions, y_test[\"weighted_mean\"])\n plot_fancy_fitted(predictions, y_test[\"weighted_mean\"])\n\n print(f\"y = {lm.intercept_} + {lm.coef_[0]}x\")\n","repo_name":"fegb-dataset22/dataset22","sub_path":"weighted_mean_prediction/linear_model/pairing_lr.py","file_name":"pairing_lr.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"27330345356","text":"import datetime\nimport json\nimport re\nfrom pathlib import Path\n\nimport pandas as pd\nimport requests\nimport tabulate\nfrom bs4 import BeautifulSoup\n\nout = Path('parsed_xml2_downloads')\n\nif __name__ == '__main__':\n directory = Path('hdb_downloads')\n for path in directory.glob('2023-05_BTO_*.xml'):\n soup = BeautifulSoup(path.read_bytes(), 'lxml-xml')\n town = soup.find('town').text.replace('/', '+')\n project_name = soup.find('project-name').text\n print(f'{town} ({project_name})')\n\n stack = [soup]\n while stack:\n elem = stack.pop(-1)\n children = elem.findChildren()\n if children:\n stack.extend(children)\n continue\n\n if re.fullmatch(r'[0-9]{4}-[0-9]{2}/.+\\.[a-z]{2,5}', elem.text):\n r = requests.get(f'https://resources.homes.hdb.gov.sg/nf/{elem.text}', verify=False)\n if r.status_code == 200:\n out_path = out / elem.text\n out_path.parent.mkdir(parents=True, exist_ok=True)\n out_path.write_bytes(r.content)\n","repo_name":"averykhoo/macpherson-bto","sub_path":"hdb-api-samples/parse-xml2.py","file_name":"parse-xml2.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} 
+{"seq_id":"25646633950","text":"encontrado = 0\r\nnumeros = []\r\nfor c in range(3):\r\n numeros.append(int(input(f'Digite o número {c+1}: ')))\r\nnum = int(input('Número para verificar: '))\r\nfor c in range(len(numeros)):\r\n if num == numeros[c]:\r\n print(f'Encontrado na posição {c}')\r\n else:\r\n print('Não encontrado')\r\n","repo_name":"alexalvferr/fabrica","sub_path":"Exercício 84.py","file_name":"Exercício 84.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"74962586779","text":"import math\nimport random\nimport arcade\n\nimport wyggles.app\nfrom wyggles.mathutils import *\nfrom wyggles.engine import *\nimport wyggles.app\nfrom wyggles.brain import Brain\nfrom wyggles.fruit import Fruit\n\nclass WyggleBrain(Brain):\n def __init__(self, sprite):\n super().__init__(sprite)\n self.heading = random.randint(0, 359)\n self.wheel = 0\n self.focus = None\n self.state = \"wanderer\"\n self.consider_max = 10\n self.consider_timer = self.consider_max\n #\n self.munch_timer = 10\n\n def reset(self):\n self.state = ''\n self.focus = None\n\n def update(self, delta_time: float = 1 / 60):\n super().update(delta_time)\n\n def move(self):\n x, y = self.position\n to_x, to_y = self.end_pos\n\n pd = random.randint(0, 3)\n if pd == 0:\n self.micro_left()\n elif pd == 2:\n self.micro_right()\n\n steering_ndx = int(math.pi + (math.atan2(y - to_y, x - to_x)))\n delta = steering[steering_ndx][self.wheel]\n\n self.try_move(delta)\n\n \n def try_move(self, delta):\n delta_x, delta_y = delta\n next_x, next_y = 0, 0\n need_turn = False\n\n sprite = self.sprite\n pos = sprite.position\n left, bottom, right, top = sprite.left, sprite.bottom, sprite.right, sprite.top\n w_left, w_bottom, w_right, w_top = world_left, world_bottom, world_right, world_top\n\n if(left < w_left):\n delta_x = w_left - left\n need_turn = True\n elif(right > w_right):\n delta_x = w_right - right\n need_turn = True\n\n if(bottom < w_bottom):\n delta_y = w_bottom - bottom\n need_turn = True\n elif(top > w_top):\n delta_y = w_top - top\n need_turn = True\n\n #TODO:use pymunk\n '''\n if not need_turn:\n landscape_layer = wyggles.app.landscape_layer\n if landscape_layer:\n need_turn = len(arcade.check_for_collision_with_list(self.sprite, landscape_layer)) != 0\n '''\n if(need_turn):\n self.right(45)\n #self.randforward()\n self.project(self.sensor_range)\n\n nextX = self.x + delta_x\n nextY = self.y + delta_y\n self.sprite.move_to((nextX, nextY))\n\n def left(self, angle):\n heading = self.heading - angle\n self.heading = heading if heading > 0 else 360 + heading\n\n def right(self, angle):\n heading = self.heading + angle\n self.heading = heading if heading < 359 else heading - 360\n\n def micro_left(self):\n ph = self.wheel - 1\n if ph < 0:\n ph = 0\n self.wheel = ph\n\n def micro_right(self):\n ph = self.wheel + 1\n if ph > 2:\n ph = 2\n self.wheel = ph\n\n def forward(self, distance):\n x, y = self.position\n px = x + (distance * (math.cos(self.heading * degRads)))\n py = y + (distance * (math.sin(self.heading * degRads)))\n self.move_to((px, py))\n\n def randforward(self):\n self.forward(random.randint(0, self.sensor_range))\n\nsteering = [\n [(1, -1), (1, 0), (1, 1)],\n [(1, 0), (1, 1), (0, 1)],\n [(1, 1), (0, 1), (-1, 1)],\n [(0, 1), (-1, 1), (-1, 0)],\n [(-1, 1), (-1, 0), (-1, -1)],\n [(-1, 0), (-1, -1), (0, -1)],\n [(-1, -1), (0, -1), (1, -1)],\n [(0, -1), (1, -1), (1, 
0)],\n]\n","repo_name":"kfields/botsley","sub_path":"examples/wyggles/wyggles/wyggle/brain.py","file_name":"brain.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"72263256541","text":"# Given an array of integers nums which is sorted in ascending order, and an integer target, \n# write a function to search target in nums. If target exists, then return its index. \n# Otherwise, return -1.\n\n# Example 1:\n\n# Input: nums = [-1,0,3,5,9,12], target = 9\n# Output: 4\n# Explanation: 9 exists in nums and its index is 4\n\nfrom cmath import pi\nimport re\n\n\nclass Solution():\n def search(self, nums, target):\n left, right = 0 , len(nums) - 1\n while left <= right:\n pivot = left + (right - left) / 2 \n if nums[pivot] == target:\n return pivot\n if target < nums[pivot]:\n right = pivot - 1 \n else:\n left = pivot + 1\n return -1\n\n\ns = Solution()\n\nprint(s.search([-1,0,3,5,9,12],9))","repo_name":"AlfredDev/LeetCode75-Py","sub_path":"704-Binary Search.py","file_name":"704-Binary Search.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"73053481500","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom soma.spm.custom_decorator_pattern import checkIfArgumentTypeIsAllowed\nimport numbers\n\nclass OptimisationSettings(object):\n \"\"\"\n Settings for the optimisation. If you are unsure about them, then leave them at the default values. Optimisation is by repeating a\n number of Levenberg-Marquardt iterations, in which the equations are solved using a full multi-grid (FMG) scheme. FMG and\n Levenberg-Marquardt are both described in Numerical Recipes (2nd edition).\n \"\"\"\n @checkIfArgumentTypeIsAllowed(numbers.Real, 1)\n def setLMRegularisation(self, LM_regularisation):\n \"\"\"\n Levenberg-Marquardt regularisation. Larger values increase the the stability of\n the optimisation, but slow it down. A value of zero results in a Gauss-Newton\n strategy, but this is not recommended as it may result in instabilities in the FMG.\n \"\"\"\n self.LM_regularisation = LM_regularisation\n \n @checkIfArgumentTypeIsAllowed(int, 1)\n def setCycles(self, cycles):\n \"\"\"\n Number of cycles used by the full multi-grid matrix solver. More cycles result in\n higher accuracy, but slow down the algorithm. See Numerical Recipes for more\n information on multi-grid methods.\n \"\"\"\n if cycles in [1, 2, 3, 4, 5, 6, 7, 8]:\n self.cycles = cycles\n else:\n raise ValueError(\"Unvalid cycles\")\n \n @checkIfArgumentTypeIsAllowed(int, 1)\n def setIterations(self, iterations):\n \"\"\"\n Number of relaxation iterations performed in each multi-grid cycle. More\n iterations are needed if using ``bending energy'' regularisation, because the\n relaxation scheme only runs very slowly. 
See the chapter on solving partial\n differential equations in Numerical Recipes for more information about relaxation\n methods.\n \"\"\"\n if iterations in [1, 2, 3, 4, 5, 6, 7, 8]:\n self.iterations = iterations\n else:\n raise ValueError(\"Unvalid cycles\")\n \n def getStringListForBatch(self):\n if not None in [self.LM_regularisation, self.cycles, self.iterations]:\n batch_list = []\n batch_list.append(\"optim.lmreg = \" + str(self.LM_regularisation) + \";\")\n batch_list.append(\"optim.cyc = \" + str(self.cycles) + ';')\n batch_list.append(\"optim.its = \" + str(self.iterations) + ';')\n return batch_list \n else:\n raise ValueError('At least one OptimisationSettings parameter missed')\n ","repo_name":"brainvisa/brainvisa-spm","sub_path":"python/soma/spm/virtual_spm/tools/dartel_tools/run_dartel/optimisation_settings.py","file_name":"optimisation_settings.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"71875406299","text":"from tornado.web import RequestHandler\nfrom tornado import gen\nfrom json import dumps, loads\nfrom random import randint\nfrom db import DbHandler\n\nclass TaskHandler(RequestHandler):\n def initialize(self):\n self.db = DbHandler.get_db()\n\n @gen.coroutine\n def post(self):\n user = self.get_secure_cookie(\"user\").decode()\n result = int(loads(self.request.body.decode()).get('result', '0'))\n new_task = self.generate_task()\n balance = yield self.db.update_score(user, result, new_task)\n self.write(dumps({\n \"task\" : new_task,\n \"balance\" : balance\n }))\n\n def generate_task(self):\n return randint(2**17, 2**24)\n","repo_name":"HackerDom/qctf-starter-2018","sub_path":"tasks/browser-mining/tornado/handlers/TaskHandler.py","file_name":"TaskHandler.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"74228805659","text":"import numpy as np\ndef merge_main_context(W, merge_fun=lambda m, c: np.mean([m, c], axis=0),\n normalize=True):\n \"\"\"\n Merge the main-word and context-word vectors for a weight matrix\n using the provided merge function (which accepts a main-word and\n context-word vector and returns a merged version).\n\n By default, `merge_fun` returns the mean of the two vectors.\n \"\"\"\n\n vocab_size = len(W) / 2\n for i, row in enumerate(W[:vocab_size]):\n merged = merge_fun(row, W[i + vocab_size])\n if normalize:\n merged /= np.linalg.norm(merged)\n W[i, :] = merged\n\n return W[:vocab_size]\n\ndef read_lines_in_file(path):\n with open(path) as f:\n return f.read().split(\"\\n\")","repo_name":"danielvarab/contradiction-detection","sub_path":"util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"14954058802","text":"import logging\nfrom typing import List\n\n\nclass Solution:\n @classmethod\n def moveZeroes(cls, nums: List[int]):\n logging.debug(f\"input: nums = {nums}\")\n result_nums_idx = -1\n\n for i in range(len(nums)):\n if nums[i] != 0:\n result_nums_idx += 1\n nums[result_nums_idx] = nums[i]\n\n while result_nums_idx > -1 and result_nums_idx + 1 < len(nums):\n result_nums_idx += 1\n if nums[result_nums_idx] != 0:\n nums[result_nums_idx] = 0\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n\n nums1 = [0, 1, 0, 3, 12]\n expected_nums1 = [1, 3, 12, 0, 0]\n 
Solution.moveZeroes(nums1)\n for i in range(len(expected_nums1)):\n assert nums1[i] == expected_nums1[i]\n\n nums2 = [0]\n expected_nums2 = [0]\n Solution.moveZeroes(nums2)\n for i in range(len(expected_nums2)):\n assert nums2[i] == expected_nums2[i]\n\n nums3 = [0, 0, 0]\n expected_nums3 = [0, 0, 0]\n Solution.moveZeroes(nums3)\n for i in range(len(expected_nums3)):\n assert nums3[i] == expected_nums3[i]\n","repo_name":"ladamalina/leetcode-2022-python","sub_path":"283. Move Zeroes (easy)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"33627982241","text":"import numpy as np\n\n\ndef rotation_matrix(a):\n return np.array([[np.cos(a), -np.sin(a)],\n [np.sin(a), np.cos(a)]])\n\n\ndef margin(angle, size, v1, v2):\n vv = v2 - v1\n vv = vv / np.sqrt((vv**2).sum())\n edgelen = size / np.sin(angle)\n v3 = v1 + edgelen * np.dot(rotation_matrix(angle), vv)\n v4 = v2 - edgelen * np.dot(rotation_matrix(-angle), vv)\n return np.c_[v2, v4, v3, v1].T\n\n\ndef regular_polygon(n, v1, v2):\n ang = 2 * np.pi / n\n mat = rotation_matrix(ang)\n vs = np.c_[v1, v2]\n vv = v2 - v1\n vprev = v2\n for i in range(n - 2):\n v = np.dot(mat, vv) + vprev\n vs = np.c_[vs, v]\n vv = v - vprev\n vprev = v\n return vs.T\n","repo_name":"hamukazu/craft_regpolyhed","sub_path":"craftmath.py","file_name":"craftmath.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"8249327875","text":"# -*- coding: utf-8 -*-\nimport os\nimport shutil\nfrom tempfile import mkdtemp\n\nfrom django.test import TestCase\n\nfrom django_extensions.management.mysql import parse_mysql_cnf\n\n\nclass ParseMysqlCnfTests(TestCase):\n \"\"\"Tests for parse_mysql_cnf.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.tmpdir = mkdtemp()\n\n @classmethod\n def tearDownClass(cls):\n shutil.rmtree(cls.tmpdir)\n\n def test_should_return_empty_strings_if_read_default_file_option_is_missing(self):\n dbinfo = {}\n\n result = parse_mysql_cnf(dbinfo)\n\n self.assertEqual(result, ('', '', '', '', ''))\n\n def test_should_parse_my_cnf_and_retun_connection_settings(self):\n my_cnf_path = os.path.join(self.tmpdir, 'my.cnf')\n with open(my_cnf_path, 'w') as f:\n f.write(\"\"\"[client]\ndatabase = test_name\nuser = test_user\npassword = test_password\nhost = localhost\nport = 3306\nsocket = /var/lib/mysqld/mysql.sock\n\"\"\")\n\n dbinfo = {\n 'ENGINE': 'django.db.backends.mysql',\n 'OPTIONS': {\n 'read_default_file': my_cnf_path,\n }\n }\n\n result = parse_mysql_cnf(dbinfo)\n\n self.assertEqual(result,\n ('test_user', 'test_password', 'test_name',\n '/var/lib/mysqld/mysql.sock', '3306'))\n\n def test_should_return_empty_strings_if_NoSectionError_exception_occured(self):\n my_cnf_path = os.path.join(self.tmpdir, 'my.cnf')\n with open(my_cnf_path, 'w') as f:\n f.write(\"\")\n\n dbinfo = {\n 'ENGINE': 'django.db.backends.mysql',\n 'OPTIONS': {\n 'read_default_file': my_cnf_path,\n }\n }\n result = parse_mysql_cnf(dbinfo)\n\n self.assertEqual(result, ('', '', '', '', ''))\n","repo_name":"django-extensions/django-extensions","sub_path":"tests/test_parse_mysql_cnf.py","file_name":"test_parse_mysql_cnf.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":6269,"dataset":"github-code","pt":"69"} +{"seq_id":"18768121081","text":"from rest_framework import serializers\nfrom .models import 
Profile\n\n\nclass ProfileSerializer(serializers.ModelSerializer):\n username = serializers.CharField(source=\"user.username\")\n first_name = serializers.CharField(source=\"user.first_name\")\n last_name = serializers.CharField(source=\"user.last_name\")\n email = serializers.EmailField(source=\"user.email\")\n full_name = serializers.SerializerMethodField(read_only=True)\n \"\"\"Allow for null field\"\"\"\n resolution = serializers.CharField(source='custom_resolution.resolution', required=False)\n\n class Meta:\n model = Profile\n fields = [\n 'username',\n 'first_name',\n 'last_name',\n 'full_name',\n 'email',\n 'id',\n 'tier',\n 'resolution',\n 'custom_resolution'\n ]\n\n \"\"\"Allow for null field\"\"\"\n extra_kwargs = {\"resolution\": {\"required\": False, \"allow_null\": True}}\n\n def get_full_name(self, obj):\n first_name = obj.user.first_name.title()\n last_name = obj.user.last_name.title()\n return f\"{first_name} {last_name}\"\n\n\n\n","repo_name":"Packerson/Upload_Images_Api","sub_path":"Upload_Images/apps/profiles/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4289120638","text":"import pandas as pd\nimport numpy\nimport talib\nimport datetime\nfrom datetime import timedelta\nfrom itertools import tee, islice, chain\n\ndef Dataframe(serie):\n return pd.DataFrame(serie ,columns=[\"RSI\"])\n\ndef Dataframe(data,index):\n return pd.DataFrame(data=data,index=index,columns=[\"RSI\"])\n\ndef RSI(close,timeperiod):\n return talib.RSI(close,timeperiod)\n\ndef RSIDataframe(close,timeperiod):\n return pd.DataFrame(talib.RSI(close,timeperiod),columns=[\"RSI\"])\n\ndef CustomRSIdf(dfclose,timeperiod):\n custom = dfclose['Close'].replace(dfclose['Close'].tolist(),talib.RSI(dfclose['Close'],timeperiod))\n custom = custom.rename('RSI')\n return custom.to_frame()\n\ndef intindexposition(df,timestamp):\n return df.index.get_loc(timestamp)\n\n# Analizar rangos de 40 valores ver cuales tienen “RSI (base 7) >70%”\ndef RSImorethan(dfrsi,value):\n if dfrsi.columns[0] == 'RSI(C,7)':\n dfrsi = dfrsi.rename(columns={'RSI(C,7)':'RSI'})\n rsimore70 = dfrsi.query(\"{} >= {}\".format(dfrsi.columns[0],value))\n #print(\"Mas de 70\" + str(rsimore70))\n if len(rsimore70.index) > 0:\n return rsimore70\n else:\n return False\n\ndef changersicolumnname(dfrsi):\n if dfrsi.columns[0] == 'RSI(C,7)':\n return dfrsi.rename(columns={'RSI(C,7)':'RSI'})\n\ndef TopRSI70(dfrsi70):\n top70 = []\n index = []\n cnt = 0\n for previous, item, nxt in previous_and_next(dfrsi70.values.tolist()):\n #print(\"Prev:{}, Nxt:{}, item:{},type:{}\".format(str(previous),str(nxt),str(item),str(type(item))))\n if previous is None:\n if item >= nxt:\n top70.append(item)\n index.append(dfrsi70.index[cnt])\n else:\n if nxt is None:\n if item >= previous:\n top70.append(item)\n index.append(dfrsi70.index[cnt])\n else:\n if item >= nxt and item >= previous:\n top70.append(item)\n index.append(dfrsi70.index[cnt])\n cnt = cnt + 1\n return pd.DataFrame(top70,columns=['RSI'],index=index)\n\ndef previous_and_next(some_iterable):\n prevs, items, nexts = tee(some_iterable, 3)\n prevs = chain([None], prevs)\n nexts = chain(islice(nexts, 1, None), [None])\n return zip(prevs, items, nexts)\n\ndef top2rsivalues(dfrsi):\n dfm70 = RSImorethan70(dfrsi)\n return dfm70.nlargest(2,'RSI')\n\ndef top2rsi70values(dfrsi70):\n return dfrsi70.nlargest(2,'RSI')\n \ndef highestvalue(dfrsi):\n return 
dfrsi.nlargest(1,'RSI') \n\ndef smallestvalue(dfrsi):\n return dfrsi.nsmallest(1,'RSI')\n\ndef definehighestrsi(top2):\n if top2.values[0] > top2.values[1]:\n return top2.index[0] \n else:\n return top2.index[1]\n\ndef diftime(time1,time2):\n difference = time1-time2\n seconds_in_day = 24 * 60 * 60\n difmin = divmod(difference.days * seconds_in_day + difference.seconds, 60)\n return difmin\n\ndef addminutes(date,min):\n return date + timedelta(minutes=min) \n\ndef datetimetostr(datetime):\n return datetime.strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef changetendency(ant,up):\n if ant < up:\n return True\n else:\n return False\n\ndef istopvalue(threesizearray):\n if max(threesizearray[0]) == val:\n return True\n if min(threesizearray[0] == val):\n return True \n return False\n \n\ndef foundposibleC4(dfrsi,C2,C3):\n if dfrsi.columns[0] == 'RSI(C,7)':\n dfrsi = dfrsi.rename(columns={'RSI(C,7)':'RSI'})\n posibleC4 = dfrsi.query(\"{} >= ({} - 0.5*{})/1.5 and {} <= ({} - 0.786*{})/1.5\").format(dfrsi.columns[0],C2,C3,dfrsi.columns[0],C2,C3)\n #print(\"Mas de 70\" + str(rsimore70))\n if len(posibleC4.index) > 0:\n return posibleC4\n else:\n return False\n\n\n\nif __name__ == '__main__':\n from csvreader import BacktestingDataframe \n AUDUSD = BacktestingDataframe(\"AUDUSD\",\"12-06-2020\").get_dataframe()\n output = talib.CDLEVENINGSTAR(AUDUSD[\"Open\"],AUDUSD[\"High\"],AUDUSD[\"Low\"],AUDUSD[\"Close\"],penetration = 0)\n pd.set_option(\"display.max_rows\",None,\"display.max_columns\",None)\n rsi = RSIDataframe(AUDUSD[\"Close\"],7)\n top2 = top2rsivalues(rsi)\n #print(rsi)\n print(top2)\n print(AUDUSD.index[1])\n print(AUDUSD.index[5])\n print(diftime(top2.index[0],top2.index[1])[0])\n if diftime(top2.index[0],top2.index[1])[0] > 10:\n print(\"Es mayor a diez\")\n #print(top2.index)","repo_name":"JaimeOli/trading-para-todos","sub_path":"source/core/startegiesfunctions.py","file_name":"startegiesfunctions.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"36858392546","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jun 29 21:18:30 2021\r\n\r\n@author: Deepak Murugesan\r\n\"\"\"\r\n\r\nfrom functools import reduce\r\n\r\nfib = lambda n: reduce(lambda x, _: x+[x[-1]+x[-2]],\r\n\t\t\t\t\t\t\t\trange(n-2), [0, 1])\r\n\r\nprint(fib(5))\r\n","repo_name":"DeepakM2001/python_intern","sub_path":"d16/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32033608606","text":"\"\"\"Contains the nox sessions for running development environments.\"\"\"\nfrom typing import Literal\n\nfrom nox import Session, param, parametrize\nfrom nox import session as nox_session\nfrom nox.command import CommandFailed\n\nfrom constants_nox import (\n COMPOSE_SERVICE_NAME,\n EXEC,\n EXEC_IT,\n LOGIN,\n RUN_CYPRESS_TESTS,\n START_APP,\n START_APP_REMOTE_DEBUG,\n START_TEST_ENV,\n)\nfrom docker_nox import build\nfrom run_infrastructure import ALL_DATASTORES, run_infrastructure\nfrom utils_nox import COMPOSE_DOWN_VOLUMES\n\n\n@nox_session()\ndef shell(session: Session) -> None:\n \"\"\"\n Open a shell in an already-running Fides webserver container.\n\n If the container is not running, the command will fail.\n \"\"\"\n shell_command = (*EXEC_IT, \"/bin/bash\")\n try:\n session.run(*shell_command, external=True)\n except CommandFailed:\n session.error(\n \"Could not connect to the webserver 
container. Please confirm it is running and try again.\"\n )\n\n\n@nox_session()\ndef dev(session: Session) -> None:\n \"\"\"\n Spin up the Fides webserver in development mode alongside it's Postgres\n database and Redis cache. Use positional arguments to run other services\n like privacy center, shell, admin UI, etc. (see usage for examples)\n\n Usage:\n 'nox -s dev' - runs the Fides weserver, database, and cache\n 'nox -s dev -- shell' - also open a shell on the Fides webserver\n 'nox -s dev -- ui' - also build and run the Admin UI\n 'nox -s dev -- pc' - also build and run the Privacy Center\n 'nox -s dev -- remote_debug' - run with remote debugging enabled (see docker-compose.remote-debug.yml)\n 'nox -s dev -- worker' - also run a Fides worker\n 'nox -s dev -- child' - also run a Fides child node\n 'nox -s dev -- ' - also run a test datastore (e.g. 'mssql', 'mongodb')\n\n Note that you can combine any of the above arguments together, for example:\n 'nox -s dev -- shell ui pc'\n\n See noxfiles/dev_nox.py for more info\n \"\"\"\n\n build(session, \"dev\")\n session.notify(\"teardown\")\n\n if \"worker\" in session.posargs:\n session.run(\"docker\", \"compose\", \"up\", \"--wait\", \"worker\", external=True)\n\n datastores = [\n datastore for datastore in session.posargs if datastore in ALL_DATASTORES\n ] or None\n\n if \"child\" in session.posargs:\n session.run(\n \"docker\",\n \"compose\",\n \"-f\",\n \"docker-compose.child-env.yml\",\n \"up\",\n \"-d\",\n external=True,\n )\n\n if \"ui\" in session.posargs:\n build(session, \"admin_ui\")\n session.run(\"docker\", \"compose\", \"up\", \"-d\", \"fides-ui\", external=True)\n\n if \"pc\" in session.posargs:\n build(session, \"privacy_center\")\n session.run(\"docker\", \"compose\", \"up\", \"-d\", \"fides-pc\", external=True)\n\n open_shell = \"shell\" in session.posargs\n remote_debug = \"remote_debug\" in session.posargs\n if not datastores:\n if open_shell:\n session.run(*START_APP, external=True)\n session.log(\"~~Remember to login with `fides user login`!~~\")\n session.run(*EXEC_IT, \"/bin/bash\", external=True)\n else:\n if remote_debug:\n session.run(*START_APP_REMOTE_DEBUG, external=True)\n else:\n session.run(\n \"docker\", \"compose\", \"up\", COMPOSE_SERVICE_NAME, external=True\n )\n else:\n # Run the webserver with additional datastores\n run_infrastructure(\n open_shell=open_shell,\n run_application=True,\n datastores=datastores,\n remote_debug=remote_debug,\n )\n\n\n@nox_session()\ndef cypress_tests(session: Session) -> None:\n \"\"\"\n End-to-end Cypress tests designed to be run as part of the 'e2e_test' session.\n \"\"\"\n session.log(\"Running Cypress tests...\")\n session.run(*RUN_CYPRESS_TESTS, external=True)\n\n\n@nox_session()\ndef e2e_test(session: Session) -> None:\n \"\"\"\n Spins up the test_env session and runs Cypress E2E tests against it.\n \"\"\"\n session.log(\"Running end-to-end tests...\")\n session.notify(\"fides_env(test)\", posargs=[\"test\"])\n session.notify(\"cypress_tests\")\n session.notify(\"teardown\")\n\n\n@nox_session()\n@parametrize(\n \"fides_image\",\n [\n param(\"dev\", id=\"dev\"),\n param(\"test\", id=\"test\"),\n ],\n)\ndef fides_env(session: Session, fides_image: Literal[\"test\", \"dev\"] = \"test\") -> None:\n \"\"\"\n Spins up a full fides environment seeded with data.\n\n Params:\n dev = Spins up a full fides application with a dev-style docker container. 
This includes hot-reloading and no pre-baked UI.\n test = Spins up a full fides application with a production-style docker container. This includes the UI being pre-built as static files.\n\n Posargs:\n test = instead of running 'bin/bash', runs 'fides' to verify the CLI and provide a zero exit code\n keep_alive = does not automatically call teardown after the session\n \"\"\"\n\n is_test = \"test\" in session.posargs\n keep_alive = \"keep_alive\" in session.posargs\n\n exec_command = EXEC if any([is_test, keep_alive]) else EXEC_IT\n shell_command = \"fides\" if any([is_test, keep_alive]) else \"/bin/bash\"\n\n # Temporarily override some ENV vars as needed. To set local secrets, see 'example.env'\n test_env_vars = {\n \"FIDES__CONFIG_PATH\": \"/fides/src/fides/data/test_env/fides.test_env.toml\",\n }\n\n session.log(\n \"Tearing down existing containers & volumes to prepare test environment...\"\n )\n try:\n session.run(*COMPOSE_DOWN_VOLUMES, external=True, env=test_env_vars)\n except CommandFailed:\n session.error(\n \"Failed to cleanly teardown existing containers & volumes. Please exit out of all other and try again\"\n )\n if not keep_alive:\n session.notify(\"teardown\", posargs=[\"volumes\"])\n\n session.log(\"Building images...\")\n build(session, fides_image)\n build(session, \"admin_ui\")\n build(session, \"privacy_center\")\n\n session.log(\n \"Starting the application with example databases defined in docker-compose.integration-tests.yml...\"\n )\n session.run(\n *START_TEST_ENV, \"fides-ui\", \"fides-pc\", external=True, env=test_env_vars\n )\n session.log(\"Logging in...\")\n session.run(*LOGIN, external=True)\n\n session.log(\n \"Running example setup scripts for DSR Automation tests... (scripts/load_examples.py)\"\n )\n session.run(\n *EXEC,\n \"python\",\n \"/fides/scripts/load_examples.py\",\n external=True,\n env=test_env_vars,\n )\n\n session.log(\n \"Pushing example resources for Data Mapping tests... 
(demo_resources/*)\"\n )\n session.run(\n *EXEC,\n \"fides\",\n \"push\",\n \"demo_resources/\",\n external=True,\n env=test_env_vars,\n )\n\n # Make spaces in the info message line up\n title = (\n \"FIDES TEST ENVIRONMENT\" if fides_image == \"test\" else \"FIDES DEV ENVIRONMENT \"\n )\n\n session.log(\"****************************************\")\n session.log(\"* *\")\n session.log(f\"* {title} *\")\n session.log(\"* *\")\n session.log(\"****************************************\")\n session.log(\"\")\n # Print out some helpful tips for using the test_env!\n # NOTE: These constants are defined in scripts/setup/constants.py, docker-compose.yml, and docker-compose.integration-tests.yml\n session.log(\n \"Using secrets set in '.env' for example setup scripts (see 'example.env' for options)\"\n )\n if fides_image == \"test\":\n session.log(\n \"Fides Admin UI (production build) running at http://localhost:8080 (user: 'root_user', pass: 'Testpassword1!')\"\n )\n session.log(\n \"Run 'fides user login' to authenticate the CLI (user: 'root_user', pass: 'Testpassword1!')\"\n )\n session.log(\n \"Fides Admin UI (dev) running at http://localhost:3000 (user: 'root_user', pass: 'Testpassword1!')\"\n )\n session.log(\n \"Fides Privacy Center (production build) running at http://localhost:3001 (user: 'jane@example.com')\"\n )\n session.log(\n \"Example Postgres Database running at localhost:6432 (user: 'postgres', pass: 'postgres', db: 'postgres_example')\"\n )\n session.log(\n \"Example Mongo Database running at localhost:27017 (user: 'mongo_test', pass: 'mongo_pass', db: 'mongo_test')\"\n )\n session.log(\"Opening Fides CLI shell... (press CTRL+D to exit)\")\n if not keep_alive:\n session.run(*exec_command, shell_command, external=True, env=test_env_vars)\n\n\n@nox_session()\ndef quickstart(session: Session) -> None:\n \"\"\"Run the quickstart tutorial.\"\"\"\n build(session, \"dev\")\n build(session, \"privacy_center\")\n build(session, \"admin_ui\")\n session.notify(\"teardown\")\n run_infrastructure(datastores=[\"mongodb\", \"postgres\"], run_quickstart=True)\n","repo_name":"AbdoALPOP/fides","sub_path":"noxfiles/dev_nox.py","file_name":"dev_nox.py","file_ext":"py","file_size_in_byte":9015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"72258989981","text":"__author__ = 'Toni'\n\nimport numpy as np\nimport numpy.random as rnd\nfrom PyQt4.QtCore import QString, QTimer, Qt\nfrom PyQt4.QtGui import *\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar\nfrom matplotlibwidget import MatplotlibWidget\nfrom cromatogram_w import Ui_CromWindow\nfrom trasmission import color2str, generate_colors\n\ntry:\n _fromUtf8 = QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QApplication.UnicodeUTF8\n\n\n def _translate(context, text, disambig):\n return QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QApplication.translate(context, text, disambig)\n\n\nclass Cromatogram(QMainWindow):\n def __init__(self, parent, tmodel):\n QMainWindow.__init__(self, parent)\n self.ui = Ui_CromWindow()\n self.ui.setupUi(self)\n self.tmodel = tmodel\n\n # Creates the matplotlib window and the toolbar\n self.mpl_window = MatplotlibWidget()\n self.ui.vl_plot.addWidget(self.mpl_window)\n self.toolbar = NavigationToolbar(self.mpl_window, self)\n self.ui.vl_plot.addWidget(self.toolbar)\n self.color_list = 
generate_colors(self.tmodel.num_analites)\n # Plot the models\n conc = self.simulate()\n self.plot(conc)\n\n def simulate(self):\n \"\"\" This function simulate the exit of each analito through the column. \"\"\"\n full_concentration = 0.998\n\n tmodel = self.tmodel\n\n if not tmodel.is_ideal_type:\n full_concentration = 0.5\n\n last_plate_conc = np.zeros(tmodel.num_analites)\n\n concentration = []\n for i in np.arange(tmodel.num_analites):\n concentration.append([])\n\n k = 0\n while True:\n tmodel.max_iter += 1\n tmodel.update(1)\n\n for i in np.arange(tmodel.num_analites):\n # amount of concentration in last plate\n aux = tmodel.current_state[4][i][tmodel.num_teo_plates - 1]\n last_plate_conc[i] += aux\n concentration[i].append(aux)\n\n flag = True\n for i in np.arange(tmodel.num_analites):\n if last_plate_conc[i] < full_concentration * tmodel.conc_initial[i]:\n flag = False\n\n if flag:\n # print last_plate_conc\n # print tmodel.conc_initial\n break\n\n k += 1\n\n return k + 1, concentration\n\n def plot(self, concentrations):\n\n for i in np.arange(self.tmodel.num_analites):\n self.mpl_window.axes.set_xlabel(\"Numero de Iteraciones\")\n self.mpl_window.axes.set_ylabel(\"Concentracion\")\n # print concentrations[0], len(concentrations[i + 1])\n self.mpl_window.axes.plot(np.arange(concentrations[0]), concentrations[1][i], color2str(self.color_list[i]),\n label=str(chr(65 + i)))\n self.mpl_window.axes.hold(True)\n\n self.mpl_window.axes.grid()\n self.mpl_window.axes.legend()\n self.mpl_window.draw()\n","repo_name":"tonypg39/spatcc","sub_path":"cromatogram.py","file_name":"cromatogram.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"18818510039","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User, Group, Permission\n\nfrom guardian.shortcuts import get_perms_for_model\nfrom guardian.core import ObjectPermissionChecker\nfrom guardian.shortcuts import assign, remove_perm, get_perms, get_users_with_perm\nfrom guardian.exceptions import NotUserNorGroup\n\nfrom guardian.tests.models import Keycard\nfrom guardian.tests.core_test import ObjectPermissionTestCase\n\nclass ShortcutsTests(TestCase):\n fixtures = ['tests.json']\n\n def setUp(self):\n self.user = User.objects.get(username='jack')\n self.group = Group.objects.get(name='admins')\n\n def test_get_perms_for_model(self):\n self.assertEqual(get_perms_for_model(self.user).count(), 3)\n self.assertTrue(list(get_perms_for_model(self.user)) ==\n list(get_perms_for_model(User)))\n self.assertEqual(get_perms_for_model(Permission).count(), 3)\n\n model_str = 'guardian.Keycard'\n self.assertEqual(\n sorted(get_perms_for_model(model_str).values_list()),\n sorted(get_perms_for_model(Keycard).values_list()))\n key = Keycard()\n self.assertEqual(\n sorted(get_perms_for_model(model_str).values_list()),\n sorted(get_perms_for_model(key).values_list()))\n\nclass AssignTest(ObjectPermissionTestCase):\n \"\"\"\n Tests permission assigning for user/group and object.\n \"\"\"\n def test_not_model(self):\n self.assertRaises(NotUserNorGroup, assign,\n perm=\"change_object\",\n user_or_group=\"Not a Model\",\n obj=self.keycard)\n\n def test_user_assign(self):\n assign(\"change_keycard\", self.user, self.keycard)\n assign(\"change_keycard\", self.group, self.keycard)\n self.assertTrue(self.user.has_perm(\"change_keycard\", self.keycard))\n\n def test_group_assing(self):\n assign(\"change_keycard\", self.group, 
self.keycard)\n assign(\"delete_keycard\", self.group, self.keycard)\n\n check = ObjectPermissionChecker(self.group)\n self.assertTrue(check.has_perm(\"change_keycard\", self.keycard))\n self.assertTrue(check.has_perm(\"delete_keycard\", self.keycard))\n\nclass RemovePermTest(ObjectPermissionTestCase):\n \"\"\"\n Tests object permissions removal.\n \"\"\"\n def test_not_model(self):\n self.assertRaises(NotUserNorGroup, remove_perm,\n perm=\"change_object\",\n user_or_group=\"Not a Model\",\n obj=self.keycard)\n\n def test_user_remove_perm(self):\n # assign perm first\n assign(\"change_keycard\", self.user, self.keycard)\n remove_perm(\"change_keycard\", self.user, self.keycard)\n self.assertFalse(self.user.has_perm(\"change_keycard\", self.keycard))\n\n def test_group_remove_perm(self):\n # assign perm first\n assign(\"change_keycard\", self.group, self.keycard)\n remove_perm(\"change_keycard\", self.group, self.keycard)\n\n check = ObjectPermissionChecker(self.group)\n self.assertFalse(check.has_perm(\"change_keycard\", self.keycard))\n\nclass GetPermsTest(ObjectPermissionTestCase):\n \"\"\"\n Tests get_perms function (already done at core tests but left here as a\n placeholder).\n \"\"\"\n def test_not_model(self):\n self.assertRaises(NotUserNorGroup, get_perms,\n user_or_group=None,\n obj=self.keycard)\n\n def test_user(self):\n perms_to_assign = (\"change_keycard\",)\n\n for perm in perms_to_assign:\n assign(\"change_keycard\", self.user, self.keycard)\n\n perms = get_perms(self.user, self.keycard)\n for perm in perms_to_assign:\n self.assertTrue(perm in perms)\n\n\nclass GetUsersWithPerm(ObjectPermissionTestCase):\n def test_get_users_with_perm(self):\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users, [])\n\n assign(\"change_keycard\", self.group, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users, [self.user])\n\n john = User.objects.create(username='John')\n assign(\"add_keycard\", john, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users, [self.user])\n\n assign(\"change_keycard\", john, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john].sort())\n\n mary = User.objects.create(username='Mary')\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john].sort())\n\n mary.groups.add(self.group)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john, mary].sort())\n\n assign(\"change_keycard\", mary, self.keycard)\n users = list(get_users_with_perm(self.keycard, 'change_keycard').all())\n self.assertEqual(users.sort(), [self.user, john, mary].sort())\n\n","repo_name":"canassa/django-guardian","sub_path":"guardian/tests/shortcuts_test.py","file_name":"shortcuts_test.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"70766643099","text":"import csv\n\ninp = []\ncss = {}\nejs = {}\n\nwith open('NeuronConnectOrig.csv', 'r') as f:\n reader = csv.reader(f)\n next(reader, None)\n inp = [row for row in reader]\n\n\nfor row in inp:\n #print(row)\n if row[2] == \"EJ\":\n if (row[0], row[1]) in ejs:\n ejs[(row[0], row[1])] += int(row[3])\n else:\n ejs[(row[0], row[1])] = int(row[3])\n if (row[2] == \"S\" or row[2] == 
\"Sp\"):\n if (row[0], row[1]) in css:\n css[(row[0], row[1])] += int(row[3])\n else:\n css[(row[0], row[1])] = int(row[3])\n\n#EJs are back and forth connections\nfor ej in ejs:\n if (ej[1], ej[0]) not in ejs:\n print(ej)\n\nwith open('edges_wo_muscles.csv', 'a') as f:\n f.write(\"Neuron 1,Neuron 2,Type,Nbr\\n\")\n for key,value in ejs.items():\n to_write = key[0] + \";\" + key[1] + \";EJ;\" + str(value) + \"\\n\"\n f.write(to_write)\n\n for key,value in css.items():\n to_write = key[0] + \";\" + key[1] + \";CS;\" + str(value) + \"\\n\"\n f.write(to_write)\n","repo_name":"hellothisisnathan/Mouse_Controllability","sub_path":"Celegans/hungarian dude's code/data/generate_edges_wo_muscles.py","file_name":"generate_edges_wo_muscles.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"4087290004","text":"from drf_yasg import openapi\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg.openapi import Parameter, IN_QUERY\nclass PostSwagger:\n def __init__(self, params, required, summary, description=None, examples_={\n \"application/json\": {\n \"gcode\": 0,\n \"success\": True,\n \"data\" : \"\"\n }\n }):\n self.params = {}\n for p in params:\n self.params[p] = openapi.Schema(type=params[p])\n self.req = openapi.Schema(type=openapi.TYPE_OBJECT, properties=self.params,required=required)\n self.res = {\n \"200\": openapi.Response(\n description=\"성공\",\n examples=examples_\n )\n }\n self.summary = summary\n self.description = description\n\n def get_auto_schema(self):\n return swagger_auto_schema(\n operation_summary=self.summary,\n operation_description=self.description,\n request_body=self.req,\n responses=self.res,\n )\n\nclass GetSwagger:\n def __init__(self, params, examples_, summary, description=None):\n self.params = []\n for p in params:\n self.params.append(Parameter(p, IN_QUERY, type=params[p]))\n \n self.res = {\n \"200\" : openapi.Response(\n description=\"성공\",\n examples=examples_\n )\n }\n self.summary = summary\n self.description = description\n \n def get_auto_schema(self):\n return swagger_auto_schema(\n operation_summary=self.summary, \n operation_description=self.description,\n manual_parameters=self.params,\n responses=self.res\n )","repo_name":"AgongKim/type16","sub_path":"utils/swagger_base.py","file_name":"swagger_base.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"496032461","text":"import pandas as pd\nimport sqlalchemy as sql\nimport logging\n\nfrom datetime import datetime\nfrom datetime import timedelta\n\n\n# details of the database.\nDB_HOST = 'terraform-20191101073604464400000001.cqpira9yntzj.ap-southeast-1.rds.amazonaws.com'\nDB_USER = 'foo'\nDB_PASS = 'foobarbaz'\nDB_NAME = 'marketing'\n\n\n# return session datetime from session_id.\ndef session_datetime_from_session_id(session_id):\n session_id_split = session_id.split('_')\n return datetime.strptime(session_id_split[1] + ' ' + session_id_split[2], '%Y%m%d %H%M')\n\n\n# returns a datetime object from visit_date and visit_time details.\ndef create_datetime(v_date, v_time):\n return datetime.strptime(v_date + ' ' + v_time, '%Y%m%d %H:%M %p')\n\n\n# returns session_id given device_id, visit_date, and visit_time.\n# i_ prefix to denote inner variable.\ndef create_new_session_id(i_device_id, i_visit_date, i_visit_time):\n return 's{0}_{1}_{2}'.format(i_device_id, i_visit_date, 
i_visit_time.replace(':', '')[0:4])\n\n\n# updates session_id in the database.\ndef update_session_id_for_row(i_session_id, i_row_id, i_sql_engine):\n connection = i_sql_engine.connect()\n connection.execute('update clickstream set session_id = \\'{0}\\' where id = \\'{1}\\''.format(i_session_id, i_row_id))\n\n\n# returns a dataframe row containing existing sessions for a device_id.\ndef sessions_for(device_id, i_sql_engine):\n sessions_for_device_query = '''\n select device_id, session_id, visit_date, visit_time \n from clickstream where device_id = \\'{0}\\' \n and session_id is not null'''.format(device_id)\n sessions_for_device_df = pd.read_sql_query(sessions_for_device_query, i_sql_engine)\n return sessions_for_device_df\n\n\n# return last session.\ndef last_session_from_sessions(sessions):\n sessions['visit_datetime'] = sessions.apply(lambda x: create_datetime(x['visit_date'], x['visit_time']), axis=1)\n sessions_sorted = sessions.sort_values(by='visit_datetime', ascending=False)\n return sessions_sorted.iloc[0]\n\n\n# row is one row of a dataframe.\ndef process_row(row, i_sql_engine):\n row_id = row['id']\n device_id = row['device_id']\n visit_date = row['visit_date']\n visit_time = row['visit_time']\n visit_datetime = create_datetime(visit_date, visit_time)\n\n logging.info('Processing row with ID: {0}'.format(row_id))\n sessions_for_device = sessions_for(device_id, i_sql_engine)\n\n # if there is no session in the database for that device_id, then create new session.\n if sessions_for_device.empty:\n session_id = create_new_session_id(device_id, visit_date, visit_time)\n update_session_id_for_row(session_id, row_id, i_sql_engine)\n\n else:\n # get latest existing session.\n latest_session_id = last_session_from_sessions(sessions_for_device)['session_id']\n latest_session_datetime = session_datetime_from_session_id(latest_session_id)\n\n # if still within the previous session time window, then use the previous session id.\n if latest_session_datetime + timedelta(minutes=60) > visit_datetime:\n update_session_id_for_row(latest_session_id, row_id, i_sql_engine)\n\n # if not within the previous session time window, then create a new session id.\n else:\n new_session_id = create_new_session_id(device_id, visit_date, visit_time)\n update_session_id_for_row(new_session_id, row_id, i_sql_engine)\n\n\n# main method.\ndef main():\n logging.basicConfig(level=logging.INFO)\n\n connect_string = 'mysql+mysqlconnector://{0}:{1}@{2}/{3}'.format(DB_USER, DB_PASS, DB_HOST, DB_NAME)\n sql_engine = sql.create_engine(connect_string)\n\n df = pd.read_sql_query('select * from clickstream where session_id is null', sql_engine)\n for i in range(len(df)):\n process_row(df.iloc[i], sql_engine)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"devacto/lzd","sub_path":"qn-one/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7384622066","text":"# -*-coding:utf-8-*-\n# created by HolyKwok 201610414206\n# 空气质量数据可视化\n\nimport pandas as pd\nimport datetime\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.font_manager import FontProperties\n\n# 读取数据\ndata = pd.read_csv(\"pollution.csv\") # 默认header=True,首航作为列名\n# 1-求PM2.5和气温日平均\n# 数据预处理\nyears = list(set(data['year'])) # 年分表\nmonths = list(range(1, 13))\n# days = list(range(1, 32))\n# every_day = [(m, d) for m in months for d in days ]\n# # 求平均值,自动过滤NaN\n# pm2d5_days = []\n# temp_days = []\n# for y in 
years:\n# # 每年的每个月有哪天\n# list(map(pm2d5_days.append, list(map(lambda x: data.loc[(data['year'] == y) & (data['month'] == x[0]) & (data['day'] == x[1]) & (data['pm2.5'].notnull())]['pm2.5'].mean(), every_day))))\n# list(map(temp_days.append, list(map(lambda x: data.loc[(data['year'] == y) & (data['month'] == x[0]) & (data['day'] == x[1]) & (data['TEMP'].notnull())]['TEMP'].mean(), every_day))))\n# # 画图\n# font = FontProperties(fname=\"C:\\Windows\\Fonts\\msyh.ttc\", size=15) # 设置字体\n# fig = plt.figure() # 创建子图\n# pm2d5_ax = fig.add_subplot(2, 1, 1)\n# temp_ax = fig.add_subplot(2, 1, 2)\n# bar_with = 0.5 # 柱状条宽度\n# index = np.arange(len(years)* len(months) * len(days)) # 下标序列\n# pm2d5_ax.bar(index, pm2d5_days, bar_with, label='PM2.5')\n# temp_ax.bar(index, temp_days, bar_with, label='TEMP')\n# # 标题\n# pm2d5_ax.set_title(u'每年pm2.5日平均统计表', fontproperties = font)\n# temp_ax.set_title(u'每年气温日平均统计表', fontproperties = font)\n# pm2d5_ax.set_xlabel(u'日期', fontproperties = font)\n# pm2d5_ax.set_ylabel(u'平均值', fontproperties = font)\n# temp_ax.set_xlabel(u'日期', fontproperties = font)\n# temp_ax.set_ylabel(u'平均值', fontproperties = font)\n# pm2d5_ax.set_xticks([])\n# temp_ax.set_xticks([])\n# plt.show()\n# plt.close()\npm2d5_y_avg = [] # pm2.5各年平均值\ntemp_y_avg = [] # 气温各年平均值\nlist(map(pm2d5_y_avg.append, list(map(lambda x: data.loc[data['year'] == x]['pm2.5'].mean(), years))))\nlist(map(temp_y_avg.append, list(map(lambda x: data.loc[data['year'] == x]['TEMP'].mean(), years))))\n# 画图\nfont = FontProperties(fname=\"C:\\Windows\\Fonts\\msyh.ttc\", size=15) # 设置字体\nindex = np.arange(len(years))\nbar_width = 0.35\nplt.bar(index, pm2d5_y_avg, bar_width, label='pm2.5')\nplt.bar(index + bar_width, temp_y_avg, bar_width, label='temp')\nplt.title(u\"pm2.5和气温年日平均\", fontproperties = font)\nplt.xticks(index, years)\nplt.ylabel(u\"日均值\", fontproperties = font)\n\n# 2-求五年的PM2.5,气温,气压,累计降雨量趋势图\n# 数据预处理,清除NA\npm2d5_data = data.loc[data['pm2.5'].notnull()] # isnull()和notnull()返回布尔型\ntemp_data = data.loc[data['TEMP'].notnull()]\npres_data = data.loc[data['PRES'].notnull()]\niws_data = data.loc[data['Iws'].notnull()]\n# 画图\nfig = plt.figure()\n# 添加子图\npm2d5_ax = fig.add_subplot(2, 2, 1)\ntemp_ax = fig.add_subplot(2, 2, 2)\npres_ax = fig.add_subplot(2, 2, 3)\niws_ax = fig.add_subplot(2, 2, 4)\n# 向子图中添加数据\npm2d5_ax.plot(pm2d5_data['pm2.5'], \"-\", linewidth=0.2)\ntemp_ax.plot(temp_data['TEMP'], \"-\", linewidth=0.2)\npres_ax.plot(pres_data['PRES'], \"-\", linewidth=0.2)\niws_ax.plot(iws_data['Iws'], \"-\", linewidth=0.2)\n# 隐藏x轴刻度\npm2d5_ax.set_xticks([])\ntemp_ax.set_xticks([])\npres_ax.set_xticks([])\niws_ax.set_xticks([])\n# 设置标题\npm2d5_ax.set_title('pm2.5')\ntemp_ax.set_title('TEMP')\npres_ax.set_title('PRES')\niws_ax.set_title('Iws')\nplt.show()\nplt.close()\n\n# 3-统计每年PM2.5指数平均值最高的5个月,获取每天的PM2.5指数\n# 数据预处理\npm2d5_m_avg = []\nfor m in months:\n # 每年的m月平均pm2.5,注意筛选条件用括号分隔避免歧义(ambiguous)\n pm2d5_m_avg.append(list(map(lambda x: (m, data.loc[(data['year'] == x) & (data['month'] == m) & (data['pm2.5'].notnull())]['pm2.5'].mean()), years)))\n# 转置结果,\npm2d5_m_avg_T = [[row[col] for row in pm2d5_m_avg] for col in range(len(pm2d5_m_avg[0]))] # 转置\n# 进行每年的前五排序切片\npm2d5_m_avg_top5 = list(map(lambda x: sorted(x, key=lambda x: x[1], reverse=True)[:5], pm2d5_m_avg_T))\n# 按升序排列月份以便计算\npm2d5_m_avg_top5 = list(map(lambda x: sorted(x, key=lambda x: x[0]), pm2d5_m_avg_top5))\npm2d5_d_data = []\nfor i, l in enumerate(pm2d5_m_avg_top5): # 筛选每年每月的数据[y:[m:...], y:[m:...]...]\n pm2d5_d_data.append(list(map(lambda x: data.loc[(data['year'] == 
years[i]) & (data['month'] == x[0]) & (data['pm2.5'].notnull())], l)))\n# 画图\nplt.rcParams['figure.figsize'] = (10.0, 1.0) # 图片长宽比例\nplt.rcParams['savefig.dpi'] = 500 # 图片像素\nplt.rcParams['figure.dpi'] = 400 # 分辨率\n\nfor n, dl in enumerate(pm2d5_d_data): # 按一年组建x, y坐标\n x_data = []\n y_data = []\n for e in dl:\n # x为时间(天)\n list(map(x_data.append,list(map(lambda y, m, d, h: str(datetime.datetime(int(y), int(m), int(d), int(h))), e['year'], e['month'], e['day'], e['hour']))))\n # y为PM2.5值\n list(map(y_data.append, list(e['pm2.5'])))\n plt.plot(x_data, y_data, \"-\", linewidth=0.2)\nplt.xticks([]) # 隐藏x轴刻度\nplt.yticks(range(0, 1200, 50)) # 设置y轴刻度\nplt.tick_params(labelsize=3) # 设置刻度字体大小\nplt.show()\nplt.close()","repo_name":"ApplauseWow/IT_new_technique_assignment","sub_path":"practice2/practice2-1.py","file_name":"practice2-1.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"34069077744","text":"# TABLE_CONTENTS.PY - Contains code to create tables displaying data\n\nfrom dash import dash_table\n\ndata_table = dash_table.DataTable(\n id='data-table',\n columns=[], # Specifies the table's columns, empty for now as it will be set using a callback function\n data=[], # Specifies the table's columns, empty for now as it will be set using a callback function\n fixed_rows={'headers': True }, # Headers will appear at the top of the table, even when scrolling\n style_table={ # CSS style parameters, which affect the table's appearance\n 'minHeight': '11vh'\n , 'height': '11vh'\n , 'maxHeight': '11vh'\n , 'overflow-y': 'scroll'\n , 'border': '1.5px solid #000000'\n , 'color': '#000000'\n , 'font-size': '15px'\n , 'display': 'inline-block'\n },\n style_cell={ # Affects formatting of the table's cells\n 'textAlign': 'left'\n , 'font-family': 'Arial'\n },\n style_header={ # Affects the styling of table headers\n 'fontWeight': 'bold'\n , 'whiteSpace': 'normal'\n , 'height': 'auto'\n },\n style_data={\n 'whiteSpace': 'normal'\n , 'height': 'auto'\n },\n style_cell_conditional=[ # Conditional styling can be applied, like setting column width conditional on column name\n {'if': {'column_id': 'Hello World'},\n 'width': '100%'}\n ]\n)","repo_name":"MarkVersteegh/dash-template","sub_path":"layout/table_contents.py","file_name":"table_contents.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32728188069","text":"class DetectSquares:\n\n def __init__(self):\n self.coordinateCount = defaultdict(int) # (x,y) = count\n self.points = []\n \n\n def add(self, point: List[int]) -> None:\n x, y = point\n self.coordinateCount[(x, y)] += 1\n self.points.append([x,y])\n \n\n def count(self, point: List[int]) -> int:\n x, y = point\n res = 0\n for px, py in self.points:\n if (abs(py - y) != abs(px - x)) or px == x or py == y :\n continue\n res += self.coordinateCount[(x,py)] * self.coordinateCount[(px,y)]\n \n return res\n \n \n\n\n# Your DetectSquares object will be instantiated and called as such:\n# obj = DetectSquares()\n# obj.add(point)\n# param_2 = obj.count(point)","repo_name":"mwinailan/LeetCode","sub_path":"2013-detect-squares/2013-detect-squares.py","file_name":"2013-detect-squares.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1472564333","text":"## build alternative ancestral sequences\n\nfrom 
fasta2seq import *\nfrom itertools import product\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\n\nimport json,random\n\nfp = open('spr_all.prob.json','r')\nprob = json.load(fp)\nnumseq = 1000\n#mutations = 30\n\nnodes = pd.read_csv('anc.node.txt')\nnodes = dict(zip(nodes['node'],nodes['name']))\n\n#nodes = {\"471\":\"Drosophila\"}\n#nodes = {\"471\":\"Drosophila\"}\n#471,Drosophila\n#435,Diptera\n#369,Insecta\n#312,Arthropoda\n#255,Metazoa\n#nodes = {\"435\":\"Diptera\"}\n\nfor node in nodes:\n sequence = {}\n name = nodes[node]\n node = str(node)\n print(node,name)\n\n ## all of the alternative residues ##\n alternative_residues = []\n for res_idx in prob[node]:\n alt_res = [res for res in prob[node][res_idx]]\n alternative_residues.append(alt_res)\n\n ## random sample 100 sequences ##\n N = 0\n index_list = []\n while N 1:\n #print(prob[node][res_idx])\n residues_with_ALT.append(res_idx)\n\n print(len(residues_with_ALT))\n #print(residues_with_ALT)\n\n mutations = int(len(residues_with_ALT)/2)\n residues_to_mutate = random.choices(residues_with_ALT,k=mutations)\n print(residues_to_mutate)\n\n #print([idx for idx in prob[node]])\n for res_idx in prob[node]:\n alt_res = [res for res in prob[node][res_idx]]\n alt_idx = [i for i in range(len(alt_res))]\n n = len(alt_res)\n #if n>1:\n # idx = random.choice(alt_idx)\n # index.append(idx)\n if res_idx in residues_to_mutate:\n index_pool = [i for i in range(n) if i>0]\n idx = random.choice(index_pool)\n print(res_idx,alt_res,alt_idx,idx)\n index.append(idx)\n else:\n index.append(0)\n if not all([i==0 for i in index]):\n index_list.append(index)\n N += 1\n\n nres = len(alternative_residues)\n #print(nres,len(index_list))\n for n in range(numseq):\n seq = [alternative_residues[i][index_list[n][i]] for i in range(nres)]\n #seq = [s for s in seq if s!='-']\n seq = ''.join(seq)\n sequence['%s_Alternative_%d'%(name,n)] = seq\n# print(ni,len(seq),seq)\n seq2fasta(sequence,'SPR_Anc_alternative_%s.fasta'%name)\n","repo_name":"jhpanda/DrosophilaSexPeptide","sub_path":"extract_asr.alt.py","file_name":"extract_asr.alt.py","file_ext":"py","file_size_in_byte":2478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22472824050","text":"import datetime\n\nimport clipboard as clipboard\n\nfrom rivombrosa.marchingegno.comparator import get_tiers\nfrom .config import flags\n\n\nall_tiers = get_tiers()\nresult = f'*{datetime.datetime.now().strftime(\"%A, %d %B %Y, %H:%M\")}*\\n\\n'\nfor league, tiers in all_tiers.items():\n if any([len(tiers['tier_1']), len(tiers['tier_2']), len(tiers['tier_3'])]):\n result += f'{flags[league]} _{league}_\\n'\n\n for t in ('tier_1', 'tier_2', 'tier_3'):\n result += f'*{t.replace(\"_\", \" \").title()}:*\\n'\n if len(tiers[t]):\n for info in tiers[t]:\n result += f'{info[\"match\"]} *{info[\"outcome\"]}* / {info[\"odds\"]} *({info[\"coeff\"]}%)* > € {info[\"stake\"] or 1}\\n'\n else:\n result += f'_Nessun match nel {t.replace(\"_\", \" \").title()}_\\n'\n result += '\\n'\n\nprint(result)\nclipboard.copy(result)\n","repo_name":"princewav/RivombrosaWeb","sub_path":"rivombrosa/tiers_formatter.py","file_name":"tiers_formatter.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42063746989","text":"# -*- coding: utf-8 -*-\n\nimport requests\nfrom time import sleep\n\ndef urlreq():\n resp = requests.get(\"http://localhost:5000/warState\")\n 
return resp.text\n\ndef visualizeState(state_json):\n print(state_json)\n\nif __name__ == \"__main__\":\n while True:\n state = urlreq()\n visualizeState(state)\n sleep(1)\n\n","repo_name":"OneNightROBOCON/burger_war","sub_path":"judge/visualizeConsole.py","file_name":"visualizeConsole.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"69"} +{"seq_id":"29171475377","text":"import wx, sys\nfrom EClassWindow import EClassWindow\nsys.path.insert(0, 'model')\nfrom EClass import EClass\n \nclass JoinPresentation(wx.Frame):\n \n def __init__(self, parent):\n wx.Frame.__init__(self, None, wx.ID_ANY)\n self.SetLabel('Select a Presentation to Join')\n\n panel = wx.Panel(self, wx.ID_ANY)\n self.index = 0\n self.parent = parent\n\n self.list_ctrl = wx.ListCtrl(panel, size=(-1,100),\n style=wx.LC_REPORT\n |wx.BORDER_SUNKEN\n )\n self.list_ctrl.InsertColumn(0, 'Class')\n self.list_ctrl.InsertColumn(1, 'Last')\n self.list_ctrl.InsertColumn(2, 'First')\n self.list_ctrl.InsertColumn(3, 'Hosted')\n\n self.reasonText = wx.StaticText(panel, -1)\n self.reasonText.SetForegroundColour((255, 0, 0))\n\n btn = wx.Button(panel, label=\"Join\")\n btn.Bind(wx.EVT_BUTTON, self.join)\n\n sizer = wx.BoxSizer(wx.VERTICAL)\n sizer.Add(self.reasonText, 0, wx.ALL|wx.EXPAND, 5)\n sizer.Add(self.list_ctrl, 0, wx.ALL|wx.EXPAND, 5)\n sizer.Add(btn, 0, wx.ALL|wx.CENTER, 5)\n panel.SetSizer(sizer)\n self.Bind(wx.EVT_CLOSE, self.onClose)\n\n self.setClasses(EClass.GetInstance().classes)\n EClass.GetInstance().connection.registerStudentClassesListener(self.setClasses)\n\n def setClasses(self, classes):\n self.list_ctrl.DeleteAllItems()\n for c in classes:\n self.list_ctrl.Append((c['name'], c['lastname'], c['firstname'],\n 'true' if c['hosted'] else '')\n )\n\n def onClose(self, event):\n EClass.GetInstance().exit()\n\n def join(self, event):\n selected = EClass.GetInstance().classes[self.list_ctrl.GetFocusedItem()]\n\n EClass.GetInstance().connection.joinPresentation(selected['name'],\n selected['lastname'], selected['firstname'],\n self.callback\n )\n\n def callback(self, response):\n if response.success:\n window = EClassWindow()\n EClass.GetInstance().loadInitialData(response.data)\n window.showPresentation()\n self.Hide()\n else:\n self.reasonText.SetLabel(response.reason)\n \n","repo_name":"joshterrell805-historic/EClass","sub_path":"implementation/source/python/view/JoinPresentation.py","file_name":"JoinPresentation.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"29097688680","text":"\"\"\"\nRequests for pushing events to Slack\n\n\"\"\"\n\n\nclass SlackClient:\n\n headers = {\"Content-Type\": \"application/json\"}\n\n def __init__(self, session, url):\n self.session = session\n self.url = url\n\n async def slack_post(self, body):\n async with self.session.post(\n headers=self.headers, url=self.url, data=body\n ) as response:\n if response.status == 200:\n return\n else:\n print(response)","repo_name":"team-telnyx/demo-python-telnyx","sub_path":"call-center-texml/call_center/infrastructure/client/http/slackrequests.py","file_name":"slackrequests.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"69"} +{"seq_id":"19990642640","text":"import openai\nimport os\nimport json\n\n\nopenai.api_key = input('Enter ChatGPT API Key')\n\ndef add_json(data):\n 
with open(\"log.json\", mode=\"r\") as file:\n messeges = json.load(file)\n messeges.append(data)\n with open(\"log.json\", mode=\"w\") as file:\n json.dump(messeges, file)\n\ndef chatgpt_response(prompt):\n add_json({\"role\": \"user\", \"content\": prompt})\n with open(\"log.json\", mode=\"r\") as file:\n json_messeges = json.load(file)\n response = openai.ChatCompletion.create(\n model=\"gpt-3.5-turbo\",\n messages=json_messeges,\n temperature=0.5,\n max_tokens=100\n )\n prompt_response = response['choices'][0]['message']['content']\n add_json(response['choices'][0]['message'])\n return prompt_response\n","repo_name":"ArtemChirakhov/GPTodd","sub_path":"chatgpt.py","file_name":"chatgpt.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"34189312611","text":"\n\n\"\"\"\n 13.1 Word frequency analysis\n\"\"\"\n\n\nimport os \nimport string\n\ndef cleanFile(fname):\n\tfin = open(fname)\n\n\tres = []\n\n\tfor line in fin:\n\t\twords = line.split()\n\t\tfor word in words:\n\t\t\ts = word.translate(string.maketrans(\"\", \"\"), string.punctuation + string.digits)\n\t\tres.append(s.strip().lower())\n\n\treturn res \n\n#print len(cleanFile('../words.txt'))\n#print cleanFile('../words.txt')[:100]\n\n\ndef processLine(line):\n\n\twords = line.split()\n\tres = []\n\n\tfor word in words:\n\t\ts = word.translate(string.maketrans(\"\", \"\"), string.punctuation+string.digits)\n\t\tres.append(s.strip().lower())\n\n\treturn res \n\ndef processFile(fname, processLine):\n\n\td = dict()\n\twith open(fname) as fin:\n\t\tfor line in fin:\n\t\t\tlst = processLine(line)\n\t\t\tfor word in lst:\n\t\t\t\td[word] = d.get(word, 0) + 1\n\n\treturn d \n\n\n\n\n\n","repo_name":"April-Xue/thinking_python","sub_path":"ch13.py","file_name":"ch13.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"856820588","text":"\"\"\"Save TissueNet as individulal TIFF files.\n\nThese files are then fed into cellpose for training.\n\nTo train the cellpose model:\n\nCUDA_VISIBLE_DEVICES=3 python -m cellpose --train \\\n--dir /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/train/ \\\n--test_dir /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/val/ \\\n--pretrained_model None \\\n--img_filter _img \\\n--mask_filter _masks \\\n--chan 2 --chan2 1 \\\n--use_gpu\n\nTo run the newly trained cellpose model:\n\nCUDA_VISIBLE_DEVICES=3 python -m cellpose \\\n--dir /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/test_run/ \\\n--pretrained_model /deepcell_data/users/willgraf/mesmer_retrain/tissue_net/seed1/train/models/cellpose_residual_on_style_on_concatenation_off_train_2021_04_26_11_35_14.114698 \\\n--chan 2 --chan2 1 \\\n--diameter 23. 
--save_tif --use_gpu\n\n\nTo run the pretrained version of the CellPose model:\n\npython -m cellpose\n --dir /deepcell_data/users/willgraf/cellpose/test_split_1_channels_first\n --pretrained_model cyto\n --chan 0 --chan2 1\n --diameter 0.\n --save_tif --use_gpu\n\"\"\"\n\nimport os\nimport numpy as np\nimport tifffile\n\nSEED = 1\n\nNPZ_NAME = '20201018_multiplex_seed_{}'.format(SEED)\nEXP_NAME = '20200824_hyper_parameter'\nMODEL_NAME = '{}_cellpose'.format(NPZ_NAME)\n\nROOT_DIR = '/deepcell_data'\nLOG_DIR = os.path.join(ROOT_DIR, 'logs')\nMODEL_DIR = os.path.join(ROOT_DIR, 'models', EXP_NAME)\n\nDATA_DIR = os.path.join(ROOT_DIR, 'users/willgraf/mesmer_retrain')\nTIFF_PATH = os.path.join(DATA_DIR, 'tissue_net/seed{}'.format(SEED))\n\nTRAIN_DATA_FILE = os.path.join(DATA_DIR, '{}_train_512x512.npz'.format(NPZ_NAME))\nVAL_DATA_FILE = os.path.join(DATA_DIR, '{}_val_256x256.npz'.format(NPZ_NAME))\nTEST_DATA_FILE = os.path.join(DATA_DIR, '{}_test_256x256.npz'.format(NPZ_NAME))\n\nTEST_PRED_DATA_FILE = os.path.join(DATA_DIR, '{}_test_pred.npz'.format(NPZ_NAME))\n\n\ndef save_as_tiffs(npz_path, tiff_dir):\n data = np.load(npz_path, allow_pickle=True)\n X = data['X']\n y = data['y']\n\n assert X.shape[0] == y.shape[0], 'X and y should have the same number of images.'\n\n for i in range(X.shape[0]):\n img_filename = '{:04d}_img.tif'.format(i)\n mask_filename = '{:04d}_masks.tif'.format(i)\n\n tifffile.imsave(os.path.join(tiff_dir, img_filename), X[i])\n tifffile.imsave(os.path.join(tiff_dir, mask_filename), y[i])\n print('saved %s files to %s' % (len(X), tiff_dir))\n\n\nif __name__ == '__main__':\n data_files = [\n ('train', TRAIN_DATA_FILE),\n ('val', VAL_DATA_FILE),\n ('test', TEST_DATA_FILE),\n ]\n for prefix, data_file in data_files:\n f = os.path.join(DATA_DIR, data_file)\n subdir = os.path.join(TIFF_PATH, prefix)\n if not os.path.isdir(subdir):\n os.makedirs(subdir)\n save_as_tiffs(f, subdir)\n\n X_train = train_data['X']\n y_train = train_data['y']\n\n X_val = val_data['X']\n y_val = val_data['y']\n\n X_test = test_data['X']\n y_test = test_data['y']\n","repo_name":"vanvalenlab/publication-figures","sub_path":"2021-Greenwald_Miller_et_al-Mesmer/notebooks/training/Cellpose_training.py","file_name":"Cellpose_training.py","file_ext":"py","file_size_in_byte":2975,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"69"} +{"seq_id":"16923359305","text":"from BristolMatchingEngine import *\nfrom time import time\n\ntvec = TraderVector()\n\nfor i in range(100_000):\n tvec.append(Trader(True, False))\n \nfor i in range(100_000):\n tvec.append(Trader(False, True))\n\nx = LimitOrderBook()\n\nt0 = time()\n\nx.run_experiment(0, 1000, tvec)\n\nprint(time() - t0)\n\nprint(len(x.get_executed_transactions()))\n","repo_name":"gabedonnan/CPPLob","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"29079953629","text":"from ctypes import sizeof\nfrom operator import length_hint\nfrom PIL import Image\nimport PIL\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nskaidinysSize = 10\n\nprint(\"Iveskite 2 nuotraukų lokacijas\")\npath1 = input()\npath2 = input()\nim = Image.open(path1, 'r')\nwidth, height = im.size\npixel_values = list(im.getdata())\n\nim2 = Image.open(path2, 'r')\nwidth2, height2 = im2.size\npixel_values2 = list(im2.getdata())\n\n\ndef getBrightness(pixelValues):\n brightnessArray = []\n\n for x in 
pixelValues:\n rgbSum = x[0] + x[1] + x[2]\n brightness = rgbSum / 3\n brightnessArray.append(brightness)\n return brightnessArray\n\n\nbrightnessArray = getBrightness(pixel_values)\nbrightnessArray2 = getBrightness(pixel_values2)\n\n\ndef current_milli_time():\n return round(time.time() * 1000)\n\n\ndef showImage(array):\n image_array = np.array(array, dtype=np.uint8)\n img = PIL.Image.fromarray(image_array)\n\n current_time = str(current_milli_time())\n\n img.save(current_time+'.jpg')\n\n\ndef prepareForViewing(brightnessArray):\n l = []\n temp = []\n tempLine = []\n\n h = 0\n while h < height:\n w = 0\n tempLine = []\n while w < width:\n index = h * width + w\n value = round(brightnessArray[index])\n temp = [value, value, value]\n tempLine.append(temp)\n w += 1\n h += 1\n l.append(tempLine)\n\n showImage(l)\n\n\nprepareForViewing(brightnessArray)\nprepareForViewing(brightnessArray2)\n\n\ndef getSkaidinys(brightnessArray):\n l2 = []\n temp2 = []\n tempLine2 = []\n\n h = 0\n while h < height:\n w = 0\n tempLine2 = []\n while w < width:\n index = h * width + w\n\n count = 0\n sum = 0\n\n for i in range(0, skaidinysSize):\n for j in range(0, skaidinysSize):\n count += 1\n sum += brightnessArray[index+j+(width*i)]\n\n rounded =int(sum/count)\n\n temp2 = [rounded, rounded, rounded]\n tempLine2.append(temp2)\n w += skaidinysSize\n h += skaidinysSize\n l2.append(tempLine2)\n\n showImage(l2)\n return l2\n\n\nskaidinys = getSkaidinys(brightnessArray)\nskaidinys2 = getSkaidinys(brightnessArray2)\n\n\ndef findMax(array1, array2):\n maxDiff = 0\n maxDiffX = 0\n\n i = 0\n while i < len(array1):\n if (abs(array1[i]-array2[i]) > maxDiff):\n maxDiff = abs(array1[i]-array2[i])\n maxDiffX = i\n i += 1\n print(\"Maksimalus atsilenkimas tarp funkcijų reikšmių: \", maxDiff)\n if (maxDiffX == 0):\n x = 0\n y = 0\n else:\n x = maxDiffX % (width/skaidinysSize)\n y = maxDiffX//(width/skaidinysSize)\n\n print(\"X: \", x)\n print(\"Y: \", y)\n\n\ndef drawGraph(doubleArray, doubleArray2):\n x = list(range(1, len(doubleArray) * len(doubleArray[0])+1))\n y = []\n for w in doubleArray:\n for h in w:\n y.append(h[0])\n\n # plotting the points\n plt.plot(x, y, label=\"Pirma nuotrauka\")\n # plt.scatter(x, y, label= \"first\", color= \"green\",\n # marker= \".\", s=1)\n\n # line 2 points\n x2 = x\n y2 = []\n\n for w in doubleArray2:\n for h in w:\n y2.append(h[0])\n\n # plotting the line 2 points\n plt.plot(x2, y2, label=\"Antra nuotrauka\")\n # plt.scatter(x2, y2, label= \"second\", color= \"blue\",\n # marker= \".\", s=1)\n\n # naming the x axis\n plt.xlabel('x - skaidiniai')\n # naming the y axis\n plt.ylabel('y - ryškumas')\n # giving a title to my graph\n plt.title('Nuotraukų palyginimo grafikas')\n\n # show a legend on the plot\n plt.legend()\n\n # function to show the plot\n plt.show()\n\n findMax(y, y2)\n\n\n# pip install matplotlib\ndrawGraph(skaidinys, skaidinys2)\n","repo_name":"KerniusB/matanas","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"35636645654","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport glob\nimport time\nimport argparse\nimport yaml\nimport json\nimport subprocess\n\n\n# configuration to be used when no config file exists in the batch dir\nDEFAULT_CONFIG = {\n 'max_parallel_runs': 3,\n 'resume_failed_run': False,\n 'nextflow_config': '/home/ubuntu/testing/argo-alignment-test-run/nextflow.config',\n 'workflow_version': '1.5.5',\n 
'profile': 'slurm_docker',\n 'reverse_order': False,\n 'remove_input_bam': False\n}\n\n\ndef get_config(batch_dir):\n # name of the config file: settings.conf, file format: YAML\n conf_file = os.path.join(batch_dir, 'settings.conf')\n\n if os.path.isfile(conf_file):\n with open(os.path.join(batch_dir, 'settings.conf')) as f:\n config = yaml.safe_load(f)\n else:\n config = DEFAULT_CONFIG\n\n return config\n\n\ndef cleanup(job_dir, config):\n if config.get('remove_input_bam'):\n job_file = os.path.join(job_dir, '%s.nf-job.json' % os.path.basename(job_dir))\n with open(job_file, 'r') as j:\n job = json.load(j)\n for bam in job['sequencing_files']:\n if os.path.exists(os.path.realpath(os.path.join(job_dir, bam))):\n print(\"remove input bam: %s\" % os.path.realpath(os.path.join(job_dir, bam)))\n os.remove(os.path.realpath(os.path.join(job_dir, bam)))\n\n\ndef get_job_summary(batch_dir, config=DEFAULT_CONFIG):\n # go through the job dirs\n job_summary = {\n 'new': [],\n 'completed': [],\n 'running': [],\n 'failed': []\n }\n\n job_dirs = sorted(glob.glob(os.path.join(batch_dir, '*')))\n if config.get('reverse_order'):\n job_dirs = sorted(job_dirs, reverse=True)\n\n for job_dir in job_dirs:\n if not os.path.isdir(job_dir): # skip if not dir\n continue\n\n job_file = os.path.join(job_dir, '%s.nf-job.json' % os.path.basename(job_dir))\n if not os.path.isfile(job_file): # skip if no job json\n continue\n\n trace_file = os.path.join(job_dir, 'trace.txt')\n stdout_file = os.path.join(job_dir, 'stdout')\n # the logic below for different job status can be improved\n if os.path.isfile(stdout_file):\n # if 'stdout' exists but 'trace.txt' does not, the job has already been launched\n # it's in running state, but just not scheduled by slurm or have not got the time\n # to generate the 'trace.txt' file\n if not os.path.isfile(trace_file):\n job_summary['running'].append({\n 'job_dir': job_dir\n })\n\n else: # now both trace.txt and stdout exist\n trace_lines = []\n with open(trace_file, 'r') as f:\n trace_lines = f.read().split('\\n')\n trace_lines = trace_lines[:-1] # remove the last line which is empty \n\n stdout_lines = []\n with open(stdout_file, 'r') as f:\n stdout_lines = f.read().split('\\n')\n\n completed_in_stdout = False\n for stdout_line in reversed(stdout_lines): # loop backwards\n if 'process > DnaAln:cleanup' in stdout_line and '[100%] 1 of 1' in stdout_line:\n completed_in_stdout = True\n\n # we are conservative to call a run is completed, so require confirmation\n # from both trace and stdout\n if 'DnaAln:cleanup' == trace_lines[-1].split('\\t')[3] and \\\n 'COMPLETED' == trace_lines[-1].split('\\t')[4] and \\\n completed_in_stdout:\n job_summary['completed'].append({\n 'job_dir': job_dir\n })\n\n # cleanup the input BAM to free more space\n cleanup(job_dir, config)\n\n else: # now either running or failed\n status = 'running' # assume running\n for trace_line in trace_lines:\n if trace_line.startswith('task_id'):\n continue\n\n cols = trace_line.split('\\t')\n if cols[4] == 'FAILED' or cols[4] == 'ABORTED': # treat the two same way\n status = 'failed'\n break\n\n job_summary[status].append({\n 'job_dir': job_dir\n })\n\n else: # no stdout file, it's new job\n job_summary['new'].append({\n 'job_dir': job_dir\n })\n\n return job_summary\n\n\ndef launch_job(job, config=DEFAULT_CONFIG, resume=False, launch=False):\n job_dir = job['job_dir']\n job_file = os.path.join(job_dir, '%s.nf-job.json' % os.path.basename(job_dir))\n if not os.path.isfile(job_file):\n raise Exception('Nextflow job JSON 
file not found under: %s' % job_dir)\n\n launch_command = 'cd %s && nextflow -C %s run icgc-argo/dna-seq-processing-wfs -r %s -params-file %s ' % \\\n (job_dir, config['nextflow_config'], config['workflow_version'], os.path.basename(job_file)) + \\\n '-profile %s -queue-size %s ' % (config['profile'], \"2\") + \\\n '-with-report -with-trace %s' % ('-resume ' if resume else '')\n\n launch_command += '2> stderr > stdout'\n\n if launch:\n time.sleep(8) # sleep 8 seconds to avoid launching runs too close to each other\n system_call = subprocess.Popen(launch_command, shell=True)\n print('Launched run: %s' % job_file, file=sys.stderr)\n else:\n print('Launch flag (-l) not set. Otherwise, would have launched a run with command: %s' % launch_command, file=sys.stderr)\n\n\ndef main(batch_dir=None, launch=False):\n config = get_config(batch_dir)\n\n job_summary = get_job_summary(batch_dir, config=config)\n # print(json.dumps(job_summary))\n print(\"Job status, new: %s, running: %s, completed: %s, failed: %s\" % (\n len(job_summary['new']), len(job_summary['running']), len(job_summary['completed']), len(job_summary['failed'])\n ))\n\n available_run_slots = config['max_parallel_runs'] - len(job_summary['running'])\n\n if available_run_slots > 0:\n if config['resume_failed_run']: # resume failed jobs first if resume set to true\n for job in job_summary['failed']:\n launch_job(job, config, resume=True, launch=launch)\n available_run_slots -= 1\n\n if available_run_slots == 0:\n break\n\n # still have run slots, then launch new jobs\n if available_run_slots > 0:\n for job in job_summary['new']:\n launch_job(job, config, resume=False, launch=launch)\n available_run_slots -= 1\n\n if available_run_slots == 0:\n break\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Nextflow run monitor and launcher')\n parser.add_argument('-d', dest='batch_dir', required=True, help='A directory containing all job folders in one batch')\n parser.add_argument('-l', dest='launch', action='store_true', help='Flag for actual launch, otherwise informational only')\n args = parser.parse_args()\n\n main(batch_dir=args.batch_dir, launch=args.launch)\n\n","repo_name":"icgc-argo-workflows/metadata-for-benchmarking-datasets","sub_path":"scripts/nf-launcher.py","file_name":"nf-launcher.py","file_ext":"py","file_size_in_byte":7318,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"20867811340","text":"import csv\nimport sqlite3\ndBase = \"database.db\"\nconn = sqlite3.connect(\"database.db\")\n'''\nresult1 = conn.execute(\n \"SELECT mov.Budget, mov.Nconst, history.Nconst, mov.User_Rating, mov.Tconst, history.Tconst, history.User_Rating from (SELECT M.User_Rating, H.Nconst, M.Tconst, M.Budget FROM MOVIE as M, DIRECTED_BY as H WHERE M.Tconst = H.Tconst and H.Nconst NOT LIKE \\\"\\\\N\\\") as mov JOIN (SELECT H.Nconst, M.Tconst, M.User_Rating FROM MOVIE as M, DIRECTED_BY as H WHERE M.Tconst = H.Tconst and H.Nconst NOT LIKE \\\"\\\\N\\\") as history on history.Nconst = mov.Nconst WHERE move.Budget IS NOT NULL ORDER BY mov.Nconst;\")\n\nwith open('question1.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result1.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nresult2 = conn.execute(\"SELECT M.Act_1_Likes, M.Act_2_Likes, M.Act_3_Likes, M.Face_number, M.Revenue FROM MOVIE as M WHERE Act_3_Likes and Act_2_Likes and Act_1_Likes and Revenue and Face_number is not 
null;\")\n\nwith open('question2.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result2.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nresult3 = conn.execute(\"SELECT M.User_Rating, M.Critic_Rating, M.Revenue FROM MOVIE as M WHERE User_Rating and Critic_Rating and Revenue is not null;\")\n\nwith open('question3.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result3.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n'''\nresult4 = conn.execute(\"SELECT M.Act_1_Likes, M.Act_2_Likes, M.Act_3_Likes, M.Director_Likes, M.Revenue, M.Budget FROM MOVIE as M WHERE Revenue and Act_1_Likes and Act_2_Likes and Act_3_Likes is NOT NULL ;\")\nwith open('question4.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result4.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nresult5 = conn.execute(\"SELECT epi.Rating, sea.SeasonR, epi.Econst, sea.Tconst from( SELECT E.Econst, S.Tconst as SeasonT, R.Avg_Rating as Rating FROM EPISODE as E, SEASON as S, HAS_EPISODE as H, RATINGS as R WHERE E.Econst = H.Econst and S.Tconst = H.Season_Tconst and E.Econst = R.Tconst ) as epi JOIN ( SELECT S.Tconst, R.Avg_Rating as SeasonR FROM SEASON as S, RATINGS as R WHERE S.Tconst = R.Tconst ) as sea on epi.SeasonT = sea.Tconst ORDER BY sea.Tconst;\")\n\nwith open('question5.csv', 'w') as csvfile:\n write = csv.writer(csvfile, delimiter = ' ', quotechar = '|', quoting = csv.QUOTE_MINIMAL)\n for i in result5.fetchall():\n i = list(i)\n print(i)\n write.writerow(i)\n\nconn.close()","repo_name":"ballcarsen/Database-science-Project","sub_path":"ProjectCode/Query.py","file_name":"Query.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"8336941246","text":"from cmd import Cmd\r\nimport os\r\nimport sys\r\nfrom class_kernel import *\r\n\r\nclass CommandParser:\r\n def __init__(self, class_style):\r\n print(class_style.green(\"[OK] \") + class_style.yellow(\"Command Parser Loaded\"))\r\n\r\n def parse(self, command):\r\n return command\r\n\r\n\r\nclass CommandLine(Cmd):\r\n def __init__(self,class_style,kernel):\r\n Cmd.__init__(self)\r\n self.doc_header = \"Documented commands (type help ):\"\r\n self.misc_header = \"Miscellaneous help topics:\"\r\n self.undoc_header = \"Undocumented commands:\"\r\n self.__Style=class_style\r\n self.__Kernel=kernel\r\n self.prompt = self.__Kernel.get_name() + \"@\" + self.__Kernel.get_env() + \"~# \"\r\n self.__Kernel.set_name(\"HELLO\")\r\n\r\n def do_change(self, line):\r\n print(\"HE\")\r\n def do_help(self, arg):\r\n 'List available commands with \"help\" or detailed help with \"help cmd\".'\r\n if arg:\r\n # XXX check arg syntax\r\n try:\r\n func = getattr(self, 'help_' + arg)\r\n except AttributeError:\r\n try:\r\n doc=getattr(self, 'do_' + arg).__doc__\r\n if doc:\r\n self.stdout.write(\"%s\\n\"%str(doc))\r\n return\r\n except AttributeError:\r\n pass\r\n self.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,)))\r\n return\r\n func()\r\n else:\r\n names = self.get_names()\r\n cmds_doc = []\r\n cmds_undoc = []\r\n help = {}\r\n for name in names:\r\n if name[:5] == 'help_':\r\n help[name[5:]]=1\r\n names.sort()\r\n # There can be duplicates if routines overridden\r\n prevname = ''\r\n for name in names:\r\n if name[:3] == 'do_':\r\n if 
name == prevname:\r\n continue\r\n prevname = name\r\n cmd=name[3:]\r\n if cmd in help:\r\n cmds_doc.append(cmd)\r\n del help[cmd]\r\n elif getattr(self, name).__doc__:\r\n cmds_doc.append(cmd)\r\n else:\r\n cmds_undoc.append(cmd)\r\n self.stdout.write(\"%s\\n\"%str(self.doc_leader))\r\n self.print_topics(self.__Style.red(self.doc_header), cmds_doc, 15,80)\r\n self.print_topics(self.misc_header, help.keys(),15,80)\r\n self.print_topics(self.undoc_header, cmds_undoc, 15,80)","repo_name":"xingboyu1/Xtoolkit","sub_path":"class_command_line.py","file_name":"class_command_line.py","file_ext":"py","file_size_in_byte":2619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"14366242624","text":"\nfrom rtiCUDA import rcarray\nfrom rtiCUDA import messageSender\nimport numpy as np\nimport time\nimport math\n\nk=3\n\nif __name__=='__main__':\n messageSender.connect(\"andrej\")\n start = time.perf_counter()\n ars1 = []\n ars2 = []\n for i in range(k):\n ar1 = rcarray.makeRcArray(\"int\",[k],1)\n ars1.append(ar1)\n ar2 = rcarray.makeRcArray(\"int\",[k],1)\n ars2.append(ar2)\n ar3 = rcarray.makeMatrix(\"int\",ars1,k)\n ar6 = rcarray.makeMatrix(\"int\",ars2,k)\n #print(ar3)\n #print(ar6)\n start = time.perf_counter()\n ar7 = rcarray.dot(ar3,ar6)\n p = rcarray.sum(ar7)\n end = time.perf_counter()\n print(p)\n print(end-start)\n\n np1 = np.ones((k,k))\n np2 = np.ones((k,k))\n #print(ar7)\n start = time.perf_counter()\n\n np3 = np.matmul(np1, np2)\n s=np.sum(np3)\n end = time.perf_counter()\n print(s)\n print(end-start)\n\n messageSender.disconnect()","repo_name":"andrejjakovljevic/Bachelor-s","sub_path":"rtiCUDA/tests/help_test.py","file_name":"help_test.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"37027184726","text":"import random\nclass Product:\n \n def __init__(self, c, n, s, p, m, e, ):\n self.code=int(c)\n self.name=str(n)\n self.stock=int(s)\n self.price=int(p)\n self.manufac=int(m)\n self.emu=int(e)\n \n\n def display(self):\n print(\"******Programing Principles Sample Stock Statement*****\")\n print(\"Product Code: \", self.code)\n print(\"Product Name: \", self.name)\n print(\"Sale Price: \", self.price)\n print(\"Manufacture Cost: \", self.manufac)\n print(\"Monthly Production: \", self.emu, \"(Approx.)\")\n\n #def months(self):\n \n #intr = random.randint(-10,10)\n \n #print(\"Month 1: \")\n #print(\"- Manufactured: \", self.emu)\n #print(\"- Sold: \", self.emu + )\n #print(\"- Stock: \", self.emu + )\n \n\nprint(\"Welcome to Programming Principles Sample Product Inventory\")\nprod_instance = Product(input(\"Please enter the Product Code: \"), input(\"Please enter the Product Name: \"), \n input(\"Please enter the Current Stock: \"), input(\"Please enter the Product Sale Price: \"),\n input(\"Please enter the Product Manufacture Cost: \"), (input(\"Please enter estimated monthly production: \")))\n\nprod_instance.display()\n\nprod_instance.months()\n","repo_name":"Julian-Barbachano/2023-Assignment--2","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1446094611","text":"import sys\nfrom PyQt5.QtGui import QPixmap,QPalette\nfrom PyQt5 import QtCore,QtWidgets\nfrom PyQt5.QtWidgets import QWidget,QInputDialog,QMainWindow,QDialog,QLabel,QLineEdit,QGridLayout, QToolTip,QPushButton, 
QApplication\nfrom jumps.Jump_Tunnel import Jump_Tunnel\nfrom PyQt5.QtCore import Qt\nimport click\nimport time\nimport logging\nimport threading\n\njumphost = '117.48.195.186'\njumpport = 2222\njumpuser = 'dm'\njumppwd = 'Vts^pztbvE339@Rw'\ntunnelhost = '172.16.16.32'\ntunnelappport = 10000\nlocalhost = '127.0.0.1'\nlocalbindport = 4800\ndaemonsecond=2000\nlogger = logging.getLogger('ssh-jump-hive-gui')\nclass JumpTunnel(QWidget):\n\n def __init__(self):\n super().__init__()\n self.my_UI(True)\n def my_UI(self,test=False):\n jhLabel=QLabel(\"JumpHost:\")\n jpLabel=QLabel(\"JumpPort:\")\n juLable=QLabel(\"JumpUser:\")\n jpwdLabel=QLabel(\"JumpPwd:\")\n thLabel=QLabel(\"TunnelHost:\")\n tpLabel=QLabel(\"TunnelPort:\")\n lhLabel=QLabel(\"LocalHost:\")\n lpLabel=QLabel(\"LocalPort:\")\n gitLabel = QLabel(\"GithubRepo:\")\n dtLabel=QLabel(\"DaemonSecond:\")\n if test==True:\n self.jumpHost=QLineEdit(\"117.48.195.186\")\n self.jumpPwd=QLineEdit(\"Vts^pztbvE339@Rw\")\n self.tunnelHost=QLineEdit(\"172.16.16.32\")\n else:\n self.jumpHost = QLineEdit()\n self.jumpPwd = QLineEdit()\n self.tunnelHost = QLineEdit()\n self.jumpPort = QLineEdit(\"2222\")\n self.jumpUser = QLineEdit(\"dm\")\n self.tunnelPort=QLineEdit(\"10000\")\n self.localHost=QLineEdit(\"127.0.0.1\")\n self.localPort=QLineEdit(\"3560\")\n self.daemonSecond=QLineEdit(\"21600\")\n github=QLineEdit(\"https://github.com/mullerhai/sshjumphive\")\n self.btnConn = QPushButton(\"Trun ON\", self)\n self.btnClose=QPushButton(\"Trun Off\",self)\n self.grid=QGridLayout()\n self.grid.setSpacing(10)\n self.grid.addWidget(jhLabel,2,0)\n self.grid.addWidget(self.jumpHost,2,1)\n self.grid.addWidget(jpLabel,2,2)\n self.grid.addWidget(self.jumpPort,2,3)\n self.grid.addWidget(juLable,3,0)\n self.grid.addWidget(self.jumpUser,3,1)\n self.grid.addWidget(jpwdLabel,3,2)\n self.grid.addWidget(self.jumpPwd,3,3)\n self.grid.addWidget(thLabel,5,0)\n self.grid.addWidget(self.tunnelHost,5,1)\n self.grid.addWidget(tpLabel,5,2)\n self.grid.addWidget(self.tunnelPort,5,3)\n self.grid.addWidget(lhLabel,7,0)\n self.grid.addWidget(self.localHost,7,1)\n self.grid.addWidget(lpLabel,7,2)\n self.grid.addWidget(self.localPort,7,3)\n self.grid.addWidget(gitLabel,8,0)\n self.grid.addWidget(github,8,1)\n self.grid.addWidget(dtLabel,8,2)\n self.grid.addWidget(self.daemonSecond,8,3)\n self.grid.addWidget(self.btnConn,9,0)\n self.grid.addWidget(self.btnClose,9,3)\n pixmap = QPixmap(\"../img/guilogo.jpg\")\n pixmap=pixmap.scaledToHeight(80)\n pixmap=pixmap.scaledToWidth(180)\n lbl = QLabel(self)\n lbl.setFixedHeight(80)\n lbl.setFixedWidth(180)\n lbl.setPixmap(pixmap)\n self.grid.addWidget(lbl,10,1)\n pixfox = QPixmap(\"../img/tunnel.jpg\")\n pixfox=pixfox.scaledToHeight(90)\n pixfox=pixfox.scaledToWidth(90)\n lblfox = QLabel(self)\n lblfox.setFixedHeight(90)\n lblfox.setFixedWidth(90)\n lblfox.setPixmap(pixfox)\n self.grid.addWidget(lblfox,10,2)\n self.btnConn.clicked.connect(self.buttonClicked)\n self.btnClose.clicked.connect(self.btnCloseSession)\n self.setLayout(self.grid)\n self.setWindowTitle('SSH-Jump-Hive')\n self.setGeometry(300, 300, 490, 450)\n self.show()\n\n def btnCloseSession(self):\n # text, ok = QInputDialog.getText(self, 'Turn Off',\n # 'Please Input 1 then Trun off tunnel :')\n # logging.warn(msg=\"Will kill recently ssh tunnle process\")\n # if ok and text=='1':\n try:\n self.jump_tunnel.client.close()\n logging.info(msg=\"ssh_tunnel turn off successfully\")\n sucTLabel = QLabel(\"turn off Success\")\n self.grid.addWidget(sucTLabel, 9, 1)\n # text, ok = 
QInputDialog.getText(self, 'Success',\n # 'ssh_tunnel turn off successfully close dialog ok')\n except:\n failedTLabel = QLabel(\"turn off be Failed\")\n self.grid.addWidget(failedTLabel, 9, 2)\n # text, ok = QInputDialog.getText(self, 'Failed',\n # 'ssh_tunnel turn off failed check the config')\n logging.error(msg=\"ssh_tunnel turn off failed,please try again\")\n # else:\n # failedTLabel = QLabel(\"turn off Failed\")\n # self.grid.addWidget(failedTLabel, 9, 2)\n # text, ok = QInputDialog.getText(self, 'Failed',\n # 'ssh_tunnel turn off failed check the config')\n def buttonClicked(self): # 在buttonClikced()方法中,我们调用sender()方法来判断哪一个按钮是我们按下的\n jumphost=self.jumpHost.text().strip()\n jumpuser=self.jumpUser.text().strip()\n jumppwd=self.jumpPwd.text().strip()\n tunnelhost=self.tunnelHost.text().strip()\n localhost=self.localHost.text().strip()\n\n logging.info(msg=self.jumpHost.text()+\"%%\"+self.jumpUser.text()+\"%%\"+self.jumpPwd.text())\n try:\n jumpport = (int(self.jumpPort.text().strip()) if self.jumpPort.text().strip() != None else 2222)\n tunnelappport = (int(self.tunnelPort.text().strip()) if self.tunnelPort.text().strip() != None else 10000)\n localbindport = (int(self.localPort.text().strip()) if self.localPort.text().strip() != None else 4320)\n daemonsecond = (int(self.daemonSecond.text().strip()) if self.daemonSecond.text().strip() != None else 21600)\n\n self.jump_tunnel=Jump_Tunnel(jumphost,jumpport,jumpuser,jumppwd,tunnelhost,tunnelappport,localhost,localbindport)\n tunnel_conn=self.jump_tunnel.jump_con_tunnel()\n\n with tunnel_conn:\n #time.sleep(0.1)\n logging.info(msg=\"启动成功\")\n sucLabel = QLabel(\"Connect Success\")\n self.grid.addWidget(sucLabel, 9, 2)\n # pe = QPalette()\n # pe.setColor(QPalette.WindowText, Qt.red)\n #sucLabel.setAutoFillBackground(pe)\n\n text, ok = QInputDialog.getText(self, 'Success',\n 'connect ssh tunnel successfully close dialog ok')\n time.sleep(daemonsecond)\n except:\n logging.info(msg=\"启动失败\")\n failedLabel=QLabel(\"Connect Failed\")\n self.grid.addWidget(sucLabel, 9, 2)\n # text, ok = QInputDialog.getText(self, 'Failed',\n # 'connect ssh tunnel failed check the config')\n\n #sender = self.sender()\n # self.showMessage(sender.text() + ' 是发送者')\n\ndef main():\n app = QApplication(sys.argv)\n jtGui = JumpTunnel()\n sys.exit(app.exec_())\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = JumpTunnel()\n sys.exit(app.exec_())\n","repo_name":"mullerhai/sshjumphive","sub_path":"jumps/jump_gui.py","file_name":"jump_gui.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"69"} +{"seq_id":"7392353128","text":"states = [\n {\n \"state\":\"New Jersey\",\n \"capital\": \"Trenton\",\n \"continent\": \"North America\"\n },\n {\n \"state\":\"New York\",\n \"capital\": \"Albany\",\n \"continent\": \"North America\"\n },\n {\n \"state\":\"Pennsylvania\",\n \"capital\": \"Harrisburg\",\n \"continent\": \"North America\"\n },\n {\n \"state\":\"Wisconsin\",\n \"capital\": \"Madison\",\n \"continent\": \"North America\"\n }\n]\nprint(states[-1][\"capital\"])\n\n\ncountries = [\n\t{\n\t\t'country': 'South Africa',\n\t\t'capital': 'Pretoria',\n\t\t'continent': 'Africa'\n\t},\n\t{\n\t\t'country': 'USA',\n\t\t'capital': 'Washington DC',\n\t\t'continent': 'North America'\n\t},\n\t{\n\t\t'country': 'Panama',\n\t\t'capital': 'Panama City',\n\t\t'continent': 'North America'\n\t},\n\t{\n\t\t'country': 'Israel',\n\t\t'capital': 'Jerusalem',\n\t\t'continent': 
'Asia'\n\t},\n\t{\n\t\t'country': 'Palestine',\n\t\t'capital': 'Al Quds',\n\t\t'continent': 'Asia'\n\t}\n]\n\nnumber_of_continents = set()\nfor country in countries:\n number_of_continents.add((country[\"continent\"]))\n\nprint(f\"Number of continents is: {len(number_of_continents)}\")\n\n\n\n# sampleDict = { \n# \"class\":{ \n# \"student\":{ \n# \"name\":\"Mike\",\n# \"marks\":{ \n# \"physics\":70,\n# \"history\":80\n# }\n# }\n# }\n# }\n\nsampleDict = {\n \"name\": \"Kelly\",\n \"age\":25,\n \"salary\": 8000,\n \"city\": \"New york\"\n\n}\nkeysToRemove = [\"name\", \"salary\"]\nfor key in keysToRemove:\n del sampleDict[key]\nprint(sampleDict)\n\n\n\n# Hashtable\nnames = [\"jon\", \"jackie\", \"gabi\", \"dennis\", \"kobe\"]\nphonebook = {}\nfor name in names:\n key = name[0].upper()\n if key not in phonebook:\n phonebook[key] = [name]\n else:\n phonebook[key].append(name)\nprint(phonebook)\n\n# letter_lookup = {name[0].upper() : name for name in names}\n# print(letter_lookup)","repo_name":"jacprez/developers.institute","sub_path":"Week 4/Day 3/Class Exercises/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"29162001321","text":"import glob\nimport os\nimport random\nfrom abc import ABC, abstractmethod\n\nimport cv2\nimport gymnasium\nimport numpy as np\nimport pybullet as p\nimport pybullet_data as pd\nfrom agent import Kuka\nfrom gymnasium import spaces\nfrom gymnasium.utils import seeding\n\n\nclass KukaGraspEnvFramework(\n gymnasium.Env,\n ABC,\n):\n \"\"\"Kuka robotic arm grasp envs' framework.\"\"\"\n\n def __init__(\n self,\n render=True,\n is_test=False,\n block_random=0.2,\n dv=0.1,\n max_step=10,\n camera_random=0,\n width=128,\n height=128,\n show_image=False,\n use_depth_image=False,\n ):\n \"\"\"Initializes the KukaDiverseObjectEnv.\n\n Args:\n renders: If true, render the bullet GUI.\n is_test: If true, use the test set of objects. If false, use the train\n set of objects.\n block_random: A float between 0 and 1 indicated block randomness. 0 is\n deterministic.\n dv: The velocity along each dimension for each action.\n max_step: The maximum number of actions per episode.\n camera_random: A float between 0 and 1 indicating camera placement\n randomness. 
0 is deterministic.\n width: The image width.\n height: The observation image height.\n num_objects: The number of objects in the bin.\n show_image:\n use_depth_image:\n\n \"\"\"\n super(KukaGraspEnvFramework, self).__init__()\n self.urdf_root = pd.getDataPath() # << pybullet自带的urdf文件路径\n self.time_step = 1.0 / 240 # << 每一步的仿真时间\n self.env_step = 0\n self.is_test = is_test\n self.max_force = 500\n self.max_velocity = 0.25\n self.block_random = block_random\n self.dv = dv\n self.camera_random = camera_random\n self.width = width\n self.height = height\n self.vision_servo = False\n self.use_depth_image = use_depth_image\n self.max_step = max_step\n self.show_image = show_image\n # several parameters\n self.action_apply_time = 500\n self.successful_grasp_times = 0 # << 抓取成功次数\n self.total_grasp_times = 0 # << 尝试抓取次数\n\n # connect the physics engine\n if render:\n self.cid = p.connect(p.SHARED_MEMORY)\n if self.cid < 0:\n self.cid = p.connect(p.GUI)\n # set God view 上帝视角\n p.resetDebugVisualizerCamera(\n cameraDistance=1.3,\n cameraYaw=180,\n cameraPitch=-41,\n cameraTargetPosition=[0.52, -0.2, -0.33],\n )\n else:\n self.cid = p.connect(p.DIRECT)\n self.seed()\n ########################################################################\n # observation spaces\n ########################################################################\n if self.use_depth_image:\n pass\n else:\n self.observation_space = spaces.Box(\n low=0, high=1, shape=(3, self.height, self.width), dtype=np.float32\n )\n\n @abstractmethod\n def reset(self):\n \"\"\"please implement in subclass\"\"\"\n\n def env_reset(self):\n ########################################################################\n # set the environment of pybullet\n ########################################################################\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150) # 求解迭代器的次数\n p.setTimeStep(self.time_step) # 时间步长\n # load objects\n p.loadURDF(os.path.join(self.urdf_root, \"plane.urdf\"), [0, 0, -1])\n p.loadURDF(\n os.path.join(self.urdf_root, \"table/table.urdf\"),\n 0.5000000,\n 0.00000,\n -0.820000,\n 0.000000,\n 0.000000,\n 0.0,\n 1.0,\n )\n p.setGravity(0, 0, -9.81)\n ########################################################################\n # load block\n ########################################################################\n self.tray_uid = p.loadURDF(\n os.path.join(self.urdf_root, \"tray/tray.urdf\"),\n 0.640000,\n 0.075000,\n -0.190000,\n 0.000000,\n 0.000000,\n 1.000000,\n 0.000000,\n )\n ########################################################################\n # load kuka\n ########################################################################\n self.kuka = Kuka(time_step=self.time_step)\n ########################################################################\n # set camera\n ########################################################################\n # TODO(ecstayalive@163.com): optimize the camera locate position\n target_position = [0.23, 0.2, 0.54]\n distance = 0.5\n pitch = -56 + self.camera_random * np.random.uniform(-3, 3)\n yaw = 245 + self.camera_random * np.random.uniform(-3, 3)\n roll = 0\n self.view_mat = p.computeViewMatrixFromYawPitchRoll(\n target_position, distance, yaw, pitch, roll, 2\n )\n fov = 20.0 + self.camera_random * np.random.uniform(-2, 2)\n aspect = self.width / self.height\n near = 0.01\n far = 10\n self.proj_mat = p.computeProjectionMatrixFOV(fov, aspect, near, far)\n\n ########################################################################\n # set field of view 
size\n ########################################################################\n fov = 60\n aspect = self.width / self.height\n near = 0.1\n far = 100\n self.proj_mat = p.computeProjectionMatrixFOV(fov, aspect, near, far)\n ########################################################################\n # set parameters\n ########################################################################\n # 仿真步数\n self.env_step = 0\n ########################################################################\n # Choose the objects in the bin\n ########################################################################\n self.num_objects = np.random.randint(1, 6)\n urdf_list = self.get_random_objects(self.num_objects, self.is_test)\n self.object_uids = self.place_objects_randomly(urdf_list)\n self.observation = self.get_observation()\n return self.observation\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n @abstractmethod\n def step(self, action):\n \"\"\"please implement in subclass\"\"\"\n\n @abstractmethod\n def reward(self):\n \"\"\"Reward function\n 通过改变奖励函数改变机器人表现\n 目前是抓取成功奖励为1,其余为0\n\n Returns:\n reward\n\n \"\"\"\n\n \"\"\"please implement in subclass\"\"\"\n\n def place_objects_randomly(self, urdf_list):\n \"\"\"Place objects randomly\"\"\"\n # Randomize positions of each object urdf.\n object_uids = []\n for urdf_name in urdf_list:\n xpos = 0.4 + self.block_random * random.random()\n ypos = self.block_random * (random.random() - 0.5)\n angle = np.pi / 2 + self.block_random * np.pi * random.random()\n orn = p.getQuaternionFromEuler([0, 0, angle])\n urdf_path = os.path.join(self.urdf_root, urdf_name)\n uid = p.loadURDF(\n urdf_path, [xpos, ypos, 0.15], [orn[0], orn[1], orn[2], orn[3]]\n )\n object_uids.append(uid)\n # Let each object fall to the tray individual, to prevent object\n # intersection.\n for _ in range(500):\n p.stepSimulation()\n return object_uids\n\n def get_observation(self):\n \"\"\"获取当前步的相机图像\"\"\"\n # View state\n (_, _, px, dx, _) = p.getCameraImage(\n width=self.width,\n height=self.height,\n viewMatrix=self.view_mat,\n projectionMatrix=self.proj_mat,\n )\n self.rgb_image = np.array(px, dtype=np.uint8)[:, :, :3][:, :, ::-1]\n # self.rgb_image = cv2.cvtColor(self.rgb_image, cv2.COLOR_BGR2GRAY)\n if self.show_image:\n img = self.rgb_image.copy()\n cv2.imshow(\"observation\", img)\n cv2.waitKey(1)\n self.rbg_image = np.array(self.rgb_image / 255.0, dtype=np.float32)\n return self.rbg_image.transpose(2, 0, 1)\n\n def get_random_objects(self, num_objects, test):\n \"\"\"Randomly choose an object urdf from the random_urdfs directory.\"\"\"\n if test:\n urdf_pattern = os.path.join(self.urdf_root, \"random_urdfs/*0/*.urdf\")\n else:\n urdf_pattern = os.path.join(self.urdf_root, \"random_urdfs/*[1-9]/*.urdf\")\n found_object_directories = glob.glob(urdf_pattern)\n total_num_objects = len(found_object_directories)\n selected_objects = np.random.choice(np.arange(total_num_objects), num_objects)\n return [\n found_object_directories[object_index] for object_index in selected_objects\n ]\n\n @abstractmethod\n def terminate(self):\n \"\"\"Terminating function\n 终止函数,用于终止程序\n\n \"\"\"\n\n \"\"\"please implement in subclass\"\"\"\n\n def close(self):\n \"\"\"Close simulation environment\"\"\"\n 
p.disconnect()\n","repo_name":"ecstayalive/Thunder","sub_path":"envs/kuka_grasp_env/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":9609,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"69"} +{"seq_id":"2031909429","text":"number = int(input('\\033[1;31mDigite o primeiro valor: \\033[0;0m'))\nconclusão = 0\nfor primo in range(1, number + 1):\n if number % primo == 0:\n print('\\033[1;33m{}\\033[0;0m'.format(primo), end= ' ')\n conclusão += 1\n else:\n print('\\033[1;34m{}\\033[0;0m'.format(primo), end= ' ')\nprint('\\n\\033[1;31mO número {} é divisível {} vezes'.format(number, conclusão))\nif conclusão == 2:\n print('Então ele é PRIMO')\nelse:\n print('Então ele não é PRIMO\\033[0;0m')\n","repo_name":"Nero1Dev/ExerciciosPython","sub_path":"ex052.py","file_name":"ex052.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12114914493","text":"#!/usr/bin/env python3\n\nfrom pathlib import Path\nimport pdb\nfrom mseg.dataset_apis.SunrgbdImageLevelDataset import SunrgbdImageLevelDataset\n\n_TEST_DIR = Path(__file__).resolve().parent\n\n\ndef test_constructor() -> None:\n \"\"\" \"\"\"\n dataroot = f\"{_TEST_DIR}/test_data/SUNRGBD_test_data\"\n bddild = SunrgbdImageLevelDataset(dataroot)\n\n\ndef test_get_img_pair() -> None:\n \"\"\" \"\"\"\n dataroot = f\"{_TEST_DIR}/test_data/SUNRGBD_test_data\"\n bddild = SunrgbdImageLevelDataset(dataroot)\n\n split = \"train\"\n fname_stem = \"img-000001\"\n rgb_img, label_img = bddild.get_img_pair(fname_stem, split)\n assert rgb_img.mean() - 134.806 < 1e-3\n assert label_img.mean() - 16.788 < 1e-3\n\n split = \"test\"\n fname_stem = \"img-000001\"\n rgb_img, label_img = bddild.get_img_pair(fname_stem, split)\n assert rgb_img.mean() - 125.300 < 1e-3\n assert label_img.mean() - 47.588 < 1e-3\n\n\ndef test_get_segment_mask() -> None:\n \"\"\" \"\"\"\n dataroot = f\"{_TEST_DIR}/test_data/SUNRGBD_test_data\"\n bddild = SunrgbdImageLevelDataset(dataroot)\n seq_id = \"\"\n query_segmentid = 21 # ceiling\n fname_stem = \"img-000001\"\n split = \"test\"\n\n class_mask = bddild.get_segment_mask(seq_id, query_segmentid, fname_stem, split)\n assert class_mask.sum() == 37819\n assert class_mask.mean() - 0.098 < 1e-3\n assert class_mask.size == 386900\n\n\nif __name__ == \"__main__\":\n # pass\n # test_constructor()\n # test_get_img_pair()\n test_get_segment_mask()\n","repo_name":"mseg-dataset/mseg-api","sub_path":"tests/test_SunrgbdImageLevelDataset.py","file_name":"test_SunrgbdImageLevelDataset.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":239,"dataset":"github-code","pt":"69"} +{"seq_id":"70039357340","text":"import os\nimport re\nimport time\nfrom html import escape\nfrom urllib.parse import quote_plus as urlquote\nimport sqlite3 as sql\nfrom hashlib import sha256\nimport asyncio\nimport mimetypes\nfrom aiohttp import web, ClientSession\nfrom kenny2automate.utils import DummyCtx\nfrom kenny2automate.i18n import LANG, i18n\n\nDISCORD_API = 'https://discordapp.com/api/v6'\nLANG = {i: i18n(i, 'qqq') for i in LANG}\nONE_YEAR = 31557600\nGLOBAL_GAMES = [\n 'Go Fish', 'Connect 4',\n 'Fight', 'Boggle', 'Uno',\n 'Blackjack', 'Set', 'Chess',\n '007', 'Big Two'\n]\n\nclass Handler:\n dtx = DummyCtx(author=DummyCtx(name='(server)'))\n\n def __init__(\n self, bot, db, logger, prefix,\n client_id, client_secret, web_root,\n 
document_root=os.path.abspath(os.path.dirname(__file__))\n ):\n self.bot = bot\n self.db = db\n self.logger = logger\n self.prefix = prefix\n self.sessions = {}\n self.client_id = client_id\n self.client_secret = client_secret\n self.web_root = web_root\n self.root = document_root\n self.app = web.Application()\n self.app.add_routes([\n web.get('/', self.index),\n web.get('/login', self.login),\n web.get('/settings', self.settings),\n web.post('/settings', self.save_settings),\n web.get('/servers', self.servers),\n web.get(r'/servers/{server:\\d+}', self.server),\n web.post(r'/servers/{server:\\d+}', self.save_server),\n web.get(r'/{name:.+(? ONE_YEAR\n ):\n sess = {\n 'logged_in': None,\n 'last_use': time.time(),\n 'state': str(time.time())\n }\n if resp is not None:\n resp.set_cookie('session', sesh, max_age=ONE_YEAR)\n resp.set_cookie('state', sess['state'], max_age=ONE_YEAR)\n self.setsesh(sesh, sess)\n return sesh\n if sess['logged_in'] is not None:\n if time.time() > sess['logged_in'] + sess['expires_in']:\n data = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': sess['refresh_token'],\n 'redirect_uri': self.web_root + '/login',\n 'scope': 'identify guilds'\n }\n async with self.sessions[sesh].post(\n DISCORD_API + '/oauth2/token',\n data=data,\n headers={'Content-Type':'application/x-www-form-urlencoded'}\n ) as r:\n body = await r.json()\n body['logged_in'] = time.time()\n sess.update(body)\n await self.sessions[sesh].close()\n self.sessions[sesh] = ClientSession(headers={\n 'Authorization': '{} {}'.format(\n sess['token_type'], sess['access_token']\n )\n })\n sess['last_use'] = time.time()\n self.setsesh(sesh, sess)\n return None\n\n def getsesh(self, request):\n if not isinstance(request, str):\n request = request.cookies.get('session', None)\n if request is None:\n return {}\n return (self.db.execute(\n 'SELECT session FROM server_sessions WHERE session_id=?',\n (request,)\n ).fetchone() or ([{}],))[0][0]\n\n def setsesh(self, request, sesh):\n if not isinstance(request, str):\n request = request.cookies.get('session', None)\n if request is None:\n return\n if not self.getsesh(request):\n self.db.execute(\n 'INSERT INTO server_sessions VALUES (?, ?)',\n (request, [sesh])\n )\n self.sessions[request] = ClientSession()\n else:\n self.db.execute(\n 'UPDATE server_sessions SET session=? 
WHERE session_id=?',\n ([sesh], request)\n )\n\n def checkuser(self, user_id):\n res = self.db.execute(\n 'SELECT user_id FROM users WHERE user_id=?',\n (user_id,)\n ).fetchone()\n if res is None:\n self.db.execute(\n 'INSERT INTO users (user_id) VALUES (?)',\n (user_id,)\n )\n\n def logged_in(self, request):\n return self.getsesh(request).get('logged_in', None) is not None\n\n def notfound(self, *_):\n raise web.HTTPNotFound(\n text=self.letext('404.html'),\n content_type='text/html'\n )\n\n async def elg(self, request):\n if (await self.checksesh(request)) is not None:\n if request.method != 'GET':\n raise web.HTTPSeeOther(str(request.path))\n self.notfound()\n if not self.logged_in(request):\n self.notfound()\n\n def lang(self, request):\n if not self.logged_in(request):\n available = set(LANG.keys())\n preferred = (j[0] for j in sorted((\n (i.group(1), float(i.group(2) or '1'))\n for i in re.finditer(\n r'(?{}'.format(\n i, ' selected' if i == lang else '', j\n ) for i, j in LANG.items())\n options = ''.format(\n ' selected' if lang is None else '',\n i18n(lang or 'en', 'server/lang-auto')\n ) + options\n ping_th = ''.join(\n '{}'.format(i)\n for i in GLOBAL_GAMES\n )\n ping_th = '{}\\n'.format(\n len(GLOBAL_GAMES),\n i18n(lang or 'en', 'server/ping-message')\n ) + ping_th + ''\n ping_options = '\\n'.join(\n \"\"\" \"\"\".format(g, 'checked ' if g in games else '')\n for g in GLOBAL_GAMES\n )\n return web.Response(\n text=self.letext(\n 'settings.html',\n i18n(lang or 'en', 'server/settings;h1')\n ).format(\n escape(prefix),\n options,\n ping_th,\n ping_options,\n h1=i18n(lang or 'en', 'server/settings;h1'),\n prefix=i18n(lang or 'en', 'server/settings;prefix'),\n lang=i18n(lang or 'en', 'server/settings;lang'),\n save=i18n(lang or 'en', 'server/server;save'),\n back=i18n(lang or 'en', 'server/server;back'),\n ),\n content_type='text/html'\n )\n\n async def save_settings(self, request):\n await self.elg(request)\n data = await request.post()\n for k in ('prefix', 'lang', 'ping'):\n if k not in data:\n self.notfound()\n user_id = self.getsesh(request)['client']['id']\n self.checkuser(user_id)\n with self.db.connection:\n self.db.execute(\n 'UPDATE users SET prefix=?, lang=?, games_ping=? 
WHERE user_id=?',\n (\n data['prefix'] if data['prefix'].strip() else None,\n data['lang'].strip() or None,\n '|'.join(data.getall('ping')),\n user_id\n )\n )\n raise web.HTTPSeeOther(str(request.path))\n\n async def servers(self, request):\n await self.elg(request)\n sess = self.getsesh(request)\n lan = self.lang(request)\n guilds = tuple(filter(\n lambda i: (\n i and i.get_member(\n int(sess['client']['id'])\n ).guild_permissions.administrator\n ), (\n self.bot.get_guild(int(i['id']))\n for i in sess['servers']\n )\n ))\n options = ''.join(\"\"\"\n\n \n\n\"\"\".strip().format(\n str(request.path), i.id, escape(i.name), i.icon_url_as(format='png', size=64)\n ) for i in guilds)\n return web.Response(\n text=self.letext(\n 'servers.html',\n i18n(lan, 'server/servers;h1')\n ).format(\n options,\n h1=i18n(lan, 'server/servers;h1'),\n div=i18n(lan, 'server/servers;div'),\n back=i18n(lan, 'server/server;back'),\n ),\n content_type='text/html'\n )\n\n async def server(self, request):\n await self.elg(request)\n guild = self.bot.get_guild(int(request.match_info.get('server', '0')))\n if guild is None:\n self.notfound()\n if not guild.get_member(\n int(self.getsesh(request)['client']['id'])\n ).guild_permissions.administrator:\n self.notfound()\n lan = self.lang(request)\n options = \"\"\"\n \n {}\n {}\n {}\n \n {}\n \"\"\".format(\n i18n(lan, 'server/server;channel'),\n i18n(lan, 'server/server;language'),\n len(GLOBAL_GAMES),\n i18n(lan, 'server/ping-message'),\n '\\n'.join('{}'.format(i) for i in GLOBAL_GAMES),\n )\n non = i18n(lan, 'server/lang-none')\n for i in guild.text_channels:\n lang = self.db.execute(\n 'SELECT lang, games_ping FROM channels WHERE channel_id=?',\n (i.id,)\n ).fetchone()\n if lang is None:\n self.db.execute(\n 'INSERT INTO channels (channel_id) VALUES (?)',\n (i.id,)\n )\n lang, games = lang, []\n else:\n lang, games = lang\n games = (games or '').split('|')\n lang_options = '\\n'.join(''.format(\n a, ' selected' if a == lang else '', b\n ) for a, b in LANG.items())\n lang_options = '\\n'.format(\n ' selected' if lang is None else '', non\n ) + lang_options\n ping_options = '\\n'.join(\n \"\"\" \"\"\".format(i.id, g, 'checked ' if g in games else '')\n for g in GLOBAL_GAMES\n )\n options += \"\"\"\n \n
# {0}
\n \n {3}\n \"\"\".format(\n i.name, i.id, lang_options, ping_options\n )\n res = self.db.execute(\n 'SELECT guild_disabled_commands, guild_disabled_cogs, words_censor \\\nFROM guilds WHERE guild_id=?',\n (guild.id,)\n ).fetchone()\n if res is None:\n cmds, cogs, censor = [], [], ''\n self.db.execute(\n 'INSERT INTO guilds (guild_id) VALUES (?)',\n (guild.id,)\n )\n else:\n cmds, cogs, censor = res\n cmds = (cmds or '').split(',')\n cogs = (cogs or '').split(',')\n censor = censor or ''\n dcmds = ''\n def recurse_commands(thing):\n nonlocal dcmds\n if hasattr(thing, 'commands'):\n for cmd in thing.commands:\n hide = False\n parent = cmd.parent\n while parent:\n if parent.qualified_name in cmds:\n hide = True\n break\n parent = parent.parent\n dcmds += \"\"\"\n \n {prefix}{option}\n \"\"\".format(\n option=cmd.qualified_name,\n parent=cmd.parent.qualified_name if cmd.parent else '',\n cog=cmd.cog_name or 'None',\n display=(\n 'display: none'\n if (\n hide or cmd.cog_name in cogs\n )\n else ''\n ),\n prefix=self.bot.command_prefix(self.bot, None)\n )\n recurse_commands(cmd)\n recurse_commands(self.bot)\n dcogs = \"\"\"\n \"\"\".format(non)\n for cog in self.bot.cogs.keys():\n dcogs += \"\"\"\n \"\"\".format(option=cog)\n h1 = i18n(lan, 'server/server;h1', escape(guild.name))\n return web.Response(\n text=self.letext(\n 'server.html',\n h1\n ).format(\n channels=options,\n cmds=dcmds,\n cogs=dcogs,\n dcmds=cmds,\n dcogs=cogs,\n jcmds=','.join(cmds),\n jcogs=','.join(cogs),\n words_censor=i18n(\n lan, 'words/server-censor-title',\n '{}{}'.format(\n self.bot.command_prefix(self.bot, None),\n 'words'\n ),\n ),\n censor=censor,\n cmd=i18n(lan, 'server/server;command'),\n cog=i18n(lan, 'server/server;cog'),\n disabled=i18n(lan, 'server/server;disabled'),\n h1=h1,\n save=i18n(lan, 'server/server;save'),\n back=i18n(lan, 'server/server;back'),\n ),\n content_type='text/html'\n )\n\n async def save_server(self, request):\n await self.elg(request)\n guild = self.bot.get_guild(int(request.match_info.get('server', '0')))\n if guild is None:\n self.notfound()\n if not guild.get_member(\n int(self.getsesh(request)['client']['id'])\n ).guild_permissions.administrator:\n self.notfound()\n data = await request.post()\n params = []\n otherparams = {}\n for k in data.keys():\n if not k.startswith('channel-'):\n otherparams[k] = ','.join(data.getall(k))\n continue\n param = {'channel_id': int(k[len('channel-'):])}\n for v in data.getall(k):\n v = v.partition('=')\n if v[0] == 'ping':\n if 'ping' not in param:\n param['ping'] = set()\n param['ping'].add(v[-1])\n else:\n param[v[0]] = v[-1] or None\n param['ping'] = '|'.join(param.get('ping', ())) or None\n params.append(param)\n otherparams['guild_id'] = guild.id\n if set(param['channel_id'] for param in params) \\\n - set(channel.id for channel in guild.channels): # is not empty\n raise web.HTTPBadRequest\n try:\n with self.db.connection:\n self.db.executemany(\n 'UPDATE channels SET lang=:lang, games_ping=:ping \\\nWHERE channel_id=:channel_id',\n params\n )\n self.db.execute(\n 'UPDATE guilds SET guild_disabled_commands=:disable_cmd, \\\nguild_disabled_cogs=:disable_cog, words_censor=:words_censor WHERE guild_id=:guild_id',\n otherparams\n )\n except sql.ProgrammingError as exc:\n raise web.HTTPBadRequest(reason=str(exc))\n raise web.HTTPSeeOther(request.path)\n\n async def file(self, request):\n path = request.match_info.get('name', '.html') or '.html'\n fullpath = self.fil(path)\n if os.path.isfile(fullpath):\n with open(fullpath, 'rb') as f:\n 
#self.logger.info('Request serving: {}'.format(path), extra={'ctx': self.dtx})\n return web.Response(\n status=200,\n body=f.read(),\n content_type=mimetypes.guess_type(fullpath)[0]\n )\n else:\n #self.logger.error('Request not served, 404: {}'.format(path), extra={'ctx': self.dtx})\n self.notfound()\n\n#Handler(None, None, None, 512581527343726592, 't5jgg5udqQrdiJe_bKHrn0VrEDMztpZ7').run_sync()\n","repo_name":"Kenny2github/kenny2automate","sub_path":"kenny2automate/server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":22667,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"13715894894","text":"import sys\n\ninput = sys.stdin.readline\n\nN, M, K = map(int, input().split())\npower_plants = set(map(int, input().split()))\ncabels = [[] for _ in range(M)]\nfor i in range(M):\n u, v, w = map(int, input().split())\n cabels[i] = (w, u, v)\ncabels.sort()\nparent = [i for i in range(N + 1)]\n\n\ndef find_parent(v):\n if parent[v] == v:\n return v\n parent[v] = find_parent(parent[v])\n return parent[v]\n\n\ndef can_union(v1, v2):\n pv1 = find_parent(v1)\n pv2 = find_parent(v2)\n if pv1 in power_plants and pv2 in power_plants:\n return False\n elif pv1 in power_plants:\n parent[pv2] = pv1\n elif pv2 in power_plants:\n parent[pv1] = pv2\n else:\n if pv1 == pv2:\n return False\n elif pv1 > pv2:\n parent[pv1] = pv2\n else:\n parent[pv2] = pv1\n return True\n\n\nans = 0\ncnt = 0\nfor w, u, v in cabels:\n if can_union(u, v):\n ans += w\n cnt += 1\n if cnt == N - K:\n break\nprint(ans)\n","repo_name":"nnoobbaagguu/Algorithm","sub_path":"Baekjoon Online Judge/10423.py","file_name":"10423.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"1179370472","text":"import numpy as np\n\n\ndef count_nonzero(X, axis=None, sample_weight=None):\n if axis == -1:\n axis = 1\n elif axis == -2:\n axis = 0\n elif X.format != 'csr':\n raise TypeError('Expected CSR sparse format')\n if axis is None:\n if sample_weight is None:\n return X.nnz\n else:\n return np.dot(np.diff(X.indptr), sample_weight)\n elif axis == 1:\n out = np.diff(X.indptr)\n if sample_weight is None:\n return out\n return out * sample_weight\n elif axis == 0:\n if sample_weight is None:\n return np.bincount(X.indices, minlength=X.shape[1])\n else:\n weights = np.repeat(sample_weight, np.diff(X.indptr))\n return np.bincount(\n X.indices, minlength=X.shape[1], weights=weights)\n else:\n raise ValueError('Unsupported')\n","repo_name":"luoshao23/ML_algorithm","sub_path":"luolearn/utils/sparsefuncs.py","file_name":"sparsefuncs.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"69"} +{"seq_id":"69850614619","text":"import argparse\nimport logging\n\nimport numpy as np\nimport random\nimport torch\nimport torch.optim as O\n\nfrom datasets import get_dataset, get_testset, get_dataset_configurations\nfrom models import get_model\nfrom runners import Runner\nfrom xml.dom import minidom\n\n\ndef _prepare_batch(batch):\n x, y = batch, batch.relatedness_score\n return x, y\n\ndef _write_xml(filename, pred):\n \"\"\"Docstring.\"\"\"\n with open(filename, encoding='utf8') as fp:\n xml = minidom.parse(fp)\n print('Iniciou XML')\n pairs = xml.getElementsByTagName('pair')\n for pair in pairs:\n # print('pred: ', pred)\n sim = str(pred[pairs.index(pair)]).split(',')\n similarity = 
sim[0].replace('tensor(', '')\n # print('similarity: ', similarity)\n # print('pairs.index: ', pairs.index(pair))\n # print('pair: ', str(pred[pairs.index(pair)]))\n pair.setAttribute('similarity', similarity)\n with open(filename, 'w', encoding='utf8') as fp:\n fp.write(xml.toxml())\n print('XML escrito')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Sentence similarity model')\n parser.add_argument('--model', default='bimpm', choices=['bimpm'], help='Model to use')\n parser.add_argument('--dataset', default='assin', choices=['assin'], help='Dataset to use')\n parser.add_argument('--batch-size', type=int, default=64, help='Batch size')\n parser.add_argument('--epochs', type=int, default=15, help='Number of epochs')\n parser.add_argument('--lr', type=float, default=2e-4, help='Learning rate')\n parser.add_argument('--regularization', type=float, default=3e-4, help='Regularization')\n parser.add_argument('--seed', type=int, default=1234, help='Seed for reproducibility')\n parser.add_argument('--device', type=int, default=0, help='Device, -1 for CPU')\n parser.add_argument('--log-interval', type=int, default=50, help='Device, -1 for CPU')\n\n args = parser.parse_args()\n random.seed(args.seed)\n np.random.seed(args.seed)\n torch.manual_seed(args.seed)\n if args.device != -1:\n torch.cuda.manual_seed(args.seed)\n\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)\n model = get_model(args, dataset_cls, embedding)\n\n total_params = 0\n for param in model.parameters():\n size = [s for s in param.size()]\n total_params += np.prod(size)\n logger.info('Total number of parameters: %s', total_params)\n\n loss_fn, metrics, y_to_score, resolved_pred_to_score = get_dataset_configurations(args)\n\n optimizer = O.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, weight_decay=args.regularization)\n runner = Runner(model, loss_fn, metrics, optimizer, y_to_score, resolved_pred_to_score, args.device, None)\n runner.run(args.epochs, train_loader, dev_loader, test_loader, args.log_interval)\n print('terminou tudo')\n '''\n dataset_cls, train_loader, dev_loader, test_loader, embedding = get_dataset(args)\n print(test_loader)\n checkpoint = torch.load('9dc095f1-8cb9-4041-a661-8188b008df27.model')\n print('checkpoint')\n model.load_state_dict(checkpoint['state_dict'])\n print('load_state_dict')\n model.eval()\n print('eval')\n x = test_loader\n print('test_loader')\n y_pred = model(test_loader)\n _write_xml('/home/jessica/teste-bimpm/data/assin/output.xml', y_pred)\n '''\n","repo_name":"jehrodrigues/biMPM-ASSIN2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"31288085736","text":"import re\nfrom datetime import datetime\n\nimport pdfminer.high_level\n\nimport categories\nfrom mail_api.abstract_mail_api import AbstractAttachment\nfrom message_handler import MessageHandler\nfrom utils import temporary_locale\n\n\nclass MusicStoreMessageHandler(MessageHandler):\n def get_type(self):\n return categories.COMPUTER_HARDWARE\n\n def get_query_params(self):\n return {\n self.SUBJECT: \"MUSICSTORE Your Music Store sales invoice\",\n self.SENDER: 
\"export@musicstore.com\",\n }\n\n def extract_txt(self, pdffile: str):\n with temporary_locale(\"en_US\"):\n text: str = pdfminer.high_level.extract_text(pdffile)\n purchase_date: str = re.search(r\"\\b(\\d\\d\\.\\d\\d.\\d\\d\\d\\d)\\b\", text)[0]\n self.purchase_date = datetime.strptime(purchase_date, \"%d.%m.%Y\")\n self.amount = float(\n re.search(r\"Total CHF inkl. MwSt\\D+(\\d+,\\d+)\", text)[1].replace(\n \",\", \".\"\n )\n )\n\n def handle_attachment(self, attachment: AbstractAttachment):\n if re.search(r\"Return-Form\", attachment.get_filename()):\n return True\n pdffile: str = self.save(attachment)\n self.filename = attachment.get_filename()\n self.extract_txt(pdffile)\n","repo_name":"bwagner/gi","sub_path":"plugins/music_store_message_handler.py","file_name":"music_store_message_handler.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"20178783856","text":"\"\"\"Swaps the x and y coordinates of a Pascal VOC xml formatted dataset file\nArguments:\npath -- Path to the dataset file to be converted\noutput -- Path to save the coordinate swapped dataset file\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\nimport xml.etree.ElementTree as ET\n\nlogging.basicConfig(\n level=logging.INFO, format=\"[%(levelname)s] %(asctime)s: %(message)s\", filemode=\"a\"\n)\n\nlogger = logging.getLogger()\n\nparser = argparse.ArgumentParser(\n description=\"Script for deleting all instances of a particular class name from a Pascal VOC formatted dataset file\"\n)\nparser.add_argument(\n \"--input\", \"-i\", type=str, required=True, help=\"Path to the dataset file to be changed\"\n)\nparser.add_argument(\n \"--output\", \"-o\", type=str, required=False, help=\"Path to save the new dataset file\"\n)\nparser.add_arugment(\n \"--classname\", \"-n\", required=True, help=\"Class name to be deleted from the dataset file\"\n)\n\narguments = parser.parse_args()\ninput_path = arguments.input\nclass_name = arguments.classname\nif arguments.output:\n output_path = arguments.output\nelse:\n output_path = input_path\n\nlogger.info(f\"======== Delete VOC Class ========\")\nlogger.info(f\"Input File Path: {input_path}\")\nlogger.info(f\"Output File Path: {output_path}\")\nlogger.info(f\"Class name: {class_name}\")\n\ntree = ET\ntry:\n tree = ET.parse(input_path)\nexcept Exception as e:\n logger.fatal(f\"Unable to read {input_path}. 
File is not xml or does not exist\")\n logger.error(e)\n sys.exit(1)\n\nroot = tree.getroot()\nfor label in root.findall(\"object\"):\n bbox = label.find(\"bndbox\")\n type = label.find(\"name\").text\n deleted = False\n if type == class_name:\n root.delete(label)\n deleted = True\n\n logger.debug(f\"Type: {type}\")\n logger.debug(f\"Deleted: {deleted}\")\n\n\ntry:\n tree.write(output_path)\nexcept Exception as e:\n logger.error(f\"Unable to write to {output_path}\")\n logger.error(e)\n sys.exit(1)\n","repo_name":"spencervoiss/dataset_utils","sub_path":"voc_delete_class.py","file_name":"voc_delete_class.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"5999643291","text":"from faker import Faker\nimport random\nimport mysql.connector\n\n# Conectando ao banco de dados\nconn = mysql.connector.connect(\n host='localhost',\n user='root',\n password='password',\n database='flask2k'\n)\n\ncursor = conn.cursor()\n\nfake = Faker()\n\nfor _ in range(2000):\n nome = fake.name()\n idade = random.randint(1, 100)\n genero = random.choice(['Masculino', 'Feminino', 'Outro'])\n endereco = fake.address()\n cidade = fake.city()\n estado = fake.state_abbr()\n telefone = fake.phone_number()\n email = fake.email()\n data_admissao = fake.date_this_year()\n data_alta = fake.date_this_year()\n diagnostico = fake.sentence()\n tratamento = fake.text()\n observacoes = fake.text()\n try:\n\n cursor.execute('''INSERT INTO pacientes \n (nome, idade, genero, endereco, cidade, estado, telefone, email, \n data_admissao, data_alta, diagnostico, tratamento, observacoes) \n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''',\n (nome, idade, genero, endereco, cidade, estado, telefone, email,\n data_admissao, data_alta, diagnostico, tratamento, observacoes))\n\n except Exception as e:\n print(\"Não foi possível inserir os dados!\\n{}\".format(e))\n\nconn.commit()\ncursor.close()\nconn.close()\n\nprint(\"Dados inseridos com sucesso!\")\n","repo_name":"LeoVeig4/CRUD-Hospital","sub_path":"popular_banco.py","file_name":"popular_banco.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"11918512215","text":"import telebot\r\nfrom telebot import types\r\nimport wikipedia\r\nfrom googletrans import Translator\r\nimport time\r\nimport requests\r\n\r\n#token va modullarni cahqirish uchun\r\nadmin_id='-1001261577807'\r\napikey='-jgjTI4nQgb-UebbeCNF3unEFqaMyOAkDi_ZMBmlQIA'\r\nTOKEN=\"1546844166:AAF77WzEduNI6hIu-TgHkHYz8fBW-V83Vu8\"\r\nbot = telebot.TeleBot(TOKEN, parse_mode='HTML') # parse mode Html uchun ham o'tishi mumkin\r\ntranslator = Translator()\r\n\r\n#telegram keyboard uchun\r\nmarkup_inline=types.InlineKeyboardMarkup()\r\nitem_uz=types.InlineKeyboardButton(text='Uzbekcha 🇺🇿',callback_data='uz')\r\nitem_ru=types.InlineKeyboardButton(text='русский 🇷🇺',callback_data='ru')\r\nitem_report_problem=types.InlineKeyboardButton(text='❌Report❌',callback_data='report_problem')\r\nmarkup_inline.add(item_uz,item_ru,item_report_problem)\r\n\r\n#report problem uchun buttonlar\r\nmarkup_inline2=types.InlineKeyboardMarkup()\r\nitem_1=types.InlineKeyboardButton(text=\"Xato ma'lumot\",callback_data='report1')\r\nitem_2=types.InlineKeyboardButton(text=\"Error 404\",callback_data='report2')\r\nitem_3=types.InlineKeyboardButton(text=\"Rasm error\",callback_data='report3')\r\nmarkup_inline2.add(item_1,item_2,item_3)\r\n\r\n#botni 
boshlash funksiyasi\r\n@bot.message_handler(commands=['start'])\r\ndef send_welcome(message):\r\n global name\r\n global id\r\n name = message.from_user.first_name\r\n id=message.from_user.id\r\n print(name, id)\r\n bot.reply_to(message, '''Salom {}, bizning wikipedia_uz botiga xush kelibsiz. \r\n Wikipedia so'rovlarini jo'natishingiz mumkin.\r\n Masalan: Apple/Warsaw/Uzbekistan\r\n '''.format(name))\r\n bot.send_message(chat_id=admin_id,text='{} -botga kirdi!'.format(name))\r\n #reklama uchun funksiya 15 minut va 50 soatda jonatadi\r\n #bu funskiya xozir ishlamayapti \r\n #reklama jonatish funksiyasi\r\n\r\n@bot.message_handler(commands=['about'])\r\ndef send_about(message):\r\n bot.send_message(message.chat.id,'''\r\n Botimiz xaqida:\r\nMasalan:\r\n✅ Yangilangan sanasi- 08/02/2021\r\n✅ Dasturchi- @husanboy_us\r\n✅ Xamkorlik uchun - @husanboy_us\r\n✅ Bizning kanalimiz https://t.me/artofitt\r\n ''' )\r\n\r\n#asosiy funksiya yoki funskiyalar\r\n@bot.message_handler(func=lambda message: True)\r\ndef main_func(message):\r\n global get_wiki\r\n try:\r\n msg=message.text\r\n get_wiki=wikipedia.summary(msg, sentences=7)\r\n get_wiki_pics=wikipedia.page(msg).images[0]\r\n bot.send_message(message.chat.id,get_wiki,reply_markup=markup_inline )\r\n bot.send_photo(message.chat.id, photo=get_wiki_pics)\r\n except Exception :\r\n bot.send_message(message.chat.id, text=\"Nimadur xatolik yuz berdi. Aniq javob olshingiz uchun aniq so'rov kiriting 👇👇👇\")\r\n bot.send_message(message.chat.id,'''\r\n So'rovni Ingliz Tilida yozishni yoki tekshirishni unutmang!\r\nMasalan:\r\nTrump-❌❌❌ Donald Trump-✅✅✅\r\n ''' )\r\n \r\n#function gets inline data and translates to the given languages\r\n@bot.callback_query_handler(func=lambda call: True)\r\ndef query_text(call):\r\n global name\r\n global id\r\n if call.data=='uz':\r\n translation = translator.translate(get_wiki, dest='uz',)\r\n data_uz=translation.text\r\n bot.send_message(call.message.chat.id, data_uz) \r\n elif call.data=='ru':\r\n translation = translator.translate(get_wiki, dest='ru',)\r\n data_ru=translation.text\r\n bot.send_message(call.message.chat.id,data_ru)\r\n elif call.data=='report_problem':\r\n bot.send_message(call.message.chat.id, text=' 🔻🔻🔻 Xato turini tanlang! 🔻🔻🔻 ',reply_markup=markup_inline2)\r\n elif call.data=='report1':\r\n global name\r\n global id\r\n print('hello report 1')\r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n bot.send_message(chat_id=admin_id,text=\" Name: {} ID: {} -Reported code 1\".format(name,id))\r\n \r\n elif call.data=='report2':\r\n print('hello report 2')\r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n bot.send_message(chat_id=admin_id,text=\" Name: {} ID: {} -Reported code 2\".format(name,id))\r\n \r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! Tez orada kamchilik tuzatiladi!')\r\n elif call.data=='report3':\r\n print('hello report 3')\r\n bot.send_message(call.message.chat.id,text='Xatolik Botimiz Adminiga yetib bordi! 
Tez orada kamchilik tuzatiladi!')\r\n bot.send_message(chat_id=admin_id,text=\" Name: {} ID: {} -Reported code 3\".format(name,id))\r\n \r\n else:\r\n print('Not working query_text funskiyasida')\r\n\r\n\r\nbot.polling() \r\n#translation = translator.translate(result, dest='uz',)\r\n#bot.reply_to(message,translation,parse_mode=None)\r\n","repo_name":"HusanboyUs/wikipedia_telegram_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7644258234","text":"import sys\nip = sys.stdin.readline\n\nn = int(ip())\npi = sorted(list(map(int, ip().split())))\n\nres = 0\nfor i in range(n):\n res += (pi[i] * (n-i))\nprint(res)\n\n''' ATM\nATM 앞에 N명의 사람들이 줄서있다. \n사람들은 1 ~ N 번 까지 번호 매겨짐. i 번 사람이 돈을 인출하는데 걸리는 시간은 Pi분\n\n줄을 서는 순서에 따라서 인출하는데 필요한 시간의 합이 달라진다고? 그렇네\n1 2 3 4 5로 서면\n1\n1 2\n1 2 3\n1 2 3 4\n1 2 3 4 5\n\n이렇게 시간이 소요되니까\n암튼 총합 시간이 가장 작아지게 해라\n\n- 입력 -\n첫 줄에 사람 수 N, 둘째 줄에 각각 시간\n\n시간 1초 메모리 256MB\n\n--1트--: 이게 C3 문제야?\n그냥 뇌 비우고 생각하면 오름차순 정렬해서 사람 수 역순으로 곱해서 더해주면 되는거 아니냐?\n'''","repo_name":"NonokEE/Coding_study","sub_path":"BaekJoon Silver/S4_11399.py","file_name":"S4_11399.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7678797710","text":"from datetime import date, timedelta\n\nfrom django.core.management.base import BaseCommand\nfrom django.db.models.base import ObjectDoesNotExist\nfrom factory.fuzzy import FuzzyInteger, FuzzyNaiveDateTime\n\nfrom ...models import Category, Unit, User\nfrom ...tests.factories import SubstanceFactory\n\nimport datetime\n\n\nclass Command(BaseCommand):\n args = ' '\n help = 'Populate substance table with random dummy data.'\n\n def add_arguments(self, parser):\n parser.add_argument('username')\n parser.add_argument('upper_bound')\n parser.add_argument('substance_type')\n parser.add_argument('substance_unit')\n\n def handle(self, *args, **options):\n upper_bound = options['upper_bound']\n substance_type = options['substance_type']\n substance_unit = options['substance_unit']\n username = options['username']\n\n try:\n user = User.objects.get(name=username.lower())\n except ObjectDoesNotExist:\n user = User.objects.create(name=username.lower())\n\n try:\n category = Category.objects.get(name=substance_type.lower())\n except ObjectDoesNotExist:\n category = Category.objects.create(name=substance_type.lower())\n\n try:\n unit = Unit.objects.get(name=substance_unit.lower())\n except ObjectDoesNotExist:\n unit = Unit.objects.create(name=substance_unit.lower(), category=category)\n\n\n end_date = date.today()\n start_date = end_date - timedelta(days=7)\n for i in self.get_date_list(start_date, end_date):\n for _ in range(3):\n SubstanceFactory(\n user=user,\n unit=unit,\n category=category,\n record_date=i,\n record_time=FuzzyNaiveDateTime(datetime.datetime.now() - timedelta(hours=24)),\n value = FuzzyInteger(0, int(upper_bound))\n )\n\n def get_date_list(cls, start, end):\n delta = end - start\n return [(start + timedelta(days=i)) for i in range(delta.days+1)]\n","repo_name":"budiryan/urinalysis-app","sub_path":"backend/management/commands/load_random_substance_data.py","file_name":"load_random_substance_data.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"42872664959","text":"from django.shortcuts import render\nfrom 
django.template import RequestContext\nfrom django.shortcuts import render_to_response\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom mysite2.baiduparser import BaiduPage\nfrom mysite2.datamgr import *\n\ndef index(request):\n names = {'wd':'app'}\n return render_to_response('index.html',names,context_instance=RequestContext(request))\n\ndef search(request):\n wd = request.GET['wd']\n #bdChecked = request.GET['baidu']\n p = BaiduPage()\n p.parse(wd)\n resultitems = p.getSiteItems()\n #resultitems = [{'title':'site1','desc':'site1 desc','url':'aa.com','pr':7,'baidurank':3,'alexa':133},\n # {'title':'sit2','desc':'site2 desc','url':'bb.com','pr':17,'baidurank':33,'alexa':1233}]\n searchpages = []\n for i in range(1,11):\n url = \"s/?wd=\" + wd\n url = \"%s&pn=%d\"%(url,i)\n searchpages.append({'name':i,'url':url})\n dict = {'wd':wd,'resultitems':resultitems,'searchpages':searchpages}\n return render_to_response('list.html',dict,context_instance=RequestContext(request))\n\ndef ajax(request):\n siteUrl = request.GET['siteUrl']\n siteUrl2 = \"http://\"+siteUrl\n q = request.GET['q']\n str = ''\n if q=='alexa':\n str = '%d'%(g_dataMgr.getAlexa(siteUrl2))\n elif q=='pr':\n str = '%d'%g_dataMgr.getPr(siteUrl)\n elif q=='sum':\n sum = g_dataMgr.getSiteSum(siteUrl2)\n str = sum['title']+\",\"+sum['desc']\n elif q=='baidurank':\n str = '%d'%g_dataMgr.getBaiduRank(siteUrl)\n return HttpResponse(str)","repo_name":"colenhyt/mysite2","sub_path":"mysite2/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22715104157","text":"import cv2\nimport mediapipe as mp\nimport os\n\nDATASET_DIR = \"../../Datasets/Guns_In_CCTV/VOC/\"\n\n\n\ntrain_files= []\nval_files= []\ntest_files= []\n\ndirs = [\"train\", \"valid\", \"test\"]\ndatas = [[], [] ,[]]\n\nfor i, d in enumerate(dirs):\n for f in os.listdir(DATASET_DIR + d + \"/\"):\n if f[-4:] == \".xml\":\n datas[i].append(d + \"/\" + f[:-4])\n with open(f\"CCTV/{d}.txt\", \"w\") as outfile:\n outfile.write(\"\\n\".join(datas[i]))\n","repo_name":"JoshVStaden/pistol_detection_with_yolo","sub_path":"annotate_dataset.py","file_name":"annotate_dataset.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"38912737533","text":"import policy\nimport traceback\nimport logging\nimport monitoring\nimport itertools\nfrom .policy_registry import GetConfig\n\ndef ApplyPolicies(g):\n config = GetConfig()\n enabled = config.get('enabled', True)\n if enabled is not None and not enabled:\n return\n\n monitoring_db = monitoring.GetDatabase('spinbot')\n\n logging.info('Processing issues, repos')\n for i in itertools.chain(*[g.issues(), g.pull_requests(), g.repos()]):\n for p in policy.Policies():\n if p.applies(i):\n err = None\n try:\n p.apply(g, i)\n except Exception as _err:\n logging.warn('Failure applying {} to {}: {}'.format(\n p, i, traceback.format_exc()\n ))\n err = _err\n\n monitoring_db.write('issues_handled', { 'value': 1 }, tags={\n 'policy': p.id,\n 'error': err\n })\n","repo_name":"askulkarni2/spinnaker","sub_path":"spinbot/policy/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"8601042687","text":"import json\nimport os\n\nimport boto3\n\n\n# Helper function to get the extension of a 
filename.\ndef get_file_ext(path):\n return path.split('.')[-1]\n\n# Filename extension to meme-type map.\ncontent_type = {\n 'html': 'text/html',\n 'css': 'text/css',\n 'js': 'text/javascript',\n 'json': 'application/json',\n 'jpg': 'image/jpeg',\n 'jpeg': 'image/jpeg',\n 'png': 'image/png',\n 'txt': 'text/plain'\n}\n\nkeys = json.load(open('keys.json'))\n\nFILENAME = __file__\nROOT_DIR = os.path.abspath(os.path.dirname(FILENAME))\nOUTPUT_DIR = os.path.join(ROOT_DIR, keys['OUTPUT_DIR_NAME'])\nAWS_ACCESS_KEY_ID = keys['AWS_ACCESS_KEY_ID']\nAWS_SECRET_ACCESS_KEY = keys['AWS_SECRET_ACCESS_KEY']\n\n\ns3 = boto3.resource(\n 's3',\n aws_access_key_id=AWS_ACCESS_KEY_ID,\n aws_secret_access_key=AWS_SECRET_ACCESS_KEY\n)\nbucket = s3.Bucket(keys['AWS_S3_BUCKET_NAME'])\n\n# Clear bucket before uploading.\nbucket.objects.all().delete()\nprint('Cleared bucket')\n\nfor current_dir, dirs, files in os.walk(OUTPUT_DIR):\n for file_ in files:\n # Build path of the file.\n path = os.path.join(current_dir, file_)\n # Build absolute path of the file.\n s3_path = path.replace('{}/'.format(OUTPUT_DIR), '')\n\n with open(path, 'rb') as data:\n # Get mime-type of file.\n try:\n ext = get_file_ext(s3_path)\n mime_type = content_type[ext]\n # Fallback to text/plain meme-type.\n except KeyError:\n mime_type = content_type['txt']\n\n # Upload file to bucket.\n bucket.put_object(Key=s3_path, Body=data, ContentType=mime_type)\n print('Uploaded {}: ({})'.format(s3_path, mime_type))\n","repo_name":"sjbitcode/sangeeta-blog","sub_path":"upload_to_s3.py","file_name":"upload_to_s3.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"23329252750","text":"# %% Imports\nfrom __future__ import annotations\nimport json\nfrom typing import Iterable, Tuple, SupportsFloat as Numeric\nimport os\nimport requests\nfrom tqdm import tqdm\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom datetime import datetime\nimport pytz\nimport ephem\nfrom dateutil.parser import parse\n# %%\ndict_dayofweek = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\ndict_mon = ['', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n\ndef get_raw_tle_from_tstamp(ts: datetime | np.datetime64 | Iterable)->Tuple[datetime, str, str, str] | np.ndarray:\n if isinstance(ts, datetime):\n ts = datetime.utcfromtimestamp(ts.timestamp())\n elif isinstance(ts, np.datetime64):\n ts = datetime.utcfromtimestamp(int(ts)*1e-9)\n elif isinstance(ts, Iterable):\n out = []\n for t in tqdm(ts):\n out.append(get_raw_tle_from_tstamp(t))\n return np.asarray(out).T\n\n dayofweek = dict_dayofweek[ts.weekday()]\n day = ts.day\n mon = dict_mon[ts.month]\n year = ts.year\n hh = ts.hour\n mm = ts.minute\n ss = ts.second\n\n url = f'http://isstracker.com/ajax/fetchTLE.php?date={dayofweek}%2C%20{day}%20{mon}%20{year}%20{hh}%3A{mm}%3A{ss}%20GMT'\n\n content = requests.get(url)\n if content.status_code != 200:\n raise RuntimeError('Response %d'%(content.status_code))\n tledict = json.loads(content.content)\n epoch = tledict['epoch']\n lines = tledict['jsTLE'].replace('\\r', '').split('\\n')\n return (parse(epoch + '+00:00'), lines[0], lines[1], lines[2])\n\n# %%\ndef EpochFromTle(line_1: str) -> datetime:\n t = line_1[18:32] # epoch\n year = int(t[:2]) # first two digits\n if year > 56: # first launch in 57 so 57 is 1957\n year += 1900\n else: # < 56: 56 -> 2056\n year += 2000\n\n yday = int(t[2:5]) # day of year\n fday = 
float(t[5:]) # fractional day of year\n \n start = pytz.utc.localize(datetime(year, 1, 1)) # first day of the year\n tstamp = start.timestamp()\n tstamp += (yday - 1)*86400 # add seconds spent per day\n tstamp += fday*86400 # fraction of day to seconds\n return datetime.utcfromtimestamp(tstamp)\n# %%\ndef staticvars(**kwargs):\n def decorate(func):\n for key in kwargs:\n setattr(func, key, kwargs[key])\n return func\n return decorate\n\n@staticvars(tledb=None, tlefile='')\ndef ISSTleFromTstamp(ts: datetime, *, database_fname: str = None, allowdownload: bool=True, full_output: bool=False) -> Tuple[str, str] | Tuple[str, str, datetime, bool, int]:\n \"\"\"Get TLE for a given timestamp using ISS TLE database.\n\n Args:\n ts (datetime): Timestamp for evaluation, must be in UTC case of datetime.\n database_fname (str, optional): TLE dataset file (loaded using xarray.load_dataset). The dataset file must contain a timestamp (coordinate) for when the TLE is valid, and data_vars line1 and line2 containing the two TLE lines. Defaults to 'ISS_TLE_DB.nc'.\n allowdownload (bool, optional): Allow download of TLE not found in DB.\n full_output (bool, optional): Return full output (line1, line2, epoch, found, idx). Defaults to False.\n\n Raises:\n ValueError: Timestamp must be timezone aware.\n IndexError: Could not find valid TLE in the dataset (allowdownload=False, full_output=False).\n RuntimeError: Could not download valid TLE (allowdownload=True, database does not contain valid epoch).\n\n Returns:\n Tuple[str, str] | Tuple[str, str, datetime, bool, int]: (line1, line2) or (line1, line2, datetime, found, idx) if full_output=True.\n \"\"\"\n if ISSTleFromTstamp.tledb is None or ISSTleFromTstamp.tlefile != database_fname:\n if database_fname is None:\n database_fname = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ISS_TLE_DB.nc')\n ISSTleFromTstamp.tledb = xr.load_dataset(database_fname)\n ISSTleFromTstamp.tlefile = database_fname\n\n tledb: xr.Dataset = ISSTleFromTstamp.tledb\n\n tle_tstamps = tledb.timestamp.values.astype(int)*1e-9\n lows = np.diff(np.asarray(tle_tstamps < ts.timestamp(), dtype=int))\n\n if np.sum(lows) == 0: # no transitions => not found\n if allowdownload: # download is allowed\n _, _, l1, l2 = get_raw_tle_from_tstamp(ts) # get the TLE\n if full_output:\n return (l1, l2, ts, False, -1)\n else: return (l1, l2)\n elif full_output:\n return (None, None, ts, False, -1)\n else:\n raise IndexError('Could not find valid TLE.') # no can do\n else: # already in DB\n idx = np.where(lows != 0)[0]\n dts = tledb.timestamp.values[idx]\n tles = tledb.sel(dict(timestamp=dts))\n l1 = tles.line1.values[0]\n l2 = tles.line2.values[0]\n if full_output:\n return (l1, l2, dts[0], True, idx)\n else: return (l1, l2)\n# %%\ndef ISSLatLonFromTstamp(ts: datetime | np.datetime64, *, database_fname: str = None, allowdownload: bool=True) -> Tuple[Numeric, Numeric, Numeric]:\n \"\"\"Get latitude, longitude for a given timestamp using ISS TLE database.\n\n Args:\n ts (datetime | np.datetime64): Timestamp for evaluation, must be timezone aware in case of datetime.\n database_fname (str, optional): TLE dataset file (loaded using xarray.load_dataset). The dataset file must contain a timestamp (coordinate) for when the TLE is valid, and data_vars line1 and line2 containing the two TLE lines. 
Defaults to 'ISS_TLE_DB.nc'.\n allowdownload (bool, optional): Allow download of TLE not found in DB.\n\n Raises:\n ValueError: Timestamp must be timezone aware.\n IndexError: Could not find valid TLE in the dataset (allowdownload=False).\n RuntimeError: Could not download valid TLE (allowdownload=True, database does not contain valid epoch).\n\n Returns:\n Tuple[Numeric, Numeric, Numeric]: (latitude, longitude, altitude) in degrees (-180, 180) and km.\n \"\"\"\n if isinstance(ts, datetime):\n if ts.tzinfo is None:\n raise ValueError('Timestamp must be timezone aware')\n ts = datetime.utcfromtimestamp(ts.astimezone(tz = pytz.utc).timestamp())\n elif isinstance(ts, np.datetime64):\n ts = datetime.utcfromtimestamp(int(ts)*1e-9)\n l1, l2 = ISSTleFromTstamp(ts, database_fname=database_fname, allowdownload=allowdownload)\n tle = ephem.readtle('GENERIC', l1, l2)\n try:\n tle.compute(ts)\n except Exception as e:\n outstr = f'Could not compute TLE for {ts} (epoch {EpochFromTle(l1)}): {str(e)}\\n'\n raise RuntimeError(outstr)\n return (np.rad2deg(float(tle.sublat)), np.rad2deg(float(tle.sublong)), tle.elevation*1e-3)\n# %%\nif __name__ == '__main__':\n ts = pytz.timezone('US/Eastern').localize(datetime(2017, 4, 1, 0, 0, 1))\n print(ts, ISSTleFromTstamp(ts, full_output=True)[2:])\n print(ts, ISSLatLonFromTstamp(ts))\n# %%\n","repo_name":"sunipkm/skmpython","sub_path":"skmpython/SatPosPredict/_pospredict.py","file_name":"_pospredict.py","file_ext":"py","file_size_in_byte":6902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"39265794655","text":"from loader import bot\nfrom utils.json_worker.users import delete_admin, get_admins, get_users, give_admin\n\n\nasync def action_Aadd(callback):\n admin_to_add_user_id = callback.data.split('?')[1]\n sender_user_id = callback.data.split('?')[2]\n add_admin_username = (await get_users())[admin_to_add_user_id]['username']\n await give_admin(admin_to_add_user_id)\n await callback.answer(\"Действие подтверждено\")\n await callback.message.delete()\n await bot.send_message(chat_id=sender_user_id,\n text=f\"Ваше предложение добавить адммина {add_admin_username} было выполнено.\")","repo_name":"SNI4/tg_filmbot","sub_path":"handlers/callbacks_actions/Aadd.py","file_name":"Aadd.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"21778645218","text":"def uniquePaths(m, n):\n matrix = []\n matrix.append([1]*n)\n for i in range(1, m):\n row = [0]*n\n for j in range(n):\n if j == 0: row[j] = 1\n else: row[j] = row[j-1] + matrix[i-1][j]\n matrix.append(row)\n print(matrix)\n return matrix[-1][-1]\n\ndata = [\n (3, 7),\n (3, 2),\n (7, 3),\n (3, 3)\n]\nfor test_tuple in data:\n print(uniquePaths(*test_tuple))\n","repo_name":"pratikdk/dsaprobs_s1","sub_path":"array/12_62_unique_paths.py","file_name":"12_62_unique_paths.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"6898816073","text":"import pickle\nimport utils.stringutils as stringutils\nimport os\nfrom utils.params import *\n\nwith open(os.path.join(ROOT_DIR, \"bin_blobs/JMdict_e_hashtable.pkl\"), 'rb') as f:\n word_dict = pickle.load(f)\n\nwith open(os.path.join(ROOT_DIR, \"bin_blobs/kanjidic2_hashtable.pkl\"), 'rb') as f:\n kanji_dict = pickle.load(f)\n\nwith open(os.path.join(ROOT_DIR, \"translator_subsystem/masked_kanji.pkl\"), 'rb') as f:\n 
masked_kanji_set = pickle.load(f)\n\n\nclass NoValidCombinationOfReadingsFoundError(Exception):\n pass\n\n\ndef find_exact_match(keb: str):\n return word_dict[keb]\n\n\ndef translate_sequence_recursively(sequence: str, n_leading_kanji: int):\n # print(\"trying to translate {}\".format(sequence))\n if sequence == \"\" or n_leading_kanji <= 0:\n return [(len(sequence), sequence, sequence)]\n current_len = len(sequence)\n while current_len > 0:\n try:\n reading = find_exact_match(sequence[:current_len])\n remaining_readings = translate_sequence_recursively(sequence[current_len:], n_leading_kanji - current_len)\n remaining_readings.append((current_len, sequence[:current_len], reading))\n return remaining_readings\n except KeyError:\n current_len -= 1\n raise KeyError\n\n\ndef translate_sequence(sequence: str, n_leading_kanji: int):\n current_len = len(sequence)\n while current_len > 0:\n try:\n translation = translate_sequence_recursively(sequence[:current_len], min(n_leading_kanji, current_len))\n translation.reverse()\n # translation = translation[:-1]\n return current_len, translation\n except KeyError:\n current_len -= 1\n return 0, []\n\n\ndef requires_masking(kanji: str):\n return kanji in masked_kanji_set\n\n\ndef find_masking_positions(keb: str):\n masking_positions = []\n for i_tmp in range(len(keb)):\n if requires_masking(keb[i_tmp]):\n masking_positions.append(i_tmp)\n return masking_positions\n\n\ndef segment_reading_recursively(reading: str, keb: str, include_name_readings: bool = False):\n\n # end of recursion reached, create list and pass upward if reading characters were used up,\n # raise exception otherwise\n if keb == \"\":\n if reading == \"\":\n return []\n else:\n raise NoValidCombinationOfReadingsFoundError\n\n # process the last kanji in keb\n kanji = keb[len(keb)-1]\n # get all readings\n readings, readings_nanori = kanji_dict[kanji]\n # if name readings are to be included, add them to the rear of the list of normal readings\n if include_name_readings:\n readings += readings_nanori\n # find a matching reading that allows for the rest of the word to still be segmented correctly\n for partial_reading in readings:\n # if this reading of the kanji fits the end of the current portion of the word, try it\n if reading.endswith(partial_reading):\n try:\n # try to segment the remainder of the word\n segmented_reading = segment_reading_recursively(\n reading[:-len(partial_reading)], keb[:-1], include_name_readings=include_name_readings\n )\n # if successful, append this partial_reading and return\n segmented_reading.append(partial_reading)\n return segmented_reading\n # if the rest of the reading was not correctly segmentable, pass this partial_reading and try the next one\n except NoValidCombinationOfReadingsFoundError:\n continue\n\n # none of the readings for this kanji fitted or allowed for the remainder of the word to be segmented,\n # pass exception upward\n raise NoValidCombinationOfReadingsFoundError\n\n\ndef segment_reading(reading: str, keb: str, n_leading_kanji: int, include_name_readings: bool = False):\n if n_leading_kanji < len(keb):\n number_of_trailing_hiragana = len(keb) - n_leading_kanji # >= 1\n segmented_reading = segment_reading_recursively(\n reading[:-number_of_trailing_hiragana], keb[:-number_of_trailing_hiragana],\n include_name_readings=include_name_readings\n )\n # if successful, append this partial_reading and return\n segmented_reading.append(reading[-number_of_trailing_hiragana:])\n return segmented_reading\n else:\n return 
segment_reading_recursively(reading, keb, include_name_readings=include_name_readings)\n\n\ndef mask_word(reading: str, keb: str, n_leading_kanji: int):\n masking_positions = find_masking_positions(keb)\n if not masking_positions: # eq. masking_positions == []\n return reading\n try:\n segmented_reading = segment_reading(reading, keb, n_leading_kanji)\n except NoValidCombinationOfReadingsFoundError:\n segmented_reading = segment_reading(reading, keb, n_leading_kanji, include_name_readings=True)\n\n # mask reading of hidden kanji\n for position in masking_positions:\n # \"maru\" for censored character. note that one '〇' can cover several kana, but always exactly one kanji\n segmented_reading[position] = \"〇\"\n\n # join modified segmented reading into single string\n masked_reading = \"\"\n for partial_reading in segmented_reading:\n masked_reading += partial_reading\n\n return masked_reading\n\n\ndef translate_and_mask_sequence(sequence: str, n_leading_kanji: int):\n used_chars, translated_sequence = translate_sequence(sequence=sequence, n_leading_kanji=n_leading_kanji)\n masked_sequence = []\n n_leading_lanji_tmp = n_leading_kanji\n for word in translated_sequence:\n n_leading_kanji_local = min(len(word[1]), n_leading_lanji_tmp)\n try:\n masked_reading = mask_word(reading=word[2], keb=word[1], n_leading_kanji=n_leading_kanji_local)\n except NoValidCombinationOfReadingsFoundError: # if no valid combination of basic readings found...\n if all(requires_masking(kanji) for kanji in word[1][:n_leading_kanji_local]):\n masked_reading = \"\".join([\"〇\" for _ in range(n_leading_kanji_local)])\n else:\n masked_reading = word[2]\n # reduce leading kanji count so it representes the number of leading kanji in the remaining sequence\n n_leading_lanji_tmp -= min(len(word[1]), n_leading_lanji_tmp)\n masked_sequence.append((word[0], word[1], masked_reading))\n return masked_sequence # used_chars, masked_sequence\n\n\ndef translate_and_mask_line(line: str):\n \"\"\"\n translate and mask a whole line (several concatenated sequences)\n :param line: the line to translate\n :return: the list of translated and masked sequences, each with their length (char count) and leading kanji count\n \"\"\"\n current_sequence_start = 0\n last_char_was_kana = False\n translated_and_masked_sequences = []\n number_of_kanji = 0\n # process each char\n for i in range(len(line)):\n # if current char is kana, mark it as such and continue\n if stringutils.is_kana(line[i]):\n last_char_was_kana = True\n # if it is kanji...\n else:\n # ... and the last char was kana, this is the beginning of a new sequence, and the old/finished sequence\n # can be processed and saved\n if last_char_was_kana:\n # translate this sequence and save it\n translated_and_masked_sequences.append(\n (\n translate_and_mask_sequence(line[current_sequence_start:i], number_of_kanji),\n i - current_sequence_start,\n number_of_kanji\n )\n )\n # reset kanji counter, set start index of next sequence\n current_sequence_start = i\n number_of_kanji = 1\n # ... 
otherwise, just count the kanji\n else:\n number_of_kanji += 1\n # and mark this char as kanji for the next iteration\n last_char_was_kana = False\n # translate the last sequence and save it\n translated_and_masked_sequences.append(\n (\n translate_and_mask_sequence(line[current_sequence_start:], number_of_kanji),\n len(line) - current_sequence_start,\n number_of_kanji\n )\n )\n return translated_and_masked_sequences\n\n\n\ndef overwrite_masked_kanji_set(new_set: set):\n global masked_kanji_set\n masked_kanji_set = new_set\n\n\ndef reset_masked_kanji_set():\n global masked_kanji_set\n with open(os.path.join(ROOT_DIR, \"translator_subsystem/masked_kanji.pkl\"), 'rb') as f:\n masked_kanji_set = pickle.load(f)\n\n\nif __name__ == \"__main__\":\n\n print(translate_and_mask_sequence(\"日本語\", 3))\n","repo_name":"011000101101/VRAR_project","sub_path":"translator_subsystem/lut_translator.py","file_name":"lut_translator.py","file_ext":"py","file_size_in_byte":8733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"27630282921","text":"#!/usr/bin/env python\r\n# coding: utf-8\r\n\r\n# #### Imports\r\n\r\n# In[1]:\r\n\r\nfrom __future__ import print_function\r\n\r\nimport sys\r\nimport os\r\nimport urllib\r\nimport gzip\r\nimport pickle\r\nimport numpy as np\r\nfrom os.path import dirname\r\nimport random\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.nn.init as init\r\nimport torch.utils.data\r\nimport torchvision\r\nimport torchvision.datasets as dset\r\nimport torchvision.transforms as transforms\r\nimport torch.nn.functional as F\r\n\r\n\r\n\r\n\r\n# In[2]:\r\n\r\n\r\n# #### Variables and parameters\r\n\r\n# In[3]:\r\n\r\n# Sourse and target datasets can be set here manually\r\n\r\ns = 'mnist'\r\nt = 'usps'\r\nroot_dir = '~/dataset'\r\n\r\nprocs = 2\r\nbatchsize = 128 \r\ntot_iter = 30000 # number of epochs \r\nz_dim = 100 # dimension of random noise\r\nlrCl = 0.0002 # learning rate \r\nlrGen = 0.0002 # learning rate \r\nbeta1 = 0.5 # beta1 for adam. 
0.5)\r\nbeta2 = 0.999 # beta2 for adam\r\nweight_decay = 0.0005 # weight_decay\r\n\r\nt_iter = 500 # testiter\r\n\r\n\r\n# #### Dataset preprocessing\r\n\r\n# In[4]:\r\n\r\n\r\nDataAttDict = {\r\n'mnist': (1,28),\r\n'mnistm': (3,28),\r\n'usps': (1,28),\r\n'svhn': (3,32),\r\n}\r\n\r\n\r\n# In[5]:\r\n\r\n\r\nclass Logger(object):\r\n def __init__(self, filepath = \"./log.txt\", mode = \"w\", stdout = None):\r\n if stdout==None:\r\n self.terminal = sys.stdout\r\n else:\r\n self.terminal = stdout\r\n os.makedirs(dirname(filepath), exist_ok=True)\r\n self.log = open(filepath, mode)\r\n\r\n def write(self, message):\r\n self.terminal.write(message)\r\n self.log.write(message)\r\n self.log.flush()\r\n os.fsync(self.log)\r\n\r\n def flush(self):\r\n #this flush method is needed for python 3 compatibility.\r\n #this handles the flush command by doing nothing.\r\n #you might want to specify some extra behavior here.\r\n pass\r\n\r\n# def InfIter(_loader):\r\n# return iter(InfIter_C(_loader))\r\n\r\nclass InfIter:\r\n def __init__(self,_loader):\r\n self._loader = _loader\r\n self._iter = iter(_loader)\r\n def __iter__(self):\r\n return self\r\n def __next__(self):\r\n try:\r\n return self._iter.next()\r\n except StopIteration:\r\n self._iter = iter(self._loader)\r\n return self._iter.next()\r\n\r\n##### define dataset\r\n\"\"\"Dataset setting and data loader for MNIST-M.\r\nModified from\r\nhttps://github.com/pytorch/vision/blob/master/torchvision/datasets/mnist.py\r\nCREDIT: https://github.com/corenel\r\n\"\"\"\r\nimport errno\r\nimport os\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nfrom PIL import Image\r\n\r\n\r\nclass MNISTM(data.Dataset):\r\n \"\"\"`MNIST-M Dataset.\"\"\"\r\n\r\n url = \"https://github.com/VanushVaswani/keras_mnistm/releases/download/1.0/keras_mnistm.pkl.gz\"\r\n\r\n raw_folder = 'raw'\r\n processed_folder = 'processed'\r\n training_file = 'mnist_m_train.pt'\r\n test_file = 'mnist_m_test.pt'\r\n\r\n def __init__(self,\r\n root, mnist_root=\"data\",\r\n train=True,\r\n transform=None, target_transform=None,\r\n download=False):\r\n \"\"\"Init MNIST-M dataset.\"\"\"\r\n super(MNISTM, self).__init__()\r\n self.root = os.path.expanduser(root)\r\n self.mnist_root = os.path.expanduser(mnist_root)\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n self.train = train # training set or test set\r\n\r\n if download:\r\n self.download()\r\n\r\n if not self._check_exists():\r\n raise RuntimeError('Dataset not found.' 
+\r\n ' You can use download=True to download it')\r\n\r\n if self.train:\r\n self.train_data, self.train_labels = torch.load(os.path.join(self.root,\r\n self.processed_folder,\r\n self.training_file))\r\n else:\r\n self.test_data, self.test_labels = torch.load(os.path.join(self.root,\r\n self.processed_folder,\r\n self.test_file))\r\n\r\n def __getitem__(self, index):\r\n \"\"\"Get images and target for data loader.\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (image, target) where target is index of the target class.\r\n \"\"\"\r\n if self.train:\r\n img, target = self.train_data[index], self.train_labels[index]\r\n else:\r\n img, target = self.test_data[index], self.test_labels[index]\r\n\r\n # doing this so that it is consistent with all other datasets\r\n # to return a PIL Image\r\n img = Image.fromarray(img.squeeze().numpy(), mode='RGB')\r\n\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n\r\n if self.target_transform is not None:\r\n target = self.target_transform(target)\r\n\r\n return img, target\r\n\r\n def __len__(self):\r\n \"\"\"Return size of dataset.\"\"\"\r\n if self.train:\r\n return len(self.train_data)\r\n else:\r\n return len(self.test_data)\r\n\r\n def _check_exists(self):\r\n return os.path.exists(os.path.join(self.root,\r\n self.processed_folder,\r\n self.training_file)) and \\\r\n os.path.exists(os.path.join(self.root,\r\n self.processed_folder,\r\n self.test_file))\r\n\r\n def download(self):\r\n \"\"\"Download the MNIST data.\"\"\"\r\n # import essential packages\r\n from six.moves import urllib\r\n import gzip\r\n import pickle\r\n from torchvision import datasets\r\n\r\n # check if dataset already exists\r\n if self._check_exists():\r\n return\r\n\r\n # make data directories\r\n try:\r\n os.makedirs(os.path.join(self.root, self.raw_folder))\r\n os.makedirs(os.path.join(self.root, self.processed_folder))\r\n except OSError as e:\r\n if e.errno == errno.EEXIST:\r\n pass\r\n else:\r\n raise\r\n\r\n # download pkl files\r\n print('Downloading ' + self.url)\r\n filename = self.url.rpartition('/')[2]\r\n file_path = os.path.join(self.root, self.raw_folder, filename)\r\n if not os.path.exists(file_path.replace('.gz', '')):\r\n data = urllib.request.urlopen(self.url)\r\n with open(file_path, 'wb') as f:\r\n f.write(data.read())\r\n with open(file_path.replace('.gz', ''), 'wb') as out_f, gzip.GzipFile(file_path) as zip_f:\r\n out_f.write(zip_f.read())\r\n os.unlink(file_path)\r\n\r\n # process and save as torch files\r\n print('Processing...')\r\n\r\n # load MNIST-M images from pkl file\r\n with open(file_path.replace('.gz', ''), \"rb\") as f:\r\n mnist_m_data = pickle.load(f, encoding='bytes')\r\n mnist_m_train_data = torch.ByteTensor(mnist_m_data[b'train'])\r\n mnist_m_test_data = torch.ByteTensor(mnist_m_data[b'test'])\r\n\r\n # get MNIST labels\r\n mnist_train_labels = datasets.MNIST(root=self.mnist_root,\r\n train=True,\r\n download=True).train_labels\r\n mnist_test_labels = datasets.MNIST(root=self.mnist_root,\r\n train=False,\r\n download=True).test_labels\r\n\r\n # save MNIST-M dataset\r\n training_set = (mnist_m_train_data, mnist_train_labels)\r\n test_set = (mnist_m_test_data, mnist_test_labels)\r\n with open(os.path.join(self.root,\r\n self.processed_folder,\r\n self.training_file), 'wb') as f:\r\n torch.save(training_set, f)\r\n with open(os.path.join(self.root,\r\n self.processed_folder,\r\n self.test_file), 'wb') as f:\r\n torch.save(test_set, f)\r\n\r\n print('Done!')\r\n\r\n\r\nclass USPS(data.Dataset):\r\n \"\"\"USPS 
Dataset.\r\n Args:\r\n root (string): Root directory of dataset where dataset file exist.\r\n train (bool, optional): If True, resample from dataset randomly.\r\n download (bool, optional): If true, downloads the dataset\r\n from the internet and puts it in root directory.\r\n If dataset is already downloaded, it is not downloaded again.\r\n transform (callable, optional): A function/transform that takes in\r\n an PIL image and returns a transformed version.\r\n E.g, ``transforms.RandomCrop``\r\n \"\"\"\r\n\r\n url = \"https://raw.githubusercontent.com/mingyuliutw/CoGAN/master/cogan_pytorch/data/uspssample/usps_28x28.pkl\"\r\n\r\n def __init__(self, root, train=True, transform=None, download=False):\r\n \"\"\"Init USPS dataset.\"\"\"\r\n # init params\r\n self.root = os.path.expanduser(root)\r\n self.filename = \"usps_28x28.pkl\"\r\n self.train = train\r\n # Num of Train = 7438, Num ot Test 1860\r\n self.transform = transform\r\n self.dataset_size = None\r\n\r\n # download dataset.\r\n if download:\r\n self.download()\r\n if not self._check_exists():\r\n raise RuntimeError(\"Dataset not found.\" +\r\n \" You can use download=True to download it\")\r\n\r\n self.train_data, self.train_labels = self.load_samples()\r\n if self.train:\r\n total_num_samples = self.train_labels.shape[0]\r\n indices = np.arange(total_num_samples)\r\n np.random.shuffle(indices)\r\n self.train_data = self.train_data[indices[0:self.dataset_size], ::]\r\n self.train_labels = self.train_labels[indices[0:self.dataset_size]]\r\n self.train_data = self.train_data.transpose(\r\n (0, 2, 3, 1)) # convert to HWC\r\n\r\n def __getitem__(self, index):\r\n \"\"\"Get images and target for data loader.\r\n Args:\r\n index (int): Index\r\n Returns:\r\n tuple: (image, target) where target is index of the target class.\r\n \"\"\"\r\n img, label = self.train_data[index, ::], self.train_labels[index]\r\n if self.transform is not None:\r\n img = self.transform(img)\r\n label = np.long(label)\r\n # label = torch.LongTensor([np.int64(label).item()])\r\n # label = torch.FloatTensor([label.item()])\r\n return img, label\r\n\r\n def __len__(self):\r\n \"\"\"Return size of dataset.\"\"\"\r\n return self.dataset_size\r\n\r\n def _check_exists(self):\r\n \"\"\"Check if dataset is download and in right place.\"\"\"\r\n return os.path.exists(os.path.join(self.root, self.filename))\r\n\r\n def download(self):\r\n \"\"\"Download dataset.\"\"\"\r\n filename = os.path.join(self.root, self.filename)\r\n dirname = os.path.dirname(filename)\r\n if not os.path.isdir(dirname):\r\n os.makedirs(dirname)\r\n if os.path.isfile(filename):\r\n return\r\n print(\"Download %s to %s\" % (self.url, os.path.abspath(filename)))\r\n urllib.request.urlretrieve(self.url, filename)\r\n print(\"[DONE]\")\r\n return\r\n\r\n def load_samples(self):\r\n \"\"\"Load sample images from dataset.\"\"\"\r\n filename = os.path.join(self.root, self.filename)\r\n f = gzip.open(filename, \"rb\")\r\n data_set = pickle.load(f, encoding=\"bytes\")\r\n f.close()\r\n if self.train:\r\n images = data_set[0][0]\r\n labels = data_set[0][1]\r\n self.dataset_size = labels.shape[0]\r\n else:\r\n images = data_set[1][0]\r\n labels = data_set[1][1]\r\n self.dataset_size = labels.shape[0]\r\n return images, labels\r\n\r\n\r\n# In[18]:\r\n\r\n\r\ndef createDataset(dataname, train):\r\n if dataname == \"mnist\":\r\n return dset.MNIST(root=root_dir+'/mnist', train=train, download=True,\r\n transform=transforms.Compose([\r\n transforms.ToTensor()\r\n ]))\r\n if dataname == \"mnistm\":\r\n return 
MNISTM(root=root_dir+'/mnistm', mnist_root=dataroot+'/mnist', train=train, download=True,\r\n transform=transforms.Compose([\r\n transforms.ToTensor()\r\n ]))\r\n if dataname == \"usps\":\r\n return USPS(root=root_dir+'/usps', train=train, download=True,\r\n transform=transforms.Compose([\r\n transforms.ToTensor()\r\n ]))\r\n if dataname == \"svhn\":\r\n return dset.SVHN(root=root_dir+'/svhn', split=(\"train\" if train else \"test\"), download=True,\r\n transform=transforms.Compose([\r\n transforms.Resize(28),\r\n transforms.ToTensor()\r\n ]))\r\n\r\n\r\n# #### Loss functions\r\n\r\n# In[7]:\r\n\r\n\r\ndef loss(x):\r\n return (F.softplus(x)).mean()\r\n\r\n\r\n# #### Neural net class\r\n\r\n# In[33]:\r\n\r\n\r\nclass CoDis28x28(nn.Module):\r\n def __init__(self, ch_s, imsize_s, ch_t, imsize_t):\r\n super(CoDis28x28, self).__init__()\r\n self.conv0_s = nn.Conv2d(ch_s, 20, kernel_size=5, stride=1, padding=0)\r\n self.conv0_t = nn.Conv2d(ch_t, 20, kernel_size=5, stride=1, padding=0)\r\n self.pool0 = nn.MaxPool2d(kernel_size=2)\r\n self.conv1 = nn.Conv2d(20, 50, kernel_size=5, stride=1, padding=0)\r\n self.pool1 = nn.MaxPool2d(kernel_size=2)\r\n self.conv2 = nn.Conv2d(50, 500, kernel_size=4, stride=1, padding=0)\r\n self.prelu2 = nn.PReLU()\r\n self.conv30_s = nn.Conv2d(500, 100, kernel_size=1, stride=1, padding=0)\r\n self.prelu3_s = nn.PReLU()\r\n self.conv31_s = nn.Conv2d(100, 1, kernel_size=1, stride=1, padding=0)\r\n self.conv30_t = nn.Conv2d(500, 100, kernel_size=1, stride=1, padding=0)\r\n self.prelu3_t = nn.PReLU()\r\n self.conv31_t = nn.Conv2d(100, 1, kernel_size=1, stride=1, padding=0)\r\n self.conv_cl = nn.Conv2d(500, 10, kernel_size=1, stride=1, padding=0)\r\n\r\n def forward(self, x_s, x_t):\r\n h0_s = self.pool0(self.conv0_s(x_s))\r\n h0_t = self.pool0(self.conv0_t(x_t))\r\n h1_s = self.pool1(self.conv1(h0_s))\r\n h1_t = self.pool1(self.conv1(h0_t))\r\n h2_s = self.prelu2(self.conv2(h1_s))\r\n h2_t = self.prelu2(self.conv2(h1_t))\r\n h3_s = self.conv31_s(self.prelu3_s(self.conv30_s(h2_s)))\r\n h3_t = self.conv31_t(self.prelu3_t(self.conv30_t(h2_t)))\r\n return h3_s, h2_s, h0_s, h3_t, h2_t, h0_t\r\n\r\n def pred_s(self, x_s):\r\n h0_s = self.pool0(self.conv0_s(x_s))\r\n h1_s = self.pool1(self.conv1(h0_s))\r\n h2_s = self.prelu2(self.conv2(h1_s))\r\n h3_s = self.conv_cl(h2_s)\r\n return h3_s.squeeze(), h2_s.squeeze()\r\n\r\n def pred_t(self, x_t):\r\n h0_t = self.pool0(self.conv0_t(x_t))\r\n h1_t = self.pool1(self.conv1(h0_t))\r\n h2_t = self.prelu2(self.conv2(h1_t))\r\n h3_t = self.conv_cl(h2_t)\r\n return h3_t.squeeze(), h2_t.squeeze()\r\n\r\n def pred_fromrep(self, h2):\r\n return self.conv_cl(h2).squeeze()\r\n\r\n\r\n# Generator Model\r\nclass CoGen28x28(nn.Module):\r\n def __init__(self, ch_s, imsize_s, ch_t, imsize_t, zsize):\r\n super(CoGen28x28, self).__init__()\r\n self.dconv0 = nn.ConvTranspose2d(zsize, 1024, kernel_size=4, stride=1)\r\n self.bn0 = nn.BatchNorm2d(1024, affine=False)\r\n self.prelu0 = nn.PReLU()\r\n self.dconv1 = nn.ConvTranspose2d(1024, 512, kernel_size=3, stride=2, padding=1)\r\n self.bn1 = nn.BatchNorm2d(512, affine=False)\r\n self.prelu1 = nn.PReLU()\r\n self.dconv2 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1)\r\n self.bn2 = nn.BatchNorm2d(256, affine=False)\r\n self.prelu2 = nn.PReLU()\r\n self.dconv3 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1)\r\n self.bn3 = nn.BatchNorm2d(128, affine=False)\r\n self.prelu3 = nn.PReLU()\r\n self.dconv4_s = nn.ConvTranspose2d(128, ch_s, kernel_size=6, stride=1, 
padding=1)\r\n self.dconv4_t = nn.ConvTranspose2d(128, ch_t, kernel_size=6, stride=1, padding=1)\r\n self.sig4_s = nn.Sigmoid()\r\n self.sig4_t = nn.Sigmoid()\r\n\r\n def forward(self, z):\r\n z = z.view(z.size(0), z.size(1), 1, 1)\r\n h0 = self.prelu0(self.bn0(self.dconv0(z)))\r\n h1 = self.prelu1(self.bn1(self.dconv1(h0)))\r\n h2 = self.prelu2(self.bn2(self.dconv2(h1)))\r\n h3 = self.prelu3(self.bn3(self.dconv3(h2)))\r\n out_s = self.sig4_s(self.dconv4_s(h3))\r\n out_t = self.sig4_t(self.dconv4_t(h3))\r\n return out_s, out_t\r\n\r\n\r\n# #### To be determined\r\n\r\n# In[9]:\r\n\r\n\r\ndef xavier_weights_init(m):\r\n classname = m.__class__.__name__\r\n # print(classname)\r\n if classname.find('Conv') != -1:\r\n init.xavier_uniform_(m.weight, gain=np.sqrt(2))\r\n init.constant_(m.bias, 0.1)\r\n\r\nclass C_contour(nn.Module):\r\n def __init__(self):\r\n super(C_contour, self).__init__()\r\n self.L1 = nn.Conv2d(3, 1, 3, bias=False)\r\n C1 = np.array([[-1,0,1],[-2,0,2], [-1,0,1]])\r\n C1 = torch.from_numpy(C1)\r\n list(self.L1.parameters())[0].data[0,:,:,:] = C1.unsqueeze(0)\r\n list(self.L1.parameters())[0].requires_grad = False\r\n\r\n self.L2 = nn.Conv2d(3, 1, 3, bias=False)\r\n C2 = np.array([[1,2,1],[0,0,0], [-1,-2,-1]])\r\n C2 = torch.from_numpy(C2)\r\n list(self.L2.parameters())[0].data[0,:,:,:] = C2.unsqueeze(0)\r\n list(self.L2.parameters())[0].requires_grad = False\r\n\r\n def forward(self, x, y):\r\n if x.shape[1] == 1:\r\n x = torch.cat([x,x,x],1)\r\n if y.shape[1] == 1:\r\n y = torch.cat([y,y,y],1)\r\n imgx1 = self.L1(x)\r\n imgx2 = self.L2(x)\r\n\r\n imgy1 = self.L1(y)\r\n imgy2 = self.L2(y)\r\n# img = img.view(img.shape[0], *img_shape)\r\n return torch.norm(imgx1-imgy1)+torch.norm(imgx2-imgy2)\r\n\r\n\r\n# ##### Test function\r\n\r\n# In[10]:\r\n\r\n\r\ndef test_f(verbose = True, print_period = 100):\r\n # VALIDATION\r\n j = 0\r\n cum_acc = 0\r\n total_len = 0\r\n\r\n netCl.eval()\r\n for y, y_label in dataloader_y:\r\n j = j+1\r\n\r\n y = y.to(device)\r\n y_label = y_label.to(device)\r\n\r\n # compute output\r\n outputs, _ = netCl.pred_t(y)\r\n test_loss = criterion(outputs, y_label)\r\n\r\n pred = torch.argmax(outputs,dim=-1)\r\n test_acc = torch.sum(pred==y_label).item()\r\n cum_acc = cum_acc+test_acc\r\n test_acc = test_acc/len(pred)\r\n total_len += len(pred)\r\n if j%print_period==0 and verbose:\r\n print('Iter: [%d/%d], Test Loss: %.8f, Test Acc: %.2f' % (j,len(dataloader_y),test_loss, test_acc))\r\n print(' Test acc for the epoch: %.8f\\n##############################################' % (cum_acc/total_len))\r\n return cum_acc/total_len\r\n\r\n\r\n# ##### Visualization and learning rate update\r\n\r\n# In[53]:\r\n\r\n\r\ndef show_tsne(xr, xl, yr, yl, xfr, xfl, yfr, yfl, epoch):\r\n import sklearn\r\n from sklearn.manifold import TSNE\r\n tsne = TSNE(n_components=2, random_state=0)\r\n X = np.concatenate((xr, yr, xfr, yfr), axis=0)\r\n X_2d = tsne.fit_transform(X)\r\n from matplotlib import pyplot as plt\r\n plt.figure(figsize=(6, 5))\r\n colors = np.array(['r', 'g', 'b', 'c', 'm', 'y', 'k', 'grey', 'orange', 'purple'])\r\n plt.scatter(X_2d[:batchsize, 0], X_2d[:batchsize, 1], c=colors[xl], marker=\"o\", label=[\"source\"])\r\n for i in range(batchsize):\r\n plt.text(X_2d[batchsize+i, 0], X_2d[batchsize+i, 1], str(yl[i]), color=colors[yl[i]], label=\"target\")\r\n plt.scatter(X_2d[batchsize:batchsize*2, 0], X_2d[batchsize:batchsize*2, 1], c=colors[yl], marker=\"*\", label=[\"target\"])\r\n plt.scatter(X_2d[batchsize*2:batchsize*3, 0], X_2d[batchsize*2:batchsize*3, 1], 
marker=\"_\", c=colors[xfl], label=\"source fake\")\r\n plt.scatter(X_2d[batchsize*3:batchsize*4, 0], X_2d[batchsize*3:batchsize*4, 1], marker=\"+\", c=colors[yfl], label=\"target fake\")\r\n plt.legend()\r\n plt.savefig(experiment +'/tsne_%05d.pdf'%(epoch), bbox_inches='tight',format=\"pdf\", dpi = 300)\r\n plt.close()\r\n\r\n\r\n\r\n# ## Main\r\n\r\n# ##### Directories organization\r\n\r\n# In[13]:\r\n\r\n\r\n\r\nname = s + \"two\" + t\r\nexperiment = \"Experiment_DASPOT/\" + name\r\nos.system('mkdir {0}'.format(experiment))\r\nstdout_backup = sys.stdout\r\nsys.stdout = Logger(experiment +\"/log.txt\",\"w\", stdout_backup)\r\nmanualseed = random.randint(1, 10000) \r\n\r\nrandom.seed(manualseed)\r\ntorch.manual_seed(manualseed)\r\n\r\n\r\n# ##### Cuda\r\n\r\n# In[14]:\r\n\r\n\r\nif torch.cuda.is_available():\r\n device = torch.device(\"cuda:0\")\r\n import torch.backends.cudnn as cudnn\r\n cudnn.benchmark = True\r\nelse:\r\n device = torch.device(\"cpu\")\r\n\r\n\r\n# ##### Dataset loaders\r\n\r\n# In[19]:\r\n\r\n\r\ndataset1 = createDataset(s, True)\r\ndataset2 = createDataset(t, True)\r\ndataset3 = createDataset(t, False)\r\n\r\ndataloader_x = torch.utils.data.DataLoader(dataset1, batch_size=batchsize,\r\n shuffle=True, num_workers=int(procs), pin_memory=True, drop_last = True)\r\ndataloader_y_ans = torch.utils.data.DataLoader(dataset2, batch_size=batchsize,\r\n shuffle=True, num_workers=int(procs), pin_memory=True, drop_last = True)\r\ndataloader_y = torch.utils.data.DataLoader(dataset3, batch_size=batchsize,\r\n shuffle=True, num_workers=int(procs), pin_memory=True)\r\n\r\n\r\n# ##### Nets\r\n\r\n# In[21]:\r\n\r\n\r\nnetCl = CoDis28x28(*DataAttDict[s],*DataAttDict[t]).to(device)\r\nnetD_1 = nn.Conv2d(20, 1, kernel_size=12, stride=1, padding=0).to(device)\r\nnetD_2 = nn.Sequential(\r\n nn.Conv2d(500, 100, kernel_size=1, stride=1, padding=0),\r\n nn.PReLU(),\r\n nn.Conv2d(100, 1, kernel_size=1, stride=1, padding=0)\r\n ).to(device)\r\nnetGen = CoGen28x28(*DataAttDict[s],*DataAttDict[t], zsize=z_dim).to(device)\r\n\r\n\r\n# ##### Optimizer setup\r\n\r\n# In[26]:\r\n\r\n\r\noptimizerCl = optim.Adam([p for p in netCl.parameters() if p.requires_grad], lr=lrCl, betas=(beta1, beta2), weight_decay=weight_decay)\r\noptimizerD = optim.Adam([p for p in netD_1.parameters() if p.requires_grad]+[p for p in netD_2.parameters() if p.requires_grad], lr=lrCl, betas=(beta1, beta2), weight_decay=weight_decay)\r\noptimizerGen = optim.Adam([p for p in netGen.parameters() if p.requires_grad], lr=lrGen, betas=(beta1, beta2), weight_decay=weight_decay)\r\n\r\n\r\n# ##### Loss criterion \r\n\r\n# In[27]:\r\n\r\n\r\ncriterion = nn.CrossEntropyLoss()\r\nc_loss = C_contour().to(device)\r\n\r\n\r\n# ##### GAN training\r\n\r\n# In[ ]:\r\n\r\n\r\nbest_test_acc = 0\r\nx_noise = torch.randn(batchsize, z_dim).to(device)\r\nfixed_noise = x_noise\r\nfixed_x = None\r\nfixed_y = None\r\n\r\n\r\ny_iter = InfIter(dataloader_y_ans)\r\nx_iter = InfIter(dataloader_x)\r\nfor i in range(tot_iter):\r\n netCl.train()\r\n netD_1.train()\r\n netD_2.train()\r\n netGen.train()\r\n for in_iter in range(2):\r\n netCl.zero_grad()\r\n netD_1.zero_grad()\r\n netD_2.zero_grad()\r\n\r\n y, y_labels = next(y_iter)\r\n x, x_labels = next(x_iter)\r\n z = torch.randn(batchsize, z_dim).to(device)\r\n\r\n x = x.to(device)\r\n y = y.to(device)\r\n x_labels = x_labels.to(device)\r\n\r\n # GAN training\r\n x_f, y_f = netGen(z)\r\n x_3,x_2,x_1, y_3,y_2,y_1 = netCl(x,y)\r\n x_3_f,x_2_f,_, y_3_f,y_2_f,_ = netCl(x_f.detach(), y_f.detach())\r\n\r\n errD_xy 
= loss(netD_1(x_1.detach())) \r\n errD_xy += loss(-netD_1(y_1.detach())) \r\n errD_xy += loss(netD_2(x_2.detach())) \r\n errD_xy += loss(-netD_2(y_2.detach())) \r\n errD_xy.backward()\r\n optimizerD.step()\r\n\r\n errD_xy = loss(-netD_1(x_1)) \r\n errD_xy += loss(netD_1(y_1)) \r\n errD_xy += loss(-netD_2(x_2.detach())) \r\n errD_xy += loss(netD_2(y_2.detach())) \r\n\r\n\r\n errD_x_real = loss(x_3) \r\n errD_y_real = loss(y_3) \r\n errD_x_fake = loss(-x_3_f) \r\n errD_y_fake = loss(-y_3_f) \r\n D_x_real = x_3.mean().item()\r\n D_y_real = y_3.mean().item()\r\n D_x_fake = x_3_f.mean().item()\r\n D_y_fake = y_3_f.mean().item()\r\n\r\n x_out = netCl.pred_fromrep(x_2)\r\n #netCl.eval()\r\n x_out_f = netCl.pred_fromrep(x_2_f)\r\n #netCl.train()\r\n\r\n x_prob_fake = F.softmax(x_out_f, dim=1)\r\n x_maxprob_fake,x_label_fake = x_prob_fake.max(dim=1)\r\n select_indices = x_maxprob_fake>0.9\r\n ys_rep_fake = y_2_f[select_indices,:,:,:]\r\n if(ys_rep_fake.shape[0]==0):\r\n errCl_x = 0\r\n else:\r\n ys_label = x_label_fake[select_indices].detach()\r\n ys_out_fake = netCl.pred_fromrep(ys_rep_fake)\r\n if ys_rep_fake.shape[0]==1:\r\n ys_out_fake = ys_out_fake[None, :]\r\n errCl_x = criterion(ys_out_fake,ys_label) \r\n\r\n\r\n optloss = ((x_2_f-y_2_f)**2).sum()/batchsize \r\n errCl_x += criterion(x_out,x_labels) \r\n # GAN training for y\r\n lossCl = errD_x_real+errD_y_real+errD_x_fake+errD_y_fake+optloss+errCl_x+errD_xy\r\n lossCl.backward()\r\n optimizerCl.step()\r\n \r\n netGen.zero_grad()\r\n\r\n x_f, y_f = netGen(z)\r\n x_3_f,x_2_f,_,y_3_f,y_2_f,_ = netCl(x_f, y_f)\r\n errD_x_fake = loss(x_3_f) \r\n errD_y_fake = loss(y_3_f) \r\n D_x_fake = x_3_f.mean().item()\r\n D_y_fake = y_3_f.mean().item()\r\n\r\n # train optimal transport loss\r\n optloss = ((x_2_f-y_2_f)**2).sum()/batchsize #+ opt_contour_loss(x_fake,y_fake) * OPTlossscale2\r\n\r\n # Total Loss\r\n total_loss = errD_x_fake+errD_y_fake+optloss\r\n total_loss.backward()\r\n\r\n optimizerGen.step()\r\n\r\n pred = torch.argmax(x_out,dim=-1)\r\n train_acc = torch.sum(pred==x_labels).item()/len(pred)\r\n\r\n if i%100==0:\r\n print('Iter: [%d/%d] D_x_real: %.4f, D_x_fake: %.4f, D_y_real: %.4f, D_y_fake: %.4f, Loss_GANx: %.4f, Loss_GANy: %.4f, Loss_OPT: %.4f, Loss_P: %.4f, Train Accu: %.4f' %\r\n (i, tot_iter, D_x_real, D_x_fake, D_y_real, D_y_fake, errD_x_real.item()+errD_x_fake.item(), errD_y_real.item()+errD_y_fake.item(), optloss.item(), errCl_x.item(), train_acc))\r\n\r\n netCl.eval()\r\n # show tsne\r\n if i%t_iter == 0:\r\n if fixed_x is None:\r\n fixed_x = x.clone()\r\n fixed_y = y.clone()\r\n fixed_xlabel = x_labels.to(\"cpu\").long().numpy()\r\n fixed_ylabel = y_labels.to(\"cpu\").long().numpy()\r\n if fixed_x.shape[1] == fixed_y.shape[1]:\r\n real_images = torch.cat((fixed_x, fixed_y), 2)\r\n elif fixed_x.shape[1] == 1:\r\n real_images = torch.cat((torch.cat((fixed_x,fixed_x,fixed_x), 1), fixed_y), 2)\r\n else:\r\n real_images = torch.cat((fixed_x, torch.cat((fixed_y,fixed_y,fixed_y), 1)), 2)\r\n torchvision.utils.save_image(real_images.data, experiment +'/realimage.jpg')\r\n _,fixedx_rep = netCl.pred_s(fixed_x)\r\n _,fixedy_rep = netCl.pred_t(fixed_y)\r\n fixed_x_fake, fixed_y_fake = netGen(fixed_noise)\r\n fixedx_rep_fake_l,fixedx_rep_fake = netCl.pred_s(fixed_x_fake)\r\n fixedx_rep_fake_l = fixedx_rep_fake_l.argmax(dim=1)\r\n fixedy_rep_fake_l,fixedy_rep_fake = netCl.pred_t(fixed_y_fake)\r\n fixedy_rep_fake_l = fixedy_rep_fake_l.argmax(dim=1)\r\n if fixed_x_fake.shape[1] == fixed_y_fake.shape[1]:\r\n fake_images = 
torch.cat((fixed_x_fake, fixed_y_fake), 2)\r\n elif fixed_x_fake.shape[1] == 1:\r\n fake_images = torch.cat((torch.cat((fixed_x_fake,fixed_x_fake,fixed_x_fake), 1), fixed_y_fake), 2)\r\n else:\r\n fake_images = torch.cat((fixed_x_fake, torch.cat((fixed_y_fake,fixed_y_fake,fixed_y_fake), 1)), 2)\r\n torchvision.utils.save_image(fake_images.data, experiment +'/fakeimage_%05d.jpg'%(i))\r\n show_tsne(\r\n fixedx_rep.to(\"cpu\").detach().numpy(),\r\n fixed_xlabel,\r\n fixedy_rep.to(\"cpu\").detach().numpy(),\r\n fixed_ylabel,\r\n fixedx_rep_fake.to(\"cpu\").detach().numpy(),\r\n fixedx_rep_fake_l,\r\n fixedy_rep_fake.to(\"cpu\").detach().numpy(),\r\n fixedy_rep_fake_l,\r\n i)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"dayeont/ML_project","sub_path":"DASPOT.py","file_name":"DASPOT.py","file_ext":"py","file_size_in_byte":28605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9742337058","text":"#!/usr/bin/env python3\nfrom collections import Counter\nimport itertools\nimport copy\nfrom itertools import product\n\n\ndef count_black(array):\n flatten = itertools.chain.from_iterable(array)\n c = Counter(flatten)\n return c[\"#\"]\n\n\ndef red_row(array, i):\n length = len(array[0])\n array[i] = [\"r\"] * length\n return array\n\n\ndef red_col(array, j):\n for row in array:\n row[j] = \"r\"\n return array\n\n\nH, W, K = map(int, input().split())\nC = []\n\nfor _ in range(H):\n row = list(input())\n C.append(row)\n\ntotal = 0\n\nrow_select_patterns = product([True, False], repeat=H)\ncol_select_patterns = list(product([True, False], repeat=W))\n\nfor row_select_pattern in row_select_patterns:\n for col_select_pattern in col_select_patterns:\n c = copy.deepcopy(C)\n\n for i, i_flag in enumerate(row_select_pattern):\n for j, j_flag in enumerate(col_select_pattern):\n if i_flag:\n c = red_row(c, i)\n\n if j_flag:\n c = red_col(c, j)\n\n if count_black(c) == K:\n total += 1\n\nprint(total)\n","repo_name":"rmaruon/atcoder-workspace","sub_path":"contests/abc173/C/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"18439782245","text":"\"\"\"This example shows how to use ebonite with tensorflow<2.0.0\"\"\"\n\nimport logging\nfrom typing import Dict, List, Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\n\nimport ebonite\nfrom ebonite.runtime import run_model_server\n\ntf_logger = logging.getLogger('tensorflow')\ntf_logger.setLevel(logging.ERROR)\n\n\ndef train_regression() -> Tuple[tf.Session, Union[tf.Tensor, List[tf.Tensor]], Dict[tf.Tensor, np.array]]:\n \"\"\"This function emulates data scientist's work. 
It produces a tf.Session with trained regression model and\n some sample data in feed_dict format\"\"\"\n learning_rate = 0.01\n training_epochs = 10\n n_samples = 20\n\n weight = 0.5\n bias = -2\n\n rng = np.random\n\n train_X = rng.uniform(-10, 10, (n_samples,))\n train_Y = train_X * weight + bias + rng.uniform(-0.1, 0.1, train_X.shape)\n\n X = tf.placeholder(\"float\", name='X')\n Y = tf.placeholder(\"float\", name='y')\n W = tf.Variable(rng.randn(), name=\"weight\")\n b = tf.Variable(rng.randn(), name=\"bias\")\n\n pred = tf.add(tf.multiply(X, W), b)\n mse = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples)\n optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(mse)\n\n sess = tf.Session()\n\n # Run the initializer\n sess.run(tf.global_variables_initializer())\n\n # Fit all training data\n for epoch in range(training_epochs):\n for (x, y) in zip(train_X, train_Y):\n sess.run(optimizer, feed_dict={X: x, Y: y})\n\n train_mse = sess.run(mse, feed_dict={X: train_X, Y: train_Y})\n print('train mse', train_mse)\n # Testing example\n test_X = rng.uniform(-10, 10, (n_samples,))\n test_Y = test_X * weight + bias\n test_mse = sess.run(mse, feed_dict={X: test_X, Y: test_Y})\n print('test mse', test_mse)\n return sess, pred, {X: test_X}\n\n\ndef main():\n # obtain session, output tensor and feed_dict\n session, tensor, feed_dict = train_regression()\n\n # in provided session, create model 'tf_model' from output tensor and sample data\n with session.as_default():\n model = ebonite.create_model(tensor, feed_dict, 'tf_model')\n\n # run flask service with this model\n run_model_server(model)\n # now you can use client.py to call this service or go to http://localhost:9000/apidocs to view swagger ui\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"zyfra/ebonite","sub_path":"examples/tensorflow_model/train_and_serve.py","file_name":"train_and_serve.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","stars":199,"dataset":"github-code","pt":"69"} +{"seq_id":"30915951263","text":"from sanic.response import json, text\nfrom mongoengine.errors import NotUniqueError\nimport datetime\nimport json as pyjson\nfrom bson import json_util\nfrom copy import deepcopy\n\n# src module\nfrom handler import APIHandler\nfrom model import User, Task\nfrom web import error, generateSessionToken, log, AccountLock, validate, Authenticated, AuthenticatedError\n\nclass index(APIHandler):\n\n def get(self, requests):\n '''任务查询'''\n where = self.query_constraint(requests)\n # 返回的不是数组,就表示出现错误了\n if isinstance(where, tuple):\n return error(where[0], {'msg': where[1]})\n\n users = list()\n for user in User.get_users(where):\n users.append(user)\n\n data = {\n 'body': {'results': users}\n }\n return json(**data)\n\n def post(self, requests):\n '''新建任务'''\n data = requests.json\n # 检测用户是否登录\n session = requests.headers.get('X-LC-Session')\n where = self.query_constraint(requests)\n user = next(User.get_users(where, sessionToken=session, raw=True)).first()\n\n if user is None:\n return error(211)\n\n if not isinstance(data, dict) or data is None:\n return error(107)\n\n if 'title' not in data:\n return error(301)\n # if User.objects(username=request['username']).first() is not None:\n # return error(202)\n\n # 生成 sessionToken\n # sessionToken = generateSessionToken()\n data.update({\n # 'title': data.get('title').decode('utf-8'),\n # 'desc': data.get('desc').decode('utf-8'),\n 'own': user.id,\n 'createdAt': datetime.datetime.utcnow(),\n 'updatedAt': 
datetime.datetime.utcnow(),\n })\n print(data)\n task = Task(**data)\n task.save()\n\n body = deepcopy(data)\n body.update({\n # 'title': body['title'].encode('utf-8'),\n 'own': str(body['own']),\n 'createdAt': data['createdAt'].isoformat(),\n 'updatedAt': data['updatedAt'].isoformat()\n })\n\n # print(body)\n result = {\n 'body': body,\n 'headers': {'Location': '/tasks/{}'.format(task.id)},\n 'status': 201\n }\n # print(result)\n # return text(str(result))\n return json(**result)\n # task = Task(**)\n","repo_name":"xiaojieluo/webnav","sub_path":"src/handler/TaskHandler.py","file_name":"TaskHandler.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"74968301339","text":"from model.state import *\nfrom model.memory import *\nimport tkinter as tk\nfrom tkinter import filedialog, simpledialog, colorchooser\nfrom boardview import BoardView\nfrom controller import Controller\nimport json\n\n#create new game\ndef newcmd():\n rows = simpledialog.askinteger('Input', 'How many rows?', minvalue=3, parent=root)\n cols = simpledialog.askinteger('Input', 'How many columns?', minvalue=14, parent=root)\n sz = 3 if rows % 2 else 2\n bdv.discard_tiles()\n load_history(History(rows, cols, sz))\n#save current game\ndef saveascmd():\n save_file = filedialog.asksaveasfilename()\n with open(save_file, 'w') as sf:\n json.dump(controller.cache.hist.__dict__, sf)\n#open saved game\ndef opencmd():\n open_file = filedialog.askopenfilename()\n with open(open_file) as of:\n histdict = json.load(of)\n bdv.discard_tiles()\n load_history(History(**histdict))\n\ndef load_history(history):\n cache = Cache(history)\n board = cache.latest\n root.geometry(str(max(board.cols*40,800))+'x'+str(board.rows*40)+'+400+200')\n bdv.setup(controller, board.rows, board.cols)\n cache.link_gui(controller, bdv)\n#set colors of display\ndef set_color(player, base=False):\n rgb, color = colorchooser.askcolor()\n if base:\n bdv.basecolors[player] = color\n else:\n bdv.colors[player] = color\n bdv.set_view(controller.cache.latest)\n\n\nroot = tk.Tk()\nroot.title(\"Conquid\")\nroot.option_add('*tearOff', False)\nmenubar = tk.Menu(root)\nroot['menu'] = menubar\n# file menu creation\nfilemenu = tk.Menu(menubar)\nmenubar.add_cascade(menu=filemenu, label=\"File\")\nfilemenu.add_command(label='New', command=newcmd)\nfilemenu.add_command(label='Open', command=opencmd)\nfilemenu.add_command(label='Save As', command=saveascmd)\n# color menu creation\ncolormenu = tk.Menu(menubar)\nmenubar.add_cascade(menu=colormenu, label=\"Colors\")\ncolormenu.add_command(label='Player 1 Base', command=lambda:set_color(1,base=True))\ncolormenu.add_command(label='Player 1 Cell', command=lambda:set_color(1))\ncolormenu.add_command(label='Player 2 Base', command=lambda:set_color(2,base=True))\ncolormenu.add_command(label='Player 2 Cell', command=lambda:set_color(2))\n\n# controller and boardview setup\nbutton_frame = tk.Frame(root)\nturn_box = tk.Label(button_frame,text='PLAYER 1 TURN', width=15)\nbdv = BoardView(root, turn_box)\ncontroller = Controller()\ncontroller.boardview = bdv\n\n#move buttons\nmove_btns = {}\nmove_btns['A'] = tk.Button(button_frame, relief='groove', text='acquire', width=8, command=lambda:controller.button_pressed('A'))\nroot.bind('', lambda e: move_btns['A'].invoke())\nmove_btns['C'] = tk.Button(button_frame, relief='groove', text='conquer', width=8, command=lambda:controller.button_pressed('C'))\nroot.bind('', lambda e: 
move_btns['C'].invoke())\nmove_btns['V'] = tk.Button(button_frame, relief='groove', text='vanquish', width=8, command=lambda:controller.button_pressed('V'))\nroot.bind('', lambda e: move_btns['V'].invoke())\nmove_btns['Q'] = tk.Button(button_frame, relief='groove', text='conquest', width=8, command=lambda:controller.button_pressed('Q'))\nroot.bind('', lambda e: move_btns['Q'].invoke())\n#undo and confirm\nundo_btn = tk.Button(button_frame, relief='groove', text='undo', width=8, command=controller.undo)\nroot.bind('', lambda e: undo_btn.invoke())\nconfirm_btn = tk.Button(button_frame,relief='groove', text='confirm', width=8, command=controller.confirm)\nroot.bind('', lambda e: confirm_btn.invoke())\n#playback\nprev_btn = tk.Button(button_frame,relief='groove', text='<<', width=10, command=controller.prev_board)\nroot.bind('', lambda e: prev_btn.invoke())\npause_play = tk.Button(button_frame,relief='groove', text='#', width=10, command=controller.pauseplay)\nroot.bind('', lambda e: pause_play.invoke())\nnext_btn = tk.Button(button_frame,relief='groove', text='>>', width=10, command=controller.next_board)\nroot.bind('', lambda e: next_btn.invoke())\n#link to controller\ncontroller.link_buttons(move_btns, undo_btn, confirm_btn, prev_btn, pause_play, next_btn)\n\n#pack\nbutton_frame.pack(side='bottom')\nmove_btns['A'].grid(row=0, column=0)\nmove_btns['C'].grid(row=0, column=1)\nmove_btns['V'].grid(row=0, column=2)\nmove_btns['Q'].grid(row=0, column=3)\nturn_box.grid(row=0, column=4)\nundo_btn.grid(row=0, column=5)\nconfirm_btn.grid(row=0, column=6)\nprev_btn.grid(row=0, column=7)\npause_play.grid(row=0,column=8)\nnext_btn.grid(row=0, column=9)\n#rev it up\nload_history(History(14, 28, 2))\nroot.focus()\nroot.mainloop()\n","repo_name":"TortCode/ConquidPy","sub_path":"Conquid/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12741617122","text":"import numpy as np\nimport cgt\nfrom cgt import nn, core\n\ndef test_flatvec():\n cgt.reset_config\n cgt.set_precision('double')\n cgt.core.update_config(backend=\"python\") # XXX\n\n N = 10\n K = 3\n\n Xval = np.random.randn(N,K)\n wval = np.random.randn(K)\n bval = np.random.randn()\n yval = np.random.randn(N)\n\n X_nk = cgt.shared(Xval, \"X\")\n y_n = cgt.shared(yval, \"y\")\n w_k = cgt.shared(wval, \"w\")\n b = cgt.shared(bval, name=\"b\")\n\n ypred = cgt.dot(X_nk, w_k) + b\n\n err = cgt.sum(cgt.square(ypred - y_n))\n g = cgt.grad(err, [w_k, b])\n g = core.simplify(g)\n\n pars = [w_k, b]\n flatx = nn.setup_contiguous_storage(pars)\n f = cgt.function([], [err,cgt.flatcat(g)])\n","repo_name":"joschu/cgt","sub_path":"cgt/tests/_test_flatvec.py","file_name":"_test_flatvec.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":627,"dataset":"github-code","pt":"69"} +{"seq_id":"16233691850","text":"banco = {'04954199158':\n {'CPF':'04954199158',\n 'Nome':'Daniel Sottovia Gomide',\n 'Idade':22,\n 'Curso':'Engenharia Mecânica'}}\n\ndef checagem(CPF,banco):\n cont = 0\n for individuos in banco:\n if CPF == individuos:\n cont += 1\n if cont == 0:\n return True\n else:\n return False\n\ndef cadastro(banco):\n CPF = str(input('CPF: '))\n while CPF != '-1':\n if checagem(CPF, banco):\n nome = str(input('Nome: '))\n idade = int(input('Idade: '))\n while idade <= 0:\n idade = int(input('Idade: '))\n curso = str(input('Curso: '))\n banco[CPF] = {'CPF': CPF, 'Nome': nome,\n 'Idade': 
idade, 'Curso':curso}\n elif checagem(CPF, banco):\n print('CPF já existente!!!')\n print('Informe outro CPF ou -1 para parar o cadastro.')\n CPF = str(input('CPF: '))\n print('Para continuar o cadastramento insira um novo CPF ou digite -1.')\n CPF = str(input('CPF: '))\n return banco\n\ndef consulta(banco):\n dados = str(input('CPF à consultar: '))\n for cpf in banco:\n if cpf == dados:\n return print(banco[cpf])\n\ndef deletar(banco):\n dados = str(input('CPF à remover: '))\n cont = 0\n for cpf in banco:\n if cpf == dados:\n cont += 1\n del banco[cpf]\n print('Cadastro excluído!')\n print(banco)\n return banco\n if cont == 0:\n print('CPF não encontrado.')\n return banco\n\ndef interface(banco):\n while True:\n pergunta = str(input('Adicionar: 1 \\nRemover: 2 \\nConsultar: 3 \\nDigite a sua escolha: '))\n if pergunta == '1':\n banco = cadastro(banco)\n elif pergunta == '2':\n banco = deletar(banco)\n elif pergunta == '3':\n consulta(banco)\n else:\n print('Obrigado pela atenção!!!')\n break\n\ninterface(banco)\n\n\n\n\n\ninterface(banco)\n\n\n\n","repo_name":"Daniel-Sottovia/INE5603","sub_path":"Exercícios/cadastro.py","file_name":"cadastro.py","file_ext":"py","file_size_in_byte":2036,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"32770297324","text":"import json\n\ntasks = {}\n\ndef load_tasks():\n try:\n with open('tasks.json', 'r') as f:\n return json.load(f)\n except (FileNotFoundError, json.JSONDecodeError):\n return {}\n\ndef save_tasks():\n with open('tasks.json', 'w') as f:\n json.dump(tasks, f)\n\ndef edit_task():\n task_name = input(\"Enter the name of the task to edit: \")\n if task_name in tasks:\n new_task_name = input(\"Enter the new name of the task: \")\n tasks[new_task_name] = tasks.pop(task_name)\n print(f\"Task '{task_name}' has been renamed to '{new_task_name}'.\")\n status = input(f\"Mark task '{new_task_name}' as completed? (y/n): \")\n if status.lower() == 'y':\n tasks[new_task_name] = True\n print(f\"Task '{new_task_name}' has been marked as completed.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n\ntasks = load_tasks()\n\nwhile True:\n action = input(\"Would you like to add, edit, remove, complete, or uncomplete a task? 
Type 'list', 'save', or 'quit' to exit: \")\n \n if action == \"add\":\n task_name = input(\"Enter the name of the task: \")\n tasks[task_name] = False\n print(f\"Task '{task_name}' has been added to the to-do list.\")\n elif action == \"edit\":\n edit_task()\n elif action == \"remove\":\n task_name = input(\"Enter the name of the task to remove: \")\n if task_name in tasks:\n del tasks[task_name]\n print(f\"Task '{task_name}' has been removed from the to-do list.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n elif action == \"complete\":\n task_name = input(\"Enter the name of the task to complete: \")\n if task_name in tasks:\n tasks[task_name] = True\n print(f\"Task '{task_name}' has been marked as completed.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n elif action == \"uncomplete\":\n task_name = input(\"Enter the name of the task to uncomplete: \")\n if task_name in tasks:\n tasks[task_name] = False\n print(f\"Task '{task_name}' has been marked as not completed.\")\n else:\n print(f\"Task '{task_name}' was not found in the to-do list.\")\n elif action == \"list\":\n print(\"Current to-do list:\")\n for task_name, completed in tasks.items():\n status = \"completed\" if completed else \"not completed\"\n print(f\"- {task_name} ({status})\")\n elif action == \"save\":\n save_tasks()\n print(\"To-do list saved to file.\")\n elif action == \"quit\":\n save_tasks()\n break\n else:\n print(\"Invalid action. Please type 'add', 'edit', 'remove', 'complete', 'uncomplete', 'list', 'save', or 'quit'.\")\n","repo_name":"Neurorazor/30-Days-of-Python","sub_path":"ToDo /task_manager.py","file_name":"task_manager.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"31039890325","text":"# -*- encoding: utf-8 -*-\nfrom django.test import TestCase\nimport requests\n\ndef api_connector():\n headers = {'Content-Type': 'application/json',\n 'credential': 'ZGpzOTAzaWZuc2Zpb25kZnNubm5u',}\n servername = \"api.moni.com.ar\"\n\n return servername, headers\n\ndef get_prestamo(dni):\n servername, headers = api_connector()\n URL = \"https://\" + servername + \"/api/v4/scoring/pre-score/\" + str(dni)\n \n response = requests.request(\"GET\",URL, headers=headers)\n if response.status_code == 200:\n response_json = response.json()\n has_error = response_json.get(\"has_error\")\n status = response_json.get(\"status\")\n\n return status, has_error","repo_name":"nbalmaceda/Prueba-tec","sub_path":"app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"22155116171","text":"num = int(input(\"Digite um número inteiro: \"))\n\nprimo = True\ndiv = 2\n\nwhile div < num:\n if num % div == 0:\n primo = False\n break\n div += 1\n\nif primo and num > 1:\n print('primo')\nelse:\n print('não primo')\n\n","repo_name":"andreztz/exercicios","sub_path":"ListaDeExercicios-3/exercicio-5.py","file_name":"exercicio-5.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"13897042057","text":"#-----------------------------------------------------\n# Mimas: conference submission and review system\n# (c) Allan Kelly 2016-2020 http://www.allankelly.net\n# Licensed under MIT License, see LICENSE file\n# 
-----------------------------------------------------\n\n# System imports\n\n# Google imports\nimport logging\n\nfrom google.appengine.ext import ndb\n\n# Local imports\nimport roundreviews\nimport basehandler\nfrom submission_lib import submissionrecord\n\n\nclass ClassicReviewDecisionPage(basehandler.BaseHandler):\n\n def make_page(self, crrt_conf):\n review_round = int(self.request.get(\"round\"))\n tracks = crrt_conf.mapped_track_obects()\n crrt_track = self.request.get(\"track\", default_value=tracks.keys()[0])\n\n submissions = self.sorted_submissions(crrt_conf, crrt_track, review_round)\n\n template_values = {\n 'crrt_conf': crrt_conf,\n \"track_objects\": tracks,\n \"crrt_track\": crrt_track,\n \"submissions\": submissions,\n \"submissions_len\": len(submissions),\n \"decisions\": submissionrecord.get_decision_summary(crrt_conf.key, crrt_track, review_round),\n \"decision_maker\": crrt_conf.user_rights().has_decision_right_for_round(\n self.get_crrt_user().email(), review_round),\n \"review_round\": review_round,\n \"track_slots\": crrt_conf.mapped_track_obects()[crrt_track].slots,\n }\n\n self.write_page('subreview_lib/classicreviewdecisionpage.html', template_values)\n\n def sorted_submissions(self, crrt_conf, crrt_track, review_round):\n submissions = submissionrecord.retrieve_conference_submissions_by_track_and_round(\n crrt_conf.key, crrt_track, review_round)\n\n if self.request.params.has_key(\"mean\"):\n sorted = submissionrecord.sort_submissions_by_mean_high_to_low(submissions, review_round)\n else:\n sorted = submissionrecord.sort_submissions_by_total_high_to_low(submissions, review_round)\n return sorted\n\n def get(self):\n if not (self.session.has_key(\"crrt_conference\")):\n logging.debug(\"Conference key session variable missing\")\n return\n\n crrt_conf = ndb.Key(urlsafe=self.session[\"crrt_conference\"]).get()\n\n self.make_page(crrt_conf)\n\n def submit_decisions(self, review_round):\n if not (self.session.has_key(\"crrt_conference\")):\n logging.debug(\"Conference key session variable missing\")\n return\n\n roundreviews.submit_decisions(\n ndb.Key(urlsafe=self.session[\"crrt_conference\"]),\n self.request.get(\"tracklist\"),\n review_round,\n self.request)\n\n def decline_no_decisions(self, review_round):\n self.submit_decisions(review_round)\n roundreviews.mass_track_change(\n ndb.Key(urlsafe=self.session[\"crrt_conference\"]),\n self.request.get(\"tracklist\"),\n review_round,\n \"No decision\",\n \"Decline\")\n\n def post(self):\n review_round = int(self.request.get(\"review_round\"))\n if self.request.get(\"SubmitDecision\"):\n self.submit_decisions(review_round)\n if self.request.get(\"DeclineNoDecisions\"):\n self.decline_no_decisions(review_round)\n\n self.redirect(\"/classic_review_decisions?track=\" +\n self.request.get(\"tracklist\") +\n \"&round=\" + str(review_round))\n","repo_name":"allankellynet/mimas","sub_path":"subreview_lib/classicreviewdecisionpage.py","file_name":"classicreviewdecisionpage.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"28658371325","text":"def operate(a, op, b):\n if op == 0:\n return a+b\n elif op == 1:\n return a-b\n elif op == 2:\n return a*b\n else:\n if a >= 0:\n return a//b\n else:\n return (-a)//b * -1\n\n\ndef dfs(n, s):\n global minA, maxA\n if n == N:\n minA = min(minA, s)\n maxA = max(maxA, s)\n return\n for i in range(4):\n if op[i]:\n op[i] -= 1\n dfs(n+1, operate(s, i, A[n]))\n op[i] += 1\n\n\nN = 
int(input())\nA = [*map(int, input().split())]\nop = [*map(int, input().split())]\nminA, maxA = 1e9, -1e9\ndfs(1, A[0])\nprint(maxA, minA, sep='\\n')\n\n# 연산을 eval 함수를 이용하면 C++14 기준으로 되나봄.. 통과됨..","repo_name":"hjle2/Algorithm","sub_path":"baekjoon/백트래킹/14888_연산자 끼워넣기.py","file_name":"14888_연산자 끼워넣기.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"13917647459","text":"#-*- coding: utf-8 -*-\n\n# /usr/include/linux/fadvise.h\nPOSIX_FADV_SEQUENTIAL = 2\nPOSIX_FADV_DONTNEED = 4\n\nlibc = offset = length = None\n\ndef fadvise(fd, seq=False, drop_cache=False):\n\t'Avoid filling disk cache with discardable data.'\n\tglobal libc, offset, length\n\tif not libc: # only import and initialize ctypes if used\n\t\timport ctypes, ctypes.util\n\t\tlibc = ctypes.CDLL(ctypes.util.find_library('c'))\n\t\toffset = length = ctypes.c_uint64(0)\n\tif not isinstance(fd, (int, long)): fd = fd.fileno()\n\n\t# These don't work (EINVAL) when or'ed\n\tif seq: libc.posix_fadvise(fd, offset, length, POSIX_FADV_SEQUENTIAL)\n\tif drop_cache: libc.posix_fadvise(fd, offset, length, POSIX_FADV_DONTNEED)\n","repo_name":"mk-fg/fs-bitrot-scrubber","sub_path":"fs_bitrot_scrubber/fadvise.py","file_name":"fadvise.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"69"} +{"seq_id":"4067682597","text":"from lona.protocol import OPERATION, PATCH_TYPE\n\n\nclass AttributeDict:\n PATCH_TYPE = PATCH_TYPE.ATTRIBUTES\n\n def __init__(self, node, *args, **kwargs):\n self._node = node\n self._attributes = dict(*args, **kwargs)\n\n # dict helper #############################################################\n def keys(self):\n with self._node.lock:\n return self._attributes.keys()\n\n def items(self):\n with self._node.lock:\n return self._attributes.items()\n\n def pop(self, name):\n with self._node.lock:\n attribute = self._attributes.pop(name)\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.REMOVE,\n payload=[\n name,\n ],\n )\n\n return attribute\n\n def clear(self):\n with self._node.lock:\n if not self._attributes:\n return\n\n self._attributes.clear()\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.CLEAR,\n payload=[],\n )\n\n def get(self, *args, **kwargs):\n with self._node.lock:\n return self._attributes.get(*args, **kwargs)\n\n def update(self, value):\n if not isinstance(value, dict):\n raise ValueError('dict required')\n\n with self._node.lock:\n for key, value in value.items():\n self[key] = value\n\n def __getitem__(self, name):\n with self._node.lock:\n return self._attributes[name]\n\n def __setitem__(self, name, value, issuer=None):\n if not isinstance(value, (int, bool, float, str)):\n raise ValueError('unsupported type: {}'.format(type(value)))\n\n if name in ('id', 'class', 'style'):\n raise RuntimeError(\n \"Node.attributes['{}'] is not supported. 
\"\n 'Use Node.{}{} instead.'.format(\n name, name, '_list' if name != 'style' else '')\n )\n\n with self._node.lock:\n if name in self._attributes and self._attributes[name] == value:\n return\n\n self._attributes[name] = value\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.SET,\n payload=[\n name,\n value,\n ],\n issuer=issuer,\n )\n\n def __delitem__(self, name):\n with self._node.lock:\n del self._attributes[name]\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.REMOVE,\n payload=[\n name,\n ],\n )\n\n def __eq__(self, other):\n with self._node.lock:\n if isinstance(other, self.__class__):\n other = other._attributes\n\n return self._attributes == other\n\n def __iter__(self):\n with self._node.lock:\n return self._attributes.__iter__()\n\n def __bool__(self):\n with self._node.lock:\n return bool(self._attributes)\n\n # serialisation ###########################################################\n def _reset(self, value):\n if not isinstance(value, dict):\n raise ValueError('unsupported type')\n\n for k, v in value.items():\n if not isinstance(v, (int, bool, float, str)):\n raise ValueError('unsupported type')\n\n if k in ('id', 'class', 'style'):\n raise RuntimeError(\n \"Node.attributes['{}'] is not supported. \"\n 'Use Node.{}{} instead.'.format(\n k, k, 'list' if k != 'style' else '')\n )\n\n with self._node.lock:\n self._attributes = value\n\n self._node.document.add_patch(\n node_id=self._node.id,\n patch_type=self.PATCH_TYPE,\n operation=OPERATION.RESET,\n payload=[\n dict(value),\n ],\n )\n\n def _serialize(self):\n return dict(self._attributes)\n\n # string representation ###################################################\n def to_attribute_string(self, skip_value=False):\n with self._node.lock:\n string = []\n\n for key, value in self._attributes.items():\n if skip_value and key == 'value':\n continue\n\n string.append('{}=\"{}\"'.format(key, value))\n\n return ' '.join(string)\n\n def to_sub_attribute_string(self):\n with self._node.lock:\n string = []\n\n for key, value in self._attributes.items():\n string.append('{}: {}'.format(key, value))\n\n return '; '.join(string)\n\n def __repr__(self):\n return ''.format(repr(self._attributes))\n\n\nclass StyleDict(AttributeDict):\n PATCH_TYPE = PATCH_TYPE.STYLE\n\n def __repr__(self):\n return ''.format(repr(self._attributes))\n","repo_name":"simrit1/lona","sub_path":"lona/html/attribute_dict.py","file_name":"attribute_dict.py","file_ext":"py","file_size_in_byte":5347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"69"} +{"seq_id":"21763729292","text":"# %%\n##\nimport calendar as cal\nfrom datetime import date as dt\nimport re\nimport string as st\nfrom collections import Counter\nimport math\nimport random\nimport pickle\nimport json\n\n# %%\n##\ns = cal.calendar(2020)\n\nprint(s)\n\n# %%\n##\ns = cal.month(2020, 6)\n\nprint(s)\n\n# %%\n##\nd_1 = dt(2020, 6, 1)\nd_2 = dt(2020, 7, 18)\n\nprint(d_2 - d_1)\n\n# %%\n##\nstring = 'Python 3.8'\n\np = re.compile(pattern=r\"\\d\").findall(string)\n\nprint(p)\n\n# different solution\n\nresult = re.findall(pattern=r\"\\d\", string=string)\nprint(result)\n\n# %%\n##\nstring = '!@#$%^&45wc'\n\nres = re.findall(r\"\\w\", string=string)\n\nprint(res)\n\n# %%\n##\nraw_text = \"Wyślij email na adres: info@template.com lub sales-info@template.it\"\n\nres = re.findall(r\"[\\w.-]+@[\\w.-]+\", raw_text)\n\nprint(res)\n\n# %%\n##\ntext = 
'Programowanie w języku Python - od A do Z'\n\nres = re.split(r\"\\s+\", text)\n\nprint(res)\n\n# %%\n##\nres = st.ascii_letters\n\nprint(res)\n\n# %%\n##\nitems = ['YES', 'NO', 'NO', 'YES', 'EMPTY', 'YES', 'NO']\n\nres = Counter(items)\n\nprint(res)\n\n# or\ncounter = Counter()\nitems = ['YES', 'NO', 'NO', 'YES', 'EMPTY', 'YES', 'NO']\nfor item in items:\n counter[item] += 1\nprint(counter)\n\n\n# %%\n##\n\n\ndef sigmoid(x):\n func = 1 / (1 + math.exp(-x))\n return func\n\n\n# %%\n##\n\n\nrandom.seed(12)\n\nitems = ['python', 'java', 'sql', 'c++', 'c']\n\nprint(random.choice(items))\n\n# %%\n##\n\n\nrandom.seed(15)\n\nitems = ['python', 'java', 'sql', 'c++', 'c']\nrandom.shuffle(items)\n\nprint(items)\n\n# %%\n##\nids = ['001', '003', '011']\n\npickle.dump(ids, open('part_25/data.pickle', 'wb'))\n\n# %%\n##\nstocks = {'PLW': 360.0, 'TEN': 320.0, 'CDR': 329.0}\n\nprint(json.dumps(stocks, sort_keys=True, indent=4))\n","repo_name":"LukasKodym/python_exercises","sub_path":"part_25/builtin_packs.py","file_name":"builtin_packs.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"2055979603","text":"import csv\n\ndef setup():\n global years, scores, margin, graphHeight, positions, title\n size(1000, 200)\n background(20)\n fill(255)\n title = \"Average math scores for 3rd graders by year.\"\n years = []\n scores = []\n margin = 40\n positions = []\n graphHeight = (height - margin) - margin\n positions = processData(filename=\"math_scores.csv\")\n drawData(positions, vertices=True,edges=False,grooves=True)\n drawCurves(vList=positions)\n \ndef processData(filename):\n global overallMin, overallMax, xSpacer\n f = open(filename, \"r\")\n with f:\n reader = csv.reader(f)\n for i, row in enumerate(reader):\n if i > 0:\n years.append(int(row[1]))\n scores.append(int(row[2]))\n overallMin = min(scores) # What does Cairo say?\n overallMax = max(scores)\n xSpacer = (width - margin - margin) / (len(years) - 1)\n for i in range(0, len(scores)):\n adjScore = map(scores[i], overallMin, overallMax, 0, graphHeight)\n yPos = height - margin - adjScore\n xPos = margin + (xSpacer * i)\n positions.append(PVector(xPos, yPos))\n return positions\n\ndef drawData(positions, vertices=False,edges=False,grooves=False):\n for i in range(0, len(positions)):\n textSize(12)\n textAlign(CENTER,CENTER)\n text(years[i], positions[i].x, height - margin + 20)\n if grooves:\n # draw grooves\n stroke(200, 100)\n strokeWeight(1)\n line(positions[i].x, margin, positions[i].x, height - margin)\n if vertices:\n # Draw the vertices\n stroke(200, 100)\n circle(positions[i].x, positions[i].y, 7)\n if edges:\n # Draw the edges\n if(i > 0):\n # stroke(200)\n strokeWeight(2)\n line(positions[i].x, positions[i].y,\n positions[i - 1].x, positions[i - 1].y)\n textSize(14)\n textAlign(LEFT,CENTER)\n text(overallMax, 5, margin)\n text(overallMin, 5, height - margin)\n textAlign(RIGHT,BOTTOM)\n text(title, width-margin,margin/2)\n \ndef drawCurves(vList):\n # Draw curved lines\n stroke(23, 225, 0)\n strokeWeight(3)\n noFill()\n beginShape()\n # start point\n curveVertex(vList[0].x,vList[0].y)\n for i in range(0,len(vList)):\n curveVertex(vList[i].x,vList[i].y)\n # end point\n curveVertex(vList[-1].x,vList[-1].y)\n 
endShape()\n","repo_name":"untr-aditi-onal/Python-Vis","sub_path":"mathscores/mathscores.pyde","file_name":"mathscores.pyde","file_ext":"pyde","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9418011898","text":"# Tarea 12.\n# Extender el programa happiness.py para que incluya lo siguiente:\n# 1. La descarga de datos del índice de felicidad debe\n# enviarse a un archivo csv.\n# 2. El programa debe contener una función para seleccionar los\n# valores de felicidad en fechas determinadas.\n# Las fechas deben poderse especificar en una forma similar a los \"Index slices\".\n# German Jordi Arreortua Reyes\n# 07/06/2022\n# ! ./venv/bin/python3.8\n# -*- coding: utf-8 -*-\n\"\"\"\nDownload world happiness time series from hedonometer project.\nSee https://hedonometer.org/timeseries/en_all/?from=2020-08-24&to=2022-02-23\nCreated on Tue Feb 24 15:35:23 2022\n\n@author: Feliú Sagols\nCDMX\n\"\"\"\n\nimport csv\nimport datetime\n# import pandas as pd\nimport requests\n# import psycopg2\n\nimport loggers\n\nTIMESERIES_DATABASE = \"ts_db\"\n\nglobal LOGGER\n\n\n# def last_available_date():\n# \"\"\"\n# Returns the newest record base_date in happiness table\n# \"\"\"\n# conn = psycopg2.connect(\"dbname=%s user=fsagols host=localhost\" %\n# TIMESERIES_DATABASE)\n# cur = conn.cursor()\n# cur.execute(\"\"\"\n# select date_\n# from happiness\n# order by date_ desc\n# limit 1;\n# \"\"\")\n# date_ = cur.fetchone()[0]\n# conn.close()\n# return date_\n\n\n# def get_happiness_ts(last_date, last_days):\n# \"\"\"\n# Returns the happiness time series.\n#\n# Parameters\n# ----------\n# last_date : datetime.pyi\n# Last base_date in the time period to download.\n# last_days:\n# Number of days previous to the last base_date to download.\n#\n# Examples\n# --------\n# >>> get_happiness_ts(datetime.datetime(2022, 2, 26), 700)\n#\n# Returns\n# -------\n# A dataframe with the time series.\n# \"\"\"\n# conn = psycopg2.connect(\"dbname=%s user=fsagols host=localhost\" %\n# TIMESERIES_DATABASE)\n# cur = conn.cursor()\n# cur.execute(\n# \"\"\"\n# select date_, happiness\n# from happiness\n# where date_ <= %(last_date)s\n# order by date_ desc limit %(last_days)s;\n# \"\"\", {\n# 'last_date': last_date,\n# 'last_days': last_days\n# })\n# answer = cur.fetchall()\n# answer.reverse()\n# answer = [[a[0], a[1]] for a in answer]\n# df = pd.DataFrame(data=answer, columns=['base_date', 'happiness'])\n# df.set_index('base_date', inplace=True)\n# return df\n\ndef download_happiness(start_date, records):\n \"\"\"\n Download happiness records from the url below. 
Happiness records are stored\n into happiness database table.\n\n Parameters\n ----------\n start_date : datetime.pyi\n Initial downloading base_date.\n records : int\n Maximum number of records after start_date to download.\n \"\"\"\n\n LOGGER.debug(\"Downloading happiness time series.\")\n data_json = requests.get(\n 'https://hedonometer.org/api/v1/happiness/?format=json×eries__'\n f'title=en_all&date__gte='\n f'{start_date.strftime(\"%Y-%m-%d\")}&limit={records}')\n data = data_json.json()\n data = [[\n datetime.datetime.strptime(d['date'], \"%Y-%m-%d\"), d['frequency'],\n float(d['happiness'])\n ] for d in data['objects']]\n # conn = psycopg2.connect(\"dbname=%s user=fsagols host=localhost\" %\n # TIMESERIES_DATABASE)\n LOGGER.info(\"Storing happiness time series.\")\n # cur = conn.cursor()\n # cur.executemany(\n # \"\"\"\n # insert into happiness\n # values (%s, %s, %s)\n # on conflict (date_)\n # do nothing;\n # \"\"\", data)\n # conn.commit()\n # conn.close()\n data = sorted(data, key=lambda a: a[0])\n\n with open(\"data.csv\", \"w\", newline=\"\") as archivo:\n archivo_writer = csv.writer(archivo, delimiter=',', quotechar='\"',\n quoting=csv.QUOTE_MINIMAL)\n archivo_writer.writerow(['date', 'frequency', 'happiness'])\n for elemento in data:\n archivo_writer.writerow(elemento)\n archivo.close()\n\n\ndef retrieve_happiness(intervalo_fechas):\n \"\"\"\n Selecciona los valores de felicidad en fechas determinadas\n :param intervalo_fechas: lista en forma similar a los \"Index slices\".\n \"\"\"\n for intervalo in intervalo_fechas:\n fecha_ini = datetime.datetime.strptime(intervalo[0], \"%Y-%m-%d\")\n\n if len(intervalo) > 1:\n fecha_ter = datetime.datetime.strptime(intervalo[1], \"%Y-%m-%d\")\n else:\n fecha_ter = fecha_ini\n\n if len(intervalo) > 2:\n salto = intervalo[2]\n else:\n salto = 1\n\n contador = salto\n with open(\"data.csv\", \"r\") as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=',')\n print(f'\\t Intervalo de fechas: {intervalo}.')\n for row in csv_reader:\n if fecha_ini <= \\\n datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S') <= \\\n fecha_ter:\n if contador == salto:\n print(f'\\t date: {row[\"date\"]}, frequency: {row[\"frequency\"]},'\n f' happiness: {row[\"happiness\"]}')\n contador = 0\n contador += 1\n\n\nif __name__ == \"__main__\":\n LOGGER = loggers.define_logger(\"happiness.log\")\n date = datetime.datetime(2022, 1, 1)\n download_happiness(date, 5000)\n\nretrieve_happiness([['2022-01-01', '2022-05-24', 3], ['2022-02-01', '2022-02-07']])\n# retrieve_happiness([['2022-01-01', '2022-03-27', 6],\n# ['2022-02-01', '2022-02-07', 2], ['2022-04-01']])\n","repo_name":"German-Jordi/Programacion_Avanzada","sub_path":"Tarea_12_German_Jordi_Arreortua.py","file_name":"Tarea_12_German_Jordi_Arreortua.py","file_ext":"py","file_size_in_byte":5643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"37614626920","text":"# Vanilla policy gradient (VPG) using Pytorch, continuous action space\n# VPG Algorithm can be found in https://spinningup.openai.com/en/latest/algorithms/vpg.html, http://joschu.net/docs/thesis.pdf\n# Intrinsic Curiosity Module (ICM): https://arxiv.org/abs/1705.05363\n# MountainCarContinuous-v0 in Gym environment\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport gym\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ndevice = torch.device(\"cpu\")\n\n# policy network 
- gaussian mean and std (output=2)\n# gaussian mean (output=1)\nclass policy_network(nn.Module):\n def __init__(self, input=4, hidden=64, output=2):\n super(policy_network, self).__init__()\n self.fc1 = nn.Linear(input, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fc3 = nn.Linear(hidden, output)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n# state-value estimator (critic)\nclass state_value_network(nn.Module):\n def __init__(self, input=4, hidden=64, output=1):\n super(state_value_network, self).__init__()\n self.fc1 = nn.Linear(input, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fc3 = nn.Linear(hidden, output)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\n# forward dynamics model - s_{t+1} = f(s_t, a_t)\nclass forward_dynamics_network(nn.Module):\n def __init__(self, input=5, hidden=64, output=4):\n super(forward_dynamics_network, self).__init__()\n self.fc1 = nn.Linear(input, hidden)\n self.fc2 = nn.Linear(hidden, hidden)\n self.fc3 = nn.Linear(hidden, output)\n\n def forward(self, input):\n x = F.relu(self.fc1(input))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\ndef choose_action(ob):\n # choose action using current policy pi (continuous action space)\n # assume the variance of Gaussian policy \\sigma = 1.0\n mean_a = pi(torch.tensor(ob, dtype=torch.float32).to(device)).detach()\n mean_a = mean_a.cpu().numpy()\n a = np.random.normal(loc=mean_a, scale=1.0)\n # if a < 0:\n # a_env = 0\n # else:\n # a_env = 1\n return np.array(a)\n\ndef compute_advantage(rewards, states, gamma):\n # get returns of states from a trajectory\n # R_t = r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ...\n returns = rewards.copy()\n advs = rewards.copy()\n states_ep = states.copy()[-len(rewards):]\n for i in reversed(range(len(returns)-1)):\n r = returns[i] + gamma * returns[i+1]\n returns[i] = r\n states_ep = torch.tensor(states_ep, dtype=torch.float32).to(device)\n returns = torch.tensor(returns, dtype=torch.float32).to(device)\n advs = returns - V(states_ep).squeeze(1)\n # advs = returns\n return advs\n\ndef compute_epsilon_adv(states, epsilon):\n # get another adv according to the discrepancy of current state to its average\n states_o = states.copy()\n states = np.array(states)\n means = []\n advs = []\n for i in range(len(states[0])):\n mean_ = np.mean(states[:,i])\n means.append(mean_)\n for i in range(len(states_o)):\n a = states_o[i]\n e = a - means\n cur_error = sum(t*t for t in e)\n advs.append(cur_error)\n if np.random.uniform() < epsilon:\n value = [t * 100 for t in advs]\n else:\n value = [t/t for t in advs]\n return value\n\ndef intrinsic_r(ob, a, ob_):\n # calculated the intrinsic reward as prediction error\n loss = nn.MSELoss(reduction='sum').to(device)\n input_tensor = torch.tensor(np.concatenate((ob, a)), dtype=torch.float32).to(device)\n ob_tensor = torch.tensor(ob, dtype=torch.float32).to(device)\n ob_p = forward_net(input_tensor).detach()\n pred_error = loss(ob_p, ob_tensor)\n return pred_error.item() # as a number\n \ndef model_validate():\n ep_reward = 0\n ob = env.reset()\n done = False\n while not done:\n env.render() \n a = choose_action(ob)\n ob_, r, done, _ = env.step(a)\n ep_reward += r\n ob = ob_\n return ep_reward\n\nK = 10000\nBATCH_SIZE = 500\nGAMMA = 0.95\nETA = 0.9 # regularizer of loss function\nLEARNING_RATE = 0.0005\ndecay = 0.995\n\nenv = gym.make('MountainCarContinuous-v0')\n\nn_actions = 
env.action_space.shape[0]\n# n_actions = 1\nstate_length = env.observation_space.shape[0]\n\n# initialize policy network, forward dynamics network and state-value network\npi = policy_network(input=state_length, output=1).to(device)\n# forward_net = forward_dynamics_network(input=(n_actions+state_length), output=state_length).to(device)\nV = state_value_network(input=state_length).to(device)\n\n# define optimizers\n# params = list(pi.parameters()) + list(forward_net.parameters())\n# global_optimizer = torch.optim.Adam(params, lr=LEARNING_RATE)\nV_optimizer = torch.optim.Adam(V.parameters(), lr=LEARNING_RATE)\npi_optimizer = torch.optim.Adam(pi.parameters(), lr=LEARNING_RATE)\n# forward_net_optimizer = torch.optim.Adam(forward_net.parameters(), lr=LEARNING_RATE)\n\n# define loss\nloss_MSE = nn.MSELoss(reduction='mean').to(device)\n\ntraining_rewards = []\n\n# epsilon = 1.0\nepsilon = 0.0\n\nfor k in range(K):\n # save trajectories\n states = []\n actions = []\n states_ = []\n advantages = torch.tensor([], dtype=torch.float32).to(device)\n # collect trajectories\n ob = env.reset()\n done = False\n rewards = [] \n step_e = 0 # allowed steps in one episode\n total_r = 0 \n while not done:\n a = choose_action(ob)\n ob_, r_e, done, _ = env.step(a)\n # r_i = intrinsic_r(ob, a, ob_) #add intrinsic reward\n # r = r_i + r_e\n r = r_e\n # save trajectories\n states.append(ob)\n actions.append(a)\n rewards.append(r)\n states_.append(ob_)\n ob = ob_\n step_e += 1\n total_r += r_e\n # calculate discounted return and advantages\n advs = compute_advantage(rewards, states, GAMMA)\n advantages = torch.cat((advantages, advs), 0)\n\n adv_epsilon = compute_epsilon_adv(states, epsilon)\n adv_epsilon = torch.tensor(adv_epsilon, dtype=torch.float32).to(device)\n advantages = torch.mul(advantages, adv_epsilon)\n # epsilon = np.max((epsilon*decay, 0.0001))\n\n states = torch.tensor(states, dtype=torch.float32).to(device)\n actions = torch.tensor(actions, dtype=torch.float32).to(device)\n states_actions = torch.cat((states, actions), 1).to(device)\n states_ = torch.tensor(states_, dtype=torch.float32).to(device)\n # update policy pi and forward dynamics model\n # global_optimizer.zero_grad() \n # log_pi = - (((actions - pi(states)))**2) / 2\n # pi_loss = - torch.sum(torch.mul(log_pi.squeeze(), advantages)) # negative: .backward() use gradient descent, (-loss) with gradient descnet = gradient ascent\n # forward_net_loss = loss_MSE(states_, forward_net(states_actions))\n # global_loss = (1-ETA) * pi_loss + ETA * forward_net_loss\n # global_loss.backward(retain_graph=True)\n # global_optimizer.step()\n #\n pi_optimizer.zero_grad() \n log_pi = - (((actions - pi(states)))**2) / 2\n pi_loss = - torch.sum(torch.mul(log_pi.squeeze(), advantages)) # negative: .backward() use gradient descent, (-loss) with gradient descnet = gradient ascent\n pi_loss.backward(retain_graph=True)\n pi_optimizer.step()\n\n V_optimizer.zero_grad() # clear gradient\n v_loss = loss_MSE(advantages, V(states).squeeze())\n v_loss.backward()\n V_optimizer.step()\n\n # validate current policy\n if k % 50 == 0:\n # if k % 500 == 0:\n # training_reward = model_validate()\n # training_rewards.append(training_reward)\n # print('Step: ', k, ' Total reward (model validation): ', training_reward)\n print('Step: ', k, ' Total reward: ', total_r, 'epsilon', epsilon)\n # re-fit state-value network (critic)\n\n training_rewards.append(total_r)\n\n# plt.plot(smooth_reward(ep_rewards, 50))\nplt.plot(training_rewards)\nplt.show()\n\nep_rewards = []\nfor ii in 
range(10):\n ep_reward = model_validate()\n ep_rewards.append(ep_reward)\nprint('Average rewards of last 10 eps: ', np.mean(ep_rewards))\n# Vanilla policy gradient using Pytorch, continuous action space\n","repo_name":"Wang-Xiaoyang/RL-Implementations","sub_path":"2_vanilla_policy_gradient/idea_trial_backup.py","file_name":"idea_trial_backup.py","file_ext":"py","file_size_in_byte":8367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"14401968629","text":"import random\nfrom mullermsm import muller\nmullerforce = muller.muller_force()\nimport scipy.linalg\nfrom matplotlib.pyplot import *\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument('--dt', dest='dt', type=float, default=0.1)\nparser.add_argument('-n', dest='num_frames', type=int, default=100000)\nparser.add_argument('-o', dest='output', default='pos.npy')\n\nargs = parser.parse_args()\n\nkT = 15.0\ndt = args.dt\nmGamma = 1000.0\ntraj_length = args.num_frames \ninitial_x = [random.uniform(-1.5, 1.2), random.uniform(-0.2, 2)]\npositions = muller.propagate(traj_length, initial_x, kT, dt, mGamma, mullerforce)\n\nnp.save(args.output, positions)\n","repo_name":"rmcgibbo/opt-k","sub_path":"muller/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"69"} +{"seq_id":"74027730459","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport sklearn.datasets\n\n\ndef bunch_to_df(bunch):\n df = pd.DataFrame(bunch.data, columns=bunch.feature_names)\n df['target'] = pd.Series(bunch.target)\n return df\n\ndef visualize_df(df):\n pd.plotting.scatter_matrix(df, c=pd.Categorical(df['target']), marker='o')\n plt.show()\n\n\niris = sklearn.datasets.load_iris()\niris_df = bunch_to_df(iris)\n\nprint(iris_df.head())\nprint(iris_df.describe())\n\nvisualize_df(iris_df)\n","repo_name":"fpecek/machine-learning-algorithms","sub_path":"part2/iris_explore.py","file_name":"iris_explore.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"30435651759","text":"\"\"\"\nTest libvirt support features in qemu cmdline.\nBTW it not limited to hypervisors CPU/machine features.\n\"\"\"\nimport re\nimport logging as log\nimport platform\n\nfrom virttest import virsh\nfrom virttest.libvirt_xml import vm_xml\nfrom virttest.utils_test import libvirt\n\nfrom virttest import libvirt_version\n\nfrom avocado.utils import process, astring\n\n\n# Using as lower capital is not the best way to do, but this is just a\n# workaround to avoid changing the entire file.\nlogging = log.getLogger('avocado.' 
+ __name__)\n\n\ndef config_feature_pv_eoi(test, vmxml, **kwargs):\n \"\"\"\n Config libvirt VM XML to enable/disable PV EOI feature.\n\n :param vmxml: VMXML instance\n :param kwargs: Function keywords\n :return: Corresponding feature flag in qem cmdline\n \"\"\"\n # This attribute supported since 0.10.2 (QEMU only)\n if not libvirt_version.version_compare(0, 10, 2):\n test.cancel(\"PV eoi is not supported in current\"\n \" libvirt version\")\n qemu_flags = []\n eoi_enable = kwargs.get('eoi_enable', 'on')\n get_hostos_version = astring.to_text(process.run(\"cat /etc/redhat-release\", shell=True).stdout)\n if re.search(r'(\\d+(\\.\\d+)?)', get_hostos_version) is not None:\n hostos_version = float(re.search(r'(\\d+(\\.\\d+)?)', get_hostos_version).group(0))\n if hostos_version < float(8.1):\n if eoi_enable == 'on':\n qemu_flags.append('+kvm_pv_eoi')\n elif eoi_enable == 'off':\n qemu_flags.append('-kvm_pv_eoi')\n else:\n logging.error(\"Invalid value %s, eoi_enable must be 'on' or 'off'\", eoi_enable)\n elif hostos_version > float(8.0):\n if eoi_enable == 'on':\n qemu_flags.append('kvm-pv-eoi=on')\n elif eoi_enable == 'off':\n qemu_flags.append('kvm-pv-eoi=off')\n else:\n logging.error(\"Invalid value %s, eoi_enable must be 'on' or 'off'\", eoi_enable)\n else:\n test.fail(\"Can not decide the expected qemu cmd line because of no expected hostos version\")\n\n # Create features tag if not existed\n if not vmxml.xmltreefile.find('features'):\n vmxml.features = vm_xml.VMFeaturesXML()\n vmxml_feature = vmxml.features\n if vmxml_feature.has_feature('apic'):\n vmxml_feature.remove_feature('apic')\n vmxml_feature.add_feature('apic', 'eoi', eoi_enable)\n vmxml.features = vmxml_feature\n logging.debug(\"Update VM XML:\\n%s\", vmxml)\n expect_fail = False if 'expect_define_vm_fail' not in kwargs \\\n else kwargs['expect_define_vm_fail']\n result = virsh.define(vmxml.xml, debug=True)\n libvirt.check_exit_status(result, expect_fail)\n if expect_fail:\n libvirt.check_result(result, kwargs.get('expected_msg'))\n return\n return qemu_flags\n\n\ndef config_feature_memory_backing(test, vmxml, **kwargs):\n \"\"\"\n Config libvirt VM XML to influence how virtual memory pages are backed\n by host pages.\n\n :param vmxml: VMXML instance\n :param kwargs: Function keywords\n :return: Corresponding feature flag in qem cmdline\n \"\"\"\n # Both 'nosharepages' and 'locked' are supported since 1.0.6\n if not libvirt_version.version_compare(1, 0, 6):\n test.cancel(\"Element is not supported in current\"\n \" libvirt version\")\n qemu_flags = []\n no_sharepages = \"yes\" == kwargs.get(\"nosharepages\", \"no\")\n locked = \"yes\" == kwargs.get(\"locked\", \"no\")\n if no_sharepages:\n # On RHEL6, the flag is 'redhat-disable-KSM'\n # On RHEL7 & Fedora, the flag is 'mem-merge=off'\n qemu_flags.append(['mem-merge=off', 'redhat-disable-KSM'])\n if locked:\n if not libvirt_version.version_compare(5, 3, 0):\n qemu_flags.append(\"mlock=on\")\n else:\n qemu_flags.append(\"mem-lock=on\")\n memtune_xml = vm_xml.VMMemTuneXML()\n memtune_xml.hard_limit = vmxml.max_mem * 4\n vmxml.memtune = memtune_xml\n vmxml.sync()\n try:\n vm_xml.VMXML.set_memoryBacking_tag(vmxml.vm_name,\n hpgs=False,\n nosp=no_sharepages,\n locked=locked)\n logging.debug(\"xml updated to %s\", vmxml.xmltreefile)\n except Exception as detail:\n logging.error(\"Update VM XML fail: %s\", detail)\n return qemu_flags\n\n\ndef run(test, params, env):\n \"\"\"\n Test libvirt support features in qemu cmdline.\n\n 1) Config test feature in VM XML;\n 2) Try to start VM;\n 
3) Check corresponding feature flags in qemu cmdline;\n 4) Login VM to test feature if necessary.\n \"\"\"\n vm_name = params.get(\"main_vm\", \"avocado-vt-vm1\")\n vm = env.get_vm(vm_name)\n expect_fail = \"yes\" == params.get(\"expect_start_vm_fail\", \"no\")\n expect_define_vm_fail = 'yes' == params.get('expect_define_vm_fail', 'no')\n test_feature = params.get(\"test_feature\")\n # All test case Function start with 'test_feature' prefix\n testcase = globals()['config_feature_%s' % test_feature]\n test_feature_attr = params.get(\"test_feature_attr\", '').split(\",\")\n test_feature_valu = params.get(\"test_feature_valu\", '').split(\",\")\n # Parameters for test case\n if len(test_feature_attr) != len(test_feature_valu):\n test.error(\"Attribute number not match with value number\")\n test_dargs = dict(list(zip(test_feature_attr, test_feature_valu)))\n if expect_define_vm_fail:\n test_dargs.update({'expect_define_vm_fail': expect_define_vm_fail,\n 'expected_msg': params.get('expected_msg', '')})\n if vm.is_alive():\n vm.destroy()\n vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)\n vmxml_backup = vmxml.copy()\n virsh_dargs = {'debug': True, 'ignore_status': False}\n\n if 'ppc64le' in platform.machine().lower() and test_feature == 'pv_eoi':\n if not libvirt_version.version_compare(6, 0, 0):\n test.cancel('Feature %s is supported since version 6.0.0' % test_feature)\n try:\n # Run test case\n qemu_flags = testcase(test, vmxml, **test_dargs)\n if not qemu_flags and expect_define_vm_fail:\n return\n result = virsh.start(vm_name, **virsh_dargs)\n libvirt.check_exit_status(result, expect_fail)\n\n # Check qemu flag\n vm_pid = vm.get_pid()\n with open(\"/proc/%s/cmdline\" % vm_pid) as cmdline_f:\n cmdline_content = cmdline_f.read()\n logging.debug(\"VM cmdline:\\n%s\",\n cmdline_content.replace('\\x00', ' '))\n msg = \"Find '%s' in qemu cmdline? %s\"\n found_flags = []\n index = 0\n for flag in qemu_flags:\n # Here, flag could be a list, so uniform it to list for next\n # step check. 
And, check can pass if any element in the list\n # exist in cmdline\n if not isinstance(flag, list):\n flag = [flag]\n found_f = []\n for f in flag:\n if f in cmdline_content:\n found_f.append(True)\n break\n else:\n found_f.append(False)\n found_flags.append(any(found_f))\n logging.info(msg % (flag, found_flags[index]))\n index += 1\n if False in found_flags:\n test.fail(\"Not find all flags\")\n finally:\n vmxml_backup.sync()\n","repo_name":"autotest/tp-libvirt","sub_path":"libvirt/tests/src/libvirt_qemu_cmdline.py","file_name":"libvirt_qemu_cmdline.py","file_ext":"py","file_size_in_byte":7395,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"69"} +{"seq_id":"14939540799","text":"#!/usr/bin/python3\n\nimport subprocess\n\n\nprint('So did you find anything?\\nyou can send me the link here',flush=True)\n\nlink = input('link: ')\n\nprint('ok, i will click on it',flush=True)\n\nsubprocess.call(\n [\"adb\", \"shell\", f\"am start -a android.intent.action.VIEW -d '{link}' com.alphactf.deepnews\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL,\n)\n","repo_name":"vvxhid/alphaCTF-2022","sub_path":"reverse/deepnews/chall/messenger/messenger.py","file_name":"messenger.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"69"} +{"seq_id":"19076276722","text":"from marshmallow import post_load\nfrom marshmallow.decorators import post_dump\n\nfrom app.extensions.marshmallow_ext import ma\nfrom app.models.commerce import Commerce\nfrom app.models.user import User\n\n\nclass Commerce_Schema(ma.Schema):\n class Meta:\n fields = (\n \"id\",\n \"id_user\",\n \"trading_name\",\n \"company_name\",\n \"cover_path\",\n \"segment\",\n \"description\",\n \"cell_number\",\n \"email\",\n \"street\",\n \"number\",\n \"complement\",\n \"neighborhood\",\n \"city\",\n \"state\",\n \"zipcode\",\n \"created_at\",\n \"updated_at\",\n )\n\n @post_load\n def make_commerce(self, data, **kwargs):\n return Commerce(**data)\n\n @post_dump(pass_many=True)\n def serialize(self, data, many, **kwargs):\n if type(data) is not dict:\n for commerce in data:\n user = User.query.filter_by(id=commerce[\"id_user\"]).first()\n commerce['user'] = {\n 'name': user.name,\n 'email': user.email\n }\n return data","repo_name":"afonsomedeiros/LogoAliAPI","sub_path":"app/serializer/commerce.py","file_name":"commerce.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"19521659901","text":"class Solution:\n def add(self, a: int, b: int) -> int:\n z = 0xffffffff\n a, b = a & z, b & z\n while b != 0:\n a, b = a ^ b, ((a & b) << 1) & z\n if a > 0x7fffffff:\n a = ~(a ^ z)\n return a\n\na, b = -2, 1\nc = Solution()\nprint(c.add(a, b))","repo_name":"zhulf0804/Coding.Python","sub_path":"剑指offer/65_g_h_不用加减乘除做加法.py","file_name":"65_g_h_不用加减乘除做加法.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"69"} +{"seq_id":"380445849","text":"from PIL import Image\nimport random as rd\n\nimgx = 512\nimgy = 512\nimage = Image.new(\"RGB\", (imgx, imgy))\nfor x in range(imgx):\n\tfor y in range(imgy):\n\t\timage.putpixel((x,y),(0,0,0))\nsnakenum = 65\nr= 0\ng = 0\nb = 0\nlol = 0\nglobal turn\nglobal snakey\nturn = 0\n\n\ndef pr():\n\tglobal turn, snakey\n\tturn = snakey\n\nfor x in range(snakenum):\n\tsnakey = 0\n\tr = rd.randrange(255)\n\tg = 
rd.randrange(255)\n\tb = rd.randrange(255)\n\tsnakex = rd.randrange(imgx)\n\t\n\tfor x in range(3000):\n\t\tif snakey <= 510:\n\t\t\tlol = rd.randrange(10)\n\t\t\tif lol < 2:\n\t\t\t\tif snakex != 0:\n\t\t\t\t\tsnakex -= 1\n\t\t\t\telse:\n\t\t\t\t\tif snakey < imgy:\n\t\t\t\t\t\tsnakey +=1\n\t\t\t\timage.putpixel((snakex, snakey),(r,g,b))\n\t\t\t\tpr()\n\t\t\telif lol > 7:\n\t\t\t\tif snakex != imgx:\n\t\t\t\t\tsnakex += 1\n\t\t\t\telse:\n\t\t\t\t\tif snakey < imgy:\n\t\t\t\t\t\tsnakey += 1\n\t\t\t\timage.putpixel((snakex, snakey),(r,g,b))\n\t\t\t\tpr()\n\t\t\telse:\n\t\t\t\tif snakey < imgy:\n\t\t\t\t\tsnakey += 1\n\t\t\t\timage.putpixel((snakex, snakey),(r,g,b))\n\t\t\t\tpr()\n\nimage.save(\"snake.png\", \"PNG\")","repo_name":"miaalexkatz/CS550","sub_path":"snakes.py","file_name":"snakes.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"12370453730","text":"#\n# @lc app=leetcode.cn id=701 lang=python3\n#\n# [701] 二叉搜索树中的插入操作\n# insert-into-a-binary-search-tree\n\n# 给定二叉搜索树(BST)的根节点和要插入树中的值,将值插入二叉搜索树。\n# 返回插入后二叉搜索树的根节点。\n# 保证原始二叉搜索树中不存在新值。\n# 注意,可能存在多种有效的插入方式,只要树在插入后仍保持为二叉搜索树即可。 你可以返回任意有效的结果。\n\n# @lc code=start\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def insertIntoBST(self, root: TreeNode, val: int) -> TreeNode:\n return self.insertIntoBST1(root, val)\n\n @staticmethod\n def insertIntoBST1(root, val):\n def helper(node):\n if node is None:\n return TreeNode(val)\n if val > node.val:\n node.right = helper(node.right)\n else:\n node.left = helper(node.left)\n return node\n return helper(root)\n\n @staticmethod\n def insertIntoBST2(root, val):\n if root is None:\n return TreeNode(val)\n\n node, parent = root, root\n while node:\n parent = node\n node = parent.left if val < parent.val else parent.right\n \n if val < parent.val:\n parent.left = TreeNode(val)\n elif val > parent.val:\n parent.right = TreeNode(val)\n\n return root\n# @lc code=end\n\n","repo_name":"yekingyan/leetcode","sub_path":"701.二叉搜索树中的插入操作.py","file_name":"701.二叉搜索树中的插入操作.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"35528340811","text":"import numpy as np\nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n long = min(len(matrix), len(matrix[0]))\n matrix = np.array(matrix).astype('int')\n\n for wid in range(long,0,-1):\n i,j = 0, 0\n for i in range(len(matrix) - wid + 1):\n for j in range(len(matrix[0]) - wid + 1):\n print(i,j,wid)\n if np.sum(matrix[i:i+wid,j:j+wid]) == wid**2:\n return wid**2\n return 0\n \n############################################################################# \n \nclass Solution:\n def maximalSquare(self, matrix: List[List[str]]) -> int:\n m = len(matrix)\n n = len(matrix[0])\n cache = [[int(matrix[i][j]) if(i == 0 or j == 0) else 0 for j in range(n)] for i in range(m)]\n \n maxL = 0\n for i in range(m):\n for j in range(n):\n if(matrix[i][j] == '1' and i > 0 and j > 0):\n cache[i][j] = min(cache[i-1][j], cache[i][j-1], cache[i-1][j-1]) + 1\n maxL = max(cache[i][j], maxL)\n \n \n return(maxL**2) \n","repo_name":"KaiaX926/Leetcode","sub_path":"221 Maximal Square.py","file_name":"221 Maximal 
Square.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"70524949659","text":"\"\"\"Classes, functions, and data structures to create \"Builders\" for circuits.\n\"\"\"\n\nfrom functools import partial\nfrom typing import Any, Callable\n\nfrom qcware_transpile.circuits import Circuit\nfrom qcware_transpile.gates import Dialect\nfrom qcware_transpile.instructions import Instruction\n\n\nclass Builder:\n def __init__(self, dialect: Dialect):\n self.dialect = dialect\n self.instructions: list[Instruction] = []\n\n def add_instruction(\n self, gate_name: str, bits: list, parameters: dict, metadata: dict\n ) -> \"Builder\":\n self.instructions.append(\n Instruction(\n gate_def=self.dialect.gate_named(gate_name),\n bit_bindings=bits,\n parameter_bindings=parameters,\n metadata=metadata,\n )\n )\n return self\n\n\ndef create_builder(dialect: Dialect, to_native_func: Callable[[Circuit], Any]):\n \"\"\"\n Creates a builder object, starting from a basically empty class and\n adding functions to \"build\" a circuit from scratch based on the gates\n in the dialect and the parameters involved.\n \"\"\"\n result = Builder(dialect)\n\n def create_method(gate_def):\n def add_this_gate(self: Builder, *args, **kwargs):\n gate_name = gate_def.name\n bits = list(args)\n parameters: dict = kwargs\n metadata: dict = dict()\n self.add_instruction(gate_name, bits, parameters, metadata)\n return self\n\n return add_this_gate\n\n for gate_def in dialect.gate_defs:\n\n # now bind this method to the object instance. It may be better to create\n # a class dynamically and then create members of that class; let's try this\n # first.\n # see https://newbedev.com/adding-a-method-to-an-existing-object-instance\n # for descriptions\n setattr(result, gate_def.name, partial(create_method(gate_def), result))\n\n setattr(\n result,\n \"circuit\",\n partial(\n lambda self: Circuit.from_instructions(\n self.dialect.name, self.instructions\n ),\n result,\n ),\n )\n setattr(\n result,\n \"native_circuit\",\n partial(lambda self: to_native_func(self.circuit()), result),\n )\n\n return result\n","repo_name":"qcware/qcware_transpile","sub_path":"qcware_transpile/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"35838207413","text":"from django.shortcuts import render, redirect\nfrom .models import *\nfrom django.contrib import messages\nimport bcrypt\nfrom datetime import date\n\ndef register(request):\n\treturn render(request, 'index.html') \n\ndef createuser(request):\n\tprint(request.POST)\n\terrors = User.objects.userValidator(request.POST)\n\tif len(errors) > 0:\n\t\tfor key, value in errors.items():\n\t\t\tmessages.error(request, value)\n\t\treturn redirect('/')\n\telse:\n\t\thashedpassword = bcrypt.hashpw(request.POST ['pw'].encode(), bcrypt.gensalt()).decode()\n\t\tnewuser = User.objects.create(firstName = request.POST['fname'], lastName = request.POST ['lname'], email = request.POST['useremail'], password = hashedpassword )\n\t\tprint (newuser.id)\n\t\trequest.session['loggedInID'] = newuser.id\n\treturn redirect('/book')\n\ndef book(request):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tcontext = {\n\t\t'loggedUser': loggedInUser,\n\t\t'allbooks': Book.objects.all()\n\t}\n\treturn render 
(request, 'book.html', context)\t\n\ndef login(request):\n\tvalidationErrors = User.objects.loginValidator(request.POST)\n\tif len(validationErrors) > 0:\n\t\tfor key, value in validationErrors.items():\n\t\t\tmessages.error(request, value)\n\t\treturn redirect('/')\n\tloggedInUser = User.objects.get(email = request.POST['useremail'])\n\tprint(\"*******\")\n\tprint(loggedInUser)\n\tprint(\"********\")\n\trequest.session['loggedInID'] = loggedInUser.id\n\treturn redirect('/book')\n\ndef add(request):\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tnewBook = Book.objects.create(title = request.POST['title'], desc = request.POST['desc'], creator = loggedInUser)\n\treturn redirect('/book')\n\ndef delete(request, bookId):\n\tbook = Book.objects.get (id = bookId)\n\tbook.delete()\n\treturn redirect('/book')\n\ndef edit(request, bookId):\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tbook = Book.objects.get (id = bookId)\n\tcontext = {\n\t'loggedUser' : loggedInUser,\n\n\t}\n\treturn redirect('edit.html', context)\n\ndef update(request, bookId):\n\tprint(request.POST)\n\tbook = Book.objects.get(id = bookId)\n\tbook.title = request.POST['title']\n\tbook.desc = request.POST['desc']\n\tbook.save()\n\treturn redirect('/book')\n\ndef display(request, bookId):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tcontext = {\n\t\t'loggedUser' : loggedInUser,\n\t\t'book': Book.objects.get(id=bookId)\n\t}\n\treturn render(request, 'showbook.html', context)\n\ndef addfavor(request, bookId):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tbook = Book.objects.get(id=bookId)\n\tbook.like.add(loggedInUser)\n\treturn redirect('/book')\n\t\ndef removefavor(request, bookId):\n\tif 'loggedInID' not in request.session:\n\t\treturn redirect('/')\n\tprint(request.POST)\n\tloggedInUser = User.objects.get(id=request.session['loggedInID'])\n\tbook = Book.objects.get(id=bookId)\n\tbook.like.remove(loggedInUser)\n\treturn redirect('/book')\n\ndef logout(request):\n\trequest.session.clear()\n\treturn redirect('/')","repo_name":"MoDev20/Zaki","sub_path":"favoriteBooksApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"83055538","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 14 16:44:42 2021\r\n@author: Eric Born\r\n!!! TODO !!!\r\ncreate room class which stores description text and inventory list for items \r\nto be taken by player\r\nFind_Item # looks for item of same class \r\nKilled\r\nShowDeathScreen\r\nRestart\r\n\"\"\"\r\n#import inspect\r\nimport random as rand\r\n\r\nlocation_dict: dict = {'start': ('This is the starting room, choose to inspect'\r\n ' the room by typing inspect. Move to another area by '\r\n 'typing move and the direction. Pickup an item by '\r\n 'typing pickup item name. Attack by typing attack and '\r\n 'the monsters name. Equip an item by typing equip and '\r\n 'the items name. Use an item by typing use and the '\r\n 'items name. Check your stats by typing stats.'),\r\n 'north': ('The north side of the castle, There is a '\r\n 'rusty sword lying on the ground. 
You can move south'),\r\n 'east': 'The east side of the castle, you can move west',\r\n 'south': 'The south side of the castle, you can move north',\r\n 'west': 'The west side of the castle, you can move to the end',\r\n 'end': 'The dragon'}\r\n\r\ndamage_type_dict: dict = {'Physical': 'physical_resistance',\r\n 'Cold': 'cold_resistance',\r\n 'Fire': 'fire_resistance',\r\n 'Lightning': 'lightning_resistance'}\r\n\r\ngame_active = True \r\n\r\nclass Location:\r\n def __init__(self, location: str):\r\n self.location = location\r\n \r\n #def Get_Description(self):\r\n \r\n\r\nclass BaseItem:\r\n\r\n # base item constructor\r\n def __init__(self, name: str, description: str, use_action: str, \r\n quantity: int):\r\n self.name = name\r\n self.description = description\r\n self.use_action = use_action\r\n self.quantity = quantity\r\n\r\n # set the items quantity\r\n def set_quantity(self, quantity):\r\n if self.quantity + quantity >= 0:\r\n self.quantity = self.quantity + quantity\r\n else:\r\n return('Invalid amount')\r\n\r\n # used to get the items quantity\r\n def get_quantity(self):\r\n return self.quantity\r\n \r\n # checks if an item is valid\r\n # def check_if_item(item):\r\n # return isinstance(item, BaseItem)\r\n \r\n def use_item(item):\r\n return()\r\n \r\nclass FoodItem(BaseItem):\r\n heal_amount: int = 5\r\n \r\n def use_item(self, player: str, item: str):\r\n # check for valid\r\n if(player and player.inventory.Find_Item(item)):\r\n player.Set_Health(self.heal_amount)\r\n else:\r\n return()\r\n\r\nclass Melee_Weapon(BaseItem):\r\n slot: str = ''\r\n weapon_type = 'Fists'\r\n equipped = 'n'\r\n damage_amount: list = [1, 3]\r\n damage_type: str = 'Physical'\r\n attack_type: str = 'Punch'\r\n hit_chance: int = 5\r\n \r\nclass Armor(BaseItem):\r\n slot: str = ''\r\n equipped = 'n'\r\n armor_type: str = ''\r\n stats: dict = {'physical_resistance': 0,\r\n 'cold_resistance': 0,\r\n 'lightning_resistance': 0,\r\n 'fire_resistance': 0}\r\n \r\n \r\n# create a weapon\r\nrusty_sword = Melee_Weapon('Rusty Sword', 'A Rusty Sword', 'Slash', 1)\r\n\r\nrusty_breastplate = Armor('Rusty Breastplate', 'A Rusty Breastplate', 'Equip', 1)\r\nrusty_breastplate.slot = 'body_armor'\r\nrusty_breastplate.stats['physical_resistance'] = 5\r\n\r\n# equip_dict = {'hand_slot_1': Melee_Weapon('Fists', 'Punching machines', 'Punch', 1),\r\n# 'hand_slot_2': 'Empty',\r\n# 'helmet': 'Empty',\r\n# 'body_armor': 'Empty',\r\n# 'gloves': 'Empty',\r\n# 'boots': 'Empty',\r\n# 'ring1': 'Empty',\r\n# 'ring2': 'Empty',\r\n# 'amulet': 'Empty'}\r\n\r\n# equip_dict[rusty_breastplate.slot] = rusty_breastplate\r\n# print(equip_dict[rusty_breastplate.slot])\r\n\r\n\r\n# if rusty_breastplate.slot in equip_dict:\r\n# print('yes')\r\n# else:\r\n# print('no')\r\n\r\n# def Equip_Item(self, item):\r\n# if self.inventory.Find_Item(item):\r\n# if item.slot in self.equip_dict:\r\n# self.equip_dict[item.slot] = item\r\n \r\n# else:\r\n# return('No slot')\r\n\r\n\r\n# class Command():\r\n# def execute():\r\n# return()\r\n \r\n# class Move(Command):\r\n \r\n\r\nclass Inventory:\r\n def __init__(self):\r\n self.item_list = []\r\n \r\n # try to add an item to the players inventory\r\n # just performs checks then performs add to inventory if successful\r\n def Try_Add_Item(self, item_to_give):\r\n if (item_to_give.get_quantity() > 0):\r\n self.Add_Item(item_to_give)\r\n else:\r\n return('Invalid item')\r\n \r\n # add the item to the players item list\r\n def Add_Item(self, item: str):\r\n new_item = item\r\n 
new_item.set_quantity(item.get_quantity())\r\n self.item_list.append(new_item)\r\n #return(new_item)\r\n \r\n def Remove_Item(self, item: str):\r\n if item in self.item_list:\r\n self.item_list.remove(item)\r\n \r\n # returns all inventory items\r\n def Get_Inventory(self):\r\n if len(self.item_list) > 0:\r\n for item in self.item_list:\r\n print(item.name)\r\n else:\r\n print('No items')\r\n \r\n def Find_Item(self, item: str):\r\n if len(self.item_list) > 0:\r\n for inv_item in self.item_list:\r\n if item == inv_item:\r\n print(item.name)\r\n return(inv_item)\r\n else:\r\n print('No Items')\r\n\r\nclass Player:\r\n \r\n # Create a dict with available actions?\r\n #actions: dict = {}\r\n \r\n # physical_resistance: int = 5\r\n # cold_resistance: int = 1\r\n # lightning_resistance: int = 1\r\n # fire_resistance: int = 1\r\n \r\n resistance_dict: dict = {'physical_resistance': 5,\r\n 'cold_resistance': 1,\r\n 'lightning_resistance': 1,\r\n 'fire_resistance': 1}\r\n\r\n # initialize fists as players weapon\r\n \r\n \r\n equip_dict = {'hand_slot_1': Melee_Weapon('Fists', 'Punching machines', 'Punch', 1),\r\n 'hand_slot_2': 'Empty',\r\n 'helmet': 'Empty',\r\n 'body_armor': 'Empty',\r\n 'gloves': 'Empty',\r\n 'boots': 'Empty',\r\n 'ring1': 'Empty',\r\n 'ring2': 'Empty',\r\n 'amulet': 'Empty'}\r\n \r\n # equipment = [hand_slot_1, hand_slot_2, helmet, body_armor, gloves, boots,\r\n # ring1, ring2, amulet]\r\n \r\n # set starting accuracy and evasion to 5\r\n accuracy: int = 5\r\n evasion_rating: int = 5\r\n \r\n # Player constructor\r\n def __init__(self, name: str, location: str = 'Start', health: int = 100):\r\n self.name = name\r\n self.health = health\r\n self.inventory = Inventory()\r\n self.location = location \r\n \r\n def Player_command():\r\n command = input('Please choose a command: ')\r\n # if command == 'quit':\r\n # quit_command()\r\n \r\n \r\n def Return_status(self):\r\n print('Your name is ' + str(self.name) + \r\n '\\nYour location is ' + str(self.location) + \r\n '\\nYour health is ' + str(self.Get_health())) #+\r\n # '\\nYour hunger is ' + str(self.hunger) + \r\n # '\\nYour thirst is ' + str(self.thirst))\r\n\r\n \r\n # used to move the player\r\n def Set_Location(self, location: str):\r\n if location in location_dict:\r\n self.location = location\r\n return('Moved to the ' + str(location))\r\n else:\r\n return('Invalid move')\r\n \r\n # used to find where the player currently is\r\n def Get_Location(self):\r\n return(self.location.lower())\r\n \r\n def Inspect_Room(self):\r\n print(location_dict[self.Get_Location()])\r\n \r\n # moved to inventory class\r\n # used to pick up items\r\n # def Loot_Item(self, item_to_give):\r\n # self.item_list.append(item_to_give)\r\n \r\n # kill event for the player\r\n def Killed(self, damage_event, damage_causer):\r\n print('You were killed by ' + str(damage_causer) + ' with a ' + \r\n str(damage_event))\r\n #return(\"Dead\")\r\n #ShowDeathScreen(damage_causer)\r\n \r\n # set the players health\r\n def Set_Health(self, change_amount):\r\n self.health = self.health + change_amount\r\n \r\n # !!! 
TODO !!!\r\n # need to find a better method for positive/negative stat changes\r\n def Set_Stats(self, direction, item):\r\n if direction == 'positive':\r\n for resistance in item.stats:\r\n self.resistance_dict[resistance] = (self.resistance_dict[resistance] + \r\n item.stats[resistance]) \r\n else:\r\n for resistance in item.stats:\r\n self.resistance_dict[resistance] = (self.resistance_dict[resistance] - \r\n item.stats[resistance]) \r\n def Get_Stats(self):\r\n for resistance in self.resistance_dict:\r\n print(self.resistance_dict[resistance])\r\n\r\n # damage the player,\r\n # takes damage event, amount and causer\r\n # sets health with negative change_amount\r\n def Take_Damage(self, damage_event, change_amount, damage_causer): \r\n \r\n self.Set_Health(-change_amount)\r\n # concat self, type of attack, amount and attacker then return\r\n damage_message = (str(self.name) + ' was hit by a ' + str(damage_event) \r\n + ' for ' + str(change_amount) + \r\n ' points of damage from ' + str(damage_causer.name) \r\n + '!')\r\n\r\n # check if lost all health, otherwise return damage message\r\n if self.health <= 0:\r\n self.Killed(damage_event, damage_causer)\r\n else: \r\n return(damage_message)\r\n\r\n # check the players health \r\n def Get_health(self):\r\n return(self.health)\r\n \r\n # use item and call remove_item\r\n def Use_Item(self, item):\r\n if self.inventory.Find_Item(item):\r\n item.use_item(self, item)\r\n self.Remove_Item(item)\r\n else:\r\n return('No item')\r\n \r\n # removes an item from the inventory item_list\r\n def Remove_Item(self, item):\r\n self.inventory.item_list.remove(item)\r\n \r\n # equip item\r\n # check if item in inventory, has a valid slot.\r\n # if already equipped, call unequip\r\n # otherwise set equip to yes, put in dict and change stats\r\n def Equip_Item(self, item):\r\n if self.inventory.Find_Item(item):\r\n if item.slot in self.equip_dict:\r\n if item.equipped == 'y':\r\n self.Unequip_Item(item)\r\n return('Unequipped item') \r\n else:\r\n self.equip_dict[item] = item\r\n self.Set_Stats('positive', item)\r\n item.equipped = 'y'\r\n else:\r\n return('No slot found')\r\n else:\r\n return('No item found')\r\n\r\n # unequip item\r\n # check if item is in equipment dictionary and is equipped\r\n # set dict to empty, subtract stats, set equipped to no\r\n def Unequip_Item(self, item):\r\n if item in self.equip_dict and item.equipped == 'y':\r\n self.equip_dict[item] = 'Empty'\r\n self.Set_Stats('negative', item)\r\n item.equipped = 'n'\r\n return('Unequipped item')\r\n else:\r\n return('No item')\r\n\r\n # check if player is hit by enemy\r\n def hit_check(self, attacker):\r\n \r\n # players evasion and resistance\r\n evade: int = self.evasion_rating\r\n res_dict: dict = self.resistance_dict\r\n \r\n # cold_res: int = attacked.resistance_dict['cold_resistance']\r\n # lightning_res: int = attacked.resistance_dict['lightning_resistance']\r\n # fire_res: int = attacked.'fire_resistance'\r\n \r\n # attackers accuracy and weapon\r\n accuracy = attacker.accuracy\r\n weapon = attacker.weapon\r\n \r\n # miss check\r\n # if random 0-99 > weapon hit chance * character accuracy, miss is true\r\n # Base chance to miss is 75%\r\n if (rand.randrange(0, 100) > \r\n rand.randrange(0, weapon.hit_chance * accuracy)):\r\n return('Miss')\r\n \r\n # if 0-evasion_rating > random 0-99, evade is true\r\n # base chance to evade is 5%\r\n elif (rand.randrange(0, evade) >\r\n rand.randrange(0, 100)):\r\n return('Evade')\r\n \r\n # if neither a miss or an evade, the hit landed\r\n 
else:\r\n # Calculate damage\r\n # +1 included to hit weapons cap number \r\n # since range stops 1 below the value\r\n damage_roll: int = rand.randrange(weapon.damage_amount[0],\r\n weapon.damage_amount[1] + 1)\r\n \r\n # calculate damage after resistance\r\n # damage roll - the roll multiplied by resistance for particular\r\n # weapon type\r\n damage_roll: int = damage_roll - (damage_roll * \r\n (res_dict[damage_type_dict[\r\n weapon.damage_type]] * 0.01))\r\n \r\n return(self.Take_Damage(weapon.use_action, damage_roll, attacker))\r\n \r\n \r\n # def Melee_Attack(self, weapon):\r\n # damage = self.hit_check(attacked, damage_type)\r\n\r\n\r\n# class Enemy(Player):\r\n# self.enemy_type = enemy_type\r\n\r\n\r\n\r\nTim = Player('Tim')\r\n\r\nTim.inventory.Add_Item(rusty_sword)\r\nTim.inventory.Add_Item(rusty_breastplate)\r\n\r\nTim.inventory.Get_Inventory()\r\n\r\nTim.Get_Stats()\r\n\r\nTim.Equip_Item(rusty_breastplate)\r\n\r\n# Tim.Get_Stats()\r\n\r\nTim.Unequip_Item(rusty_breastplate)\r\n\r\n# Tim.Get_Stats()\r\n\r\n# inv_test = Inventory()\r\n\r\n# inv_test.Add_Item(rusty_breastplate)\r\n\r\n# equip_dict = {'hand_slot_1': Melee_Weapon('Fists', 'Punching machines', 'Punch', 1),\r\n# 'hand_slot_2': 'Empty',\r\n# 'helmet': 'Empty',\r\n# 'body_armor': 'Empty',\r\n# 'gloves': 'Empty',\r\n# 'boots': 'Empty',\r\n# 'ring1': 'Empty',\r\n# 'ring2': 'Empty',\r\n# 'amulet': 'Empty'}\r\n \r\n# equip item\r\n# TODO\r\n# add check for already equipped\r\n\r\n# print(rusty_breastplate.slot)\r\n\r\n# test_stats = {'phys': 5}\r\n\r\n# if inv_test.Find_Item(rusty_breastplate):\r\n# if rusty_breastplate.slot in equip_dict:\r\n# if rusty_breastplate.equipped == 'y':\r\n# #Unequip_Item(rusty_breastplate)\r\n# equip_dict[rusty_breastplate.slot] = 'Empty'\r\n# #Set_Stats('negative', item)\r\n# test_stats['phys'] -= 5 \r\n# else:\r\n# #print(equip_dict[rusty_breastplate.slot])\r\n# equip_dict[rusty_breastplate.slot] = rusty_breastplate\r\n# test_stats['phys'] += 5\r\n# rusty_breastplate.equipped = 'y'\r\n# else:\r\n# print('No slot')\r\n\r\n\r\n\r\n# def Equip_Item(self, item):\r\n# if self.inventory.Find_Item(item):\r\n# if item.slot in self.equip_dict:\r\n# if item.equipped == 'y':\r\n# Unequip_Item(item) \r\n# else:\r\n# self.equip_dict[item] = item\r\n# self.Set_Stats('positive', item)\r\n# item.equipped = 'y'\r\n# else:\r\n# return('No slot')\r\n\r\n# # unequip item\r\n# def Unequip_Item(self, item):\r\n# if item in self.equip_dict:\r\n# self.equip_dict[item] = 'Empty'\r\n# self.Set_Stats('negative', item)\r\n# else:\r\n# return('No item')\r\n\r\n# Jim = Player('Jim')\r\n\r\n# Tim.Get_health()\r\n\r\n# Tim.hit_check(Jim)\r\n\r\n# #Tim.Take_Damage('Slash', -10, 'dragon')\r\n\r\n# #Tim.Take_Damage('Slash', -100, 'dragon')\r\n\r\n# Tim.Get_health()\r\n\r\n# apple = FoodItem('Apple', 'A bruised red apple', 'Eat', 1)\r\n\r\n# Tim.inventory.Try_Add_Item(apple)\r\n\r\n# Tim.inventory.Get_Inventory()\r\n\r\n# Tim.Use_Item(apple)\r\n\r\n# Tim.Inspect_Room()\r\n\r\n# Tim.Set_Location('west')\r\n\r\n\r\n# test_list = [apple, apple, 'apple']\r\n# print(test_list[0].name)\r\n\r\n# def move_player(room_selection):\r\n# print(room_text)\r\n\r\n#class commands():\r\n \r\n\r\n# while game_active: \r\n \r\n# # ask player for name or quit command, if quit, run quit_command\r\n# PLAYER_NAME = input('Please choose a name or enter \"quit\" to exit: ')\r\n \r\n# if PLAYER_NAME == 'quit':\r\n# print('Please play again soon!')\r\n# game_active = False\r\n# else:\r\n# player_controller = Player(PLAYER_NAME)\r\n# 
player_controller.Return_status()\r\n# print('You must journey to the final room and defeat the dragon to' \r\n# 'win the game. \\nCollect items along the way to aid you in'\r\n# 'your quest. \\nType look around to examine the room you''re'\r\n# 'in. \\nType \"quit\" to exit')\r\n \r\n# player_controller.Player_command()\r\n \r\n ","repo_name":"ericborn/RPG","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":17425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"38915088539","text":"#!/usr/bin/python\nimport math\nimport rospy\nimport pickle\nimport math\nimport cv2\nimport sys\nimport os\nimport rosbag\nimport multiprocessing\nimport time\nfrom numpy.linalg import eig\nfrom os import devnull\nfrom sklearn.cluster import DBSCAN\nfrom contextlib import contextmanager, redirect_stderr, redirect_stdout\nfrom tf_bag import BagTfTransformer\nfrom scipy.spatial.transform import Rotation as R\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncombinePath = \"../data/results/lanoising\"\nmodelName = \"scans-07-05-22-16_07_18.pth\"\n\ndatasetPath = \"../data/results/maskrcnn_scans\"\ndatasetPath = \"/home/broughtong/external/broughtong/imgs/\"\noutputPath = \"../data/results/maskrcnn_scans_reprocessed\"\noutputPath = \"/home/broughtong/external/broughtong/maskrcnn_scans_reprocessed\"\ncombinedOutPath = \"../data/results/maskrcnn_scans_rectified\"\ncombinedOutPath = \"/home/broughtong/external/broughtong/maskrcnn_scans_rectified\"\n\n@contextmanager\ndef suppress_stdout_stderr():\n with open(devnull, 'w') as fnull:\n with redirect_stderr(fnull) as err, redirect_stdout(fnull) as out:\n yield (err, out)\n\nclass Converter(multiprocessing.Process):\n def __init__(self, path, filename):\n multiprocessing.Process.__init__(self)\n\n self.filename = filename\n self.path = path\n\n def run(self):\n\n print(\"Process spawned for file %s\" % (self.filename), flush = True)\n\n with open(os.path.join(datasetPath, self.filename), \"rb\") as f:\n self.data = pickle.load(f)\n #combinefn = os.path.join(combinePath, self.filename.split(\".pickle\")[0]+\".pickle\")\n #with open(combinefn, \"rb\") as f:\n # self.combinedata = pickle.load(f)\n\n self.convert()\n self.data = None\n \n os.makedirs(os.path.join(outputPath, self.path), exist_ok=True)\n with open(os.path.join(outputPath, self.path, self.filename + \".annotations\"), \"wb\") as f:\n pickle.dump(self.annotations, f, protocol=2)\n\n def convert(self):\n\n annotations = []\n for idx in range(len(self.data[0][\"boxes\"])):\n box = self.data[0][\"boxes\"][idx]\n label = self.data[0][\"labels\"][idx]\n score = self.data[0][\"scores\"][idx]\n mask = self.data[0][\"masks\"][idx]\n\n if score < 0.9:\n continue\n\n x = (float(box[1]) + float(box[3])) / 2\n y = (float(box[0]) + float(box[2])) / 2\n res = 1024\n scale = 25\n x = (x - (res/2)) / scale\n y = (y - (res/2)) / scale\n\n mask = mask[math.floor(box[1]):math.ceil(box[3]), math.floor(box[0]):math.ceil(box[2])]\n\n coords = []\n for row in range(len(mask)):\n for val in range(len(mask[row])):\n mask[row][val] = (mask[row][val])*255\n if mask[row][val] > 50:\n coords.append([row, val])\n\n cov = np.cov(np.transpose(coords))\n w, v = eig(cov)\n bigIdx = 0\n if w[1] > w[0]:\n bigIdx = 1\n ev = v[bigIdx]\n rot = -math.atan2(ev[1], ev[0])\n annotation = [x, y, rot]\n annotations.append(annotation)\n self.annotations = annotations\n\nif __name__ == \"__main__\":\n \n #extract result from network output into files\n jobs = 
[]\n for files in os.walk(os.path.join(datasetPath, modelName)):\n for filename in files[2]:\n if filename[-7:] == \".pickle\":\n if \".annotations.\" not in filename:\n jobs.append(Converter(modelName, filename))\n print(\"Spawned %i processes\" % (len(jobs)), flush = True)\n cpuCores = 50\n limit = cpuCores\n batch = cpuCores\n for i in range(len(jobs)):\n print(\"%i frame of %i\" % (i, len(jobs)))\n if i < limit:\n jobs[i].start()\n else:\n for j in range(limit):\n try:\n jobs[j].join()\n except ValueError:\n pass\n jobs[j].close()\n limit += batch\n jobs[i].start()\n\n print(\"Jobs finished, checking them too\", flush=True)\n for job in jobs:\n try:\n job.join()\n #print(\"Job fine\", flush=True)\n except:\n #print(\"Job not fine\", flush=True)\n pass\n\n print(\"All jobs checked\", flush=True)\n\n #now we merge those detections into the original file structures\n #basically bring it into a common format\n combinableFilenames = []\n for files in os.walk(os.path.join(outputPath, modelName)):\n for filename in files[2]:\n if filename[-12:] == \".annotations\":\n combinableFilenames.append(os.path.join(files[0], filename.split(\".\")[0]))\n combinableFilenames = list(set(combinableFilenames))\n\n print(\"Combining %i files\" % (len(combinableFilenames)))\n\n for base in combinableFilenames:\n\n print(\"Combining bag\", base)\n modelPath = base.split(\"/\")[-2]\n print(modelPath)\n\n #open combinable file\n combineFile = \"\"\n for files in os.walk(combinePath):\n for filename in files[2]:\n if filename.split(\".\")[0] == base.split(\"/\")[-1]:\n subPath = files[0][len(combinePath)+1:]\n combineFile = os.path.join(subPath, filename)\n break\n\n if combineFile == \"\":\n print(\"Error combining \", filename)\n break\n\n data = []\n with open(os.path.join(combinePath, combineFile), \"rb\") as f:\n data = pickle.load(f)\n\n readyFiles = []\n for files in os.walk(outputPath):\n for filename in files[2]:\n print(filename)\n if filename[-12:] == \".annotations\":\n if filename.split(\".\")[0] == base.split(\"/\")[-1]:\n readyFiles.append(filename)\n\n print(len(readyFiles), len(data[\"ts\"]))\n #if len(readyFiles) != len(data[\"ts\"]):\n # print(\"Warning, frame mismatch\", base)\n\n data[\"maskrcnn\"] = []\n for i in range(len(data[\"scans\"])):\n data[\"maskrcnn\"].append([])\n\n #open each file, add it to the correct frame\n for filename in readyFiles:\n idx = int(filename.split(\".pickle-\")[1].split(\".\")[0])\n with open(os.path.join(outputPath, modelPath, filename), \"rb\") as f:\n annotations = pickle.load(f)\n data[\"maskrcnn\"][idx] = annotations\n \n #for i in range(len(readyFiles)):\n # if data[\"maskrcnn\"][i] == None:\n # print(\"Warning, empty frame found\")\n\n os.makedirs(os.path.join(combinedOutPath, modelPath), exist_ok=True)\n print(\"Saving to %s\" % (os.path.join(combinedOutPath, modelPath, base.split(\"/\")[-1] + \".bag.pickle\")))\n with open(os.path.join(combinedOutPath, modelPath, base.split(\"/\")[-1] + \".bag.pickle\"), \"wb\") as f:\n pickle.dump(data, f, protocol=2)\n","repo_name":"broughtong/car-detector","sub_path":"evaluation/prepare_maskrcnn.py","file_name":"prepare_maskrcnn.py","file_ext":"py","file_size_in_byte":7019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"31818841692","text":"from codecs import open\nfrom os import path\n\nfrom setuptools import setup\n\nbasedir = path.abspath(path.dirname(__file__))\n\nwith open(path.join(basedir, \"README.md\"), encoding=\"utf-8\") as f:\n 
long_description = f.read()\n\nsetup(\n name=\"macaroni\",\n version=\"0.0.3\",\n description=\"A lib to help you avoid spaghetti code\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/rudineirk/py-macaroni\",\n author=\"Rudinei Goi Roecker\",\n author_email=\"rudinei.roecker@gmail.com\",\n license=\"MIT\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Intended Audience :: Developers\",\n \"Topic :: Software Development :: Libraries :: Application Frameworks\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n keywords=\"clean cleancode logic functional\",\n packages=[\"macaroni\"],\n)\n","repo_name":"rudineirk/py-macaroni","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9706761878","text":"import os\n\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument, IncludeLaunchDescription\nfrom launch.conditions import IfCondition\nfrom launch.substitutions import LaunchConfiguration\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch_ros.actions import Node\nfrom ament_index_python.packages import get_package_share_directory\n\n\ndef generate_launch_description():\n stretch_core_path = get_package_share_directory('stretch_core')\n stretch_navigation_path = get_package_share_directory('stretch_nav2')\n navigation_bringup_path = get_package_share_directory('nav2_bringup')\n \n teleop_type_param = DeclareLaunchArgument(\n 'teleop_type', default_value=\"joystick\", description=\"how to teleop ('keyboard', 'joystick' or 'none')\")\n \n use_sim_time_param = DeclareLaunchArgument(\n 'use_sim_time',\n default_value='false',\n description='Use simulation/Gazebo clock')\n\n autostart_param = DeclareLaunchArgument(\n 'autostart',\n default_value='false',\n description='Whether to autostart lifecycle nodes on launch')\n\n map_path_param = DeclareLaunchArgument(\n 'map',\n default_value=os.path.join(stretch_navigation_path,\n 'map', 'home2.yaml'),\n description='Full path to the map.yaml file to use for navigation')\n\n params_file_param = DeclareLaunchArgument(\n 'params_file',\n default_value=os.path.join(stretch_navigation_path, 'config', 'nav2_params.yaml'),\n description='Full path to the ROS2 parameters file to use for all launched nodes')\n\n rviz_param = DeclareLaunchArgument('use_rviz', default_value='true', choices=['true', 'false'])\n\n stretch_driver_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_core_path, '/launch/stretch_driver.launch.py']),\n launch_arguments={'mode': 'navigation', 'broadcast_odom_tf': 'True'}.items())\n\n rplidar_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_core_path, '/launch/rplidar.launch.py']))\n\n base_teleop_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_navigation_path, '/launch/teleop_twist.launch.py']),\n launch_arguments={'teleop_type': LaunchConfiguration('teleop_type')}.items())\n\n navigation_bringup_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([stretch_navigation_path, '/launch/bringup_launch.py']),\n launch_arguments={'use_sim_time': LaunchConfiguration('use_sim_time'), \n 'autostart': LaunchConfiguration('autostart'),\n 'map': LaunchConfiguration('map'),\n 
'params_file': LaunchConfiguration('params_file'),\n 'use_rviz': LaunchConfiguration('use_rviz')}.items())\n\n rviz_launch = IncludeLaunchDescription(\n PythonLaunchDescriptionSource([navigation_bringup_path, '/launch/rviz_launch.py']),\n condition=IfCondition(LaunchConfiguration('use_rviz')))\n\n return LaunchDescription([\n teleop_type_param,\n use_sim_time_param,\n autostart_param,\n map_path_param,\n params_file_param,\n rviz_param,\n stretch_driver_launch,\n rplidar_launch,\n base_teleop_launch,\n navigation_bringup_launch,\n rviz_launch,\n ])\n","repo_name":"hello-robot/stretch_ros2","sub_path":"stretch_nav2/launch/navigation.launch.py","file_name":"navigation.launch.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"69"} +{"seq_id":"33583815194","text":"infile = \"input.txt\"\n\nwith open(infile) as f:\n data = [ int(i) for i in f.read().split(\",\") ]\n\nmx = max(data)\nmn = min(data)\nfuel = float(\"inf\")\n\nfor i in range(mn, mx):\n s = 0\n for x in data:\n s += sum([x for x in range(1, abs(x - i) + 1)])\n fuel = min(fuel, s)\nprint(fuel)","repo_name":"calle2021/aoc2021","sub_path":"d7/d7_part2.py","file_name":"d7_part2.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"69828013980","text":"from feature_engineering import *\n\nclass PipelineCorrelation:\n \n \n @staticmethod\n def clean(X, y, options: dict):\n \"\"\"\n mca_keep_features_min\n min_correlation\n tree_percentage_important\n \"\"\"\n \n history = dict()\n \n # Multicolinearity\n multicolinear_columns = getMulticolinearColumns(np.abs(X.corr()), keep_features_min = options.get('mca_keep_features_min', 6))\n X = X.drop(multicolinear_columns, axis=1)\n history['multicolinear_columns'] = multicolinear_columns\n\n # Low correlation\n history['low_correlations'] = dict()\n low_correlations_labels, low_correlations_abs = getLowCorrelationsWithTarget(X, y, min_correlation=options.get('min_correlation', 0.1))\n \n history['low_correlations']['low_correlations_labels'] = low_correlations_labels\n history['low_correlations']['low_correlations_abs'] = low_correlations_abs\n \n \n X = X.drop(low_correlations_labels, axis=1)\n \n \n number_features_to_select = int(len(X.columns) * options.get('tree_percentage_important', 0.8))\n # Important features, the 80% most important\n if number_features_to_select > 0 and number_features_to_select < len(X.columns):\n features_important = getImportantFeatures(X, y, number_features_to_select, max_features = number_features_to_select)\n history['features_important'] = features_important\n X = X.loc[:,features_important]\n else:\n history['features_important'] = X.columns \n \n return X, history\n \n \n# Basic cleaning for consuming\nimport math\n\nclass PA:\n # Pipeline Actions\n DROP = 0\n TYPECAST = 1\n # SCALE = 2\n REPLACE = 3\n RENAME = 4\n REPLACE_WITH_MODE = 5\n REPLACE_WITH_MEDIAN = 6\n RENAME_LOWER_CASE = 7\n CREATE_DUMMIES = 8\n\n PIPELINE_BASE_COMMON_INT = [\n [REPLACE_WITH_MEDIAN,[np.nan]], \n [TYPECAST,np.int16],\n [RENAME_LOWER_CASE],\n ]\n\n PIPELINE_BASE_COMMON_MODE_INT = [\n [REPLACE_WITH_MODE,[np.nan]],\n [TYPECAST,np.int16],\n [RENAME_LOWER_CASE],\n ]\n\n @staticmethod\n def is_real_number(x):\n try:\n cast_x = float(x)\n if math.isnan(cast_x):\n return False\n else:\n return True\n except:\n return False\n\n @staticmethod\n def exec(df: pd.DataFrame, data_scheme:dict):\n df_result: 
pd.DataFrame = df.copy()\n\n columns = df.columns\n for c in columns:\n try:\n column = c\n pipeline_actions = data_scheme.get(column,[])\n \n for pa_group in pipeline_actions:\n pa = pa_group[0]\n\n if pa == PA.DROP:\n df_result.drop(column, axis=1,inplace=True)\n elif pa == PA.REPLACE:\n map_replace = {}\n for key,value in zip(pa_group[1],pa_group[2]):\n map_replace[key] = value\n\n df_result[column].replace(map_replace, inplace=True)\n elif pa == PA.TYPECAST:\n df_result[column] = df_result[column].astype(pa_group[1])\n\n elif pa == PA.REPLACE_WITH_MODE:\n mode = df_result[df_result[column].notna()][column].mode()[0]\n map_replace = {}\n for key in pa_group[1]:\n map_replace[key] = mode\n df_result[column].replace(map_replace, inplace=True)\n\n elif pa == PA.REPLACE_WITH_MEDIAN:\n # works even if exist text in the column\n median = df_result[df_result[column].apply(PA.is_real_number)][column].median()\n map_replace = {}\n for key in pa_group[1]:\n map_replace[key] = median\n df_result[column].replace(map_replace, inplace=True)\n elif pa == PA.RENAME:\n column_new = pa_group[1]\n df_result.rename(columns={column: column_new}, inplace=True)\n column = column_new\n elif pa == PA.RENAME_LOWER_CASE:\n column_new = column.lower()\n df_result.rename(columns={column: column_new}, inplace=True)\n column = column_new\n elif pa == PA.CREATE_DUMMIES:\n df_result = df_result.join(pd.get_dummies(df_result[column], prefix=column))\n df_result.drop(column, axis=1, inplace=True)\n else:\n print(\"ERROR, unknown PA\", pa)\n\n except Exception as e:\n print(f\"Error while processing column '{column}'\", e)\n\n\n return df_result\n","repo_name":"Magody/DataScience","sub_path":"lib/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"18212334749","text":"import sys\nimport time as t\nimport numpy as np\n\nfrom depthy.misc import Normalizer\n\n\ndef compute_census(img_l: np.ndarray = None, img_r: np.ndarray = None, offset: int = 7) -> (np.ndarray, np.ndarray):\n \"\"\"\n Census feature extraction (for more details see https://en.wikipedia.org/wiki/Census_transform)\n\n :param img_l: left image\n :param img_r: right image\n :param offset: pixel offset on the four image borders\n :return: lcensus_values, rcensus_values\n \"\"\"\n\n h, w, c = img_l.shape if len(img_l.shape) == 3 else img_l.shape + (1,)\n\n # convert to float\n img_l, img_r = Normalizer(img_l).norm_fun(), Normalizer(img_r).norm_fun()\n\n lcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)\n rcensus_values = np.zeros(shape=(h, w), dtype=np.uint64)\n print('\\tLeft and right census...', end='')\n sys.stdout.flush()\n dawn = t.time()\n # exclude pixels on the border (they will have no census values)\n for y in range(offset, h-offset):\n for x in range(offset, w-offset):\n\n # extract left block region and subtract current pixel intensity as offset from it\n image = img_l[y - offset:y + offset + 1, x - offset:x + offset + 1]\n roi_offset = image - img_l[y, x]\n # census calculation left image\n lcensus_values[y, x] = vectorized_census(roi_offset)\n\n # extract right block region and subtract current pixel intensity as offset from it\n image = img_r[y - offset:y + offset + 1, x - offset:x + offset + 1]\n roi_offset = image - img_r[y, x]\n # census calculation right image\n rcensus_values[y, x] = vectorized_census(roi_offset)\n\n dusk = t.time()\n print('\\t(done in {:.2f}s)'.format(dusk - dawn))\n\n return 
lcensus_values, rcensus_values\n\n\ndef vectorized_census(roi: np.ndarray = None) -> int:\n \"\"\"\n Compute census in a numpy-vectorized fashion.\n\n :param roi: Region of Interest (RoI)\n :return: census value\n \"\"\"\n\n if len(roi.shape) != 2:\n raise Exception('Data must be 2-dimensional')\n\n # binary census vector\n b = np.array(roi < 0).flatten()\n # remove central value\n central_idx = (roi.shape[0]*roi.shape[1])//2\n b = np.delete(b, central_idx)\n # convert binary vector to integer\n num = b.dot(1 << np.arange(b.size)[::-1])\n\n return num\n","repo_name":"hahnec/depthy","sub_path":"depthy/stereo/feature_methods.py","file_name":"feature_methods.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"69"} +{"seq_id":"14323504024","text":"import tkinter as tk\nroot = tk.Tk()\nbackClr = \"white\"\nfrontClr = \"black\"\nmidClr = \"EEEEEE\"\n\ncWidth = 100\ncHeight = 100\nx1 = 0\ny1 = 0\nx2 = cWidth\ny2 = cHeight\nbWidth = 25\next = 28\n\ncanvas = tk.Canvas(root, width=cWidth, height= cHeight, background=backClr)\ncanvas.pack()\n\ncanvas.create_oval(x1, y1, x2, y2, fill=midClr , outline=midClr)\ncanvas.create_arc(x1, y1, x2, y2, fill=frontClr , extent=ext)\ncanvas.create_oval(x1 + bWidth, y1 + bWidth, x2 - bWidth ,y2 - bWidth, fill = backClr, outline = backClr)\ncanvas.create_text(cWidth / 2, cHeight/2,text=ext)\ntk.mainloop()","repo_name":"ftp5500/circular-progress-bar","sub_path":"circular_progressPar.py","file_name":"circular_progressPar.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"8834225408","text":"import pytest\n\nfrom v6_carrier_py.encryption import salthash, encrypt_identifiers\nimport pandas as pd\n\n\ndef test_salthash():\n salt = 'a' * 128\n string = 'hash me please'\n result = salthash(salt, string)\n assert result == 'EQ2Fczw0MVu0zqH30I0Vffbkga7SJO9tmnWjU2ZNf9gJeHa' \\\n 'EETF9wJY13YPqdhsVwSMK1v+zVYYB6cgjDjHIjA=='\n\n\nclass TestEncryptIdentifiers:\n test_df = pd.DataFrame.from_dict({\n 'identifier1': ['a', 'b'],\n 'identifier2': ['c', 'd'],\n 'value1': [1, 2]\n })\n test_identifiers = ['identifier1', 'identifier2']\n test_salt = 'a' * 128\n\n def test_encrypt_identifiers(self):\n result_df = encrypt_identifiers(self.test_df, self.test_salt,\n identifiers=self.test_identifiers)\n for identifier in self.test_identifiers:\n assert identifier not in result_df\n assert 'encrypted_identifier' in result_df\n\n def test_encrypt_identifiers_wrong_identifiers(self):\n test_wrong_identifiers = ['wrong_variable']\n with pytest.raises(KeyError):\n encrypt_identifiers(self.test_df, self.test_salt,\n identifiers=test_wrong_identifiers)\n\n def test_encrypt_identifiers_null_values(self):\n test_df = self.test_df.copy()\n test_df['identifier1'] = None\n with pytest.raises(ValueError):\n encrypt_identifiers(test_df, self.test_salt,\n identifiers=self.test_identifiers)\n","repo_name":"CARRIER-project/vantage6-algorithms","sub_path":"tests/test_encryption.py","file_name":"test_encryption.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"9223731962","text":"\"\"\"\nadmin.py\n=======================================\nThis module contains the classes which register Locations and Coord to the\ndjango Admin framework which allows superusers to modify and edit data sorted in\nthe database tables 
created from the models.\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.http import urlencode\nfrom django.utils.safestring import SafeString\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.contrib.auth.models import User\nfrom exeterDomination.models import Locations, CoOrds\n\n\n\nclass CustomUserAdmin(UserAdmin):\n \"\"\"\n This class is required for an Admin to create their own user of any type\n They can also assign buildings to them\n \"\"\"\n list_display = [\n 'id',\n 'username',\n 'is_active',\n 'date_joined',\n 'is_staff',\n \"claimedBy\"]\n\n def claimedBy(self, obj):\n \"\"\"\n This function is used to assign a building to a newly created user\n \"\"\"\n url = (\n reverse(\"admin:exeterDomination_locations_changelist\")\n + \"?\"\n + urlencode({\"claimedBy_id\": obj.id})\n )\n return format_html('{} Locations Claimed', url,\n len(Locations.objects.filter(claimedBy_id=obj.id)))\n\n\n\nadmin.site.unregister(User)\nCustomUserAdmin.claimedBy.short_description = \"Current Claims\"\nadmin.site.register(User, CustomUserAdmin)\n\n\n@admin.register(Locations)\nclass LocationsAdmin(admin.ModelAdmin):\n \"\"\"\n This class is required to register the Locations Model with the Django Admin.\n This therefore allows any superusers (Game Keepers) acessing the project via\n the admin page to modify the Locations in the database.\n \"\"\"\n list_display = (\n \"name\",\n \"topRightCoordinate\",\n \"bottomLeftCoordinate\",\n \"claimedLink\",\n )\n\n def claimedLink(self, obj):\n \"\"\"\n This function is used to give the class it's functionality in changing\n locations in the database\n \"\"\"\n url = (\n reverse(\"admin:auth_user_changelist\")\n + \"?\"\n + urlencode({\"username\": f\"{obj.claimedBy}\"})\n )\n return format_html('{}', url, obj.claimedBy)\n\n claimedLink.short_description = \"Claimed By\"\n\n\n@admin.register(CoOrds)\nclass CoOrdsAdmin(admin.ModelAdmin):\n \"\"\"\n This class is required to register the Coord Model with the Django Admin.\n This therefore allows any superusers (Game Keepers) acessing the project via\n the admin page to modify the Coord in the database.\n \"\"\"\n list_display = (\"id\", \"longitude\", \"latitude\", \"linked_location\")\n\n def linked_location(self, obj) -> SafeString:\n \"\"\"\n This function allows the superuser to change coordinates for\n a specific location\n \"\"\"\n if len(Locations.objects.filter(bottomLeftCoordinate_id=obj.id)) == 1:\n url = (\n reverse(\"admin:exeterDomination_locations_changelist\")\n + \"?\"\n + urlencode({\"bottomLeftCoordinate_id\": f\"{obj.id}\"})\n )\n return format_html(\n '{}',\n url,\n Locations.objects.get(\n bottomLeftCoordinate=obj.id).name)\n elif len(Locations.objects.filter(topRightCoordinate_id=obj.id)):\n url = (\n reverse(\"admin:exeterDomination_locations_changelist\")\n + \"?\"\n + urlencode({\"topRightCoordinate_id\": f\"{obj.id}\"})\n )\n return format_html(\n '{}',\n url,\n Locations.objects.get(\n topRightCoordinate_id=obj.id).name)\n","repo_name":"TreeveWhite/ecm2434-project","sub_path":"TheProject/exeterDomination/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"69"} +{"seq_id":"72209002141","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport tweepy, time, sys \n\nargfile = str(sys.argv[1])\n\n#connect to Twitter API:\nCONSUMER_KEY = 'secret'\nCONSUMER_SECRET = 
'secret'\nACCESS_KEY = 'secret'\nACCESS_SECRET = 'secret'\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\napi = tweepy.API(auth)\n\nfilename=open(argfile,'r')\nf=filename.readlines()\nfilename.close()\n\nfor line in f:\n trends1 = api.trends_place(1)\n hashtags = [x['name'] for x in trends1[0]['trends'] if x['name'].startswith('#')]\n trend_hashtag = None\n if len(hashtags[0]) <= 20:\n trend_hashtag = hashtags[0]\n elif len(hashtags[1]) <= 20:\n trend_hashtag = hashtags[1]\n elif len(hashtags[2]) <= 20:\n trend_hashtag = hashtags[2]\n if trend_hashtag:\n api.update_status(line + trend_hashtag)\n time.sleep(3*60*60)\n else:\n time.sleep(15*60)\n","repo_name":"bcrvc/gilles_delulz","sub_path":"bot_improved.py","file_name":"bot_improved.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"69"} +{"seq_id":"70313413981","text":"import pygame\n\nfrom Game import Game\n\nGO = True\nCOUNT = 0\n\ngame = Game(\"level1.txt\")\n\nwhile GO:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n GO = False\n\n game.move_particles(COUNT)\n\n COUNT += 1\n if(COUNT >= 99):\n COUNT = 0\n\n game.draw_window()\n \ngame.quitAlgo()","repo_name":"billonalex/AlgoGen_Parcours","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"16908607345","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\n\n\nfrom sklearn.model_selection import train_test_split\nfrom datetime import datetime\nstart_time = datetime.now()\n\n\nclass Logistics_Gradient_Descent():\n \n\n def __init__(self):\n \n self.bias_weights = None\n self.log_bias_weights = None\n self.log_error = None\n \n def prediction(self, X):\n linear_prediction = np.dot(X, self.bias_weights.T)\n predict = self.sigmoid_activation(linear_prediction)\n predict = np.round(predict)\n return predict\n \n def sigmoid_activation(self, predicted):\n\n sigmoid_function = np.array([])\n for i in range(len(predicted)):\n \n sigmoid_function = np.append(sigmoid_function, 1.0 / (1.0 + np.exp(-predicted[i])))\n \n \n #return 1.0 if sigmoid_function >= .5 else 0\n return sigmoid_function.reshape(-1,1)\n \n #Cost/Loss function: Logistic\n def cost_function(self, actual, predicted, X):\n \n m = len(actual)\n\n cost = - np.sum(actual * np.log(predicted) + (1-actual) * np.log(1-predicted)) / m \n # for i in range(len(actual)):\n # cost += actual[i] * np.log(self.sigmoid_activation(predicted[i])) + (1-actual[i]) * np.log(1-self.sigmoid_activation(predicted[i]))\n \n # return - cost / m\n \n def gradient_descent(self, actual, X, n_iterations = 1000 ,learn_rate = 0.1 ):\n \n \n self.bias_weights = np.array(np.zeros(X.shape[1]))\n\n self.bias_weights = self.bias_weights.reshape(1,X.shape[1])\n \n self.log_bias_weights = []\n self.log_error = []\n \n m = len(actual)\n for _ in range(n_iterations):\n \n linear_prediction = np.dot(X, self.bias_weights.T)\n sigmoid_prediction = self.sigmoid_activation(linear_prediction)\n error = sigmoid_prediction - actual\n\n gradient = np.dot(error.T, X) / m\n \n self.bias_weights = self.bias_weights - learn_rate * gradient\n \n self.log_bias_weights.append(self.bias_weights)\n self.log_error.append(error)\n\n return self.log_bias_weights, self.log_error\n\n \ndef normalize(X):\n X_normed = (X - X.min()) / (X.max() - X.min())\n \n 
return X_normed\n\n#load dataset\ndataset2 = pd.read_csv('pima-indians-diabetes.csv',header=None)\ndataset = normalize(dataset2)\n\n#seperate the features\nX = np.array(dataset.iloc[:,0:dataset.shape[1]-1])\n\nX = np.pad(X, [(0,0),(1,0)], mode='constant', constant_values = 1)\n\n\n#seperate the values\ny = np.array(dataset.iloc[:,-1])\ny = y.reshape(-1,1)\n\nX_train, X_test, y_train, y_test= train_test_split(X, y, test_size = 0.1)\n\n\n#initialize the Logistic Regression Class\n\nLR = Logistics_Gradient_Descent()\n\nlog_bw, log_error = LR.gradient_descent(y_train, X_train, n_iterations=100, learn_rate = 0.1)\n\n\nprediction = LR.prediction(X_test)\n\ncorrect = 0\nfor i in range(len(y_test)):\n if prediction[i] == y_test[i]:\n correct += 1\nprint(correct / float(len(y_test)) * 100.0)\n\n\n\nend_time = datetime.now()\n\nprint(\"--- %s seconds ---\" % (end_time - start_time))","repo_name":"thonghuunguyen/MachineLearning","sub_path":"Logistic_Regression/class-logistics-regression.py","file_name":"class-logistics-regression.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"39597267211","text":"import tornado.web\nimport tornado.ioloop\n\nclass basicRequestHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Sugeng Enjing Cak !!!\")\n \nclass queryStringRequestHandler(tornado.web.RequestHandler):\n def get(self):\n n = int(self.get_argument(\"n\"))\n r = \"bejo\" if n % 2 else \"even\"\n self.write(\"nomere \" + str(n) + \" adalah \" + r)\n \nclass resourceRequestHandler(tornado.web.RequestHandler):\n def get(self, id):\n self.write(\"Kedah menika nomer \" + id )\n \nclass staticRequestHandler(tornado.web.RequestHandler):\n def get(self):\n self.render(\"index.html\")\n \n\nif __name__ == '__main__':\n app = tornado.web.Application([\n (r\"/\", basicRequestHandler), \n (r\"/site\", staticRequestHandler),\n (r\"/isEven\", queryStringRequestHandler),\n (r\"/togel/([0-9]+)\", resourceRequestHandler)\n ])\n \n app.listen(8881)\n print(\"I'm listening on port 8881\")\n tornado.ioloop.IOLoop.current().start()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# import motor.motor_tornado\n# import tornado.ioloop\n# import tornado.web\n\n# client = motor.motor_tornado.MotorClient()\n# client = motor.motor_tornado.MotorClient('localhost', 27017)\n\n# # start with connection url\n# # client = motor.motor_tornado.MotorClient('mongodb://localhost:27017')\n# # client = motor.motor_tornado.MotorClient('mongodb://host1,host2/?replicaSet=my-replicaset-name')\n\n# db = client.test_database\n# db = client[\"test_database\"]\n# db = motor.motor_tornado.MotorClient().test_database\n\n# class MainHandler(tornado.web.RequestHandler):\n# def get(self):\n# self.write(\"Hello, world\")\n \n# application = tornado.web.Application([\n# (r'/', MainHandler)\n# ], db=db)\n\n# application.listen(8888)\n# tornado.ioloop.IOLoop.current().start()\n\n# class MainHandler(tornado.web.RequestHandler):\n# def get(self):\n# db = self.settings['db']","repo_name":"oksipjogja/praxis-academy","sub_path":"kemampuan-dasar/kemampuan-dasar-2/minggu-5/hari-2/server/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7532480537","text":"from speedtest import main\n\n\ndef some_executable(a):\n if callable(a):\n return a()\n else:\n print('Is not callable')\n\n\ndef 
finditer(text, pattern):\n pos = 1\n while True:\n pos = text.find(pattern, pos + 1)\n if pos < 1:\n yield pos\n\n\ndef some_function_of_sorting(some_iterable):\n alist = list(some_iterable)\n alist.sort()\n return alist\n\n\nif __name__ == '__main__':\n some_executable(main)\n","repo_name":"Erickdrakus93/Script_Coroutines","sub_path":"some_executable.py","file_name":"some_executable.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"69"} +{"seq_id":"7841432486","text":"#!/usr/bin/env python3\n\"\"\"maji.\n\nUsage:\n maji init\n maji make \n maji render